From 66e46c8068d601127cefe12c516c7c9ad5fabc55 Mon Sep 17 00:00:00 2001 From: Paige Calvert Date: Tue, 11 Mar 2025 10:16:07 -0600 Subject: [PATCH 1/9] llm txt file testing --- package.json | 2 +- repomix-output.txt | 60942 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 60943 insertions(+), 1 deletion(-) create mode 100644 repomix-output.txt diff --git a/package.json b/package.json index 04b574a858..48bd905c1a 100644 --- a/package.json +++ b/package.json @@ -5,7 +5,7 @@ "scripts": { "docusaurus": "docusaurus", "start": "docusaurus start", - "build": "docusaurus build", + "build": "repomix && docusaurus build", "swizzle": "docusaurus swizzle", "deploy": "docusaurus deploy", "clear": "docusaurus clear", diff --git a/repomix-output.txt b/repomix-output.txt new file mode 100644 index 0000000000..f84c0b5ceb --- /dev/null +++ b/repomix-output.txt @@ -0,0 +1,60942 @@ +This file is a merged representation of the entire codebase, combined into a single document by Repomix. + +================================================================ +File Summary +================================================================ + +Purpose: +-------- +This file contains a packed representation of the entire repository's contents. +It is designed to be easily consumable by AI systems for analysis, code review, +or other automated processes. + +File Format: +------------ +The content is organized as follows: +1. This summary section +2. Repository information +3. Directory structure +4. Multiple file entries, each consisting of: + a. A separator line (================) + b. The file path (File: path/to/file) + c. Another separator line + d. The full contents of the file + e. A blank line + +Usage Guidelines: +----------------- +- This file should be treated as read-only. Any changes should be made to the + original repository files, not this packed version. +- When processing this file, use the file path to distinguish + between different files in the repository. +- Be aware that this file may contain sensitive information. Handle it with + the same level of security as you would the original repository. + +Notes: +------ +- Some files may have been excluded based on .gitignore rules and Repomix's configuration +- Binary files are not included in this packed representation. 
Please refer to the Repository Structure section for a complete list of file paths, including binary files +- Files matching patterns in .gitignore are excluded +- Files matching default ignore patterns are excluded + +Additional Info: +---------------- + +================================================================ +Directory Structure +================================================================ +.github/ + workflows/ + algolia-crawl.yml + app-manager-release-notes.yml + auto-label.yml + kubernetes-installer-release-notes.yml + replicated-sdk-release-notes.yml + vendor-portal-release-notes.yml + dependabot.yml +docs/ + enterprise/ + auth-changing-passwords.md + auth-configuring-rbac.md + auth-identity-provider.md + cluster-management-add-nodes.md + delete-admin-console.md + embedded-manage-nodes.mdx + embedded-tls-certs.mdx + gitops-managing-secrets.mdx + gitops-workflow.mdx + image-registry-kurl.md + image-registry-rate-limits.md + image-registry-settings.mdx + installing-embedded-air-gap.mdx + installing-embedded-automation.mdx + installing-embedded-requirements.mdx + installing-embedded.mdx + installing-existing-cluster-airgapped.mdx + installing-existing-cluster-automation.mdx + installing-existing-cluster.mdx + installing-general-requirements.mdx + installing-kurl-airgap.mdx + installing-kurl-automation.mdx + installing-kurl-requirements.mdx + installing-kurl.mdx + installing-overview.md + installing-stateful-component-requirements.md + monitoring-access-dashboards.mdx + monitoring-applications.mdx + monitoring-external-prometheus.md + sbom-validating.md + snapshots-config-workflow.md + snapshots-configuring-hostpath.md + snapshots-configuring-nfs.md + snapshots-creating.md + snapshots-restoring-full.mdx + snapshots-storage-destinations.md + snapshots-troubleshooting-backup-restore.md + snapshots-updating-with-admin-console.md + snapshots-velero-cli-installing.md + snapshots-velero-installing-config.mdx + status-viewing-details.md + troubleshooting-an-app.mdx + updating-app-manager.mdx + updating-apps.mdx + updating-embedded.mdx + updating-kurl-about.mdx + updating-kurl.mdx + updating-licenses.md + updating-patching-with-kustomize.md + updating-tls-cert.md + partials/ + airgap/ + _airgap-bundle.mdx + application-links/ + _nginx-deployment.mdx + _nginx-k8s-app.mdx + _nginx-kots-app.mdx + _nginx-service.mdx + ci-cd/ + _build-source-code.mdx + _test-recs.mdx + cmx/ + _openshift-pool.mdx + _overview.mdx + _prerequisites.mdx + _supported-clusters-overview.mdx + collab-repo/ + _collab-existing-user.mdx + _collab-rbac-important.mdx + _collab-rbac-resources-important.mdx + _collab-repo-about.mdx + config/ + _affixExample.mdx + _defaultExample.mdx + _helpTextExample.mdx + _hiddenExample.mdx + _item-types.mdx + _nameExample.mdx + _property-when.mdx + _randomStringNote.mdx + _readonlyExample.mdx + _recommendedExample.mdx + _regexValidationExample.mdx + _requiredExample.mdx + _typeExample.mdx + _valueExample.mdx + _when-note.mdx + _when-requirements.mdx + _whenExample.mdx + configValues/ + _boolExample.mdx + _config-values-procedure.mdx + _configValuesExample.mdx + _fileExample.mdx + _passwordExample.mdx + _selectOneExample.mdx + _textareaExample.mdx + _textExample.mdx + custom-domains/ + _wizard.mdx + custom-resource-application/ + _additionalImages.mdx + _additionalNamespaces.mdx + _allowRollback.mdx + _graphs-templates.mdx + _graphs.mdx + _icon.mdx + _minKotsVersion.mdx + _ports-applicationURL.mdx + _ports-kurl-note.mdx + _ports-localPort.mdx + _ports-serviceName.mdx + 
_ports-servicePort.mdx + _ports.mdx + _proxyRegistryDomain.mdx + _releaseNotes.mdx + _replicatedRegistryDomain.mdx + _requireMinimalRBACPrivileges.mdx + _servicePort-note.mdx + _statusInformers.mdx + _supportMinimalRBACPrivileges.mdx + _targetKotsVersion.mdx + _title.mdx + customers/ + _change-channel.mdx + _download.mdx + embedded-cluster/ + _definition.mdx + _ec-config.mdx + _multi-node-ha-arch.mdx + _port-reqs.mdx + _proxy-install-limitations.mdx + _proxy-install-reqs.mdx + _requirements.mdx + _update-air-gap-admin-console.mdx + _update-air-gap-cli.mdx + _update-air-gap-overview.mdx + _update-overview.mdx + _warning-do-not-downgrade.mdx + getting-started/ + _create-promote-release.mdx + _csdl-overview.mdx + _gitea-ec-config.mdx + _gitea-helmchart-cr-ec.mdx + _gitea-helmchart-cr.mdx + _gitea-k8s-app-cr.mdx + _gitea-kots-app-cr-ec.mdx + _gitea-kots-app-cr.mdx + _grafana-config.mdx + _grafana-helmchart.mdx + _grafana-k8s-app.mdx + _grafana-kots-app.mdx + _kubernetes-training.mdx + _labs-intro.mdx + _related-topics.mdx + _replicated-definition.mdx + _test-your-changes.mdx + _tutorial-intro.mdx + _vm-requirements.mdx + gitops/ + _gitops-not-recommended.mdx + helm/ + _gitops-limitation.mdx + _helm-builder-requirements.mdx + _helm-cr-builder-airgap-intro.mdx + _helm-cr-builder-example.mdx + _helm-cr-chart-name.mdx + _helm-cr-chart-release-name.mdx + _helm-cr-chart-version.mdx + _helm-cr-chart.mdx + _helm-cr-exclude.mdx + _helm-cr-namespace.mdx + _helm-cr-optional-values-recursive-merge.mdx + _helm-cr-optional-values-when.mdx + _helm-cr-optional-values.mdx + _helm-cr-upgrade-flags.mdx + _helm-cr-values.mdx + _helm-cr-weight-limitation.mdx + _helm-cr-weight.mdx + _helm-definition.mdx + _helm-install-beta.mdx + _helm-install-prereqs.mdx + _helm-package.mdx + _helm-template-limitation.mdx + _helm-version-limitation.mdx + _hook-weights-limitation.mdx + _hooks-limitation.mdx + _installer-only-annotation.mdx + _kots-helm-cr-description.mdx + _replicated-deprecated.mdx + _replicated-helm-migration.mdx + _set-values-config-example.mdx + _set-values-license-example.mdx + _v2-native-helm-cr-example.mdx + image-registry/ + _docker-compatibility.mdx + _image-registry-settings.mdx + install/ + _access-admin-console.mdx + _airgap-bundle-build.mdx + _airgap-bundle-download.mdx + _airgap-bundle-view-contents.mdx + _airgap-license-download.mdx + _automation-intro-embedded.mdx + _automation-intro-existing.mdx + _config-values-procedure.mdx + _download-kotsadm-bundle.mdx + _download-kurl-bundle.mdx + _ec-prereqs.mdx + _embedded-ha-step.mdx + _embedded-login-password.mdx + _extract-kurl-bundle.mdx + _firewall-openings-intro.mdx + _firewall-openings.mdx + _ha-load-balancer-about.mdx + _ha-load-balancer-prereq.mdx + _install-kots-cli-airgap.mdx + _install-kots-cli.mdx + _intro-air-gap.mdx + _intro-embedded.mdx + _intro-existing.mdx + _kots-airgap-version-match.mdx + _kots-install-prompts.mdx + _kubernetes-compatibility.mdx + _kurl-about.mdx + _license-file-prereq.mdx + _placeholder-airgap-bundle.mdx + _placeholder-app-name-UI.mdx + _placeholder-namespace-embedded.mdx + _placeholder-namespace-existing.mdx + _placeholder-ro-creds.mdx + _placeholders-global.mdx + _prereqs-embedded-cluster.mdx + _prereqs-existing-cluster.mdx + _provision-cluster-intro.mdx + _push-kotsadm-images.mdx + instance-insights/ + _airgap-telemetry.mdx + _notifications-about.mdx + _supported-resources-status.mdx + kots/ + _admin-console-about.mdx + _download-portal-about.mdx + _embedded-kubernetes-definition.mdx + _kots-definition.mdx + 
_kots-entitlement-note.mdx + kots-cli/ + _ensure-rbac.mdx + _help.mdx + _kotsadm-namespace.mdx + _kotsadm-registry.mdx + _registry-password.mdx + _registry-username.mdx + _skip-rbac-check.mdx + _strict-sec-context-yaml.mdx + _strict-security-context.mdx + _use-minimal-rbac.mdx + _wait-duration.mdx + _with-minio.mdx + kurl/ + _installers.mdx + _kurl-availability.mdx + _kurl-definition.mdx + linter-rules/ + _allow-privilege-escalation.mdx + _application-icon.mdx + _application-spec.mdx + _application-statusInformers.mdx + _config-option-invalid-regex-validator.mdx + _config-option-invalid-type.mdx + _config-option-is-circular.mdx + _config-option-password-type.mdx + _config-option-regex-validator-invalid-type.mdx + _config-spec.mdx + _container-image-latest-tag.mdx + _container-image-local-image-name.mdx + _container-resource-limits.mdx + _container-resource-requests.mdx + _container-resources.mdx + _deprecated-kubernetes-installer-version.mdx + _hardcoded-namespace.mdx + _invalid_type.mdx + _invalid-helm-release-name.mdx + _invalid-kubernetes-installer.mdx + _invalid-min-kots-version.mdx + _invalid-rendered-yaml.mdx + _invalid-target-kots-version.mdx + _invalid-yaml.mdx + _linter-definition.mdx + _may-contain-secrets.mdx + _missing-api-version-field.mdx + _missing-kind-field.mdx + _preflight-spec.mdx + _privileged.mdx + _repeat-option-malformed-yamlpath.mdx + _repeat-option-missing-template.mdx + _repeat-option-missing-valuesByGroup.mdx + _replicas-1.mdx + _resource-limits-cpu.mdx + _resource-limits-memory.mdx + _resource-requests-cpu.mdx + _resource-requests-memory.mdx + _troubleshoot-spec.mdx + _volume-docker-sock.mdx + _volumes-host-paths.mdx + monitoring/ + _limitation-ec.mdx + _overview-prom.mdx + preflights/ + _analyzers-note.mdx + _http-requests-cr.mdx + _http-requests-secret.mdx + _k8s-distro-cr.mdx + _k8s-distro-secret.mdx + _k8s-version-cr.mdx + _k8s-version-secret.mdx + _mysql-cr.mdx + _mysql-secret.mdx + _node-count-secret.mdx + _node-cpu-cr.mdx + _node-cpu-secret.mdx + _node-ephem-storage-cr.mdx + _node-ephem-storage-secret.mdx + _node-mem-cr.mdx + _node-mem-secret.mdx + _node-req-cr.mdx + _node-req-secret.mdx + _node-storage-cr.mdx + _node-storage-secret.mdx + _preflight-sb-helm-templates.mdx + _preflights-add-analyzers.mdx + _preflights-define-xref.mdx + _preflights-define.mdx + _preflights-sb-about.mdx + _preflights-sb-note.mdx + _preflights-spec-locations.mdx + _preflights-strict.mdx + proxy-service/ + _step-creds.mdx + _step-custom-domain.mdx + redactors/ + _redactors-about.mdx + releases/ + _required-releases-description.mdx + _required-releases-limitations.mdx + _version-label-reqs-helm.mdx + replicated-cli/ + _app.mdx + _authorize-with-token-note.mdx + _authtype.mdx + _chart-yaml-dir-reqs.mdx + _help.mdx + _login.mdx + _logout.mdx + _output.mdx + _password-stdin.mdx + _password.mdx + _skip-validation.mdx + _sudo-install.mdx + _token-stdin.mdx + _token.mdx + _username.mdx + _verify-install.mdx + _yaml-dir.mdx + replicated-sdk/ + _401-unauthorized.mdx + _dependency-yaml.mdx + _integration-mode-install.mdx + _kots-version-req.mdx + _overview.mdx + _registry-logout.mdx + _sdk-values.mdx + snapshots/ + _checkVersion.mdx + _installVelero.mdx + _limitation-cli-restores.mdx + _limitation-dr.mdx + _limitation-install-method.mdx + _limitation-no-ec-support.mdx + _limitation-os.mdx + _node-agent-mem-limit.mdx + _registryCredentialsNote.mdx + _resticDaemonSet.mdx + _restore-types.mdx + _restoreTable.mdx + _step-get-backups.mdx + _step-restore.mdx + _updateDefaultStorage.mdx + 
status-informers/ + _aggregate-status-intro.mdx + _aggregateStatus.mdx + _statusesTable.mdx + support-bundles/ + _configmap-note.mdx + _customize-support-bundle-spec.mdx + _deploy-status-cr.mdx + _deploy-status-secret.mdx + _ec-support-bundle-intro.mdx + _generate-bundle-admin-console.mdx + _generate-bundle-default-kots.mdx + _generate-bundle-ec.mdx + _generate-bundle-host.mdx + _generate-bundle.mdx + _http-requests-cr.mdx + _http-requests-secret.mdx + _install-plugin.mdx + _k8s-version-cr.mdx + _k8s-version-secret.mdx + _logs-limits-cr.mdx + _logs-limits-secret.mdx + _logs-selectors-cr.mdx + _logs-selectors-secret.mdx + _node-resources-cr.mdx + _node-resources-secret.mdx + _node-status-cr.mdx + _node-status-secret.mdx + _redis-mysql-cr.mdx + _redis-mysql-secret.mdx + _run-pods-cr.mdx + _run-pods-secret.mdx + _support-bundle-add-analyzers.mdx + _support-bundle-add-logs.mdx + _support-bundle-custom-collectors.mdx + template-functions/ + _go-sprig.mdx + _integer-comparison.mdx + _ne-comparison.mdx + _string-comparison.mdx + _use-cases.mdx + updating/ + _admin-console-air-gap.mdx + _admin-console.mdx + _installerRequirements.mdx + _upgradePrompt.mdx + vendor-api/ + _api-about.mdx + _team-token-note.mdx + reference/ + cron-expressions.md + custom-resource-about.md + custom-resource-application.mdx + custom-resource-backup.md + custom-resource-config.mdx + custom-resource-helmchart-v2.mdx + custom-resource-helmchart.mdx + custom-resource-identity.md + custom-resource-lintconfig.mdx + custom-resource-preflight.md + custom-resource-redactor.md + embedded-cluster-install.mdx + embedded-config.mdx + kots-cli-admin-console-garbage-collect-images.md + kots-cli-admin-console-generate-manifests.mdx + kots-cli-admin-console-index.md + kots-cli-admin-console-push-images.md + kots-cli-admin-console-upgrade.mdx + kots-cli-backup-index.md + kots-cli-backup-ls.md + kots-cli-docker-ensure-secret.md + kots-cli-docker-index.md + kots-cli-download.md + kots-cli-enable-ha.md + kots-cli-get-apps.md + kots-cli-get-backups.md + kots-cli-get-config.md + kots-cli-get-index.md + kots-cli-get-restores.md + kots-cli-get-versions.md + kots-cli-getting-started.md + kots-cli-global-flags.md + kots-cli-identity-service-enable-shared-password.md + kots-cli-identity-service-index.md + kots-cli-install.mdx + kots-cli-pull.md + kots-cli-remove.md + kots-cli-reset-password.md + kots-cli-reset-tls.md + kots-cli-restore-index.md + kots-cli-restore-ls.md + kots-cli-set-config.mdx + kots-cli-set-index.md + kots-cli-upload.mdx + kots-cli-upstream-download.md + kots-cli-upstream-upgrade.mdx + kots-cli-upstream.md + kots-cli-velero-configure-aws-s3.md + kots-cli-velero-configure-azure.md + kots-cli-velero-configure-gcp.md + kots-cli-velero-configure-hostpath.mdx + kots-cli-velero-configure-internal.md + kots-cli-velero-configure-nfs.mdx + kots-cli-velero-configure-other-s3.mdx + kots-cli-velero-ensure-permissions.md + kots-cli-velero-index.md + kots-cli-velero-print-fs-instructions.md + linter.mdx + replicated-cli-api-get.mdx + replicated-cli-api-patch.mdx + replicated-cli-api-post.mdx + replicated-cli-api-put.mdx + replicated-cli-api.mdx + replicated-cli-app-create.mdx + replicated-cli-app-ls.mdx + replicated-cli-app-rm.mdx + replicated-cli-app.mdx + replicated-cli-channel-create.mdx + replicated-cli-channel-demote.mdx + replicated-cli-channel-disable-semantic-versioning.mdx + replicated-cli-channel-enable-semantic-versioning.mdx + replicated-cli-channel-inspect.mdx + replicated-cli-channel-ls.mdx + replicated-cli-channel-rm.mdx + 
replicated-cli-channel-un-demote.mdx + replicated-cli-channel.mdx + replicated-cli-cluster-addon-create-object-store.mdx + replicated-cli-cluster-addon-create.mdx + replicated-cli-cluster-addon-ls.mdx + replicated-cli-cluster-addon-rm.mdx + replicated-cli-cluster-addon.mdx + replicated-cli-cluster-create.mdx + replicated-cli-cluster-kubeconfig.mdx + replicated-cli-cluster-ls.mdx + replicated-cli-cluster-nodegroup-ls.mdx + replicated-cli-cluster-nodegroup.mdx + replicated-cli-cluster-port-expose.mdx + replicated-cli-cluster-port-ls.mdx + replicated-cli-cluster-port-rm.mdx + replicated-cli-cluster-port.mdx + replicated-cli-cluster-prepare.mdx + replicated-cli-cluster-rm.mdx + replicated-cli-cluster-shell.mdx + replicated-cli-cluster-update-nodegroup.mdx + replicated-cli-cluster-update-ttl.mdx + replicated-cli-cluster-update.mdx + replicated-cli-cluster-upgrade.mdx + replicated-cli-cluster-versions.mdx + replicated-cli-cluster.mdx + replicated-cli-completion.mdx + replicated-cli-customer-archive.mdx + replicated-cli-customer-create.mdx + replicated-cli-customer-download-license.mdx + replicated-cli-customer-inspect.mdx + replicated-cli-customer-ls.mdx + replicated-cli-customer-update.mdx + replicated-cli-customer.mdx + replicated-cli-default-clear-all.mdx + replicated-cli-default-clear.mdx + replicated-cli-default-set.mdx + replicated-cli-default-show.mdx + replicated-cli-default.mdx + replicated-cli-installer-create.mdx + replicated-cli-installer-ls.mdx + replicated-cli-installer.mdx + replicated-cli-installing.mdx + replicated-cli-instance-inspect.mdx + replicated-cli-instance-ls.mdx + replicated-cli-instance-tag.mdx + replicated-cli-instance.mdx + replicated-cli-login.mdx + replicated-cli-logout.mdx + replicated-cli-registry-add-dockerhub.mdx + replicated-cli-registry-add-ecr.mdx + replicated-cli-registry-add-gar.mdx + replicated-cli-registry-add-gcr.mdx + replicated-cli-registry-add-ghcr.mdx + replicated-cli-registry-add-other.mdx + replicated-cli-registry-add-quay.mdx + replicated-cli-registry-add.mdx + replicated-cli-registry-ls.mdx + replicated-cli-registry-rm.mdx + replicated-cli-registry-test.mdx + replicated-cli-registry.mdx + replicated-cli-release-compatibility.mdx + replicated-cli-release-create.mdx + replicated-cli-release-download.mdx + replicated-cli-release-inspect.mdx + replicated-cli-release-lint.mdx + replicated-cli-release-ls.mdx + replicated-cli-release-promote.mdx + replicated-cli-release-test.mdx + replicated-cli-release-update.mdx + replicated-cli-release.mdx + replicated-cli-version-upgrade.mdx + replicated-cli-version.mdx + replicated-cli-vm-create.mdx + replicated-cli-vm-ls.mdx + replicated-cli-vm-port-expose.mdx + replicated-cli-vm-port-ls.mdx + replicated-cli-vm-port-rm.mdx + replicated-cli-vm-port.mdx + replicated-cli-vm-rm.mdx + replicated-cli-vm-update-ttl.mdx + replicated-cli-vm-update.mdx + replicated-cli-vm-versions.mdx + replicated-cli-vm.mdx + replicated-sdk-apis.md + replicated.mdx + template-functions-about.mdx + template-functions-config-context.md + template-functions-examples.mdx + template-functions-identity-context.md + template-functions-kurl-context.md + template-functions-license-context.md + template-functions-static-context.md + vendor-api-using.md + release-notes/ + rn-app-manager.md + rn-embedded-cluster.md + rn-kubernetes-installer.md + rn-replicated-sdk.md + rn-vendor-platform.md + rn-whats-new.md + templates/ + procedure.md + process-multiple-procedures.md + release-notes.md + vendor/ + admin-console-adding-buttons-links.mdx + 
admin-console-customize-app-icon.md + admin-console-customize-config-screen.md + admin-console-display-app-status.md + admin-console-port-forward.mdx + admin-console-prometheus-monitoring.mdx + ci-overview.md + ci-workflows-github-actions.md + ci-workflows.mdx + compatibility-matrix-usage.md + config-screen-about.md + config-screen-conditional.mdx + config-screen-map-inputs.md + custom-domains-using.md + custom-domains.md + custom-metrics.md + customer-adoption.md + customer-reporting.md + data-availability.md + database-config-adding-options.md + embedded-disaster-recovery.mdx + embedded-overview.mdx + embedded-using.mdx + helm-image-registry.mdx + helm-install-airgap.mdx + helm-install-overview.mdx + helm-install-release.md + helm-install-troubleshooting.mdx + helm-install-values-schema.mdx + helm-native-about.mdx + helm-native-v2-using.md + helm-optional-charts.md + helm-optional-value-keys.md + helm-packaging-airgap-bundles.mdx + helm-v2-migrate.md + identity-service-configuring.md + insights-app-status.md + install-with-helm.mdx + installer-history.mdx + instance-data-export.md + instance-insights-details.md + instance-insights-event-data.mdx + instance-notifications-config.mdx + kots-faq.mdx + kurl-about.mdx + kurl-nodeport-services.mdx + kurl-reset.mdx + licenses-about-types.md + licenses-about.mdx + licenses-adding-custom-fields.md + licenses-download.md + licenses-install-types.mdx + licenses-reference-helm.md + licenses-reference-kots-runtime.mdx + licenses-reference-sdk.mdx + licenses-referencing-fields.md + licenses-using-builtin-fields.mdx + licenses-verify-fields-sdk-api.md + namespaces.md + offsite-backup.md + operator-defining-additional-images.mdx + operator-defining-additional-namespaces.md + operator-packaging-about.md + operator-referencing-images.md + orchestrating-resource-deployment.md + packaging-air-gap-excluding-minio.md + packaging-cleaning-up-jobs.md + packaging-embedded-kubernetes.mdx + packaging-include-resources.md + packaging-ingress.md + packaging-installer-storage.mdx + packaging-kots-versions.md + packaging-private-images.md + packaging-private-registry-security.md + packaging-public-images.mdx + packaging-rbac.md + packaging-using-tls-certs.mdx + planning-questionnaire.md + policies-data-transmission.md + policies-infrastructure-and-subprocessors.md + policies-support-lifecycle.md + policies-vulnerability-patch.md + preflight-defining.mdx + preflight-examples.mdx + preflight-host-preflights.md + preflight-running.md + preflight-sb-helm-templates-about.md + preflight-support-bundle-about.mdx + private-images-about.md + private-images-kots.mdx + private-images-replicated.mdx + private-images-tags-digests.md + quick-start.mdx + releases-about.mdx + releases-creating-channels.md + releases-creating-cli.mdx + releases-creating-customer.mdx + releases-creating-releases.mdx + releases-share-download-portal.md + releases-sharing-license-install-script.mdx + replicated-api-tokens.md + replicated-onboarding.mdx + replicated-sdk-airgap.mdx + replicated-sdk-customizing.md + replicated-sdk-development.mdx + replicated-sdk-installing.mdx + replicated-sdk-overview.mdx + replicated-sdk-slsa-validating.md + resources-annotations-templating.md + snapshots-configuring-backups.md + snapshots-hooks.md + snapshots-overview.mdx + support-bundle-customizing.mdx + support-bundle-embedded.mdx + support-bundle-examples.mdx + support-bundle-generating.mdx + support-enabling-direct-bundle-uploads.md + support-host-support-bundles.md + support-inspecting-support-bundles.md + 
support-modular-support-bundle-specs.md + support-online-support-bundle-specs.md + support-submit-request.md + team-management-github-username.mdx + team-management-google-auth.md + team-management-rbac-configuring.md + team-management-rbac-resource-names.md + team-management-saml-auth.md + team-management-slack-config.mdx + team-management-two-factor-auth.md + team-management.md + telemetry-air-gap.mdx + testing-about.md + testing-cluster-addons.md + testing-how-to.md + testing-ingress.md + testing-pricing.mdx + testing-supported-clusters.md + tutorial-adding-db-config.md + tutorial-cli-create-app.mdx + tutorial-cli-create-customer.mdx + tutorial-cli-create-new-version.mdx + tutorial-cli-create-release.mdx + tutorial-cli-deploy-app.mdx + tutorial-cli-install-app-manager.mdx + tutorial-cli-install-cli.mdx + tutorial-cli-manifests.mdx + tutorial-cli-setup.mdx + tutorial-cli-update-app.mdx + tutorial-config-create-app.md + tutorial-config-create-customer.md + tutorial-config-create-release.md + tutorial-config-get-chart.md + tutorial-config-install-kots.md + tutorial-config-package-chart.md + tutorial-config-setup.md + tutorial-ecr-private-images.md + tutorial-embedded-cluster-create-app.mdx + tutorial-embedded-cluster-create-customer.mdx + tutorial-embedded-cluster-create-release.mdx + tutorial-embedded-cluster-install.mdx + tutorial-embedded-cluster-package-chart.mdx + tutorial-embedded-cluster-setup.mdx + tutorial-kots-helm-create-app.md + tutorial-kots-helm-create-customer.md + tutorial-kots-helm-create-release.md + tutorial-kots-helm-get-chart.md + tutorial-kots-helm-install-helm.md + tutorial-kots-helm-install-kots.md + tutorial-kots-helm-package-chart.md + tutorial-kots-helm-setup.md + tutorial-preflight-helm-add-spec.mdx + tutorial-preflight-helm-create-customer.mdx + tutorial-preflight-helm-create-release.mdx + tutorial-preflight-helm-get-chart.mdx + tutorial-preflight-helm-install-kots.mdx + tutorial-preflight-helm-install.mdx + tutorial-preflight-helm-setup.mdx + using-third-party-registry-proxy.mdx + vendor-portal-application-settings.md + vendor-portal-creating-account.md + vendor-portal-manage-app.md + intro-kots.mdx + intro-replicated.mdx + intro.md +src/ + components/ + HomepageFeatures.js + HomepageFeatures.module.css + css/ + custom.css + theme/ + Admonition/ + index.js + styles.module.css + DocItem/ + Footer/ + index.js + styles.module.css + EditThisPage/ + index.js + styles.module.css +static/ + images/ + icons/ + chat_bubble_white.svg + vendor_portal_1.svg + git-pull-request.svg + logo.svg + report.svg + undraw_docusaurus_mountain.svg + undraw_docusaurus_react.svg + undraw_docusaurus_tree.svg + js/ + activecampaign.js + qualified.js + visitoranalytics.js +.gitignore +babel.config.js +CODEOWNERS +config.json +docusaurus.config.js +LICENSE +netlify.toml +package.json +README.md +sidebars.js +variables.js + +================================================================ +Files +================================================================ + +================ +File: .github/workflows/algolia-crawl.yml +================ +name: scrape +concurrency: scrape +on: + push: + branches: + - main + workflow_dispatch: +jobs: + scrape: + runs-on: ubuntu-latest + steps: + - name: check out code 🛎d + uses: actions/checkout@v4 + # when scraping the site, inject secrets as environment variables + # then pass their values into the Docker container using "-e" syntax + # and inject config.json contents as another variable + - name: scrape the site 🧽 + env: + ALGOLIA_APP_ID: ${{ 
secrets.ALGOLIA_APP_ID }}
+          ALGOLIA_API_KEY: ${{ secrets.ALGOLIA_API_KEY }}
+        run: |
+          docker run \
+            -e ALGOLIA_APP_ID -e ALGOLIA_API_KEY \
+            -e CONFIG="$(cat config.json)" \
+            algolia/docsearch-scraper
+
+================
+File: .github/workflows/app-manager-release-notes.yml
+================
+name: app-manager-release-notes
+on:
+  repository_dispatch:
+    types: [app-manager-release-notes]
+    inputs:
+      version:
+        description: KOTS version
+        required: true
+
+jobs:
+  generate-release-notes-pr:
+    runs-on: ubuntu-20.04
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Generate Release Notes
+        id: release-notes
+        env:
+          KOTS_VERSION: ${{ github.event.client_payload.version }}
+        uses: replicatedhq/release-notes-generator@main
+        with:
+          owner-repo: replicatedhq/kots
+          head: $KOTS_VERSION
+          title: ${KOTS_VERSION#v}
+          description: 'Support for Kubernetes: 1.29, 1.30, and 1.31'
+          include-pr-links: false
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Update Release Notes
+        env:
+          PATTERN: ".+RELEASE_NOTES_PLACEHOLDER.+"
+        run: |
+          cat <<EOT >> /tmp/release-notes.txt
+
+          ${{ steps.release-notes.outputs.release-notes }}
+          EOT
+          sed -i -E "/$PATTERN/r /tmp/release-notes.txt" docs/release-notes/rn-app-manager.md
+          rm -rf /tmp/release-notes.txt
+
+      - name: Create Pull Request # creates a PR if there are differences
+        uses: peter-evans/create-pull-request@v7
+        id: cpr
+        with:
+          token: ${{ secrets.REPLICATED_GH_PAT }}
+          commit-message: App Manager ${{ github.event.client_payload.version }} release notes
+          title: App Manager ${{ github.event.client_payload.version }} release notes
+          branch: automation/app-manager-release-notes-${{ github.event.client_payload.version }}
+          delete-branch: true
+          base: "main"
+          body: "Automated changes by the [app-manager-release-notes](https://github.com/replicatedhq/replicated-docs/blob/main/.github/workflows/app-manager-release-notes.yml) GitHub action"
+
+      - name: Check outputs
+        run: |
+          echo "Pull Request Number - ${{ steps.cpr.outputs.pull-request-number }}"
+          echo "Pull Request URL - ${{ steps.cpr.outputs.pull-request-url }}"
+
+      - name: Slack Notification
+        uses: slackapi/slack-github-action@v2.0.0
+        with:
+          payload: |
+            {
+              "version": "${{ github.event.client_payload.version }}",
+              "pull_request_url": "${{steps.cpr.outputs.pull-request-url}}"
+            }
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.KOTS_RELEASE_NOTES_SLACK_WEBHOOK }}
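+
+The "Update Release Notes" step in these workflows relies on GNU sed's `r` command, which queues a file's contents to be printed after every line that matches the address pattern. A minimal, standalone sketch of the same splice (the file names here are illustrative, not part of the workflow):
+
+```bash
+#!/bin/bash
+# A release notes page containing the placeholder the workflows look for.
+cat <<EOT > notes.md
+## Release notes
+<!-- RELEASE_NOTES_PLACEHOLDER -->
+## Older releases
+EOT
+
+# Stage the generated notes, then splice them in below the placeholder.
+echo "- Fixed a bug" > /tmp/new-notes.txt
+PATTERN=".+RELEASE_NOTES_PLACEHOLDER.+"
+sed -i -E "/$PATTERN/r /tmp/new-notes.txt" notes.md
+
+cat notes.md  # the new entry now appears directly after the placeholder line
+```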
+================
+File: .github/workflows/auto-label.yml
+================
+on:
+  pull_request:
+    types: [opened]
+
+name: Auto-label new PRs
+
+jobs:
+  label:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/github-script@v7
+        with:
+          github-token: ${{ secrets.DOCS_GH_PAT }}
+          script: |
+            const labels = ['type::feature', 'type::docs']
+            github.rest.issues.addLabels({
+              ...context.repo,
+              issue_number: context.issue.number,
+              labels
+            })
+
+================
+File: .github/workflows/kubernetes-installer-release-notes.yml
+================
+name: kubernetes-installer-release-notes
+on:
+  repository_dispatch:
+    types: [kubernetes-installer-release-notes]
+    inputs:
+      version:
+        description: kURL version
+        required: true
+
+jobs:
+  generate-release-notes-pr:
+    runs-on: ubuntu-20.04
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Generate Release Notes
+        id: release-notes
+        env:
+          KURL_VERSION: ${{ github.event.client_payload.version }}
+        uses: replicatedhq/release-notes-generator@main
+        with:
+          owner-repo: replicatedhq/kurl
+          head: $KURL_VERSION
+          title: $KURL_VERSION
+          include-pr-links: false
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          feature-type-labels: type::feature,kurl::type::feature
+
+      - name: Update Release Notes
+        env:
+          PATTERN: ".+RELEASE_NOTES_PLACEHOLDER.+"
+        run: |
+          cat <<EOT >> /tmp/release-notes.txt
+
+          ${{ steps.release-notes.outputs.release-notes }}
+          EOT
+          sed -i -E "/$PATTERN/r /tmp/release-notes.txt" docs/release-notes/rn-kubernetes-installer.md
+          rm -rf /tmp/release-notes.txt
+
+      - name: Create Pull Request # creates a PR if there are differences
+        uses: peter-evans/create-pull-request@v7
+        id: cpr
+        with:
+          token: ${{ secrets.REPLICATED_GH_PAT }}
+          commit-message: Kubernetes Installer ${{ github.event.client_payload.version }} release notes
+          title: Kubernetes Installer ${{ github.event.client_payload.version }} release notes
+          branch: automation/kubernetes-installer-release-notes-${{ github.event.client_payload.version }}
+          delete-branch: true
+          base: "main"
+          body: "Automated changes by the [kubernetes-installer-release-notes](https://github.com/replicatedhq/replicated-docs/blob/main/.github/workflows/kubernetes-installer-release-notes.yml) GitHub action"
+
+      - name: Check outputs
+        run: |
+          echo "Pull Request Number - ${{ steps.cpr.outputs.pull-request-number }}"
+          echo "Pull Request URL - ${{ steps.cpr.outputs.pull-request-url }}"
+
+      - name: Slack Notification
+        uses: slackapi/slack-github-action@v2.0.0
+        with:
+          payload: |
+            {
+              "version": "${{ github.event.client_payload.version }}",
+              "pull_request_url": "${{steps.cpr.outputs.pull-request-url}}"
+            }
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.KURL_RELEASE_NOTES_SLACK_WEBHOOK }}
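+
+Each of these release-notes workflows is triggered by a `repository_dispatch` event that carries the version in `client_payload`. A dispatch of that shape can be sent with a single authenticated API call; the token and version values below are placeholders:
+
+```bash
+# Trigger the kubernetes-installer-release-notes workflow via the GitHub API.
+curl -X POST \
+  -H "Authorization: Bearer $GITHUB_TOKEN" \
+  -H "Accept: application/vnd.github+json" \
+  https://api.github.com/repos/replicatedhq/replicated-docs/dispatches \
+  -d '{"event_type": "kubernetes-installer-release-notes", "client_payload": {"version": "v2024.01.01-0"}}'
+```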
+================
+File: .github/workflows/replicated-sdk-release-notes.yml
+================
+name: replicated-sdk-release-notes
+on:
+  repository_dispatch:
+    types: [replicated-sdk-release-notes]
+    inputs:
+      version:
+        description: Replicated SDK version
+        required: true
+      prev_version:
+        description: Previous Replicated SDK version
+        required: true
+
+jobs:
+  generate-release-notes-pr:
+    runs-on: ubuntu-22.04
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Generate Release Notes
+        id: release-notes
+        env:
+          REPLICATED_SDK_VERSION: ${{ github.event.client_payload.version }}
+          PREV_REPLICATED_SDK_VERSION: ${{ github.event.client_payload.prev_version }}
+        uses: replicatedhq/release-notes-generator@main
+        with:
+          owner-repo: replicatedhq/replicated-sdk
+          base: $PREV_REPLICATED_SDK_VERSION
+          head: $REPLICATED_SDK_VERSION
+          title: $REPLICATED_SDK_VERSION
+          include-pr-links: false
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Update Release Notes
+        env:
+          PATTERN: ".+RELEASE_NOTES_PLACEHOLDER.+"
+        run: |
+          cat <<EOT >> /tmp/release-notes.txt
+
+          ${{ steps.release-notes.outputs.release-notes }}
+          EOT
+          sed -i -E "/$PATTERN/r /tmp/release-notes.txt" docs/release-notes/rn-replicated-sdk.md
+          rm -rf /tmp/release-notes.txt
+
+      - name: Create Pull Request # creates a PR if there are differences
+        uses: peter-evans/create-pull-request@v7
+        id: cpr
+        with:
+          token: ${{ secrets.REPLICATED_GH_PAT }}
+          commit-message: Replicated SDK ${{ github.event.client_payload.version }} release notes
+          title: Replicated SDK ${{ github.event.client_payload.version }} release notes
+          branch: automation/replicated-sdk-release-notes-${{ github.event.client_payload.version }}
+          delete-branch: true
+          base: "main"
+          body: "Automated changes by the [replicated-sdk-release-notes](https://github.com/replicatedhq/replicated-docs/blob/main/.github/workflows/replicated-sdk-release-notes.yml) GitHub action"
+
+      - name: Check outputs
+        run: |
+          echo "Pull Request Number - ${{ steps.cpr.outputs.pull-request-number }}"
+          echo "Pull Request URL - ${{ steps.cpr.outputs.pull-request-url }}"
+
+      - name: Slack Notification
+        uses: slackapi/slack-github-action@v2.0.0
+        with:
+          payload: |
+            {
+              "version": "${{ github.event.client_payload.version }}",
+              "pull_request_url": "${{steps.cpr.outputs.pull-request-url}}"
+            }
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.REPLICATED_SDK_RELEASE_NOTES_SLACK_WEBHOOK }}
+
+================
+File: .github/workflows/vendor-portal-release-notes.yml
+================
+name: vendor-portal-release-notes
+on:
+  repository_dispatch:
+    types: [vendor-portal-release-notes]
+    inputs:
+      version:
+        description: Vendor Portal version
+        required: true
+
+jobs:
+  generate-release-notes:
+    runs-on: ubuntu-20.04
+    outputs:
+      releaseNotes: ${{ steps.release-notes.outputs.release-notes }}
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Generate Release Notes
+        id: release-notes
+        env:
+          VENDOR_PORTAL_VERSION: ${{ github.event.client_payload.version }}
+        uses: replicatedhq/release-notes-generator@main
+        with:
+          owner-repo: replicatedhq/vandoor
+          head: $VENDOR_PORTAL_VERSION
+          title: $VENDOR_PORTAL_VERSION
+          include-pr-links: false
+          github-token: ${{ secrets.VENDOR_PORTAL_PAT }}
+          feature-type-labels: type::feature
+  generate-release-notes-pr:
+    runs-on: ubuntu-20.04
+    needs: generate-release-notes
+    if: ${{ needs.generate-release-notes.outputs.releaseNotes != '' || needs.generate-release-notes.outputs.releaseNotes != null }}
+    steps:
+      - uses: actions/checkout@v4
+      - name: Update Release Notes
+        env:
+          PATTERN: ".+RELEASE_NOTES_PLACEHOLDER.+"
+        run: |
+          cat <<EOT >> /tmp/release-notes.txt
+
+          ${{ needs.generate-release-notes.outputs.releaseNotes }}
+          EOT
+          sed -i -E "/$PATTERN/r /tmp/release-notes.txt" docs/release-notes/rn-vendor-platform.md
+          rm -rf /tmp/release-notes.txt
+
+      - name: Create Pull Request # creates a PR if there are differences
+        uses: peter-evans/create-pull-request@v7
+        id: cpr
+        with:
+          token: ${{ secrets.REPLICATED_GH_PAT }}
+          commit-message: Vendor Portal ${{ github.event.client_payload.version }} release notes
+          title: Vendor Portal ${{ github.event.client_payload.version }} release notes
+          branch: automation/vendor-portal-release-notes-${{ github.event.client_payload.version }}
+          delete-branch: true
+          base: "main"
+          body: "Automated changes by the [vendor-portal-release-notes](https://github.com/replicatedhq/replicated-docs/blob/main/.github/workflows/vendor-portal-release-notes.yml) GitHub action"
+
+      - name: Check outputs
+        run: |
+          echo "Pull Request Number - ${{ steps.cpr.outputs.pull-request-number }}"
+          echo "Pull Request URL - ${{ steps.cpr.outputs.pull-request-url }}"
+
+      - name: Slack Notification
+        uses: slackapi/slack-github-action@v2.0.0
+        with:
+          payload: |
+            {
+              "version": "${{ github.event.client_payload.version }}",
+              "pull_request_url": "${{ steps.cpr.outputs.pull-request-url }}"
+            }
+        env:
+          SLACK_WEBHOOK_URL: ${{ secrets.VENDOR_PORTAL_RELEASE_NOTES_SLACK_WEBHOOK }}
+
+================
+File: .github/dependabot.yml
+================
+version: 2
+updates:
+  # Update npm dependencies based on package.json
+  - package-ecosystem: "npm"
+    directory: "/" # Root directory
+    schedule:
+      interval: "weekly"
+    open-pull-requests-limit: 10
+    labels:
+      - "dependencies"
+      - "npm"
+      - "type::security"
+
+  # Keep Dependabot itself up to date
+  - package-ecosystem: "github-actions"
+    directory: "/" # Root directory (or specify if actions are elsewhere)
+    schedule:
+      interval: "weekly"
+    labels:
+      - "dependencies"
+      - "github-actions"
+      - "type::security"
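+
+The `build` script change at the top of this patch chains `repomix` ahead of `docusaurus build`, so every site build regenerates the packed file above. Assuming repomix's default configuration, which writes `repomix-output.txt` at the repository root and honors `.gitignore` exclusions, the same artifact can be produced locally:
+
+```bash
+# Pack the repository into repomix-output.txt, then build the site.
+npx repomix
+npm run build
+```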
- "type::security" + +================ +File: docs/enterprise/auth-changing-passwords.md +================ +# Changing an Admin Console Password + +When you install for the first time with Replicated kURL, the Replicated KOTS Admin Console is secured with a single shared password that is set automatically for all users. Replicated recommends that you change this to a new, unique password for security purposes as this automated password is displayed to the user in plain text. + +The Admin Console password is salted and one-way hashed using bcrypt. The irreversible hash is stored in a Secret named `kotsadm-password`. The password is not retrievable if lost. If you lose your Admin Console password, reset your password to access the Admin Console. + +For more information about bcrypt, see [bcrypt](https://en.wikipedia.org/wiki/Bcrypt) on Wikipedia. + +:::note +Users with Identity Provider (IDP) access cannot change their password using this procedure. If an attempt is made, IDP users receive a message in the user interface to contact the identity service provider to change their password. For more information about resetting an IDP user password, see [Resetting Authentication](auth-identity-provider#resetting-authentication) in _Using an Identity Provider for User Access (Beta)_. +::: + +To change your Admin Console password: + +1. Log in to the Admin Console using your current password. +1. In the drop-down in the top right of any page, click **Change password**. +1. In the Change Admin Console Password dialog, edit the fields. + + - The new password must be at least 6 characters and must not be the same as your current password. + - The **New Password** and **Confirm New Password** fields must match each other. + +1. Click **Change Password**. + + If there are any issues with changing the password, an error message displays the specific problem. + + When the password change succeeds, the current session closes and you are redirected to the Log In page. + +1. Log in with the new password. + +================ +File: docs/enterprise/auth-configuring-rbac.md +================ +# Configuring Role-based Access Control (Beta) + +You can regulate access to the Replicated KOTS Admin Console resources based on the roles of individual users within your organization. + +To configure role based access control (RBAC) for the Admin Console: +1. Go to the **Access** page. Under **Role Based Access Control Group Policy**, click **Add a group**. +1. Enter a group name that matches one of the group names already established with your identity provider. +1. Choose one of the pre-defined Admin Console roles to be assigned to that group. For a list of Admin Console roles, see [Admin Console roles](#admin-console-roles) below. +1. Click **Add group**. + +![Role Based Access Control](/images/identity-service-kotsadm-rbac.png) + +## Admin Console Roles + +The Admin Console comes with pre-defined identity service roles that can be assigned to groups when you configure RBAC for the Admin Console. + +- **Read Access:** This role has read permissions to all resources. + +- **Write Access:** This role has write permissions to all resources. + +## Support Roles + +- **Read Access:** This role has read permissions to all resources except the application's file tree. 
+
+- **Write Access:** This role has write permissions to the following resources:
+
+  * Support bundles
+  * Preflight checks
+
+================
+File: docs/enterprise/auth-identity-provider.md
+================
+# Using an Identity Provider for User Access (Beta)
+
+When you install an application for the first time, the Replicated KOTS Admin Console is secured with a single shared password for all users. It is possible to further configure the Admin Console to authenticate users with your organization's user management system. This feature is only available for licenses that have the Replicated identity service feature enabled.
+
+Replicated KOTS leverages the open source project Dex as an intermediary to control access to the Admin Console. Dex implements an array of protocols for querying other user-management systems, known as connectors. For more information, see the [Dex documentation](https://dexidp.io/docs/).
+
+The identity service has the following limitations:
+* Only available for installations in a cluster created by Replicated kURL.
+* Only available through the Admin Console.
+
+## Prerequisite
+
+When you are installing the Admin Console and setting up TLS certificates on the HTTPS page, you must configure the hostname to use to access the Admin Console. The hostname is required whether you use the identity service with a self-signed certificate or with a custom certificate. For more information about configuring the hostname field, see [Install and Deploy the Application](installing-kurl#install-app) in _Online Installation with kURL_.
+
+## Configuration
+
+To begin, click the **Access** tab at the top of the Admin Console. Here you can configure access to the Admin Console, integrating with one of the supported identity providers.
+
+![Configure Identity Provider](/images/access-identity.png)
+
+## Supported Providers
+
+**OpenID Connect:** For more information, see the [OpenID Connect documentation](https://openid.net/connect/).
+
+## Resetting Authentication
+
+When you enable identity provider access to the Admin Console, shared password authentication is disabled. If you want to re-enable shared password authentication, run the `kubectl kots identity-service enable-shared-password --namespace [namespace]` command. For more information, see [identity-service enable-shared-password](/reference/kots-cli-identity-service-enable-shared-password/) in the KOTS CLI documentation.
+
+================
+File: docs/enterprise/cluster-management-add-nodes.md
+================
+import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"
+
+# Adding Nodes to kURL Clusters
+
+<KurlAvailability/>
+
+This topic describes how to add primary and secondary nodes to a Replicated kURL cluster.
+
+## Overview
+
+You can generate commands in the Replicated KOTS Admin Console to join additional primary and secondary nodes to kURL clusters. Primary nodes run services that control the cluster. Secondary nodes run services that control the pods that host the application containers. Adding nodes can help manage resources to ensure that the application runs smoothly.
+
+For high availability clusters, Kubernetes recommends using at least three primary nodes, and that you use an odd number of nodes to help with leader selection if machine or zone failure occurs. For more information, see [Creating Highly Available Clusters with kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/) in the Kubernetes documentation.
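+
+Before adding nodes, it can help to confirm how many primary (control plane) nodes the cluster currently has so that the total stays odd. One way to check with kubectl; note that newer Kubernetes versions label control plane nodes `node-role.kubernetes.io/control-plane`, while older kURL clusters may use `node-role.kubernetes.io/master`:
+
+```bash
+# List only the nodes that run the Kubernetes control plane.
+kubectl get nodes --selector='node-role.kubernetes.io/control-plane'
+
+# List all nodes and their roles to see the full cluster layout.
+kubectl get nodes -o wide
+```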
+ +## Join Primary and Secondary Nodes + +You can join primary and secondary nodes on the Admin Console **Cluster management** page. + +To add primary and secondary nodes: + +1. (Air Gap Only) For air gapped environments, download and extract the `.tar.gz` bundle on the remote node before running the join command. +1. In the Admin Console, click **Cluster Management > Add a node**. +1. Copy the command that displays in the text box and run it on the node that you are joining to the cluster. + + ![Join node in Admin Console](/images/join-node.png) + + [View a larger image](/images/join-node.png) + +================ +File: docs/enterprise/delete-admin-console.md +================ +# Deleting the Admin Console and Removing Applications + +This topic describes how to remove installed applications and delete the Replicated KOTS Admin Console. The information in this topic applies to existing cluster installations with KOTS. + +## Remove an Application + +The Replicated KOTS CLI `kots remove` command removes the reference to an installed application from the Admin Console. When you use `kots remove`, the Admin Console no longer manages the application because the record of that application’s installation is removed. This means that you can no longer manage the application through the Admin Console or through the KOTS CLI. + +By default, `kots remove` does not delete any of the installed Kubernetes resources for the application from the cluster. To remove both the reference to an application from the Admin Console and remove any resources for the application from the cluster, you can run `kots remove` with the `--undeploy` flag. + +It can be useful to remove only the reference to an application from the Admin Console if you want to reinstall the application, but you do not want to recreate the namespace or other Kubernetes resources. For example, if you installed an application using an incorrect license file and need to reinstall with the correct license. + +To remove an application: + +1. Run the following command to list the installed applications for a namespace: + ``` + kubectl kots get apps -n NAMESPACE + ``` + Replace `NAMESPACE` with the name of the namespace where the Admin Console is installed. + + In the output of this command, note the slug for the application that you want to remove. + +1. Run _one_ of the following commands: + + * Remove only the reference to the application from the Admin Console: + + ``` + kubectl kots remove APP_SLUG -n NAMESPACE + ``` + Replace: + * `APP_SLUG` with the slug for the application that you want to remove. + * `NAMESPACE` with the name of the namespace where the Admin Console is installed. + + * Remove the reference to the application from the Admin Console and remove its resources from the cluster: + + ``` + kubectl kots remove APP_SLUG -n NAMESPACE --undeploy + ``` + + :::note + Optionally, use the `--force` flag to remove the application reference from the Admin Console when the application has already been deployed. The `--force` flag is implied when `--undeploy` is used. For more information, see [remove](/reference/kots-cli-remove) in _KOTS CLI_. + ::: + + +## Delete the Admin Console + +When you install an application, KOTS creates the Kubernetes resources for the Admin Console itself on the cluster. The Admin Console includes Deployments and Services, Secrets, and other resources such as StatefulSets and PersistentVolumeClaims. 
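+
+Before deleting anything, you can inspect what KOTS created in the Admin Console namespace. For example, with `NAMESPACE` as a placeholder for that namespace:
+
+```bash
+# List the Admin Console workloads, Services, Secrets, and storage.
+kubectl -n NAMESPACE get deployments,statefulsets,services,secrets,persistentvolumeclaims
+```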
+
+By default, KOTS also creates Kubernetes ClusterRole and ClusterRoleBinding resources that grant permissions to the Admin Console at the cluster level. These `kotsadm-role` and `kotsadm-rolebinding` resources are managed outside of the namespace where the Admin Console is installed. Alternatively, when the Admin Console is installed with namespace-scoped access, KOTS creates Role and RoleBinding resources inside the namespace where the Admin Console is installed.
+
+In existing cluster installations, if the Admin Console is not installed in the `default` namespace, then you delete the Admin Console by deleting the namespace where it is installed.
+
+If you installed the Admin Console with namespace-scoped access, then the Admin Console Role and RoleBinding RBAC resources are also deleted when you delete the namespace. Alternatively, if you installed with the default cluster-scoped access, then you manually delete the Admin Console ClusterRole and ClusterRoleBinding resources from the cluster. For more information, see [supportMinimalRBACPrivileges](/reference/custom-resource-application#supportminimalrbacprivileges) and [requireMinimalRBACPrivileges](/reference/custom-resource-application#requireminimalrbacprivileges) in _Application_.
+
+For more information about installing with cluster- or namespace-scoped access, see [RBAC Requirements](/enterprise/installing-general-requirements#rbac-requirements) in _Installation Requirements_.
+
+To completely delete the Admin Console from an existing cluster:
+
+1. Run the following command to delete the namespace where the Admin Console is installed:
+
+   :::important
+   This command deletes everything inside the specified namespace, including the Admin Console Role and RoleBinding resources if you installed with namespace-scoped access.
+   :::
+
+   ```
+   kubectl delete ns NAMESPACE
+   ```
+   Replace `NAMESPACE` with the name of the namespace where the Admin Console is installed.
+
+   :::note
+   You cannot delete the `default` namespace.
+   :::
+
+1. (Cluster-scoped Access Only) If you installed the Admin Console with the default cluster-scoped access, run the following commands to delete the Admin Console ClusterRole and ClusterRoleBinding from the cluster:
+
+   ```
+   kubectl delete clusterrole kotsadm-role
+   ```
+
+   ```
+   kubectl delete clusterrolebinding kotsadm-rolebinding
+   ```
+
+1. (Optional) To uninstall the KOTS CLI, see [Uninstall](https://docs.replicated.com/reference/kots-cli-getting-started#uninstall) in _Installing the KOTS CLI_.
+
+================
+File: docs/enterprise/embedded-manage-nodes.mdx
+================
+import HaArchitecture from "../partials/embedded-cluster/_multi-node-ha-arch.mdx"
+
+# Managing Multi-Node Clusters with Embedded Cluster
+
+This topic describes managing nodes in clusters created with Replicated Embedded Cluster, including how to add nodes and enable high availability for multi-node clusters.
+
+## Limitations
+
+Multi-node clusters with Embedded Cluster have the following limitations:
+
+* Support for multi-node clusters with Embedded Cluster is Beta. Only single-node embedded clusters are Generally Available (GA).
+
+* High availability for Embedded Cluster is an Alpha feature. This feature is subject to change, including breaking changes. To get access to this feature, reach out to Alex Parker at [alexp@replicated.com](mailto:alexp@replicated.com).
+
+* The same Embedded Cluster data directory used at installation is used for all nodes joined to the cluster.
This is either the default `/var/lib/embedded-cluster` directory or the directory set with the [`--data-dir`](/reference/embedded-cluster-install#flags) flag. You cannot choose a different data directory for Embedded Cluster when joining nodes. + +## Add Nodes to a Cluster (Beta) {#add-nodes} + +You can add nodes to create a multi-node cluster in online (internet-connected) and air-gapped (limited or no outbound internet access) environments. The Admin Console provides the join command that you use to join nodes to the cluster. + +:::note +Multi-node clusters are not highly available by default. For information about enabling high availability, see [Enable High Availability for Multi-Node Clusters (Alpha)](#ha) below. +::: + +To add nodes to a cluster: + +1. (Optional) In the Embedded Cluster Config, configure the `roles` key to customize node roles. For more information, see [roles](/reference/embedded-config#roles) in _Embedded Cluster Config_. When you are done, create and promote a new release with the updated Config. + +1. Do one of the following to get the join command from the Admin Console: + + 1. To add nodes during the application installation process, follow the steps in [Online Installation with Embedded Cluster](/enterprise/installing-embedded) or [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap) to install. A **Nodes** screen is displayed as part of the installation flow in the Admin Console that allows you to choose a node role and copy the relevant join command. + + 1. Otherwise, if you have already installed the application: + + 1. Log in to the Admin Console. + + 1. If you promoted a new release that configures the `roles` key in the Embedded Cluster Config, update the instance to the new version. See [Performing Updates in Embedded Clusters](/enterprise/updating-embedded). + + 1. Go to **Cluster Management > Add node** at the top of the page. + + Add node page in the Admin Console + + [View a larger version of this image](/images/admin-console-add-node.png) + +1. Either on the Admin Console **Nodes** screen that is displayed during installation or in the **Add a Node** dialog, select one or more roles for the new node that you will join. Copy the join command. + + Note the following: + + * If the Embedded Cluster Config [roles](/reference/embedded-config#roles) key is not configured, all new nodes joined to the cluster are assigned the `controller` role by default. The `controller` role designates nodes that run the Kubernetes control plane. Controller nodes can also run other workloads, such as application or Replicated KOTS workloads. + + * Roles are not updated or changed after a node is added. If you need to change a node’s role, reset the node and add it again with the new role. + + * For multi-node clusters with high availability (HA), at least three `controller` nodes are required. You can assign both the `controller` role and one or more `custom` roles to the same node. For more information about creating HA clusters with Embedded Cluster, see [Enable High Availability for Multi-Node Clusters (Alpha)](#ha) below. + + * To add non-controller or _worker_ nodes that do not run the Kubernetes control plane, select one or more `custom` roles for the node and deselect the `controller` role. + +1. Do one of the following to make the Embedded Cluster installation assets available on the machine that you will join to the cluster: + + * **For online (internet-connected) installations**: SSH onto the machine that you will join. 
Then, use the same commands that you ran during installation to download and untar the Embedded Cluster installation assets on the machine. See [Online Installation with Embedded Cluster](/enterprise/installing-embedded).
+
+   * **For air gap installations with limited or no outbound internet access**: On a machine that has internet access, download the Embedded Cluster installation assets (including the air gap bundle) using the same command that you ran during installation. See [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap). Then, move the downloaded assets to the air-gapped machine that you will join, and untar.
+
+   :::important
+   The Embedded Cluster installation assets on each node must all be the same version. If you use a different version than what is installed elsewhere in the cluster, the cluster will not be stable. To download a specific version of the Embedded Cluster assets, select a version in the **Embedded cluster install instructions** dialog.
+   :::
+
+1. On the machine that you will join to the cluster, run the join command that you copied from the Admin Console.
+
+   **Example:**
+
+   ```bash
+   sudo ./APP_SLUG join 10.128.0.32:30000 TxXboDstBAamXaPdleSK7Lid
+   ```
+   **Air Gap Example:**
+
+   ```bash
+   sudo ./APP_SLUG join --airgap-bundle APP_SLUG.airgap 10.128.0.32:30000 TxXboDstBAamXaPdleSK7Lid
+   ```
+
+1. In the Admin Console, either on the installation **Nodes** screen or on the **Cluster Management** page, verify that the node appears. Wait for the node's status to change to Ready.
+
+1. Repeat these steps for each node you want to add.
+
+## Enable High Availability for Multi-Node Clusters (Alpha) {#ha}
+
+Multi-node clusters are not highly available by default. The first node of the cluster is special and holds important data for Kubernetes and KOTS, such that the loss of this node would be catastrophic for the cluster. Enabling high availability (HA) requires that at least three controller nodes are present in the cluster. Users can enable HA when joining the third node.
+
+:::important
+High availability for Embedded Cluster is an Alpha feature. This feature is subject to change, including breaking changes. To get access to this feature, reach out to Alex Parker at [alexp@replicated.com](mailto:alexp@replicated.com).
+:::
+
+### HA Architecture
+
+<HaArchitecture/>
+
+For more information about the Embedded Cluster built-in extensions, see [Built-In Extensions](/vendor/embedded-overview#built-in-extensions) in _Embedded Cluster Overview_.
+
+### Requirements
+
+Enabling high availability has the following requirements:
+
+* High availability is supported with Embedded Cluster 1.4.1 or later.
+
+* High availability is supported only for clusters where at least three nodes with the `controller` role are present.
+
+### Limitations
+
+Enabling high availability has the following limitations:
+
+* High availability for Embedded Cluster is an Alpha feature. This feature is subject to change, including breaking changes. To get access to this feature, reach out to Alex Parker at [alexp@replicated.com](mailto:alexp@replicated.com).
+
+* The `--enable-ha` flag serves as a feature flag during the Alpha phase. In the future, the prompt about migrating to high availability will display automatically if the cluster is not yet HA and you are adding the third or more controller node.
+
+* HA multi-node clusters use rqlite to store support bundles up to 100 MB in size. Bundles over 100 MB can cause rqlite to crash and restart.
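+
+One way to confirm that the three-controller requirement is met before enabling HA is to open a shell on an existing node and list the nodes, as in this sketch (the `shell` subcommand is the same one used elsewhere in this topic, and node roles are also shown on the Admin Console **Cluster Management** page):
+
+```bash
+# On an existing controller node, open a shell with cluster access.
+sudo ./APP_SLUG shell
+
+# Inside the shell, list the nodes and check the ROLES column.
+kubectl get nodes
+```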
### Best Practices for High Availability

Consider the following best practices and recommendations for creating HA clusters:

* At least three _controller_ nodes that run the Kubernetes control plane are required for HA. This is because clusters use a quorum system, in which more than half the nodes must be up and reachable. In clusters with three controller nodes, the Kubernetes control plane can continue to operate if one node fails because a quorum can still be reached by the remaining two nodes. By default, with Embedded Cluster, all new nodes added to a cluster are controller nodes. For information about customizing the `controller` node role, see [roles](/reference/embedded-config#roles) in _Embedded Cluster Config_.

* Always use an odd number of controller nodes in HA clusters. Using an odd number of controller nodes ensures that the cluster can make decisions efficiently with quorum calculations. Clusters with an odd number of controller nodes also avoid split-brain scenarios where the cluster runs as two independent groups of nodes, resulting in inconsistencies and conflicts.

* You can have any number of _worker_ nodes in HA clusters. Worker nodes do not run the Kubernetes control plane, but can run workloads such as application or Replicated KOTS workloads.

### Create a Multi-Node HA Cluster

To create a multi-node HA cluster:

1. Set up a cluster with at least two controller nodes. You can do an online (internet-connected) or air gap installation. For more information, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded) or [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap).

1. SSH onto a third node that you want to join to the cluster as a controller.

1. Run the join command provided in the Admin Console **Cluster Management** tab and pass the `--enable-ha` flag. For example:

   ```bash
   sudo ./APP_SLUG join --enable-ha 10.128.0.80:30000 tI13KUWITdIerfdMcWTA4Hpf
   ```

1. After the third node joins the cluster, type `y` in response to the prompt asking if you want to enable high availability.

   ![high availability command line prompt](/images/embedded-cluster-ha-prompt.png)

   [View a larger version of this image](/images/embedded-cluster-ha-prompt.png)

1. Wait for the migration to complete.
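After the migration completes, or after joining any node, you can verify the cluster state from the command line as an alternative to checking the Admin Console **Cluster Management** page. This is a minimal sketch that assumes the Embedded Cluster `shell` subcommand shown elsewhere in this documentation:

```bash
# Open a shell with kubectl configured for the cluster (run on a controller node).
sudo ./APP_SLUG shell

# List all nodes and confirm that each node reports a Ready status.
kubectl get nodes -o wide
```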
================
File: docs/enterprise/embedded-tls-certs.mdx
================
# Updating Custom TLS Certificates in Embedded Cluster Installations

This topic describes how to update custom TLS certificates in Replicated Embedded Cluster installations.

## Update Custom TLS Certificates

Users can provide custom TLS certificates with Embedded Cluster installations and can update TLS certificates through the Admin Console.

:::important
Adding the `acceptAnonymousUploads` annotation temporarily creates a vulnerability for an attacker to maliciously upload TLS certificates. After TLS certificates have been uploaded, the vulnerability is closed again.

Replicated recommends that you complete this upload process quickly to minimize the vulnerability risk.
:::

To upload a new custom TLS certificate in Embedded Cluster installations:

1. SSH onto a controller node where Embedded Cluster is installed. Then, run the following command to start a shell so that you can access the cluster with kubectl:

   ```bash
   sudo ./APP_SLUG shell
   ```
   Where `APP_SLUG` is the unique slug of the installed application.

1. In the shell, run the following command to restore the ability to upload new TLS certificates by adding the `acceptAnonymousUploads` annotation:

   ```bash
   kubectl -n kotsadm annotate secret kotsadm-tls acceptAnonymousUploads=1 --overwrite
   ```

1. Run the following command to get the name of the kurl-proxy server:

   ```bash
   kubectl get pods -A | grep kurl-proxy | awk '{print $2}'
   ```
   :::note
   This server is named `kurl-proxy`, but is used in both Embedded Cluster and kURL installations.
   :::

1. Run the following command to delete the kurl-proxy pod. The pod automatically restarts after the command runs.

   ```bash
   kubectl delete pods PROXY_SERVER
   ```

   Replace `PROXY_SERVER` with the name of the kurl-proxy server that you got in the previous step.

1. After the pod has restarted, go to `http://NODE_IP:30000/tls` in your browser, where `NODE_IP` is the IP address of a node in the cluster, and complete the process in the Admin Console to upload a new certificate.

================
File: docs/enterprise/gitops-managing-secrets.mdx
================
import GitOpsNotRecommended from "../partials/gitops/_gitops-not-recommended.mdx"

# Managing Secrets with KOTS Auto-GitOps (Alpha)

When you enable Auto-GitOps, the Replicated KOTS Admin Console pushes the rendered application manifests to the configured git repository. Application manifests often contain secrets and sensitive information that should not be committed to git.

Replicated KOTS v1.18 introduces an integration with SealedSecrets to encrypt secrets before committing. This integration is currently alpha and subject to change in future releases of KOTS. For more information, see the [sealed-secrets](https://github.com/bitnami-labs/sealed-secrets) GitHub repository.

To enable this integration, a Secret with specific labels must be manually deployed to the same namespace as the Admin Console. This Secret contains the SealedSecrets public key and is used by KOTS to replace all Secret objects created by the application and by the Admin Console. There is currently no way to automate this step or to use the Admin Console to configure this functionality. The Secret can be named anything unique that does not conflict with application Secrets. The labels in this example YAML file are important and must be used.

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: kots-sealed-secret
  namespace: NAMESPACE
  labels:
    kots.io/buildphase: secret
    kots.io/secrettype: sealedsecrets
data:
  cert.pem: SEALED_SECRET_KEY
```

Replace:

- `NAMESPACE` with the namespace where the Admin Console is installed.

- `SEALED_SECRET_KEY` with the base64 encoded, sealed Secret public key. The sealed Secret public key is included in the sealed Secret controller logs during startup.
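For instance, after saving the certificate from the controller logs to a local file such as `cert.pem` (see the example logs below), the value for the Secret can be produced with a single command. This is a sketch assuming GNU coreutils `base64`; the file name is illustrative:

```bash
# Encode the SealedSecrets public key certificate without line wrapping so that
# the output can be pasted directly into the Secret's cert.pem data field.
base64 -w0 cert.pem
```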
+ + **Example:** + + ```bash + kubectl logs -n kube-system sealed-secrets-controller-7684c7b86c-6bhhw + 2022/04/20 15:49:49 Starting sealed-secrets controller version: 0.17.5 + controller version: 0.17.5 + 2022/04/20 15:49:49 Searching for existing private keys + 2022/04/20 15:49:58 New key written to kube-system/sealed-secrets-keyxmwv2 + 2022/04/20 15:49:58 Certificate is + -----BEGIN CERTIFICATE----- + MIIEzDCCArSgAwIBAgIQIkCjUuODpQV7zK44IB3O9TANBgkqhkiG9w0BAQsFADAA + MB4XDTIyMDQyMDE1NDk1OFoXDTMyMDQxNzE1NDk1OFowADCCAiIwDQYJKoZIhvcN + AQEBBQADggIPADCCAgoCggIBAN0cle8eERYUglhGapLQZWYS078cP9yjOZpoUtXe + mpNE4eLBMo2bDAOopL9YV6TIh2EQMGOr7Njertnf7sKl/1/ZEnIpDw+b/U40LD6o + XMymCrv9GznlsEkaqfGynsY22oamQnHNLIPTYfxUueDqqQFSJN3h1vKZaFi850I4 + y29r+kxX8gGTRmuratGw0Rd4VvHtqi4lDlD9pBToQzbYsbhiySKhClAWC8Hbwzw8 + 4rPamYO8am92jpWIw0liSJUq5urnHR+S0S2P8FlOh7nbCI4ZkmY/Edjxz6ew7yB3 + OFONxlkweD2/KMzquMgOxhxUUdrbBZxXtb6s3MUeF4ENnJ2iL73dgx7O81HTUyu4 + Ok0YK1zqlnj4B683ySV3/RAtHbJJJWJMrLqbjhUNiYf+Ey6wXHJIwqXnjkG4UjP/ + OzrAmZiMa+z/uniUS0M+6siDJuj1FZsN9o1HhwwAWKcEJov2Jlo65gRsaLvalQfr + /VGrHQ1nQ2323hNVIZNKZ6zS6HlJOyOEQ7dcW3XsP1F5gEGkKkgLklOs3jt5OF4i + 2eiimHVnXveXgYZhDudY20ungRnslO2NBpTXgKIDu4YKUXhouQe1LAOkSIdtYSJL + eBFT1cO+rYqNUnffvsv2f9cE0SLp9XQ3VD5Eb+oJCpHc0qZ37/SB3VuDsXW2U/ih + TepxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIAATAPBgNVHRMBAf8EBTADAQH/MB0G + A1UdDgQWBBSvvAr9OTTWZBiCu7+b023YlCL6KzANBgkqhkiG9w0BAQsFAAOCAgEA + oXqAxZUCtZQCv23NMpABnJm2dM3qj5uZRbwqUBxutvlQ6WXKj17dbQ0SoNc2BOKT + 7hpR7wkN9Ic6UrTnx8NUf/CZwHrU+ZXzG8PigOccoP4XBJ6v7k4vOjwpuyr14Jtw + BXxcqbwK/bZPHbjn/N1eZhVyeOZlVE4oE+xbI0s6vJnn2N4tz/YrHB3VBRx9rbtN + WbbparStldRzfGyOXLZsu0eQFfHdGXtYAJP0Hougc26Wz2UEozjczUqFYc7s66Z4 + 1SCXpIpumm+aIKifjzIDPVZ3gDqpZaQYB877mCLVQ0rvfZgw/lVMtnnda+XjWh82 + YUORubKqKIM4OBM9RvaTih6k5En70Xh9ouyYgwE0fbUEvFThADVR5fUE0e7/34sE + oeAONWIZ4sbqewhvKjbYpKOZD7a9GrxCiB5C92WvA1xrI4x6F0EOK0jp16FSNuxN + us9lhAxX4V7HN3KR+O0msygeb/LAE+Vgcr3ZxlNvkIoLY318vKFsGCPgYTXLk5cs + uP2mg/JbTuntXaZTP+gM7hd8enugaUcvyX/AtduTeIXgs7KLLRZW+2M+gq/dlRwl + jCwIzOs3BKuiotGAWACaURFiKhyY+WiEpsIN1H6hswAwY0lcV1rrOeQgg9rfYvoN + 0tXH/eHuyzyHdWt0BX6LLY4cqP2rP5QyP117Vt2i1jY= + -----END CERTIFICATE----- + + 2022/04/20 15:49:58 HTTP server serving on :8080 + ... + ``` + +================ +File: docs/enterprise/gitops-workflow.mdx +================ +import GitOpsLimitation from "../partials/helm/_gitops-limitation.mdx" +import GitOpsNotRecommended from "../partials/gitops/_gitops-not-recommended.mdx" + +# KOTS Auto-GitOps Workflow + + + +## Overview of the Auto-GitOps Workflow + +The Replicated KOTS Admin Console default workflow is configured to receive updates, show the changes, and deploy the updates to the cluster. You can enable the KOTS Auto-GitOps workflow instead. When using the Auto-GitOps workflow, changes from the Admin Console are pushed to a private Git repository, where an existing CI/CD process can execute the delivery of manifests to the cluster. Changes can include local configuration changes and upstream updates from your vendor (such as application and license updates). + +If you have more than one application installed, you can selectively enable Auto-GitOps for each application. + +After enabling the Auto-GitOps workflow for an application, the Admin Console makes your first commit with the latest available version in the Admin Console. The latest available version is often the current version that is deployed. Subsequently, the Admin Console makes separate commits with any available updates. 
+ +If you configure automatic updates for the application, any updates from your vendor are automatically committed to your Git repository. For more information about configuring automatic updates, see [Configuring Automatic Updates](/enterprise/updating-apps). + +You can change your GitOps settings or disable Auto-GitOps at any time from the **GitOps** tab in the Admin Console. + +## Limitations + +- + +- To enable pushing updates through the Auto-GitOps workflow, you must first follow the installation workflow for the application using the Admin Console or the Replicated KOTS CLI. If the preflight checks pass during installation, then the application is deployed. + +- After you have completed the installation workflow, you can enable Auto-GitOps for all subsequent application updates. It is not required that the application deploy successfully to enable Auto-GitOps. For example, if the preflight checks fail during the installation workflow and the application is not deployed, you can still enable Auto-GitOps for subsequent application updates. + +- When you enable Auto-GitOps, the Admin Console sends all application updates, including the version that you initially installed before Auto-GitOps was enabled, to the repository that you specify. + +- If your organization has security requirements that prevent you from completing the installation workflow for the application first with the Admin Console or KOTS CLI, you cannot enable Auto-GitOps. + +## Prerequisites + +- A Git repository that you have read/write access to. +- If the repository does not have files or folders committed yet, you must make at least one commit with any content so that the connection attempt succeeds with the SSH key when you perform the following task. + +## Enable Auto-GitOps + +To enable pushing updates to the Auto-GitOps workflow: + +1. Click the **GitOps** tab at the top of the Admin Console. + +1. On the GitOps Configuration page: + + 1. If you have more than one application, select the application where you want to enable Auto-GitOps. + 1. Select the Git provider. + 1. Enter the repository details: + + + + + + + + + + + + + + + + + + +
      | Field Name | Description |
      |------------|-------------|
      | Owner & Repository | Enter the owner and repository name where the commit will be made. |
      | Branch | Enter the branch name or leave the field blank to use the default branch. |
      | Path | Enter the folder name in the repository where the application deployment file will be committed. If you leave this field blank, Replicated KOTS creates a folder for you. However, the best practice is to manually create a folder in the repository labeled with the application name and dedicated to the deployment file only. |
   1. Click **Generate SSH Key**, and then **Copy key**.
   1. Go to your Git repository and open the settings page. On the settings page:
      1. Add the SSH public key that you copied in the previous step.
      1. Enable write access for the key. This allows the Admin Console to push commits to the repository.

1. On the **GitOps Configuration** page, click **Test connection to repository** to verify that the Admin Console can connect.

   When the Admin Console establishes a connection to the repository, a dialog displays that says GitOps is enabled.

================
File: docs/enterprise/image-registry-kurl.md
================
import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"

# Working with the kURL Image Registry

This topic describes the Replicated kURL registry for kURL clusters.

## Overview

The kURL Registry add-on can be used to host application images. For air gap installations, this kURL registry is automatically used to host all application images.

With every application update, new images are pushed to the kURL registry. To keep the registry from running out of storage, images that are no longer used are automatically deleted from the registry.

For more information about the kURL Registry add-on, see [Registry Add-On](https://kurl.sh/docs/add-ons/registry) in the kURL documentation.

:::note
Users can also configure their own private registry for kURL installations instead of using the kURL registry. For more information, see [Configuring Local Image Registries](/enterprise/image-registry-settings).
:::

## Trigger Garbage Collection

Every time the application instance is upgraded, image garbage collection automatically deletes images that are no longer used.

You can also manually trigger image garbage collection. To manually run garbage collection:

```bash
kubectl kots admin-console garbage-collect-images -n NAMESPACE
```
Where `NAMESPACE` is the namespace where the application is installed.

For more information, see [admin-console garbage-collect-images](/reference/kots-cli-admin-console-garbage-collect-images/).

## Disable Image Garbage Collection

Image garbage collection is enabled by default for kURL clusters that use the kURL registry.

To disable image garbage collection:

```bash
kubectl patch configmaps kotsadm-confg --type merge -p "{\"data\":{\"enable-image-deletion\":\"false\"}}"
```

To enable garbage collection again:

```bash
kubectl patch configmaps kotsadm-confg --type merge -p "{\"data\":{\"enable-image-deletion\":\"true\"}}"
```

## Restore Deleted Images

Deleted images can be reloaded from air gap bundles using the `admin-console push-images` command. For more information, see [admin-console push-images](/reference/kots-cli-admin-console-push-images/) in the KOTS CLI documentation.

The registry address and namespace can be found on the **Registry Settings** page in the Replicated KOTS Admin Console. The registry username and password can be found in the `registry-creds` secret in the default namespace.

## Limitations

The kURL registry image garbage collection feature has the following limitations:

* **Optional components**: Some applications define Kubernetes resources that can be enabled or disabled dynamically. For example, template functions can be used to conditionally deploy a StatefulSet based on configuration from the user.

  If a resource is disabled and no longer deployed, its images can be included in the garbage collection.
+ + To prevent this from happening, include the optional images in the `additionalImages` list of the Application custom resource. For more information, see [`additionalImages`](/reference/custom-resource-application#additionalimages) in _Application_. + +* **Shared Image Registries**: The image garbage collection process assumes that the registry is not shared with any other instances of Replicated KOTS, nor shared with any external applications. If the built-in kURL registry is used by another external application, disable garbage collection to prevent image loss. + +* **Customer-Supplied Registries**: Image garbage collection is supported only when used with the built-in kURL registry. If the KOTS instance is configured to use a different registry, disable garbage collection to prevent image loss. For more information about configuring an image registry in the Admin Console, see [Configuring Local Image Registries](/enterprise/image-registry-settings). + +* **Application Rollbacks**: Image garbage collection has no effect when the `allowRollback` field in the KOTS Application custom resource is set to `true`. For more information, see [Application](/reference/custom-resource-application) in _KOTS Custom Resources_. + +================ +File: docs/enterprise/image-registry-rate-limits.md +================ +# Avoiding Docker Hub Rate Limits + +This topic describes how to avoid rate limiting for anonymous and free authenticated use of Docker Hub by providing a Docker Hub username and password to the `kots docker ensure-secret` command. + +## Overview + +On November 20, 2020, rate limits for anonymous and free authenticated use of Docker Hub went into effect. +Anonymous and Free Docker Hub users are limited to 100 and 200 container image pull requests per six hours, respectively. +Docker Pro and Docker Team accounts continue to have unlimited access to pull container images from Docker Hub. + +For more information on rate limits, see [Understanding Docker Hub rate limiting](https://www.docker.com/increase-rate-limits) on the Docker website. + +If the application that you are installing or upgrading has public Docker Hub images that are rate limited, then an error occurs when the rate limit is reached. + +## Provide Docker Hub Credentials + +To avoid errors caused by reaching the Docker Hub rate limit, a Docker Hub username and password can be passed to the `kots docker ensure-secret` command. The Docker Hub username and password are used only to increase rate limits and do not need access to any private repositories on Docker Hub. + +Example: + +```bash +kubectl kots docker ensure-secret --dockerhub-username sentrypro --dockerhub-password password --namespace sentry-pro +``` + +The `kots docker ensure-secret` command creates an image pull secret that KOTS can use when pulling images. + +KOTS then creates a new release sequence for the application to apply the image pull secret to all Kubernetes manifests that have images. After running the `kots docker ensure-secret` command, deploy this new release sequence either from the Admin Console or the KOTS CLI. + +For more information, see [docker ensure-secret](/reference/kots-cli-docker-ensure-secret) in the KOTS CLI documentation. 
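To see where an environment stands against these limits before installing or upgrading, you can query the rate-limit headers that Docker Hub returns. This is a sketch of Docker's documented rate-limit check, assuming `curl` and `jq` are available; HEAD requests to this endpoint do not count against the pull limit:

```bash
# Get an anonymous token scoped to Docker Hub's rate-limit preview image.
TOKEN=$(curl -s "https://auth.docker.io/token?service=registry.docker.io&scope=repository:ratelimitpreview/test:pull" | jq -r .token)

# Read the current limit and remaining pulls from the response headers.
curl -s --head -H "Authorization: Bearer $TOKEN" \
  "https://registry-1.docker.io/v2/ratelimitpreview/test/manifests/latest" | grep -i ratelimit
```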
================
File: docs/enterprise/image-registry-settings.mdx
================
import ImageRegistrySettings from "../partials/image-registry/_image-registry-settings.mdx"
import DockerCompatibility from "../partials/image-registry/_docker-compatibility.mdx"

# Configuring Local Image Registries

This topic describes how to configure private registry settings in the Replicated KOTS Admin Console.

The information in this topic applies to existing cluster installations with KOTS and installations with Replicated kURL. This topic does _not_ apply to Replicated Embedded Cluster installations.

## Overview

Using a private registry lets you create a custom image pipeline. Any proprietary configurations that you make to the application are shared only with the groups that you allow access, such as your team or organization. You also have control over the storage location, logging messages, load balancing requests, and other configuration options. Private registries can be used with online or air gap clusters.

## Requirement

The domain of the image registry must support the Docker V2 protocol. KOTS has been tested for compatibility with the following registries:

## Configure Local Private Registries in Online Clusters

In online (internet-connected) installations, you can optionally use a local private image registry. You can also disable the connection or remove the registry settings if needed.

To configure private registry settings in an online cluster:

1. In the Admin Console, on the **Registry settings** tab, edit the fields:

   ![Registry Settings](/images/registry-settings.png)

   [View a larger version of this image](/images/registry-settings.png)

   The following table describes the fields:

1. Click **Test Connection** to test the connection between KOTS and the registry host.

1. Click **Save changes**.

## Change Private Registries in Air Gap Clusters {#air-gap}

You can change the private registry settings at any time in the Admin Console.

To change private registry settings in an air gap cluster:

1. In the Admin Console, on the **Registry settings** tab, select the **Disable Pushing Images to Private Registry** checkbox. Click **Save changes**.

   :::note
   This is a temporary action that allows you to edit the registry namespace and hostname. If you only want to change the username or password for the registry, you do not have to disable pushing the images.
   :::

1. Edit the fields as needed, and click **Save changes**.
   | Field | Description |
   |-------|-------------|
   | Hostname | Specify a registry domain that uses the Docker V2 protocol. |
   | Username | Specify the username for the domain. |
   | Password | Specify the password for the domain. |
   | Registry Namespace | Specify the registry namespace. For air gap environments, this setting overwrites the registry namespace that you pushed images to when you installed KOTS. |
1. Deselect the **Disable Pushing Images to Private Registry** checkbox. This action re-enables KOTS to push images to the registry.

1. Click **Test Connection** to test the connection between KOTS and the private registry host.

1. Click **Save changes**.

## Stop Using a Registry and Remove Registry Settings

To stop using a registry and remove registry settings from the Admin Console:

1. Log in to the Admin Console and go to **Registry Settings**.

1. Click **Stop using registry** to remove the registry settings from the Admin Console.
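Because the registry must support the Docker V2 protocol, it can be useful to check a candidate registry before entering its details in the Admin Console. The following is a quick sketch, where `REGISTRY_HOST` is a placeholder for the registry domain; a `200` or `401` response from the `/v2/` base endpoint indicates a V2-compatible registry:

```bash
# A Docker V2-compatible registry answers on the /v2/ base endpoint.
# 200 = anonymous access allowed; 401 = authentication required (still V2-compatible).
curl -s -o /dev/null -w "%{http_code}\n" "https://REGISTRY_HOST/v2/"
```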
================
File: docs/enterprise/installing-embedded-air-gap.mdx
================
import UpdateAirGapAdm from "../partials/embedded-cluster/_update-air-gap-admin-console.mdx"
import UpdateAirGapCli from "../partials/embedded-cluster/_update-air-gap-cli.mdx"
import UpdateAirGapOverview from "../partials/embedded-cluster/_update-air-gap-overview.mdx"
import DoNotDowngrade from "../partials/embedded-cluster/_warning-do-not-downgrade.mdx"
import Prerequisites from "../partials/install/_ec-prereqs.mdx"

# Air Gap Installation with Embedded Cluster

This topic describes how to install applications with Embedded Cluster on a virtual machine (VM) or bare metal server with no outbound internet access.

## Overview

When an air gap bundle is built for a release containing an Embedded Cluster Config, both an application air gap bundle and an Embedded Cluster air gap bundle are built. The application air gap bundle can be used for air gap installations with Replicated kURL or with Replicated KOTS in an existing cluster. The Embedded Cluster air gap bundle is used for air gap installations with Embedded Cluster.

The Embedded Cluster air gap bundle not only contains the assets normally contained in an application air gap bundle (`airgap.yaml`, `app.tar.gz`, and an images directory), but it also contains an `embedded-cluster` directory with the assets needed to install the infrastructure (Embedded Cluster/k0s and [extensions](/reference/embedded-config#extensions)).

During installation with Embedded Cluster in air gap environments, a Docker registry is deployed to the cluster to store application images. Infrastructure images (for Embedded Cluster and Helm extensions) and the Helm charts are preloaded on each node at installation time.

### Requirement

Air gap installations are supported with Embedded Cluster version 1.3.0 or later.

### Limitations and Known Issues

Embedded Cluster installations in air gap environments have the following limitations and known issues:

* If you pass `?airgap=true` to the `replicated.app` endpoint but an air gap bundle is not built for the latest release, the API will not return a 404. Instead it will return the tarball without the air gap bundle (as in, with the installer and the license in it, like for online installations).

* Images used by Helm extensions must not refer to a multi-architecture image by digest. Only x64 images are included in air gap bundles, and the digest for the x64 image will be different from the digest for the multi-architecture image, preventing the image from being discovered in the bundle. An example of a chart that does this is the ingress-nginx/ingress-nginx chart. For an example of how the digests should be set to empty string to pull by tag only, see [extensions](/reference/embedded-config#extensions) in _Embedded Cluster Config_.

* Images for Helm extensions are loaded directly into containerd so that they are available without internet access. But if an image used by a Helm extension has **Always** set as the image pull policy, Kubernetes will try to pull the image from the internet. If necessary, use the Helm values to set `IfNotPresent` as the image pull policy to ensure the extension works in air gap environments.

* On the channel release history page, the links for **Download air gap bundle**, **Copy download URL**, and **View bundle contents** pertain to the application air gap bundle only, not the Embedded Cluster bundle.

## Prerequisites

Before you install, complete the following prerequisites:

## Install

To install with Embedded Cluster in an air gap environment:

1. In the [Vendor Portal](https://vendor.replicated.com), go to the channel where the target release was promoted to build the air gap bundle. Do one of the following:
   * If the **Automatically create airgap builds for newly promoted releases in this channel** setting is enabled on the channel, watch for the build status to complete.
   * If automatic air gap builds are not enabled, go to the **Release history** page for the channel and build the air gap bundle manually.

   :::note
   Errors in building either the application air gap bundle or the Embedded Cluster infrastructure will be shown if present.
   :::

1. Go to **Customers** and click on the target customer.

1. On the **Manage customer** tab, under **License options**, enable the **Airgap Download Enabled** license field.

1. At the top of the page, click **Install instructions > Embedded Cluster**.

   ![Customer install instructions drop down button](/images/customer-install-instructions-dropdown.png)

   [View a larger version of this image](/images/customer-install-instructions-dropdown.png)

1. In the **Embedded Cluster install instructions** dialog, verify that the **Install in an air gap environment** checkbox is enabled.

   ![Embedded cluster install instruction dialog](/images/embedded-cluster-install-dialog-airgap.png)

   [View a larger version of this image](/images/embedded-cluster-install-dialog-airgap.png)

1. (Optional) For **Select a version**, select a specific application version to install. By default, the latest version is selected.

1. SSH onto the machine where you will install.

1. On a machine with internet access, run the curl command to download the air gap installation assets as a `.tgz`.

1. Move the downloaded `.tgz` to the air-gapped machine where you will install.

1. On your air-gapped machine, untar the `.tgz` following the instructions provided in the **Embedded Cluster installation instructions** dialog. This will produce three files:
   * The installer
   * The license
   * The air gap bundle (`APP_SLUG.airgap`)

1. Install the application with the installation command copied from the **Embedded Cluster installation instructions** dialog:

   ```bash
   sudo ./APP_SLUG install --license license.yaml --airgap-bundle APP_SLUG.airgap
   ```
   Where `APP_SLUG` is the unique application slug.

   :::note
   Embedded Cluster supports installation options such as installing behind a proxy and changing the data directory used by Embedded Cluster. For the list of flags supported with the Embedded Cluster `install` command, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install).
   :::

1. When prompted, enter a password for accessing the KOTS Admin Console.

   The installation command takes a few minutes to complete. During installation, Embedded Cluster completes tasks to prepare the cluster and install KOTS in the cluster.
   Embedded Cluster also automatically runs a default set of [_host preflight checks_](/vendor/embedded-using#about-host-preflight-checks) which verify that the environment meets the requirements for the installer.

   **Example output:**

   ```bash
   ? Enter an Admin Console password: ********
   ? Confirm password: ********
   ✔ Host files materialized!
   ✔ Running host preflights
   ✔ Node installation finished!
   ✔ Storage is ready!
   ✔ Embedded Cluster Operator is ready!
   ✔ Admin Console is ready!
   ✔ Additional components are ready!
   Visit the Admin Console to configure and install gitea-kite: http://104.155.145.60:30000
   ```

   At this point, the cluster is provisioned and the Admin Console is deployed, but the application is not yet installed.

1. Go to the URL provided in the output to access the Admin Console.

1. On the Admin Console landing page, click **Start**.

1. On the **Secure the Admin Console** screen, review the instructions and click **Continue**. In your browser, follow the instructions that were provided on the **Secure the Admin Console** screen to bypass the warning.

1. On the **Certificate type** screen, either select **Self-signed** to continue using the self-signed Admin Console certificate or click **Upload your own** to upload your own private key and certificate.

   By default, a self-signed TLS certificate is used to secure communication between your browser and the Admin Console. You will see a warning in your browser every time you access the Admin Console unless you upload your own certificate.

1. On the login page, enter the Admin Console password that you created during installation and click **Log in**.

1. On the **Nodes** page, you can view details about the machine where you installed, including its node role, status, CPU, and memory.

   Optionally, add nodes to the cluster before deploying the application. For more information about joining nodes, see [Managing Multi-Node Clusters with Embedded Cluster](/enterprise/embedded-manage-nodes). Click **Continue**.

1. On the **Configure [App Name]** screen, complete the fields for the application configuration options. Click **Continue**.

1. On the **Validate the environment & deploy [App Name]** screen, address any warnings or failures identified by the preflight checks and then click **Deploy**.

   Preflight checks are conformance tests that run against the target namespace and cluster to ensure that the environment meets the minimum requirements to support the application.

The Admin Console dashboard opens.

On the Admin Console dashboard, the application status changes from Missing to Unavailable while the application is being installed. When the installation is complete, the status changes to Ready. For example:

![Admin console dashboard showing ready status](/images/gitea-ec-ready.png)

[View a larger version of this image](/images/gitea-ec-ready.png)
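As a supplement to the download and transfer steps in the procedure above, the following is a minimal sketch of fetching the air gap assets on an internet-connected machine and moving them to the air-gapped host over SSH. The exact download command, including the URL and authorization header, is provided in the **Embedded Cluster install instructions** dialog; the channel slug, license ID, and host shown here are illustrative placeholders:

```bash
# On the internet-connected machine: download the air gap assets as a .tgz.
curl -f "https://replicated.app/embedded/APP_SLUG/CHANNEL_SLUG?airgap=true" \
  -H "Authorization: LICENSE_ID" \
  -o APP_SLUG-CHANNEL_SLUG.tgz

# Move the archive to the air-gapped machine, then untar it there.
scp APP_SLUG-CHANNEL_SLUG.tgz user@airgapped-host:~/
```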
================
File: docs/enterprise/installing-embedded-automation.mdx
================
import ConfigValuesExample from "../partials/configValues/_configValuesExample.mdx"
import ConfigValuesProcedure from "../partials/configValues/_config-values-procedure.mdx"

# Automating Installation with Embedded Cluster

This topic describes how to install an application with Replicated Embedded Cluster from the command line, without needing to access the Replicated KOTS Admin Console.

## Overview

A common use case for installing with Embedded Cluster from the command line is to automate installation, such as performing headless installations as part of CI/CD pipelines.

With headless installation, you provide all the necessary installation assets, such as the license file and the application config values, with the installation command rather than through the Admin Console UI. Any preflight checks defined for the application run automatically during headless installations from the command line rather than being displayed in the Admin Console.

## Prerequisite

Create a ConfigValues YAML file to define the configuration values for the application release. The ConfigValues file allows you to pass the configuration values for an application from the command line with the install command, rather than through the Admin Console UI. For air-gapped environments, ensure that the ConfigValues file can be accessed from the installation environment.

The KOTS ConfigValues file includes the fields that are defined in the KOTS Config custom resource for an application release, along with the user-supplied and default values for each field. For a sketch of this format, see the example at the end of this topic.

## Online (Internet-Connected) Installation

To install with Embedded Cluster in an online environment:

1. Follow the steps provided in the Vendor Portal to download and untar the Embedded Cluster installation assets. For more information, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded).

1. Run the following command to install:

   ```bash
   sudo ./APP_SLUG install --license PATH_TO_LICENSE \
     --config-values PATH_TO_CONFIGVALUES \
     --admin-console-password ADMIN_CONSOLE_PASSWORD
   ```

   Replace:
   * `APP_SLUG` with the unique slug for the application.
   * `PATH_TO_LICENSE` with the path to the customer license file.
   * `PATH_TO_CONFIGVALUES` with the path to the ConfigValues file.
   * `ADMIN_CONSOLE_PASSWORD` with a password for accessing the Admin Console.

## Air Gap Installation

To install with Embedded Cluster in an air-gapped environment:

1. Follow the steps provided in the Vendor Portal to download and untar the Embedded Cluster air gap installation assets. For more information, see [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap).

1. Ensure that the Embedded Cluster installation assets are available on the air-gapped machine, then run the following command to install:

   ```bash
   sudo ./APP_SLUG install --license PATH_TO_LICENSE \
     --config-values PATH_TO_CONFIGVALUES \
     --admin-console-password ADMIN_CONSOLE_PASSWORD \
     --airgap-bundle PATH_TO_AIRGAP_BUNDLE
   ```

   Replace:
   * `APP_SLUG` with the unique slug for the application.
   * `PATH_TO_LICENSE` with the path to the customer license file.
   * `PATH_TO_CONFIGVALUES` with the path to the ConfigValues file.
   * `ADMIN_CONSOLE_PASSWORD` with a password for accessing the Admin Console.
   * `PATH_TO_AIRGAP_BUNDLE` with the path to the Embedded Cluster `.airgap` bundle for the release.
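For reference, the following is a minimal sketch of a ConfigValues file. The field names under `spec.values` are hypothetical; a real file uses the config fields defined by the specific application:

```yaml
# Hypothetical ConfigValues file. Field names depend on the application's Config.
apiVersion: kots.io/v1beta1
kind: ConfigValues
metadata:
  name: example-configvalues
spec:
  values:
    hostname:
      value: "app.example.com"   # user-supplied value
    use_tls:
      default: "1"               # default defined in the application's Config
      value: "0"                 # user-supplied override
```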
+ +================ +File: docs/enterprise/installing-embedded-requirements.mdx +================ +import EmbeddedClusterRequirements from "../partials/embedded-cluster/_requirements.mdx" +import EmbeddedClusterPortRequirements from "../partials/embedded-cluster/_port-reqs.mdx" +import FirewallOpeningsIntro from "../partials/install/_firewall-openings-intro.mdx" + +# Embedded Cluster Installation Requirements + +This topic lists the installation requirements for Replicated Embedded Cluster. Ensure that the installation environment meets these requirements before attempting to install. + +## System Requirements + + + +## Port Requirements + + + +## Firewall Openings for Online Installations with Embedded Cluster {#firewall} + + + + + + + + + + + + + + + + + + + + +
| Domain | Description |
|--------|-------------|
| `proxy.replicated.com` | Private Docker images are proxied through `proxy.replicated.com`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA. For the range of IP addresses for `proxy.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L52-L57) in GitHub. |
| `replicated.app` | Upstream application YAML and metadata is pulled from `replicated.app`. The current running version of the application (if any), as well as a license ID and application ID to authenticate, are all sent to `replicated.app`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA. For the range of IP addresses for `replicated.app`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L60-L65) in GitHub. |
| `registry.replicated.com` * | Some applications host private images in the Replicated registry at this domain. The on-prem Docker client uses a license ID to authenticate to `registry.replicated.com`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA. For the range of IP addresses for `registry.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L20-L25) in GitHub. |
+ +* Required only if the application uses the [Replicated private registry](/vendor/private-images-replicated). + +## About Firewalld Configuration + +When Firewalld is enabled in the installation environment, Embedded Cluster modifies the Firewalld config to allow traffic over the pod and service networks and to open the required ports on the host. No additional configuration is required. + +The following rule is added to Firewalld: + +```xml + + + + + + + + + + + +``` + +The following ports are opened in the default zone: + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Port  | Protocol |
|-------|----------|
| 6443  | TCP      |
| 10250 | TCP      |
| 9443  | TCP      |
| 2380  | TCP      |
| 4789  | UDP      |
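To confirm the Firewalld configuration after installation, you can inspect the active zone with the standard `firewall-cmd` utility. This is a sketch that assumes Firewalld is running and that you have root access on the host:

```bash
# Show the full configuration of the default zone, including open ports.
sudo firewall-cmd --list-all

# Or list only the ports opened in the default zone.
sudo firewall-cmd --list-ports
```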
================
File: docs/enterprise/installing-embedded.mdx
================
import Prerequisites from "../partials/install/_ec-prereqs.mdx"

# Online Installation with Embedded Cluster

This topic describes how to install an application in an online (internet-connected) environment with the Replicated Embedded Cluster installer. For information about air gap installations with Embedded Cluster, see [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap).

## Prerequisites

Before you install, complete the following prerequisites:

* Ensure that the required domains are accessible from servers performing the installation. See [Firewall Openings for Online Installations](/enterprise/installing-embedded-requirements#firewall).

## Install

To install an application with Embedded Cluster:

1. In the [Vendor Portal](https://vendor.replicated.com), go to **Customers** and click on the target customer. Click **Install instructions > Embedded Cluster**.

   ![Customer install instructions drop down button](/images/customer-install-instructions-dropdown.png)

   [View a larger version of this image](/images/customer-install-instructions-dropdown.png)

   The **Embedded Cluster install instructions** dialog is displayed.

   ![Embedded cluster install instruction dialog](/images/embedded-cluster-install-dialog.png)

   [View a larger version of this image](/images/embedded-cluster-install-dialog.png)

1. (Optional) In the **Embedded Cluster install instructions** dialog, under **Select a version**, select a specific application version to install. By default, the latest version is selected.

1. SSH onto the machine where you will install.

1. Run the first command in the **Embedded Cluster install instructions** dialog to download the installation assets as a `.tgz`.

1. Run the second command to extract the `.tgz`. This will produce the following files:

   * The installer
   * The license

1. Run the third command to install the release:

   ```bash
   sudo ./APP_SLUG install --license LICENSE_FILE
   ```
   Where:
   * `APP_SLUG` is the unique slug for the application.
   * `LICENSE_FILE` is the customer license.
   :::note
   Embedded Cluster supports installation options such as installing behind a proxy and changing the data directory used by Embedded Cluster. For the list of flags supported with the Embedded Cluster `install` command, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install).
   :::

1. When prompted, enter a password for accessing the KOTS Admin Console.

   The installation command takes a few minutes to complete. During installation, Embedded Cluster completes tasks to prepare the cluster and install KOTS in the cluster. Embedded Cluster also automatically runs a default set of [_host preflight checks_](/vendor/embedded-using#about-host-preflight-checks) which verify that the environment meets the requirements for the installer.

   **Example output:**

   ```bash
   ? Enter an Admin Console password: ********
   ? Confirm password: ********
   ✔ Host files materialized!
   ✔ Running host preflights
   ✔ Node installation finished!
   ✔ Storage is ready!
   ✔ Embedded Cluster Operator is ready!
   ✔ Admin Console is ready!
   ✔ Additional components are ready!
   Visit the Admin Console to configure and install gitea-kite: http://104.155.145.60:30000
   ```

   At this point, the cluster is provisioned and the Admin Console is deployed, but the application is not yet installed.

1. Go to the URL provided in the output to access the Admin Console.

1. On the Admin Console landing page, click **Start**.

1. On the **Secure the Admin Console** screen, review the instructions and click **Continue**. In your browser, follow the instructions that were provided on the **Secure the Admin Console** screen to bypass the warning.

1. On the **Certificate type** screen, either select **Self-signed** to continue using the self-signed Admin Console certificate or click **Upload your own** to upload your own private key and certificate.

   By default, a self-signed TLS certificate is used to secure communication between your browser and the Admin Console. You will see a warning in your browser every time you access the Admin Console unless you upload your own certificate.

1. On the login page, enter the Admin Console password that you created during installation and click **Log in**.

1. On the **Nodes** page, you can view details about the machine where you installed, including its node role, status, CPU, and memory.

   Optionally, add nodes to the cluster before deploying the application. For more information about joining nodes, see [Managing Multi-Node Clusters with Embedded Cluster](/enterprise/embedded-manage-nodes). Click **Continue**.

1. On the **Configure [App Name]** screen, complete the fields for the application configuration options. Click **Continue**.

1. On the **Validate the environment & deploy [App Name]** screen, address any warnings or failures identified by the preflight checks and then click **Deploy**.

   Preflight checks are conformance tests that run against the target namespace and cluster to ensure that the environment meets the minimum requirements to support the application.

The Admin Console dashboard opens.

On the Admin Console dashboard, the application status changes from Missing to Unavailable while the application is being installed. When the installation is complete, the status changes to Ready.
For example:

![Admin console dashboard showing ready status](/images/gitea-ec-ready.png)

[View a larger version of this image](/images/gitea-ec-ready.png)

================
File: docs/enterprise/installing-existing-cluster-airgapped.mdx
================
import IntroExisting from "../partials/install/_intro-existing.mdx"
import IntroAirGap from "../partials/install/_intro-air-gap.mdx"
import PrereqsExistingCluster from "../partials/install/_prereqs-existing-cluster.mdx"
import BuildAirGapBundle from "../partials/install/_airgap-bundle-build.mdx"
import DownloadAirGapBundle from "../partials/install/_airgap-bundle-download.mdx"
import ViewAirGapBundle from "../partials/install/_airgap-bundle-view-contents.mdx"
import LicenseFile from "../partials/install/_license-file-prereq.mdx"
import AirGapLicense from "../partials/install/_airgap-license-download.mdx"
import DownloadKotsBundle from "../partials/install/_download-kotsadm-bundle.mdx"
import InstallCommandPrompts from "../partials/install/_kots-install-prompts.mdx"
import AppNameUI from "../partials/install/_placeholder-app-name-UI.mdx"
import InstallKotsCliAirGap from "../partials/install/_install-kots-cli-airgap.mdx"
import PushKotsImages from "../partials/install/_push-kotsadm-images.mdx"
import PlaceholderRoCreds from "../partials/install/_placeholder-ro-creds.mdx"
import KotsVersionMatch from "../partials/install/_kots-airgap-version-match.mdx"

# Air Gap Installation in Existing Clusters with KOTS

## Prerequisites

Complete the following prerequisites:

* Ensure that there is a compatible Docker image registry available inside the network. For more information about Docker registry compatibility, see [Compatible Image Registries](/enterprise/installing-general-requirements#registries).

  KOTS rewrites the application image names in all application manifests to read from the on-premises registry, and it re-tags and pushes the images to the on-premises registry. When authenticating to the registry, credentials with `push` permissions are required.

  A single application expects to use a single namespace in the Docker image registry. The namespace name can be any valid URL-safe string, supplied at installation time. A registry typically expects the namespace to exist before any images can be pushed into it.

  :::note
  Amazon Elastic Container Registry (ECR) does not use namespaces.
  :::

## Install {#air-gap}

To install in an air gap cluster with KOTS:

1. Download the customer license:

1. Go to the channel where the target release was promoted to build and download the air gap bundle for the release:

1.

1.

1.

1.

1.

1. Install the KOTS Admin Console using the images that you pushed in the previous step:

   ```shell
   kubectl kots install APP_NAME \
     --kotsadm-registry REGISTRY_HOST \
     --registry-username RO-USERNAME \
     --registry-password RO-PASSWORD
   ```

   Replace:

   * `APP_NAME` with a name for the application. This is the unique name that KOTS will use to refer to the application that you install.

1.

1. Access the Admin Console on port 8800. If the port forward is active, go to [http://localhost:8800](http://localhost:8800) to access the Admin Console.

   If you need to reopen the port forward to the Admin Console, run the following command:

   ```shell
   kubectl kots admin-console -n NAMESPACE
   ```
   Replace `NAMESPACE` with the namespace where KOTS is installed.

1.
Log in with the password that you created during installation. + +1. Upload your license file. + +1. Upload the `.airgap` application air gap bundle. + +1. On the config screen, complete the fields for the application configuration options and then click **Continue**. + +1. On the **Preflight checks** page, the application-specific preflight checks run automatically. Preflight checks are conformance tests that run against the target namespace and cluster to ensure that the environment meets the minimum requirements to support the application. Click **Deploy**. + + :::note + Replicated recommends that you address any warnings or failures, rather than dismissing them. Preflight checks help ensure that your environment meets the requirements for application deployment. + ::: + +1. (Minimal RBAC Only) If you are installing with minimal role-based access control (RBAC), KOTS recognizes if the preflight checks failed due to insufficient privileges. When this occurs, a kubectl CLI preflight command displays that lets you manually run the preflight checks. The Admin Console then automatically displays the results of the preflight checks. Click **Deploy**. + + ![kubectl CLI preflight command](/images/kubectl-preflight-command.png) + + [View a larger version of this image](/images/kubectl-preflight-command.png) + +The Admin Console dashboard opens. + +On the Admin Console dashboard, the application status changes from Missing to Unavailable while the Deployment is being created. When the installation is complete, the status changes to Ready. For example: + +![Admin Console dashboard](/images/kotsadm-dashboard-graph.png) + +[View a larger version of this image](/images/kotsadm-dashboard-graph.png) + +================ +File: docs/enterprise/installing-existing-cluster-automation.mdx +================ +import ConfigValuesExample from "../partials/configValues/_configValuesExample.mdx" +import ConfigValuesProcedure from "../partials/configValues/_config-values-procedure.mdx" +import PlaceholdersGlobal from "../partials/install/_placeholders-global.mdx" +import PlaceholderAirgapBundle from "../partials/install/_placeholder-airgap-bundle.mdx" +import PlaceholderNamespaceExisting from "../partials/install/_placeholder-namespace-existing.mdx" +import DownloadKotsBundle from "../partials/install/_download-kotsadm-bundle.mdx" +import InstallKotsCliAirGap from "../partials/install/_install-kots-cli-airgap.mdx" +import InstallKotsCli from "../partials/install/_install-kots-cli.mdx" +import PushKotsImages from "../partials/install/_push-kotsadm-images.mdx" +import KotsVersionMatch from "../partials/install/_kots-airgap-version-match.mdx" +import PlaceholderRoCreds from "../partials/install/_placeholder-ro-creds.mdx" +import AccessAdminConsole from "../partials/install/_access-admin-console.mdx" + +# Installing with the KOTS CLI + +This topic describes how to install an application with Replicated KOTS in an existing cluster using the KOTS CLI. + +## Overview + +You can use the KOTS CLI to install an application with Replicated KOTS. A common use case for installing from the command line is to automate installation, such as performing headless installations as part of CI/CD pipelines. + +To install with the KOTS CLI, you provide all the necessary installation assets, such as the license file and the application config values, with the installation command rather than through the Admin Console UI. 
Any preflight checks defined for the application run automatically from the CLI rather than being displayed in the Admin Console. + +The following shows an example of the output from the kots install command: + + ``` + • Deploying Admin Console + • Creating namespace ✓ + • Waiting for datastore to be ready ✓ + • Waiting for Admin Console to be ready ✓ + • Waiting for installation to complete ✓ + • Waiting for preflight checks to complete ✓ + + • Press Ctrl+C to exit + • Go to http://localhost:8800 to access the Admin Console + + • Go to http://localhost:8888 to access the application + ``` + +## Prerequisite + +Create a ConfigValues YAML file to define the configuration values for the application release. The ConfigValues file allows you to pass the configuration values for an application from the command line with the install command, rather than through the Admin Console UI. For air-gapped environments, ensure that the ConfigValues file can be accessed from the installation environment. + +The KOTS ConfigValues file includes the fields that are defined in the KOTS Config custom resource for an application release, along with the user-supplied and default values for each field, as shown in the example below: + + + + + +## Online (Internet-Connected) Installation + +To install with KOTS in an online existing cluster: + +1. + +1. Install the application: + + ```bash + kubectl kots install APP_NAME \ + --shared-password PASSWORD \ + --license-file PATH_TO_LICENSE \ + --config-values PATH_TO_CONFIGVALUES \ + --namespace NAMESPACE \ + --no-port-forward + ``` + Replace: + + + + + +## Air Gap Installation {#air-gap} + +To install with KOTS in an air-gapped existing cluster: + +1. + +1. + + + +1. + +1. Install the application: + + ```bash + kubectl kots install APP_NAME \ + --shared-password PASSWORD \ + --license-file PATH_TO_LICENSE \ + --config-values PATH_TO_CONFIGVALUES \ + --airgap-bundle PATH_TO_AIRGAP_BUNDLE \ + --namespace NAMESPACE \ + --kotsadm-registry REGISTRY_HOST \ + --registry-username RO_USERNAME \ + --registry-password RO_PASSWORD \ + --no-port-forward + ``` + + Replace: + + + + + + + + + +## (Optional) Access the Admin Console + + + +================ +File: docs/enterprise/installing-existing-cluster.mdx +================ +import IntroExisting from "../partials/install/_intro-existing.mdx" +import PrereqsExistingCluster from "../partials/install/_prereqs-existing-cluster.mdx" +import LicenseFile from "../partials/install/_license-file-prereq.mdx" +import InstallCommandPrompts from "../partials/install/_kots-install-prompts.mdx" +import AppNameUI from "../partials/install/_placeholder-app-name-UI.mdx" + +# Online Installation in Existing Clusters with KOTS + + + +## Prerequisites + +Complete the following prerequisites: + + + + +## Install {#online} + +To install KOTS and the application in an existing cluster: + +1. Run one of these commands to install the Replicated KOTS CLI and KOTS. As part of the command, you also specify a name and version for the application that you will install. + + * **For the latest application version**: + + ```shell + curl https://kots.io/install | bash + kubectl kots install APP_NAME + ``` + * **For a specific application version**: + + ```shell + curl https://kots.io/install | bash + kubectl kots install APP_NAME --app-version-label=VERSION_LABEL + ``` + + Replace, where applicable: + + + + * `VERSION_LABEL` with the label for the version of the application to install. For example, `--app-version-label=3.0.1`. 
+ + **Examples:** + + ```shell + curl https://kots.io/install | bash + kubectl kots install application-name + ``` + + ```shell + curl https://kots.io/install | bash + kubectl kots install application-name --app-version-label=3.0.1 + ``` + +1. + +1. Access the Admin Console on port 8800. If the port forward is active, go to [http://localhost:8800](http://localhost:8800) to access the Admin Console. + + If you need to reopen the port forward to the Admin Console, run the following command: + + ```shell + kubectl kots admin-console -n NAMESPACE + ``` + Replace `NAMESPACE` with the namespace where KOTS is installed. + +1. Log in with the password that you created during installation. + +1. Upload your license file. + +1. On the config screen, complete the fields for the application configuration options and then click **Continue**. + +1. On the **Preflight checks** page, the application-specific preflight checks run automatically. Preflight checks are conformance tests that run against the target namespace and cluster to ensure that the environment meets the minimum requirements to support the application. Click **Deploy**. + + :::note + Replicated recommends that you address any warnings or failures, rather than dismissing them. Preflight checks help ensure that your environment meets the requirements for application deployment. + ::: + +1. (Minimal RBAC Only) If you are installing with minimal role-based access control (RBAC), KOTS recognizes if the preflight checks failed due to insufficient privileges. When this occurs, a kubectl CLI preflight command displays that lets you manually run the preflight checks. The Admin Console then automatically displays the results of the preflight checks. Click **Deploy**. + + ![kubectl CLI preflight command](/images/kubectl-preflight-command.png) + + [View a larger version of this image](/images/kubectl-preflight-command.png) + +The Admin Console dashboard opens. + +On the Admin Console dashboard, the application status changes from Missing to Unavailable while the Deployment is being created. When the installation is complete, the status changes to Ready. For example: + +![Admin Console dashboard](/images/kotsadm-dashboard-graph.png) + +[View a larger version of this image](/images/kotsadm-dashboard-graph.png) + +================ +File: docs/enterprise/installing-general-requirements.mdx +================ +import DockerCompatibility from "../partials/image-registry/_docker-compatibility.mdx" +import KubernetesCompatibility from "../partials/install/_kubernetes-compatibility.mdx" +import FirewallOpeningsIntro from "../partials/install/_firewall-openings-intro.mdx" + +# KOTS Installation Requirements + +This topic describes the requirements for installing in a Kubernetes cluster with Replicated KOTS. + +:::note +This topic does not include any requirements specific to the application. Ensure that you meet any additional requirements for the application before installing. +::: + +## Supported Browsers + +The following table lists the browser requirements for the Replicated KOTS Admin Console with the latest version of KOTS. + +| Browser | Support | +|----------------------|-------------| +| Chrome | 66+ | +| Firefox | 58+ | +| Opera | 53+ | +| Edge | 80+ | +| Safari (Mac OS only) | 13+ | +| Internet Explorer | Unsupported | + +## Kubernetes Version Compatibility + +Each release of KOTS maintains compatibility with the current Kubernetes version, and the two most recent versions at the time of its release. 
This includes support against all patch releases of the corresponding Kubernetes version. + +Kubernetes versions 1.25 and earlier are end-of-life (EOL). For more information about Kubernetes versions, see [Release History](https://kubernetes.io/releases/) in the Kubernetes documentation. + +Replicated recommends using a version of KOTS that is compatible with Kubernetes 1.26 and higher. + + + +## Minimum System Requirements + +To install KOTS in an existing cluster, your environment must meet the following minimum requirements: + +* **KOTS Admin Console minimum requirements**: Clusters that have LimitRanges specified must support the following minimum requirements for the Admin Console: + + * **CPU resources and memory**: The Admin Console pod requests 100m CPU resources and 100Mi memory. + + * **Disk space**: The Admin Console requires a minimum of 5GB of disk space on the cluster for persistent storage, including: + + * **4GB for S3-compatible object store**: The Admin Console requires 4GB for an S3-compatible object store to store appplication archives, support bundles, and snapshots that are configured to use a host path and NFS storage destination. By default, KOTS deploys MinIO to satisfy this object storage requirement. During deployment, MinIO is configured with a randomly generated `AccessKeyID` and `SecretAccessKey`, and only exposed as a ClusterIP on the overlay network. + + :::note + You can optionally install KOTS without MinIO by passing `--with-minio=false` with the `kots install` command. This installs KOTS as a StatefulSet using a persistent volume (PV) for storage. For more information, see [Installing KOTS in Existing Clusters Without Object Storage](/enterprise/installing-stateful-component-requirements). + ::: + + * **1GB for rqlite PersistentVolume**: The Admin Console requires 1GB for a rqlite StatefulSet to store version history, application metadata, and other small amounts of data needed to manage the application(s). During deployment, the rqlite component is secured with a randomly generated password, and only exposed as a ClusterIP on the overlay network. + +* **Supported operating systems**: The following are the supported operating systems for nodes: + * Linux AMD64 + * Linux ARM64 + +* **Available StorageClass**: The cluster must have an existing StorageClass available. KOTS creates the required stateful components using the default StorageClass in the cluster. For more information, see [Storage Classes](https://kubernetes.io/docs/concepts/storage/storage-classes/) in the Kubernetes documentation. + +* **Kubernetes version compatibility**: The version of Kubernetes running on the cluster must be compatible with the version of KOTS that you use to install the application. This compatibility requirement does not include any specific and additional requirements defined by the software vendor for the application. + + For more information about the versions of Kubernetes that are compatible with each version of KOTS, see [Kubernetes Version Compatibility](#kubernetes-version-compatibility) above. + +* **OpenShift version compatibility**: For Red Hat OpenShift clusters, the version of OpenShift must use a supported Kubernetes version. For more information about supported Kubernetes versions, see [Kubernetes Version Compatibility](#kubernetes-version-compatibility) above. + +* **Storage class**: The cluster must have an existing storage class available. 
For more information, see [Storage Classes](https://kubernetes.io/docs/concepts/storage/storage-classes/) in the Kubernetes documentation.
+
+* **Port forwarding**: To support port forwarding, Kubernetes clusters require that the SOcket CAT (socat) package is installed on each node.
+
+  If the package is not installed on each node in the cluster, you see the following error message when the installation script attempts to connect to the Admin Console: `unable to do port forwarding: socat not found`.
+
+  To check if the package that provides socat is installed, you can run `which socat`. If the package is installed, the `which socat` command prints the full path to the socat executable file. For example, `/usr/bin/socat`.
+
+  If the output of the `which socat` command is `socat not found`, then you must install the package that provides the socat command. The name of this package can vary depending on the node's operating system.
+
+## RBAC Requirements
+
+The user that runs the installation command must have at least the minimum role-based access control (RBAC) permissions that are required by KOTS. If the user does not have the required RBAC permissions, then an error message displays: `Current user has insufficient privileges to install Admin Console`.
+
+The required RBAC permissions vary depending on whether the user attempts to install KOTS with cluster-scoped access or namespace-scoped access:
+* [Cluster-scoped RBAC Requirements (Default)](#cluster-scoped)
+* [Namespace-scoped RBAC Requirements](#namespace-scoped)
+
+### Cluster-scoped RBAC Requirements (Default) {#cluster-scoped}
+
+By default, KOTS requires cluster-scoped access. With cluster-scoped access, a Kubernetes ClusterRole and ClusterRoleBinding are created that grant KOTS access to all resources across all namespaces in the cluster.
+
+To install KOTS with cluster-scoped access, the user must meet the following RBAC requirements:
+* The user must be able to create workloads, ClusterRoles, and ClusterRoleBindings.
+* The user must have cluster-admin permissions to create namespaces and assign RBAC roles across the cluster.
+
+### Namespace-scoped RBAC Requirements {#namespace-scoped}
+
+KOTS can be installed with namespace-scoped access rather than the default cluster-scoped access. With namespace-scoped access, a Kubernetes Role and RoleBinding are automatically created that grant KOTS permissions only in the namespace where it is installed.
+
+:::note
+Depending on the application, namespace-scoped access for KOTS is required, optional, or not supported. Contact your software vendor for application-specific requirements.
+:::
+
+To install or upgrade KOTS with namespace-scoped access, the user must have _one_ of the following permission levels in the target namespace:
+* Wildcard Permissions (Default)
+* Minimum KOTS RBAC Permissions
+
+See the sections below for more information.
+
+#### Wildcard Permissions (Default)
+
+By default, when namespace-scoped access is enabled, KOTS attempts to automatically create the following Role to acquire wildcard (`* * *`) permissions in the target namespace:
+
+  ```yaml
+  apiVersion: "rbac.authorization.k8s.io/v1"
+  kind: "Role"
+  metadata:
+    name: "kotsadm-role"
+  rules:
+  - apiGroups: ["*"]
+    resources: ["*"]
+    verbs: ["*"]
+  ```
+
+  To support this default behavior, the user must also have `* * *` permissions in the target namespace.
+
+#### Minimum KOTS RBAC Permissions
+
+In some cases, it is not possible to grant the user `* * *` permissions in the target namespace.
For example, an organization might have security policies that prevent this level of permissions. + + If the user installing or upgrading KOTS cannot be granted `* * *` permissions in the namespace, then they can instead request the minimum RBAC permissions required by KOTS. Using the minimum KOTS RBAC permissions also requires manually creating a ServiceAccount, Role, and RoleBinding for KOTS, rather than allowing KOTS to automatically create a Role with `* * *` permissions. + + To use the minimum KOTS RBAC permissions to install or upgrade: + + 1. Ensure that the user has the minimum RBAC permissions required by KOTS. The following lists the minimum RBAC permissions: + + ```yaml + - apiGroups: [""] + resources: ["configmaps", "persistentvolumeclaims", "pods", "secrets", "services", "limitranges"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["apps"] + resources: ["daemonsets", "deployments", "statefulsets"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["batch"] + resources: ["jobs", "cronjobs"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["networking.k8s.io", "extensions"] + resources: ["ingresses"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: [""] + resources: ["namespaces", "endpoints", "serviceaccounts"] + verbs: ["get"] + - apiGroups: ["authorization.k8s.io"] + resources: ["selfsubjectaccessreviews", "selfsubjectrulesreviews"] + verbs: ["create"] + - apiGroups: ["rbac.authorization.k8s.io"] + resources: ["roles", "rolebindings"] + verbs: ["get"] + - apiGroups: [""] + resources: ["pods/log", "pods/exec"] + verbs: ["get", "list", "watch", "create"] + - apiGroups: ["batch"] + resources: ["jobs/status"] + verbs: ["get", "list", "watch"] + ``` + + :::note + The minimum RBAC requirements can vary slightly depending on the cluster's Kubernetes distribution and the version of KOTS. Contact your software vendor if you have the required RBAC permissions listed above and you see an error related to RBAC during installation or upgrade. + ::: + + 1. 
Save the following ServiceAccount, Role, and RoleBinding to a single YAML file, such as `rbac.yaml`: + + ```yaml + apiVersion: v1 + kind: ServiceAccount + metadata: + labels: + kots.io/backup: velero + kots.io/kotsadm: "true" + name: kotsadm + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + labels: + kots.io/backup: velero + kots.io/kotsadm: "true" + name: kotsadm-role + rules: + - apiGroups: [""] + resources: ["configmaps", "persistentvolumeclaims", "pods", "secrets", "services", "limitranges"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["apps"] + resources: ["daemonsets", "deployments", "statefulsets"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["batch"] + resources: ["jobs", "cronjobs"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["networking.k8s.io", "extensions"] + resources: ["ingresses"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: [""] + resources: ["namespaces", "endpoints", "serviceaccounts"] + verbs: ["get"] + - apiGroups: ["authorization.k8s.io"] + resources: ["selfsubjectaccessreviews", "selfsubjectrulesreviews"] + verbs: ["create"] + - apiGroups: ["rbac.authorization.k8s.io"] + resources: ["roles", "rolebindings"] + verbs: ["get"] + - apiGroups: [""] + resources: ["pods/log", "pods/exec"] + verbs: ["get", "list", "watch", "create"] + - apiGroups: ["batch"] + resources: ["jobs/status"] + verbs: ["get", "list", "watch"] + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + labels: + kots.io/backup: velero + kots.io/kotsadm: "true" + name: kotsadm-rolebinding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kotsadm-role + subjects: + - kind: ServiceAccount + name: kotsadm + ``` + + 1. If the application contains any Custom Resource Definitions (CRDs), add the CRDs to the Role in the YAML file that you created in the previous step with as many permissions as possible: `["get", "list", "watch", "create", "update", "patch", "delete"]`. + + :::note + Contact your software vendor for information about any CRDs that are included in the application. + ::: + + **Example** + + ```yaml + rules: + - apiGroups: ["stable.example.com"] + resources: ["crontabs"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + ``` + + 1. Run the following command to create the RBAC resources for KOTS in the namespace: + + ``` + kubectl apply -f RBAC_YAML_FILE -n TARGET_NAMESPACE + ``` + + Replace: + * `RBAC_YAML_FILE` with the name of the YAML file with the ServiceAccount, Role, and RoleBinding and that you created. + * `TARGET_NAMESPACE` with the namespace where the user will install KOTS. + +:::note +After manually creating these RBAC resources, the user must include both the `--ensure-rbac=false` and `--skip-rbac-check` flags when installing or upgrading. These flags prevent KOTS from checking for or attempting to create a Role with `* * *` permissions in the namespace. For more information, see [Prerequisites](installing-existing-cluster#prerequisites) in _Online Installation in Existing Clusters with KOTS_. +::: + +## Compatible Image Registries {#registries} + +A private image registry is required for air gap installations with KOTS in existing clusters. You provide the credentials for a compatible private registry during installation. 
You can also optionally configure a local private image registry for use with installations in online (internet-connected) environments. + +Private registry settings can be changed at any time. For more information, see [Configuring Local Image Registries](image-registry-settings). + +KOTS has been tested for compatibility with the following registries: + + + +## Firewall Openings for Online Installations with KOTS in an Existing Cluster {#firewall} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
DomainDescription
Docker Hub

Some dependencies of KOTS are hosted as public images in Docker Hub. The required domains for this service are `index.docker.io`, `cdn.auth0.com`, `*.docker.io`, and `*.docker.com.`

`proxy.replicated.com` *

Private Docker images are proxied through `proxy.replicated.com`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.

For the range of IP addresses for `proxy.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L52-L57) in GitHub.

`replicated.app`

Upstream application YAML and metadata is pulled from `replicated.app`. The current running version of the application (if any), as well as a license ID and application ID to authenticate, are all sent to `replicated.app`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.

For the range of IP addresses for `replicated.app`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L60-L65) in GitHub.

`registry.replicated.com` **

Some applications host private images in the Replicated registry at this domain. The on-prem docker client uses a license ID to authenticate to `registry.replicated.com`. This domain is owned by Replicated, Inc which is headquartered in Los Angeles, CA.

For the range of IP addresses for `registry.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L20-L25) in GitHub.

`kots.io`

Requests are made to this domain when installing the Replicated KOTS CLI. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.

`github.com`Requests are made to this domain when installing the Replicated KOTS CLI. For information about retrieving GitHub IP addresses, see [About GitHub's IP addresses](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/about-githubs-ip-addresses) in the GitHub documentation.
+ +* Required only if the application uses the [Replicated proxy registry](/vendor/private-images-about). + +** Required only if the application uses the [Replicated registry](/vendor/private-images-replicated). + +================ +File: docs/enterprise/installing-kurl-airgap.mdx +================ +import KurlAbout from "../partials/install/_kurl-about.mdx" +import IntroEmbedded from "../partials/install/_intro-embedded.mdx" +import IntroAirGap from "../partials/install/_intro-air-gap.mdx" +import PrereqsEmbeddedCluster from "../partials/install/_prereqs-embedded-cluster.mdx" +import HaLoadBalancerPrereq from "../partials/install/_ha-load-balancer-prereq.mdx" +import AirGapLicense from "../partials/install/_airgap-license-download.mdx" +import BuildAirGapBundle from "../partials/install/_airgap-bundle-build.mdx" +import DownloadAirGapBundle from "../partials/install/_airgap-bundle-download.mdx" +import ViewAirGapBundle from "../partials/install/_airgap-bundle-view-contents.mdx" +import LicenseFile from "../partials/install/_license-file-prereq.mdx" +import HAStep from "../partials/install/_embedded-ha-step.mdx" +import LoginPassword from "../partials/install/_embedded-login-password.mdx" +import DownloadKurlBundle from "../partials/install/_download-kurl-bundle.mdx" +import ExtractKurlBundle from "../partials/install/_extract-kurl-bundle.mdx" +import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" + +# Air Gap Installation with kURL + + + + + + + + + +## Prerequisites + +Complete the following prerequisites: + + + + + +## Install {#air-gap} + +To install an application with kURL: + +1. Download the customer license: + + + +1. Go the channel where the target release was promoted to build and download the air gap bundle for the release: + + + +1. + +1. + +1. Download the `.tar.gz` air gap bundle for the kURL installer, which includes the components needed to run the kURL cluster and install the application with KOTS. kURL air gap bundles can be downloaded from the channel where the given release is promoted: + + * To download the kURL air gap bundle for the Stable channel: + + + + * To download the kURL bundle for channels other than Stable: + + ```bash + replicated channel inspect CHANNEL + ``` + Replace `CHANNEL` with the exact name of the target channel, which can include uppercase letters or special characters, such as `Unstable` or `my-custom-channel`. + + In the output of this command, copy the curl command with the air gap URL. + +1. + +1. Run one of the following commands to install in air gap mode: + + - For a regular installation, run: + + ```bash + cat install.sh | sudo bash -s airgap + ``` + + - For high availability, run: + + ```bash + cat install.sh | sudo bash -s airgap ha + ``` + +1. + +1. + +1. Go to the address provided in the `Kotsadm` field in the output of the installation command. For example, `Kotsadm: http://34.171.140.123:8800`. + +1. On the Bypass Browser TLS warning page, review the information about how to bypass the browser TLS warning, and then click **Continue to Setup**. + +1. On the HTTPS page, do one of the following: + + - To use the self-signed TLS certificate only, enter the hostname (required) if you are using the identity service. If you are not using the identity service, the hostname is optional. Click **Skip & continue**. + - To use a custom certificate only, enter the hostname (required) if you are using the identity service. If you are not using the identity service, the hostname is optional. 
Then upload a private key and SSL certificate to secure communication between your browser and the Admin Console. Click **Upload & continue**. + +1. Log in to the Admin Console with the password that was provided in the `Login with password (will not be shown again):` field in the output of the installation command. + +1. Upload your license file. + +1. Upload the `.airgap` bundle for the release that you downloaded in an earlier step. + +1. On the **Preflight checks** page, the application-specific preflight checks run automatically. Preflight checks are conformance tests that run against the target namespace and cluster to ensure that the environment meets the minimum requirements to support the application. Click **Deploy**. + + :::note + Replicated recommends that you address any warnings or failures, rather than dismissing them. Preflight checks help ensure that your environment meets the requirements for application deployment. + ::: + +1. (Minimal RBAC Only) If you are installing with minimal role-based access control (RBAC), KOTS recognizes if the preflight checks failed due to insufficient privileges. When this occurs, a kubectl CLI preflight command displays that lets you manually run the preflight checks. The Admin Console then automatically displays the results of the preflight checks. Click **Deploy**. + + ![kubectl CLI preflight command](/images/kubectl-preflight-command.png) + + [View a larger version of this image](/images/kubectl-preflight-command.png) + + The Admin Console dashboard opens. + + On the Admin Console dashboard, the application status changes from Missing to Unavailable while the Deployment is being created. When the installation is complete, the status changes to Ready. + + ![Admin console dashboard showing ready status](/images/gitea-ec-ready.png) + + [View a larger version of this image](/images/gitea-ec-ready.png) + +1. (Recommended) Change the Admin Console login password: + 1. Click the menu in the top right corner of the Admin Console, then click **Change password**. + 1. Enter a new password in the dialog, and click **Change Password** to save. + + Replicated strongly recommends that you change the password from the default provided during installation in a kURL cluster. For more information, see [Changing an Admin Console Password](auth-changing-passwords). + +1. Add primary and secondary nodes to the cluster. You might add nodes to either meet application requirements or to support your usage of the application. See [Adding Nodes to Embedded Clusters](cluster-management-add-nodes). 
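+
+   After the new nodes join, you can verify that they are healthy from any primary node using standard kubectl. A minimal check follows; the node names, roles, and versions shown are illustrative:
+
+   ```bash
+   kubectl get nodes
+   # NAME     STATUS   ROLES                  AGE   VERSION
+   # node-0   Ready    control-plane,master   2h    v1.26.x
+   # node-1   Ready    <none>                 10m   v1.26.x
+   ```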
+ +================ +File: docs/enterprise/installing-kurl-automation.mdx +================ +import ConfigValuesExample from "../partials/configValues/_configValuesExample.mdx" +import ConfigValuesProcedure from "../partials/configValues/_config-values-procedure.mdx" +import PlaceholdersGlobal from "../partials/install/_placeholders-global.mdx" +import PlaceholderAirgapBundle from "../partials/install/_placeholder-airgap-bundle.mdx" +import PlaceholderNamespaceKurl from "../partials/install/_placeholder-namespace-embedded.mdx" +import IntroKurl from "../partials/install/_automation-intro-embedded.mdx" +import DownloadkURLBundle from "../partials/install/_download-kurl-bundle.mdx" +import ExtractKurlBundle from "../partials/install/_extract-kurl-bundle.mdx" +import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" + +# Installing with kURL from the Command Line + + + +This topic describes how to install an application with Replicated kURL from the command line. + +## Overview + +You can use the command line to install an application with Replicated kURL. A common use case for installing from the command line is to automate installation, such as performing headless installations as part of CI/CD pipelines. + +To install from the command line, you provide all the necessary installation assets, such as the license file and the application config values, with the installation command rather than through the Admin Console UI. Any preflight checks defined for the application run automatically during headless installations from the command line rather than being displayed in the Admin Console. + +## Prerequisite + +Create a ConfigValues YAML file to define the configuration values for the application release. The ConfigValues file allows you to pass the configuration values for an application from the command line with the install command, rather than through the Admin Console UI. For air-gapped environments, ensure that the ConfigValues file can be accessed from the installation environment. + +The KOTS ConfigValues file includes the fields that are defined in the KOTS Config custom resource for an application release, along with the user-supplied and default values for each field, as shown in the example below: + + + + + +## Online (Internet-Connected) Installation + + + +To install with kURL on a VM or bare metal server: + +1. Create the kURL cluster: + + ```bash + curl -sSL https://k8s.kurl.sh/APP_NAME | sudo bash + ``` + +1. Install the application in the cluster: + + ```bash + kubectl kots install APP_NAME \ + --shared-password PASSWORD \ + --license-file PATH_TO_LICENSE \ + --config-values PATH_TO_CONFIGVALUES \ + --namespace default \ + --no-port-forward + ``` + + Replace: + + + + + +## Air Gap Installation + +To install in an air-gapped kURL cluster: + +1. Download the kURL `.tar.gz` air gap bundle: + + + +1. + +1. Create the kURL cluster: + + ``` + cat install.sh | sudo bash -s airgap + ``` + +1. 
Install the application: + + ```bash + kubectl kots install APP_NAME \ + --shared-password PASSWORD \ + --license-file PATH_TO_LICENSE \ + --config-values PATH_TO_CONFIGVALUES \ + --airgap-bundle PATH_TO_AIRGAP_BUNDLE \ + --namespace default \ + --no-port-forward + ``` + + Replace: + + + + + + + +================ +File: docs/enterprise/installing-kurl-requirements.mdx +================ +import FirewallOpeningsIntro from "../partials/install/_firewall-openings-intro.mdx" +import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" + +# kURL Installation Requirements + + + +This topic lists the installation requirements for Replicated kURL. Ensure that the installation environment meets these requirements before attempting to install. + +## Minimum System Requirements + +* 4 CPUs or equivalent per machine +* 8GB of RAM per machine +* 40GB of disk space per machine +* TCP ports 2379, 2380, 6443, 6783, and 10250 open between cluster nodes +* UDP port 8472 open between cluster nodes + + :::note + If the Kubernetes installer specification uses the deprecated kURL [Weave add-on](https://kurl.sh/docs/add-ons/weave), UDP ports 6783 and 6784 must be open between cluster nodes. Reach out to your software vendor for more information. + ::: + +* Root access is required +* (Rook Only) The Rook add-on version 1.4.3 and later requires block storage on each node in the cluster. For more information about how to enable block storage for Rook, see [Block Storage](https://kurl.sh/docs/add-ons/rook/#block-storage) in _Rook Add-On_ in the kURL documentation. + +## Additional System Requirements + +You must meet the additional kURL system requirements when applicable: + +- **Supported Operating Systems**: For supported operating systems, see [Supported Operating Systems](https://kurl.sh/docs/install-with-kurl/system-requirements#supported-operating-systems) in the kURL documentation. + +- **kURL Dependencies Directory**: kURL installs additional dependencies in the directory /var/lib/kurl and the directory requirements must be met. See [kURL Dependencies Directory](https://kurl.sh/docs/install-with-kurl/system-requirements#kurl-dependencies-directory) in the kURL documentation. + +- **Networking Requirements**: Networking requirements include firewall openings, host firewalls rules, and port availability. See [Networking Requirements](https://kurl.sh/docs/install-with-kurl/system-requirements#networking-requirements) in the kURL documentation. + +- **High Availability Requirements**: If you are operating a cluster with high availability, see [High Availability Requirements](https://kurl.sh/docs/install-with-kurl/system-requirements#high-availability-requirements) in the kURL documentation. + +- **Cloud Disk Performance**: For a list of cloud VM instance and disk combinations that are known to provide sufficient performance for etcd and pass the write latency preflight, see [Cloud Disk Performance](https://kurl.sh/docs/install-with-kurl/system-requirements#cloud-disk-performance) in the kURL documentation. + +## Firewall Openings for Online Installations with kURL {#firewall} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
DomainDescription
Docker Hub

Some dependencies of KOTS are hosted as public images in Docker Hub. The required domains for this service are `index.docker.io`, `cdn.auth0.com`, `*.docker.io`, and `*.docker.com.`

`proxy.replicated.com` *

Private Docker images are proxied through `proxy.replicated.com`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.

For the range of IP addresses for `proxy.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L52-L57) in GitHub.

`replicated.app`

Upstream application YAML and metadata is pulled from `replicated.app`. The current running version of the application (if any), as well as a license ID and application ID to authenticate, are all sent to `replicated.app`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.

For the range of IP addresses for `replicated.app`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L60-L65) in GitHub.

`registry.replicated.com` **

Some applications host private images in the Replicated registry at this domain. The on-prem docker client uses a license ID to authenticate to `registry.replicated.com`. This domain is owned by Replicated, Inc which is headquartered in Los Angeles, CA.

For the range of IP addresses for `registry.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L20-L25) in GitHub.

`k8s.kurl.sh`

`s3.kurl.sh`

kURL installation scripts and artifacts are served from [kurl.sh](https://kurl.sh). An application identifier is sent in a URL path, and bash scripts and binary executables are served from kurl.sh. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.

For the range of IP addresses for `k8s.kurl.sh`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L34-L39) in GitHub.

The range of IP addresses for `s3.kurl.sh` are the same as IP addresses for the `kurl.sh` domain. For the range of IP address for `kurl.sh`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L28-L31) in GitHub.

`amazonaws.com``tar.gz` packages are downloaded from Amazon S3 during installations with kURL. For information about dynamically scraping the IP ranges to allowlist for accessing these packages, see [AWS IP address ranges](https://docs.aws.amazon.com/general/latest/gr/aws-ip-ranges.html#aws-ip-download) in the AWS documentation.
+ +* Required only if the application uses the [Replicated proxy registry](/vendor/private-images-about). + +** Required only if the application uses the [Replicated registry](/vendor/private-images-replicated). + +================ +File: docs/enterprise/installing-kurl.mdx +================ +import KurlAbout from "../partials/install/_kurl-about.mdx" +import IntroEmbedded from "../partials/install/_intro-embedded.mdx" +import PrereqsEmbeddedCluster from "../partials/install/_prereqs-embedded-cluster.mdx" +import HaLoadBalancerPrereq from "../partials/install/_ha-load-balancer-prereq.mdx" +import LicenseFile from "../partials/install/_license-file-prereq.mdx" +import HAStep from "../partials/install/_embedded-ha-step.mdx" +import LoginPassword from "../partials/install/_embedded-login-password.mdx" +import AppNameUI from "../partials/install/_placeholder-app-name-UI.mdx" +import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" + +# Online Installation with kURL + + + + + + + +## Prerequisites + +Complete the following prerequisites: + + + + + + + +## Install {#install-app} + +To install an application with kURL: + +1. Run one of the following commands to create the cluster with the kURL installer: + + * For a regular installation, run: + + ```bash + curl -sSL https://k8s.kurl.sh/APP_NAME | sudo bash + ``` + + * For high availability mode: + + ```bash + curl -sSL https://k8s.kurl.sh/APP_NAME | sudo bash -s ha + ``` + + Replace: + + + +1. + +1. + +1. Go to the address provided in the `Kotsadm` field in the output of the installation command. For example, `Kotsadm: http://34.171.140.123:8800`. + +1. On the Bypass Browser TLS warning page, review the information about how to bypass the browser TLS warning, and then click **Continue to Setup**. + +1. On the HTTPS page, do one of the following: + + - To use the self-signed TLS certificate only, enter the hostname (required) if you are using the identity service. If you are not using the identity service, the hostname is optional. Click **Skip & continue**. + - To use a custom certificate only, enter the hostname (required) if you are using the identity service. If you are not using the identity service, the hostname is optional. Then upload a private key and SSL certificate to secure communication between your browser and the Admin Console. Click **Upload & continue**. + +1. Log in to the Admin Console with the password that was provided in the `Login with password (will not be shown again):` field in the output of the installation command. + +1. Upload your license file. + +1. On the **Preflight checks** page, the application-specific preflight checks run automatically. Preflight checks are conformance tests that run against the target namespace and cluster to ensure that the environment meets the minimum requirements to support the application. Click **Deploy**. + + :::note + Replicated recommends that you address any warnings or failures, rather than dismissing them. Preflight checks help ensure that your environment meets the requirements for application deployment. + ::: + +1. (Minimal RBAC Only) If you are installing with minimal role-based access control (RBAC), KOTS recognizes if the preflight checks failed due to insufficient privileges. When this occurs, a kubectl CLI preflight command displays that lets you manually run the preflight checks. The Admin Console then automatically displays the results of the preflight checks. Click **Deploy**. 
+
+   ![kubectl CLI preflight command](/images/kubectl-preflight-command.png)
+
+   [View a larger version of this image](/images/kubectl-preflight-command.png)
+
+   The Admin Console dashboard opens.
+
+   On the Admin Console dashboard, the application status changes from Missing to Unavailable while the Deployment is being created. When the installation is complete, the status changes to Ready.
+
+   ![Admin console dashboard showing ready status](/images/gitea-ec-ready.png)
+
+   [View a larger version of this image](/images/gitea-ec-ready.png)
+
+1. (Recommended) Change the Admin Console login password:
+   1. Click the menu in the top right corner of the Admin Console, then click **Change password**.
+   1. Enter a new password in the dialog, and click **Change Password** to save.
+
+   Replicated strongly recommends that you change the password from the default provided during installation in a kURL cluster. For more information, see [Changing an Admin Console Password](auth-changing-passwords).
+
+1. Add primary and secondary nodes to the cluster. You might add nodes to either meet application requirements or to support your usage of the application. See [Adding Nodes to Embedded Clusters](cluster-management-add-nodes).
+
+================
+File: docs/enterprise/installing-overview.md
+================
+# Considerations Before Installing
+
+Before you install an application with KOTS in an existing cluster, consider the following installation options.
+
+## Online (Internet-Connected) or Air Gap Installations
+
+Most Kubernetes clusters are able to make outbound internet requests. Inbound access is never recommended or required.
+As such, most cluster operators are able to perform an online installation.
+
+If the target cluster does not have outbound internet access, the application can also be delivered through an air gap installation.
+
+To install an application in an air-gapped environment, the cluster must have access to an image registry. In this case, KOTS re-tags and pushes all images to the target registry.
+
+For information about installing with KOTS in air-gapped environments, see [Air Gap Installation in Existing Clusters with KOTS](installing-existing-cluster-airgapped).
+
+## Hardened Environments
+
+By default, KOTS Pods and containers are not deployed with a specific security context. For installations into a hardened environment, you can use the `--strict-security-context` flag with the installation command so that KOTS runs with a strict security context for Pods and containers.
+
+For more information about the security context enabled by the `--strict-security-context` flag, see [kots install](/reference/kots-cli-install).
+
+## Configuring Local Image Registries
+
+During install, KOTS can re-tag and push images to a local image registry.
+This is useful to enable CVE scans, image policy validation, and other pre-deployment rules. A private image registry is required for air-gapped environments and is optional for online environments.
+
+For information about image registry requirements, see [Compatible Image Registries](installing-general-requirements#registries).
+
+## Automated (Headless) Installation
+
+You can automate application installation in online and air-gapped environments using the KOTS CLI. In an automated installation, you provide all the information required to install and deploy the application with the `kots install` command, rather than providing this information in the Replicated Admin Console.
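+
+For example, a headless installation supplies the license file, a ConfigValues file, and the Admin Console password in a single command. The following is a minimal sketch using the same placeholders as the installation documentation:
+
+```bash
+kubectl kots install APP_NAME \
+  --shared-password PASSWORD \
+  --license-file PATH_TO_LICENSE \
+  --config-values PATH_TO_CONFIGVALUES \
+  --namespace NAMESPACE \
+  --no-port-forward
+```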
+
+For more information, see [Installing with the KOTS CLI](/enterprise/installing-existing-cluster-automation).
+
+## KOTS Installations Without Object Storage
+
+The KOTS Admin Console requires persistent storage for state. KOTS deploys MinIO for object storage by default.
+
+You can optionally install KOTS without object storage. When installed without object storage, KOTS deploys the Admin Console as a StatefulSet with an attached PersistentVolume (PV) instead of as a Deployment.
+
+For more information about how to install KOTS without object storage, see [Installing KOTS in Existing Clusters Without Object Storage](/enterprise/installing-stateful-component-requirements).
+
+================
+File: docs/enterprise/installing-stateful-component-requirements.md
+================
+# Installing KOTS in Existing Clusters Without Object Storage
+
+This topic describes how to install Replicated KOTS in existing clusters without the default object storage, including limitations of installing without object storage.
+
+## Overview
+
+The Replicated KOTS Admin Console requires persistent storage for state. By default, KOTS deploys an S3-compatible object store to satisfy the Admin Console's persistent storage requirement. The Admin Console stores the following in object storage:
+* Support bundles
+* Application archives
+* Backups taken with Replicated snapshots that are configured to use NFS or host path storage destinations
+
+For more information about the Admin Console's persistent storage requirements, see [Minimum System Requirements](/enterprise/installing-general-requirements#minimum-system-requirements) in _Installation Requirements_.
+
+For existing cluster installations, KOTS deploys MinIO for object storage by default.
+
+You can optionally install KOTS without object storage. When installed without object storage, KOTS deploys the Admin Console as a StatefulSet with an attached PersistentVolume (PV) instead of as a Deployment. In this case, support bundles and application archives are stored in the attached PV instead of in object storage. Additionally, for local snapshots storage, KOTS uses the `local-volume-provider` Velero plugin to store backups on local PVs instead of using object storage. The `local-volume-provider` plugin uses the existing Velero service account credentials to mount volumes directly to the Velero node-agent pods. For more information, see [`local-volume-provider`](https://github.com/replicatedhq/local-volume-provider) in GitHub.
+
+## How to Install and Upgrade Without Object Storage
+
+To install KOTS without object storage in an existing cluster, you can use the `--with-minio=false` flag.
+
+#### `kots install --with-minio=false`
+
+When `--with-minio=false` is used with the `kots install` command, KOTS does _not_ deploy MinIO. KOTS deploys the Admin Console as a StatefulSet with an attached PV instead of as a Deployment. For command usage, see [install](/reference/kots-cli-install/).
+
+#### `kots admin-console upgrade --with-minio=false`
+
+When `--with-minio=false` is used with the `kots admin-console upgrade` command, KOTS upgrades the existing Admin Console instance to the latest version, replaces the running Deployment with a StatefulSet, and removes MinIO after a data migration. This results in temporary downtime for the Admin Console, but deployed applications are unaffected. For command usage, see [admin-console upgrade](/reference/kots-cli-admin-console-upgrade/).
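+
+For example, the following commands show both flows. `APP_NAME` and `NAMESPACE` are placeholders for your application name and installation namespace:
+
+```bash
+# Fresh installation without object storage
+kubectl kots install APP_NAME --with-minio=false
+
+# Upgrade an existing Admin Console and migrate its data away from MinIO
+kubectl kots admin-console upgrade --with-minio=false --namespace NAMESPACE
+```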
+
+================
+File: docs/enterprise/monitoring-access-dashboards.mdx
+================
+# Accessing Dashboards Using Port Forwarding
+
+This topic includes information about how to access Prometheus, Grafana, and Alertmanager in Replicated KOTS existing cluster and Replicated kURL installations.
+
+For information about how to configure Prometheus monitoring in existing cluster installations, see [Configuring Prometheus Monitoring in Existing Cluster KOTS Installations](monitoring-applications).
+
+## Overview
+
+The Prometheus [expression browser](https://prometheus.io/docs/visualization/browser/), Grafana, and some preconfigured dashboards are included with Kube-Prometheus for advanced visualization. Prometheus Alertmanager is also included for alerting. You can access Prometheus, Grafana, and Alertmanager dashboards using `kubectl port-forward`.
+
+:::note
+You can also expose these pods on NodePorts or behind an ingress controller. This is an advanced use case. For information about exposing the pods on NodePorts, see [NodePorts](https://github.com/prometheus-operator/kube-prometheus/blob/main/docs/customizations/node-ports.md) in the kube-prometheus GitHub repository. For information about exposing the pods behind an ingress controller, see [Expose via Ingress](https://github.com/prometheus-operator/kube-prometheus/blob/main/docs/customizations/exposing-prometheus-alertmanager-grafana-ingress.md) in the kube-prometheus GitHub repository.
+:::
+
+## Prerequisite
+
+For existing cluster KOTS installations, first install Prometheus in the cluster and configure monitoring. See [Configuring Prometheus Monitoring in Existing Cluster KOTS Installations](monitoring-applications).
+
+## Access Prometheus
+
+To access the Prometheus dashboard:
+
+1. Run the following command to port forward the Prometheus service:
+
+   ```bash
+   kubectl --namespace monitoring port-forward svc/prometheus-k8s 9090
+   ```
+
+1. Access the dashboard at http://localhost:9090.
+
+## Access Grafana
+
+Users can access the Grafana dashboard by logging in using a default username and password. For information about configuring Grafana, see the [Grafana documentation](https://grafana.com/docs/). For information about constructing Prometheus queries, see [Querying Prometheus](https://prometheus.io/docs/prometheus/latest/querying/basics/) in the Prometheus documentation.
+
+To access the Grafana dashboard:
+
+1. Run the following command to port forward the Grafana service:
+
+   ```bash
+   kubectl --namespace monitoring port-forward deployment/grafana 3000
+   ```
+1. Access the dashboard at http://localhost:3000.
+1. Log in to Grafana:
+   * **Existing cluster**: Use the default Grafana username and password: `admin:admin`.
+   * **kURL cluster**: The Grafana password is randomly generated by kURL and is displayed on the command line after kURL provisions the cluster. To log in, use this password generated by kURL and the username `admin`.
+
+     To retrieve the password, run the following kubectl command:
+
+     ```
+     kubectl get secret -n monitoring grafana-admin -o jsonpath="{.data.admin-password}" | base64 -d
+     ```
+
+## Access Alertmanager
+
+Alerting with Prometheus has two phases:
+
+* Phase 1: Alerting rules in Prometheus servers send alerts to an Alertmanager.
+* Phase 2: The Alertmanager then manages those alerts, including silencing, inhibition, aggregation, and sending out notifications through methods such as email, on-call notification systems, and chat platforms.
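+
+For example, a minimal Alertmanager configuration that routes all alerts to a single webhook receiver might look like the following. The receiver name and URL are illustrative placeholders:
+
+```yaml
+route:
+  receiver: "default"
+receivers:
+  - name: "default"
+    webhook_configs:
+      - url: "http://alert-sink.example.com/hook"
+```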
+ +For more information about configuring Alertmanager, see [Configuration](https://prometheus.io/docs/alerting/configuration/) in the Prometheus documentation. + +To access the Alertmanager dashboard: + +1. Run the following command to port forward the Alertmanager service: + + ``` + kubectl --namespace monitoring port-forward svc/prometheus-alertmanager 9093 + ``` + +1. Access the dashboard at http://localhost:9093. + +================ +File: docs/enterprise/monitoring-applications.mdx +================ +import OverviewProm from "../partials/monitoring/_overview-prom.mdx" + +# Configuring Prometheus Monitoring in Existing Cluster KOTS Installations + +This topic describes how to monitor applications and clusters with Prometheus in existing cluster installations with Replicated KOTS. + +For information about how to access Prometheus, Grafana, and Alertmanager, see [Accessing Dashboards Using Port Forwarding](/enterprise/monitoring-access-dashboards). + +For information about consuming Prometheus metrics externally in kURL installations, see [Consuming Prometheus Metrics Externally](monitoring-external-prometheus). + +## Overview + + + +## Configure Prometheus Monitoring + +For existing cluster installations with KOTS, users can install Prometheus in the cluster and then connect the Admin Console to the Prometheus endpoint to enable monitoring. + +### Step 1: Install Prometheus in the Cluster {#configure-existing} + +Replicated recommends that you use CoreOS's Kube-Prometheus distribution for installing and configuring highly available Prometheus on an existing cluster. For more information, see the [kube-prometheus](https://github.com/coreos/kube-prometheus) GitHub repository. + +This repository collects Kubernetes manifests, Grafana dashboards, and Prometheus rules combined with documentation and scripts to provide easy to operate end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus Operator. + +To install Prometheus using the recommended Kube-Prometheus distribution: + +1. Clone the [kube-prometheus](https://github.com/coreos/kube-prometheus) repository to the device where there is access to the cluster. + +1. Use `kubectl` to create the resources on the cluster: + + ```bash + # Create the namespace and CRDs, and then wait for them to be available before creating the remaining resources + kubectl create -f manifests/setup + until kubectl get servicemonitors --all-namespaces ; do date; sleep 1; echo ""; done + kubectl create -f manifests/ + ``` + + For advanced and cluster-specific configuration, you can customize Kube-Prometheus by compiling the manifests using jsonnet. For more information, see the [jsonnet website](https://jsonnet.org/). + + For more information about advanced Kube-Prometheus configuration options, see [Customizing Kube-Prometheus](https://github.com/coreos/kube-prometheus#customizing-kube-prometheus) in the kube-prometheus GitHub repository. + +### Step 2: Connect to a Prometheus Endpoint + +To view graphs on the Admin Console dashboard, provide the address of a Prometheus instance installed in the cluster. + +To connect the Admin Console to a Prometheus endpoint: + +1. On the Admin Console dashboard, under Monitoring, click **Configure Prometheus Address**. +1. Enter the address for the Prometheus endpoint in the text box and click **Save**. + + ![Configuring Prometheus](/images/kotsadm-dashboard-configureprometheus.png) + + Graphs appear on the dashboard shortly after saving the address. 
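+
+   If you installed Prometheus using the Kube-Prometheus distribution described above, the endpoint is typically the `prometheus-k8s` service in the `monitoring` namespace. Assuming those default names, the cluster-local address is:
+
+   ```
+   http://prometheus-k8s.monitoring.svc.cluster.local:9090
+   ```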
+ +================ +File: docs/enterprise/monitoring-external-prometheus.md +================ +import OverviewProm from "../partials/monitoring/_overview-prom.mdx" +import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" + +# Consuming Prometheus Metrics Externally + + + +This topic describes how to consume Prometheus metrics in Replicated kURL clusters from a monitoring service that is outside the cluster. + +For information about how to access Prometheus, Grafana, and Alertmanager, see [Accessing Dashboards Using Port Forwarding](/enterprise/monitoring-access-dashboards). + +## Overview + + + +For kURL installations, if the [kURL Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) is included in the kURL installer spec, then the Prometheus monitoring system is installed alongside the application. No additional configuration is required to collect metrics and view any default and custom graphs on the Admin Console dashboard. + +Prometheus is deployed in kURL clusters as a NodePort service named `prometheus-k8s` in the `monitoring` namespace. The `prometheus-k8s` service is exposed on the IP address for each node in the cluster at port 30900. + +You can run the following command to view the `prometheus-k8s` service in your cluster: + +``` +kubectl get services -l app=kube-prometheus-stack-prometheus -n monitoring +``` +The output of the command includes details about the Prometheus service, including the type of service and the ports where the service is exposed. For example: + +``` +NAME TYPE CLUSTER_IP EXTERNAL_IP PORT(S) AGE +prometheus-k8s NodePort 10.96.2.229 9090:30900/TCP 5hr +``` +As shown in the example above, port 9090 on the `prometheus-k8s` service maps to port 30900 on each of the nodes. + +For more information about NodePort services, see [Type NodePort](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport) in _Services_ in the Kubernetes documentation. + +## Prerequisite + +Before you can consume Prometheus metrics in kURL clusters externally, ensure that firewall rules on all nodes in the cluster allow inbound TCP traffic on port 30900. + +## Consume Metrics from External Services + +You can connect to the `prometheus-k8s` service on port 30900 from any node in the cluster to access Prometheus metrics emitted by kURL clusters. + +To consume Prometheus metrics from an external service: + +1. Get the external IP address for one of the nodes in the cluster. You will use this IP address in the next step to access the `prometheus-k8s` service. + + You can find the IP address for a node in the output of the following command: + + ``` + kubectl describe node NODE_NAME + ``` + Where `NODE_NAME` is the name of a node in the cluster. + + :::note + Depending on the node's network configuration, there might be different IP addresses for accessing the node from an external or internal network. For example, the IP address 10.128.0.35 might be assigned to the node in the internal network, whereas the IP address used to access the node from external or public networks is 34.28.178.93. + + Consult your infrastructure team to assist you in determining which IP address to use. + ::: + +1. In a browser, go to `http://NODE_IP_ADDRESS:30900` to verify that you can connect to the `prometheus-k8s` NodePort service. Replace `NODE_IP_ADDRESS` with the external IP address that you copied in the first step. For example, `http://34.28.178.93:30900`. + + If the connection is successful, the Prometheus UI displays in the browser. + +1. 
From your external monitoring solution, add Prometheus as an HTTP data source using the same URL from the previous step: `http://NODE_IP_ADDRESS:30900`.
+
+================
+File: docs/enterprise/sbom-validating.md
+================
+# Validating SBOM Signatures
+
+This topic describes how to validate software bill of materials (SBOM) signatures for Replicated KOTS, Replicated kURL, and Troubleshoot releases.
+
+## About Software Bills of Materials
+
+A _software bill of materials_ (SBOM) is an inventory of all components used to create a software package. SBOMs have emerged as critical building blocks in software security and software supply chain risk management.
+
+When you install software, validating an SBOM signature can help you understand exactly what the software package is installing. This information can help you ensure that the files are compatible with your licensing policies and help determine whether there is exposure to CVEs.
+
+## Prerequisite
+
+Before you perform these tasks, you must install cosign. For more information, see the [sigstore repository](https://github.com/sigstore/cosign) in GitHub.
+
+
+## Validate a KOTS SBOM Signature
+
+Each KOTS release includes a signed SBOM for KOTS Go dependencies.
+
+To validate a KOTS SBOM signature:
+
+1. Go to the [KOTS GitHub repository](https://github.com/replicatedhq/kots/releases) and download the specific KOTS release that you want to validate.
+1. Extract the `.tar.gz` file.
+
+   **Example:**
+
+   ```
+   tar -zxvf kots_darwin_all.tar.gz
+   ```
+   A KOTS binary and SBOM folder are created.
+1. Run the following cosign command to validate the signatures:
+   ```
+   cosign verify-blob --key sbom/key.pub --signature sbom/kots-sbom.tgz.sig sbom/kots-sbom.tgz
+   ```
+
+## Validate a kURL SBOM Signature
+
+If a kURL installer is used, then signed SBOMs for kURL Go and JavaScript dependencies are combined into a TAR file and are included with the release.
+
+To validate a kURL SBOM signature:
+
+1. Go to the [kURL GitHub repository](https://github.com/replicatedhq/kURL/releases) and download the specific kURL release files that you want to validate.
+
+   There are three assets related to the SBOM:
+
+   - `kurl-sbom.tgz` contains SBOMs for Go and JavaScript dependencies
+   - `kurl-sbom.tgz.sig` is the digital signature for `kurl-sbom.tgz`
+   - `key.pub` is the public key from the key pair used to sign `kurl-sbom.tgz`
+
+2. Run the following cosign command to validate the signatures:
+   ```
+   cosign verify-blob --key key.pub --signature kurl-sbom.tgz.sig kurl-sbom.tgz
+   ```
+
+## Validate a Troubleshoot SBOM Signature
+
+A signed SBOM for Troubleshoot dependencies is included in each release.
+
+To validate a Troubleshoot SBOM signature:
+
+1. Go to the [Troubleshoot GitHub repository](https://github.com/replicatedhq/troubleshoot/releases) and download the specific Troubleshoot release files that you want to validate.
+
+   There are three assets related to the SBOM:
+
+   - `troubleshoot-sbom.tgz` contains a software bill of materials for Troubleshoot.
+   - `troubleshoot-sbom.tgz.sig` is the digital signature for `troubleshoot-sbom.tgz`
+   - `key.pub` is the public key from the key pair used to sign `troubleshoot-sbom.tgz`
+
+2. Run the following cosign command to validate the signatures:
+   ```
+   cosign verify-blob --key key.pub --signature troubleshoot-sbom.tgz.sig troubleshoot-sbom.tgz
+   ```
+
+================
+File: docs/enterprise/snapshots-config-workflow.md
+================
+# How to Set Up Backup Storage
+
+This topic describes the process of setting up backup storage for the Replicated snapshots feature.
+
+## Configuring Backup Storage for Embedded Clusters
+
+You must configure a backup storage destination before you can create backups. This procedure describes how to configure backup storage for snapshots for _embedded clusters_ created by Replicated kURL.
+
+To configure snapshots for embedded clusters:
+
+1. On the Snapshots tab in the Admin Console, click **Check for Velero** to see whether kURL already installed Velero in the embedded cluster.
+
+1. If Velero was installed, update the default internal storage settings in the Admin Console because internal storage is insufficient for full backups. See [Updating Settings in the Admin Console](snapshots-updating-with-admin-console).
+
+1. If Velero was not installed:
+
+   1. Install the Velero CLI. See [Installing the Velero CLI](snapshots-velero-cli-installing).
+
+   1. Install Velero and configure a storage destination using one of the following procedures:
+
+      - [Configuring a Host Path Storage Destination](snapshots-configuring-hostpath)
+      - [Configuring an NFS Storage Destination](snapshots-configuring-nfs)
+      - [Configuring Other Storage Destinations](snapshots-storage-destinations)
+
+1. Optionally increase the default memory for the node-agent (restic) Pod. See [Configuring Namespace Access and Memory Limit](snapshots-velero-installing-config).
+
+## Configuring Backup Storage for Existing Clusters
+
+You must configure a backup storage destination before you can create backups.
+
+Follow this process to install Velero and configure the snapshots feature:
+
+1. Install the Velero CLI. See [Installing the Velero CLI](snapshots-velero-cli-installing).
+
+1. Install Velero and configure a storage destination using one of the following procedures:
+
+   - [Configuring a Host Path Storage Destination](snapshots-configuring-hostpath)
+   - [Configuring an NFS Storage Destination](snapshots-configuring-nfs)
+   - [Configuring Other Storage Destinations](snapshots-storage-destinations)
+
+1. Enable access to the Velero namespace if you are using RBAC and optionally increase the default memory for the node-agent (restic) Pod. See [Configuring Namespace Access and Memory Limit](snapshots-velero-installing-config).
+
+## Next Step
+
+After you configure a storage destination, you can create a backup. See [Creating and Scheduling Backups](snapshots-creating).
+
+## Additional Resources
+
+* [Restoring Full Backups](snapshots-restoring-full)
+* [Troubleshooting Snapshots](snapshots-troubleshooting-backup-restore)
+
+================
+File: docs/enterprise/snapshots-configuring-hostpath.md
+================
+import InstallVelero from "../partials/snapshots/_installVelero.mdx"
+import RegistryCredNote from "../partials/snapshots/_registryCredentialsNote.mdx"
+import ResticDaemonSet from "../partials/snapshots/_resticDaemonSet.mdx"
+import UpdateDefaultStorage from "../partials/snapshots/_updateDefaultStorage.mdx"
+import CheckVersion from "../partials/snapshots/_checkVersion.mdx"
+
+# Configuring a Host Path Storage Destination
+
+This topic describes how to install Velero and configure a host path as your storage destination for backups.
+ +:::note + +::: + +## Requirements + +* The host path must be a dedicated directory. Do not use a partition used by a service like Docker or Kubernetes for ephemeral storage. +* The host path must exist and be writable by the user:group 1001:1001 on all nodes in the cluster. For example, in a Linux environment you might run `sudo chown -R 1001:1001 /backups` to change the user:group permissions. + + If you use a mounted directory for the storage destination, such as one that is created with the Common Internet File System (CIFS) or Server Message Block (SMB) protocols, ensure that you configure the user:group 1001:1001 permissions on all nodes in the cluster and from the server side as well. + + You cannot change the permissions of a mounted network shared filesystem from the client side. To reassign the user:group to 1001:1001 for a directory that is already mounted, you must remount the directory. For example, for a CIFS mounted directory, specify the `uid=1001,gid=1001` mount options in the CIFS mount command. + +## Prerequisites + +Complete the following items before you perform this task: + +* Review the limitations and considerations. See [Limitations and Considerations](/vendor/snapshots-overview#limitations-and-considerations) in _About Backup and Restore_. +* Install the velero CLI. See [Installing the Velero CLI](snapshots-velero-cli-installing). + +## Install Velero and Configure Host Path Storage in Online Environments + +To install Velero and configure host path storage in online environments: + +1. + +1. + +1. Run the following command to configure the host path storage destination: + + ``` + kubectl kots velero configure-hostpath --namespace NAME --hostpath /PATH + ``` + + Replace: + - `NAME` with the namespace where the Replicated KOTS Admin Console is installed and running + - `PATH` with the path to the directory where the backups will be stored + + For more information about required storage destination flags, see [`velero`](/reference/kots-cli-velero-index) in _Reference_. + +## Install Velero and Configure Host Path Storage in Air Gapped Environments + +To install Velero and configure host path storage in air gapped environments: + +1. + +1. + + + +1. + +1. Run the following command to configure the host path storage destination: + + ``` + kubectl kots velero configure-hostpath \ + --namespace NAME \ + --hostpath /PATH \ + --kotsadm-registry REGISTRY_HOSTNAME[/REGISTRY_NAMESPACE] \ + --registry-username REGISTRY_USERNAME \ + --registry-password REGISTRY_PASSWORD + ``` + + Replace: + - `NAME` with the namespace where the Admin Console is installed and running + - `PATH` with the path to the directory where the backups will be stored + - `REGISTRY_HOSTNAME` with the registry endpoint where the images are hosted + - `REGISTRY_NAMESPACE` with the registry namespace where the images are hosted (Optional) + - `REGISTRY_USERNAME` with the username to use to authenticate with the registry + - `REGISTRY_PASSWORD` with the password to use to authenticate with the registry + + For more information about required storage destination flags, see [`velero`](/reference/kots-cli-velero-index) in _Reference_. + +## Configure Host Path Storage in the Admin Console + +Alternatively, when the Admin Console and application are already installed, you can start in the Admin Console to install Velero and configure a host path storage destination. + +To install Velero and configure host path storage for existing clusters: + +1. 
From the Admin Console, click **Snapshots > Settings and Schedule**. + +1. Click **Add a new storage destination**. + + The Add a new destination dialog opens and shows instructions for setting up Velero with different providers. + +1. Click **Host Path**. + + ![Snapshot Provider Host Path](/images/snapshot-provider-hostpath.png) + +1. In the Configure Host Path dialog, enter the path to the directory where the backups will be stored. Click **Get instructions**. + + ![Snapshot Provider Host Path Fields](/images/snapshot-provider-hostpath-field.png) + + A dialog opens with instructions on how to set up Velero with the specified host path configuration. + +1. Follow the steps in the dialog to install Velero and configure the storage destination. + + ![Snapshot Provider File System Instructions](/images/snapshot-provider-hostpath-instructions.png) + +1. Return to the Admin Console and either click **Check for Velero** or refresh the page to verify that the Velero installation is detected. + + +## Next Steps + +* (Existing Clusters Only) Configure Velero namespace access if you are using minimal RBAC. See [Configuring Namespace Access and Memory Limit](snapshots-velero-installing-config). +* (Optional) Increase the default memory limits. See [Configuring Namespace Access and Memory Limit](snapshots-velero-installing-config). +* Create or schedule backups. See [Creating and Scheduling Backups](snapshots-creating). + +## Additional Resources + +* [Troubleshooting Snapshots](/enterprise/snapshots-troubleshooting-backup-restore) + +================ +File: docs/enterprise/snapshots-configuring-nfs.md +================ +import InstallVelero from "../partials/snapshots/_installVelero.mdx" +import RegistryCredNote from "../partials/snapshots/_registryCredentialsNote.mdx" +import ResticDaemonSet from "../partials/snapshots/_resticDaemonSet.mdx" +import UpdateDefaultStorage from "../partials/snapshots/_updateDefaultStorage.mdx" +import CheckVersion from "../partials/snapshots/_checkVersion.mdx" + +# Configuring an NFS Storage Destination + +This topic describes how to install Velero and configure a Network File System (NFS) as your storage destination for backups. + +:::note + +::: + +## Requirements + +Configuring an NFS server as a snapshots storage destination has the following requirements: + +* The NFS server must be configured to allow access from all of the nodes in the cluster. +* The NFS directory must be writable by the user:group 1001:1001. +* Ensure that you configure the user:group 1001:1001 permissions for the directory on the NFS server. +* All of the nodes in the cluster must have the necessary NFS client packages installed to be able to communicate with the NFS server. For example, the `nfs-common` package is a common package used on Ubuntu. +* Any firewalls must be properly configured to allow traffic between the NFS server and clients (cluster nodes). + +## Prerequisites + +Complete the following items before you perform this task: + +* Review the limitations and considerations. See [Limitations and Considerations](/vendor/snapshots-overview#limitations-and-considerations) in _About Backup and Restore_. +* Install the velero CLI. See [Installing the Velero CLI](snapshots-velero-cli-installing). + +## Install Velero and Configure NFS Storage in Online Environments + +To install Velero and configure NFS storage in an online environment: + +1. + +1. + +1. 
Run the following command to configure the NFS storage destination:

   ```
   kubectl kots velero configure-nfs --namespace NAME --nfs-path PATH --nfs-server HOST
   ```

   Replace:
   - `NAME` with the namespace where the Replicated KOTS Admin Console is installed and running
   - `PATH` with the path that is exported by the NFS server
   - `HOST` with the hostname or IP address of the NFS server

   For more information about required storage destination flags, see [`velero`](/reference/kots-cli-velero-index) in _Reference_.

## Install Velero and Configure NFS Storage in Air Gapped Environments

To install Velero and configure NFS storage in air gapped environments:

1.

1.

1.

1. Run the following command to configure the NFS storage destination:

   ```
   kubectl kots velero configure-nfs \
     --namespace NAME \
     --nfs-server HOST \
     --nfs-path PATH \
     --kotsadm-registry REGISTRY_HOSTNAME[/REGISTRY_NAMESPACE] \
     --registry-username REGISTRY_USERNAME \
     --registry-password REGISTRY_PASSWORD
   ```

   Replace:
   - `NAME` with the namespace where the Admin Console is installed and running
   - `HOST` with the hostname or IP address of the NFS server
   - `PATH` with the path that is exported by the NFS server
   - `REGISTRY_HOSTNAME` with the registry endpoint where the images are hosted
   - `REGISTRY_NAMESPACE` with the registry namespace where the images are hosted (Optional)
   - `REGISTRY_USERNAME` with the username to use to authenticate with the registry
   - `REGISTRY_PASSWORD` with the password to use to authenticate with the registry

   For more information about required storage destination flags, see [`velero`](/reference/kots-cli-velero-index) in _Reference_.

## Configure NFS Storage in the Admin Console

Alternatively, when the Admin Console and application are already installed, you can start in the Admin Console to install Velero and configure an NFS storage destination.

To install Velero and configure NFS storage for existing clusters:

1. From the Admin Console, click **Snapshots > Settings and Schedule**.

1. Click **Add a new storage destination**.

   The Add a new destination dialog opens and shows instructions for setting up Velero with different providers.

1. Click **NFS**.

   ![Snapshot Provider NFS](/images/snapshot-provider-nfs.png)

1. In the Configure NFS dialog, enter the NFS server hostname or IP address, and the path that is exported by the NFS server. Click **Get instructions**.

   ![Snapshot Provider NFS Fields](/images/snapshot-provider-nfs-fields.png)

   A dialog opens with instructions on how to set up Velero with the specified NFS configuration.

1. Follow the steps in the dialog to install Velero and configure the storage destination.

   ![Snapshot Provider File System Instructions](/images/snapshot-provider-nfs-instructions.png)

1. Return to the Admin Console and either click **Check for Velero** or refresh the page to verify that the Velero installation is detected.

## Next Steps

* (Existing Clusters Only) Configure Velero namespace access if you are using minimal RBAC. See [Configuring Namespace Access and Memory Limit](snapshots-velero-installing-config).
* (Optional) Increase the default memory limits. See [Configuring Namespace Access and Memory Limit](snapshots-velero-installing-config).
* Create or schedule backups. See [Creating and Scheduling Backups](snapshots-creating).
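
Before creating backups, it can also be worth confirming that the nodes can actually reach the NFS export. The following is a quick check to run from any cluster node, assuming the NFS client packages (for example, `nfs-common` on Ubuntu) are installed and using the same `HOST` and `PATH` placeholders as the commands above:

```bash
# List the exports offered by the NFS server to this client.
# The configured PATH should appear in the output; if it does not,
# check the server's export configuration and any firewalls.
showmount -e HOST
```
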

## Additional Resources

* [Troubleshooting Snapshots](snapshots-troubleshooting-backup-restore)

================
File: docs/enterprise/snapshots-creating.md
================
# Creating and Scheduling Backups

This topic describes how to use the Replicated snapshots feature to create backups. It also describes how to use the Replicated KOTS Admin Console to create a schedule for automatic backups. For information about restoring, see [Restoring from Backups](snapshots-restoring-full).

## Prerequisites

- Before you can create backups, you must configure a storage destination:

  - [Configuring a Host Path Storage Destination](snapshots-configuring-hostpath)
  - [Configuring an NFS Storage Destination](snapshots-configuring-nfs)
  - [Configuring Other Storage Destinations](snapshots-storage-destinations)

- If you have multiple applications in the Admin Console, make sure that each application has its own Backup custom resource file so that each application can be included in the full backup. Use the **View files** tab to check for the Backup custom resources (`kind: Backup`, `apiVersion: velero.io/v1`).

  If any Backup custom resource files are missing, contact your vendor.

## Create a Full Backup (Recommended) {#full}

Full backups, or _instance snapshots_, back up the Admin Console and all application data, including application volumes and manifest files. If you manage multiple applications with the Admin Console, data from all applications that support backups is included in a full backup.

From a full backup, you can:
* Restore application and Admin Console data
* Restore only application data
* Restore only Admin Console data

You can create a full backup with the following methods:
* [Create a Backup with the CLI](#cli-backup)
* [Create a Backup in the Admin Console](#admin-console-backup)

### Create a Backup with the CLI {#cli-backup}

To create a full backup with the Replicated KOTS CLI, run the following command:

   ```
   kubectl kots backup --namespace NAMESPACE
   ```
   Replace `NAMESPACE` with the namespace where the Admin Console is installed.

For more information, see [backup](/reference/kots-cli-backup-index) in _KOTS CLI_.

### Create a Backup in the Admin Console {#admin-console-backup}

To create a full backup in the Admin Console:

1. To check if backups are supported for an application, go to the **View files** page, open the `upstream` folder, and confirm that the application includes a manifest file with `kind: Backup` and `apiVersion: velero.io/v1`. This manifest also shows which pod volumes are being backed up.

1. Go to **Snapshots > Full Snapshots (Instance)**.
1. Click **Start a snapshot**.

   When the backup is complete, it appears in the list of backups on the page, as shown in the following image:

   ![Full snapshot page with one completed snapshot](/images/snapshot-instance-list.png)

## Create a Partial Backup {#partial}

Partial backups, or _application snapshots_, back up application volumes and application manifests only. Partial backups do not back up Admin Console data.

:::note
Replicated recommends that you create full backups instead of partial backups because partial backups are not suitable for disaster recovery. See [Create a Full Backup](#full) above.
:::

To create a partial backup in the Admin Console:

1. Go to **Snapshots > Partial Snapshots (Application)**.

1. 
If you manage multiple applications in the Admin Console, use the dropdown to select the application that you want to back up.

1. Click **Start a snapshot**.

   When the snapshot is complete, it appears in the list of snapshots on the page, as shown in the following image:

   ![Partial snapshot page with one completed snapshot](/images/snapshot-application-list.png)

## Schedule Automatic Backups

You can use the Admin Console to schedule full or partial backups. This is useful for automatically creating regular backups of Admin Console and application data.

To schedule automatic backups in the Admin Console:

1. Go to **Snapshots > Settings & Schedule**.

1. Under **Automatic snapshots**, select **Full snapshots (Instance)** or **Partial snapshots (Application)**, depending on the type of backup that you want to schedule.

   ![Snapshot Settings and Schedule page](/images/snapshot-schedule.png)

1. (Partial Backups Only) If you manage multiple applications in the Admin Console, use the dropdown to select the application that you want to back up.

1. Select **Enable automatic scheduled snapshots**.

1. Configure the automatic backup schedule for the type of snapshots that you selected:

   * For **Schedule**, select Hourly, Daily, Weekly, or Custom.
   * For **Cron Expression**, enter a cron expression to create a custom automatic backup schedule. For information about supported cron expressions, see [Cron Expressions](/reference/cron-expressions).

1. (Optional) For **Retention Policy**, edit the amount of time that backup data is saved. By default, backup data is saved for 30 days.

   The retention policy applies to all backups, including both automatically and manually created backups. Changing the retention policy affects only backups created after the time of the change.

## Additional Resources

[Troubleshooting Snapshots](snapshots-troubleshooting-backup-restore)

================
File: docs/enterprise/snapshots-restoring-full.mdx
================
import RestoreTable from "../partials/snapshots/_restoreTable.mdx"
import RestoreTypes from "../partials/snapshots/_restore-types.mdx"
import GetBackups from "../partials/snapshots/_step-get-backups.mdx"
import Restore from "../partials/snapshots/_step-restore.mdx"
import Dr from "../partials/snapshots/_limitation-dr.mdx"
import Os from "../partials/snapshots/_limitation-os.mdx"
import InstallMethod from "../partials/snapshots/_limitation-install-method.mdx"
import CliRestores from "../partials/snapshots/_limitation-cli-restores.mdx"

# Restoring from Backups

This topic describes how to restore from full or partial backups using Replicated snapshots.

## Overview

You can do any type of restore from a full backup using the KOTS CLI. You can also restore an application from a full or partial backup using the Admin Console.

## Limitations

The following limitations apply to restoring from backups using snapshots:

*
*
*
*

For a full list of limitations and considerations related to the snapshots feature, see [Limitations and Considerations](/vendor/snapshots-overview#limitations-and-considerations) in _About Backup and Restore_.

## Restore From a Full Backup Using the CLI {#full-cli}

You can use the KOTS CLI to restore both the Admin Console and the application, the Admin Console only, or the application only. If you need to restore the Admin Console, you must use the KOTS CLI, because the Admin Console is recreated and disconnected during the restore process.
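
For reference, the following is a minimal sketch of what these CLI restores look like. The namespace and backup name are placeholders; `BACKUP_NAME` comes from the output of the `get backups` command:

```bash
# List completed backups to find the backup name.
kubectl kots get backups --namespace NAMESPACE

# Restore both the Admin Console and the application from the named backup.
kubectl kots restore --from-backup BACKUP_NAME

# Restore only the application (exclude the Admin Console).
kubectl kots restore --from-backup BACKUP_NAME --exclude-admin-console

# Restore only the Admin Console (exclude application data).
kubectl kots restore --from-backup BACKUP_NAME --exclude-apps
```
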

:::note

:::

To restore using the CLI, see the corresponding procedure for your environment:

- [Existing Clusters](#existing)
- [Online kURL Clusters](#online)
- [Air Gap kURL Clusters](#air-gapped)

### Existing Clusters {#existing}

:::note
If you are restoring to a healthy cluster, you can skip reinstalling Velero and continue to the `get backups` and `restore` commands in the last two steps.
:::

To restore a full backup in an existing cluster:

1. (New or Unhealthy Clusters Only) In the cluster where you will do the restore, install a version of Velero that is compatible with the version that was used to make the snapshot backup.

   The Velero installation command varies depending on the storage destination for the backup. For the Velero installation command, see one of the following:

   * **Host Path:** See [Configuring a Host Path Storage Destination](snapshots-configuring-hostpath)
   * **NFS:** See [Configuring an NFS Storage Destination](snapshots-configuring-nfs) for the configuration steps and how to set up Velero.
   * **AWS, GCP, Azure, or other S3:** See [Configuring Other Storage Destinations](snapshots-storage-destinations).

1.

1.

### Online kURL Clusters {#online}

:::note
If you are restoring to a healthy cluster, you can skip the installation and configuration steps and continue to the `get backups` and `restore` commands in the last two steps.
:::

To restore a full backup in a kURL cluster:

1. (New or Unhealthy Clusters Only) Provision a cluster with kURL and install the target application in the cluster. See [Online Installation with kURL](installing-kurl).

1. (New or Unhealthy Clusters Only) In the new kURL cluster, configure a storage destination that holds the backup you want to use:

   * **Host Path:** See [Configuring a Host Path Storage Destination](snapshots-configuring-hostpath)
   * **NFS:** See [Configuring an NFS Storage Destination](snapshots-configuring-nfs) for the configuration steps and how to set up Velero.
   * **AWS, GCP, Azure, or other S3:** See [Configuring Other Storage Destinations](snapshots-storage-destinations).

1.

1.

### Air Gap kURL Clusters {#air-gapped}

To restore a full backup in an air gap kURL cluster:

1. Run the following command to install a new cluster and provide kURL with the correct registry IP address. kURL must be able to assign the same IP address to the embedded private image registry in the new cluster.

   ```bash
   cat install.sh | sudo bash -s airgap kurl-registry-ip=IP
   ```

   Replace `IP` with the registry IP address.

1. Use the KOTS CLI to configure Velero to use a storage destination. The storage backend used for backups must be accessible from the new cluster.

   * **Host Path:** See [Configuring a Host Path Storage Destination](snapshots-configuring-hostpath)
   * **NFS:** See [Configuring an NFS Storage Destination](snapshots-configuring-nfs) for the configuration steps and how to set up Velero.
   * **S3-Compatible:** See [Configure S3-Compatible Storage for Air Gapped Environments](snapshots-storage-destinations#configure-s3-compatible-storage-for-air-gapped-environments) in _Configuring Other Storage Destinations_.

1.

1.

## Restore the Application Only Using the Admin Console {#admin-console}

You can restore an application from a full or partial backup using the Admin Console.

### Restore an Application From a Full Backup

To restore an application from a full backup:

1. 
Select **Full Snapshots (Instance)** from the Snapshots tab.

   ![Full Snapshot tab](/images/full-snapshot-tab.png)

   [View a larger version of this image](/images/full-snapshot-tab.png)

1. Click the **Restore from this backup** icon (the circular blue arrows) for the backup that you want to restore.

1. In the **Restore from backup** dialog, select **Partial restore**.

   ![Restore Full Snapshot dialog](/images/restore-backup-dialog.png)

   [View a larger version of this image](/images/restore-backup-dialog.png)

   :::note
   You can also get the CLI commands for full restores or Admin Console only restores from this dialog.
   :::

1. At the bottom of the dialog, enter the application slug provided by your software vendor. For more information, see [Get the Application Slug](/vendor/vendor-portal-manage-app#slug) in _Managing Applications_.

1. Click **Confirm and restore**.

### Restore an Application From a Partial Backup

To restore an application from a partial backup:

1. Select **Partial Snapshots (Application)** from the Snapshots tab.

   ![Partial Snapshot tab](/images/partial-snapshot-tab.png)

   [View a larger version of this image](/images/partial-snapshot-tab.png)

1. Click the **Restore from this backup** icon (the circular blue arrows) for the backup that you want to restore.

   The **Restore from Partial backup (Application)** dialog opens.

1. Under **Type your application slug to continue**, enter the application slug provided by your software vendor. For more information, see [Get the Application Slug](/vendor/vendor-portal-manage-app#slug) in _Managing Applications_.

   ![Restore Partial Snapshot dialog](/images/restore-partial-dialog.png)

   [View a larger version of this image](/images/restore-partial-dialog.png)

1. Click **Confirm and restore**.

## Additional Resources

[Troubleshooting Snapshots](snapshots-troubleshooting-backup-restore)

================
File: docs/enterprise/snapshots-storage-destinations.md
================
import UpdateDefaultStorage from "../partials/snapshots/_updateDefaultStorage.mdx"
import RegistryCredNote from "../partials/snapshots/_registryCredentialsNote.mdx"
import CheckVersion from "../partials/snapshots/_checkVersion.mdx"

# Configuring Other Storage Destinations

This topic describes installing Velero and configuring storage for Amazon Web Services (AWS), Google Cloud Provider (GCP), Microsoft Azure, and S3-compatible providers.

To configure host path or NFS as a storage destination, see [Configuring a Host Path Storage Destination](snapshots-configuring-hostpath) and [Configuring an NFS Storage Destination](snapshots-configuring-nfs).

:::note

:::

## Prerequisites

Complete the following items before you install Velero and configure a storage destination:

* Review the limitations and considerations. See [Limitations and Considerations](/vendor/snapshots-overview#limitations-and-considerations) in _About Backup and Restore_.
* Install the velero CLI. See [Installing the Velero CLI](snapshots-velero-cli-installing).

## Configure AWS Storage for Online Environments

In this procedure, you install Velero and configure an AWS storage destination in online environments.

The snapshots feature does not support Amazon Simple Storage Service (Amazon S3) buckets that have a bucket policy requiring the server-side encryption header. If you want to require server-side encryption for objects, you can enable default encryption on the bucket instead.
For more information about Amazon S3, see the [Amazon S3](https://docs.aws.amazon.com/s3/?icmpid=docs_homepage_featuredsvcs) documentation.

To install Velero and configure an AWS storage destination:

1. Follow the instructions for [installing Velero on AWS](https://github.com/vmware-tanzu/velero-plugin-for-aws#setup) in the Velero documentation.

1. Run the `velero install` command with these additional flags:

   * **Velero 1.10 and later**: Use the `--use-node-agent`, `--uploader-type=restic`, and `--use-volume-snapshots=false` flags.
   * **Velero versions earlier than 1.10**: Use the `--use-restic` and `--use-volume-snapshots=false` flags.

   **Example:**

   ```
   velero install \
     --provider aws \
     --plugins velero/velero-plugin-for-aws:v1.2.0 \
     --bucket $BUCKET \
     --backup-location-config region=$REGION \
     --secret-file CREDS_FILE \
     --use-node-agent --uploader-type=restic \
     --use-volume-snapshots=false
   ```

## Configure GCP Storage for Online Environments

In this procedure, you install Velero and configure a GCP storage destination in online environments.

To install Velero and configure a GCP storage destination:

1. Follow the instructions for [installing Velero on GCP](https://github.com/vmware-tanzu/velero-plugin-for-gcp#setup) in the Velero documentation.

1. Run the `velero install` command with these additional flags:
   * **Velero 1.10 and later**: Use the `--use-node-agent`, `--uploader-type=restic`, and `--use-volume-snapshots=false` flags.
   * **Velero versions earlier than 1.10**: Use the `--use-restic` and `--use-volume-snapshots=false` flags.

   **Example:**

   ```
   velero install \
     --provider gcp \
     --plugins velero/velero-plugin-for-gcp:v1.5.0 \
     --bucket $BUCKET \
     --secret-file ./CREDS_FILE \
     --use-node-agent --uploader-type=restic \
     --use-volume-snapshots=false
   ```

## Configure Azure Storage for Online Environments

In this procedure, you install Velero and configure an Azure storage destination in online environments.

To install Velero and configure an Azure storage destination:

1. Follow the instructions for [installing Velero on Azure](https://github.com/vmware-tanzu/velero-plugin-for-microsoft-azure#setup) in the Velero documentation.

1. Run the `velero install` command with these additional flags:
   * **Velero 1.10 and later**: Use the `--use-node-agent`, `--uploader-type=restic`, and `--use-volume-snapshots=false` flags.
   * **Velero versions earlier than 1.10**: Use the `--use-restic` and `--use-volume-snapshots=false` flags.

   **Example:**

   ```
   velero install \
     --provider azure \
     --plugins velero/velero-plugin-for-microsoft-azure:v1.5.0 \
     --bucket $BLOB_CONTAINER \
     --secret-file ./CREDS_FILE \
     --backup-location-config resourceGroup=$AZURE_BACKUP_RESOURCE_GROUP,storageAccount=$AZURE_STORAGE_ACCOUNT_ID[,subscriptionId=$AZURE_BACKUP_SUBSCRIPTION_ID] \
     --snapshot-location-config apiTimeout=[,resourceGroup=$AZURE_BACKUP_RESOURCE_GROUP,subscriptionId=$AZURE_BACKUP_SUBSCRIPTION_ID] \
     --use-node-agent --uploader-type=restic \
     --use-volume-snapshots=false
   ```

## Configure S3-Compatible Storage for Online Environments

Replicated supports the following S3-compatible object stores for storing backups with Velero:

- Ceph RADOS v12.2.7
- MinIO

Run the following command to install Velero and configure an S3-compatible storage destination in an online environment.
For more information about required storage destination flags, see [`velero`](/reference/kots-cli-velero-index) in _Reference_.

```
kubectl kots velero configure-other-s3 \
  --namespace NAME \
  --endpoint ENDPOINT \
  --region REGION \
  --bucket BUCKET \
  --access-key-id ACCESS_KEY_ID \
  --secret-access-key SECRET_ACCESS_KEY
```

Replace:

- `NAME` with the name of the namespace where the Replicated KOTS Admin Console is installed and running
- `ENDPOINT` with the S3 endpoint
- `REGION` with the region where the bucket exists
- `BUCKET` with the name of the object storage bucket where backups should be stored
- `ACCESS_KEY_ID` with the access key ID to use for accessing the bucket
- `SECRET_ACCESS_KEY` with the secret access key to use for accessing the bucket

**Example:**

```
kubectl kots velero configure-other-s3 \
  --namespace default \
  --endpoint http://minio \
  --region minio \
  --bucket kots-snaps \
  --access-key-id XXXXXXXJTJB7M2XZUV7D \
  --secret-access-key mysecretkey
```

If no Velero installation is detected, instructions are displayed to install Velero and configure the storage destination.

## Configure S3-Compatible Storage for Air Gapped Environments

> Introduced in Replicated KOTS v1.94.0

The following S3-compatible object stores are supported for storing backups with Velero:

- Ceph RADOS v12.2.7
- MinIO

Run the following command to install Velero and configure an S3-compatible storage destination in an air gapped environment. For more information about required storage destination flags, see [`velero`](/reference/kots-cli-velero-index) in _Reference_.

```bash
kubectl kots velero configure-other-s3 \
  --namespace NAME \
  --endpoint ENDPOINT \
  --region REGION \
  --bucket BUCKET \
  --access-key-id ACCESS_KEY_ID \
  --secret-access-key SECRET_ACCESS_KEY \
  --kotsadm-registry REGISTRY_HOSTNAME[/REGISTRY_NAMESPACE] \
  --registry-username REGISTRY_USERNAME \
  --registry-password REGISTRY_PASSWORD
```

Replace:

- `NAME` with the name of the namespace where the Admin Console is installed and running
- `ENDPOINT` with the S3 endpoint
- `REGION` with the region where the bucket exists
- `BUCKET` with the name of the object storage bucket where backups should be stored
- `ACCESS_KEY_ID` with the access key ID to use for accessing the bucket
- `SECRET_ACCESS_KEY` with the secret access key to use for accessing the bucket
- `REGISTRY_HOSTNAME` with the registry endpoint where the images are hosted
- `REGISTRY_NAMESPACE` with the registry namespace where the images are hosted (Optional)
- `REGISTRY_USERNAME` with the username to use to authenticate with the registry
- `REGISTRY_PASSWORD` with the password to use to authenticate with the registry

If no Velero installation is detected, instructions are displayed to install Velero and configure the storage destination.

## Next Steps

* (Existing Clusters Only) Configure Velero namespace access if you are using minimal RBAC. See [Configuring Namespace Access and Memory Limit](snapshots-velero-installing-config).
* (Optional) Increase the default memory limits. See [Configuring Namespace Access and Memory Limit](snapshots-velero-installing-config).
* Create or schedule backups. See [Creating and Scheduling Backups](snapshots-creating).
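
After Velero is installed, you can confirm that it can reach the configured destination by checking the backup storage location. The following is a quick check, assuming Velero is installed in the default `velero` namespace:

```bash
# The backup storage location should report PHASE "Available"
# once Velero can write to the configured bucket.
kubectl get backupstoragelocations -n velero
```
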

## Additional Resources

* [Troubleshooting Snapshots](snapshots-troubleshooting-backup-restore)

================
File: docs/enterprise/snapshots-troubleshooting-backup-restore.md
================
import NodeAgentMemLimit from "../partials/snapshots/_node-agent-mem-limit.mdx"

# Troubleshooting Snapshots

When a snapshot fails, a support bundle is collected and stored automatically. Because the bundle is a point-in-time collection of all logs and system state at the time of the failed snapshot, it is a good place to view the logs.

## Velero is Crashing

If Velero is crashing and not starting, some common causes are:

### Invalid Cloud Credentials

#### Symptom

You see the following error message from Velero when trying to configure a snapshot:

```shell
time="2020-04-10T14:22:24Z" level=info msg="Checking existence of namespace" logSource="pkg/cmd/server/server.go:337" namespace=velero
time="2020-04-10T14:22:24Z" level=info msg="Namespace exists" logSource="pkg/cmd/server/server.go:343" namespace=velero
time="2020-04-10T14:22:27Z" level=info msg="Checking existence of Velero custom resource definitions" logSource="pkg/cmd/server/server.go:372"
time="2020-04-10T14:22:31Z" level=info msg="All Velero custom resource definitions exist" logSource="pkg/cmd/server/server.go:406"
time="2020-04-10T14:22:31Z" level=info msg="Checking that all backup storage locations are valid" logSource="pkg/cmd/server/server.go:413"
An error occurred: some backup storage locations are invalid: backup store for location "default" is invalid: rpc error: code = Unknown desc = NoSuchBucket: The specified bucket does not exist
	status code: 404, request id: BEFAE2B9B05A2DCF, host id: YdlejsorQrn667ziO6Xr6gzwKJJ3jpZzZBMwwMIMpWj18Phfii6Za+dQ4AgfzRcxavQXYcgxRJI=
```

#### Cause

If the cloud access credentials are invalid or do not have access to the location in the configuration, Velero crashloops. The Velero logs are included in a support bundle, and the message looks like the example above.

#### Solution

Replicated recommends that you validate the access key and secret, or the service account JSON.

### Invalid Top-level Directories

#### Symptom

You see the following error message when Velero is starting:

```shell
time="2020-04-10T14:12:42Z" level=info msg="Checking existence of namespace" logSource="pkg/cmd/server/server.go:337" namespace=velero
time="2020-04-10T14:12:42Z" level=info msg="Namespace exists" logSource="pkg/cmd/server/server.go:343" namespace=velero
time="2020-04-10T14:12:44Z" level=info msg="Checking existence of Velero custom resource definitions" logSource="pkg/cmd/server/server.go:372"
time="2020-04-10T14:12:44Z" level=info msg="All Velero custom resource definitions exist" logSource="pkg/cmd/server/server.go:406"
time="2020-04-10T14:12:44Z" level=info msg="Checking that all backup storage locations are valid" logSource="pkg/cmd/server/server.go:413"
An error occurred: some backup storage locations are invalid: backup store for location "default" is invalid: Backup store contains invalid top-level directories: [other-directory]
```

#### Cause

This error occurs when Velero attempts to start and is configured to use a reconfigured or reused bucket.

When configuring Velero to use a bucket, the bucket cannot contain other data, or Velero will crash.

#### Solution

Configure Velero to use a bucket that does not contain other data.
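
In both of the cases above, the fastest way to confirm what Velero is reporting is to read the server logs directly. The following is a sketch, assuming Velero is installed in the `velero` namespace:

```bash
# Check whether the Velero Pod is crashlooping.
kubectl get pods -n velero

# Tail the Velero server logs for startup errors such as
# "NoSuchBucket" or "invalid top-level directories".
kubectl logs deployment/velero -n velero --tail=100
```
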

## Node Agent is Crashing

If the node-agent Pod is crashing and not starting, some common causes are:

### Metrics Server is Failing to Start

#### Symptom

You see the following error in the node-agent logs:

```shell
time="2023-11-16T21:29:44Z" level=info msg="Starting metric server for node agent at address []" logSource="pkg/cmd/cli/nodeagent/server.go:229"
time="2023-11-16T21:29:44Z" level=fatal msg="Failed to start metric server for node agent at []: listen tcp :80: bind: permission denied" logSource="pkg/cmd/cli/nodeagent/server.go:236"
```

#### Cause

This is a result of a known issue in Velero 1.12.0 and 1.12.1 where the port is not set correctly when starting the metrics server. This causes the metrics server to fail to start with a `permission denied` error in environments that do not run MinIO and have Host Path, NFS, or internal storage destinations configured. When the metrics server fails to start, the node-agent Pod crashes. For more information about this issue, see [the GitHub issue details](https://github.com/vmware-tanzu/velero/issues/6792).

#### Solution

Replicated recommends that you either upgrade to Velero 1.12.2 or later, or downgrade to a version earlier than 1.12.0.

## Snapshot Creation is Failing

### Timeout Error when Creating a Snapshot

#### Symptom

You see a backup error that includes a timeout message when attempting to create a snapshot. For example:

```bash
Error backing up item
timed out after 12h0m0s
```

#### Cause

This error message appears when the node-agent (restic) Pod operation timeout limit is reached. In Velero v1.4.2 and later, the default timeout is 240 minutes.

Restic is an open-source backup tool. Velero integrates with Restic to provide a solution for backing up and restoring Kubernetes volumes. For more information about the Velero Restic integration, see [File System Backup](https://velero.io/docs/v1.10/file-system-backup/) in the Velero documentation.

#### Solution

Use the kubectl Kubernetes command-line tool to patch the Velero deployment to increase the timeout:

**Velero version 1.10 and later**:

```bash
kubectl patch deployment velero -n velero --type json -p '[{"op":"add","path":"/spec/template/spec/containers/0/args/-","value":"--fs-backup-timeout=TIMEOUT_LIMIT"}]'
```

**Velero versions earlier than 1.10**:

```bash
kubectl patch deployment velero -n velero --type json -p '[{"op":"add","path":"/spec/template/spec/containers/0/args/-","value":"--restic-timeout=TIMEOUT_LIMIT"}]'
```

Replace `TIMEOUT_LIMIT` with a length of time for the node-agent (restic) Pod operation timeout in hours, minutes, and seconds. Use the format `0h0m0s`. For example, `48h30m0s`.

:::note
The timeout value reverts back to the default value if you rerun the `velero install` command.
:::

### Memory Limit Reached on the node-agent Pod

#### Symptom

The node-agent (restic) Pod is killed by the Linux kernel Out Of Memory (OOM) killer, or snapshots are failing with errors similar to:

```
pod volume backup failed: ... signal: killed
```

#### Cause

Velero sets default limits for the velero Pod and the node-agent (restic) Pod during installation. There is a known issue with Restic that causes high memory usage, which can result in failures during snapshot creation when the Pod reaches the memory limit.

For more information, see the [Restic backup — OOM-killed on raspberry pi after backing up another computer to same repo](https://github.com/restic/restic/issues/1988) issue in the restic GitHub repository.

#### Solution



### At least one source file could not be read

#### Symptom

You see the following error in Velero logs:

```
Error backing up item...Warning: at least one source file could not be read
```

#### Cause

Files changed between Restic's initial scan of the volume and the backup to the Restic store.

#### Solution

To resolve this issue, do one of the following:

* Use [hooks](/vendor/snapshots-hooks) to export data to an [EmptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) volume and include that in the backup instead of the primary PVC volume. See [Configuring Backup and Restore Hooks for Snapshots](/vendor/snapshots-hooks).
* Freeze the file system to ensure all pending disk I/O operations have completed prior to taking a snapshot. For more information, see [Hook Example with fsfreeze](https://velero.io/docs/main/backup-hooks/#hook-example-with-fsfreeze) in the Velero documentation.

## Snapshot Restore is Failing

### Service NodePort is Already Allocated

#### Symptom

In the Replicated KOTS Admin Console, you see an **Application failed to restore** error message that indicates the port number for a static NodePort is already in use. For example:

![Snapshot Troubleshoot Service NodePort](/images/snapshot-troubleshoot-service-nodeport.png)

[View a larger version of this image](/images/snapshot-troubleshoot-service-nodeport.png)

#### Cause

There is a known issue in Kubernetes versions earlier than version 1.19 where using a static NodePort for services can collide in multi-primary high availability setups when recreating the services. For more information about this known issue, see https://github.com/kubernetes/kubernetes/issues/85894.

#### Solution

This issue is fixed in Kubernetes version 1.19. To resolve this issue, upgrade to Kubernetes version 1.19 or later.

For more information about the fix, see https://github.com/kubernetes/kubernetes/pull/89937.

### Partial Snapshot Restore is Stuck in Progress

#### Symptom

In the Admin Console, you see at least one volume restore progress bar frozen at 0%, as shown in the following example:

![Snapshot Troubleshoot Frozen Restore](/images/snapshot-troubleshoot-frozen-restore.png)

You can confirm this is the same issue by running `kubectl get pods -n NAMESPACE`, replacing `NAMESPACE` with the application namespace. You should see at least one pod stuck in initialization:

```shell
NAME                            READY   STATUS     RESTARTS   AGE
example-mysql-0                 0/1     Init:0/2   0          4m15s #<- the offending pod
example-nginx-77b878b4f-zwv2h   3/3     Running    0          4m15s
```

#### Cause

Replicated has seen this issue with Velero version 1.5.4 and opened an issue with the project to investigate the root cause: https://github.com/vmware-tanzu/velero/issues/3686. This issue has not occurred with Velero 1.6.0 or later.

#### Solution

Upgrade Velero to version 1.9.0 or later. You can upgrade using Replicated kURL, or follow the Velero upgrade instructions. See [Upgrading to Velero 1.9](https://velero.io/docs/v1.9/upgrade-to-1.9/) in the Velero documentation.
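
To confirm which Velero server version is actually running before and after the upgrade, you can use the Velero CLI, assuming it is installed in the environment you manage the cluster from:

```bash
# Prints both the local client version and the server version
# running in the cluster.
velero version
```
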
+ +### Partial Snapshot Restore Finishes with Warnings + +#### Symptom + +In the Admin Console, when the partial snapshot restore completes, you see warnings indicating that Endpoint resources were not restored: + +![Snapshot Troubleshoot Restore Warnings](/images/snapshot-troubleshoot-restore-warnings.png) + +#### Cause + +The resource restore priority was changed in Velero 1.10.3 and 1.11.0, which leads to this warning when restoring Endpoint resources. For more information about this issue, see [the issue details](https://github.com/vmware-tanzu/velero/issues/6280) in GitHub. + +#### Solution + +These warnings do not necessarily mean that the restore itself failed. The endpoints likely do exist as they are created by Kubernetes when the related Service resources were restored. However, to prevent encountering these warnings, use Velero version 1.11.1 or later. + +================ +File: docs/enterprise/snapshots-updating-with-admin-console.md +================ +# Updating Storage Settings + +This topic describes how to update existing storage destination settings using the Replicated Admin Console. + +## Prerequisite +If you are changing from one provider to another provider, make sure that you meet the prerequisites for the storage destination. For information about prerequisites, see: + +- [Configuring a Host Path Storage Destination](snapshots-configuring-hostpath) +- [Configuring an NFS Storage Destination](snapshots-configuring-nfs) +- [Configuring Other Storage Destinations](snapshots-storage-destinations) + +## Update Storage Settings + +You can update storage destination settings for online and air gapped environments at any time using the Admin Console. + +Additionally, if Velero was automatically installed by Replicated kURL, then Replicated recommends that you change the default internal storage because it is not sufficient for disaster recovery. + +To update storage destination settings: + +1. In the Admin Console, select **Snapshots** > **Settings and Schedule**. + +1. Under storage, you can edit the existing settings or click **Add a new storage destination** and select a storage destination type. + + ![Snapshot Destination Dropdown Host Path](/images/snapshot-destination-dropdown-hostpath.png) + + The configuration fields that display depend on the type of storage destination. See the following storage destination sections for field descriptions: + + - [AWS](#aws-fields) + - [GCP](#gcp-fields) + - [Azure](#azure-fields) + - [S3-compatible](#s3-compatible-fields) + - [NFS](#nfs-fields) + - [Host Path](#host-path-fields) + +1. Click **Update storage settings**. The update can take several minutes. 
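
Because the update reconfigures Velero behind the scenes, one way to verify that the new settings took effect is to inspect the backup storage location that Velero uses. The following is a sketch, assuming the default location name `default` and the `velero` namespace:

```bash
# Shows the provider, bucket, and current phase of the
# storage location after the settings update.
kubectl describe backupstoragelocation default -n velero
```
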

### AWS Fields

When configuring the Admin Console to store backups on Amazon Web Services (AWS), the following fields are available:

| Name | Description |
|------------------------------|-----------------------------------------------------------------------------------------------------------------|
| Region | The AWS region that the S3 bucket is available in |
| Bucket | The name of the S3 bucket to use |
| Path (Optional) | The path in the bucket to store all backups in |
| Access Key ID (Optional) | The AWS IAM Access Key ID that can read from and write to the bucket |
| Secret Access Key (Optional) | The AWS IAM Secret Access Key that is associated with the Access Key ID |
| Use Instance Role | When enabled, instead of providing an Access Key ID and Secret Access Key, Velero will use an instance IAM role |
| Add a CA Certificate | (Optional) Upload a third-party issued (proxy) CA certificate used for trusting the authenticity of the snapshot storage endpoint. Only one file can be uploaded. However, it is possible to concatenate multiple certificates into one file. **Formats:** PEM, CER, CRT, CA, and KEY |

### GCP Fields

When configuring the Admin Console to store backups on Google Cloud Provider (GCP), the following fields are available:

| Name | Description |
|-----------------|-----------------------------------------------------------------------------------------------------------|
| Bucket | The name of the GCP storage bucket to use |
| Path (Optional) | The path in the bucket to store all backups in |
| Service Account | The GCP IAM Service Account JSON file that has permissions to read from and write to the storage location |
| Add a CA Certificate | (Optional) Upload a third-party issued (proxy) CA certificate used for trusting the authenticity of the snapshot storage endpoint. Only one file can be uploaded. However, it is possible to concatenate multiple certificates into one file. **Formats:** PEM, CER, CRT, CA, and KEY |

### Azure Fields

When configuring the Admin Console to store backups on Microsoft Azure, the following fields are available:

| Name | Description |
|----------------------------|--------------------------------------------------------------------------------------------------------------------------------------------|
| Bucket | The name of the Azure Blob Storage Container to use |
| Path (Optional) | The path in the Blob Storage Container to store all backups in |
| Resource Group | The Resource Group name of the target Blob Storage Container |
| Storage Account | The Storage Account Name of the target Blob Storage Container |
| Subscription ID | The Subscription ID associated with the target Blob Storage Container (required only for access via Service Principal or AAD Pod Identity) |
| Tenant ID | The Tenant ID associated with the Azure account of the target Blob Storage container (required only for access via Service Principal) |
| Client ID | The Client ID of a Service Principal with access to the target Container (required only for access via Service Principal) |
| Client Secret | The Client Secret of a Service Principal with access to the target Container (required only for access via Service Principal) |
| Cloud Name | The Azure cloud for the target storage (options: AzurePublicCloud, AzureUSGovernmentCloud, AzureChinaCloud, AzureGermanCloud) |
| Add a CA Certificate | (Optional) Upload a third-party issued (proxy) CA certificate used for trusting the authenticity of the snapshot storage endpoint.
Only one file can be uploaded. However, it is possible to concatenate multiple certificates into one file. **Formats:** PEM, CER, CRT, CA, and KEY |

Only connections with Service Principals are supported at this time.

For more information about authentication methods and setting up Azure, see [Velero plugins for Microsoft Azure](https://github.com/vmware-tanzu/velero-plugin-for-microsoft-azure) in the velero-plugin-for-microsoft-azure GitHub repository.

### S3-Compatible Fields

Replicated supports the following S3-compatible object stores for storing backups with Velero:

* Ceph RADOS v12.2.7. For more information, see the [Ceph](https://docs.ceph.com/en/quincy/) documentation.
* MinIO. For more information, see the [MinIO](https://docs.min.io/docs/minio-quickstart-guide.html) documentation.

When configuring the Admin Console to store backups on S3-compatible storage, the following fields are available:

| Name | Description |
|------------------------------|-----------------------------------------------------------------------------------------------------------------|
| Region | The AWS region that the S3 bucket is available in |
| Endpoint | The endpoint to use to connect to the bucket |
| Bucket | The name of the S3 bucket to use |
| Path (Optional) | The path in the bucket to store all backups in |
| Access Key ID (Optional) | The AWS IAM Access Key ID that can read from and write to the bucket |
| Secret Access Key (Optional) | The AWS IAM Secret Access Key that is associated with the Access Key ID |
| Use Instance Role | When enabled, instead of providing an Access Key ID and Secret Access Key, Velero will use an instance IAM role |
| Add a CA Certificate | (Optional) Upload a third-party issued (proxy) CA certificate used for trusting the authenticity of the snapshot storage endpoint. Only one file can be uploaded. However, it is possible to concatenate multiple certificates into one file. **Formats:** PEM, CER, CRT, CA, and KEY |

### NFS Fields

When configuring the Admin Console to store backups on network file system (NFS) storage, the following fields are available:

| Name | Description |
|--------|----------------------------------------------|
| Server | The hostname or IP address of the NFS server |
| Path | The path that is exported by the NFS server |

### Host Path Fields

When configuring the Admin Console to store backups on host path storage, the following field is available:

**Host path**: Enter the path to the directory on the node. Although the path can be local, Replicated recommends that you use an external host path.

================
File: docs/enterprise/snapshots-velero-cli-installing.md
================
# Installing the Velero CLI

You install the Velero CLI before installing Velero and configuring a storage destination for backups.

:::note
For embedded clusters created with Replicated kURL, if the kURL Installer spec included the Velero add-on, then Velero was automatically installed with default internal storage. Replicated recommends that you proceed to change the default internal storage because it is insufficient for disaster recovery. See [Updating Storage Settings in the Admin Console](snapshots-updating-with-admin-console).
:::

## Install the Velero CLI in an Online Cluster

To install the Velero CLI in an online cluster:

1. Do one of the following:

   - (Embedded kURL cluster) Run an SSH command to access and authenticate to your cluster node.
   - (Existing cluster) Open a terminal in the environment that you manage the cluster from, which can be a local machine that has kubectl installed.

1. Check for the latest supported release of the Velero CLI for **Linux AMD64** in the Velero GitHub repo at https://github.com/vmware-tanzu/velero/releases. Although earlier versions of Velero are supported, Replicated recommends using the latest supported version. For more information about supported versions, see [Velero Version Compatibility](/vendor/snapshots-overview#velero-version-compatibility).

   Note the version number for the next step.

1. Run the following command to download the latest supported Velero CLI version for the **Linux AMD64** operating system to the cluster:

   ```
   curl -LO https://github.com/vmware-tanzu/velero/releases/download/VERSION/velero-VERSION-linux-amd64.tar.gz
   ```

   Replace VERSION with the version number using the format `vx.x.x`.

   **Example:**

   ```
   curl -LO https://github.com/vmware-tanzu/velero/releases/download/v1.10.1/velero-v1.10.1-linux-amd64.tar.gz
   ```

1. Run the following command to uncompress the TAR file:

   ```
   tar zxvf velero-VERSION-linux-amd64.tar.gz
   ```
   Replace VERSION with the version number using the format `vx.x.x`.

1. Run the following command to install the Velero CLI:

   ```
   sudo mv velero-VERSION-linux-amd64/velero /usr/local/bin/velero
   ```
   Replace VERSION with the version number using the format `vx.x.x`.

1. Run `velero version` to test that the Velero CLI installation worked correctly.

   You might get an error message stating that there are no matches for the server version. This is acceptable, as long as you get a confirmation for the client version. After the Velero installation, you also see the server version.

## Install the Velero CLI in an Air Gapped Cluster

To install the Velero CLI in an air gapped cluster:

1. From a computer with internet access, check for the latest supported release of the Velero CLI for **Linux AMD64** in the Velero GitHub repo at https://github.com/vmware-tanzu/velero/releases. Although earlier versions of Velero are supported, Replicated recommends using the latest supported version. See [Velero Version Compatibility](/vendor/snapshots-overview#velero-version-compatibility).

   Note the version number for the next step.

1. Run the following command to download the latest supported Velero CLI version for the **Linux AMD64** operating system:

   ```
   curl -LO https://github.com/vmware-tanzu/velero/releases/download/VERSION/velero-VERSION-linux-amd64.tar.gz
   ```

   Replace VERSION with the version number using the format `vx.x.x`.

   **Example:**

   ```
   curl -LO https://github.com/vmware-tanzu/velero/releases/download/v1.10.1/velero-v1.10.1-linux-amd64.tar.gz
   ```

1. Copy the TAR file to the air gapped node.

1. Run the following command to uncompress the TAR file:

   ```
   tar zxvf velero-VERSION-linux-amd64.tar.gz
   ```
   Replace VERSION with the version number using the format `vx.x.x`.

1. Run the following command to install the Velero CLI:

   ```
   sudo mv velero-VERSION-linux-amd64/velero /usr/local/bin/velero
   ```

   Replace VERSION with the version number using the format `vx.x.x`.

1. Run `velero version` to test that the Velero CLI installation worked correctly.

   You might get an error message stating that there are no matches for the server version. This is acceptable, as long as you get a confirmation for the client version.
After the Velero installation, you should see the server version also. + + +## Next Step + +Install Velero and configure a storage destination using one of the following procedures: + +- [Configuring a Host Path Storage Destination](snapshots-configuring-hostpath) +- [Configuring an NFS Storage Destination](snapshots-configuring-nfs) +- [Configuring Other Storage Destinations](snapshots-storage-destinations) + +================ +File: docs/enterprise/snapshots-velero-installing-config.mdx +================ +import NodeAgentMemLimit from "../partials/snapshots/_node-agent-mem-limit.mdx" +import KotsadmNamespace from "../partials/kots-cli/_kotsadm-namespace.mdx" +import KotsadmRegistry from "../partials/kots-cli/_kotsadm-registry.mdx" + +# Configuring Namespace Access and Memory Limit + +This topic describes how to configure namespace access and the memory limit for Velero. + +## Overview + +The Replicated KOTS Admin Console requires access to the namespace where Velero is installed. If your Admin Console is running with minimal role-based-access-control (RBAC) privileges, you must enable the Admin Console to access Velero. + +Additionally, if the application uses a large amount of memory, you can configure the default memory limit to help ensure that Velero runs successfully with snapshots. + +## Configure Namespace Access + +This section applies only to _existing cluster_ installations (online and air gap) where the Admin Console is running with minimal role-based-access-control (RBAC) privileges. + +Run the following command to enable the Admin Console to access the Velero namespace: + +``` +kubectl kots velero ensure-permissions --namespace ADMIN_CONSOLE_NAMESPACE --velero-namespace VELERO_NAMESPACE +``` +Replace: +* `ADMIN_CONSOLE_NAMESPACE` with the namespace on the cluster where the Admin Console is running. +* `VELERO_NAMESPACE` with the namespace on the cluster where Velero is installed. + +For more information, see [`velero ensure-permissions`](/reference/kots-cli-velero-ensure-permissions/) in the KOTS CLI documentation. For more information about RBAC privileges for the Admin Console, see [Kubernetes RBAC](/vendor/packaging-rbac). + +## Configure Memory Limit + +This section applies to all online and air gap installations. + +Velero sets default limits for the velero Pod and the node-agent (restic) Pod during installation. There is a known issue with restic that causes high memory usage, which can result in failures during backup creation when the Pod reaches the memory limit. + + + +## Additional Resources + +* [Troubleshooting Snapshots](snapshots-troubleshooting-backup-restore) + +================ +File: docs/enterprise/status-viewing-details.md +================ +import StatusesTable from "../partials/status-informers/_statusesTable.mdx" +import AggregateStatus from "../partials/status-informers/_aggregateStatus.mdx" +import AggregateStatusIntro from "../partials/status-informers/_aggregate-status-intro.mdx" + +# Understanding Application Status Details in the Admin Console + +This topic describes how to view the status of an application on the Replicated KOTS Admin Console dashboard. It also describes how Replicated KOTS collects and aggregates the application status. +## View Status Details + +The application status displays on the dashboard of the Admin Console. Viewing the status details can be helpful for troubleshooting. + +To view the status details, click **Details** next to the status on the dashboard. 
![Status Details](/images/kotsadm-dashboard-appstatus.png)

## About Application Status

To display application status on the Admin Console dashboard, KOTS aggregates the status of specific Kubernetes resources for the application.

The following resource types are supported for displaying application status:

* Deployment
* StatefulSet
* Service
* Ingress
* PersistentVolumeClaims (PVC)
* DaemonSet

Applications can specify one or more of the supported Kubernetes workloads listed above. KOTS watches all specified workloads for state changes.

For more information about how to interpret the application status displayed on the Admin Console dashboard, see [Resource Statuses](#resource-statuses) and [Aggregate Application Status](#aggregate-application-status) below.

### Resource Statuses

Possible application statuses are Ready, Updating, Degraded, Unavailable, and Missing.

The following table lists the supported Kubernetes resources and the conditions that contribute to each status:



### Aggregate Application Status




================
File: docs/enterprise/troubleshooting-an-app.mdx
================
import GenerateBundleAdminConsole from "../partials/support-bundles/_generate-bundle-admin-console.mdx"

# Generating Support Bundles from the Admin Console

This topic describes how to generate support bundles from the KOTS Admin Console.

## Generate a Bundle from the Admin Console



================
File: docs/enterprise/updating-app-manager.mdx
================
import AdminConsole from "../partials/updating/_admin-console.mdx"
import AdminConsoleAirGap from "../partials/updating/_admin-console-air-gap.mdx"
import PushKotsImages from "../partials/install/_push-kotsadm-images.mdx"
import BuildAirGapBundle from "../partials/install/_airgap-bundle-build.mdx"
import DownloadAirGapBundle from "../partials/install/_airgap-bundle-download.mdx"
import ViewAirGapBundle from "../partials/install/_airgap-bundle-view-contents.mdx"

# Performing Updates in Existing Clusters

This topic describes how to perform updates in existing cluster installations with Replicated KOTS. It includes information about how to update applications and the version of KOTS running in the cluster.

## Update an Application

You can perform an application update using the KOTS Admin Console or the KOTS CLI. You can also set up automatic updates. See [Configuring Automatic Updates](/enterprise/updating-apps).

### Using the Admin Console

#### Online Environments



#### Air Gap Environments



### Using the KOTS CLI

You can use the KOTS CLI [upstream upgrade](/reference/kots-cli-upstream-upgrade) command to update an application in existing cluster installations.

#### Online Environments

To update an application in online environments:

```bash
kubectl kots upstream upgrade APP_SLUG -n ADMIN_CONSOLE_NAMESPACE
```
Where:
* `APP_SLUG` is the unique slug for the application. See [Get the Application Slug](/vendor/vendor-portal-manage-app#slug) in _Managing Applications_.
* `ADMIN_CONSOLE_NAMESPACE` is the namespace where the Admin Console is running.

:::note
Add the `--deploy` flag to automatically deploy this version.
:::

#### Air Gap Environments

To update an application in air gap environments:

1. In the [Vendor Portal](https://vendor.replicated.com), go to the channel where the target release is promoted to build and download the new `.airgap` bundle:

   

1. 

1. 

1. Run the following command to update the application:
   ```bash
   kubectl kots upstream upgrade APP_SLUG \
     --airgap-bundle NEW_AIRGAP_BUNDLE \
     --kotsadm-registry REGISTRY_HOST[/REGISTRY_NAMESPACE] \
     --registry-username RO_USERNAME \
     --registry-password RO_PASSWORD \
     -n ADMIN_CONSOLE_NAMESPACE
   ```
   Replace:
   * `APP_SLUG` with the unique slug for the application. See [Get the Application Slug](/vendor/vendor-portal-manage-app#slug) in _Managing Applications_.
   * `NEW_AIRGAP_BUNDLE` with the `.airgap` bundle for the target application version.
   * `REGISTRY_HOST` with the private registry that contains the Admin Console images.
   * `REGISTRY_NAMESPACE` with the registry namespace where the images are hosted (Optional).
   * `RO_USERNAME` and `RO_PASSWORD` with the username and password for an account that has read-only access to the private registry.
   * `ADMIN_CONSOLE_NAMESPACE` with the namespace where the Admin Console is running.

:::note
Add the `--deploy` flag to automatically deploy this version.
:::

## Update KOTS

This section describes how to update the version of Replicated KOTS running in your cluster. For information about the latest versions of KOTS, see [KOTS Release Notes](/release-notes/rn-app-manager).

:::note
Downgrading KOTS to a version earlier than what is currently deployed is not supported.
:::

### Online Environments

To update KOTS in an online existing cluster:

1. Run _one_ of the following commands to update the KOTS CLI to the target version of KOTS:

   - **Install or update to the latest version**:

     ```
     curl https://kots.io/install | bash
     ```

   - **Install or update to a specific version**:

     ```
     curl https://kots.io/install/VERSION | bash
     ```
     Where `VERSION` is the target KOTS version.

   For more KOTS CLI installation options, including information about how to install or update without root access, see [Installing the KOTS CLI](/reference/kots-cli-getting-started).

1. Run the following command to update the KOTS Admin Console to the same version as the KOTS CLI:

   ```bash
   kubectl kots admin-console upgrade -n NAMESPACE
   ```
   Replace `NAMESPACE` with the namespace in your cluster where KOTS is installed.

### Air Gap Environments

To update KOTS in an existing air gap cluster:

1. Download the target version of the following assets from the [Releases](https://github.com/replicatedhq/kots/releases/latest) page in the KOTS GitHub repository:
   * KOTS Admin Console `kotsadm.tar.gz` bundle
   * KOTS CLI plugin

   Ensure that you can access the downloaded bundles from the environment where the Admin Console is running.

1. Install or update the KOTS CLI to the version that you downloaded. See [Manually Download and Install](/reference/kots-cli-getting-started#manually-download-and-install) in _Installing the KOTS CLI_.

1. 

1. Run the following command using registry read-only credentials to update the KOTS Admin Console:

   ```
   kubectl kots admin-console upgrade \
     --kotsadm-registry REGISTRY_HOST \
     --registry-username RO_USERNAME \
     --registry-password RO_PASSWORD \
     -n NAMESPACE
   ```
   Replace:
   * `REGISTRY_HOST` with the same private registry from the previous step.
   * `RO_USERNAME` with the username for credentials with read-only permissions to the registry.
   * `RO_PASSWORD` with the password associated with the username.
   * `NAMESPACE` with the namespace on your cluster where KOTS is installed.

   For help information, run `kubectl kots admin-console upgrade -h`.
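As a quick check after updating in either environment, you can confirm the version of the KOTS CLI that is now installed. This is a minimal verification sketch; the command prints the installed CLI version:

```bash
kubectl kots version
```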
================
File: docs/enterprise/updating-apps.mdx
================
# Configuring Automatic Updates

This topic describes how to configure automatic updates for applications installed in online (internet-connected) environments.

## Overview

For applications installed in an online environment, the Replicated KOTS Admin Console automatically checks for new versions once every four hours by default. After the Admin Console checks for updates, it downloads any new versions of the application and displays them on the **Version History** tab.

You can edit this default cadence to customize how often the Admin Console checks for and downloads new versions.

You can also configure the Admin Console to automatically deploy new versions of the application after it downloads them.

The Admin Console only deploys new versions automatically if preflight checks pass. By default, the Admin Console does not automatically deploy any version of an application.

## Limitations

Automatic updates have the following limitations:

* Automatic updates are not supported for [Replicated Embedded Cluster](/vendor/embedded-overview) installations.

* Automatic updates are not supported for applications installed in air gap environments with no outbound internet access.

* Automatically deploying new versions is not supported when KOTS is installed with minimal RBAC. This is because all preflight checks must pass for the new version to be automatically deployed, and preflight checks that require cluster-scoped access will fail in minimal RBAC environments.

## Set Up Automatic Updates

To configure automatic updates:

1. In the Admin Console, go to the **Version History** tab and click **Configure automatic updates**.

   The **Configure automatic updates** dialog opens.

1. Under **Automatically check for updates**, use the default or select a cadence (Hourly, Daily, Weekly, Never, Custom) from the dropdown list.

   To turn off automatic updates, select **Never**.

   To define a custom cadence, select **Custom**, then enter a cron expression in the text field. For more information about cron expressions, see [Cron Expressions](/reference/cron-expressions). Configured automatic update checks use the local server time. An example expression is shown after this procedure.

   ![Configure automatic updates](/images/automatic-updates-config.png)

1. Under **Automatically deploy new versions**, select an option. The available options depend on whether semantic versioning is enabled for the channel.
   * **For channels that use semantic versioning**: (v1.58.0 and later) Select an option in the dropdown to specify the versions that the Admin Console automatically deploys. For example, to automatically deploy only new patch and minor versions, select **Automatically deploy new patch and minor versions**.
   * **For channels that do not use semantic versioning**: (v1.67.0 and later) Optionally select **Enable automatic deployment**. When this checkbox is enabled, the Admin Console automatically deploys each new version of the application that it downloads.
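As an example for the **Custom** cadence option described above, a five-field cron expression (minute, hour, day of month, month, day of week, assuming standard cron syntax) that checks for updates once daily at 2:00 AM server time looks like the following:

```
0 2 * * *
```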
================
File: docs/enterprise/updating-embedded.mdx
================
import UpdateAirGapAdm from "../partials/embedded-cluster/_update-air-gap-admin-console.mdx"
import UpdateAirGapCli from "../partials/embedded-cluster/_update-air-gap-cli.mdx"
import UpdateAirGapOverview from "../partials/embedded-cluster/_update-air-gap-overview.mdx"
import DoNotDowngrade from "../partials/embedded-cluster/_warning-do-not-downgrade.mdx"
import Overview from "../partials/embedded-cluster/_update-overview.mdx"

# Performing Updates in Embedded Clusters

This topic describes how to perform updates for [Replicated Embedded Cluster](/vendor/embedded-overview) installations.

:::note
If you are instead looking for information about Replicated kURL, see [Performing Updates in kURL Clusters](updating-kurl).
:::

## Overview



The following diagram demonstrates how updates are performed with Embedded Cluster in online (internet-connected) environments:

![Embedded Cluster updates Kubernetes and an app in a customer environment](/images/embedded-cluster-update.png)

[View a larger version of this image](/images/embedded-cluster-update.png)

As shown in the diagram above, users check for available updates from the KOTS Admin Console. When deploying the new version, both the application and the cluster infrastructure are updated as needed.

## Update in Online Clusters



To perform an update with Embedded Cluster:

1. In the Admin Console, go to the **Version history** tab.

   All versions available for upgrade are listed in the **Available Updates** section:

   ![Version history page](/images/ec-upgrade-version-history.png)

   [View a larger version of this image](/images/ec-upgrade-version-history.png)

1. Click **Deploy** next to the target version.

1. On the **Config** screen of the upgrade wizard, make any necessary changes to the configuration for the application. Click **Next**.

   ![Config screen in the upgrade wizard](/images/ec-upgrade-wizard-config.png)

   [View a larger version of this image](/images/ec-upgrade-wizard-config.png)

   :::note
   Any changes made on the **Config** screen of the upgrade wizard are not set until the new version is deployed.
   :::

1. On the **Preflight** screen, view the results of the preflight checks.

   ![Preflight screen in the upgrade wizard](/images/ec-upgrade-wizard-preflights.png)

   [View a larger version of this image](/images/ec-upgrade-wizard-preflights.png)

1. On the **Confirm** screen, click **Deploy**.

   ![Confirmation screen in the upgrade wizard](/images/ec-upgrade-wizard-confirm.png)

   [View a larger version of this image](/images/ec-upgrade-wizard-confirm.png)

   During updates, the Admin Console is unavailable. A modal is displayed with a message that the update is in progress.

   :::note
   KOTS can experience downtime during an update, such as in single-node installations. If downtime occurs, refreshing the page results in a connection error. Users can refresh the page again after the update is complete to access the Admin Console.
   :::

## Update in Air Gap Clusters





### Upload the New Version From the Command Line

To update by uploading the air gap bundle for the new version from the command line:



### Upload the New Version From the Admin Console

To update by uploading the air gap bundle for the new version from the Admin Console:



================
File: docs/enterprise/updating-kurl-about.mdx
================
import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"

# About kURL Cluster Updates



This topic provides an overview of Replicated kURL cluster updates. For information about how to perform updates in kURL clusters, see [Performing Updates in kURL Clusters](updating-kurl).

## Overview

The Replicated kURL installer spec specifies the kURL add-ons and the Kubernetes version that are deployed in kURL clusters. You can run the kURL installation script to apply the latest installer spec and update the cluster.

## About Kubernetes Updates {#kubernetes}

The version of Kubernetes running in a kURL cluster can be upgraded by one or more minor versions.

The Kubernetes upgrade process in kURL clusters steps through one minor version at a time. For example, upgrades from Kubernetes 1.19.x to 1.26.x install versions 1.20.x, 1.21.x, 1.22.x, 1.23.x, 1.24.x, and 1.25.x before installing 1.26.x.

The installation script automatically detects when the Kubernetes version in your cluster must be updated. When a Kubernetes upgrade is required, the script first prints a prompt: `Drain local node and apply upgrade?`. When you confirm the prompt, it drains and upgrades the local primary node where the script is running.

Then, if there are any remote primary nodes to upgrade, the script drains each sequentially and prints a command that you must run on the node to upgrade. For example, the command that the script prints might look like the following: `curl -sSL https://kurl.sh/myapp/upgrade.sh | sudo bash -s hostname-check=master-node-2 kubernetes-version=v1.24.3`.

The script polls the status of each remote node until it detects that the Kubernetes upgrade is complete. Then, it uncordons the node and proceeds to cordon and drain the next node. This process ensures that only one node is cordoned at a time. After upgrading all primary nodes, the script performs the same operation sequentially on all remote secondary nodes.

### Air Gap Multi-Version Kubernetes Updates {#kubernetes-multi}

To upgrade Kubernetes by more than one minor version in air gapped kURL clusters, you must provide a package that includes the assets required for the upgrade.

When you run the installation script to upgrade, the script searches for the package in the `/var/lib/kurl/assets/` directory. The script then lists any required assets that are missing, prints a command to download the missing assets as a `.tar.gz` package, and prompts you to provide an absolute path to the package in your local directory. For example:

```
⚙ Upgrading Kubernetes from 1.23.17 to 1.26.3
This involves upgrading from 1.23 to 1.24, 1.24 to 1.25, and 1.25 to 1.26.
This may take some time.
⚙ Downloading assets required for Kubernetes 1.23.17 to 1.26.3 upgrade
The following packages are not available locally, and are required:
    kubernetes-1.24.12.tar.gz
    kubernetes-1.25.8.tar.gz

You can download them with the following command:

    curl -LO https://kurl.sh/bundle/version/v2023.04.24-0/19d41b7/packages/kubernetes-1.24.12,kubernetes-1.25.8.tar.gz

Please provide the path to the file on the server.
Absolute path to file:
```

## About Add-ons and KOTS Updates {#add-ons}

If the application vendor updated any add-ons in the kURL installer spec since the last time that you ran the installation script in your cluster, the script automatically updates the add-ons after updating Kubernetes (if required).

For a complete list of add-ons that can be included in the kURL installer spec, including the KOTS add-on, see [Add-ons](https://kurl.sh/docs/add-ons/antrea) in the kURL documentation.

### Containerd and Docker Add-on Updates

The installation script upgrades the version of the Containerd or Docker container runtime if required by the installer spec. For example, if your cluster uses Containerd version 1.6.4 and the spec is updated to use 1.6.18, then Containerd is updated to 1.6.18 in your cluster when you run the installation script.

The installation script also supports migrating from Docker to Containerd, as Docker is not supported in Kubernetes versions 1.24 and later. If the installation script detects a change from Docker to Containerd, it installs Containerd, loads the images found in Docker, and removes Docker.

For information about the container runtime add-ons, see [Containerd Add-On](https://kurl.sh/docs/add-ons/containerd) and [Docker Add-On](https://kurl.sh/docs/add-ons/docker) in the kURL documentation.

### KOTS Updates (KOTS Add-on)

The version of KOTS that is installed in a kURL cluster is set by the [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm), which is defined in the kURL installer spec.

For example, if the version of KOTS running in your cluster is 1.109.0, and the KOTS add-on in the kURL installer spec is updated to 1.109.12, then the KOTS version in your cluster is updated to 1.109.12 when you update the cluster.

================
File: docs/enterprise/updating-kurl.mdx
================
import InstallerRequirements from "../partials/updating/_installerRequirements.mdx"
import UpgradePrompt from "../partials/updating/_upgradePrompt.mdx"
import AdminConsole from "../partials/updating/_admin-console.mdx"
import AdminConsoleAirGap from "../partials/updating/_admin-console-air-gap.mdx"
import DownloadKurlBundle from "../partials/install/_download-kurl-bundle.mdx"
import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"

# Performing Updates in kURL Clusters



This topic describes how to perform updates in Replicated kURL installations. It includes procedures for updating an application, as well as for updating the versions of Kubernetes, Replicated KOTS, and add-ons in a kURL cluster.

For more information about managing nodes in kURL clusters, including how to safely reset, reboot, and remove nodes when performing maintenance tasks, see [Managing Nodes](https://kurl.sh/docs/install-with-kurl/managing-nodes) in the open source kURL documentation.

## Update an Application

For kURL installations, you can update an application from the Admin Console. You can also set up automatic updates. See [Configuring Automatic Updates](/enterprise/updating-apps).
### Online Environments



### Air Gap Environments



## Update the kURL Cluster

After updating the kURL installer spec, you can rerun the kURL installation script to update a kURL cluster. For more information about kURL cluster updates, see [About kURL Cluster Updates](/enterprise/updating-kurl-about).

For more information about managing nodes in kURL clusters, including how to safely reset, reboot, and remove nodes when performing maintenance tasks, see [Managing Nodes](https://kurl.sh/docs/install-with-kurl/managing-nodes) in the open source kURL documentation.

:::important
The Kubernetes scheduler automatically reschedules Pods to other nodes during maintenance. Any deployments or StatefulSets with a single replica experience downtime while being rescheduled.
:::

### Online Environments

To update the kURL cluster in an online environment:

1. Edit the kURL installer spec as desired. For example, update the version of Kubernetes or add, remove, or update add-ons. For more information, see [Creating a kURL Installer](/vendor/packaging-embedded-kubernetes).

1. Run the kURL installation script on any primary node in the cluster:

   ```bash
   curl -sSL https://k8s.kurl.sh/APP_SLUG | sudo bash -s ADVANCED_OPTIONS
   ```
   Replace:
   * `APP_SLUG` with the unique slug for the application.
   * `ADVANCED_OPTIONS` optionally with any flags listed in [Advanced Options](https://kurl.sh/docs/install-with-kurl/advanced-options) in the kURL documentation.

   To use no advanced installation options, remove `-s ADVANCED_OPTIONS` from the command.

   See the following recommendations for advanced options:

   

1. 

### Air Gap Environments

For air gap installations, you must load images on each node in the cluster before you can run the installation script to update Kubernetes and any add-ons. This is because upgraded components might have Pods scheduled on any node in the cluster.

To update the kURL cluster in an air gap environment:

1. Edit the kURL installer spec as desired. For example, update the version of Kubernetes or add, remove, or update add-ons. For more information, see [Creating a kURL Installer](/vendor/packaging-embedded-kubernetes).

1. Repeat the following steps on each node in the cluster to download and extract the kURL `.tar.gz` air gap bundle for the updated spec:

   1. Download the kURL `.tar.gz` air gap bundle from the channel where the new kURL installer spec is promoted:

      * To download the kURL air gap bundle for the Stable channel:

        

      * To download the kURL bundle for channels other than Stable:

        ```bash
        replicated channel inspect CHANNEL
        ```
        Replace `CHANNEL` with the exact name of the target channel, which can include uppercase letters or special characters, such as `Unstable` or `my-custom-channel`.

        In the output of this command, copy the curl command with the air gap URL.

   1. Extract the contents of the bundle:

      ```bash
      tar -xvzf FILENAME.tar.gz
      ```
      Replace `FILENAME` with the name of the downloaded kURL `.tar.gz` air gap bundle.

1. Run the following kURL script to ensure all required images are available:

   ```bash
   cat tasks.sh | sudo bash -s load-images
   ```

   :::note
   The kURL installation script that you will run in the next step also performs a check for required images and prompts you to run the `load-images` command if any images are missing.
   :::

1. Run the kURL installation script on any primary node in the cluster with the `airgap` option:
   ```bash
   cat install.sh | sudo bash -s airgap OTHER_ADVANCED_OPTIONS
   ```
   Replace `OTHER_ADVANCED_OPTIONS` optionally with any flags listed in [Advanced Options](https://kurl.sh/docs/install-with-kurl/advanced-options) in the kURL documentation.

   See the following recommendations for advanced options:

   

1. 

   :::note
   If Kubernetes must be upgraded by more than one minor version, the script automatically searches for the required Kubernetes assets in the `/var/lib/kurl/assets/` directory. If the assets are not available, the script prints a command to download the assets as a `tar.gz` package. Download and provide the absolute path to the package when prompted to continue with the upgrade.
   :::

================
File: docs/enterprise/updating-licenses.md
================
# Updating Licenses in the Admin Console

This topic describes how to update a license from the KOTS Admin Console.

## Update Online Licenses

To update licenses in online environments:

1. In the Admin Console, go to the **License** tab.

1. Click **Sync license** to get the latest updates.

   ![Online License](/images/online-license-tab.png)

   [View a larger version of this image](/images/online-license-tab.png)

   :::note
   If no changes are detected, a **License is already up to date** message appears.
   :::

   When the license is updated, KOTS makes a new version available that includes the license changes:

   ![License updated successfully](/images/kots-license-update-message.png)

   [View a larger version of this image](/images/kots-license-update-message.png)

1. In the dialog, click **Go to new version** to navigate to the **Version history** page.

1. On the **Version history** page, next to the new version labeled **License Change**, click **Deploy** then **Yes, deploy**.

   ![Deploy license change](/images/kots-deploy-license-change.png)

   [View a larger version of this image](/images/kots-deploy-license-change.png)

   The version with the license change is then displayed as the currently deployed version, as shown below:

   ![Currently deployed version](/images/kots-license-change-currently-deployed.png)

   [View a larger version of this image](/images/kots-license-change-currently-deployed.png)

## Update Air Gap Licenses

To update licenses in air gap environments:

1. Download the new license. Ensure that it is available on the machine where you can access a browser.

1. In the Admin Console, go to the **License** tab.

1. Click **Upload license** and select the new license.

   ![Airgap License](/images/airgap-license-tab.png)

   [View a larger version of this image](/images/airgap-license-tab.png)

   :::note
   If no changes are detected, a **License is already up to date** message appears.
   :::

   When the license is updated, KOTS makes a new version available that includes the license changes:

   ![License updated successfully](/images/kots-airgap-license-update-message.png)

   [View a larger version of this image](/images/kots-airgap-license-update-message.png)

1. In the dialog, click **Go to new version** to navigate to the **Version history** page.

1. On the **Version history** page, next to the new version labeled **License Change**, click **Deploy** then **Yes, deploy**.
   ![Deploy license change](/images/kots-deploy-license-change.png)

   [View a larger version of this image](/images/kots-deploy-license-change.png)

   The version with the license change is then displayed as the currently deployed version, as shown below:

   ![Currently deployed version](/images/kots-license-change-currently-deployed.png)

   [View a larger version of this image](/images/kots-license-change-currently-deployed.png)

## Upgrade from a Community License

If you have a community license, you can change your license by uploading a new one. This allows you to upgrade from a community version of the software without having to reinstall the Admin Console and the application.

To change a community license to another license:

1. Download the new license.
1. In the **License** tab of the Admin Console, click **Change license**.
1. In the dialog, upload the new license file.

================
File: docs/enterprise/updating-patching-with-kustomize.md
================
# Patching with Kustomize

This topic describes how to use Kustomize to patch an application before deploying.

## Overview

Replicated KOTS uses Kustomize to let you make patches to an application outside of the options available in the KOTS Admin Console **Config** page. _Kustomizations_ are the Kustomize configuration objects, defined in `kustomization.yaml` files, that describe how to transform or generate other Kubernetes objects.

These kustomizations overlay the application resource files and can persist after release updates. For example, you can kustomize the number of replicas that you want to continually use in your environment or specify what `nodeSelectors` to use for a deployment.

For more information, see the [Kustomize website](https://kustomize.io).

## Limitation

For Helm charts deployed with version `kots.io/v1beta2` of the KOTS HelmChart custom resource, editing the downstream Kustomization files to make changes to the application before deploying is not supported. This is because KOTS does not use Kustomize when installing Helm charts with the `kots.io/v1beta2` HelmChart custom resource. For more information, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about).

## About the Directory Structure

You can patch an application with Kustomize from the **View files** page in the Admin Console. The **View files** page shows the Kubernetes manifest files for the application.

The following image shows an example of the file directory on the **View files** page:

![Kustomize Directory Structure](/images/kustomize-dir-structure.png)

[View a larger version of this image](/images/kustomize-dir-structure.png)

For more information about each of the sections in the file directory, see the following sections:

- [Upstream](#upstream)
- [Base](#base)
- [Overlays](#overlays)
- [Rendered](#rendered)
- [skippedFiles](#skippedfiles)

### Upstream

The following table describes the `upstream` directory and whether custom changes persist after an update:
<table>
  <tr>
    <th>Directory</th>
    <th>Changes Persist?</th>
    <th>Description</th>
  </tr>
  <tr>
    <td><code>upstream</code></td>
    <td>No, except for the <code>userdata</code> subdirectory</td>
    <td><p>The <code>upstream</code> directory exactly mirrors the content pushed to a release.</p><p>Contains the template functions, preflight checks, support bundle, config options, license, and so on.</p><p>Contains a <code>userdata</code> subdirectory that includes user data files such as the license file and the config file.</p></td>
  </tr>
</table>

### Base

The following table describes the `base` directory and whether custom changes persist after an update:
<table>
  <tr>
    <th>Directory</th>
    <th>Changes Persist?</th>
    <th>Description</th>
  </tr>
  <tr>
    <td><code>base</code></td>
    <td>No</td>
    <td><p>After KOTS processes and renders the upstream, it puts those files in the <code>base</code> directory.</p><p>Only the deployable application files, such as files deployable with <code>kubectl apply</code>, are placed here.</p><p>Any non-deployable manifests, such as template functions, preflight checks, and configuration options, are removed.</p></td>
  </tr>
</table>

### Overlays

The `overlays` directory contains the following subdirectories that apply specific kustomizations to the `base` directory when deploying a version to the cluster. The following table describes the subdirectories and specifies whether the custom changes made in each subdirectory persist after an update.
<table>
  <tr>
    <th>Subdirectory</th>
    <th>Changes Persist?</th>
    <th>Description</th>
  </tr>
  <tr>
    <td><code>midstream</code></td>
    <td>No</td>
    <td><p>Contains KOTS-specific kustomizations, such as:</p><ul><li>Backup labels, such as those used to configure Velero.</li><li>Image pull secret definitions and patches to inject the <code>imagePullSecret</code> field into relevant manifests (such as deployments, stateful sets, and jobs).</li></ul></td>
  </tr>
  <tr>
    <td><code>downstream</code></td>
    <td>Yes</td>
    <td><p>Contains user-defined kustomizations that are applied to the <code>midstream</code> directory and deployed to the cluster.</p><p>Only one <code>downstream</code> subdirectory is supported. It is automatically created and named <code>this-cluster</code> when the Admin Console is installed.</p><p>To add kustomizations, see <a href="#patch-an-application">Patch an Application</a>.</p></td>
  </tr>
  <tr>
    <td><code>midstream/charts</code></td>
    <td>No</td>
    <td><p>Appears only when the <code>useHelmInstall</code> property in the HelmChart custom resource is set to <code>true</code>.</p><p>Contains a subdirectory for each Helm chart. Each Helm chart has its own kustomizations because each chart is rendered and deployed separately from other charts and manifests.</p><p>The subcharts of each Helm chart also have their own kustomizations and are rendered separately. However, these subcharts are included and deployed as part of the parent chart.</p></td>
  </tr>
  <tr>
    <td><code>downstream/charts</code></td>
    <td>Yes</td>
    <td><p>Appears only when the <code>useHelmInstall</code> property in the HelmChart custom resource is set to <code>true</code>.</p><p>Contains a subdirectory for each Helm chart. Each Helm chart has its own kustomizations because each chart is rendered and deployed separately from other charts and manifests.</p><p>The subcharts of each Helm chart also have their own kustomizations and are rendered separately. However, these subcharts are included and deployed as part of the parent chart.</p></td>
  </tr>
</table>

### Rendered

The following table describes the `rendered` directory and whether custom changes persist after an update:
<table>
  <tr>
    <th>Directory</th>
    <th>Changes Persist?</th>
    <th>Description</th>
  </tr>
  <tr>
    <td><code>rendered</code></td>
    <td>No</td>
    <td><p>Contains the final rendered application manifests that are deployed to the cluster.</p><p>The rendered files are created when KOTS processes the <code>base</code> by applying the corresponding overlays and the user-defined kustomizations. KOTS puts the rendered files in the <code>rendered</code> directory.</p></td>
  </tr>
  <tr>
    <td><code>rendered/charts</code></td>
    <td>No</td>
    <td><p>Appears only when the <code>useHelmInstall</code> property in the HelmChart custom resource is set to <code>true</code>.</p><p>Contains a subdirectory for each rendered Helm chart. Each Helm chart is deployed separately from other charts and manifests.</p><p>The rendered subcharts of each Helm chart are included and deployed as part of the parent chart.</p></td>
  </tr>
</table>
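Putting the sections above together, a simplified sketch of the directory layout shown on the **View files** page looks like the following. The `charts` subdirectories appear only for Helm charts installed with `useHelmInstall: true`, and the exact contents vary by application:

```
upstream/            # mirrors the release; includes userdata/
base/                # rendered, deployable manifests
overlays/
  midstream/         # KOTS-specific kustomizations (and charts/)
  downstream/
    this-cluster/    # user-defined kustomizations (and charts/)
rendered/            # final manifests deployed to the cluster (and charts/)
skippedFiles/        # files KOTS could not render (see below)
```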
### skippedFiles

The `skippedFiles` directory lists files that KOTS is not able to process or render, such as invalid YAML files.

The `_index.yaml` file contains metadata and details about the errors, such as which files they were found in and sometimes the line number of the error.

## Patch an Application

To patch the application with Kustomize so that your changes persist between updates, edit the files in the `overlays/downstream/this-cluster` directory.

The Admin Console overwrites the `upstream` and `base` directories each time you upgrade the application to a later version.

To patch an application:

1. On the View Files tab in the Admin Console, click **Need to edit these files? Click here to learn how**.

   ![edit-patches-kots-app](/images/edit-patches-kots-app.png)

1. Download the application bundle locally:

   ```shell
   kubectl kots download --namespace APP_NAMESPACE --slug APP_SLUG
   ```
   Replace:
   * `APP_NAMESPACE` with the namespace on the cluster where the application is deployed.
   * `APP_SLUG` with the unique slug for the application.

   You can copy these values from the dialog that appears when you click **Need to edit these files? Click here to learn how**.

1. Create a Kubernetes manifest YAML file and make any desired edits. You only need to add the fields and values that you want to change because this patch file overwrites the corresponding values in the `base` directory. For example, the following `Deployment` patch manifest file shows an edit only to the number of replicas. None of the other values in the `base/deployment.yaml` file will be overwritten.

   ```yaml
   apiVersion: apps/v1
   kind: Deployment
   metadata:
     name: example-nginx
   spec:
     replicas: 2
   ```

1. Add the filename that you created in the previous step to the `patches` field in the `kustomization.yaml` file, located in `/overlays/downstream/this-cluster`. The `downstream/this-cluster` subdirectory is where custom changes (patches) persist when releases are updated. These changes are in turn applied to the `midstream` directory. For more information, see [Overlays](#overlays).

   **Example:**

   ```yaml
   apiVersion: kustomize.config.k8s.io/v1beta1
   bases:
   - ../../midstream
   kind: Kustomization
   patches:
   - path: ./FILENAME.yaml
   ```

1. Upload your changes to the cluster:

   ```shell
   kubectl kots upload --namespace APP_NAMESPACE --slug APP_SLUG ~/APP-SLUG
   ```

1. On the Version History tab in the Admin Console, click **Diff** to see the new version of the application with the diff of the changes that you uploaded.

   ![kustomize-view-history-diff](/images/kustomize-view-history-diff.png)

   [View a larger version of this image](/images/kustomize-view-history-diff.png)

1. Click **Deploy** to apply the changes.

   ![kustomize-view-history-deploy](/images/kustomize-view-history-deploy.png)

1. Verify your changes.
   For example, running the following command shows that there are two NGINX pods running after deploying two replicas in the example YAML above:

   ```shell
   kubectl get po | grep example-nginx
   ```
   **Example output:**

   ```shell
   example-nginx-f5c49fdf6-bf584     1/1     Running   0     1h
   example-nginx-t6ght74jr-58fhr     1/1     Running   0     1m
   ```

================
File: docs/enterprise/updating-tls-cert.md
================
import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"

# Updating TLS Certificates in kURL Clusters



This topic describes how to upload custom TLS certificates for Replicated kURL clusters.

## Overview

For kURL clusters, the default Replicated KOTS self-signed certificate automatically renews 30 days before the expiration date.

If you have uploaded a custom TLS certificate instead, then no renewal is attempted, even if the certificate is expired. In this case, you must manually upload a new custom certificate.

For information about TLS renewal for the registry and the Kubernetes control plane with Replicated kURL, see [TLS Certificates](https://kurl.sh/docs/install-with-kurl/setup-tls-certs) in the kURL documentation.

## Update Custom TLS Certificates

If you are using a custom TLS certificate in a kURL cluster, you must manually upload a new certificate when the previous one expires.

:::important
Adding the `acceptAnonymousUploads` annotation temporarily creates a vulnerability for an attacker to maliciously upload TLS certificates. After TLS certificates have been uploaded, the vulnerability is closed again.

Replicated recommends that you complete this upload process quickly to minimize the vulnerability risk.
:::

To upload a new custom TLS certificate:

1. Run the following annotation command to restore the ability to upload new TLS certificates:

   ```bash
   kubectl -n default annotate secret kotsadm-tls acceptAnonymousUploads=1 --overwrite
   ```
1. Run the following command to get the name of the kurl-proxy server:

   ```bash
   kubectl get pods -A | grep kurl-proxy | awk '{print $2}'
   ```

1. Run the following command to delete the kurl-proxy pod. The pod automatically restarts after the command runs.

   ```bash
   kubectl delete pods PROXY_SERVER
   ```

   Replace PROXY_SERVER with the name of the kurl-proxy server that you got in the previous step.

1. After the pod has restarted, direct your browser to `http://<ip>:8800/tls` and go through the upload process in the user interface.

================
File: docs/partials/airgap/_airgap-bundle.mdx
================
Air gap bundles (`.airgap`) contain the images needed to install and run a single release of your application in _air gap_ environments with no outbound internet access.
================
File: docs/partials/application-links/_nginx-deployment.mdx
================
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
      annotations:
        backup.velero.io/backup-volumes: nginx-content
    spec:
      containers:
      - name: nginx
        image: nginx
        resources:
          limits:
            memory: '256Mi'
            cpu: '500m'
          requests:
            memory: '32Mi'
            cpu: '100m'
```

================
File: docs/partials/application-links/_nginx-k8s-app.mdx
================
```yaml
apiVersion: app.k8s.io/v1beta1
kind: Application
metadata:
  name: "nginx"
spec:
  descriptor:
    links:
      - description: Open App
        # needs to match applicationUrl in kots-app.yaml
        url: "http://nginx"
```

================
File: docs/partials/application-links/_nginx-kots-app.mdx
================
```yaml
apiVersion: kots.io/v1beta1
kind: Application
metadata:
  name: nginx
spec:
  title: App Name
  icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/kubernetes/icon/color/kubernetes-icon-color.png
  statusInformers:
    - deployment/nginx
  ports:
    - serviceName: "nginx"
      servicePort: 80
      localPort: 8888
      applicationUrl: "http://nginx"
```

================
File: docs/partials/application-links/_nginx-service.mdx
================
```yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
  annotations:
    kots.io/when: '{{repl not IsKurl }}'
spec:
  type: ClusterIP
  ports:
    - port: 80
  selector:
    app: nginx
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
  annotations:
    kots.io/when: '{{repl IsKurl }}'
spec:
  type: NodePort
  ports:
    - port: 80
      nodePort: 8888
  selector:
    app: nginx
```

================
File: docs/partials/ci-cd/_build-source-code.mdx
================
Add one or more jobs to compile your application source code and build images. The build jobs that you create vary depending upon your application and your CI/CD platform. For additional guidance, see the documentation for your CI/CD platform.

================
File: docs/partials/ci-cd/_test-recs.mdx
================
* **Application Testing:** Traditional application testing includes unit, integration, and end-to-end tests. These tests are critical for application reliability, and Compatibility Matrix is designed to incorporate and use your application testing.

* **Performance Testing:** Performance testing is used to benchmark your application to ensure it can handle the expected load and scale gracefully. Test your application under a range of workloads and scenarios to identify any bottlenecks or performance issues. Make sure to optimize your application for different Kubernetes distributions and configurations by creating all of the environments you need to test in.

* **Smoke Testing:** Using a single, conformant Kubernetes distribution to test basic functionality of your application with default (or standard) configuration values is a quick way to get feedback if something is likely to be broken for all or most customers. Replicated also recommends that you include each Kubernetes version that you intend to support in your smoke tests.

* **Compatibility Testing:** Because applications run on various Kubernetes distributions and configurations, it is important to test compatibility across different environments. Compatibility Matrix provides this infrastructure.
* **Canary Testing:** Before releasing to all customers, consider deploying your application to a small subset of your customer base as a _canary_ release. This lets you monitor the application's performance and stability in real-world environments, while minimizing the impact of potential issues. Compatibility Matrix enables canary testing by simulating exact (or near) customer environments and configurations to test your application with.

================
File: docs/partials/cmx/_openshift-pool.mdx
================
:::note
Due to the time it takes to start an OpenShift cluster, a warm pool of OpenShift clusters is maintained.
When available, an OpenShift cluster from the pool starts in approximately two minutes with default disks.
When starting a cluster with a disk size different than the default, an additional four minutes is added to the warm cluster start time.
:::

================
File: docs/partials/cmx/_overview.mdx
================
Replicated Compatibility Matrix quickly provisions ephemeral clusters of different Kubernetes distributions and versions, such as OpenShift, EKS, and Replicated kURL.

You can use Compatibility Matrix to get kubectl access to running clusters within minutes or less. This allows you to more easily test your code in a range of different environments before releasing to customers.

Example use cases for Compatibility Matrix include:
* Run tests before releasing a new version of your application to validate compatibility with supported Kubernetes distributions
* Get access to a cluster to develop on and quickly test changes
* Reproduce a reported issue on a customer-representative environment for troubleshooting

================
File: docs/partials/cmx/_prerequisites.mdx
================
* Create an account in the Replicated Vendor Portal. See [Creating a Vendor Account](/vendor/vendor-portal-creating-account).

* Install the Replicated CLI and then authorize the CLI using your vendor account. See [Installing the Replicated CLI](/reference/replicated-cli-installing).

* If you have a contract, you can purchase more credits by going to [**Compatibility Matrix > Buy additional credits**](https://vendor.replicated.com/compatibility-matrix). Otherwise, you can request credits by going to [**Compatibility Matrix > Request more credits**](https://vendor.replicated.com/compatibility-matrix) in the Vendor Portal. For more information, see [Billing and Credits](/vendor/testing-about#billing-and-credits).

================
File: docs/partials/cmx/_supported-clusters-overview.mdx
================
Compatibility Matrix can create clusters on virtual machines (VMs), such as kind, k3s, RKE2, and Red Hat OpenShift OKD, and also create cloud-managed clusters, such as EKS, GKE, and AKS:

* Cloud-based Kubernetes distributions are run in a Replicated managed and controlled cloud account to optimize and deliver clusters quickly and reliably. The Replicated account has control planes ready and adds a node group when you request it, making the cluster available much faster than if you try to create your own cluster with your own cloud account.

* VMs run on Replicated bare metal servers located in several data centers, including data centers physically in the European Union.

To view an up-to-date list of the available cluster distributions, including the supported Kubernetes versions, instance types, and maximum nodes for each distribution, run [`replicated cluster versions`](/reference/replicated-cli-cluster-versions).
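For example, to scope the output to a single distribution, a command like the following can be used. The `--distribution` flag and the `k3s` value shown here are illustrative; run `replicated cluster versions --help` to confirm the flags supported by your CLI version:

```bash
replicated cluster versions --distribution k3s
```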
For detailed information about the available cluster distributions, see [Supported Compatibility Matrix Cluster Types](testing-supported-clusters).

================
File: docs/partials/collab-repo/_collab-existing-user.mdx
================
If a team member adds a GitHub username to their Vendor Portal account that already exists in the collab repository, then the Vendor Portal does _not_ change the role that the existing user is assigned in the collab repository.

However, if the RBAC policy assigned to this member in the Vendor Portal later changes, or if the member is removed from the Vendor Portal team, then the Vendor Portal updates or removes the user in the collab repository accordingly.

================
File: docs/partials/collab-repo/_collab-rbac-important.mdx
================
:::important
The RBAC policy that you specify also determines the level of access that the user has to the Replicated collab repository in GitHub. By default, the Read Only policy grants the user read access to the collab repository.

For more information about managing user access to the collab repository from the Vendor Portal, see [Managing Access to the Collab Repository](team-management-github-username).
:::

================
File: docs/partials/collab-repo/_collab-rbac-resources-important.mdx
================
:::important
When you update an existing RBAC policy to add one or more `team/support-issues` resources, the GitHub role in the Replicated collab repository of every team member that is assigned to that policy and has a GitHub username saved in their account is updated accordingly.
:::

================
File: docs/partials/collab-repo/_collab-repo-about.mdx
================
The replicated-collab organization in GitHub is used for tracking and collaborating on escalations, bug reports, and feature requests that are sent by members of a Vendor Portal team to the Replicated team. Replicated creates a unique repository in the replicated-collab organization for each Vendor Portal team. Members of a Vendor Portal team submit issues to their unique collab repository on the Support page in the [Vendor Portal](https://vendor.replicated.com/support).

For more information about the collab repositories and how they are used, see [Replicated Support Paths and Processes](https://community.replicated.com/t/replicated-vendor-support-paths-and-processes/850) in _Replicated Community_.

================
File: docs/partials/config/_affixExample.mdx
================
```yaml
groups:
- name: example_settings
  title: My Example Config
  description: Configuration to serve as an example for creating your own.
  items:
  - name: username
    title: Username
    type: text
    required: true
    affix: left
  - name: password
    title: Password
    type: password
    required: true
    affix: right
```

================
File: docs/partials/config/_defaultExample.mdx
================
```yaml
- name: custom_key
  title: Set your secret key for your app
  description: Paste in your Custom Key
  items:
  - name: key
    title: Key
    type: text
    value: ""
    default: change me
```
![Default change me value displayed under the config field](/images/config-default.png)

[View a larger version of this image](/images/config-default.png)

================
File: docs/partials/config/_helpTextExample.mdx
================
```yaml
- name: http_settings
  title: HTTP Settings
  items:
  - name: http_enabled
    title: HTTP Enabled
    help_text: Check to enable the HTTP listener
    type: bool
```
![Config field with help text underneath](/images/config-help-text.png)

[View a larger version of this image](/images/config-help-text.png)

================
File: docs/partials/config/_hiddenExample.mdx
================
```yaml
- name: secret_key
  title: Secret Key
  type: password
  hidden: true
  value: "{{repl RandomString 40}}"
```

================
File: docs/partials/config/_item-types.mdx
================
- `bool`
- `dropdown`
- `file`
- `heading`
- `label`
- `password`
- `radio`
- `select_one` (Deprecated)
- `text`
- `textarea`

================
File: docs/partials/config/_nameExample.mdx
================
```yaml
- name: http_settings
  title: HTTP Settings
  items:
  - name: http_enabled
    title: HTTP Enabled
    type: bool
```

================
File: docs/partials/config/_property-when.mdx
================
It can be useful to conditionally show or hide fields so that your users are only provided the configuration options that are relevant to them. This helps to reduce user error when configuring the application. Conditional statements in the `when` property can be used to evaluate things like the user's environment, license entitlements, and configuration choices. For example:
* The Kubernetes distribution of the cluster
* If the license includes a specific feature entitlement
* The number of users that the license permits
* If the user chooses to bring their own external database, rather than using an embedded database offered with the application

You can construct conditional statements in the `when` property using KOTS template functions. KOTS template functions are a set of custom template functions based on the Go text/template library. For more information, see [About Template Functions](/reference/template-functions-about).

================
File: docs/partials/config/_randomStringNote.mdx
================
:::note
When you assign a template function that generates a value to a `value` property, you can use the `readonly` and `hidden` properties to define whether or not the generated value is ephemeral or persistent between changes to the configuration settings for the application. For more information, see [RandomString](template-functions-static-context#randomstring) in _Static Context_.
:::

================
File: docs/partials/config/_readonlyExample.mdx
================
```yaml
- name: key
  title: Key
  type: text
  value: ""
  default: change me
- name: unique_key
  title: Unique Key
  type: text
  value: "{{repl RandomString 20}}"
  readonly: true
```
![Default change me value displayed under the config field](/images/config-readonly.png)

[View a larger version of this image](/images/config-readonly.png)

================
File: docs/partials/config/_recommendedExample.mdx
================
```yaml
- name: recommended_field
  title: My recommended field
  type: bool
  default: "0"
  recommended: true
```
![config field with green recommended tag](/images/config-recommended-item.png)

[View a larger version of this image](/images/config-recommended-item.png)

================
File: docs/partials/config/_regexValidationExample.mdx
================
```
- name: smtp-settings
  title: SMTP Settings
  items:
  - name: smtp_password
    title: SMTP Password
    type: password
    required: true
    validation:
      regex:
        pattern: ^(?:[\w@#$%^&+=!*()_\-{}[\]:;"'<>,.?\/|]){8,16}$
        message: The password must be between 8 and 16 characters long and can contain a combination of uppercase letters, lowercase letters, digits, and special characters.
  - name: jwt_token
    title: JWT token
    type: file
    validation:
      regex:
        pattern: ^[A-Za-z0-9-_]+\\.[A-Za-z0-9-_]+\\.[A-Za-z0-9-_]*$
        message: Upload a file with a valid JWT token.
```

================
File: docs/partials/config/_requiredExample.mdx
================
```yaml
- name: custom_key
  title: Set your secret key for your app
  description: Paste in your Custom Key
  items:
  - name: key
    title: Key
    type: text
    value: ""
    default: change me
    required: true
```
![config field with yellow required tag](/images/config-required-item.png)

[View a larger version of this image](/images/config-required-item.png)

================
File: docs/partials/config/_typeExample.mdx
================
```yaml
- name: group_title
  title: Group Title
  items:
  - name: http_enabled
    title: HTTP Enabled
    type: bool
    default: "0"
```
![field named HTTP Enabled with disabled checkbox](/images/config-screen-bool.png)

[View a larger version of this image](/images/config-screen-bool.png)

================
File: docs/partials/config/_valueExample.mdx
================
```yaml
- name: custom_key
  title: Set your secret key for your app
  description: Paste in your Custom Key
  items:
  - name: key
    title: Key
    type: text
    value: "{{repl RandomString 20}}"
```
![config field with random string as HTML input](/images/config-value-randomstring.png)

[View a larger version of this image](/images/config-value-randomstring.png)

================
File: docs/partials/config/_when-note.mdx
================
:::note
`when` is a property of both groups and items. See [Group Properties > `when`](/reference/custom-resource-config#when) above.
:::

================
File: docs/partials/config/_when-requirements.mdx
================
* The `when` property accepts the following types of values:
  * Booleans
  * Strings that match "true", "True", "false", or "False"

  [KOTS template functions](/reference/template-functions-about) can be used to render these supported value types.
* For the `when` property to evaluate to true, the values compared in the conditional statement must match exactly without quotes.

================
File: docs/partials/config/_whenExample.mdx
================
```yaml
- name: database_settings_group
  title: Database Settings
  items:
  - name: db_type
    title: Database Type
    type: radio
    default: external
    items:
    - name: external
      title: External
    - name: embedded
      title: Embedded DB
  - name: database_host
    title: Database Hostname
    type: text
    when: repl{{ (ConfigOptionEquals "db_type" "external")}}
  - name: database_password
    title: Database Password
    type: password
    when: repl{{ (ConfigOptionEquals "db_type" "external")}}
```

![External option selected and conditional fields displayed](/images/config-when-enabled.png)

[View a larger version of this image](/images/config-when-enabled.png)

![Embedded DB option selected and no additional fields displayed](/images/config-when-disabled.png)

[View a larger version of this image](/images/config-when-disabled.png)

================
File: docs/partials/configValues/_boolExample.mdx
================
```yaml
bool_config_field:
  value: "1"
```
```yaml
bool_config_field:
  value: "0"
```

================
File: docs/partials/configValues/_config-values-procedure.mdx
================
During installation, KOTS automatically generates a ConfigValues file and saves the file in a directory called `upstream`. After installation, you can view the generated ConfigValues file in the Admin Console **View files** tab or from the command line by running the `kubectl kots get config` command.

To get the ConfigValues file from an installed application instance:

1. Install the target release in a development environment. You can either install the release with Replicated Embedded Cluster or install in an existing cluster with KOTS. For more information, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded) or [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster).

1. Depending on the installer that you used, do one of the following to get the ConfigValues for the installed instance:

   * **For Embedded Cluster installations**: In the Admin Console, go to the **View files** tab. In the filetree, go to **upstream > userdata** and open **config.yaml**, as shown in the image below:

     ![ConfigValues file in the Admin Console View Files tab](/images/admin-console-view-files-configvalues.png)

     [View a larger version of this image](/images/admin-console-view-files-configvalues.png)

   * **For KOTS installations in an existing cluster**: Run the `kubectl kots get config` command to view the generated ConfigValues file:

     ```bash
     kubectl kots get config --namespace APP_NAMESPACE --decrypt
     ```
     Where:
     * `APP_NAMESPACE` is the cluster namespace where KOTS is running.
     * The `--decrypt` flag decrypts all configuration fields with `type: password`. In the downloaded ConfigValues file, the decrypted value is stored in a `valuePlaintext` field.

     The output of the `kots get config` command shows the contents of the ConfigValues file. For more information about the `kots get config` command, including additional flags, see [kots get config](/reference/kots-cli-get-config).
+

================
File: docs/partials/configValues/_configValuesExample.mdx
================
```yaml
apiVersion: kots.io/v1beta1
kind: ConfigValues
spec:
  values:
    text_config_field_name:
      default: Example default value
      value: Example user-provided value
    boolean_config_field_name:
      value: "1"
    password_config_field_name:
      valuePlaintext: examplePassword
```

================
File: docs/partials/configValues/_fileExample.mdx
================
```yaml
file_config_field:
  filename: my-file.txt
  value: JVBERi0xLjQKMSAw...
```

================
File: docs/partials/configValues/_passwordExample.mdx
================
```yaml
password_config_field:
  valuePlaintext: myPlainTextPassword
```

================
File: docs/partials/configValues/_selectOneExample.mdx
================
```yaml
radio_config_field:
  value: option_name
```

================
File: docs/partials/configValues/_textareaExample.mdx
================
```yaml
textarea_config_field:
  value: This is a text area field value.
```

================
File: docs/partials/configValues/_textExample.mdx
================
```yaml
text_config_field:
  value: This is a text field value.
```

================
File: docs/partials/custom-domains/_wizard.mdx
================
1. In the [Vendor Portal](https://vendor.replicated.com), go to **Custom Domains**.

1. In the section for the target Replicated endpoint, click **Add your first custom domain** for your first domain, or click **Add new domain** for additional domains.

   The **Configure a custom domain** wizard opens.

1. For **Domain**, enter the custom domain. Click **Save & continue**.

1. For **Create CNAME**, copy the text string and use it to create a CNAME record in your DNS account. Click **Continue**.

1. For **Verify ownership**, copy the text string and use it to create a TXT record in your DNS account. Click **Validate & continue**.

   Your changes can take up to 24 hours to propagate.

1. For **TLS cert creation verification**, copy the text string and use it to create a TXT record in your DNS account. Click **Validate & continue**.

   Your changes can take up to 24 hours to propagate.

1. For **Use Domain**, to set the new domain as the default, click **Yes, set as default**. Otherwise, click **Not now**.

   :::note
   Replicated recommends that you do _not_ set a domain as the default until you are ready for it to be used by customers.
   :::

The Vendor Portal marks the domain as **Configured** after the verification checks for ownership and TLS certificate creation are complete.

================
File: docs/partials/custom-resource-application/_additionalImages.mdx
================
```yaml
additionalImages:
  - jenkins/jenkins:lts
```

================
File: docs/partials/custom-resource-application/_additionalNamespaces.mdx
================
```yaml
additionalNamespaces:
  - "*"
```

================
File: docs/partials/custom-resource-application/_allowRollback.mdx
================
```yaml
allowRollback: false
```

================
File: docs/partials/custom-resource-application/_graphs-templates.mdx
================
The template escape sequence is `{{}}`. Use `{{ value }}`. For more information, see [Template Reference](https://prometheus.io/docs/prometheus/latest/configuration/template_reference/) in the Prometheus documentation.
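For illustration, a sketch of a graph entry that uses the escape sequence in a legend template. The `legend` field, query, and title shown here are assumptions for the example, not taken from the partial above:

```yaml
graphs:
  - title: Available Disk
    # query and legend are illustrative; the {{ value }} escape sequence is
    # rendered by Prometheus when the graph is displayed
    query: 'node_filesystem_avail_bytes'
    legend: '{{ value }} bytes available'
```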
+ +================ +File: docs/partials/custom-resource-application/_graphs.mdx +================ +```yaml +graphs: + - title: User Signups + query: 'sum(user_signup_events_total)' +``` + +================ +File: docs/partials/custom-resource-application/_icon.mdx +================ +```yaml +icon: https://support.io/img/logo.png +``` + +================ +File: docs/partials/custom-resource-application/_minKotsVersion.mdx +================ +```yaml +minKotsVersion: "1.71.0" +``` + +================ +File: docs/partials/custom-resource-application/_ports-applicationURL.mdx +================ +
* (Optional) `ports.applicationUrl`: When set to the same URL that is specified in the `descriptor.links.url` field of the Kubernetes SIG Application custom resource, KOTS adds a link on the Admin Console dashboard where the given service can be accessed. This link automatically uses the hostname in the browser (where the Admin Console is being accessed) and appends the specified `localPort`.

  If not set, then the URL defined in the `descriptor.links.url` field of the Kubernetes SIG Application is linked on the Admin Console dashboard.

================
File: docs/partials/custom-resource-application/_ports-kurl-note.mdx
================
:::note
KOTS does not automatically create port forwards for installations on VMs or bare metal servers with Replicated Embedded Cluster or Replicated kURL. This is because it cannot be verified that the ports are secure and authenticated. Instead, Embedded Cluster or kURL creates a NodePort service to make the Admin Console accessible on a port on the node (port `8800` for kURL or port `30000` for Embedded Cluster).

You can expose additional ports on the node for Embedded Cluster or kURL installations by creating NodePort services. For more information, see [Exposing Services Using NodePorts](/vendor/kurl-nodeport-services).
:::

================
File: docs/partials/custom-resource-application/_ports-localPort.mdx
================
* `ports.localPort`: The port to map on the local workstation.

================
File: docs/partials/custom-resource-application/_ports-serviceName.mdx
================
* `ports.serviceName`: The name of the service that receives the traffic.

================
File: docs/partials/custom-resource-application/_ports-servicePort.mdx
================
* `ports.servicePort`: The `containerPort` of the Pod where the service is running.

================
File: docs/partials/custom-resource-application/_ports.mdx
================
```yaml
ports:
  - serviceName: web
    servicePort: 9000
    localPort: 9000
    applicationUrl: "http://web"
```

================
File: docs/partials/custom-resource-application/_proxyRegistryDomain.mdx
================
```yaml
proxyRegistryDomain: "proxy.mycompany.com"
```

================
File: docs/partials/custom-resource-application/_releaseNotes.mdx
================
```yaml
releaseNotes: Fixes a bug and adds a new feature.
```

================
File: docs/partials/custom-resource-application/_replicatedRegistryDomain.mdx
================
```yaml
replicatedRegistryDomain: "registry.mycompany.com"
```

================
File: docs/partials/custom-resource-application/_requireMinimalRBACPrivileges.mdx
================
```yaml
requireMinimalRBACPrivileges: false
```

================
File: docs/partials/custom-resource-application/_servicePort-note.mdx
================
:::note
Ensure that you use the `containerPort` and not the `servicePort`. The `containerPort` and `servicePort` are often the same port, though it is possible that they are different.
:::

================
File: docs/partials/custom-resource-application/_statusInformers.mdx
================
```yaml
statusInformers:
  - deployment/my-web-svc
  - deployment/my-worker
```
The following example shows excluding a specific status informer based on a user-supplied value from the Admin Console Configuration screen:
```yaml
statusInformers:
  - deployment/my-web-svc
  - '{{repl if ConfigOptionEquals "option" "value"}}deployment/my-worker{{repl else}}{{repl end}}'
```

================
File: docs/partials/custom-resource-application/_supportMinimalRBACPrivileges.mdx
================
```yaml
supportMinimalRBACPrivileges: true
```

================
File: docs/partials/custom-resource-application/_targetKotsVersion.mdx
================
```yaml
targetKotsVersion: "1.85.0"
```

================
File: docs/partials/custom-resource-application/_title.mdx
================
```yaml
title: My Application
```

================
File: docs/partials/customers/_change-channel.mdx
================
You can change the channel a customer is assigned to at any time. For installations with Replicated KOTS, when you change the customer's channel, the customer can synchronize their license in the Replicated Admin Console to fetch the latest release on the new channel and then upgrade. The Admin Console always fetches the latest release on the new channel, regardless of the presence of any releases on the channel that are marked as required.

================
File: docs/partials/customers/_download.mdx
================
You can download customer and instance data from the **Download CSV** dropdown on the **Customers** page:

![Download CSV button in the Customers page](/images/customers-download-csv.png)

[View a larger version of this image](/images/customers-download-csv.png)

The **Download CSV** dropdown has the following options:

* **Customers**: Includes details about your customers, such as the customer's channel assignment, license entitlements, expiration date, last active timestamp, and more.
+

* (Recommended) **Customers + Instances**: Includes details about the instances associated with each customer, such as the Kubernetes distribution and cloud provider of the cluster where the instance is running, the most recent application instance status, whether the instance is active or inactive, and more. The **Customers + Instances** data is a superset of the customer data, and is the recommended download for most use cases.

You can also export customer instance data as JSON using the Vendor API v3 `customer_instances` endpoint. For more information, see [Get customer instance report in CSV or JSON format](https://replicated-vendor-api.readme.io/reference/listappcustomerinstances) in the Vendor API v3 documentation.

================
File: docs/partials/embedded-cluster/_definition.mdx
================
Replicated Embedded Cluster allows you to distribute a Kubernetes cluster and your application together as a single appliance, making it easy for enterprise users to install, update, and manage the application and the cluster in tandem. Embedded Cluster is based on the open source Kubernetes distribution k0s. For more information, see the [k0s documentation](https://docs.k0sproject.io/stable/).

For software vendors, Embedded Cluster provides a Config for defining characteristics of the cluster that will be created in the customer environment. Additionally, each version of Embedded Cluster includes a specific version of Replicated KOTS, ensuring compatibility between KOTS and the cluster. For enterprise users, cluster updates are done automatically at the same time as application updates, allowing users to more easily keep the cluster up-to-date without needing to use kubectl.

================
File: docs/partials/embedded-cluster/_ec-config.mdx
================
```yaml
apiVersion: embeddedcluster.replicated.com/v1beta1
kind: Config
spec:
  version: 2.1.3+k8s-1.30
```

================
File: docs/partials/embedded-cluster/_multi-node-ha-arch.mdx
================
The following diagram shows the architecture of an HA multi-node Embedded Cluster installation:

![Embedded Cluster multi-node architecture with high availability](/images/embedded-architecture-multi-node-ha.png)

[View a larger version of this image](/images/embedded-architecture-multi-node-ha.png)

As shown in the diagram above, in HA installations with Embedded Cluster:
* A single replica of the Embedded Cluster Operator is deployed and runs on a controller node.
* A single replica of the KOTS Admin Console is deployed and runs on a controller node.
* Three replicas of rqlite are deployed in the kotsadm namespace. Rqlite is used by KOTS to store information such as support bundles, version history, application metadata, and other small amounts of data needed to manage the application.
* For installations that include disaster recovery, the Velero pod is deployed on one node. The Velero Node Agent runs on each node in the cluster. The Node Agent is a Kubernetes DaemonSet that performs backup and restore tasks such as creating snapshots and transferring data during restores.
* For air gap installations, two replicas of the air gap image registry are deployed.

Any Helm [`extensions`](/reference/embedded-config#extensions) that you include in the Embedded Cluster Config are installed in the cluster depending on the given chart and whether or not it is configured to be deployed with high availability.
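As a rough way to see this layout on a running cluster (pod and node names will vary; this assumes the default `kotsadm` namespace named above):

```bash
# List the Admin Console, Operator, and rqlite replicas described above,
# including the node that each pod is scheduled on
kubectl get pods -n kotsadm -o wide
```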
+ +================ +File: docs/partials/embedded-cluster/_port-reqs.mdx +================ +This section lists the ports used by Embedded Cluster. These ports must be open and available for both single- and multi-node installations. + +#### Ports Used by Local Processes + +The following ports must be open and available for use by local processes running on the same node. It is not necessary to create firewall openings for these ports. + +* 2379/TCP +* 9099/TCP +* 10248/TCP +* 10257/TCP +* 10259/TCP + +#### Ports Required for Bidirectional Communication Between Nodes + +The following ports are used for bidirectional communication between nodes. + +For multi-node installations, create firewall openings between nodes for these ports. + +For single-node installations, ensure that there are no other processes using these ports. Although there is no communication between nodes in single-node installations, these ports are still required. + +* 2380/TCP +* 4789/UDP +* 6443/TCP +* 7443/TCP +* 9091/TCP +* 9443/TCP +* 10249/TCP +* 10250/TCP +* 10256/TCP + +#### Admin Console Port + +The KOTS Admin Console requires that port 30000/TCP is open and available. Create a firewall opening for port 30000/TCP so that the Admin Console can be accessed by the end user. + +Additionally, port 30000 must be accessible by nodes joining the cluster. + +If port 30000 is occupied, you can select a different port for the Admin Console during installation. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install). + +#### LAM Port + +The Local Artifact Mirror (LAM) requires that port 50000/TCP is open and available. + +If port 50000 is occupied, you can select a different port for the LAM during installation. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install). + +================ +File: docs/partials/embedded-cluster/_proxy-install-limitations.mdx +================ +**Limitations:** + +* If any of your [Helm extensions](/reference/embedded-config#extensions) make requests to the internet, the given charts need to be manually configured so that those requests are made to the user-supplied proxy server instead. Typically, this requires updating the Helm values to set HTTP proxy, HTTPS proxy, and no proxy. Note that this limitation applies only to network requests made by your Helm extensions. The proxy settings supplied to the install command are used to pull the containers required to run your Helm extensions. + +* Proxy settings cannot be changed after installation or during upgrade. + +================ +File: docs/partials/embedded-cluster/_proxy-install-reqs.mdx +================ +**Requirement:** Proxy installations require Embedded Cluster 1.5.1 or later with Kubernetes 1.29 or later. + +================ +File: docs/partials/embedded-cluster/_requirements.mdx +================ +* Linux operating system + +* x86-64 architecture + +* systemd + +* At least 2GB of memory and 2 CPU cores + +* The disk on the host must have a maximum P99 write latency of 10 ms. This supports etcd performance and stability. For more information about the disk write latency requirements for etcd, see [Disks](https://etcd.io/docs/latest/op-guide/hardware/#disks) in _Hardware recommendations_ and [What does the etcd warning “failed to send out heartbeat on time” mean?](https://etcd.io/docs/latest/faq/) in the etcd documentation. 
+

* The filesystem at `/var/lib/embedded-cluster` must have 40Gi or more of total space and must be less than 80% full

  The directory used for data storage can be changed by passing the `--data-dir` flag with the Embedded Cluster `install` command. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install).

  Note that in addition to the primary `/var/lib/embedded-cluster` directory, Embedded Cluster creates directories and files in the following locations:

  - `/etc/cni`
  - `/etc/k0s`
  - `/opt/cni`
  - `/opt/containerd`
  - `/run/calico`
  - `/run/containerd`
  - `/run/k0s`
  - `/sys/fs/cgroup/kubepods`
  - `/sys/fs/cgroup/system.slice/containerd.service`
  - `/sys/fs/cgroup/system.slice/k0scontroller.service`
  - `/usr/libexec/k0s`
  - `/var/lib/calico`
  - `/var/lib/cni`
  - `/var/lib/containers`
  - `/var/lib/kubelet`
  - `/var/log/calico`
  - `/var/log/containers`
  - `/var/log/pods`
  - `/usr/local/bin/k0s`

* (Online installations only) Access to replicated.app and proxy.replicated.com, or to your custom domain for each of these endpoints

* Embedded Cluster is based on k0s, so all k0s system requirements and external runtime dependencies apply. See [System requirements](https://docs.k0sproject.io/stable/system-requirements/) and [External runtime dependencies](https://docs.k0sproject.io/stable/external-runtime-deps/) in the k0s documentation.

================
File: docs/partials/embedded-cluster/_update-air-gap-admin-console.mdx
================
1. On a machine with browser access (for example, where you accessed the Admin Console to configure the application), download the air gap bundle for the new version using the same curl command that you used to install. For example:

   ```bash
   curl -f https://replicated.app/embedded/APP_SLUG/CHANNEL_SLUG?airgap=true -H "Authorization: LICENSE_ID" -o APP_SLUG-CHANNEL_SLUG.tgz
   ```
   For more information, see [Install](/enterprise/installing-embedded-air-gap#install).

1. Untar the tarball. For example:

   ```bash
   tar -xvzf APP_SLUG-CHANNEL_SLUG.tgz
   ```
   Ensure that the `.airgap` air gap bundle is present.

1. On the same machine, use a browser to access the Admin Console.

1. On the **Version history** page, click **Upload new version** and choose the `.airgap` air gap bundle you downloaded.

1. When the air gap bundle has been uploaded, click **Deploy** next to the new version.

1. On the **Config** screen of the upgrade wizard, make any necessary changes to the configuration for the application. Click **Next**.

   ![Config screen in the upgrade wizard](/images/ec-upgrade-wizard-config.png)

   [View a larger version of this image](/images/ec-upgrade-wizard-config.png)

   :::note
   Any changes made on the **Config** screen of the upgrade wizard are not set until the new version is deployed.
   :::

1. On the **Preflight** screen, view the results of the preflight checks.

   ![Preflight screen in the upgrade wizard](/images/ec-upgrade-wizard-preflights.png)

   [View a larger version of this image](/images/ec-upgrade-wizard-preflights.png)

1. On the **Confirm** screen, click **Deploy**.

   ![Confirmation screen in the upgrade wizard](/images/ec-upgrade-wizard-confirm.png)

   [View a larger version of this image](/images/ec-upgrade-wizard-confirm.png)

================
File: docs/partials/embedded-cluster/_update-air-gap-cli.mdx
================
1. 
SSH onto a controller node in the cluster and download the air gap bundle for the new version using the same curl command that you used to install. For example:

   ```bash
   curl -f https://replicated.app/embedded/APP_SLUG/CHANNEL_SLUG?airgap=true -H "Authorization: LICENSE_ID" -o APP_SLUG-CHANNEL_SLUG.tgz
   ```

   For more information, see [Install](/enterprise/installing-embedded-air-gap#install).

1. Untar the tarball. For example:

   ```bash
   tar -xvzf APP_SLUG-CHANNEL_SLUG.tgz
   ```
   Ensure that the `.airgap` air gap bundle is present.

1. Use the `update` command to upload the air gap bundle and make this new version available in the Admin Console. For example:

   ```bash
   ./APP_SLUG update --airgap-bundle APP_SLUG.airgap
   ```

1. When the air gap bundle has been uploaded, open a browser on the same machine and go to the Admin Console.

1. On the **Version history** page, click **Deploy** next to the new version.

   ![Version history page](/images/ec-upgrade-version-history.png)

   [View a larger version of this image](/images/ec-upgrade-version-history.png)

1. On the **Config** screen of the upgrade wizard, make any necessary changes to the configuration for the application. Click **Next**.

   ![Config screen in the upgrade wizard](/images/ec-upgrade-wizard-config.png)

   [View a larger version of this image](/images/ec-upgrade-wizard-config.png)

   :::note
   Any changes made on the **Config** screen of the upgrade wizard are not set until the new version is deployed.
   :::

1. On the **Preflight** screen, view the results of the preflight checks.

   ![Preflight screen in the upgrade wizard](/images/ec-upgrade-wizard-preflights.png)

   [View a larger version of this image](/images/ec-upgrade-wizard-preflights.png)

1. On the **Confirm** screen, click **Deploy**.

   ![Confirmation screen in the upgrade wizard](/images/ec-upgrade-wizard-confirm.png)

   [View a larger version of this image](/images/ec-upgrade-wizard-confirm.png)

================
File: docs/partials/embedded-cluster/_update-air-gap-overview.mdx
================
To upgrade an installation, new air gap bundles can be uploaded to the Admin Console from the browser or with the Embedded Cluster binary from the command line.

Using the binary is faster and allows the user to download the air gap bundle directly to the machine where the Embedded Cluster is running. Using the browser is slower because the user must first download the air gap bundle to a machine with a browser and then upload that bundle to the Admin Console before it can be processed.

================
File: docs/partials/embedded-cluster/_update-overview.mdx
================
When you update an application installed with Embedded Cluster, you update both the application and the cluster infrastructure together, including Kubernetes, KOTS, and other components running in the cluster. There is no need or mechanism to update the infrastructure on its own.

When you deploy a new version, any changes to the cluster are deployed first. The Admin Console waits until the cluster is ready before updating the application.

Any changes made to the Embedded Cluster Config, including changes to the Embedded Cluster version, Helm extensions, and unsupported overrides, trigger a cluster update.

When performing an upgrade with Embedded Cluster, the user is able to change the application config before deploying the new version. Additionally, the user's license is synced automatically.
Users can also make config changes and sync their license outside of performing an update. This requires deploying a new version to apply the config change or license sync.

================
File: docs/partials/embedded-cluster/_warning-do-not-downgrade.mdx
================
:::important
Do not downgrade the Embedded Cluster version. This is not supported but is not prohibited, and it can lead to unexpected behavior.
:::

================
File: docs/partials/getting-started/_create-promote-release.mdx
================
Create a new release and promote it to the Unstable channel. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Managing Releases with the CLI](releases-creating-cli).

================
File: docs/partials/getting-started/_csdl-overview.mdx
================
Commercial software distribution is the business process that independent software vendors (ISVs) use to enable enterprise customers to self-host a fully private instance of the vendor's application in an environment controlled by the customer.

Replicated has developed the Commercial Software Distribution Lifecycle to represent the stages that are essential for every company that wants to deliver their software securely and reliably to customer controlled environments.

This lifecycle was inspired by the DevOps lifecycle and the Software Development Lifecycle (SDLC), but it focuses on the unique things that must be done to successfully distribute third party, commercial software to tens, hundreds, or thousands of enterprise customers.

================
File: docs/partials/getting-started/_gitea-ec-config.mdx
================
```yaml
apiVersion: embeddedcluster.replicated.com/v1beta1
kind: Config
spec:
  version: 2.1.3+k8s-1.30
```

================
File: docs/partials/getting-started/_gitea-helmchart-cr-ec.mdx
================
```yaml
apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: gitea
spec:
  # chart identifies a matching chart from a .tgz
  chart:
    name: gitea
    chartVersion: 1.0.6
  optionalValues:
    - when: 'repl{{ eq Distribution "embedded-cluster" }}'
      recursiveMerge: false
      values:
        service:
          type: NodePort
          nodePorts:
            http: "32000"
```

================
File: docs/partials/getting-started/_gitea-helmchart-cr.mdx
================
```yaml
apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: gitea
spec:
  # chart identifies a matching chart from a .tgz
  chart:
    name: gitea
    chartVersion: 1.0.6
```

================
File: docs/partials/getting-started/_gitea-k8s-app-cr.mdx
================
```yaml
apiVersion: app.k8s.io/v1beta1
kind: Application
metadata:
  name: "gitea"
spec:
  descriptor:
    links:
      - description: Open App
        # needs to match applicationUrl in kots-app.yaml
        url: "http://gitea"
```

================
File: docs/partials/getting-started/_gitea-kots-app-cr-ec.mdx
================
```yaml
apiVersion: kots.io/v1beta1
kind: Application
metadata:
  name: gitea
spec:
  title: Gitea
  statusInformers:
    - deployment/gitea
  ports:
    - serviceName: "gitea"
      servicePort: 3000
      localPort: 32000
      applicationUrl: "http://gitea"
  icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/kubernetes/icon/color/kubernetes-icon-color.png
```

================
File: docs/partials/getting-started/_gitea-kots-app-cr.mdx
================
```yaml
apiVersion: kots.io/v1beta1
kind: Application
metadata:
  name: gitea
spec:
  title: Gitea
  statusInformers:
    - deployment/gitea
  ports:
    - serviceName: "gitea"
      servicePort: 3000
      localPort: 8888
      applicationUrl: "http://gitea"
  icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/kubernetes/icon/color/kubernetes-icon-color.png
```

================
File: docs/partials/getting-started/_grafana-config.mdx
================
```yaml
apiVersion: kots.io/v1beta1
kind: Config
metadata:
  name: grafana-config
spec:
  groups:
    - name: grafana
      title: Grafana
      description: Grafana Configuration
      items:
        - name: admin_user
          title: Admin User
          type: text
          default: 'admin'
        - name: admin_password
          title: Admin Password
          type: password
          default: 'admin'
```

================
File: docs/partials/getting-started/_grafana-helmchart.mdx
================
```yaml
apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: grafana
spec:
  # chart identifies a matching chart from a .tgz
  chart:
    name: grafana
    chartVersion: 9.6.5
  values:
    admin:
      user: "repl{{ ConfigOption `admin_user`}}"
      password: "repl{{ ConfigOption `admin_password`}}"
```

================
File: docs/partials/getting-started/_grafana-k8s-app.mdx
================
```yaml
apiVersion: app.k8s.io/v1beta1
kind: Application
metadata:
  name: "grafana"
spec:
  descriptor:
    links:
      - description: Open App
        # needs to match applicationUrl in kots-app.yaml
        url: "http://grafana"
```

================
File: docs/partials/getting-started/_grafana-kots-app.mdx
================
```yaml
apiVersion: kots.io/v1beta1
kind: Application
metadata:
  name: grafana
spec:
  title: Grafana
  statusInformers:
    - deployment/grafana
  ports:
    - serviceName: "grafana"
      servicePort: 3000
      localPort: 8888
      applicationUrl: "http://grafana"
  icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/kubernetes/icon/color/kubernetes-icon-color.png
```

================
File: docs/partials/getting-started/_kubernetes-training.mdx
================
:::note
This tutorial assumes that you have a working knowledge of Kubernetes. For an introduction to Kubernetes and free training resources, see [Training](https://kubernetes.io/training/) in the Kubernetes documentation.
:::

================
File: docs/partials/getting-started/_labs-intro.mdx
================
Replicated also offers a sandbox environment where you can complete several beginner, intermediate, and advanced labs. The sandbox environment automatically provisions the required Kubernetes cluster or VM where you will install a sample application as part of the labs.

To get started with an introductory lab, see [Deploy a Hello World Application with Replicated](https://play.instruqt.com/replicated/tracks/hello-world).

================
File: docs/partials/getting-started/_related-topics.mdx
================
For more information about the subjects in the getting started tutorials, see the following topics:

* [Installing the Replicated CLI](/reference/replicated-cli-installing)
* [Linter Rules](/reference/linter)
* [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster)
* [Performing Updates in Existing Clusters](/enterprise/updating-app-manager)

================
File: docs/partials/getting-started/_replicated-definition.mdx
================
Replicated is a commercial software distribution platform.
Independent software vendors (ISVs) can use features of the Replicated Platform to distribute modern commercial software into complex, customer-controlled environments, including on-prem and air gap.

================
File: docs/partials/getting-started/_test-your-changes.mdx
================
Install the release to test your changes. For Embedded Cluster installations, see [Performing Updates in Embedded Clusters](/enterprise/updating-embedded). For existing cluster installations with KOTS, see [Performing Updates in Existing Clusters](/enterprise/updating-app-manager).

================
File: docs/partials/getting-started/_tutorial-intro.mdx
================
This tutorial introduces you to the Replicated features for software vendors and their enterprise users. It is designed to familiarize you with the key concepts and processes that you use as a software vendor when you package and distribute your application with Replicated.

In this tutorial, you use a set of sample manifest files for a basic NGINX application to learn how to:
* Create and promote releases for an application as a software vendor
* Install and update an application on a Kubernetes cluster as an enterprise user

================
File: docs/partials/getting-started/_vm-requirements.mdx
================
For this tutorial, the VM must meet the following requirements:

  * Ubuntu 18.04
  * At least 8 GB of RAM
  * 4 CPU cores
  * At least 50GB of disk space

  :::note
  If you use a virtual machine that is behind a firewall, make sure that port 8800, along with any other ports you need to access over the internet, is allowed to accept traffic. GCP and AWS typically require firewall rule creation to expose ports.
  :::

For the complete list of system requirements for kURL, see [kURL Requirements](/enterprise/installing-general-requirements#kurl-requirements) in _Installation Requirements_.

================
File: docs/partials/gitops/_gitops-not-recommended.mdx
================
:::important
KOTS Auto-GitOps is a legacy feature and is **not recommended** for use. For modern enterprise customers that prefer software deployment processes that use CI/CD pipelines, Replicated recommends the [Helm CLI installation method](/vendor/install-with-helm), which is more commonly used in these types of enterprise environments.
:::

================
File: docs/partials/helm/_gitops-limitation.mdx
================
The KOTS Auto-GitOps workflow is not supported for installations with the HelmChart custom resource `apiVersion: kots.io/v1beta2` or the HelmChart custom resource `apiVersion: kots.io/v1beta1` with `useHelmInstall: true`.

================
File: docs/partials/helm/_helm-builder-requirements.mdx
================
The `builder` key has the following requirements and recommendations:
* Replicated recommends that you include only the minimum Helm values in the `builder` key that are required to template the Helm chart with the correct image tags.
* Use only static, or _hardcoded_, values in the `builder` key. You cannot use template functions in the `builder` key because values in the `builder` key are not rendered in a customer environment.
* Any `required` Helm values that need to be set to render the chart templates must have a value supplied in the `builder` key. For more information about the Helm `required` function, see [Using the 'required' function](https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-required-function) in the Helm documentation.
+

================
File: docs/partials/helm/_helm-cr-builder-airgap-intro.mdx
================
In the `builder` key, you provide the minimum Helm values required to render the chart templates so that the output includes any images that must be included in the air gap bundle. The Vendor Portal uses these values to render the Helm chart templates when building the `.airgap` bundle for the release.

================
File: docs/partials/helm/_helm-cr-builder-example.mdx
================
For example, a Helm chart might include a conditional PostgreSQL Deployment, as shown in the Helm template below:

```yaml
{{- if .Values.postgresql.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: postgresql
  labels:
    app: postgresql
spec:
  selector:
    matchLabels:
      app: postgresql
  template:
    metadata:
      labels:
        app: postgresql
    spec:
      containers:
        - name: postgresql
          image: "postgres:10.17"
          ports:
            - name: postgresql
              containerPort: 80
# ...
{{- end }}
```

To ensure that the `postgresql` image is included in the air gap bundle for the release, the `postgresql.enabled` value is added to the `builder` key of the HelmChart custom resource and is hardcoded to `true`:

```yaml
apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: samplechart
spec:
  chart:
    name: samplechart
    chartVersion: 3.1.7
  values:
    postgresql:
      enabled: repl{{ ConfigOptionEquals "postgres_type" "embedded_postgres"}}
  builder:
    postgresql:
      enabled: true
```

================
File: docs/partials/helm/_helm-cr-chart-name.mdx
================
The name of the chart. This value must exactly match the `name` field from a `Chart.yaml` in a `.tgz` chart archive that is also included in the release. If the names do not match, then the installation can fail with an error.

================
File: docs/partials/helm/_helm-cr-chart-release-name.mdx
================
Specifies the release name to use when installing this instance of the Helm chart. Defaults to the chart name.

The release name must be unique across all charts deployed in the namespace. To deploy multiple instances of the same Helm chart in a release, you must add an additional HelmChart custom resource with a unique release name for each instance of the Helm chart.

Must be a valid Helm release name that matches regex `^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$` and is no longer than 53 characters.

================
File: docs/partials/helm/_helm-cr-chart-version.mdx
================
The version of the chart. This value must match the `version` field from a `Chart.yaml` in a `.tgz` chart archive that is also included in the release.

================
File: docs/partials/helm/_helm-cr-chart.mdx
================
The `chart` key allows for a mapping between the data in this definition and the chart archive itself.
More than one `kind: HelmChart` can reference a single chart archive, if different settings are needed.

================
File: docs/partials/helm/_helm-cr-exclude.mdx
================
The `exclude` attribute is used to make a chart optional. The `exclude` attribute can be parsed by template functions.

When Replicated KOTS processes Helm charts, it excludes the entire chart if the output of the `exclude` field can be parsed as a boolean evaluating to `true`.
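For example, a minimal sketch (the `install_postgres` config option is hypothetical) where the chart is excluded whenever the user disables the option:

```yaml
apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: postgresql
spec:
  chart:
    name: postgresql
    chartVersion: 12.1.2
  # the chart is skipped entirely when this renders to "true"
  exclude: "repl{{ ConfigOptionEquals `install_postgres` `0` }}"
```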
+ +For more information about optional charts, template functions, and how KOTS processes Helm charts, see: + +* [Optional Charts](/vendor/helm-optional-charts) +* [About Template Function Contexts](template-functions-about) +* [About Distributing Helm Charts with KOTS](/vendor/helm-native-about) + +================ +File: docs/partials/helm/_helm-cr-namespace.mdx +================ +The `namespace` key specifies an alternative namespace where Replicated KOTS installs the Helm chart. **Default:** The Helm chart is installed in the same namespace as the Admin Console. The `namespace` attribute can be parsed by template functions. For more information about template functions, see [About template function contexts](template-functions-about). + + +If you specify a namespace in the HelmChart `namespace` field, you must also include the same namespace in the `additionalNamespaces` field of the Application custom resource manifest file. KOTS creates the namespaces listed in the `additionalNamespaces` field during installation. For more information, see [additionalNamespaces](custom-resource-application#additionalnamespaces) in the _Application_ reference. + +================ +File: docs/partials/helm/_helm-cr-optional-values-recursive-merge.mdx +================ +The `optionalValues.recursiveMerge` boolean defines how KOTS merges `values` and `optionalValues`: + +* When `optionalValues.recursiveMerge` is false, the top level keys in `optionalValues` override the top level keys in `values`. By default, `optionalValues.recursiveMerge` is set to false. + +* When `optionalValues.recursiveMerge` is true, all keys from `values` and `optionalValues` are included. In the case of a conflict where there is a matching key in `optionalValues` and `values`, KOTS uses the value of the key from `optionalValues`. + +================ +File: docs/partials/helm/_helm-cr-optional-values-when.mdx +================ +The `optionalValues.when` field defines a conditional statement that must evaluate to true for the given values to be set. Evaluation of the conditional in the `optionalValues.when` field is deferred until render time in the customer environment. + +Use KOTS template functions to write the `optionalValues.when` conditional statement. The following example shows a conditional statement for selecting a database option on the Admin Console configuration screen: + +```yaml +optionalValues: + - when: repl{{ ConfigOptionEquals "postgres_type" "external_postgres"}} +``` + +For more information about using KOTS template functions, see [About Template Functions](/reference/template-functions-about). + +================ +File: docs/partials/helm/_helm-cr-optional-values.mdx +================ +The `optionalValues` key can be used to set values in the Helm chart `values.yaml` file when a given conditional statement evaluates to true. For example, if a customer chooses to include an optional application component in their deployment, it might be necessary to include Helm chart values related to the optional component. + +`optionalValues` includes the following properties: + +* `optionalValues.when`: Defines a conditional statement using KOTS template functions. If `optionalValues.when` evaluates to true, then the values specified in `optionalValues` are set. + +* `optionalValues.recursiveMerge`: Defines how `optionalValues` is merged with `values`. + +* `optionalValues.values`: An array of key-value pairs. 
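To make the merge behavior concrete, here is a small sketch (the `postgresql` values are illustrative only):

```yaml
values:
  postgresql:
    enabled: true
    image: postgres:14
optionalValues:
  - when: "repl{{ ConfigOptionEquals `postgres_type` `external_postgres`}}"
    recursiveMerge: false
    values:
      postgresql:
        enabled: false
# With recursiveMerge: false, the top-level postgresql key from optionalValues
# replaces the one from values, so postgresql.image is dropped from the result.
# With recursiveMerge: true, the keys are combined: enabled is taken from
# optionalValues (which wins conflicts) and image is kept from values.
```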
+ +================ +File: docs/partials/helm/_helm-cr-upgrade-flags.mdx +================ +Specifies additional flags to pass to the `helm upgrade` command for charts. These flags are passed in addition to any flags Replicated KOTS passes by default. The values specified here take precedence if KOTS already passes the same flag. The `helmUpgradeFlags` attribute can be parsed by template functions. For more information about template functions, see [About template function contexts](template-functions-about). + +KOTS uses `helm upgrade` for _all_ deployments of an application, not just upgrades, by specifying the `--install` flag. For non-boolean flags that require an additional argument, such as `--timeout 1200s`, you must use an equal sign (`=`) or specify the additional argument separately in the array. + +**Example:** + +```yaml +helmUpgradeFlags: + - --timeout + - 1200s + - --history-max=15 +``` + +================ +File: docs/partials/helm/_helm-cr-values.mdx +================ +The `values` key can be used to set or delete existing values in the Helm chart `values.yaml` file. Any values that you include in the `values` key must match values in the Helm chart `values.yaml`. For example, `spec.values.images.pullSecret` in the HelmChart custom resource matches `images.pullSecret` in the Helm chart `values.yaml`. + +During installation or upgrade with KOTS, `values` is merged with the Helm chart `values.yaml` in the chart archive. Only include values in the `values` key that you want to set or delete. + +================ +File: docs/partials/helm/_helm-cr-weight-limitation.mdx +================ +The `weight` field is _not_ supported for HelmChart custom resources with `useHelmInstall: false`. + +================ +File: docs/partials/helm/_helm-cr-weight.mdx +================ +Determines the order in which KOTS applies the Helm chart. Charts are applied by weight in ascending order, with lower weights applied first. **Supported values:** Positive or negative integers. **Default:** `0` + +In KOTS v1.99.0 and later, `weight` also determines the order in which charts are uninstalled. Charts are uninstalled by weight in descending order, with higher weights uninstalled first. For more information about uninstalling applications, see [remove](kots-cli-remove) in _KOTS CLI_. + +For more information, see [Orchestrating Resource Deployment](/vendor/orchestrating-resource-deployment). + +================ +File: docs/partials/helm/_helm-definition.mdx +================ +Helm is a popular open source package manager for Kubernetes applications. Many ISVs use Helm to configure and deploy Kubernetes applications because it provides a consistent, reusable, and sharable packaging format. For more information, see the [Helm documentation](https://helm.sh/docs). + +================ +File: docs/partials/helm/_helm-install-beta.mdx +================ +The Helm installation method is Beta and is not recommended for production releases. The features and availability of the Helm installation method are subject to change. + +================ +File: docs/partials/helm/_helm-install-prereqs.mdx +================ +* The customer used to install must have a valid email address. This email address is only used as a username for the Replicated registry and is never contacted. For more information about creating and editing customers in the Vendor Portal, see [Creating a Customer](/vendor/releases-creating-customer). + +* The customer used to install must have the **Existing Cluster (Helm CLI)** install type enabled. 
For more information about enabling install types for customers in the Vendor Portal, see [Managing Install Types for a License](licenses-install-types). + +* To ensure that the Replicated proxy registry can be used to grant proxy access to your application images during Helm installations, you must create an image pull secret for the proxy registry and add it to your Helm chart. To do so, follow the steps in [Using the Proxy Registry with Helm Installations](/vendor/helm-image-registry). + +* Declare the SDK as a dependency in your Helm chart. For more information, see [Install the SDK as a Subchart](replicated-sdk-installing#install-the-sdk-as-a-subchart) in _Installing the Replicated SDK_. + +================ +File: docs/partials/helm/_helm-package.mdx +================ +```bash +helm package -u PATH_TO_CHART +``` +Where: +* `-u` or `--dependency-update` is an option for the `helm package` command that updates chart dependencies before packaging. For more information, see [Helm Package](https://helm.sh/docs/helm/helm_package/) in the Helm documentation. +* `PATH_TO_CHART` is the path to the Helm chart in your local directory. For example, `helm package -u .`. + +The Helm chart, including any dependencies, is packaged and copied to your current directory in a `.tgz` file. The file uses the naming convention: `CHART_NAME-VERSION.tgz`. For example, `postgresql-8.1.2.tgz`. + +================ +File: docs/partials/helm/_helm-template-limitation.mdx +================ +Helm's `lookup` function and some values in the built-in `Capabilities` object are not supported with the `kots.io/v1beta1` HelmChart custom resource. + + This is because KOTS uses the `helm template` command to render chart templates locally. During rendering, Helm does not have access to the cluster where the chart will be installed. For more information, see [Kubernetes and Chart Functions](https://helm.sh/docs/chart_template_guide/function_list/#kubernetes-and-chart-functions) in the Helm documentation. + +================ +File: docs/partials/helm/_helm-version-limitation.mdx +================ +Support for Helm v2, including security patches, ended on November 13, 2020. If you specified `helmVersion: v2` in any HelmChart custom resources, update your references to v3. By default, KOTS uses Helm v3 to process all Helm charts. + +================ +File: docs/partials/helm/_hook-weights-limitation.mdx +================ +Hook weights below -9999 are not supported. All hook weights must be set to a value above -9999 to ensure the Replicated image pull secret is deployed before any resources are pulled. + +================ +File: docs/partials/helm/_hooks-limitation.mdx +================ +The following hooks are not supported and are ignored if they are present: + * `test` + * `pre-rollback` + * `post-rollback` + +================ +File: docs/partials/helm/_installer-only-annotation.mdx +================ +Any other Kubernetes resources in the release (such as Kubernetes Deployments or Services) must include the `kots.io/installer-only` annotation. + +The `kots.io/installer-only` annotation indicates that the Kubernetes resource is used only by the Replicated installers (Embedded Cluster, KOTS, and kURL). 
+

Example:
```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-service
  annotations:
    kots.io/installer-only: "true"
```

================
File: docs/partials/helm/_kots-helm-cr-description.mdx
================
To deploy Helm charts, KOTS requires a unique HelmChart custom resource for each Helm chart `.tgz` archive in the release. You configure the HelmChart custom resource to provide the necessary instructions to KOTS for processing and preparing the chart for deployment. Additionally, the HelmChart custom resource creates a mapping between KOTS and your Helm chart to allow Helm values to be dynamically set during installation or upgrade.

================
File: docs/partials/helm/_replicated-deprecated.mdx
================
The HelmChart custom resource `apiVersion: kots.io/v1beta1` is deprecated. For installations with Replicated KOTS v1.99.0 and later, use the HelmChart custom resource with `apiVersion: kots.io/v1beta2` instead. See [HelmChart v2](/reference/custom-resource-helmchart-v2) and [Configuring the HelmChart Custom Resource v2](/vendor/helm-native-v2-using).

================
File: docs/partials/helm/_replicated-helm-migration.mdx
================
You cannot migrate existing Helm charts in existing installations from the `useHelmInstall: false` installation method to a different method. If KOTS already installed the Helm chart previously in the environment using a HelmChart custom resource with `apiVersion: kots.io/v1beta1` and `useHelmInstall: false`, then KOTS does not attempt to install the chart using a different method and displays the following error message: `Deployment method for chart has changed`.

To change the installation method from `useHelmInstall: false` to a different method, the user must reinstall your application in a new environment.

================
File: docs/partials/helm/_set-values-config-example.mdx
================
Using KOTS template functions in the [Config](/reference/template-functions-config-context) context allows you to set Helm values based on user-supplied values from the KOTS Admin Console configuration page.

For example, the following Helm chart `values.yaml` file contains `postgresql.enabled`, which is set to `false`:

```yaml
# Helm chart values.yaml
postgresql:
  enabled: false
```
The following HelmChart custom resource contains a mapping to `postgresql.enabled` in its `values` key:

```yaml
# KOTS HelmChart custom resource

apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: samplechart
spec:
  chart:
    name: samplechart
    chartVersion: 3.1.7

  releaseName: samplechart-release-1

  values:
    postgresql:
      enabled: repl{{ ConfigOptionEquals `postgres_type` `embedded_postgres`}}
```

The `values.postgresql.enabled` field in the HelmChart custom resource above uses the Replicated [ConfigOptionEquals](/reference/template-functions-config-context#configoptionequals) template function to evaluate the user's selection for a `postgres_type` configuration option.

During installation or upgrade, the template function is rendered to true or false based on the user's selection. Then, KOTS sets the matching `postgresql.enabled` value in the Helm chart `values.yaml` file accordingly.

================
File: docs/partials/helm/_set-values-license-example.mdx
================
Using KOTS template functions in the [License](/reference/template-functions-license-context) context allows you to set Helm values based on the unique license file used for installation or upgrade.
+

For example, the following HelmChart custom resource uses the Replicated [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue) template function to evaluate whether the license has the boolean `newFeatureEntitlement` field set to `true`:

```yaml
# KOTS HelmChart custom resource

apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: samplechart
spec:
  chart:
    name: samplechart
    chartVersion: 3.1.7

  releaseName: samplechart-release-1

  values:
    newFeature:
      enabled: repl{{ LicenseFieldValue "newFeatureEntitlement" }}
```

During installation or upgrade, the LicenseFieldValue template function is rendered based on the user's license. Then, KOTS sets the matching `newFeature.enabled` value in the Helm chart `values.yaml` file accordingly.

================
File: docs/partials/helm/_v2-native-helm-cr-example.mdx
================
```yaml
apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: samplechart
spec:
  # chart identifies a matching chart from a .tgz
  chart:
    name: samplechart
    chartVersion: 3.1.7

  releaseName: samplechart-release-1

  exclude: "repl{{ ConfigOptionEquals `include_chart` `include_chart_no`}}"

  # weight determines the order that charts are applied, with lower weights first.
  weight: 42

  # helmUpgradeFlags specifies additional flags to pass to the `helm upgrade` command.
  helmUpgradeFlags:
    - --skip-crds
    - --no-hooks
    - --timeout
    - 1200s
    - --history-max=15

  # values are used in the customer environment as a pre-render step
  # these values are supplied to helm template
  values:
    postgresql:
      enabled: repl{{ ConfigOptionEquals `postgres_type` `embedded_postgres`}}

  optionalValues:
    - when: "repl{{ ConfigOptionEquals `postgres_type` `external_postgres`}}"
      recursiveMerge: false
      values:
        postgresql:
          postgresqlDatabase: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_database`}}repl{{ end}}"
          postgresqlUsername: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_username`}}repl{{ end}}"
          postgresqlHost: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_host`}}repl{{ end}}"
          postgresqlPassword: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_password`}}repl{{ end}}"
          postgresqlPort: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_port`}}repl{{ end}}"
    # adds backup labels to postgresql if the license supports snapshots
    - when: "repl{{ LicenseFieldValue `isSnapshotSupported` }}"
      recursiveMerge: true
      values:
        postgresql:
          commonLabels:
            kots.io/backup: velero
            kots.io/app-slug: my-app
          podLabels:
            kots.io/backup: velero
            kots.io/app-slug: my-app

  # namespace allows for a chart to be installed in an alternate namespace to
  # the default
  namespace: samplechart-namespace

  # builder values render the chart with all images and manifests.
  # builder is used to create `.airgap` packages and to support end users
  # who use private registries
  builder:
    postgresql:
      enabled: true
```

================
File: docs/partials/image-registry/_docker-compatibility.mdx
================
- Docker Hub

  :::note
  To avoid the rate limits that Docker Hub introduced on November 20, 2020, use the `kots docker ensure-secret` CLI command.
For more information, see [Avoiding Docker Hub Rate Limits](image-registry-rate-limits). + ::: + +- Quay +- Amazon Elastic Container Registry (ECR) +- Google Container Registry (GCR) +- Azure Container Registry (ACR) +- Harbor +- Sonatype Nexus + +================ +File: docs/partials/image-registry/_image-registry-settings.mdx +================ + + + + + + + + + + + + + + + + + + + + + + + + + +
| Field | Description |
|---|---|
| Hostname | Specify a registry domain that uses the Docker V2 protocol. |
| Username | Specify the username for the domain. |
| Password | Specify the password for the domain. |
| Registry Namespace | Specify the registry namespace. The registry namespace is the path between the registry and the image name. For example, `my.registry.com/namespace/image:tag`. For air gap environments, this setting overwrites the registry namespace to which images were pushed when KOTS was installed. |
| Disable Pushing Images to Registry | (Optional) Select this option to prevent KOTS from pushing images. Make sure that an external process is configured to push images to your registry instead. Your images are still read from your registry when the application is deployed. |

================
File: docs/partials/install/_access-admin-console.mdx
================
By default, during installation, KOTS automatically opens localhost port 8800 to provide access to the Admin Console. Using the `--no-port-forward` flag with the `kots install` command prevents KOTS from creating a port forward to the Admin Console.

After you install with the `--no-port-forward` flag, you can optionally create a port forward so that you can log in to the Admin Console in a browser window.

To access the Admin Console:

1. If you installed in a VM where you cannot open a browser window, forward a port on your local machine to `localhost:8800` on the remote VM using the SSH client:

   ```bash
   ssh -L LOCAL_PORT:localhost:8800 USERNAME@IP_ADDRESS
   ```
   Replace:
   * `LOCAL_PORT` with the port on your local machine to forward. For example, `9900` or `8800`.
   * `USERNAME` with your username for the VM.
   * `IP_ADDRESS` with the IP address for the VM.

   **Example**:

   The following example shows using the SSH client to forward port 8800 on your local machine to `localhost:8800` on the remote VM.

   ```bash
   ssh -L 8800:localhost:8800 user@ip-addr
   ```

1. Run the following KOTS CLI command to open localhost port 8800, which forwards to the Admin Console service:

   ```bash
   kubectl kots admin-console --namespace NAMESPACE
   ```
   Replace `NAMESPACE` with the namespace where the Admin Console was installed.

   For more information about the `kots admin-console` command, see [admin-console](/reference/kots-cli-admin-console-index) in the _KOTS CLI_ documentation.

1. Open a browser window and go to `https://localhost:8800`.

1. Log in to the Admin Console using the password that you created as part of the `kots install` command.

================
File: docs/partials/install/_airgap-bundle-build.mdx
================
* If the **Automatically create airgap builds for newly promoted releases in this channel** setting is enabled on the channel, watch for the build status to complete.
* If automatic air gap builds are not enabled, go to the **Release history** page for the channel and build the air gap bundle manually.

  ![Release history link on a channel card](/images/release-history-link.png)

  [View a larger version of this image](/images/release-history-link.png)

  ![Build button on the Release history page](/images/release-history-build-airgap-bundle.png)

  [View a larger version of this image](/images/release-history-build-airgap-bundle.png)

================
File: docs/partials/install/_airgap-bundle-download.mdx
================
After the build completes, download the bundle. Ensure that you can access the downloaded bundle from the environment where you will install the application.

================
File: docs/partials/install/_airgap-bundle-view-contents.mdx
================
(Optional) View the contents of the downloaded bundle:

   ```bash
   tar -zxvf AIRGAP_BUNDLE
   ```

   Where `AIRGAP_BUNDLE` is the filename for the `.airgap` bundle that you downloaded.

================
File: docs/partials/install/_airgap-license-download.mdx
================
1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Customers** page.

1. Click on the name of the target customer and go to the **Manage customer** tab.

1. Under **License options**, enable the **Airgap Download Enabled** option. Click **Save Changes**.
+ + ![Airgap Download Enabled option](/images/airgap-download-enabled.png) + + [View a larger version of this image](/images/airgap-download-enabled.png) + +1. At the top of the screen, click **Download license** to download the air gap enabled license. + + ![Download air gap license](/images/download-airgap-license.png) + + [View a larger version of this image](/images/download-airgap-license.png) + +================ +File: docs/partials/install/_automation-intro-embedded.mdx +================ +When you use the KOTS CLI to install an application in a kURL cluster, you first run the kURL installation script to provision the cluster and automatically install KOTS in the cluster. Then, you can run the `kots install` command to install the application. + +================ +File: docs/partials/install/_automation-intro-existing.mdx +================ +When you use the KOTS CLI to install an application in an existing cluster, you install both the application and Replicated KOTS with a single command. + +================ +File: docs/partials/install/_config-values-procedure.mdx +================ +To get the ConfigValues file from an installed application instance: + +1. Install the target release in a development environment. You can either install the release with Replicated Embedded Cluster or install in an existing cluster with KOTS. For more information, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded) or [Online Installation in Existing Clusters](/enterprise/installing-existing-cluster). + +1. Depending on the installer that you used, do one of the following to get the ConfigValues for the installed instance: + + * **For Embedded Cluster installations**: In the Admin Console, go to the **View files** tab. In the filetree, go to **upstream > userdata** and open **config.yaml**, as shown in the image below: + + ![ConfigValues file in the Admin Console View Files tab](/images/admin-console-view-files-configvalues.png) + + [View a larger version of this image](/images/admin-console-view-files-configvalues.png) + + * **For KOTS installations in an existing cluster**: Run the `kubectl kots get config` command to view the generated ConfigValues file: + + ```bash + kubectl kots get config --namespace APP_NAMESPACE --decrypt + ``` + Where: + * `APP_NAMESPACE` is the cluster namespace where KOTS is running. + * The `--decrypt` flag decrypts all configuration fields with `type: password`. In the downloaded ConfigValues file, the decrypted value is stored in a `valuePlaintext` field. + + The output of the `kots get config` command shows the contents of the ConfigValues file. For more information about the `kots get config` command, including additional flags, see [kots get config](/reference/kots-cli-get-config). + +================ +File: docs/partials/install/_download-kotsadm-bundle.mdx +================ +Download the `kotsadm.tar.gz` air gap bundle from the [Releases](https://github.com/replicatedhq/kots/releases) page in the kots repository in GitHub. Ensure that you can access the downloaded bundle from the environment where you will install the application. + +:::note +The version of the `kotsadm.tar.gz` air gap bundle used must be compatible with the version of the `.airgap` bundle for the given application release. 
:::

================
File: docs/partials/install/_download-kurl-bundle.mdx
================
```bash
export REPLICATED_APP=APP_SLUG
curl -LS https://k8s.kurl.sh/bundle/$REPLICATED_APP.tar.gz -o $REPLICATED_APP.tar.gz
```
Where `APP_SLUG` is the unique slug for the application.

================
File: docs/partials/install/_ec-prereqs.mdx
================
* Ensure that your installation environment meets the Embedded Cluster requirements. See [Embedded Cluster Requirements](/enterprise/installing-embedded-requirements).

* The application release that you want to install must include an [Embedded Cluster Config](/reference/embedded-config).

* The license used to install must have the **Embedded Cluster Enabled** license field enabled. See [Creating and Managing Customers](/vendor/releases-creating-customer).

================
File: docs/partials/install/_embedded-ha-step.mdx
================
(HA Installation Only) If you are installing in HA mode and did not already preconfigure a load balancer, you are prompted during the installation. Do one of the following:

   - If you are using the internal load balancer, leave the prompt blank and proceed with the installation.

   - If you are using an external load balancer, pass the load balancer address.

================
File: docs/partials/install/_embedded-login-password.mdx
================
After the installation command finishes, note the `Kotsadm` and `Login with password (will not be shown again)` fields in the output of the command. You use these to log in to the Admin Console.

   The following shows an example of the `Kotsadm` and `Login with password (will not be shown again)` fields in the output of the installation command:

   ```
   Installation
            Complete ✔

   Kotsadm: http://10.128.0.35:8800
   Login with password (will not be shown again): 3Hy8WYYid

   This password has been set for you by default. It is recommended that you change
   this password; this can be done with the following command:
   kubectl kots reset-password default
   ```

================
File: docs/partials/install/_extract-kurl-bundle.mdx
================
In your installation environment, extract the contents of the kURL `.tar.gz` bundle that you downloaded:

   ```bash
   tar -xvzf $REPLICATED_APP.tar.gz
   ```

================
File: docs/partials/install/_firewall-openings-intro.mdx
================
The domains for the services listed in the table below need to be accessible from servers performing online installations. No outbound internet access is required for air gap installations.

For services hosted at domains owned by Replicated, the table below includes a link to the list of IP addresses for the domain at [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json) in GitHub. Note that the IP addresses listed in the `replicatedhq/ips` repository also include IP addresses for some domains that are _not_ required for installation.

For any third-party services hosted at domains not owned by Replicated, consult the third-party's documentation for the IP address range for each domain, as needed.

================
File: docs/partials/install/_firewall-openings.mdx
================
The domains for the services listed in the table below need to be accessible from servers performing online installations. No outbound internet access is required for air gap installations.
For services hosted at domains owned by Replicated, the table below includes a link to the list of IP addresses for the domain at [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json) in GitHub. Note that the IP addresses listed in the `replicatedhq/ips` repository also include IP addresses for some domains that are _not_ required for installation.

For third-party services hosted at domains not owned by Replicated, the table below lists the required domains. Consult the third-party's documentation for the IP address range for each domain, as needed.

| Host | Embedded Cluster | Helm | KOTS Existing Cluster | kURL | Description |
|------|------------------|------|-----------------------|------|-------------|
| Docker Hub | Not Required | Not Required | Required | Required | Some dependencies of KOTS are hosted as public images in Docker Hub. The required domains for this service are `index.docker.io`, `cdn.auth0.com`, `*.docker.io`, and `*.docker.com`. |
| `replicated.app` | Required | Required*** | Required | Required | Upstream application YAML and metadata are pulled from `replicated.app`. The current running version of the application (if any), as well as a license ID and application ID to authenticate, are all sent to `replicated.app`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA. For the range of IP addresses for `replicated.app`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L60-L65) in GitHub. |
| `proxy.replicated.com` | Required | Required | Required* | Required* | Private Docker images are proxied through `proxy.replicated.com`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA. For the range of IP addresses for `proxy.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L52-L57) in GitHub. |
| `registry.replicated.com` | Required** | Required | Required** | Required** | Some applications host private images in the Replicated registry at this domain. The on-prem Docker client uses a license ID to authenticate to `registry.replicated.com`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA. For the range of IP addresses for `registry.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L20-L25) in GitHub. |
| `kots.io` | Not Required | Not Required | Required | Not Required | Requests are made to this domain when installing the Replicated KOTS CLI. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA. |
| `github.com` | Not Required | Not Required | Required | Not Required | Requests are made to this domain when installing the Replicated KOTS CLI. For information about retrieving GitHub IP addresses, see [About GitHub's IP addresses](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/about-githubs-ip-addresses) in the GitHub documentation. |
| `k8s.kurl.sh`<br />`s3.kurl.sh` | Not Required | Not Required | Not Required | Required | kURL installation scripts and artifacts are served from [kurl.sh](https://kurl.sh). An application identifier is sent in a URL path, and bash scripts and binary executables are served from kurl.sh. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA. For the range of IP addresses for `k8s.kurl.sh`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L34-L39) in GitHub. The range of IP addresses for `s3.kurl.sh` is the same as for the `kurl.sh` domain; see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L28-L31) in GitHub. |
| `amazonaws.com` | Not Required | Not Required | Not Required | Required | `tar.gz` packages are downloaded from Amazon S3 during installations with kURL. For information about dynamically scraping the IP ranges to allowlist for accessing these packages, see [AWS IP address ranges](https://docs.aws.amazon.com/general/latest/gr/aws-ip-ranges.html#aws-ip-download) in the AWS documentation. |
* Required only if the application uses the [Replicated proxy registry](/vendor/private-images-about).

** Required only if the application uses the [Replicated registry](/vendor/private-images-replicated).

*** Required only if the [Replicated SDK](/vendor/replicated-sdk-overview) is included as a dependency of the application Helm chart.

================
File: docs/partials/install/_ha-load-balancer-about.mdx
================
A load balancer is required for high availability mode. If your vendor has chosen to use the internal load balancer with the kURL EKCO add-on, you do not need to provide your own external load balancer. An external load balancer might be preferred when clients outside the cluster need access to the cluster's Kubernetes API.

If you decide to use an external load balancer, the external load balancer must be a TCP forwarding load balancer. For more information, see [Prerequisites](#prerequisites).

The health check for an apiserver is a TCP check on the port that the kube-apiserver listens on. The default value is `:6443`. For more information about the kube-apiserver external load balancer, see [Create load balancer for kube-apiserver](https://kubernetes.io/docs/setup/independent/high-availability/#create-load-balancer-for-kube-apiserver) in the Kubernetes documentation.

================
File: docs/partials/install/_ha-load-balancer-prereq.mdx
================
- If you are installing in high availability (HA) mode, a load balancer is required. You can use the kURL internal load balancer if the [Embedded kURL Cluster Operator (EKCO) Add-On](https://kurl.sh/docs/add-ons/ekco) is included in the kURL Installer spec. Or, you can bring your own external load balancer. An external load balancer might be preferred when clients outside the cluster need access to the cluster's Kubernetes API.

  To install in HA mode, complete the following prerequisites:
  - (Optional) If you are going to use the internal EKCO load balancer, you can preconfigure it by passing `| sudo bash -s ha ekco-enable-internal-load-balancer` with the kURL install command. Otherwise, you are prompted for load balancer details during installation. For more information about the EKCO Add-on, see [EKCO Add-On](https://kurl.sh/docs/add-ons/ekco) in the open source kURL documentation.
  - To use an external load balancer, ensure that the load balancer meets the following requirements:
    - Must be a TCP forwarding load balancer
    - Must be configured to distribute traffic to all healthy control plane nodes in its target list
    - The health check must be a TCP check on port 6443

    For more information about how to create a load balancer for kube-apiserver, see [Create load balancer for kube-apiserver](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/#create-load-balancer-for-kube-apiserver) in the Kubernetes documentation.

    You can optionally preconfigure the external load balancer by passing the `load-balancer-address=HOST:PORT` flag with the kURL install command. Otherwise, you are prompted to provide the load balancer address during installation.

================
File: docs/partials/install/_install-kots-cli-airgap.mdx
================
Install the KOTS CLI. See [Manually Download and Install](/reference/kots-cli-getting-started#manually-download-and-install) in _Installing the KOTS CLI_.
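
For illustration, a minimal sketch of the manual download and install, assuming a Linux amd64 machine and the release asset naming used on the kots **Releases** page (the version shown is a placeholder):

```bash
# Download the KOTS CLI release asset, extract it, and install it as a kubectl plugin
curl -LO https://github.com/replicatedhq/kots/releases/download/v1.117.0/kots_linux_amd64.tar.gz
tar -xzf kots_linux_amd64.tar.gz
sudo mv kots /usr/local/bin/kubectl-kots
# Verify the plugin is installed and check its version
kubectl kots version
```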
+ +================ +File: docs/partials/install/_install-kots-cli.mdx +================ +Install the KOTS CLI: + + ``` + curl https://kots.io/install | bash + ``` + + For more installation options, see [Installing the KOTS CLI](/reference/kots-cli-getting-started). + +================ +File: docs/partials/install/_intro-air-gap.mdx +================ +The procedures in this topic apply to installation environments that do not have access to the internet, known as _air gap_ environments. + +================ +File: docs/partials/install/_intro-embedded.mdx +================ +This topic describes how to use Replicated kURL to provision an embedded cluster in a virtual machine (VM) or bare metal server and install an application in the cluster. + +================ +File: docs/partials/install/_intro-existing.mdx +================ +This topic describes how to use Replicated KOTS to install an application in an existing Kubernetes cluster. + +================ +File: docs/partials/install/_kots-airgap-version-match.mdx +================ +:::note +The versions of the KOTS CLI and the `kotsadm.tar.gz` bundle must match. You can check the version of the KOTS CLI with `kubectl kots version`. +::: + +================ +File: docs/partials/install/_kots-install-prompts.mdx +================ +When prompted by the `kots install` command: + 1. Provide the namespace where you want to install both KOTS and the application. + 1. Create a new password for logging in to the Admin Console. + + **Example**: + + ```shell + $ kubectl kots install application-name + Enter the namespace to deploy to: application-name + • Deploying Admin Console + • Creating namespace ✓ + • Waiting for datastore to be ready ✓ + Enter a new password to be used for the Admin Console: •••••••• + • Waiting for Admin Console to be ready ✓ + + • Press Ctrl+C to exit + • Go to http://localhost:8800 to access the Admin Console + + ``` + + After the `kots install` command completes, it creates a port forward to the Admin Console. The Admin Console is exposed internally in the cluster and can only be accessed using a port forward. + +================ +File: docs/partials/install/_kubernetes-compatibility.mdx +================ +| KOTS Versions | Kubernetes Compatibility | +|------------------------|-----------------------------| +| 1.117.0 and later | 1.31, 1.30, 1.29 | +| 1.109.1 to 1.116.1 | 1.30, 1.29, 1.28 | +| 1.105.2 to 1.109.0 | 1.29, 1.28 | + +================ +File: docs/partials/install/_kurl-about.mdx +================ +Replicated kURL is an open source project. For more information, see the [kURL documentation](https://kurl.sh/docs/introduction/). + +================ +File: docs/partials/install/_license-file-prereq.mdx +================ +* Download your license file. Ensure that you can access the downloaded license file from the environment where you will install the application. See [Downloading Customer Licenses](/vendor/licenses-download). + +================ +File: docs/partials/install/_placeholder-airgap-bundle.mdx +================ +* `PATH_TO_AIRGAP_BUNDLE` with the path to the `.airgap` bundle for the application release. You can build and download the air gap bundle for a release in the [Vendor Portal](https://vendor.replicated.com) on the **Release history** page for the channel where the release is promoted. 
+ + Alternatively, for information about building and downloading air gap bundles with the Vendor API v3, see [Trigger airgap build for a channel's release](https://replicated-vendor-api.readme.io/reference/channelreleaseairgapbuild) and [Get airgap bundle download URL for the active release on the channel](https://replicated-vendor-api.readme.io/reference/channelreleaseairgapbundleurl) in the Vendor API v3 documentation. + +================ +File: docs/partials/install/_placeholder-app-name-UI.mdx +================ +* `APP_NAME` with the name of the application. The `APP_NAME` is included in the installation command that your vendor gave you. This is a unique identifier that KOTS will use to refer to the application that you install. + +================ +File: docs/partials/install/_placeholder-namespace-embedded.mdx +================ +* `NAMESPACE` with the namespace where Replicated kURL installed Replicated KOTS when creating the cluster. By default, kURL installs KOTS in the `default` namespace. + +================ +File: docs/partials/install/_placeholder-namespace-existing.mdx +================ +* `NAMESPACE` with the namespace where you want to install both the application and KOTS. + +================ +File: docs/partials/install/_placeholder-ro-creds.mdx +================ +* `REGISTRY_HOST` with the same hostname for the private registry where you pushed the Admin Console images. + +* `RO_USERNAME` and `RO_PASSWORD` with the username and password for an account that has read-only access to the private registry. + + :::note + KOTS stores these read-only credentials in a Kubernetes secret in the same namespace where the Admin Console is installed. + + KOTS uses these credentials to pull the images. To allow KOTS to pull images, the credentials are automatically created as an imagePullSecret on all of the Admin Console Pods. + ::: + +================ +File: docs/partials/install/_placeholders-global.mdx +================ +* `APP_NAME` with a name for the application. This is the unique name that KOTS will use to refer to the application that you install. + +* `PASSWORD` with a shared password for accessing the Admin Console. + +* `PATH_TO_LICENSE` with the path to your license file. See [Downloading Customer Licenses](/vendor/licenses-download). For information about how to download licenses with the Vendor API v3, see [Download a customer license file as YAML](https://replicated-vendor-api.readme.io/reference/downloadlicense) in the Vendor API v3 documentation. + +* `PATH_TO_CONFIGVALUES` with the path to the ConfigValues file. + +================ +File: docs/partials/install/_prereqs-embedded-cluster.mdx +================ +* Ensure that your environment meets the minimum system requirements. See [kURL Installation Requirements](/enterprise/installing-kurl-requirements). + +* Review the advanced installation options available for the kURL installer. See [Advanced Options](https://kurl.sh/docs/install-with-kurl/advanced-options) in the kURL documentation. + +================ +File: docs/partials/install/_prereqs-existing-cluster.mdx +================ +* Ensure that your cluster meets the minimum system requirements. See [Minimum System Requirements](/enterprise/installing-general-requirements#minimum-system-requirements) in _Installation Requirements_. + +* Ensure that you have at least the minimum RBAC permissions in the cluster required to install KOTS. See [RBAC Requirements](/enterprise/installing-general-requirements#rbac-requirements) in _Installation Requirements_. 
  :::note
  If you manually created RBAC resources for KOTS as described in [Namespace-scoped RBAC Requirements](/enterprise/installing-general-requirements#namespace-scoped), include both the `--ensure-rbac=false` and `--skip-rbac-check` flags when you run the `kots install` command.

  These flags prevent KOTS from checking for or attempting to create a Role with `* * *` permissions in the namespace. For more information about these flags, see [install](/reference/kots-cli-install) or [admin-console upgrade](/reference/kots-cli-admin-console-upgrade).
  :::

* Review the options available with the `kots install` command before installing. The `kots install` command includes several optional flags to support different installation use cases. For a list of options, see [install](/reference/kots-cli-install) in the _KOTS CLI_ documentation.

================
File: docs/partials/install/_provision-cluster-intro.mdx
================
This procedure describes how to use kURL to provision an embedded cluster on a VM or bare metal server. When you create a cluster with kURL, kURL also automatically installs Replicated KOTS in the `default` namespace in the cluster.

================
File: docs/partials/install/_push-kotsadm-images.mdx
================
Extract the KOTS Admin Console container images from the `kotsadm.tar.gz` bundle and push the images to your private registry:

   ```
   kubectl kots admin-console push-images ./kotsadm.tar.gz REGISTRY_HOST \
     --registry-username RW_USERNAME \
     --registry-password RW_PASSWORD
   ```

   Replace:

   * `REGISTRY_HOST` with the hostname for the private registry. For example, `private.registry.host` or `my-registry.example.com/my-namespace`.

   * `RW_USERNAME` and `RW_PASSWORD` with the username and password for an account that has read and write access to the private registry.

     :::note
     KOTS does not store or reuse these read-write credentials.
     :::

================
File: docs/partials/instance-insights/_airgap-telemetry.mdx
================
For air gap instances, Replicated KOTS and the Replicated SDK collect and store instance telemetry in a Kubernetes Secret in the customer environment. The Replicated SDK also stores any custom metrics within its Secret.

The telemetry and custom metrics stored in the Secret are collected when a support bundle is generated in the environment. When the support bundle is uploaded to the Vendor Portal, the telemetry and custom metrics are associated with the correct customer and instance ID, and the Vendor Portal updates the instance insights and event data accordingly.

================
File: docs/partials/instance-insights/_notifications-about.mdx
================
:::note
Configuring notifications for customer instance changes is in public Beta. Features and functionality are subject to change as we continue to iterate on this functionality toward General Availability.
:::

Notifications can help catch problems before they happen and let you proactively contact customers to prevent support cases. For example, you can be notified of a degraded status and contact your customer about fixing it before the instance goes down. This approach can make issues quicker and easier to solve, and improve the customer experience with less downtime.

For more information about how application status is determined, see [Resource Statuses](insights-app-status#resource-statuses) in _Enabling and Understanding Application Status_.
For more information about events that might trigger notifications, see [How the Vendor Portal Generates Events and Insights](instance-insights-event-data#about-events) in _About Instance and Event Data_.

================
File: docs/partials/instance-insights/_supported-resources-status.mdx
================
The following resource types are supported:

* Deployment
* StatefulSet
* Service
* Ingress
* PersistentVolumeClaims (PVC)
* DaemonSet

================
File: docs/partials/kots/_admin-console-about.mdx
================
KOTS provides an Admin Console that lets your customers manage your application. You can customize the Admin Console. For example, you can customize the Config screen to allow customers to specify inputs related to unique options that your application provides. You can also include your own branding on the Admin Console, configure status informers, and add custom graphs.

================
File: docs/partials/kots/_download-portal-about.mdx
================
The Replicated Download Portal can be used to share license files, air gap bundles, and other assets with customers. A unique Download Portal link is available for each customer. The Download Portal uses information from the customer's license to make the relevant assets available for download, such as:
* The license file
* `.airgap` bundles for the application releases that the customer has access to based on their channel assignment
* The Replicated KOTS Admin Console `kotsadm.tar.gz` air gap bundle
* The Replicated kURL `.tgz` air gap bundle
* Preflight, support bundle, and KOTS CLI kubectl plugins

================
File: docs/partials/kots/_embedded-kubernetes-definition.mdx
================
_Embedded Kubernetes_ refers to delivering a Kubernetes distribution alongside an application, so that both Kubernetes and the application are deployed in the customer environment. Embedding Kubernetes allows software vendors to install their Kubernetes application in non-Kubernetes customer-controlled environments, such as virtual machines (VMs) or bare metal servers. Additionally, software vendors that embed Kubernetes with their application have greater control over the characteristics of the cluster where their application is installed. This allows vendors to deliver a cluster that meets their application's requirements, which can help reduce errors during installation.

================
File: docs/partials/kots/_kots-definition.mdx
================
Replicated KOTS is a kubectl plugin and an in-cluster Admin Console that provides highly successful installations of Helm charts and Kubernetes applications into customer-controlled environments, including on-prem and air gap environments.

================
File: docs/partials/kots/_kots-entitlement-note.mdx
================
:::note
The Replicated KOTS entitlement is required to install applications with KOTS. For more information, see [Pricing](https://www.replicated.com/pricing) on the Replicated website.
:::

================
File: docs/partials/kots-cli/_ensure-rbac.mdx
================

    --ensure-rbac
    bool

    When false, KOTS does not attempt to create the RBAC resources necessary to manage applications. Default: true. If a role specification is needed, use the `generate-manifests` command.

================
File: docs/partials/kots-cli/_help.mdx
================

    -h, --help

    Help for the command.
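
As a hedged illustration of the `--ensure-rbac` guidance above, the following sketch renders the Admin Console manifests, including the RBAC resources, for review before installing with RBAC creation disabled; the application name and output directory are placeholders:

```bash
# Render the Admin Console manifests (including Role/RoleBinding) for review
kubectl kots admin-console generate-manifests --rootdir ./manifests
# Install without KOTS creating or checking RBAC resources itself
kubectl kots install APP_NAME --ensure-rbac=false --skip-rbac-check
```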
================
File: docs/partials/kots-cli/_kotsadm-namespace.mdx
================

    --kotsadm-namespace
    string

    Set to override the registry namespace of KOTS Admin Console images. Used for air gap installations. For more information, see [Air Gap Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster-airgapped).

    Note: Replicated recommends that you use `--kotsadm-registry` instead of `--kotsadm-namespace` to override both the registry hostname and, optionally, the registry namespace with a single flag.

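    For example, a minimal sketch of overriding the Admin Console image location with the recommended `--kotsadm-registry` flag; the hostname, registry namespace, and credentials are placeholders, and air gap installations typically pass additional flags such as the bundle path:

    ```bash
    # Pull Admin Console images from a private registry instead of the default location
    kubectl kots install APP_NAME \
      --kotsadm-registry private.registry.host/kots \
      --registry-username RO_USERNAME \
      --registry-password RO_PASSWORD
    ```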
    + + +================ +File: docs/partials/kots-cli/_kotsadm-registry.mdx +================ + + --kotsadm-registry + string + Set to override the registry hostname and namespace of KOTS Admin Console images. Used for air gap installations. For more information, see [Air Gap Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster-airgapped). + + +================ +File: docs/partials/kots-cli/_registry-password.mdx +================ + + --registry-password + string + Password to use to authenticate with the application registry. Used for air gap installations. + + +================ +File: docs/partials/kots-cli/_registry-username.mdx +================ + + --registry-username + string + Username to use to authenticate with the application registry. Used for air gap installations. + + +================ +File: docs/partials/kots-cli/_skip-rbac-check.mdx +================ + + --skip-rbac-check + bool + When true, KOTS does not validate RBAC permissions. Default: false + + +================ +File: docs/partials/kots-cli/_strict-sec-context-yaml.mdx +================ +```yaml +securityContext: + fsGroup: 1001 + runAsGroup: 1001 + runAsNonRoot: true + runAsUser: 1001 + seccompProfile: + type: RuntimeDefault + supplementalGroups: + - 1001 +``` + +================ +File: docs/partials/kots-cli/_strict-security-context.mdx +================ +import StrictSecContextYaml from "./_strict-sec-context-yaml.mdx" + + + --strict-security-context + bool + +

    Set to true to explicitly enable strict security contexts for all KOTS Pods and containers.

    By default, KOTS Pods and containers are not deployed with a specific security context. When true, `--strict-security-context` does the following:

    * Ensures containers run as a non-root user
    * Sets the specific UID for the containers (1001)
    * Sets the GID for volume ownership and permissions (1001)
    * Applies the default container runtime seccomp profile for security
    * Ensures the container is not run with privileged system access
    * Prevents the container from gaining more privileges than its parent process
    * Ensures the container's root filesystem is mounted as read-only
    * Removes all Linux capabilities from the container

    The following shows the securityContext for KOTS Pods when `--strict-security-context` is set:

    <StrictSecContextYaml/>

    Default: false

    :::note
    Might not work for some storage providers.
    :::

================
File: docs/partials/kots-cli/_use-minimal-rbac.mdx
================

    --use-minimal-rbac
    bool

    When true, KOTS RBAC permissions are limited to the namespace where it is installed.

    To use `--use-minimal-rbac`, the application must support namespace-scoped installations and the user must have the minimum RBAC permissions required by KOTS in the target namespace. For a complete list of requirements, see [Namespace-scoped RBAC Requirements](/enterprise/installing-general-requirements#namespace-scoped) in _Installation Requirements_. Default: false

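    A minimal sketch of a namespace-scoped installation with this flag, assuming the application supports namespace-scoped installations and the namespace-scoped RBAC prerequisites are met; the names are placeholders:

    ```bash
    # Install with KOTS RBAC limited to the target namespace
    kubectl kots install APP_NAME \
      --namespace APP_NAMESPACE \
      --use-minimal-rbac
    ```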
================
File: docs/partials/kots-cli/_wait-duration.mdx
================

    --wait-duration
    string

    Timeout to be used while waiting for individual components to be ready. Must be in Go duration format. Example: 10s, 2m

================
File: docs/partials/kots-cli/_with-minio.mdx
================

    --with-minio
    bool

    When true, KOTS deploys a local MinIO instance for storage and attempts to change any MinIO-based snapshots (hostpath and NFS) to the local-volume-provider plugin. See [local-volume-provider](https://github.com/replicatedhq/local-volume-provider) in GitHub. Default: true

================
File: docs/partials/kurl/_installers.mdx
================
To provision a cluster on a VM or bare metal server, kURL uses a spec that is defined in a manifest file with `apiVersion: cluster.kurl.sh/v1beta1` and `kind: Installer`. This spec (called a _kURL installer_) lists the kURL add-ons that will be included in the cluster. kURL provides add-ons for networking, storage, ingress, and more. kURL also provides a KOTS add-on, which installs KOTS in the cluster and deploys the KOTS Admin Console. You can customize the kURL installer according to your application requirements.

================
File: docs/partials/kurl/_kurl-availability.mdx
================
:::note
Replicated kURL is available only for existing customers. If you are not an existing kURL user, use Replicated Embedded Cluster instead. For more information, see [Using Embedded Cluster](/vendor/embedded-overview).

kURL is a Generally Available (GA) product for existing customers. For more information about the Replicated product lifecycle phases, see [Support Lifecycle Policy](/vendor/policies-support-lifecycle).
:::

================
File: docs/partials/kurl/_kurl-definition.mdx
================
kURL is an open source project maintained by Replicated that software vendors can use to create custom Kubernetes distributions that are embedded with their application. Enterprise customers can then run a kURL installation script on their virtual machine (VM) or bare metal server to provision a cluster and install the application. This allows software vendors to distribute Kubernetes applications to customers that do not have access to a cluster in their environment.

For more information about the kURL open source project, see the [kURL website](https://kurl.sh).

================
File: docs/partials/linter-rules/_allow-privilege-escalation.mdx
================
```yaml
spec:
  allowPrivilegeEscalation: true
```

================
File: docs/partials/linter-rules/_application-icon.mdx
================
```yaml
apiVersion: kots.io/v1beta1
kind: Application
spec:
  icon: https://example.com/app-icon.png
```

================
File: docs/partials/linter-rules/_application-spec.mdx
================
```yaml
apiVersion: kots.io/v1beta1
kind: Application
```

================
File: docs/partials/linter-rules/_application-statusInformers.mdx
================
```yaml
apiVersion: kots.io/v1beta1
kind: Application
spec:
  statusInformers:
    - deployment/example-nginx
```

================
File: docs/partials/linter-rules/_config-option-invalid-regex-validator.mdx
================
**Correct**:

```yaml
spec:
  groups:
  - name: authentication
    title: Authentication
    description: Configure application authentication below.
  - name: jwt_file
    title: jwt_file
    type: file
    validation:
      regex:
        pattern: "^[A-Za-z0-9-_]+.[A-Za-z0-9-_]+.[A-Za-z0-9-_]*$" # valid RE2 regular expression
        message: "JWT is invalid"
```

**Incorrect**:

```yaml
spec:
  groups:
  - name: authentication
    title: Authentication
    description: Configure application authentication below.
  - name: jwt_file
    title: jwt_file
    type: file
    validation:
      regex:
        pattern: "^/path/([A-Za-z0-9-_]+.[A-Za-z0-9-_]+.[A-Za-z0-9-_]*$" # invalid RE2 regular expression
        message: "JWT is invalid"
```

================
File: docs/partials/linter-rules/_config-option-invalid-type.mdx
================
**Correct**:

```yaml
spec:
  groups:
  - name: authentication
    title: Authentication
    description: Configure application authentication below.
  - name: group_title
    title: Group Title
    items:
    - name: http_enabled
      title: HTTP Enabled
      type: bool # bool is a valid type
```

**Incorrect**:

```yaml
spec:
  groups:
  - name: authentication
    title: Authentication
    description: Configure application authentication below.
  - name: group_title
    title: Group Title
    items:
    - name: http_enabled
      title: HTTP Enabled
      type: unknown_type # unknown_type is not a valid type
```

================
File: docs/partials/linter-rules/_config-option-is-circular.mdx
================
**Incorrect**:

```yaml
spec:
  groups:
  - name: example_settings
    items:
    - name: example_default_value
      type: text
      value: repl{{ ConfigOption "example_default_value" }}
```

================
File: docs/partials/linter-rules/_config-option-password-type.mdx
================
```yaml
spec:
  groups:
  - name: ports
    items:
    - name: my_secret
      type: password
```

================
File: docs/partials/linter-rules/_config-option-regex-validator-invalid-type.mdx
================
**Correct**:

```yaml
spec:
  groups:
  - name: authentication
    title: Authentication
    description: Configure application authentication below.
  - name: jwt_file
    title: jwt_file
    type: file # valid item type
    validation:
      regex:
        pattern: "^[A-Za-z0-9-_]+.[A-Za-z0-9-_]+.[A-Za-z0-9-_]*$"
        message: "JWT is invalid"
```

**Incorrect**:

```yaml
spec:
  groups:
  - name: authentication
    title: Authentication
    description: Configure application authentication below.
  - name: jwt_file
    title: jwt_file
    type: bool # invalid item type
    validation:
      regex:
        pattern: "^[A-Za-z0-9-_]+.[A-Za-z0-9-_]+.[A-Za-z0-9-_]*$"
        message: "JWT is invalid"
```

================
File: docs/partials/linter-rules/_config-spec.mdx
================
```yaml
apiVersion: kots.io/v1beta1
kind: Config
```

================
File: docs/partials/linter-rules/_container-image-latest-tag.mdx
================
```yaml
spec:
  containers:
  - image: nginx:latest
```

================
File: docs/partials/linter-rules/_container-image-local-image-name.mdx
================
```yaml
spec:
  containers:
  - image: LocalImageName
```

================
File: docs/partials/linter-rules/_container-resource-limits.mdx
================
```yaml
spec:
  containers:
  - name: nginx
    resources:
      requests:
        memory: '32Mi'
        cpu: '100m'
      # note the lack of a limit field
```

================
File: docs/partials/linter-rules/_container-resource-requests.mdx
================
```yaml
spec:
  containers:
  - name: nginx
    resources:
      limits:
        memory: '256Mi'
        cpu: '500m'
      # note the lack of a requests field
```

================
File: docs/partials/linter-rules/_container-resources.mdx
================
```yaml
spec:
  containers:
  - name: nginx
    # note the lack of a resources field
```

================
File: docs/partials/linter-rules/_deprecated-kubernetes-installer-version.mdx
================
**Correct**:

```yaml
apiVersion: cluster.kurl.sh/v1beta1
kind: Installer
```

**Incorrect**:

```yaml
apiVersion: kurl.sh/v1beta1
kind: Installer
```

================
File: docs/partials/linter-rules/_hardcoded-namespace.mdx
================
```yaml
metadata:
  name: spline-reticulator
  namespace: graphviz-pro
```

================
File: docs/partials/linter-rules/_invalid_type.mdx
================
**Correct**:

```yaml
ports:
  - serviceName: "example"
    servicePort: 80
```

**Incorrect**:

```yaml
ports:
  - serviceName: "example"
    servicePort: "80"
```

================
File: docs/partials/linter-rules/_invalid-helm-release-name.mdx
================
```yaml
apiVersion: kots.io/v1beta1
kind: HelmChart
spec:
  chart:
    releaseName: samplechart-release-1
```

================
File: docs/partials/linter-rules/_invalid-kubernetes-installer.mdx
================
**Correct**:

```yaml
apiVersion: cluster.kurl.sh/v1beta1
kind: Installer
spec:
  kubernetes:
    version: 1.24.5
```

**Incorrect**:

```yaml
apiVersion: cluster.kurl.sh/v1beta1
kind: Installer
spec:
  kubernetes:
    version: 1.24.x
  ekco:
    version: latest
```

================
File: docs/partials/linter-rules/_invalid-min-kots-version.mdx
================
```yaml
apiVersion: kots.io/v1beta1
kind: Application
spec:
  minKotsVersion: 1.0.0
```

================
File: docs/partials/linter-rules/_invalid-rendered-yaml.mdx
================
**Example Helm Chart**:
```yaml
apiVersion: kots.io/v1beta1
kind: HelmChart
metadata:
  name: nginx-chart
spec:
  chart:
    name: nginx-chart
    chartVersion: 0.1.0
  helmVersion: v3
  useHelmInstall: true
  builder: {}
  values:
    image: repl{{ ConfigOption `nginx_image`}}
```

**Correct Config**:
```yaml
apiVersion: kots.io/v1beta1
kind: Config
metadata:
  name: nginx-config
spec:
  groups:
  - name: nginx-deployment-config
    title: nginx deployment config
    items:
    - name: nginx_image
      title: image
      type: text
      default: "nginx"
```

**Resulting Rendered Helm Chart**:
+```yaml +apiVersion: kots.io/v1beta1 +kind: HelmChart +metadata: + name: nginx-chart +spec: + chart: + name: nginx-chart + chartVersion: 0.1.0 + helmVersion: v3 + useHelmInstall: true + builder: {} + values: + image: nginx +``` +**Incorrect Config**: +```yaml +apiVersion: kots.io/v1beta1 +kind: Config +metadata: + name: nginx-config +spec: + groups: + - name: nginx-deployment-config + items: + - name: nginx_image + title: image + type: text + default: "***HIDDEN***" +``` + +**Resulting Lint Error**: +```json +{ + "lintExpressions": [ + { + "rule": "invalid-rendered-yaml", + "type": "error", + "message": "yaml: did not find expected alphabetic or numeric character: image: ***HIDDEN***", + "path": "nginx-chart.yaml", + "positions": null + } + ], + "isLintingComplete": false +} +``` +**Incorrectly Rendered Helm Chart**: +```yaml +apiVersion: kots.io/v1beta1 +kind: HelmChart +metadata: + name: nginx-chart +spec: + chart: + name: nginx-chart + chartVersion: 0.1.0 + helmVersion: v3 + useHelmInstall: true + builder: {} + values: + image: ***HIDDEN*** +``` + +================ +File: docs/partials/linter-rules/_invalid-target-kots-version.mdx +================ +```yaml +apiVersion: kots.io/v1beta1 +kind: Application +spec: + targetKotsVersion: 1.0.0 +``` + +================ +File: docs/partials/linter-rules/_invalid-yaml.mdx +================ +**Correct**: + +```yaml +spec: + kubernetes: + version: 1.24.5 +``` + +**Incorrect**: + +```yaml +spec: + kubernetes: version 1.24.x +``` + +================ +File: docs/partials/linter-rules/_linter-definition.mdx +================ +The linter checks the manifest files in Replicated KOTS releases to ensure that there are no YAML syntax errors, that all required manifest files are present in the release to support installation with KOTS, and more. 
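
As a hedged illustration of how these rules are typically exercised, the following sketch lints a local release directory with the Replicated CLI before promoting a release; the directory name is a placeholder:

```bash
# Check release manifests against the linter rules described above
replicated release lint --yaml-dir ./manifests
```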
+ +================ +File: docs/partials/linter-rules/_may-contain-secrets.mdx +================ +```yaml +data: + ENV_VAR_1: "y2X4hPiAKn0Pbo24/i5nlInNpvrL/HJhlSCueq9csamAN8g5y1QUjQnNL7btQ==" +``` + +================ +File: docs/partials/linter-rules/_missing-api-version-field.mdx +================ +```yaml +apiVersion: kots.io/v1beta1 +``` + +================ +File: docs/partials/linter-rules/_missing-kind-field.mdx +================ +```yaml +kind: Config +``` + +================ +File: docs/partials/linter-rules/_preflight-spec.mdx +================ +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: Preflight +``` + +================ +File: docs/partials/linter-rules/_privileged.mdx +================ +```yaml +spec: + privileged: true +``` + +================ +File: docs/partials/linter-rules/_repeat-option-malformed-yamlpath.mdx +================ +```yaml +spec: + groups: + - name: ports + items: + - name: service_port + yamlPath: 'spec.ports[0]' +``` + +================ +File: docs/partials/linter-rules/_repeat-option-missing-template.mdx +================ +```yaml +spec: + groups: + - name: ports + items: + - name: service_port + title: Service Port + type: text + repeatable: true + templates: + - apiVersion: v1 + kind: Service + name: my-service + namespace: my-app + yamlPath: 'spec.ports[0]' + - apiVersion: v1 + kind: Service + name: my-service + namespace: my-app +``` + +================ +File: docs/partials/linter-rules/_repeat-option-missing-valuesByGroup.mdx +================ +```yaml +spec: + groups: + - name: ports + items: + - name: service_port + title: Service Port + type: text + repeatable: true + valuesByGroup: + ports: + port-default-1: "80" +``` + +================ +File: docs/partials/linter-rules/_replicas-1.mdx +================ +```yaml +spec: + replicas: 1 +``` + +================ +File: docs/partials/linter-rules/_resource-limits-cpu.mdx +================ +```yaml +spec: + containers: + - name: nginx + resources: + limits: + memory: '256Mi' + # note the lack of a cpu field +``` + +================ +File: docs/partials/linter-rules/_resource-limits-memory.mdx +================ +```yaml +spec: + containers: + - name: nginx + resources: + limits: + cpu: '500m' + # note the lack of a memory field +``` + +================ +File: docs/partials/linter-rules/_resource-requests-cpu.mdx +================ +```yaml +spec: + containers: + - name: nginx + resources: + requests: + memory: '32Mi' + # note the lack of a cpu field +``` + +================ +File: docs/partials/linter-rules/_resource-requests-memory.mdx +================ +```yaml +spec: + containers: + - name: nginx + resources: + requests: + cpu: '100m' + # note the lack of a memory field +``` + +================ +File: docs/partials/linter-rules/_troubleshoot-spec.mdx +================ +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: SupportBundle +``` + +================ +File: docs/partials/linter-rules/_volume-docker-sock.mdx +================ +```yaml +spec: + volumes: + - hostPath: + path: /var/run/docker.sock +``` + +================ +File: docs/partials/linter-rules/_volumes-host-paths.mdx +================ +```yaml +spec: + volumes: + - hostPath: + path: /data +``` + +================ +File: docs/partials/monitoring/_limitation-ec.mdx +================ +Monitoring applications with Prometheus is not supported for installations with [Replicated Embedded Cluster](/vendor/embedded-overview). 
================
File: docs/partials/monitoring/_overview-prom.mdx
================
The KOTS Admin Console can use the open source systems monitoring tool Prometheus to collect metrics on an application and the cluster where the application is installed. Prometheus components include the main Prometheus server, which scrapes and stores time series data, an Alertmanager for alerting on metrics, and Grafana for visualizing metrics. For more information about Prometheus, see [What is Prometheus?](https://prometheus.io/docs/introduction/overview/) in the Prometheus documentation.

The Admin Console exposes graphs with key metrics collected by Prometheus in the **Monitoring** section of the dashboard. By default, the Admin Console displays the following graphs:

* Cluster disk usage
* Pod CPU usage
* Pod memory usage

In addition to these default graphs, application developers can also expose business and application level metrics and alerts on the dashboard.

The following screenshot shows an example of the **Monitoring** section on the Admin Console dashboard with the Disk Usage, CPU Usage, and Memory Usage default graphs:

![Graphs on the Admin Console dashboard](/images/kotsadm-dashboard-graph.png)

[View a larger version of this image](/images/kotsadm-dashboard-graph.png)

================
File: docs/partials/preflights/_analyzers-note.mdx
================
For basic examples of checking CPU, memory, and disk capacity, see [Node Resources Analyzer](https://troubleshoot.sh/reference/analyzers/node-resources/) in the Troubleshoot documentation.

================
File: docs/partials/preflights/_http-requests-cr.mdx
================
```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: Preflight
metadata:
  name: preflight-checks
spec:
  collectors:
    - http:
        collectorName: slack
        get:
          url: https://api.slack.com/methods/api.test
  analyzers:
    - textAnalyze:
        checkName: Slack Accessible
        fileName: slack.json
        regex: '"status": 200,'
        outcomes:
          - pass:
              when: "true"
              message: "Can access the Slack API"
          - fail:
              when: "false"
              message: "Cannot access the Slack API. Check that the server can reach the internet and check [status.slack.com](https://status.slack.com)."
```

================
File: docs/partials/preflights/_http-requests-secret.mdx
================
```yaml
apiVersion: v1
kind: Secret
metadata:
  labels:
    troubleshoot.sh/kind: preflight
  name: "{{ .Release.Name }}-preflight-config"
stringData:
  preflight.yaml: |
    apiVersion: troubleshoot.sh/v1beta2
    kind: Preflight
    metadata:
      name: preflight-sample
    spec:
      collectors:
        - http:
            collectorName: slack
            get:
              url: https://api.slack.com/methods/api.test
      analyzers:
        - textAnalyze:
            checkName: Slack Accessible
            fileName: slack.json
            regex: '"status": 200,'
            outcomes:
              - pass:
                  when: "true"
                  message: "Can access the Slack API"
              - fail:
                  when: "false"
                  message: "Cannot access the Slack API. Check that the server can reach the internet and check [status.slack.com](https://status.slack.com)."
+``` + +================ +File: docs/partials/preflights/_k8s-distro-cr.mdx +================ +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: Preflight +metadata: + name: my-app +spec: + analyzers: + - distribution: + checkName: Kubernetes distribution + outcomes: + - fail: + when: "== docker-desktop" + message: The application does not support Docker Desktop Clusters + - fail: + when: "== microk8s" + message: The application does not support Microk8s Clusters + - fail: + when: "== minikube" + message: The application does not support Minikube Clusters + - pass: + when: "== eks" + message: EKS is a supported distribution + - pass: + when: "== gke" + message: GKE is a supported distribution + - pass: + when: "== aks" + message: AKS is a supported distribution + - pass: + when: "== kurl" + message: KURL is a supported distribution + - pass: + when: "== digitalocean" + message: DigitalOcean is a supported distribution + - warn: + message: Unable to determine the distribution of Kubernetes +``` + +================ +File: docs/partials/preflights/_k8s-distro-secret.mdx +================ +```yaml +apiVersion: v1 +kind: Secret +metadata: + labels: + troubleshoot.sh/kind: preflight + name: "{{ .Release.Name }}-preflight-config" +stringData: + preflight.yaml: | + apiVersion: troubleshoot.sh/v1beta2 + kind: Preflight + metadata: + name: preflight-sample + spec: + analyzers: + - distribution: + checkName: Kubernetes distribution + outcomes: + - fail: + when: "== docker-desktop" + message: The application does not support Docker Desktop Clusters + - fail: + when: "== microk8s" + message: The application does not support Microk8s Clusters + - fail: + when: "== minikube" + message: The application does not support Minikube Clusters + - pass: + when: "== eks" + message: EKS is a supported distribution + - pass: + when: "== gke" + message: GKE is a supported distribution + - pass: + when: "== aks" + message: AKS is a supported distribution + - pass: + when: "== kurl" + message: KURL is a supported distribution + - pass: + when: "== digitalocean" + message: DigitalOcean is a supported distribution + - warn: + message: Unable to determine the distribution of Kubernetes +``` + +================ +File: docs/partials/preflights/_k8s-version-cr.mdx +================ +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: Preflight +metadata: + name: my-app +spec: + analyzers: + - clusterVersion: + outcomes: + - fail: + when: "< 1.25.0" + message: The application requires Kubernetes 1.25.0 or later, and recommends 1.28.0. + uri: https://www.kubernetes.io + - warn: + when: "< 1.28.0" + message: Your cluster meets the minimum version of Kubernetes, but we recommend you update to 1.28.0 or later. + uri: https://kubernetes.io + - pass: + message: Your cluster meets the recommended and required versions of Kubernetes. +``` + +================ +File: docs/partials/preflights/_k8s-version-secret.mdx +================ +```yaml +apiVersion: v1 +kind: Secret +metadata: + labels: + troubleshoot.sh/kind: preflight + name: "{{ .Release.Name }}-preflight-config" +stringData: + preflight.yaml: | + apiVersion: troubleshoot.sh/v1beta2 + kind: Preflight + metadata: + name: preflight-sample + spec: + analyzers: + - clusterVersion: + outcomes: + - fail: + when: "< 1.25.0" + message: The application requires Kubernetes 1.25.0 or later, and recommends 1.28.0. + uri: https://www.kubernetes.io + - warn: + when: "< 1.28.0" + message: Your cluster meets the minimum version of Kubernetes, but we recommend you update to 1.28.0 or later. 
+ uri: https://kubernetes.io + - pass: + message: Your cluster meets the recommended and required versions of Kubernetes. +``` + +================ +File: docs/partials/preflights/_mysql-cr.mdx +================ +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: Preflight +metadata: + name: my-app +spec: + collectors: + - mysql: + collectorName: mysql + uri: 'repl{{ ConfigOption "db_user" }}:repl{{ConfigOption "db_password" }}@tcp(repl{{ ConfigOption "db_host" }}:repl{{ConfigOption "db_port" }})/repl{{ ConfigOption "db_name" }}' + analyzers: + - mysql: + # `strict: true` prevents installation from continuing if the preflight check fails + strict: true + checkName: Must be MySQL 8.x or later + collectorName: mysql + outcomes: + - fail: + when: connected == false + message: Cannot connect to MySQL server + - fail: + when: version < 8.x + message: The MySQL server must be at least version 8 + - pass: + message: The MySQL server is ready +``` + +================ +File: docs/partials/preflights/_mysql-secret.mdx +================ +```yaml +apiVersion: v1 +kind: Secret +metadata: + labels: + troubleshoot.sh/kind: preflight + name: "{{ .Release.Name }}-preflight-config" +stringData: + preflight.yaml: | + apiVersion: troubleshoot.sh/v1beta2 + kind: Preflight + metadata: + name: preflight-sample + spec: + {{ if eq .Values.global.mysql.enabled true }} + collectors: + - mysql: + collectorName: mysql + uri: '{{ .Values.global.externalDatabase.user }}:{{ .Values.global.externalDatabase.password }}@tcp({{ .Values.global.externalDatabase.host }}:{{ .Values.global.externalDatabase.port }})/{{ .Values.global.externalDatabase.database }}?tls=false' + {{ end }} + analyzers: + {{ if eq .Values.global.mysql.enabled true }} + - mysql: + checkName: Must be MySQL 8.x or later + collectorName: mysql + outcomes: + - fail: + when: connected == false + message: Cannot connect to MySQL server + - fail: + when: version < 8.x + message: The MySQL server must be at least version 8 + - pass: + message: The MySQL server is ready + {{ end }} +``` + +================ +File: docs/partials/preflights/_node-count-secret.mdx +================ +```yaml +apiVersion: v1 +kind: Secret +metadata: + labels: + troubleshoot.sh/kind: preflight + name: "{{ .Release.Name }}-preflight-config" +stringData: + preflight.yaml: | + apiVersion: troubleshoot.sh/v1beta2 + kind: Preflight + metadata: + name: preflight-sample + spec: + analyzers: + - nodeResources: + checkName: Node Count Check + outcomes: + - fail: + when: 'count() > {{ .Values.global.maxNodeCount }}' + message: "The cluster has more than {{ .Values.global.maxNodeCount }} nodes." + - pass: + message: You have the correct number of nodes. 
+``` + +================ +File: docs/partials/preflights/_node-cpu-cr.mdx +================ +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: Preflight +metadata: + name: my-app +spec: + analyzers: + - nodeResources: + checkName: Total CPU Cores in the cluster is 4 or greater + outcomes: + - fail: + when: "sum(cpuCapacity) < 4" + message: The cluster must contain at least 4 cores + uri: https://kurl.sh/docs/install-with-kurl/system-requirements + - pass: + message: There are at least 4 cores in the cluster +``` + +================ +File: docs/partials/preflights/_node-cpu-secret.mdx +================ +```yaml +apiVersion: v1 +kind: Secret +metadata: + labels: + troubleshoot.sh/kind: preflight + name: "{{ .Release.Name }}-preflight-config" +stringData: + preflight.yaml: | + apiVersion: troubleshoot.sh/v1beta2 + kind: Preflight + metadata: + name: preflight-sample + spec: + analyzers: + - nodeResources: + checkName: Total CPU Cores in the cluster is 4 or greater + outcomes: + - fail: + when: "sum(cpuCapacity) < 4" + message: The cluster must contain at least 4 cores + uri: https://kurl.sh/docs/install-with-kurl/system-requirements + - pass: + message: There are at least 4 cores in the cluster +``` + +================ +File: docs/partials/preflights/_node-ephem-storage-cr.mdx +================ +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: Preflight +metadata: + name: my-app +spec: + analyzers: + - nodeResources: + checkName: Every node in the cluster must have at least 40 GB of ephemeral storage, with 100 GB recommended + outcomes: + - fail: + when: "min(ephemeralStorageCapacity) < 40Gi" + message: All nodes must have at least 40 GB of ephemeral storage. + uri: https://kurl.sh/docs/install-with-kurl/system-requirements + - warn: + when: "min(ephemeralStorageCapacity) < 100Gi" + message: All nodes are recommended to have at least 100 GB of ephemeral storage. + uri: https://kurl.sh/docs/install-with-kurl/system-requirements + - pass: + message: All nodes have at least 100 GB of ephemeral storage. +``` + +================ +File: docs/partials/preflights/_node-ephem-storage-secret.mdx +================ +```yaml +apiVersion: v1 +kind: Secret +metadata: + labels: + troubleshoot.sh/kind: preflight + name: "{{ .Release.Name }}-preflight-config" +stringData: + preflight.yaml: | + apiVersion: troubleshoot.sh/v1beta2 + kind: Preflight + metadata: + name: preflight-sample + spec: + analyzers: + - nodeResources: + checkName: Every node in the cluster must have at least 40 GB of ephemeral storage, with 100 GB recommended + outcomes: + - fail: + when: "min(ephemeralStorageCapacity) < 40Gi" + message: All nodes must have at least 40 GB of ephemeral storage. + uri: https://kurl.sh/docs/install-with-kurl/system-requirements + - warn: + when: "min(ephemeralStorageCapacity) < 100Gi" + message: All nodes are recommended to have at least 100 GB of ephemeral storage. + uri: https://kurl.sh/docs/install-with-kurl/system-requirements + - pass: + message: All nodes have at least 100 GB of ephemeral storage. +``` + +================ +File: docs/partials/preflights/_node-mem-cr.mdx +================ +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: Preflight +metadata: + name: my-app +spec: + analyzers: + - nodeResources: + checkName: Every node in the cluster must have at least 8 GB of memory, with 32 GB recommended + outcomes: + - fail: + when: "min(memoryCapacity) < 8Gi" + message: All nodes must have at least 8 GB of memory. 
+ uri: https://kurl.sh/docs/install-with-kurl/system-requirements + - warn: + when: "min(memoryCapacity) < 32Gi" + message: All nodes are recommended to have at least 32 GB of memory. + uri: https://kurl.sh/docs/install-with-kurl/system-requirements + - pass: + message: All nodes have at least 32 GB of memory. +``` + +================ +File: docs/partials/preflights/_node-mem-secret.mdx +================ +```yaml +apiVersion: v1 +kind: Secret +metadata: + labels: + troubleshoot.sh/kind: preflight + name: "{{ .Release.Name }}-preflight-config" +stringData: + preflight.yaml: | + apiVersion: troubleshoot.sh/v1beta2 + kind: Preflight + metadata: + name: preflight-sample + spec: + analyzers: + - nodeResources: + checkName: Every node in the cluster must have at least 8 GB of memory, with 32 GB recommended + outcomes: + - fail: + when: "min(memoryCapacity) < 8Gi" + message: All nodes must have at least 8 GB of memory. + uri: https://kurl.sh/docs/install-with-kurl/system-requirements + - warn: + when: "min(memoryCapacity) < 32Gi" + message: All nodes are recommended to have at least 32 GB of memory. + uri: https://kurl.sh/docs/install-with-kurl/system-requirements + - pass: + message: All nodes have at least 32 GB of memory. +``` + +================ +File: docs/partials/preflights/_node-req-cr.mdx +================ +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: Preflight +metadata: + name: my-app +spec: + analyzers: + - nodeResources: + checkName: Node requirements + filters: + # Must have 1 node with 16 GB (available) memory and 5 cores (on a single node) with amd64 architecture + allocatableMemory: 16Gi + cpuArchitecture: amd64 + cpuCapacity: "5" + outcomes: + - fail: + when: "count() < 1" + message: This application requires at least 1 node with 16GB available memory and 5 cpu cores with amd64 architecture + - pass: + message: This cluster has a node with enough memory and cpu cores +``` + +================ +File: docs/partials/preflights/_node-req-secret.mdx +================ +```yaml +apiVersion: v1 +kind: Secret +metadata: + labels: + troubleshoot.sh/kind: preflight + name: "{{ .Release.Name }}-preflight-config" +stringData: + preflight.yaml: | + apiVersion: troubleshoot.sh/v1beta2 + kind: Preflight + metadata: + name: preflight-sample + spec: + analyzers: + - nodeResources: + checkName: Node requirements + filters: + # Must have 1 node with 16 GB (available) memory and 5 cores (on a single node) with amd64 architecture + allocatableMemory: 16Gi + cpuArchitecture: amd64 + cpuCapacity: "5" + outcomes: + - fail: + when: "count() < 1" + message: This application requires at least 1 node with 16GB available memory and 5 cpu cores with amd64 architecture + - pass: + message: This cluster has a node with enough memory and cpu cores +``` + +================ +File: docs/partials/preflights/_node-storage-cr.mdx +================ +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: Preflight +metadata: + name: my-app +spec: + analyzers: + - storageClass: + checkName: Required storage classes + storageClassName: "default" + outcomes: + - fail: + message: Could not find a storage class called "default". + - pass: + message: A storage class called "default" is present. 
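+          # The storageClass analyzer passes or fails based on whether a
+          # StorageClass named storageClassName exists in the cluster, so
+          # these outcomes need no `when` expressions.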
+``` + +================ +File: docs/partials/preflights/_node-storage-secret.mdx +================ +```yaml +apiVersion: v1 +kind: Secret +metadata: + labels: + troubleshoot.sh/kind: preflight + name: "{{ .Release.Name }}-preflight-config" +stringData: + preflight.yaml: | + apiVersion: troubleshoot.sh/v1beta2 + kind: Preflight + metadata: + name: preflight-sample + spec: + analyzers: + - storageClass: + checkName: Required storage classes + storageClassName: "default" + outcomes: + - fail: + message: Could not find a storage class called "default". + - pass: + message: A storage class called "default" is present. +``` + +================ +File: docs/partials/preflights/_preflight-sb-helm-templates.mdx +================ +For more information about using Helm templates with collectors and analyzers, see [Using Helm Templates in Specifications](preflight-sb-helm-templates-about). + +================ +File: docs/partials/preflights/_preflights-add-analyzers.mdx +================ +You must add analyzers to analyze the data from the collectors that you specified. Define the criteria for the pass, fail, and warn outcomes, and specify custom messages for each. + +For example, you can set a `fail` outcome if the MySQL version is less than the minimum required. Then, specify a message to display that informs your customer of the reasons for the failure and steps they can take to fix the issue. + +================ +File: docs/partials/preflights/_preflights-define-xref.mdx +================ +For more information about defining collectors and analyzers, see [Collecting Data](https://troubleshoot.sh/docs/collect/) +and [Analyzing Data](https://troubleshoot.sh/docs/analyze/) in the Troubleshoot documentation. + +================ +File: docs/partials/preflights/_preflights-define.mdx +================ +Any preflight checks you run are dependent on your application needs. This section gives some guidance about how to think about using collectors and analyzers to design preflight checks. + +================ +File: docs/partials/preflights/_preflights-sb-about.mdx +================ +Preflight checks and support bundles are provided by the Troubleshoot open source project, which is maintained by Replicated. Troubleshoot is a kubectl plugin that provides diagnostic tools for Kubernetes applications. For more information, see the open source [Troubleshoot](https://troubleshoot.sh/docs/collect/) documentation. + +Preflight checks and support bundles analyze data from customer environments to provide insights that help users to avoid or troubleshoot common issues with an application: +* **Preflight checks** run before an application is installed to check that the customer environment meets the application requirements. +* **Support bundles** collect troubleshooting data from customer environments to help users diagnose problems with application deployments. + +================ +File: docs/partials/preflights/_preflights-sb-note.mdx +================ +For a comprehensive overview, see [About Preflight Checks and Support Bundles](preflight-support-bundle-about). + +================ +File: docs/partials/preflights/_preflights-spec-locations.mdx +================ +For more information about specifications, see [About Specifications](preflight-support-bundle-about#about-specifications) in _About Preflight Checks and Support Bundles_. 
+
+================
+File: docs/partials/preflights/_preflights-strict.mdx
+================
+If any strict preflight checks are configured, the `--skip-preflights` flag is not honored because the preflight checks must run and contain no failures before the application is deployed.
+
+When the `--deploy` option is provided and there are strict preflight checks, the preflight checks always run. The deployment waits for up to 15 minutes for the preflight checks to complete. If the checks complete without strict preflight failures, the release deploys. If the checks do not complete within 15 minutes, the release does not deploy. If there are one or more strict preflight failures, the release does not deploy.
+
+For more information about strict preflight checks, see [Defining Preflight Checks](/vendor/preflight-defining).
+
+================
+File: docs/partials/proxy-service/_step-creds.mdx
+================
+Provide read-only credentials for the external private registry in your Replicated account. This allows Replicated to access the images through the proxy registry. See [Add Credentials for an External Registry](packaging-private-images#add-credentials-for-an-external-registry) in _Connecting to an External Registry_.
+
+================
+File: docs/partials/proxy-service/_step-custom-domain.mdx
+================
+(Optional) Add a custom domain for the proxy registry instead of `proxy.replicated.com`. See [Using Custom Domains](custom-domains-using).
+
+================
+File: docs/partials/redactors/_redactors-about.mdx
+================
+Troubleshoot has built-in redactors to prevent sensitive data from being collected when support bundles are generated. You can add more redactors if needed. For more information, see [Redacting Data](https://troubleshoot.sh/docs/redact/) in the Troubleshoot documentation.
+
+================
+File: docs/partials/releases/_required-releases-description.mdx
+================
+When a release is required, KOTS requires users to upgrade to that version before they can upgrade to a later version. For example, if you select **Prevent this release from being skipped during upgrades** for release v2.0.0, users with v1.0.0 deployed must upgrade to v2.0.0 before they can upgrade to a version later than v2.0.0, such as v2.1.0.
+
+================
+File: docs/partials/releases/_required-releases-limitations.mdx
+================
+Required releases have the following limitations:
+
+ * Required releases are supported in KOTS v1.68.0 and later.
+ * After users deploy a required version, they can no longer redeploy (roll back to) versions earlier than the required version, even if `allowRollback` is true in the Application custom resource manifest. For more information, see [`allowRollback`](/reference/custom-resource-application#allowrollback) in the Application custom resource topic.
+ * If you change the channel an existing customer is assigned to, the Admin Console always fetches the latest release on the new channel, regardless of any required releases on the channel. For more information, see [Channel Assignment](licenses-about#channel-assignment) in _About Customers_.
+ * Required releases are supported for KOTS installations only and are not supported for releases installed with Helm. The **Prevent this release from being skipped during upgrades** option has no effect if the user installs with Helm.
+
+================
+File: docs/partials/releases/_version-label-reqs-helm.mdx
+================
+* The version label for the release must match the version label from one of the `Chart.yaml` files in the release.
+* If there is one Helm chart in the release, Replicated automatically uses the version from the `Chart.yaml` file.
+* If there is more than one Helm chart in the release, Replicated uses the version label from one of the `Chart.yaml` files. You can edit the version label for the release to use the version label from a different `Chart.yaml` file.
+
+================
+File: docs/partials/replicated-cli/_app.mdx
+================
+
+    --app
+    string
+    The app slug or app ID to use in all calls. The default uses the $REPLICATED_APP environment variable.
+
+
+================
+File: docs/partials/replicated-cli/_authorize-with-token-note.mdx
+================
+:::note
+The `replicated login` command creates a token after you log in to your vendor account in a browser and saves it to a config file. Alternatively, if you do not have access to a browser, you can set the `REPLICATED_API_TOKEN` environment variable to authenticate. For more information, see [(Optional) Set Environment Variables](#env-var) below.
+:::
+
+================
+File: docs/partials/replicated-cli/_authtype.mdx
+================
+
+    --authtype
+    string
+    Authorization type for the registry. Default: password
+
+
+================
+File: docs/partials/replicated-cli/_chart-yaml-dir-reqs.mdx
+================
+:::note
+If your release supports installations with Replicated KOTS, `--yaml-dir` is required. If your release supports installations with the Helm CLI only, either `--yaml-dir` or `--chart` can be used.
+:::
+
+================
+File: docs/partials/replicated-cli/_help.mdx
+================
+
+    -h, --help
+
+    Help for the command.
+
+
+================
+File: docs/partials/replicated-cli/_login.mdx
+================
+Authorize the Replicated CLI:
+
+    ```
+    replicated login
+    ```
+
+    In the browser window that opens, complete the prompts to log in to your vendor account and authorize the CLI.
+
+    Authorize replicated cli web page
+
+    [View a larger version of this image](/images/authorize-repl-cli.png)
+
+================
+File: docs/partials/replicated-cli/_logout.mdx
+================
+(Optional) When you are done using the Replicated CLI, remove any stored credentials created by the `replicated login` command:
+
+    ```
+    replicated logout
+    ```
+
+================
+File: docs/partials/replicated-cli/_output.mdx
+================
+
+    --output
+    string

+    The output format to use. Valid values: `json` or `table`. Some commands also support `wide`. Default: `table`

    + + + +================ +File: docs/partials/replicated-cli/_password-stdin.mdx +================ + + --password-stdin + + Takes the password from stdin. + + +================ +File: docs/partials/replicated-cli/_password.mdx +================ + + --password + string + The password to use when authenticating to the registry. + + +================ +File: docs/partials/replicated-cli/_skip-validation.mdx +================ + + --skip-validation + + Skips the validation of the registry (not recommended). + + +================ +File: docs/partials/replicated-cli/_sudo-install.mdx +================ +:::note +If you do not have root access to the `/usr/local/bin` directory, you can install with sudo by running `sudo mv replicated /usr/local/bin/replicated` instead of `mv replicated /usr/local/bin/replicated`. +::: + +================ +File: docs/partials/replicated-cli/_token-stdin.mdx +================ + + --token-stdin + + Takes the token from stdin. + + +================ +File: docs/partials/replicated-cli/_token.mdx +================ + + --token + string + The API token used to access your application in the Vendor API. The default uses the $REPLICATED_API_TOKEN environment variable. + + +================ +File: docs/partials/replicated-cli/_username.mdx +================ + + --username + string + The username with which to authenticate to the registry. + + +================ +File: docs/partials/replicated-cli/_verify-install.mdx +================ +Verify that the installation was successful: + + ``` + replicated --help + ``` + +================ +File: docs/partials/replicated-cli/_yaml-dir.mdx +================ + + --yaml-dir + path + The directory containing multiple YAML manifest files for a release. (Required) + + +================ +File: docs/partials/replicated-sdk/_401-unauthorized.mdx +================ +:::note +If you see a `401 Unauthorized` error message, log out of the Replicated registry by running `helm registry logout registry.replicated.com` and then run `helm package . --dependency-update` again. +::: + +================ +File: docs/partials/replicated-sdk/_dependency-yaml.mdx +================ +```yaml +# Chart.yaml +dependencies: +- name: replicated + repository: oci://registry.replicated.com/library + version: 1.1.1 +``` + +For the latest version information for the Replicated SDK, see the [replicated-sdk repository](https://github.com/replicatedhq/replicated-sdk/releases) in GitHub. + +================ +File: docs/partials/replicated-sdk/_integration-mode-install.mdx +================ +You can install the Replicated SDK in integration mode to develop locally against the SDK API without needing to add the SDK to your application, create a release in the Replicated Vendor Portal, or make changes in your environment. You can also use integration mode to test sending instance data to the Vendor Portal, including any custom metrics that you configure. + +To use integration mode, install the Replicated SDK as a standalone component using a valid Development license created in the Vendor Portal. After you install in integration mode, the SDK provides default mock data for requests to the SDK API `app` endpoints. Requests to the `license` endpoints use the real data from your Development license. + +To install the SDK in integration mode: + +1. Create a Development license that you can use to install the SDK in integration mode: + + 1. In the Vendor Portal, go to **Customers** and click **Create customer**. + + 1. Complete the following fields: + + 1. 
For **Customer name**, add a name for the customer. + + 1. For **Assigned channel**, assign the customer to the channel that you use for testing. For example, Unstable. + + 1. For **Customer type**, select **Development**. + + 1. For **Customer email**, add the email address that you want to use for the license. + + 1. For **Install types**, ensure that the **Existing Cluster (Helm CLI)** option is enabled. + + 1. (Optional) Add any license field values that you want to use for testing: + + 1. For **Expiration policy**, you can add an expiration date for the license. + + 1. For **Custom fields**, you can add values for any custom license fields in your application. For information about how to create custom license fields, see [Managing Customer License Fields](/vendor/licenses-adding-custom-fields). + + 1. Click **Save Changes**. + +1. On the **Manage customer** page for the customer you created, click **Helm install instructions**. + + Helm install instructions button on the manage customer page + + [View a larger version of this image](/images/helm-install-instructions-button.png) + +1. In the **Helm install instructions** dialog, copy and run the command to log in to the Replicated registry. + + Registry login command in the Helm install instructions dialog + + [View a larger version of this image](/images/helm-install-instructions-registry-login.png) + +1. From the same dialog, copy and run the command to install the SDK in integration mode: + + SDK integration mode install command in the Helm install instructions dialog + + [View a larger version of this image](/images/helm-install-instructions-sdk-integration.png) + +1. Make requests to the SDK API from your application. You can access the SDK API for testing by forwarding the API service to your local machine. For more information, see [Port Forwarding the SDK API Service](/vendor/replicated-sdk-development#port-forward). + +================ +File: docs/partials/replicated-sdk/_kots-version-req.mdx +================ +To install the SDK with a Replicated installer, KOTS v1.104.0 or later and the SDK version 1.0.0-beta.12 or later are required. You can verify the version of KOTS installed with `kubectl kots version`. For Replicated Embedded Cluster installations, you can see the version of KOTS that is installed by your version of Embedded Cluster in the [Embedded Cluster Release Notes](/release-notes/rn-embedded-cluster). + +================ +File: docs/partials/replicated-sdk/_overview.mdx +================ +The Replicated SDK is a Helm chart that can be installed as a small service alongside your application. The SDK can be installed alongside applications packaged as Helm charts or Kubernetes manifests. The SDK can be installed using the Helm CLI or KOTS. + +For information about how to distribute and install the SDK with your application, see [Installing the Replicated SDK](/vendor/replicated-sdk-installing). + +Replicated recommends that the SDK is distributed with all applications because it provides access to key Replicated functionality, such as: + +* Automatic access to insights and operational telemetry for instances running in customer environments, including granular details about the status of different application resources. For more information, see [About Instance and Event Data](/vendor/instance-insights-event-data). +* An in-cluster API that you can use to embed Replicated features into your application, including: + * Collect custom metrics on instances running in online or air gap environments. 
See [Configuring Custom Metrics](/vendor/custom-metrics).
+    * Check customer license entitlements at runtime. See [Querying Entitlements with the Replicated SDK API](/vendor/licenses-reference-sdk) and [Verifying License Field Signatures with the Replicated SDK API](/vendor/licenses-verify-fields-sdk-api).
+    * Provide update checks to alert customers when new versions of your application are available for upgrade. See [Support Update Checks in Your Application](/reference/replicated-sdk-apis#support-update-checks-in-your-application) in _Replicated SDK API_.
+    * Programmatically name or tag instances from the instance itself. See [Programmatically Set Tags](/reference/replicated-sdk-apis#post-appinstance-tags).
+
+================
+File: docs/partials/replicated-sdk/_registry-logout.mdx
+================
+If you see a 401 Unauthorized error after running `helm dependency update`, run the following command to remove credentials from the Replicated registry, then re-run `helm dependency update`:
+
+```bash
+helm registry logout registry.replicated.com
+```
+
+For more information, see [401 Unauthorized Error When Updating Helm Dependencies](replicated-sdk-installing#401).
+
+================
+File: docs/partials/replicated-sdk/_sdk-values.mdx
+================
+When a user installs a Helm chart that includes the Replicated SDK as a dependency, a set of default SDK values are included in the `replicated` key of the parent chart's values file.
+
+For example:
+
+```yaml
+# values.yaml
+
+replicated:
+  enabled: true
+  appName: gitea
+  channelID: 2jKkegBMseH5w...
+  channelName: Beta
+  channelSequence: 33
+  integration:
+    enabled: true
+  license: {}
+  parentChartURL: oci://registry.replicated.com/gitea/beta/gitea
+  releaseCreatedAt: "2024-11-25T20:38:22Z"
+  releaseNotes: 'CLI release'
+  releaseSequence: 88
+  replicatedAppEndpoint: https://replicated.app
+  versionLabel: Beta-1234
+```
+
+These `replicated` values can be referenced by the application or set during installation as needed. For example, if users need to add labels or annotations to everything that runs in their cluster, then they can pass the labels or annotations to the relevant value in the SDK subchart.
+
+For the default Replicated SDK Helm chart values file, see [values.yaml.tmpl](https://github.com/replicatedhq/replicated-sdk/blob/main/chart/values.yaml.tmpl) in the [replicated-sdk](https://github.com/replicatedhq/replicated-sdk) repository in GitHub.
+
+The SDK Helm values also include a `replicated.license` field, which is a string that contains the YAML representation of the customer license. For more information about the built-in fields in customer licenses, see [Built-In License Fields](licenses-using-builtin-fields).
+
+================
+File: docs/partials/snapshots/_checkVersion.mdx
+================
+Run `velero version --client-only` to check the version of the velero CLI that you installed as part of [Installing the Velero CLI](snapshots-velero-cli-installing).
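+
+For example, the output is similar to the following (the version and commit vary by release):
+
+```
+Client:
+    Version: v1.10.1
+    Git commit: <commit hash>
+```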
+ +================ +File: docs/partials/snapshots/_installVelero.mdx +================ +Run one of the following commands to install Velero, depending on the version of the velero CLI you are using: + + * **Velero v1.10 and later**: + + ```bash + velero install \ + --no-default-backup-location \ + --no-secret \ + --use-node-agent --uploader-type=restic \ + --use-volume-snapshots=false \ + --plugins velero/velero-plugin-for-aws:v1.5.3 + ``` + + * **Velero versions earlier than v1.10**: + + ```bash + velero install \ + --no-default-backup-location \ + --no-secret \ + --use-restic \ + --use-volume-snapshots=false \ + --plugins velero/velero-plugin-for-aws:v1.5.3 + ``` + +================ +File: docs/partials/snapshots/_limitation-cli-restores.mdx +================ +Only full backups can be restored using the KOTS CLI. To restore an application from a partial backup, use the Admin Console. See [Restore the Application Only Using the Admin Console](/enterprise/snapshots-restoring-full#admin-console). + +================ +File: docs/partials/snapshots/_limitation-dr.mdx +================ +Only full backups that include both the application and the Admin Console can be restored to a new cluster in disaster recovery scenarios. Partial backups that include the application only _cannot_ be restored to a new cluster, and are therefore not useable for disaster recovery. + +================ +File: docs/partials/snapshots/_limitation-install-method.mdx +================ +Snapshots can be restored only to clusters that use the same installation method as the cluster the snapshot was taken from. For example, snapshots taken in an online (internet-connected) cluster must be restored to an online cluster. + +================ +File: docs/partials/snapshots/_limitation-no-ec-support.mdx +================ +The KOTS Snapshots feature is supported for existing cluster installations with KOTS and Replicated kURL installations only. Snapshots is not supported for Replicated Embedded Cluster installations. For more information about configuring backup and restore for Embedded Cluster, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery). + +================ +File: docs/partials/snapshots/_limitation-os.mdx +================ +Snapshots must be restored on the same operating system that the snapshot was taken on. For example, snapshots taken on a CentOS cluster must be restored on a CentOS cluster. + +================ +File: docs/partials/snapshots/_node-agent-mem-limit.mdx +================ +Increase the default memory limit for the node-agent (restic) Pod if your application is particularly large. For more information about configuring Velero resource requests and limits, see [Customize resource requests and limits](https://velero.io/docs/v1.10/customize-installation/#customize-resource-requests-and-limits) in the Velero documentation. + +For example, the following kubectl commands will increase the memory limit for the node-agent (restic) daemon set from the default of 1Gi to 2Gi. 
+ +**Velero 1.10 and later**: + +``` +kubectl -n velero patch daemonset node-agent -p '{"spec":{"template":{"spec":{"containers":[{"name":"node-agent","resources":{"limits":{"memory":"2Gi"}}}]}}}}' +``` + +**Velero versions earlier than 1.10**: + +``` +kubectl -n velero patch daemonset restic -p '{"spec":{"template":{"spec":{"containers":[{"name":"restic","resources":{"limits":{"memory":"2Gi"}}}]}}}}' +``` + +Alternatively, you can potentially avoid the node-agent (restic) Pod reaching the memory limit during snapshot creation by running the following kubectl command to lower the memory garbage collection target percentage on the node-agent (restic) daemon set: + +**Velero 1.10 and later**: + +``` +kubectl -n velero set env daemonset/node-agent GOGC=1 +``` + +**Velero versions earlier than 1.10**: + +``` +kubectl -n velero set env daemonset/restic GOGC=1 +``` + +================ +File: docs/partials/snapshots/_registryCredentialsNote.mdx +================ +:::note +It is typical for the velero and node-agent (restic) Pods to be in the `ErrImagePull` or `ImagePullBackOff` state after you run the `velero install` command because Velero does not support passing registry credentials during installation. In Replicated KOTS v1.94.0 and later, this situation resolves itself after you complete the instructions to configure the storage destination. + +If you are on an earlier version of KOTS, Replicated recommends that you upgrade to KOTS v1.94.0 or later. Otherwise, you must patch the Velero deployment manually and add the image pull secret to access the registry. +::: + +================ +File: docs/partials/snapshots/_resticDaemonSet.mdx +================ +Configure the Restic DaemonSet specification if your cluster uses one of the following Kubernetes distributions: + * RancherOS + * OpenShift + * Microsoft Azure + * VMware Tanzu Kubernetes Grid Integrated Edition + +For information about how to configure the Restic DaemonSet for these distributions, see [Configure Restic DaemonSet spec](https://velero.io/docs/v1.9/restic/#configure-restic-daemonset-spec) in the Velero documentation. + +================ +File: docs/partials/snapshots/_restore-types.mdx +================ +Snapshots supports the following types of restores: +* Restore both the application and the KOTS Admin Console (also referred to as a _full_ restore) +* Restore the KOTS Admin Console only +* Restore the application only (also referred to as a _partial_ restore) + +================ +File: docs/partials/snapshots/_restoreTable.mdx +================ + + + + + + + + + + + + + + + + + + + + + +
+| Restore Type | Description | Interface to Use |
+|---|---|---|
+| Full restore | Restores the Admin Console and the application. | KOTS CLI |
+| Partial restore | Restores the application only. | KOTS CLI or Admin Console |
+| Admin console | Restores the Admin Console only. | KOTS CLI |
+
+================
+File: docs/partials/snapshots/_step-get-backups.mdx
+================
+Run the [`kubectl kots get backups`](/reference/kots-cli-get-backups) command to get the list of full backups for the instance.
+
+================
+File: docs/partials/snapshots/_step-restore.mdx
+================
+Run the following command to restore a full backup:
+
+    ```bash
+    kubectl kots restore --from-backup BACKUP
+    ```
+    Replace `BACKUP` with the name of the backup to restore from.
+
+    For more information about the available options for the `kots restore` command, including application-only and Admin Console-only options, see [restore](/reference/kots-cli-restore-index/).
+
+================
+File: docs/partials/snapshots/_updateDefaultStorage.mdx
+================
+If Velero is already installed, you can update your storage destination in the Replicated Admin Console.
+
+For embedded clusters with the Velero add-on, you must update the default internal storage settings in the Admin Console because internal storage is insufficient for full backups.
+
+For more information about updating storage, see [Updating Settings in the Admin Console](snapshots-updating-with-admin-console).
+
+================
+File: docs/partials/status-informers/_aggregate-status-intro.mdx
+================
+When you provide more than one Kubernetes resource, Replicated aggregates all resource statuses to display a single application status.
+
+Replicated uses the least available resource status to represent the aggregate application status. For example, if at least one resource has an Unavailable status, then the aggregate application status is Unavailable.
+
+================
+File: docs/partials/status-informers/_aggregateStatus.mdx
+================
+The following table describes the resource statuses that define each aggregate application status:
+
+| Resource Statuses | Aggregate Application Status |
+|---|---|
+| No status available for any resource | Missing |
+| One or more resources Unavailable | Unavailable |
+| One or more resources Degraded | Degraded |
+| One or more resources Updating | Updating |
+| All resources Ready | Ready |
+
+================
+File: docs/partials/status-informers/_statusesTable.mdx
+================
+
+| Status | Deployment | StatefulSet | Service | Ingress | PVC | DaemonSet |
+|---|---|---|---|---|---|---|
+| Ready | Ready replicas equals desired replicas | Ready replicas equals desired replicas | All desired endpoints are ready, any load balancers have been assigned | All desired backend service endpoints are ready, any load balancers have been assigned | Claim is bound | Ready daemon pods equals desired scheduled daemon pods |
+| Updating | The deployed replicas are from a different revision | The deployed replicas are from a different revision | N/A | N/A | N/A | The deployed daemon pods are from a different revision |
+| Degraded | At least 1 replica is ready, but more are desired | At least 1 replica is ready, but more are desired | At least one endpoint is ready, but more are desired | At least one backend service endpoint is ready, but more are desired | N/A | At least one daemon pod is ready, but more are desired |
+| Unavailable | No replicas are ready | No replicas are ready | No endpoints are ready, no load balancer has been assigned | No backend service endpoints are ready, no load balancer has been assigned | Claim is pending or lost | No daemon pods are ready |
+
+For all resource types, Missing is an initial deployment status indicating that informers have not reported their status because the application has just been deployed and the underlying resource has not been created yet. After the resource is created, the status changes. However, if a resource changes from another status to Missing, then the resource was either deleted or the informers failed to report a status.
    + +================ +File: docs/partials/support-bundles/_configmap-note.mdx +================ +:::note +Alternatively, you can use a ConfigMap (`kind: ConfigMap`) if the specification will not collect private information from the cluster. +::: + +================ +File: docs/partials/support-bundles/_customize-support-bundle-spec.mdx +================ +When customizing your support bundle specifications, consider the following guidelines: + +- The `clusterInfo` and `clusterResources` collectors are useful because they collect a large amount of data to help with installation and debugging. + + ```yaml + apiVersion: troubleshoot.sh/v1beta2 + kind: SupportBundle + metadata: + name: collectors + spec: + collectors: + - clusterInfo: + exclude: false + - clusterResources: + exclude: false + ``` +- You can edit the default collector properties. If `clusterResources` is defined in your specification, the default namespace cannot be removed, but you can add a namespace to the `namespaces` field. + + ```yaml + apiVersion: troubleshoot.sh/v1beta2 + kind: SupportBundle + metadata: + name: collectors + spec: + collectors: + - clusterInfo: + exclude: false + - clusterResources: + namespaces: + - default + - APP_NAMESPACE + ``` + Replace `APP_NAMESPACE` with the application namespace. + +- Add application Pod logs and set the collection limits for the number of lines logged. Typically the selector attribute is matched to the labels. + + 1. To get the labels for an application, inspect the Pods YAML. + + 1. Create collectors to include logs from these pods in a bundle. Depending on the complexity of an application's labeling schema, you might need a few different declarations of the logs collector. You can include the `logs` collector specification multiple times. + + The limits field can support `maxAge` or `maxLines`. This limits the output to the constraints provided. **Default:** `maxLines: 10000` + + **Example:** + + ```yaml + apiVersion: troubleshoot.sh/v1beta2 + kind: SupportBundle + metadata: + name: collectors + spec: + collectors: + - logs: + selector: + - app=api + namespace: default + limits: + maxLines: 10000 + ``` + +- Add any custom collectors to the file. Collectors that Replicated recommends considering are: + + - **Kubernetes resources:** Use for custom resource definitions (CRDs), secrets, and ConfigMaps, if they are required for your application to work. + - **Databases:** Return a selection of rows or entire tables. + - **Volumes:** Ensure that an application's persistent state files exist, are readable/writeable, and have the right permissions. + - **Pods:** Run a pod from a custom image. + - **Files:** Copy files from pods and hosts. + - **HTTP:** Consume your own application APIs with HTTP requests. If your application has its own API that serves status, metrics, performance data, and so on, this information can be collected and analyzed. + +- Add analyzers based on conditions that you expect for your application. For example, you might require that a cluster have at least 2 CPUs and 4GB memory available. + + Good analyzers clearly identify failure modes. For example, if you can identify a log message from your database component that indicates a problem, you should write an analyzer that checks for that log. + + At a minimum, include application log analyzers. A simple text analyzer can detect specific log lines and inform an end user of remediation steps. 
+
+  Analyzers that Replicated recommends considering are:
+
+  - **Resource statuses:** Check the status of various resources, such as Deployments, StatefulSets, Jobs, and so on.
+  - **Regular expressions:** Analyze arbitrary data.
+  - **Databases:** Check the version and connection status.
+
+- If needed, you can add custom redactors to the default redactors. Disabling the redactors is not recommended.
+
+================
+File: docs/partials/support-bundles/_deploy-status-cr.mdx
+================
+```yaml
+apiVersion: troubleshoot.sh/v1beta2
+kind: SupportBundle
+metadata:
+  name: example
+spec:
+  collectors: []
+  analyzers:
+    - deploymentStatus:
+        name: api
+        namespace: default
+        outcomes:
+          - fail:
+              when: "< 1"
+              message: The API deployment does not have any ready replicas.
+          - warn:
+              when: "= 1"
+              message: The API deployment has only a single ready replica.
+          - pass:
+              message: There are multiple replicas of the API deployment ready.
+```
+
+================
+File: docs/partials/support-bundles/_deploy-status-secret.mdx
+================
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: example
+  labels:
+    troubleshoot.sh/kind: support-bundle
+stringData:
+  support-bundle-spec: |-
+    apiVersion: troubleshoot.sh/v1beta2
+    kind: SupportBundle
+    metadata:
+      name: example
+    spec:
+      collectors: []
+      analyzers:
+        - deploymentStatus:
+            name: api
+            namespace: default
+            outcomes:
+              - fail:
+                  when: "< 1"
+                  message: The API deployment does not have any ready replicas.
+              - warn:
+                  when: "= 1"
+                  message: The API deployment has only a single ready replica.
+              - pass:
+                  message: There are multiple replicas of the API deployment ready.
+```
+
+================
+File: docs/partials/support-bundles/_ec-support-bundle-intro.mdx
+================
+Embedded Cluster includes a default support bundle spec that collects both host- and cluster-level information.
+
+The host-level information is useful for troubleshooting failures related to host configuration like DNS, networking, or storage problems. Cluster-level information includes details about the components provided by Replicated, such as the Admin Console and Embedded Cluster operator that manage install and upgrade operations. If the cluster has not installed successfully and cluster-level information is not available, then it is excluded from the bundle.
+
+In addition to the host- and cluster-level details provided by the default Embedded Cluster spec, support bundles generated for Embedded Cluster installations also include app-level details provided by any custom support bundle specs that you included in the application release.
+
+================
+File: docs/partials/support-bundles/_generate-bundle-admin-console.mdx
+================
+The Replicated KOTS Admin Console includes a **Troubleshoot** page where you can generate a support bundle and review remediation suggestions for troubleshooting. You can also download the support bundle from the Admin Console.
+
+To generate a support bundle in the KOTS Admin Console:
+
+1. Log in to the Admin Console and go to the **Troubleshoot** tab.
+
+1. Click **Analyze** to start analyzing the application. Or, copy the command provided to generate a bundle from the CLI.
+
+   The analysis executes the support bundle plugin. After the analysis completes, the bundle is available on the **Troubleshoot** tab in the Admin Console. If any known issues are detected, they are highlighted with possible remediation suggestions.
+
+   :::note
+   No data leaves the cluster. Data is never sent across the internet or to anyone else.
+   :::
+
+1. (Optional) If enabled for your online installation, a **Send bundle to vendor** button might also be available. Clicking this button sends the support bundle directly to your vendor. Replicated recommends following up with your vendor to let them know the bundle has been provided.
+   Send bundle to vendor screen
+
+   [View a larger version of this image](/images/send-bundle-to-vendor.png)
+
+1. (Optional) Click **Download bundle** to download the support bundle. You can send the bundle to your vendor for assistance.
+
+================
+File: docs/partials/support-bundles/_generate-bundle-default-kots.mdx
+================
+For KOTS installations, you can generate a support bundle using the default KOTS spec. This is useful if the application does not have a support bundle spec included.
+
+#### Online Environments
+
+In an online environment, run the following command to generate a support bundle using the default KOTS spec:
+
+```
+kubectl support-bundle https://kots.io
+```
+
+#### Air Gap Environments
+
+For air gap environments, perform the following steps to generate a support bundle using the default KOTS spec:
+
+1. Run the following command from a computer with internet access to download the default KOTS spec:
+
+   ```
+   curl -o spec.yaml https://kots.io -H 'User-agent:Replicated_Troubleshoot/v1beta1'
+   ```
+
+1. Upload the `spec.yaml` file to your air gap server.
+
+1. Run the following command to create a support bundle using the uploaded `spec.yaml` file:
+
+   ```
+   kubectl support-bundle /path/to/spec.yaml
+   ```
+
+================
+File: docs/partials/support-bundles/_generate-bundle-ec.mdx
+================
+There are different steps to generate a support bundle depending on the version of Embedded Cluster installed.
+
+### For Versions 1.17.0 and Later
+
+For Embedded Cluster 1.17.0 and later, you can run the Embedded Cluster `support-bundle` command to generate a support bundle.
+
+The `support-bundle` command uses the default Embedded Cluster support bundle spec to collect both cluster- and host-level information. It also automatically includes any application-specific support bundle specs in the generated bundle.
+
+To generate a support bundle:
+
+1. SSH onto a controller node.
+
+   :::note
+   You can SSH onto a worker node to generate a support bundle that contains information specific to that node. However, when run on a worker node, the `support-bundle` command does not capture cluster-wide information.
+   :::
+
+1. Run the following command:
+
+   ```bash
+   sudo ./APP_SLUG support-bundle
+   ```
+
+   Where `APP_SLUG` is the unique slug for the application.
+
+### For Versions Earlier Than 1.17.0
+
+For Embedded Cluster versions earlier than 1.17.0, you can generate a support bundle from the shell using the kubectl support-bundle plugin.
+
+To generate a bundle with the support-bundle plugin, you pass the default Embedded Cluster spec to collect both cluster- and host-level information. You also pass the `--load-cluster-specs` flag, which discovers all support bundle specs that are defined in Secrets or ConfigMaps in the cluster. This ensures that any application-specific specs are also included in the bundle. For more information, see [Discover Cluster Specs](https://troubleshoot.sh/docs/support-bundle/discover-cluster-specs/) in the Troubleshoot documentation.
+
+To generate a bundle:
+
+1. SSH onto a controller node.
+
+1.
Use the Embedded Cluster shell command to start a shell with access to the cluster: + + ```bash + sudo ./APP_SLUG shell + ``` + Where `APP_SLUG` is the unique slug for the application. + + The output looks similar to the following: + + ```bash + __4___ + _ \ \ \ \ Welcome to APP_SLUG debug shell. + <'\ /_/_/_/ This terminal is now configured to access your cluster. + ((____!___/) Type 'exit' (or CTRL+d) to exit. + \0\0\0\0\/ Happy hacking. + ~~~~~~~~~~~ + root@alex-ec-2:/home/alex# export KUBECONFIG="/var/lib/embedded-cluster/k0s/pki/admin.conf" + root@alex-ec-2:/home/alex# export PATH="$PATH:/var/lib/embedded-cluster/bin" + root@alex-ec-2:/home/alex# source <(kubectl completion bash) + root@alex-ec-2:/home/alex# source /etc/bash_completion + ``` + + The appropriate kubeconfig is exported, and the location of useful binaries like kubectl and the preflight and support-bundle plugins is added to PATH. + + :::note + The shell command cannot be run on non-controller nodes. + ::: + +2. Generate the support bundle using the default Embedded Cluster spec and the `--load-cluster-specs` flag: + + ```bash + kubectl support-bundle --load-cluster-specs /var/lib/embedded-cluster/support/host-support-bundle.yaml + ``` + +================ +File: docs/partials/support-bundles/_generate-bundle-host.mdx +================ +To generate a kURL host support bundle: + +1. Do one of the following: + + - Save the host support bundle YAML file on the host. For more information about creating a YAML spec for a host support bundle, see [Create a Host Support Bundle Spec](/vendor/support-host-support-bundles#create-a-host-support-bundle-spec). + + - Run the following command to download the default kURL host support bundle YAML file from the Troubleshoot repository: + + ``` + kubectl support-bundle https://raw.githubusercontent.com/replicatedhq/troubleshoot-specs/main/host/default.yaml + ``` + + :::note + For air gap environments, download the YAML file and copy it to the air gap machine. + ::: + +1. Run the following command on the host to generate a support bundle: + + ``` + ./support-bundle --interactive=false PATH/FILE.yaml + ``` + + Replace: + - `PATH` with the path to the host support bundle YAML file. + - `FILE` with the name of the host support bundle YAML file from your vendor. + + :::note + Root access is typically not required to run the host collectors and analyzers. However, depending on what is being collected, you might need to run the support-bundle binary with elevated permissions. For example, if you run the `filesystemPerformance` host collector against `/var/lib/etcd` and the user running the binary does not have permissions on this directory, the collection process fails. + ::: + +1. Share the host support bundle with your vendor's support team, if needed. + +1. Repeat these steps for each node because there is no method to generate host support bundles on remote hosts. If you have a multi-node kURL cluster, you must run the support-bundle binary on each node to generate a host support bundle for each node. + +================ +File: docs/partials/support-bundles/_generate-bundle.mdx +================ +Run the following command to generate a bundle: + +```bash +kubectl support-bundle --load-cluster-specs +``` + +The `--load-cluster-specs` flag automatically discovers all support bundle specs that are defined in Secrets or ConfigMaps in the cluster. 
For more information, see [Discover Cluster Specs](https://troubleshoot.sh/docs/support-bundle/discover-cluster-specs/) in the Troubleshoot documentation. + +For a complete list of options with the `kubectl support-bundle` command, run `kubectl support-bundle --help`. For more information, see [Collecting a Support Bundle](https://troubleshoot.sh/docs/support-bundle/collecting/) in the Troubleshoot documentation. + +================ +File: docs/partials/support-bundles/_http-requests-cr.mdx +================ +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: SupportBundle +metadata: + name: example +spec: + collectors: + - http: + collectorName: slack + get: + url: https://api.slack.com/methods/api.test + analyzers: + - textAnalyze: + checkName: Slack Accessible + fileName: slack.json + regex: '"status": 200,' + outcomes: + - pass: + when: "true" + message: "Can access the Slack API" + - fail: + when: "false" + message: "Cannot access the Slack API. Check that the server can reach the internet and check [status.slack.com](https://status.slack.com)." +``` + +================ +File: docs/partials/support-bundles/_http-requests-secret.mdx +================ +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: example + labels: + troubleshoot.sh/kind: support-bundle +stringData: + support-bundle-spec: |- + apiVersion: troubleshoot.sh/v1beta2 + kind: SupportBundle + metadata: + name: example + spec: + collectors: + - http: + collectorName: slack + get: + url: https://api.slack.com/methods/api.test + analyzers: + - textAnalyze: + checkName: Slack Accessible + fileName: slack.json + regex: '"status": 200,' + outcomes: + - pass: + when: "true" + message: "Can access the Slack API" + - fail: + when: "false" + message: "Cannot access the Slack API. Check that the server can reach the internet and check [status.slack.com](https://status.slack.com)." +``` + +================ +File: docs/partials/support-bundles/_install-plugin.mdx +================ +The support-bundle plugin (a kubectl plugin) is required to generate support bundles from the command line. + +You can install the support-bundle plugin using krew or install it manually from the release archives. + +:::note +For Replicated Embedded Cluster and Replicated kURL installations, the support-bundle plugin is automatically installed on all of the control plane nodes. You can skip this prerequisite. +::: + +#### Install or Upgrade using krew + +To install the support-bundle plugin using krew, do one of the following: + +* If krew is _not_ installed already, run the following command to install krew and the support-bundle plugin at the same time: + + ``` + curl https://krew.sh/support-bundle | bash + ``` + +* If krew is installed already, run the following command to install the plug-in: + + ``` + kubectl krew install support-bundle + ``` + +* To upgrade your existing support-bundle plugin using krew: + + ``` + kubectl krew upgrade support-bundle + ``` + +#### Install Manually + +If you do not want to install the plugin using krew or want an easier way to install the plugin in an air gap environment, you can install the plugin manually from the release archives. + +To install the support-bundle plugin manually: + +1. 
Run the following command to download and unarchive the latest release, and move the plugin to your $PATH:
+
+   ```
+   curl -L https://github.com/replicatedhq/troubleshoot/releases/latest/download/support-bundle_linux_amd64.tar.gz | tar xzvf -
+   sudo mv ./support-bundle /usr/local/bin/kubectl-support_bundle
+   ```
+   :::note
+   If you do not have root access, or choose not to add the support-bundle plugin to your path, you can run the binary directly from where you unzipped it by executing `./support-bundle`. If you choose not to put the plugin into your $PATH, then replace all instances of `kubectl support-bundle` in these instructions with `./support-bundle` or with the absolute path to the binary.
+   :::
+
+1. (Optional) Run the following command to test that the installation is working:
+
+   ```
+   kubectl support-bundle --help
+   ```
+
+================
+File: docs/partials/support-bundles/_k8s-version-cr.mdx
+================
+```yaml
+apiVersion: troubleshoot.sh/v1beta2
+kind: SupportBundle
+metadata:
+  name: example
+spec:
+  collectors: []
+  analyzers:
+    - clusterVersion:
+        outcomes:
+          - fail:
+              message: This application relies on kubernetes features only present in 1.16.0 and later.
+              uri: https://kubernetes.io
+              when: < 1.16.0
+          - warn:
+              message: Your cluster is running a version of kubernetes that is out of support.
+              uri: https://kubernetes.io
+              when: < 1.24.0
+          - pass:
+              message: Your cluster meets the recommended and required versions of Kubernetes.
+```
+
+================
+File: docs/partials/support-bundles/_k8s-version-secret.mdx
+================
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: example
+  labels:
+    troubleshoot.sh/kind: support-bundle
+stringData:
+  support-bundle-spec: |-
+    apiVersion: troubleshoot.sh/v1beta2
+    kind: SupportBundle
+    metadata:
+      name: example
+    spec:
+      collectors: []
+      analyzers:
+        - clusterVersion:
+            outcomes:
+              - fail:
+                  message: This application relies on kubernetes features only present in 1.16.0 and later.
+                  uri: https://kubernetes.io
+                  when: < 1.16.0
+              - warn:
+                  message: Your cluster is running a version of kubernetes that is out of support.
+                  uri: https://kubernetes.io
+                  when: < 1.24.0
+              - pass:
+                  message: Your cluster meets the recommended and required versions of Kubernetes.
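+              # clusterVersion compares the cluster's Kubernetes version against
+              # these semver ranges; outcomes are evaluated top to bottom and the
+              # first matching outcome is reported.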
+```
+
+================
+File: docs/partials/support-bundles/_logs-limits-cr.mdx
+================
+```yaml
+apiVersion: troubleshoot.replicated.com/v1beta1
+kind: SupportBundle
+metadata:
+  name: example
+spec:
+  collectors:
+    - logs:
+        selector:
+          - app.kubernetes.io/name=myapp
+        namespace: '{{repl Namespace }}'
+        limits:
+          maxAge: 720h
+          maxLines: 10000
+```
+
+================
+File: docs/partials/support-bundles/_logs-limits-secret.mdx
+================
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: example
+  labels:
+    troubleshoot.sh/kind: support-bundle
+stringData:
+  support-bundle-spec: |-
+    apiVersion: troubleshoot.sh/v1beta2
+    kind: SupportBundle
+    metadata:
+      name: example
+    spec:
+      collectors:
+        - logs:
+            selector:
+              - app.kubernetes.io/name=myapp
+            namespace: {{ .Release.Namespace }}
+            limits:
+              maxAge: 720h
+              maxLines: 10000
+```
+
+================
+File: docs/partials/support-bundles/_logs-selectors-cr.mdx
+================
+```yaml
+apiVersion: troubleshoot.sh/v1beta2
+kind: SupportBundle
+metadata:
+  name: example
+spec:
+  collectors:
+    - logs:
+        namespace: example-namespace
+        selector:
+          - app=slackernews-nginx
+    - logs:
+        namespace: example-namespace
+        selector:
+          - app=slackernews-api
+    - logs:
+        namespace: example-namespace
+        selector:
+          - app=slackernews-frontend
+    - logs:
+        selector:
+          - app=postgres
+  analyzers:
+    - textAnalyze:
+        checkName: Axios Errors
+        fileName: slackernews-frontend-*/slackernews.log
+        regex: "error - AxiosError"
+        outcomes:
+          - pass:
+              when: "false"
+              message: "Axios errors not found in logs"
+          - fail:
+              when: "true"
+              message: "Axios errors found in logs"
+```
+
+================
+File: docs/partials/support-bundles/_logs-selectors-secret.mdx
+================
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: example
+  labels:
+    troubleshoot.sh/kind: support-bundle
+stringData:
+  support-bundle-spec: |-
+    apiVersion: troubleshoot.sh/v1beta2
+    kind: SupportBundle
+    metadata:
+      name: example
+    spec:
+      collectors:
+        - logs:
+            namespace: {{ .Release.Namespace }}
+            selector:
+              - app=slackernews-nginx
+        - logs:
+            namespace: {{ .Release.Namespace }}
+            selector:
+              - app=slackernews-api
+        - logs:
+            namespace: {{ .Release.Namespace }}
+            selector:
+              - app=slackernews-frontend
+        - logs:
+            selector:
+              - app=postgres
+      analyzers:
+        - textAnalyze:
+            checkName: Axios Errors
+            fileName: slackernews-frontend-*/slackernews.log
+            regex: "error - AxiosError"
+            outcomes:
+              - pass:
+                  when: "false"
+                  message: "Axios errors not found in logs"
+              - fail:
+                  when: "true"
+                  message: "Axios errors found in logs"
+```
+
+================
+File: docs/partials/support-bundles/_node-resources-cr.mdx
+================
+```yaml
+apiVersion: troubleshoot.sh/v1beta2
+kind: SupportBundle
+metadata:
+  name: example
+spec:
+  collectors: []
+  analyzers:
+    - nodeResources:
+        checkName: One node must have 2 GB RAM and 1 CPU Cores
+        filters:
+          allocatableMemory: 2Gi
+          cpuCapacity: "1"
+        outcomes:
+          - fail:
+              when: count() < 1
+              message: Cannot find a node with sufficient memory and cpu
+          - pass:
+              message: Sufficient CPU and memory is available
+    - nodeResources:
+        checkName: Must have at least 3 nodes in the cluster
+        outcomes:
+          - fail:
+              when: "count() < 3"
+              message: This application requires at least 3 nodes
+          - warn:
+              when: "count() < 5"
+              message: This application recommends at least 5 nodes.
+          - pass:
+              message: This cluster has enough nodes.
+    - nodeResources:
+        checkName: Each node must have at least 40 GB of ephemeral storage
+        outcomes:
+          - fail:
+              when: "min(ephemeralStorageCapacity) < 40Gi"
+              message: Nodes in this cluster do not have at least 40 GB of ephemeral storage.
+              uri: https://kurl.sh/docs/install-with-kurl/system-requirements
+          - warn:
+              when: "min(ephemeralStorageCapacity) < 100Gi"
+              message: Nodes in this cluster are recommended to have at least 100 GB of ephemeral storage.
+              uri: https://kurl.sh/docs/install-with-kurl/system-requirements
+          - pass:
+              message: The nodes in this cluster have enough ephemeral storage.
+```
+
+================
+File: docs/partials/support-bundles/_node-resources-secret.mdx
+================
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: example
+  labels:
+    troubleshoot.sh/kind: support-bundle
+stringData:
+  support-bundle-spec: |-
+    apiVersion: troubleshoot.sh/v1beta2
+    kind: SupportBundle
+    metadata:
+      name: example
+    spec:
+      collectors: []
+      analyzers:
+        - nodeResources:
+            checkName: One node must have 2 GB RAM and 1 CPU Cores
+            filters:
+              allocatableMemory: 2Gi
+              cpuCapacity: "1"
+            outcomes:
+              - fail:
+                  when: count() < 1
+                  message: Cannot find a node with sufficient memory and cpu
+              - pass:
+                  message: Sufficient CPU and memory is available
+        - nodeResources:
+            checkName: Must have at least 3 nodes in the cluster
+            outcomes:
+              - fail:
+                  when: "count() < 3"
+                  message: This application requires at least 3 nodes
+              - warn:
+                  when: "count() < 5"
+                  message: This application recommends at least 5 nodes.
+              - pass:
+                  message: This cluster has enough nodes.
+        - nodeResources:
+            checkName: Each node must have at least 40 GB of ephemeral storage
+            outcomes:
+              - fail:
+                  when: "min(ephemeralStorageCapacity) < 40Gi"
+                  message: Nodes in this cluster do not have at least 40 GB of ephemeral storage.
+                  uri: https://kurl.sh/docs/install-with-kurl/system-requirements
+              - warn:
+                  when: "min(ephemeralStorageCapacity) < 100Gi"
+                  message: Nodes in this cluster are recommended to have at least 100 GB of ephemeral storage.
+                  uri: https://kurl.sh/docs/install-with-kurl/system-requirements
+              - pass:
+                  message: The nodes in this cluster have enough ephemeral storage.
+```
+
+================
+File: docs/partials/support-bundles/_node-status-cr.mdx
+================
+```yaml
+apiVersion: troubleshoot.sh/v1beta2
+kind: SupportBundle
+metadata:
+  name: example
+spec:
+  collectors: []
+  analyzers:
+    - nodeResources:
+        checkName: Node status check
+        outcomes:
+          - fail:
+              when: "nodeCondition(Ready) == False"
+              message: "Not all nodes are online."
+          - warn:
+              when: "nodeCondition(Ready) == Unknown"
+              message: "Not all nodes are online."
+          - pass:
+              message: "All nodes are online."
+```
+
+================
+File: docs/partials/support-bundles/_node-status-secret.mdx
+================
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: example
+  labels:
+    troubleshoot.sh/kind: support-bundle
+stringData:
+  support-bundle-spec: |-
+    apiVersion: troubleshoot.sh/v1beta2
+    kind: SupportBundle
+    metadata:
+      name: example
+    spec:
+      collectors: []
+      analyzers:
+        - nodeResources:
+            checkName: Node status check
+            outcomes:
+              - fail:
+                  when: "nodeCondition(Ready) == False"
+                  message: "Not all nodes are online."
+              - warn:
+                  when: "nodeCondition(Ready) == Unknown"
+                  message: "Not all nodes are online."
+              - pass:
+                  message: "All nodes are online."
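+              # nodeCondition(Ready) inspects each node's Ready condition;
+              # because outcomes are evaluated in order, the fail case (a node
+              # reporting Ready=False) is checked before the warn and pass cases.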
+``` + +================ +File: docs/partials/support-bundles/_redis-mysql-cr.mdx +================ +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: SupportBundle +metadata: + name: example +spec: + collectors: + - mysql: + collectorName: mysql + uri: 'root:my-secret-pw@tcp(localhost:3306)/mysql' + parameters: + - character_set_server + - collation_server + - init_connect + - innodb_file_format + - innodb_large_prefix + - innodb_strict_mode + - log_bin_trust_function_creators + - redis: + collectorName: my-redis + uri: rediss://default:replicated@server:6380 +``` + +================ +File: docs/partials/support-bundles/_redis-mysql-secret.mdx +================ +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: example + labels: + troubleshoot.sh/kind: support-bundle +stringData: + support-bundle-spec: |- + apiVersion: troubleshoot.sh/v1beta2 + kind: SupportBundle + metadata: + name: example + spec: + collectors: + - mysql: + collectorName: mysql + uri: 'root:my-secret-pw@tcp(localhost:3306)/mysql' + parameters: + - character_set_server + - collation_server + - init_connect + - innodb_file_format + - innodb_large_prefix + - innodb_strict_mode + - log_bin_trust_function_creators + - redis: + collectorName: my-redis + uri: rediss://default:replicated@server:6380 +``` + +================ +File: docs/partials/support-bundles/_run-pods-cr.mdx +================ +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: SupportBundle +metadata: + name: example +spec: + collectors: + - runPod: + collectorName: "static-hi" + podSpec: + containers: + - name: static-hi + image: alpine:3 + command: ["echo", "hi static!"] + analyzers: + - textAnalyze: + checkName: Said hi! + fileName: /static-hi.log + regex: 'hi static' + outcomes: + - fail: + message: Didn't say hi. + - pass: + message: Said hi! +``` + +================ +File: docs/partials/support-bundles/_run-pods-secret.mdx +================ +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: example + labels: + troubleshoot.sh/kind: support-bundle +stringData: + support-bundle-spec: |- + apiVersion: troubleshoot.sh/v1beta2 + kind: SupportBundle + metadata: + name: example + spec: + collectors: + - runPod: + collectorName: "static-hi" + podSpec: + containers: + - name: static-hi + image: alpine:3 + command: ["echo", "hi static!"] + analyzers: + - textAnalyze: + checkName: Said hi! + fileName: /static-hi.log + regex: 'hi static' + outcomes: + - fail: + message: Didn't say hi. + - pass: + message: Said hi! +``` + +================ +File: docs/partials/support-bundles/_support-bundle-add-analyzers.mdx +================ +Add analyzers based on conditions that you expect for your application. For example, you might require that a cluster have at least 2 CPUs and 4GB memory available. + + Good analyzers clearly identify failure modes. For example, if you can identify a log message from your database component that indicates a problem, you should write an analyzer that checks for that log. + + At a minimum, include application log analyzers. A simple text analyzer can detect specific log lines and inform an end user of remediation steps. + + Analyzers that Replicated recommends considering are: + +- **Resource statuses:** Check the status of various resources, such as Deployments, StatefulSets, Jobs, and so on. +- **Regular expressions:** Analyze arbitrary data. +- **Databases:** Check the version and connection status. 
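
For example, the following is a minimal sketch of a resource status analyzer. The `api` Deployment name and `default` namespace are placeholder assumptions:

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: SupportBundle
metadata:
  name: example
spec:
  analyzers:
    # Fail if the (hypothetical) api Deployment has no ready replicas
    - deploymentStatus:
        name: api
        namespace: default
        outcomes:
          - fail:
              when: "< 1"
              message: The api Deployment does not have any ready replicas.
          - pass:
              message: The api Deployment is ready.
```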

================
File: docs/partials/support-bundles/_support-bundle-add-logs.mdx
================
Replicated recommends adding collectors for application Pod logs and setting limits on the number of log lines that are collected. Typically, the `selector` attribute is matched to the labels on the application's Pods.

To get the labels for an application, either inspect the YAML or run `kubectl get pods --show-labels`.

After the labels are discovered, create collectors to include logs from these pods in a bundle. Depending on the complexity of an application's labeling schema, you might need a few different declarations of the `logs` collector. You can include the `logs` collector as many times as needed.

The `limits` field supports `maxAge` and `maxLines`, which constrain the log output that is collected. **Default:** `maxLines: 10000`

================
File: docs/partials/support-bundles/_support-bundle-custom-collectors.mdx
================
Add any custom collectors to the file. Collectors that Replicated recommends considering are:

- **Kubernetes resources:** Use for custom resource definitions (CRDs), Secrets, and ConfigMaps, if they are required for your application to work.
- **Databases:** Return a selection of rows or entire tables.
- **Volumes:** Ensure that an application's persistent state files exist, are readable/writable, and have the right permissions.
- **Pods:** Run a Pod from a custom image.
- **Files:** Copy files from Pods and hosts.
- **HTTP:** Consume your own application APIs with HTTP requests. If your application has its own API that serves status, metrics, performance data, and so on, this information can be collected and analyzed.

================
File: docs/partials/template-functions/_go-sprig.mdx
================
KOTS template functions are based on the Go text/template library. All functionality of the Go templating language, including if statements, loops, and variables, is supported with KOTS template functions. For more information, see [text/template](https://golang.org/pkg/text/template/) in the Go documentation.

Additionally, KOTS template functions can be used with all functions in the Sprig library. Sprig provides several template functions for the Go templating language, such as type conversion, string, and integer math functions. For more information, see [Sprig Function Documentation](https://masterminds.github.io/sprig/).
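
For example, a minimal sketch that pipes the value of a config item through Sprig string functions. The `hostname` item name is a hypothetical assumption for illustration:

```yaml
# Hypothetical Config item value: strip whitespace with the Sprig trim
# function, then lowercase the result with the Sprig lower function
value: repl{{ ConfigOption "hostname" | trim | lower }}
```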

================
File: docs/partials/template-functions/_integer-comparison.mdx
================
The following example uses:
* KOTS [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue) template function to evaluate the number of seats permitted by the license
* Sprig [atoi](https://masterminds.github.io/sprig/conversion.html) function to convert the string values returned by LicenseFieldValue to integers
* [Go binary comparison operators](https://pkg.go.dev/text/template#hdr-Functions) `gt`, `lt`, `ge`, and `le` to compare the integers

```yaml
# KOTS Config custom resource
apiVersion: kots.io/v1beta1
kind: Config
metadata:
  name: config-sample
spec:
  groups:
    - name: example_group
      title: Example Config
      items:
        - name: small
          title: Small (100 or Fewer Seats)
          type: text
          default: Default for small teams
          # Use le and atoi functions to display this config item
          # only when the value of the numSeats entitlement is
          # less than or equal to 100
          when: repl{{ le (atoi (LicenseFieldValue "numSeats")) 100 }}
        - name: medium
          title: Medium (101-1000 Seats)
          type: text
          default: Default for medium teams
          # Use ge, le, and atoi functions to display this config item
          # only when the value of the numSeats entitlement is
          # greater than or equal to 101 and less than or equal to 1000
          when: repl{{ (and (ge (atoi (LicenseFieldValue "numSeats")) 101) (le (atoi (LicenseFieldValue "numSeats")) 1000)) }}
        - name: large
          title: Large (More Than 1000 Seats)
          type: text
          default: Default for large teams
          # Use gt and atoi functions to display this config item
          # only when the value of the numSeats entitlement is
          # greater than 1000
          when: repl{{ gt (atoi (LicenseFieldValue "numSeats")) 1000 }}
```

As shown in the image below, if the user's license contains `numSeats: 150`, then the `medium` item is displayed on the **Config** page and the `small` and `large` items are not displayed:

![Config page displaying the Medium (101-1000 Seats) item](/images/config-example-numseats.png)

[View a larger version of this image](/images/config-example-numseats.png)

================
File: docs/partials/template-functions/_ne-comparison.mdx
================
In the example below, the `ingress_type` field is displayed on the **Config** page only when the distribution of the cluster is _not_ [Replicated Embedded Cluster](/vendor/embedded-overview). This ensures that only users deploying to their own existing cluster are able to select the method for ingress.

The following example uses:
* KOTS [Distribution](/reference/template-functions-static-context#distribution) template function to return the Kubernetes distribution of the cluster where KOTS is running
* [ne](https://pkg.go.dev/text/template#hdr-Functions) (_not equal_) Go binary operator to compare the rendered value of the Distribution template function to a string, then return `true` if the values are not equal to one another

```yaml
apiVersion: kots.io/v1beta1
kind: Config
metadata:
  name: config
spec:
  groups:
    # Ingress settings
    - name: ingress_settings
      title: Ingress Settings
      description: Configure Ingress
      items:
        - name: ingress_type
          title: Ingress Type
          help_text: |
            Select how traffic will ingress to the application.
          type: radio
          items:
            - name: ingress_controller
              title: Ingress Controller
            - name: load_balancer
              title: Load Balancer
          default: "ingress_controller"
          required: true
          when: 'repl{{ ne Distribution "embedded-cluster" }}'
    # Database settings
    - name: database_settings
      title: Database
      items:
        - name: postgres_type
          help_text: Would you like to use an embedded postgres instance, or connect to an external instance that you manage?
          type: radio
          title: Postgres
          default: embedded_postgres
          items:
            - name: embedded_postgres
              title: Embedded Postgres
            - name: external_postgres
              title: External Postgres
```

The following image shows how the `ingress_type` field is hidden when the distribution of the cluster is `embedded-cluster`. Only the `postgres_type` item is displayed:

![Config page with a Postgres field](/images/config-example-distribution-not-ec.png)

[View a larger version of this image](/images/config-example-distribution-not-ec.png)

Conversely, when the distribution of the cluster is not `embedded-cluster`, both fields are displayed:

![Config page with Ingress and Postgres fields](/images/config-example-distribution-not-ec-2.png)

[View a larger version of this image](/images/config-example-distribution-not-ec-2.png)

================
File: docs/partials/template-functions/_string-comparison.mdx
================
The following example uses:
* KOTS [Distribution](/reference/template-functions-static-context#distribution) template function to return the Kubernetes distribution of the cluster where KOTS is running
* [eq](https://pkg.go.dev/text/template#hdr-Functions) (_equal_) Go binary operator to compare the rendered value of the Distribution template function to a string, then return the boolean truth of the comparison

```yaml
# KOTS Config custom resource
apiVersion: kots.io/v1beta1
kind: Config
metadata:
  name: config-sample
spec:
  groups:
    - name: example_settings
      title: My Example Config
      description: Example fields for using Distribution template function
      items:
        - name: gke_distribution
          type: label
          title: "You are deploying to GKE"
          # Use the eq binary operator to check if the rendered value
          # of the KOTS Distribution template function is equal to gke
          when: repl{{ eq Distribution "gke" }}
        - name: openshift_distribution
          type: label
          title: "You are deploying to OpenShift"
          when: repl{{ eq Distribution "openShift" }}
        - name: eks_distribution
          type: label
          title: "You are deploying to EKS"
          when: repl{{ eq Distribution "eks" }}
    ...
```

The following image shows how only the `gke_distribution` item is displayed on the **Config** page when KOTS is running in a GKE cluster:

Config page with the text You are deploying to GKE

================
File: docs/partials/template-functions/_use-cases.mdx
================
Common use cases for KOTS template functions include rendering values during installation or upgrade, such as:
* Customer-specific license field values
* User-provided configuration values
* Information about the customer environment, such as the number of nodes or the Kubernetes version in the cluster where the application is installed
* Random strings

KOTS template functions can also be used to work with integer, boolean, float, and string values, such as doing mathematical operations, trimming leading and trailing spaces, or converting string values to integers or booleans.
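
For example, a minimal sketch of both patterns, using hypothetical `http_enabled` and `replica_count` config item names:

```yaml
# Convert a user-provided string value to a boolean with the KOTS ParseBool function
enabled: repl{{ ParseBool (ConfigOption "http_enabled") }}
# Convert a string to an integer with the Sprig atoi function,
# then double it with the Sprig mul function
replicas: repl{{ mul (atoi (ConfigOption "replica_count")) 2 }}
```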

================
File: docs/partials/updating/_admin-console-air-gap.mdx
================
import BuildAirGapBundle from "../install/_airgap-bundle-build.mdx"
import DownloadAirGapBundle from "../install/_airgap-bundle-download.mdx"
import ViewAirGapBundle from "../install/_airgap-bundle-view-contents.mdx"

To perform an air gap update from the Admin Console:

1. In the [Vendor Portal](https://vendor.replicated.com), go to the channel where the target release is promoted, then build and download the new `.airgap` bundle:

   <BuildAirGapBundle/>

1. <DownloadAirGapBundle/>
1. <ViewAirGapBundle/>
1. In the Admin Console, go to the **Version History** tab.
1. Click **Upload a new version**.

   A new upstream version displays in the list of available versions.

   ![New Version Available](/images/new-version-available.png)

1. (Optional) When there are multiple versions of an application, you can compare
the changes between them by clicking **Diff releases** in the right corner.

   You can review changes between any two arbitrary releases by clicking the icon in the header
   of the release column. Select the two versions to compare, and click **Diff releases**
   to show the relative changes between the two releases.

   ![Diff Releases](/images/diff-releases.png)
   ![New Changes](/images/new-changes.png)

1. (Optional) Click the **View preflight checks** icon to view or re-run the preflight checks.

   ![Preflight Checks](/images/preflight-checks.png)

1. Return to the **Version History** tab and click **Deploy** next to the target version.

================
File: docs/partials/updating/_admin-console.mdx
================
To perform an update from the Admin Console:

1. In the Admin Console, go to the **Version History** tab.
1. Click **Check for updates**.

   A new upstream version displays in the list of available versions.

   ![New Version Available](/images/new-version-available.png)

   [View a larger version of this image](/images/new-version-available.png)

1. (Optional) When there are multiple versions of an application, you can compare
the changes between them by clicking **Diff releases** in the right corner.

   You can review changes between any two arbitrary releases by clicking the icon in the header
   of the release column. Select the two versions to compare, and click **Diff releases**
   to show the relative changes between the two releases.

   ![Diff Releases](/images/diff-releases.png)

   [View a larger version of this image](/images/diff-releases.png)

   ![New Changes](/images/new-changes.png)

   [View a larger version of this image](/images/new-changes.png)

1. (Optional) Click the **View preflight checks** icon to view or re-run the preflight checks.

   ![Preflight checks](/images/preflight-checks.png)

   [View a larger version of this image](/images/preflight-checks.png)

1. Return to the **Version History** tab and click **Deploy** next to the target version.

================
File: docs/partials/updating/_installerRequirements.mdx
================
* **installer-spec-file**: If you used the `installer-spec-file` flag to pass a `patch.yaml` file when you installed, you must pass the same `patch.yaml` file when you upgrade. This prevents the installer from overwriting any configuration from your `patch.yaml` file and making changes to the add-ons in your cluster. For example: `installer-spec-file="./patch.yaml"`.

* **app-version-label**: By default, the script also upgrades your application to the latest version when you run the installation script.

  You can specify a target application version with the `app-version-label` flag.
To avoid upgrading your application version, set the `app-version-label` flag to the currently installed application version. For example: `app-version-label=1.5.0`.

================
File: docs/partials/updating/_upgradePrompt.mdx
================
(Kubernetes Upgrades Only) If a Kubernetes upgrade is required, the script automatically prints a `Drain local node and apply upgrade?` prompt. Confirm the prompt to drain the local primary node and apply the Kubernetes upgrade to the control plane.

  The script continues to drain and upgrade nodes sequentially. For each node, the script prints a command that you must run on the node to upgrade Kubernetes. For more information, see [About Kubernetes Updates](/enterprise/updating-kurl-about#kubernetes) in _About kURL Cluster Updates_.

================
File: docs/partials/vendor-api/_api-about.mdx
================
The Vendor API is the API for the Vendor Portal. This API can be used to complete tasks programmatically, including all tasks for packaging and managing applications, and managing artifacts such as teams and license files.

================
File: docs/partials/vendor-api/_team-token-note.mdx
================
:::note
Team API tokens are deprecated and cannot be generated. If you are already using team API tokens, Replicated recommends that you migrate to Service Accounts or User API tokens instead because these options provide better granular control over token access.
:::

================
File: docs/reference/cron-expressions.md
================
# Cron Expressions

This topic describes the supported cron expressions that you can use to schedule automatic application update checks and automatic backups in the KOTS Admin Console.

For more information, see [Configuring Automatic Updates](/enterprise/updating-apps) and [Schedule Automatic Backups](/enterprise/snapshots-creating#schedule-automatic-backups) in _Creating and Scheduling Backups_.

## Syntax

```
<minute> <hour> <day-of-month> <month> <day-of-week>
```

## Fields

The following table lists the required cron fields and supported values:
| Required Field | Allowed Values | Allowed Special Characters |
|----------------|----------------|----------------------------|
| Minute | 0 through 59 | , - * |
| Hour | 0 through 23 | , - * |
| Day-of-month | 1 through 31 | , - * ? |
| Month | 1 through 12 or JAN through DEC | , - * |
| Day-of-week | 0 through 6 or SUN through SAT | , - * ? |
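
For example, the following expression uses all five fields to schedule a run at 2:30 AM on the first day of every month:

```
30 2 1 * *
```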
## Special Characters

Replicated uses an external cron Go library. For more information about its usage, see [cron](https://pkg.go.dev/github.com/robfig/cron/v3).

The following table describes the supported special characters:

| Special Character | Description |
|-------------------|-------------|
| Comma (,) | Specifies a list or multiple values, which can be consecutive or not. For example, 1,2,4 in the Day-of-week field signifies every Monday, Tuesday, and Thursday. |
| Dash (-) | Specifies a contiguous range. For example, 4-6 in the Month field signifies April through June. |
| Asterisk (*) | Specifies that all of the values for the field are used. For example, using * in the Month field means that all of the months are included in the schedule. |
| Question mark (?) | Specifies that one or another value can be used. For example, enter 5 for Day-of-month and ? for Day-of-week to check for updates on the 5th day of the month, regardless of which day of the week it is. |

## Predefined Schedules

You can use one of the following predefined schedule values instead of a cron expression:
| Schedule Value | Description | Equivalent Cron Expression |
|----------------|-------------|----------------------------|
| @yearly (or @annually) | Runs once a year, at midnight on January 1. | 0 0 1 1 * |
| @monthly | Runs once a month, at midnight on the first of the month. | 0 0 1 * * |
| @weekly | Runs once a week, at midnight between Saturday and Sunday. | 0 0 * * 0 |
| @daily (or @midnight) | Runs once a day, at midnight. | 0 0 * * * |
| @hourly | Runs once an hour, at the beginning of the hour. | 0 * * * * |
| @never | Disables the schedule completely. Only used by KOTS. This value can be useful when you are calling the API directly or are editing the KOTS configuration manually. | 0 * * * * |
| @default | Selects the default schedule option (every 4 hours). Begins when the Admin Console starts up. This value can be useful when you are calling the API directly or are editing the KOTS configuration manually. | 0 * * * * |
    + +## Intervals + +You can also schedule the job to operate at fixed intervals, starting at the time the job is added or when cron is run: + +``` +@every DURATION +``` + +Replace `DURATION` with a string that is accepted by time.ParseDuration, with the exception of seconds. Seconds are not supported by KOTS. For more information about duration strings, see [time.ParseDuration](http://golang.org/pkg/time/#ParseDuration) in the Go Time documentation. + +As with standard cron expressions, the interval does not include the job runtime. For example, if a job is scheduled to run every 10 minutes, and the job takes 4 minutes to run, there are 6 minutes of idle time between each run. + +## Examples + +The following examples show valid cron expressions to schedule checking for updates: + +- At 11:30 AM every day: + + ``` + 30 11 * * * + ``` + +- After 1 hour and 45 minutes, and then every interval following that: + + ``` + @every 1h45m + ``` + +================ +File: docs/reference/custom-resource-about.md +================ +# About Custom Resources + +You can include custom resources in releases to control the experience for applications installed with Replicated KOTS. + +Custom resources are consumed by KOTS, the Admin Console, or by other kubectl plugins. Custom resources are packaged as part of the application, but are _not_ deployed to the cluster. + +## KOTS Custom Resources + +The following are custom resources in the `kots.io` API group: + +| API Group/Version | Kind | Description | +|---------------|------|-------------| +| kots.io/v1beta1 | [Application](custom-resource-application) | Adds additional metadata (branding, release notes and more) to an application | +| kots.io/v1beta1 | [Config](custom-resource-config)| Defines a user-facing configuration screen in the Admin Console | +| kots.io/v1beta2 | [HelmChart](custom-resource-helmchart-v2) | Identifies an instantiation of a Helm Chart | +| kots.io/v1beta1 | [LintConfig](custom-resource-lintconfig) | Customizes the default rule levels for the KOTS release linter | + +## Other Custom Resources + +The following are custom resources in API groups other than `kots.io` that can be included in a KOTS release to configure additional functionality: + +| API Group/Version | Kind | Description | +|---------------|------|-------------| +| app.k8s.io/v1beta1 | [SIG Application](https://github.com/kubernetes-sigs/application#kubernetes-applications) | Defines metadata about the application | +| cluster.kurl.sh/v1beta1 | [Installer](https://kurl.sh/docs/create-installer/) | Defines a Replicated kURL distribution | +| embeddedcluster.replicated.com/v1beta1 | [Config](/reference/embedded-config) | Defines a Replicated Embedded Cluster distribution | +| troubleshoot.replicated.com/v1beta2 | [Preflight](custom-resource-preflight) | Defines the data to collect and analyze for custom preflight checks | +| troubleshoot.replicated.com/v1beta2 | [Redactor](https://troubleshoot.sh/reference/redactors/overview/) | Defines custom redactors that apply to support bundles and preflight checks | +| troubleshoot.sh/v1beta2 | [Support Bundle](custom-resource-preflight) | Defines the data to collect and analyze for a support bundle | +| velero.io/v1 | [Backup](https://velero.io/docs/v1.10/api-types/backup/) | A Velero backup request, triggered when the user initiates a backup with Replicated snapshots | + +================ +File: docs/reference/custom-resource-application.mdx +================ +import Title from 
"../partials/custom-resource-application/_title.mdx" +import Icon from "../partials/custom-resource-application/_icon.mdx" +import ReleaseNotes from "../partials/custom-resource-application/_releaseNotes.mdx" +import AllowRollback from "../partials/custom-resource-application/_allowRollback.mdx" +import AdditionalNamespaces from "../partials/custom-resource-application/_additionalNamespaces.mdx" +import AdditionalImages from "../partials/custom-resource-application/_additionalImages.mdx" +import RequireMinimalRBACPrivileges from "../partials/custom-resource-application/_requireMinimalRBACPrivileges.mdx" +import SupportMinimalRBACPrivileges from "../partials/custom-resource-application/_supportMinimalRBACPrivileges.mdx" +import Ports from "../partials/custom-resource-application/_ports.mdx" +import StatusInformers from "../partials/custom-resource-application/_statusInformers.mdx" +import Graphs from "../partials/custom-resource-application/_graphs.mdx" +import GraphsTemplates from "../partials/custom-resource-application/_graphs-templates.mdx" +import TargetKotsVersion from "../partials/custom-resource-application/_targetKotsVersion.mdx" +import MinKotsVersion from "../partials/custom-resource-application/_minKotsVersion.mdx" +import ProxyRegistryDomain from "../partials/custom-resource-application/_proxyRegistryDomain.mdx" +import ReplicatedRegistryDomain from "../partials/custom-resource-application/_replicatedRegistryDomain.mdx" +import ServicePortNote from "../partials/custom-resource-application/_servicePort-note.mdx" +import PortsServiceName from "../partials/custom-resource-application/_ports-serviceName.mdx" +import PortsLocalPort from "../partials/custom-resource-application/_ports-localPort.mdx" +import PortsServicePort from "../partials/custom-resource-application/_ports-servicePort.mdx" +import PortsApplicationURL from "../partials/custom-resource-application/_ports-applicationURL.mdx" +import KurlNote from "../partials/custom-resource-application/_ports-kurl-note.mdx" + +# Application + +The Application custom resource enables features such as branding, release notes, port forwarding, dashboard buttons, app status indicators, and custom graphs. + +There is some overlap between the Application custom resource manifest file and the [Kubernetes SIG Application custom resource](https://github.com/kubernetes-sigs/application/blob/master/docs/api.md). For example, enabling features such as [adding a button to the dashboard](/vendor/admin-console-adding-buttons-links) requires the use of both the Application and SIG Application custom resources. + +The following is an example manifest file for the Application custom resource: + +```yaml +apiVersion: kots.io/v1beta1 +kind: Application +metadata: + name: my-application +spec: + title: My Application + icon: https://support.io/img/logo.png + releaseNotes: These are our release notes + allowRollback: false + targetKotsVersion: "1.60.0" + minKotsVersion: "1.40.0" + requireMinimalRBACPrivileges: false + additionalImages: + - jenkins/jenkins:lts + additionalNamespaces: + - "*" + ports: + - serviceName: web + servicePort: 9000 + localPort: 9000 + applicationUrl: "http://web" + statusInformers: + - deployment/my-web-svc + - deployment/my-worker + graphs: + - title: User Signups + query: 'sum(user_signup_events_total)' +``` + +## title + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
<table>
  <tr>
    <th>Description</th>
    <td>The application title. Used on the license upload and in various places in the Replicated Admin Console.</td>
  </tr>
  <tr>
    <th>Example</th>
    <td><Title/></td>
  </tr>
  <tr>
    <th>Supports Go templates?</th>
    <td>No</td>
  </tr>
  <tr>
    <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th>
    <td>Yes</td>
  </tr>
</table>

## icon

<table>
  <tr>
    <th>Description</th>
    <td>The icon file for the application. Used on the license upload, in various places in the Admin Console, and in the Download Portal. The icon can be a remote URL or a Base64 encoded image. Base64 encoded images are required to display the image in air gap installations with no outbound internet access.</td>
  </tr>
  <tr>
    <th>Example</th>
    <td><Icon/></td>
  </tr>
  <tr>
    <th>Supports Go templates?</th>
    <td>No</td>
  </tr>
  <tr>
    <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th>
    <td>Yes</td>
  </tr>
</table>


## releaseNotes

<table>
  <tr>
    <th>Description</th>
    <td>The release notes for this version. These can also be set when promoting a release.</td>
  </tr>
  <tr>
    <th>Example</th>
    <td><ReleaseNotes/></td>
  </tr>
  <tr>
    <th>Supports Go templates?</th>
    <td>No</td>
  </tr>
  <tr>
    <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th>
    <td>Yes</td>
  </tr>
</table>

## allowRollback

<table>
  <tr>
    <th>Description</th>
    <td>
      <p>Enable this flag to create a <strong>Rollback</strong> button on the Admin Console Version History page.</p>
      <p>If an application is guaranteed not to introduce backwards-incompatible versions, such as through database migrations, then the <code>allowRollback</code> flag can allow end users to easily roll back to previous versions from the Admin Console.</p>
      <p>Rollback does not revert any state. Rather, it recovers the YAML manifests that are applied to the cluster.</p>
    </td>
  </tr>
  <tr>
    <th>Example</th>
    <td><AllowRollback/></td>
  </tr>
  <tr>
    <th>Default</th>
    <td><code>false</code></td>
  </tr>
  <tr>
    <th>Supports Go templates?</th>
    <td>No</td>
  </tr>
  <tr>
    <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th>
    <td>Embedded Cluster 1.17.0 and later supports partial rollbacks of the application version. Partial rollbacks are supported only when rolling back to a version where there is no change to the [Embedded Cluster Config](/reference/embedded-config) compared to the currently-installed version. For example, users can roll back to release version 1.0.0 after upgrading to 1.1.0 only if both 1.0.0 and 1.1.0 use the same Embedded Cluster Config.</td>
  </tr>
</table>


## additionalNamespaces

<table>
  <tr>
    <th>Description</th>
    <td>
      <p>An array of additional namespaces as strings that Replicated KOTS creates on the cluster. For more information, see <a href="/vendor/operator-defining-additional-namespaces">Defining Additional Namespaces</a>.</p>
      <p>In addition to creating the additional namespaces, KOTS ensures that the application secret exists in the namespaces. KOTS also ensures that this application secret has access to pull the application images, including both images that are used and any images you add in the <code>additionalImages</code> field.
This pull secret is automatically added to all manifest files that use private images.</p> + <p>For dynamically created namespaces, specify <code>"*"</code>.</p> + </td> + </tr> + <tr> + <th>Example</th> + <td><AdditionalNamespaces/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>No</td> + </tr> + <tr> + <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th> + <td>Yes</td> + </tr> +</table> + +## additionalImages + +<table> + <tr> + <th>Description</th> + <td><p>An array of strings that reference images to be included in air gap bundles and pushed to the local registry during installation.</p><p>KOTS detects images from the PodSpecs in the application. Some applications, such as Operators, might need to include additional images that are not referenced until runtime. For more information, see <a href="/vendor/operator-defining-additional-images">Defining Additional Images</a>.</p></td> + </tr> + <tr> + <th>Example</th> + <td><AdditionalImages/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>No</td> + </tr> + <tr> + <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th> + <td>Yes</td> + </tr> +</table> + +## requireMinimalRBACPrivileges + +<table> + <tr> + <th>Description</th> + <td><p><code>requireMinimalRBACPrivileges</code> applies to existing clusters only.</p><p>Requires minimal role-based access control (RBAC) be used for all customer installations. When set to <code>true</code>, KOTS creates a namespace-scoped Role and RoleBinding, instead of the default cluster-scoped ClusterRole and ClusterRoleBinding.</p><p>For additional requirements and limitations related to using namespace-scoped RBAC, see <a href="/vendor/packaging-rbac#min-rbac">About Namespace-scoped RBAC</a> in <em>Configuring KOTS RBAC</em>.</p></td> + </tr> + <tr> + <th>Example</th> + <td><RequireMinimalRBACPrivileges/></td> + </tr> + <tr> + <th>Default</th> + <td><code>false</code></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>No</td> + </tr> + <tr> + <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th> + <td>No</td> + </tr> +</table> + +## supportMinimalRBACPrivileges + +<table> + <tr> + <th>Description</th> + <td><p><code>supportMinimalRBACPrivileges</code> applies to existing clusters only.</p><p>Allows minimal role-based access control (RBAC) be used for all customer installations. When set to <code>true</code>, KOTS supports creating a namespace-scoped Role and RoleBinding, instead of the default cluster-scoped ClusterRole and ClusterRoleBinding.</p><p> Minimal RBAC is not used by default. It is only used when the <code>--use-minimal-rbac</code> flag is passed to the <code>kots install</code> command.</p><p>For additional requirements and limitations related to using namespace-scoped RBAC, see <a href="/vendor/packaging-rbac#min-rbac">About Namespace-scoped RBAC</a> in <em>Configuring KOTS RBAC</em>.</p></td> + </tr> + <tr> + <th>Example</th> + <td><SupportMinimalRBACPrivileges/></td> + </tr> + <tr> + <th>Default</th> + <td><code>false</code></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>No</td> + </tr> + <tr> + <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th> + <td>No</td> + </tr> +</table> + +## ports + +<table> +<tr> + <th>Description</th> + <td> + <p>Extra ports (additional to the <code>8800</code> Admin Console port) that are port-forwarded when running the <code>kubectl kots admin-console</code> command. 
With ports specified, KOTS can establish port forwarding to simplify connections to the deployed application. When the application starts and the service is ready, the KOTS CLI will print a message in the terminal with the URL where the port-forwarded service can be accessed. For more information, see <a href="/vendor/admin-console-port-forward">Port Forwarding Services with KOTS</a>.</p> + <KurlNote/> + <p>The <code>ports</code> key has the following fields:</p> + <ul> + <PortsServiceName/> + <PortsServicePort/> + <ServicePortNote/> + <PortsLocalPort/> + <PortsApplicationURL/> + For more information about adding links to port forwarded services, see <a href="/vendor/admin-console-port-forward#add-link">Add a Link to a Port-Forwarded Service in the Admin Console</a>. + </ul> + </td> + </tr> + <tr> + <th>Example</th> + <td><Ports/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td><p>Go templates are supported in the `serviceName` and `applicationUrl` fields only.</p><p>Using Go templates in the `localPort` or `servicePort` fields results in an installation error similar to the following: `json: cannot unmarshal string into Go struct field ApplicationPort.spec.ports.servicePort of type int`.</p></td> + </tr> + <tr> + <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th> + <td>Yes</td> + </tr> +</table> + +## statusInformers + +<table> + <tr> + <th>Description</th> + <td> + <p>Resources to watch and report application status back to the user. When you include <code>statusInformers</code>, the dashboard can indicate when the application deployment is complete and the application is ready for use.</p> + <p><code>statusInformers</code> use the format <code>[namespace/]type/name</code>, where namespace is optional.</p> + <p>For more information about including statusInformers, see <a href="/vendor/admin-console-display-app-status">Adding Resource Status Informers</a>.</p> + </td> + </tr> + <tr> + <th>Example</th> + <td><StatusInformers/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>Yes</td> + </tr> + <tr> + <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th> + <td>Yes</td> + </tr> +</table> + +## graphs + +<table> + <tr> + <th>Description</th> + <td><p>Custom graphs to include on the Admin Console application dashboard.For more information about how to create custom graphs, see <a href="/vendor/admin-console-prometheus-monitoring">Adding Custom Graphs</a>.</p><p><code>graphs</code> has the following fields:</p><ul><li><code>graphs.title</code>: The graph title.</li><li><code>graphs.query</code>: The Prometheus query.</li><li><code>graphs.legend</code>: The legend to use for the query line. You can use Prometheus templating in the <code>legend</code> fields with each element returned from the Prometheus query. <p><GraphsTemplates/></p></li><li><code>graphs.queries</code>: A list of queries containing a <code>query</code> and <code>legend</code>.</li> <li><code>graphs.yAxisFormat</code>: The format of the Y axis labels with support for all Grafana units. 
For more information, see <a href="https://grafana.com/docs/features/panels/graph/#left-y-right-y">Visualizations</a> in the Grafana documentation.</li><li><code>graphs.yAxisTemplate</code>: Y axis labels template.</li></ul></td> + </tr> + <tr> + <th>Example</th> + <td><Graphs/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td> + <p>Yes</p> + </td> + </tr> + <tr> + <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th> + <td>No</td> + </tr> +</table> + +## proxyRegistryDomain + +:::important +`proxyRegistryDomain` is deprecated. For information about how to use a custom domain for the Replicated proxy registry, see [Using Custom Domains](/vendor/custom-domains-using). +::: + +<table> + <tr> + <th>Description</th> + <td><p>The custom domain used for proxy.replicated.com. For more information, see <a href="/vendor/custom-domains-using">Using Custom Domains</a>.</p> <p>Introduced in KOTS v1.91.1.</p> </td> + </tr> + <tr> + <th>Example</th> + <td><ProxyRegistryDomain/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>No</td> + </tr> +</table> + +## replicatedRegistryDomain + +:::important +`replicatedRegistryDomain` is deprecated. For information about how to use a custom domain for the Replicated registry, see [Using Custom Domains](/vendor/custom-domains-using). +::: + +<table> + <tr> + <th>Description</th> + <td><p>The custom domain used for registry.replicated.com. For more information, see <a href="/vendor/custom-domains-using">Using Custom Domains</a>.</p><p>Introduced in KOTS v1.91.1.</p> </td> + </tr> + <tr> + <th>Example</th> + <td><ReplicatedRegistryDomain/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>No</td> + </tr> + <tr> + <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th> + <td>Yes</td> + </tr> +</table> + +## targetKotsVersion + +<table> + <tr> + <th>Description</th> + <td><p>The KOTS version that is targeted by the release. For more information, see <a href="/vendor/packaging-kots-versions">Setting Minimum and Target Versions for KOTS</a>.</p></td> + </tr> + <tr> + <th>Example</th> + <td><TargetKotsVersion/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>No</td> + </tr> + <tr> + <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th> + <td>No. Setting <code>targetKotsVersion</code> to a version earlier than the KOTS version included in the specified version of Embedded Cluster will cause Embedded Cluster installations to fail with an error message like: <code>Error: This version of App Name requires a different version of KOTS from what you currently have installed.</code>. To avoid installation failures, do not use <code>targetKotsVersion</code> in releases that support installation with Embedded Cluster.</td> + </tr> +</table> + +## minKotsVersion (Beta) + +<table> + <tr> + <th>Description</th> + <td><p>The minimum KOTS version that is required by the release. For more information, see <a href="/vendor/packaging-kots-versions">Setting Minimum and Target Versions for KOTS</a>.</p></td> + </tr> + <tr> + <th>Example</th> + <td><MinKotsVersion/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>No</td> + </tr> + <tr> + <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th> + <td>No. 
Setting <code>minKotsVersion</code> to a version later than the KOTS version included in the specified version of Embedded Cluster will cause Embedded Cluster installations to fail with an error message like: <code>Error: This version of App Name requires a different version of KOTS from what you currently have installed.</code>. To avoid installation failures, do not use <code>minKotsVersion</code> in releases that support installation with Embedded Cluster.</td> + </tr> +</table> + +================ +File: docs/reference/custom-resource-backup.md +================ +# Velero Backup Resource for Snapshots + +This topic provides information about the supported fields in the Velero Backup resource for the Replicated KOTS snapshots feature. + +## Overview + +The Velero Backup custom resource enables the KOTS snapshots backup and restore feature. The backend of this feature uses the Velero open source project to back up Kubernetes manifests and persistent volumes. + +## Example + +The following shows an example of the Velero Backup resource: + +```yaml +apiVersion: velero.io/v1 +kind: Backup +metadata: + name: backup + annotations: + # `pvc-volume` will be the only volume included in the backup + backup.velero.io/backup-volumes: pvc-volume +spec: + includedNamespaces: + - '*' + excludedNamespaces: + - some-namespace + orderedResources: + pods: mysql/mysql-cluster-replica-0,mysql/mysql-cluster-replica-1 + persistentvolumes: pvc-12345,pvc-67890 + ttl: 720h + hooks: + resources: + - + name: my-hook + includedNamespaces: + - '*' + excludedNamespaces: + - some-namespace + includedResources: + - pods + excludedResources: [] + labelSelector: + matchLabels: + app: velero + component: server + pre: + - + exec: + container: my-container + command: + - /bin/uname + - -a + onError: Fail + timeout: 10s + post: [] +``` + +## Supported Fields for Full Backups with Snapshots {#fields} + +For partial backups with the snapshots feature, you can use all of the fields that Velero supports. See [Backups](https://velero.io/docs/v1.10/api-types/backup/) in the Velero documentation. + +However, not all fields are supported for full backups. The table below lists the fields that are supported for full backups with snapshots: + +<table> + <tr> + <th width="50%">Field Name</th> + <th width="50%">Description</th> + </tr> + <tr> + <td><code>includedNamespaces</code></td> + <td>(Optional) Specifies an array of namespaces to include in the backup. If unspecified, all namespaces are included.</td> + </tr> + <tr> + <td><code>excludedNamespaces</code></td> + <td>(Optional) Specifies an array of namespaces to exclude from the backup.</td> + </tr> + <tr> + <td><code>orderedResources</code></td> + <td>(Optional) Specifies the order of the resources to collect during the backup process. This is a map that uses a key as the plural resource. Each resource name has the format NAMESPACE/OBJECTNAME. The object names are a comma delimited list. For cluster resources, use OBJECTNAME only.</td> + </tr> + <tr> + <td><code>ttl</code></td> + <td> Specifies the amount of time before this backup is eligible for garbage collection. <b>Default:</b><code>720h</code> (equivalent to 30 days). This value is configurable only by the customer.</td> + </tr> + <tr> + <td><code>hooks</code></td> + <td>(Optional) Specifies the actions to perform at different times during a backup. The only supported hook is executing a command in a container in a pod (uses the pod exec API). 
Supports <code>pre</code> and <code>post</code> hooks.</td> + </tr> + <tr> + <td><code>hooks.resources</code></td> + <td>(Optional) Specifies an array of hooks that are applied to specific resources.</td> + </tr> + <tr> + <td><code>hooks.resources.name</code></td> + <td>Specifies the name of the hook. This value displays in the backup log.</td> + </tr> + <tr> + <td><code>hooks.resources.includedNamespaces</code></td> + <td>(Optional) Specifies an array of namespaces that this hook applies to. If unspecified, the hook is applied to all namespaces.</td> + </tr> + <tr> + <td><code>hooks.resources.excludedNamespaces</code></td> + <td>(Optional) Specifies an array of namespaces to which this hook does not apply.</td> + </tr> + <tr> + <td><code>hooks.resources.includedResources</code></td> + <td>Specifies an array of pod resources to which this hook applies.</td> + </tr> + <tr> + <td><code>hooks.resources.excludedResources</code></td> + <td>(Optional) Specifies an array of resources to which this hook does not apply.</td> + </tr> + <tr> + <td><code>hooks.resources.labelSelector</code></td> + <td>(Optional) Specifies that this hook only applies to objects that match this label selector.</td> + </tr> + <tr> + <td><code>hooks.resources.pre</code></td> + <td>Specifies an array of <code>exec</code> hooks to run before executing custom actions.</td> + </tr> + <tr> + <td><code>hooks.resources.post</code></td> + <td>Specifies an array of <code>exec</code> hooks to run after executing custom actions. Supports the same arrays and fields as <code>pre</code> hooks.</td> + </tr> + <tr> + <td><code>hooks.resources.[post/pre].exec</code></td> + <td>Specifies the type of the hook. <code>exec</code> is the only supported type.</td> + </tr> + <tr> + <td><code>hooks.resources.[post/pre].exec.container</code></td> + <td>(Optional) Specifies the name of the container where the specified command will be executed. If unspecified, the first container in the pod is used.</td> + </tr> + <tr> + <td><code>hooks.resources.[post/pre].exec.command</code></td> + <td>Specifies the command to execute. The format is an array.</td> + </tr> + <tr> + <td><code>hooks.resources.[post/pre].exec.onError</code></td> + <td>(Optional) Specifies how to handle an error that might occur when executing the command. <b>Valid values:</b> <code>Fail</code> and <code>Continue</code> <b>Default:</b> <code>Fail</code></td> + </tr> + <tr> + <td><code>hooks.resources.[post/pre].exec.timeout</code></td> + <td>(Optional) Specifies how many seconds to wait for the command to finish executing before the action times out. <b>Default:</b> <code>30s</code></td> + </tr> +</table> + +## Limitations {#limitations} + +- The following top-level Velero fields, or children of `spec`, are not supported in full backups: + + - `snapshotVolumes` + - `volumeSnapshotLocations` + - `labelSelector` + - `includedResources` + - `excludedResources` + + :::note + Some of these fields are supported for hook arrays, as described in the previous field definition table. See [Supported Fields for Full Backups with Snapshots](#fields) above. + ::: + +- All resources are included in the backup by default. However, resources can be excluded by adding `velero.io/exclude-from-backup=true` to the manifest files that you want to exclude. For more information, see [Configuring Snapshots](/vendor/snapshots-configuring-backups). 
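
For example, a minimal sketch of excluding a resource from backups by adding the label to its manifest. The ConfigMap shown is a hypothetical example resource:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: example-debug-config
  labels:
    # Velero skips any resource with this label during backups
    velero.io/exclude-from-backup: "true"
data:
  verbose: "true"
```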

================
File: docs/reference/custom-resource-config.mdx
================
import ItemTypes from "../partials/config/_item-types.mdx"
import PropertyWhen from "../partials/config/_property-when.mdx"
import RandomStringNote from "../partials/config/_randomStringNote.mdx"
import NameExample from "../partials/config/_nameExample.mdx"
import TypeExample from "../partials/config/_typeExample.mdx"
import DefaultExample from "../partials/config/_defaultExample.mdx"
import ValueExample from "../partials/config/_valueExample.mdx"
import RequiredExample from "../partials/config/_requiredExample.mdx"
import RecommendedExample from "../partials/config/_recommendedExample.mdx"
import HiddenExample from "../partials/config/_hiddenExample.mdx"
import ReadonlyExample from "../partials/config/_readonlyExample.mdx"
import WhenExample from "../partials/config/_whenExample.mdx"
import AffixExample from "../partials/config/_affixExample.mdx"
import HelpTextExample from "../partials/config/_helpTextExample.mdx"
import RegexValidationExample from "../partials/config/_regexValidationExample.mdx"
import WhenRequirements from "../partials/config/_when-requirements.mdx"
import WhenNote from "../partials/config/_when-note.mdx"

# Config

The Config custom resource can be provided by a vendor to specify a Config page in the Replicated Admin Console for collecting customer-supplied values and rendering them with template functions.

The settings that appear on the Admin Console Config page are specified as an array of configuration _groups_ and _items_.

The following example shows three groups defined in the Config custom resource manifest file, and how these groups are displayed on the Admin Console Config page.

For more information about the properties of groups and items, see [Group Properties](#group-properties) and [Item Properties](#item-properties) below.

```yaml
apiVersion: kots.io/v1beta1
kind: Config
metadata:
  name: my-application
spec:
  groups:
    - name: example_group
      title: First Group
      items:
        - name: http_enabled
          title: HTTP Enabled
          type: bool
          default: "0"
    - name: example_group_2
      title: Second Group
      when: false
      items:
        - name: key
          title: Key
          type: textarea
        - name: hostname
          title: Hostname
          type: text
    - name: example_group_3
      title: Third Group
      items:
        - name: email-address
          title: Email Address
          type: text
        - name: password_text
          title: Password
          type: password
          value: '{{repl RandomString 10}}'
```
![Three groups of items on the config page](/images/config-screen-groups.png)
[View a larger version of this image](/images/config-screen-groups.png)

## Group Properties

Groups have a `name`, `title`, `description`, and an array of `items`.

### `description`

Descriptive help text for the group that displays on the Admin Console Config page. Supports markdown formatting.

To provide help text for individual items on the Config page, use the item `help-text` property. See [help_text](#help_text) below.

```yaml
spec:
  groups:
    - name: example_group
      title: First Group
      # Provide a description of the input fields in the group
      description: Select whether or not to enable HTTP.
      items:
        - name: http_enabled
          title: HTTP Enabled
          type: bool
          default: "0"
```

### `name`

A unique identifier for the group.
+ +```yaml +spec: + groups: + # The name must be unique + - name: example_group + title: First Group + items: + - name: http_enabled + title: HTTP Enabled + type: bool + default: "0" +``` + +### `title` + +The title of the group that displays on the Admin Console Config page. + +```yaml +spec: + groups: + - name: example_group + # First Group is the heading that appears on the Config page + title: First Group + items: + - name: http_enabled + title: HTTP Enabled + type: bool + default: "0" +``` + +### `when` + +The `when` property denotes groups that are displayed on the Admin Console **Config** page only when a condition evaluates to true. When the condition evaluates to false, the group is not displayed. + +<PropertyWhen/> + +:::note +`when` is a property of both groups and items. See [Item Properties > `when`](/reference/custom-resource-config#when-item) below. +::: + +#### Requirements and Limitations + +The `when` group property has the following requirements and limitations: + +<WhenRequirements/> + +#### Example + +In the following example, the `example_group_2` group of items will be displayed on the **Config** page only when the user enables the `http_enabled` configuration field. This example uses the KOTS [ConfigOptionEquals](/reference/template-functions-config-context#configoptionequals) template function to evaluate the value of the `http_enabled` configuration field. + +```yaml +spec: + groups: + - name: example_group + title: First Group + items: + - name: http_enabled + title: HTTP Enabled + type: bool + default: "0" + - name: example_group_2 + title: Second Group + # This group is displayed only when the `http_enabled` field is selected + when: repl{{ ConfigOptionEquals "http_enabled" "1" }} + items: + - name: key + title: Key + type: textarea + - name: hostname + title: Hostname + type: text + - name: example_group_3 + title: Third Group + items: + - name: email-address + title: Email Address + type: text + - name: password_text + title: Password + type: password + value: '{{repl RandomString 10}}' +``` + +![Only the first and third groups appear on the config screen](/images/config-screen-group-when-false.png) +[View a larger version of this image](/images/config-screen-group-when-false.png) + +For additional examples, see [Using Conditional Statements in Configuration Fields](/vendor/config-screen-conditional). + +### `items` + +Each group contains an array of items that map to input fields on the Admin Console Config screen. All items have `name`, `title`, and `type` properties and belong to a single group. + +For more information, see [Item Properties](#item-properties) and [Item Types](#item-types) below. + +## Item Properties + +Items have a `name`, `title`, `type`, and other optional properties. + +### `affix` + +<table> + <tr> + <th>Description</th> + <td> + <p>Items can be affixed <code>left</code> or <code>right</code>. Affixing items allows them to appear in the Admin Console on the same line.</p><p>Specify the <code>affix</code> field to all of the items in a particular group to preserve the line spacing and prevent the appearance of crowded text.</p> + </td> + </tr> + <tr> + <th>Required?</th> + <td>No</td> + </tr> + <tr> + <th>Example</th> + <td><AffixExample/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>Yes</td> + </tr> +</table> + +### `default` + +<table> + <tr> + <th>Description</th> + <td> + <p>Defines the default value for the config item. 
If the user does not provide a value for the item, then the <code>default</code> value is applied.</p> + <p>If the <code>default</code> value is not associated with a <code>password</code> type config item, then it appears as placeholder text in the Admin Console.</p> + </td> + </tr> + <tr> + <th>Required?</th> + <td>No</td> + </tr> + <tr> + <th>Example</th> + <td><DefaultExample/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td><p>Yes. Every time the user makes a change to their configuration settings for the application, any template functions used in the <code>default</code> property are reevaluated.</p></td> + </tr> +</table> + +### `help_text` + +<table> + <tr> + <th>Description</th> + <td> + <p>Displays a helpful message below the <code>title</code> for the config item in the Admin Console.</p> + <p>Markdown syntax is supported. For more information about markdown syntax, see <a href="https://guides.github.com/features/mastering-markdown/">Basic writing and formatting syntax</a> in the GitHub Docs.</p> + </td> + </tr> + <tr> + <th>Required?</th> + <td>No</td> + </tr> + <tr> + <th>Example</th> + <td><HelpTextExample/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>Yes</td> + </tr> +</table> + +### `hidden` + +<table> + <tr> + <th>Description</th> + <td> + <p>Hidden items are not visible in the Admin Console.</p> + <p><RandomStringNote/></p> + </td> + </tr> + <tr> + <th>Required?</th> + <td>No</td> + </tr> + <tr> + <th>Example</th> + <td><HiddenExample/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>No</td> + </tr> +</table> + +### `name` + +<table> + <tr> + <th>Description</th> + <td><p>A unique identifier for the config item. Item names must be unique both within the group and across all groups. The item <code>name</code> is not displayed in the Admin Console.</p><p> The item <code>name</code> can be used with KOTS template functions in the Config context (such as ConfigOption or ConfigOptionEquals) to return the value of the item. For more information, see <a href="/reference/template-functions-config-context">Config Context</a>.</p></td> + </tr> + <tr> + <th>Required?</th> + <td>Yes</td> + </tr> + <tr> + <th>Example</th> + <td><NameExample/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>Yes</td> + </tr> +</table> + +### `readonly` + +<table> + <tr> + <th>Description</th> + <td> + <p>Readonly items are displayed in the Admin Console and users cannot edit their value.</p> + <p><RandomStringNote/></p> + </td> + </tr> + <tr> + <th>Required?</th> + <td>No</td> + </tr> + <tr> + <th>Example</th> + <td><ReadonlyExample/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>No</td> + </tr> +</table> + +### `recommended` + +<table> + <tr> + <th>Description</th> + <td><p>Displays a Recommended tag for the config item in the Admin Console.</p></td> + </tr> + <tr> + <th>Required?</th> + <td>No</td> + </tr> + <tr> + <th>Example</th> + <td> + <RecommendedExample/> + </td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>No</td> + </tr> +</table> + +### `required` + +<table> + <tr> + <th>Description</th> + <td><p>Displays a Required tag for the config item in the Admin Console. 
A required item prevents the application from starting until it has a value.</p></td> + </tr> + <tr> + <th>Required?</th> + <td>No</td> + </tr> + <tr> + <th>Example</th> + <td><RequiredExample/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>No</td> + </tr> +</table> + +### `title` + +<table> + <tr> + <th>Description</th> + <td><p>The title of the config item that displays in the Admin Console.</p></td> + </tr> + <tr> + <th>Required?</th> + <td>Yes</td> + </tr> + <tr> + <th>Example</th> + <td><HelpTextExample/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>Yes</td> + </tr> +</table> + +### `type` + +<table> + <tr> + <th>Description</th> + <td> + <p>Each item has a <code>type</code> property that defines the type of user input accepted by the field.</p> + <p>The <code>type</code> property supports the following values: <ItemTypes/></p> + <p>For information about each type, see <a href="#item-types">Item Types</a>.</p> + </td> + </tr> + <tr> + <th>Required?</th> + <td>Yes</td> + </tr> + <tr> + <th>Example</th> + <td><TypeExample/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>No</td> + </tr> +</table> + +### `value` + +<table> + <tr> + <th>Description</th> + <td> + <p>Defines the value of the config item. Data that you add to <code>value</code> appears as the HTML input value for the config item in the Admin Console.</p> + <p>If the config item is not readonly, then the data that you add to <code>value</code> is overwritten by any user input for the item. If the item is readonly, then the data that you add to <code>value</code> cannot be overwritten.</p> + </td> + </tr> + <tr> + <th>Required?</th> + <td>No</td> + </tr> + <tr> + <th>Example</th> + <td><ValueExample/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td><p>Yes</p><RandomStringNote/></td> + </tr> +</table> + +### `when` {#when-item} + +<table> + <tr> + <th>Description</th> + <td><p>The <code>when</code> property denotes items that are displayed on the Admin Console <strong>Config</strong> page only when a condition evaluates to true. When the condition evaluates to false, the item is not displayed.</p><PropertyWhen/><p>The `when` item property has the following requirements and limitations:</p><WhenRequirements/><ul><li><code>when</code> cannot be applied to the items nested under a <code>radio</code>, <code>dropdown</code> or <code>select_one</code> item. To conditionally show or hide <code>radio</code>, <code>dropdown</code> or <code>select_one</code> items, apply the <code>when</code> property to the item itself.</li></ul><WhenNote/></td> + </tr> + <tr> + <th>Required?</th> + <td>No</td> + </tr> + <tr> + <th>Example</th> + <td> + <p>Display the <code>database_host</code> and <code>database_password</code> items only when the user selects <code>external</code> for the <code>db_type</code> item:</p><p><WhenExample/></p><p>For additional examples, see <a href="/vendor/config-screen-conditional">Using Conditional Statements in Configuration Fields</a>.</p> + </td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>Yes</td> + </tr> +</table> + +### `validation` + +<table> + <tr> + <th>Description</th> + <td><p>The <code>validation</code> property can be used to validate an item's value, <br/>allowing you to specify custom validation rules that determine whether the value is valid or not.</p></td> + </tr> + <tr> + <th>Required?</th> + <td>No</td> + </tr> + <tr> + <th>Example</th> + <td> + <p>Validates and returns if <code>password</code> value is not matching the regex. 
<br/>The <code>jwt_token</code> file content is validated only if a file is uploaded, because the file is optional.</p>
      <RegexValidationExample/>
    </td>
  </tr>
  <tr>
    <th>Supports Go templates?</th>
    <td>No</td>
  </tr>
</table>

For information about supported validation types, see [Item Validation](#item-validation).

## Item Types

This section describes each of the item types:
<ItemTypes/>

### `bool`
The `bool` input type should use a "0" or "1" to set the value.
```yaml
    - name: group_title
      title: Group Title
      items:
      - name: http_enabled
        title: HTTP Enabled
        type: bool
        default: "0"
```

![Boolean selector on the configuration screen](/images/config-screen-bool.png)

[View a larger version of this image](/images/config-screen-bool.png)

### `dropdown`

> Introduced in KOTS v1.114.0

The `dropdown` item type includes one or more nested items that are displayed in a dropdown on the Admin Console config screen. Dropdowns are especially useful for displaying long lists of options. You can also use the [`radio`](#radio) item type to display radio buttons for items with shorter lists of options.

To set a default value for `dropdown` items, set the `default` field to the name of the target nested item.

```yaml
spec:
  groups:
    - name: example_settings
      title: My Example Config
      items:
      - name: version
        title: Version
        default: version_latest
        type: dropdown
        items:
        - name: version_latest
          title: latest
        - name: version_123
          title: 1.2.3
        - name: version_124
          title: 1.2.4
        - name: version_125
          title: 1.2.5
```

![Dropdown item type on config screen](/images/config-screen-dropdown.png)

[View a larger version of this image](/images/config-screen-dropdown.png)

![Dropdown item type expanded](/images/config-screen-dropdown-open.png)

[View a larger version of this image](/images/config-screen-dropdown-open.png)

### `file`
A `file` is a special type of form field that renders an [`<input type="file" />`](https://www.w3schools.com/tags/tag_input.asp) HTML element.
Only the contents of the file, not the name, are captured.
See the [`ConfigOptionData`](template-functions-config-context#configoptiondata) template function for examples on how to use the file contents in your application.

```yaml
    - name: certs
      title: TLS Configuration
      items:
      - name: tls_private_key_file
        title: Private Key
        type: file
      - name: tls_certificate_file
        title: Certificate
        type: file
```

![File input field on the configuration screen](/images/config-screen-file.png)

[View a larger version of this image](/images/config-screen-file.png)
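For example, the following is a minimal sketch of how uploaded file contents might be consumed in a TLS Secret, assuming `file` items named `tls_certificate_file` and `tls_private_key_file` as above, and assuming that [`ConfigOption`](template-functions-config-context#configoption) returns the base64-encoded contents for `file` items, which is the encoding that the Secret `data` field expects:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: tls-certs
type: kubernetes.io/tls
data:
  # Base64-encoded contents of the files uploaded on the config screen
  tls.crt: repl{{ ConfigOption "tls_certificate_file" }}
  tls.key: repl{{ ConfigOption "tls_private_key_file" }}
```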
### `heading`
The `heading` type allows you to display a group heading as a sub-element within a group.
This is useful when you would like to use a config group to group items together, but still separate the items visually.

```yaml
    - name: ldap_settings
      title: LDAP Server Settings
      items:
      ...
      - name: ldap_schema
        type: heading
        title: LDAP schema
      ...
```

![Heading on the configuration screen](/images/config-screen-heading.png)

[View a larger version of this image](/images/config-screen-heading.png)

### `label`
The `label` type allows you to display an input label.
```yaml
    - name: email
      title: Email
      items:
      - name: email-address
        title: Email Address
        type: text
      - name: description
        type: label
        title: "Note: The system will send you an email every hour."
```
![Email address label on the configuration screen](/images/config-screen-label.png)

[View a larger version of this image](/images/config-screen-label.png)

### `password`
The `password` type is a text field that hides the character input.

```yaml
    - name: password_text
      title: Password Text
      type: password
      value: '{{repl RandomString 10}}'
```

![Password text field on the configuration screen](/images/config-screen-password.png)

[View a larger version of this image](/images/config-screen-password.png)

### `radio`

> Introduced in KOTS v1.114.0

The `radio` item type includes one or more nested items that are displayed as radio buttons on the Admin Console config screen. Radio buttons are especially useful for displaying short lists of options. You can also use the [`dropdown`](#dropdown) item type for items with longer lists of options.

To set a default value for `radio` items, set the `default` field to the name of the target nested item.

```yaml
spec:
  groups:
    - name: example_settings
      title: My Example Config
      items:
      - name: authentication_type
        title: Authentication Type
        default: authentication_type_anonymous
        type: radio
        items:
        - name: authentication_type_anonymous
          title: Anonymous
        - name: authentication_type_password
          title: Password
```

### `select_one` (Deprecated)

:::important
The `select_one` item type is deprecated and is not recommended for use. To display config items with multiple options, use the [`radio`](#radio) or [`dropdown`](#dropdown) item types instead.
:::

`select_one` items must contain nested items. The nested items are displayed as radio buttons in the Admin Console.

You can use the `name` field of a `select_one` item with KOTS template functions in the Config context (such as ConfigOption or ConfigOptionEquals) to return the option selected by the user.

For example, if the user selects the **Password** option for the `select_one` item shown below, then the template function `'{{repl ConfigOption "authentication_type"}}'` is rendered as `authentication_type_password`. For more information about working with template functions in the Config context, see [Config Context](/reference/template-functions-config-context).

```yaml
spec:
  groups:
    - name: example_settings
      title: My Example Config
      description: Configuration to serve as an example for creating your own. See [https://kots.io/reference/v1beta1/config/](https://kots.io/reference/v1beta1/config/) for configuration docs. In this case, we provide example fields for configuring an Nginx welcome page.
      items:
      - name: authentication_type
        title: Authentication Type
        default: authentication_type_anonymous
        type: select_one
        items:
        - name: authentication_type_anonymous
          title: Anonymous
        - name: authentication_type_password
          title: Password
```

![Select one field on the configuration screen](/images/config-screen-selectone.png)

### `text`
A `text` input field allows users to enter a string value.
All of the optional item properties are also available for this input type.

```yaml
    - name: example_text_input
      title: Example Text Input
      type: text
```

![Text field on the configuration screen](/images/config-screen-text.png)

:::important
Do not store secrets or passwords in `text` items because they are not encrypted or masked and can be easily accessed. Instead, use [`password`](#password) items.
:::

### `textarea`
A `textarea` item creates a multi-line text input for cases where users need to enter a sizeable amount of text.

```yaml
    - name: custom_key
      title: Set your secret key for your app
      description: Paste in your Custom Key
      items:
      - name: key
        title: Key
        type: textarea
      - name: hostname
        title: Hostname
        type: text
```
![Text area field on the configuration screen](/images/config-screen-textarea.png)

## Item Validation

A `validation` can be specified to validate the value of an item. Currently, `regex` is the only supported validation type.

Based on the specified validation rules, the item is validated, and a validation message is returned if a validation rule is not satisfied. A default message is returned if the validation message is empty.

The validation rules are as follows:

- An item is validated only when its value is not empty.
- Items of types `text`, `textarea`, `password`, and `file` are validated, but `repeatable` items are not validated.
- If an item is marked as `hidden` or if its `when` condition is set to `false`, the item is not validated.
- If a group `when` condition is set to `false`, the items in the group are not validated.

### `regex`
For applications installed with KOTS v1.98.0 or later, a `regex` can be used to validate whether an item's value matches the provided regular expression `pattern`. The regex pattern must use [RE2 regular expression](https://github.com/google/re2/wiki/Syntax) syntax and can validate the `text`, `textarea`, `password`, and `file` field types.

The default validation message is `Value does not match regex`.

<RegexValidationExample/>

![Password validation error](/images/regex_password_validation_error.png)

![File validation error only when uploaded](/images/regex_file_validation_error.png)

## Repeatable Items

A repeatable config item copies a YAML array entry or YAML document for as many values as are provided. Any number of values can be added to a repeatable item to generate additional copies.

To make an item repeatable, set `repeatable` to true:

```yaml
  - name: ports_group
    items:
    - name: serviceport
      title: Service Port
      type: text
      repeatable: true
```

Repeatable items do not use the `default` or `value` fields, but instead a `valuesByGroup` field.
`valuesByGroup` must have an entry for the parent Config Group name, with all of the default `key:value` pairs nested in the group. At least one default entry is required for the repeatable item:

```yaml
      valuesByGroup:
        ports_group:
          port-default-1: "80"
```

### Limitations

* Repeatable items work only for text, textarea, and file types.
* Repeatable item names must consist only of lowercase alphanumeric characters.
* Repeatable items are only supported for Kubernetes manifests, not Helm charts.

### Template Targets

Repeatable items require that you provide at least one `template`. The `template` defines a YAML target in the manifest to duplicate for each repeatable item.

Required fields for a template target are `apiVersion`, `kind`, and `name`.

`namespace` is an optional template target field to match a YAML document's `metadata.namespace` property when the same filename is used across multiple namespaces.

The entire YAML node at the target is duplicated, including nested fields.

The `yamlPath` field of the `template` must denote index position for arrays using square brackets. For example, `spec.ports[0]` selects the first port entry for duplication.
All duplicate YAML is appended to the final array in the `yamlPath`.

`yamlPath` must end with an array.

**Example:**

```yaml
      templates:
      - apiVersion: v1
        kind: Service
        name: my-service
        namespace: my-app
        yamlPath: 'spec.ports[0]'
```

If the `yamlPath` field is not present, the entire YAML document matching the `template` is replaced with a copy for each of the repeatable item entries. The `metadata.name` field of the new document reflects the repeatable item `key`.

### Templating

Repeatable items are referenced with the delimiters `repl[[ .itemName ]]` or `[[repl .itemName ]]`. These delimiters can be placed anywhere inside of the `yamlPath` target node:

```yaml
  - port: repl{{ ConfigOption "[[repl .serviceport ]]" | ParseInt }}
    name: '[[repl .serviceport ]]'
```
This repeatable templating is not compatible with sprig templating functions. It is designed for inserting repeatable `keys` into the manifest. Repeatable templating can be placed inside of Replicated config templating.

### Ordering

Repeatable templates are processed before config template rendering.

Repeatable items are processed in order of the template targets in the Config Spec file. Effectively, this ordering is from the top of the Config Spec, by Config Group, by Config Item, and then by template target.

```yaml
  - name: ports_group
    items:
    - name: serviceport
      title: Service Port
      type: text
      repeatable: true
      templates:
      - apiVersion: v1 #processed first
        kind: Service
        name: my-service
        namespace: my-app
        yamlPath: 'spec.ports[0]'
      - apiVersion: v1 #processed second
        kind: Service
        name: my-service
        namespace: my-app
      {other item properties ...}
    - name: other_ports
      title: Other Service Port
      type: text
      repeatable: true
      templates:
      - apiVersion: v1 #processed third
        kind: Service
        name: my-other-service
        namespace: my-app
      {other item properties ...}
  - name: deployments
    items:
    - name: deployment-name
      title: Deployment Names
      type: text
      repeatable: true
      templates:
      - apiVersion: apps/v1 #processed fourth
        kind: Deployment
        name: my-deployment
        namespace: my-app
      {other item properties ...}
```

## Repeatable Examples

In these examples, the default service port of "80" is included with the release. Port 443 is added as an additional port on the Admin Console configuration page, and the added value is stored in the ConfigValues file.

### Repeatable Item Example for a yamlPath

**Config custom resource manifest file:**

```yaml
  - name: ports_group
    items:
    - name: serviceport
      title: Service Port
      type: text
      repeatable: true
      templates:
      - apiVersion: v1
        kind: Service
        name: my-service
        namespace: my-app
        yamlPath: spec.ports[0]
      valuesByGroup:
        ports_group:
          port-default-1: "80"
```

**Config values:**
```yaml
apiVersion: kots.io/v1beta1
kind: ConfigValues
metadata:
  name: example_app
spec:
  values:
    port-default-1:
      repeatableItem: serviceport
      value: "80"
    serviceport-8jdn2bgd:
      repeatableItem: serviceport
      value: "443"
```

**Template manifest:**
```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-service
  namespace: my-app
spec:
  type: NodePort
  ports:
  - port: repl{{ ConfigOption "[[repl .serviceport ]]" | ParseInt }}
    name: '[[repl .serviceport ]]'
  selector:
    app: repeat_example
    component: my-deployment
```

**After repeatable config processing:**

**Note**: This phase is internal to configuration rendering for KOTS.
This example is only provided to further explain the templating process.* + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service + namespace: my-app +spec: + type: NodePort + ports: + - port: repl{{ ConfigOption "port-default-1" | ParseInt }} + name: 'port-default-1' + - port: repl{{ ConfigOption "serviceport-8jdn2bgd" | ParseInt }} + name: 'serviceport-8jdn2bgd' + selector: + app: repeat_example + component: my-deployment +``` + +**Resulting manifest:** +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service + namespace: my-app +spec: + type: NodePort + ports: + - port: 80 + name: port-default-1 + - port: 443 + name: serviceport-8jdn2bgd + selector: + app: repeat_example + component: my-deployment +``` + +### Repeatable Item Example for an Entire Document +**Config spec:** +```yaml + - name: ports_group + items: + - name: serviceport + title: Service Port + type: text + repeatable: true + templates: + - apiVersion: v1 + kind: Service + name: my-service + namespace: my-app + valuesByGroup: + ports_group: + port-default-1: "80" +``` + +**Config values:** +```yaml +apiVersion: kots.io/v1beta1 +kind: ConfigValues +metadata: + name: example_app +spec: + values: + port-default-1: + repeatableItem: serviceport + value: "80" + serviceport-8jdn2bgd: + repeatableItem: serviceport + value: "443" +``` + +**Template manifest:** +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service + namespace: my-app +spec: + type: NodePort + ports: + - port: repl{{ ConfigOption "[[repl .serviceport ]]" | ParseInt }} + selector: + app: repeat_example + component: repl[[ .serviceport ]] +``` + +**After repeatable config processing:** + +**Note**: This phase is internal to configuration rendering for KOTS. This example is only provided to further explain the templating process.* + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: port-default-1 + namespace: my-app +spec: + type: NodePort + ports: + - port: repl{{ ConfigOption "port-default-1" | ParseInt }} + selector: + app: repeat_example + component: port-default-1 +--- +apiVersion: v1 +kind: Service +metadata: + name: serviceport-8jdn2bgd + namespace: my-app +spec: + type: NodePort + ports: + - port: repl{{ ConfigOption "serviceport-8jdn2bgd" | ParseInt }} + selector: + app: repeat_example + component: serviceport-8jdn2bgd +``` + +**Resulting manifest:** +```yaml +apiVersion: v1 +kind: Service +metadata: + name: port-default-1 + namespace: my-app +spec: + type: NodePort + ports: + - port: 80 + selector: + app: repeat_example + component: port-default-1 +--- +apiVersion: v1 +kind: Service +metadata: + name: serviceport-8jdn2bgd + namespace: my-app +spec: + type: NodePort + ports: + - port: 443 + selector: + app: repeat_example + component: serviceport-8jdn2bgd +``` + +================ +File: docs/reference/custom-resource-helmchart-v2.mdx +================ +import VersionLimitation from "../partials/helm/_helm-version-limitation.mdx" +import HelmBuilderRequirements from "../partials/helm/_helm-builder-requirements.mdx" +import Chart from "../partials/helm/_helm-cr-chart.mdx" +import ChartName from "../partials/helm/_helm-cr-chart-name.mdx" +import ChartVersion from "../partials/helm/_helm-cr-chart-version.mdx" +import ChartReleaseName from "../partials/helm/_helm-cr-chart-release-name.mdx" +import HelmUpgradeFlags from "../partials/helm/_helm-cr-upgrade-flags.mdx" +import Values from "../partials/helm/_helm-cr-values.mdx" +import Weight from "../partials/helm/_helm-cr-weight.mdx" +import Exclude from 
"../partials/helm/_helm-cr-exclude.mdx" +import OptionalValues from "../partials/helm/_helm-cr-optional-values.mdx" +import OptionalValuesWhen from "../partials/helm/_helm-cr-optional-values-when.mdx" +import OptionalValuesRecursiveMerge from "../partials/helm/_helm-cr-optional-values-recursive-merge.mdx" +import Namespace from "../partials/helm/_helm-cr-namespace.mdx" +import BuilderAirgapIntro from "../partials/helm/_helm-cr-builder-airgap-intro.mdx" +import BuilderExample from "../partials/helm/_helm-cr-builder-example.mdx" +import V2Example from "../partials/helm/_v2-native-helm-cr-example.mdx" +import KotsHelmCrDescription from "../partials/helm/_kots-helm-cr-description.mdx" + +# HelmChart v2 + +> Introduced in Replicated KOTS v1.99.0 + +<KotsHelmCrDescription/> + +For more information, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about). + +## Example + +The following is an example manifest file for the HelmChart v2 custom resource: + +<V2Example/> + +## chart + +<Chart/> + +### chart.name + +<ChartName/> + +### chart.chartVersion + +<ChartVersion/> + +## releaseName + +<ChartReleaseName/> + +## weight + +<Weight/> + +## helmUpgradeFlags + +<HelmUpgradeFlags/> + +## exclude + +<Exclude/> + +## values + +<Values/> + +For more information about using `values`, see [Setting Helm Chart Values with KOTS](/vendor/helm-optional-value-keys). + +## optionalValues + +<OptionalValues/> + +For more information about using `optionalValues`, see [Setting Helm Chart Values with KOTS](/vendor/helm-optional-value-keys). + +### optionalValues.when + +<OptionalValuesWhen/> + +### optionalValues.recursiveMerge + +<OptionalValuesRecursiveMerge/> + +**Default**: False + +For an example of recursive and non-recursive merging, see [About Recursive Merge](/vendor/helm-optional-value-keys#recursive-merge). + +## namespace + +<Namespace/> + +## builder + +The `builder` key is used to provide Helm values that are used during various stages of processing the Helm chart. + +The `builder` key is required for the following use cases: + +* To create an `.airgap` bundle for installations into air gap environments. + + <BuilderAirgapIntro/> + + For more information, see [Packaging Air Gap Bundles for Helm Charts](/vendor/helm-packaging-airgap-bundles). + +* To support online installations that use a local private registry, the `builder` field renders the Helm chart with all of the necessary images so that KOTS knows where to pull the images. + + You cannot prevent customers from configuring a local private registry in the Admin Console. If you think any of your customers will use a local private registry, you should use the `builder` key. For more information, see [Configuring Local Image Registries](/enterprise/image-registry-settings). + +<HelmBuilderRequirements/> + +* Use the same `builder` configuration to support the use of local registries in both online and air gap installations. If you already configured the `builder` key to support air gap installations, then no additional configuration is required. 
+ +**Example:** + +<BuilderExample/> + +================ +File: docs/reference/custom-resource-helmchart.mdx +================ +import VersionLimitation from "../partials/helm/_helm-version-limitation.mdx" +import HelmBuilderRequirements from "../partials/helm/_helm-builder-requirements.mdx" +import Chart from "../partials/helm/_helm-cr-chart.mdx" +import ChartName from "../partials/helm/_helm-cr-chart-name.mdx" +import ChartVersion from "../partials/helm/_helm-cr-chart-version.mdx" +import ChartReleaseName from "../partials/helm/_helm-cr-chart-release-name.mdx" +import HelmUpgradeFlags from "../partials/helm/_helm-cr-upgrade-flags.mdx" +import Values from "../partials/helm/_helm-cr-values.mdx" +import Weight from "../partials/helm/_helm-cr-weight.mdx" +import WeightLimitation from "../partials/helm/_helm-cr-weight-limitation.mdx" +import Exclude from "../partials/helm/_helm-cr-exclude.mdx" +import OptionalValues from "../partials/helm/_helm-cr-optional-values.mdx" +import OptionalValuesWhen from "../partials/helm/_helm-cr-optional-values-when.mdx" +import OptionalValuesRecursiveMerge from "../partials/helm/_helm-cr-optional-values-recursive-merge.mdx" +import Namespace from "../partials/helm/_helm-cr-namespace.mdx" +import BuilderAirgapIntro from "../partials/helm/_helm-cr-builder-airgap-intro.mdx" +import BuilderExample from "../partials/helm/_helm-cr-builder-example.mdx" +import Deprecated from "../partials/helm/_replicated-deprecated.mdx" +import ReplicatedHelmMigration from "../partials/helm/_replicated-helm-migration.mdx" +import KotsHelmCrDescription from "../partials/helm/_kots-helm-cr-description.mdx" + + +# HelmChart v1 (Deprecated) + +:::important +<Deprecated/> +::: + +<KotsHelmCrDescription/> + +For more information, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about). + +## Example + +The following is an example manifest file for the HelmChart v1 custom resource: + +```yaml +apiVersion: kots.io/v1beta1 +kind: HelmChart +metadata: + name: samplechart +spec: + # chart identifies a matching chart from a .tgz + chart: + name: samplechart + chartVersion: 3.1.7 + releaseName: samplechart-release-1 + + exclude: "repl{{ ConfigOptionEquals `include_chart` `include_chart_no`}}" + + # helmVersion identifies the Helm Version used to render the chart. Default is v3. + helmVersion: v3 + + # useHelmInstall identifies the kots.io/v1beta1 installation method + useHelmInstall: true + + # weight determines the order that charts with "useHelmInstall: true" are applied, with lower weights first. + weight: 42 + + # helmUpgradeFlags specifies additional flags to pass to the `helm upgrade` command. 
+ helmUpgradeFlags: + - --skip-crds + - --no-hooks + - --timeout + - 1200s + - --history-max=15 + + # values are used in the customer environment, as a pre-render step + # these values will be supplied to helm template + values: + postgresql: + enabled: repl{{ ConfigOptionEquals `postgres_type` `embedded_postgres`}} + + optionalValues: + - when: "repl{{ ConfigOptionEquals `postgres_type` `external_postgres`}}" + recursiveMerge: false + values: + postgresql: + postgresqlDatabase: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_database`}}repl{{ end}}" + postgresqlUsername: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_username`}}repl{{ end}}" + postgresqlHost: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_host`}}repl{{ end}}" + postgresqlPassword: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_password`}}repl{{ end}}" + postgresqlPort: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_port`}}repl{{ end}}" + + # namespace allows for a chart to be installed in an alternate namespace to + # the default + namespace: samplechart-namespace + + # builder values provide a way to render the chart with all images + # and manifests. this is used in Replicated to create `.airgap` packages + builder: + postgresql: + enabled: true +``` + +## chart + +<Chart/> + +### chart.name + +<ChartName/> + +### chart.chartVersion + +<ChartVersion/> + +### chart.releaseName + +> Introduced in Replicated KOTS v1.73.0 + +<ChartReleaseName/> + +## helmVersion + +Identifies the Helm Version used to render the chart. Acceptable values are `v2` or `v3`. `v3` is the default when no value is specified. + +:::note +<VersionLimitation/> +::: + +## useHelmInstall + +Identifies the method that KOTS uses to install the Helm chart: +* `useHelmInstall: true`: KOTS uses Kustomize to modify the chart then repackages the resulting manifests to install. This was previously referred to as the _native Helm_ installation method. + +* `useHelmInstall: false`: KOTS renders the Helm templates and deploys them as standard Kubernetes manifests using `kubectl apply`. This was previously referred to as the _Replicated Helm_ installation method. + + :::note + <ReplicatedHelmMigration/> + ::: + +For more information about how KOTS deploys Helm charts when `useHelmInstall` is `true` or `false`, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about). + +## weight + +<WeightLimitation/> + +<Weight/> + +## helmUpgradeFlags + +The `helmUpgradeFlags` field is _not_ supported for HelmChart custom resources with `useHelmInstall: false`. + +<HelmUpgradeFlags/> + +## values + +<Values/> + +## exclude + +<Exclude/> + +## optionalValues + +<OptionalValues/> + +### optionalValues.when + +<OptionalValuesWhen/> + +### optionalValues.recursiveMerge + +:::note +`recursiveMerge` is available in KOTS v1.38.0 and later. +::: + +<OptionalValuesRecursiveMerge/> + +**Default**: False + +## namespace + +<Namespace/> + +## builder + +<BuilderAirgapIntro/> + +<HelmBuilderRequirements/> + +**Example:** + +<BuilderExample/> + +================ +File: docs/reference/custom-resource-identity.md +================ +:::important +This topic is deleted from the product documentation because this Beta feature is deprecated. 
+::: + +# Identity (Beta) + +The Identity custom resource allows you to configure the Replicated identity service for your application. + +The following is an example manifest file for the Identity custom resource: + +```yaml +apiVersion: kots.io/v1beta1 +kind: Identity +metadata: + name: my-application +spec: + identityIssuerURL: https://{{repl ConfigOption "ingress_hostname"}}/dex + oidcRedirectUris: + - https://{{repl ConfigOption "ingress_hostname"}}/oidc/login/callback + supportedProviders: [ oidc ] + requireIdentityProvider: true + roles: + - id: member + name: Member + description: Can see every member and non-secret team in the organization. + - id: owner + name: Owner + description: Has full administrative access to the entire organization. + oauth2AlwaysShowLoginScreen: false + signingKeysExpiration: 6h + idTokensExpiration: 24h + webConfig: + title: My App + theme: + logoUrl: data:image/png;base64,<encoded_base64_stream> + logoBase64: <base64 encoded png file> + styleCssBase64: <base64 encoded [styles.css](https://github.com/dexidp/dex/blob/v2.27.0/web/themes/coreos/styles.css) file> + faviconBase64: <base64 encoded png file> +``` + +## identityIssuerURL +**(required)** This is the canonical URL that all clients must use to refer to the OIDC identity service. +If a path is provided, the HTTP service will listen at a non-root URL. + +## oidcRedirectUris +**(required)** A registered set of redirect URIs. +When redirecting from the Replicated app manager identity OIDC server to the client, the URI requested to redirect to must match one of these values. + +## supportedProviders +A list of supported identity providers. +If unspecified, all providers will be available. + +## requireIdentityProvider +If true, require the identity provider configuration to be set by the customer before the app can be deployed. + +## roles +**(`id` required)** A list of roles to be mapped to identity provider groups by the customer on the Replicated Admin Console identity service configuration page. + +## oauth2AlwaysShowLoginScreen +If true, show the identity provider selection screen even if there's only one configured. +Default `false`. + +## signingKeysExpiration +Defines the duration of time after which the SigningKeys will be rotated. +Default `6h`. + +## idTokensExpiration +Defines the duration of time for which the IdTokens will be valid. +Default `24h`. + +## webConfig +Can be used for branding the application identity login screen. + +================ +File: docs/reference/custom-resource-lintconfig.mdx +================ +import LinterDefinition from "../partials/linter-rules/_linter-definition.mdx" + +# LintConfig + +<LinterDefinition/> + +The linter runs automatically against releases that you create in the Replicated vendor portal, and displays any error or warning messages in the vendor portal UI. + +The linter rules have default levels that can be overwritten. You can configure custom levels by adding a LintConfig manifest file (`kind: LintConfig`) to the release. Specify the rule name and level you want the rule to have. Rules that are not included in the LintConfig manifest file keep their default level. For information about linter rules and their default levels, see [Linter Rules](/reference/linter). 
+ +The supported levels are: + +<table> + <tr> + <th width="20%">Level</th> + <th width="80%">Description</th> + </tr> + <tr> + <td>error</td> + <td>The rule is enabled and shows as an error.</td> + </tr> + <tr> + <td>warn</td> + <td>The rule is enabled and shows as a warning.</td> + </tr> + <tr> + <td>info</td> + <td>The rule is enabled and shows an informational message.</td> + </tr> + <tr> + <td>off</td> + <td>The rule is disabled.</td> + </tr> + </table> + + +## Example +The following example manifest file overwrites the level for the application-icon to `off` to disable the rule. Additionally, the level for the application-statusInformers rule is changed to `error`, so instead of the default warning, it displays an error if the application is missing status informers. + +```yaml +apiVersion: kots.io/v1beta1 +kind: LintConfig +metadata: + name: default-lint-config +spec: + rules: + - name: application-icon + level: "off" + - name: application-statusInformers + level: "error" +``` + +================ +File: docs/reference/custom-resource-preflight.md +================ +# Preflight and Support Bundle + +You can define preflight checks and support bundle specifications for Replicated KOTS and Helm installations. + +Preflight collectors and analyzers provide cluster operators with clear feedback for any missing requirements or incompatibilities in the target environment before an application is deployed. Preflight checks are not automatically included in releases, so you must define them if you want to include them with a release. + +Support bundles collect and analyze troubleshooting data from a cluster and help diagnose problems with application deployments. For KOTS, default support bundles are automatically included with releases, and can be customized. For Helm installations, support bundles are not pre-enabled and must be defined if you want to use them. + +Collectors and analyzers are configured in Preflight and Support Bundle custom resources. + +:::note +Built-in redactors run by default for preflight checks and support bundles to protect sensitive information. +::: + +## Defining Custom Resources + +To define preflight checks or customize the default support bundle settings, add the corresponding custom resource YAML to your release. Then add custom collector and analyzer specifications to the custom resource. For more information about these troubleshoot features and how to configure them, see [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about). + +The following sections show basic Preflight and Support Bundle custom resource definitions. + +### Preflight + +The Preflight custom resource uses `kind: Preflight`: + +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: Preflight +metadata: + name: sample +spec: + collectors: [] + analyzers: [] +``` + +### Support Bundle + +The Support Bundle custom resource uses `kind: SupportBundle`: + +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: SupportBundle +metadata: + name: sample +spec: + collectors: [] + analyzers: [] +``` + +## Global Fields + +Global fields, also known as shared properties, are fields that are supported on all collectors or all analyzers. The following sections list the global fields for [collectors](#collector-global-fields) and [analyzers](#analyzer-global-fields) respectively. + +Additionally, each collector and analyzer has its own fields. For more information about collector- and analyzer-specific fields, see the [Troubleshoot documentation](https://troubleshoot.sh/docs/). 
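To make these shared properties concrete before the field-by-field tables, the following is a minimal sketch of a support bundle collector that uses `exclude` with a KOTS template function. The `collect_logs` config option and the `app=my-app` label are hypothetical:

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: SupportBundle
metadata:
  name: sample
spec:
  collectors:
    - logs:
        collectorName: app-logs
        selector:
          - app=my-app
        # KOTS only: skip this collector when the hypothetical
        # collect_logs config option is disabled
        exclude: 'repl{{ ConfigOptionEquals "collect_logs" "0" }}'
```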
### Collector Global Fields

The following fields are supported on all optional collectors for preflights and support bundles. For a list of collectors, see [All Collectors](https://troubleshoot.sh/docs/collect/all/) in the Troubleshoot documentation.

<table>
  <tr>
    <th width="30%">Field Name</th>
    <th width="70%">Description</th>
  </tr>
  <tr>
    <td><code>collectorName</code></td>
    <td>(Optional) A collector can specify the <code>collectorName</code> field. In some collectors, this field controls the path where result files are stored in the support bundle.</td>
  </tr>
  <tr>
    <td><code>exclude</code></td>
    <td>(Optional) (KOTS Only) Based on the runtime available configuration, a conditional can be specified in the <code>exclude</code> field. This is useful for deployment techniques that allow templating for Replicated KOTS and the optional KOTS Helm component. When this value is <code>true</code>, the collector is not included.</td>
  </tr>
</table>

### KOTS Collector Example

This is an example of a collector definition for a KOTS support bundle:

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: SupportBundle
metadata:
  name: sample
spec:
  collectors:
    - collectd:
        collectorName: "collectd"
        image: busybox:1
        namespace: default
        hostPath: "/var/lib/collectd/rrd"
        imagePullPolicy: IfNotPresent
        imagePullSecret:
          name: my-temporary-secret
          data:
            .dockerconfigjson: ewoJICJhdXRocyI6IHsKzCQksHR0cHM6Ly9pbmRleC5kb2NrZXIuaW8vdjEvIjoge30KCX0sCgkiSHR0cEhlYWRlcnMiOiB7CgkJIlVzZXItQWdlbnQiOiAiRG9ja2VyLUNsaWVudC8xOS4wMy4xMiAoZGFyd2luKSIKCX0sCgkiY3JlZHNTdG9yZSI6ICJkZXNrdG9wIiwKCSJleHBlcmltZW50YWwiOiAiZGlzYWJsZWQiLAoJInN0YWNrT3JjaGVzdHJhdG9yIjogInN3YXJtIgp9
          type: kubernetes.io/dockerconfigjson
```

### Analyzer Global Fields

The following fields are supported on all optional analyzers for preflights and support bundles. For a list of analyzers, see [Analyzing Data](https://troubleshoot.sh/docs/analyze/) in the Troubleshoot documentation.

<table>
  <tr>
    <th width="30%">Field Name</th>
    <th width="70%">Description</th>
  </tr>
  <tr>
    <td><code>collectorName</code></td>
    <td>(Optional) An analyzer can specify the <code>collectorName</code> field.</td>
  </tr>
  <tr>
    <td><code>exclude</code></td>
    <td>(Optional) (KOTS Only) A condition based on the runtime available configuration can be specified in the <code>exclude</code> field. This is useful for deployment techniques that allow templating for KOTS and the optional KOTS Helm component. When this value is <code>true</code>, the analyzer is not included.</td>
  </tr>
  <tr>
    <td><code>strict</code></td>
    <td>(Optional) (KOTS Only) An analyzer can be set to <code>strict: true</code> so that <code>fail</code> outcomes for that analyzer prevent the release from being deployed by KOTS until the vendor-specified requirements are met. When <code>exclude: true</code> is also specified, <code>exclude</code> overrides <code>strict</code> and the analyzer is not executed.</td>
  </tr>
</table>

### KOTS Analyzer Example

This is an example of a KOTS analyzer definition with a strict preflight check and `exclude` set for installations that do not use Replicated kURL. In this case, the strict preflight is enforced on an embedded cluster but not on an existing cluster or air gap cluster.
+ +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: Preflight +metadata: + name: check-kubernetes-version +spec: + analyzers: + - clusterVersion: + exclude: 'repl{{ (not IsKurl) }}' + strict: true + outcomes: + - fail: + when: "< 1.16.0" + message: The application requires Kubernetes 1.16.0 or later + uri: https://kubernetes.io + - warn: + when: "< 1.17.0" + message: Your cluster meets the minimum version of Kubernetes, but we recommend you update to 1.17.0 or later. + uri: https://kubernetes.io + - pass: + message: Your cluster meets the recommended and required versions of Kubernetes. +``` + +================ +File: docs/reference/custom-resource-redactor.md +================ +# Redactor (KOTS Only) + +This topic describes how to define redactors with the Redactor custom resource. + +:::note +Custom redactors defined with the Redactor resource apply only to installations with Replicated KOTS. +::: + +## Overview + +Preflight checks and support bundles include built-in redactors. These built-in redactors use regular expressions to identify and hide potentially sensitive data before it is analyzed. For example, the built-in redactors hide values that match common patterns for data sources, passwords, and user IDs that can be found in standard database connection strings. They also hide environment variables with names that begin with words like token, password, or user. To view the complete list of regex patterns for the built-in redactors, see [`redact.go`](https://github.com/replicatedhq/troubleshoot/blob/main/pkg/redact/redact.go#L204) in the open-source Troubleshoot GitHub repo. + +For Replicated KOTS installations, you can also add custom redactors to support bundles using the Redactor custom resource manifest file. For example, you can redact API keys or account numbers, depending on your customer needs. For more information about redactors, see [Redacting Data](https://troubleshoot.sh/docs/redact/) in the Troubleshoot documentation. + +## Defining Custom Redactors + +You can add custom redactors for KOTS installations using the following basic Redactor custom resource manifest file (`kind: Redactor`): + +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: Redactor +metadata: + name: sample +spec: + redactors: [] +``` + +## Objects and Fields + +A redactor supports two objects: `fileSelector` and `removals`. These objects specify the files the redactor applies to and how the redactions occur. For more information and examples of these fields, see [KOTS Redactor Example](#example) below and [Redactors](https://troubleshoot.sh/docs/redact/redactors/) in the Troubleshoot documentation. + +### fileSelector + +The `fileSelector` object determines which files the redactor is applied to. If this object is omitted from the manifest file, the redactor is applied to all files. This object supports the following optional fields: + +<table> + <tr> + <th width="30%">Field Name</th> + <th width="70%">Description</th> + </tr> + <tr> + <td><code>file</code></td> + <td>(Optional) Specifies a single file for redaction.</td> + </tr> + <tr> + <td><code>files</code></td> + <td>(Optional) Specifies multiple files for redaction.</td> + </tr> +</table> + +Globbing is used to match files. For example, <code>/my/test/glob/*</code> matches <code>/my/test/glob/file</code>, but does not match <code>/my/test/glob/subdir/file</code>. + +### removals + +The `removals` object is required and defines the redactions that occur. This object supports the following fields. 
At least one of these fields must be specified:

<table>
  <tr>
    <th width="30%">Field Name</th>
    <th width="70%">Description</th>
  </tr>
  <tr>
    <td><code>regex</code></td>
    <td>(Optional) Allows a regular expression to be applied for removal and redaction on lines that immediately follow a line that matches a filter. The <code>selector</code> field is used to identify lines, and the <code>redactor</code> field specifies a regular expression that runs on the line after any line identified by <code>selector</code>. If <code>selector</code> is empty, the redactor runs on every line. Using a <code>selector</code> is useful for removing values from pretty-printed JSON, where the value to be redacted is pretty-printed on the line beneath another value.<br></br><br></br>Matches to the regex are removed or redacted, depending on the construction of the regex. Any portion of a match not contained within a capturing group is removed entirely. The contents of capturing groups tagged <code>mask</code> are masked with <code>***HIDDEN***</code>. Capturing groups tagged <code>drop</code> are dropped.</td>
  </tr>
  <tr>
    <td><code>values</code></td>
    <td>(Optional) Specifies values to replace with the string <code>***HIDDEN***</code>.</td>
  </tr>
  <tr>
    <td><code>yamlPath</code></td>
    <td>(Optional) Specifies a <code>.</code>-delimited path to the items to be redacted from a YAML document. If an item in the path is the literal string <code>*</code>, the redactor is applied to all options at that level.<br></br><br></br>Files that fail to parse as YAML or do not contain any matches are not modified. Files that do contain matches are re-rendered, which removes comments and custom formatting. Multi-document YAML is not fully supported. Only the first document is checked for matches, and if a match is found, later documents are discarded entirely.</td>
  </tr>
</table>

## KOTS Redactor Example {#example}

The following example shows `regex` and `yamlPath` redaction for a support bundle:

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: Redactor
metadata:
  name: my-redactor-name
spec:
  redactors:
  - name: all files # as no file is specified, this redactor will run against all files
    removals:
      regex:
      - redactor: (another)(?P<mask>.*)(here) # this will replace anything between the strings `another` and `here` with `***HIDDEN***`
      - selector: 'S3_ENDPOINT' # remove the value in lines immediately following those that contain the string `S3_ENDPOINT`
        redactor: '("value": ").*(")'
      yamlPath:
      - "abc.xyz.*" # redact all items in the array at key `xyz` within key `abc` in YAML documents
```

================
File: docs/reference/embedded-cluster-install.mdx
================
import ProxyLimitations from "../partials/embedded-cluster/_proxy-install-limitations.mdx"
import ProxyRequirements from "../partials/embedded-cluster/_proxy-install-reqs.mdx"


# Embedded Cluster Install Command Options

This topic describes the options available with the Embedded Cluster install command. For more information about how to install with Embedded Cluster, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded) or [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap).
+ +## Usage + +```bash +sudo ./APP_SLUG install --license LICENSE_FILE [flags] +``` +* `APP_SLUG` is the unique application slug +* `LICENSE_FILE` is the customer's license + +## Flags + +<table> + <tr> + <th width="35%">Flag</th> + <th width="65%">Description</th> + </tr> + <tr> + <td>`--admin-console-password`</td> + <td> + <p>Set the password for the Admin Console. The password must be at least six characters in length. If not set, the user is prompted to provide an Admin Console password.</p> + </td> + </tr> + <tr> + <td>`--admin-console-port`</td> + <td> + <p>Port on which to run the KOTS Admin Console. **Default**: By default, the Admin Console runs on port 30000.</p> + <p>**Limitation:** It is not possible to change the port for the Admin Console during a restore with Embedded Cluster. For more information, see [Disaster Recovery for Embedded Cluster (Alpha)](/vendor/embedded-disaster-recovery).</p> + </td> + </tr> + <tr> + <td>`--airgap-bundle`</td> + <td>The Embedded Cluster air gap bundle used for installations in air-gapped environments with no outbound internet access. For information about how to install in an air-gapped environment, see [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap).</td> + </tr> + <tr> + <td>`--cidr`</td> + <td> + <p>The range of IP addresses that can be assigned to Pods and Services, in CIDR notation. **Default:** By default, the CIDR block is `10.244.0.0/16`.</p> + <p>**Requirement**: Embedded Cluster 1.16.0 or later.</p> + </td> + </tr> + <tr> + <td>`--config-values`</td> + <td> + <p>Path to the ConfigValues file for the application. The ConfigValues file can be used to pass the application configuration values from the command line during installation, such as when performing automated installations as part of CI/CD pipelines. For more information, see [Automating Installation with Embedded Cluster](/enterprise/installing-embedded-automation).</p> + <p><strong>Requirement:</strong> Embedded Cluster 1.18.0 and later.</p> + </td> + </tr> + <tr> + <td>`--data-dir`</td> + <td> + <p>The data directory used by Embedded Cluster. **Default**: `/var/lib/embedded-cluster`</p> + <p>**Requirement**: Embedded Cluster 1.16.0 or later.</p> + <p>**Limitations:**</p> + <ul> + <li>The data directory for Embedded Cluster cannot be changed after the cluster is installed.</li> + <li>For multi-node installations, the same data directory that is supplied at installation is used for all nodes joined to the cluster. You cannot choose a different data directory when joining nodes with the Embedded Cluster `join` command. For more information about joining nodes, see [Add Nodes to a Cluster](/enterprise/embedded-manage-nodes#add-nodes) in _Managing Multi-Node Clusters with Embedded Cluster_.</li> + <li>If you use the `--data-dir` flag to change the data directory during installation, then you must use the same location when restoring in a disaster recovery scenario. For more information about disaster recovery with Embedded Cluster, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery).</li> + <li>Replicated does not support using symlinks for the Embedded Cluster data directory. 
Use the `--data-dir` flag instead of symlinking `/var/lib/embedded-cluster`.</li> + </ul> + </td> + </tr> + <tr> + <td>`--http-proxy`</td> + <td> + <p>Proxy server to use for HTTP.</p> + <ProxyRequirements/> + <ProxyLimitations/> + </td> + </tr> + <tr> + <td>`--https-proxy`</td> + <td> + <p>Proxy server to use for HTTPS.</p> + <ProxyRequirements/> + <ProxyLimitations/> + </td> + </tr> + <tr> + <td>`--local-artifact-mirror-port`</td> + <td> + <p>Port on which to run the Local Artifact Mirror (LAM). **Default**: By default, the LAM runs on port 50000.</p> + </td> + </tr> + <tr> + <td>`--network-interface`</td> + <td> + <p>The name of the network interface to bind to for the Kubernetes API. A common use case of `--network-interface` is for multi-node clusters where node communication should happen on a particular network. **Default**: If a network interface is not provided, the first valid, non-local network interface is used.</p> + </td> + </tr> + <tr> + <td>`--no-proxy`</td> + <td> + <p>Comma-separated list of hosts for which not to use a proxy.</p> + <p>For single-node installations, pass the IP address of the node where you are installing. For multi-node installations, when deploying the first node, pass the list of IP addresses for all nodes in the cluster (typically in CIDR notation). The network interface's subnet will automatically be added to the no-proxy list if the node's IP address is not already included.</p> + <p>The following are never proxied:</p> + <ul> + <li>Internal cluster communication (`localhost`, `127.0.0.1`, `.cluster.local`, `.svc`)</li> + <li>The CIDR block used for assigning IPs to Kubernetes Pods and Services. By default, the CIDR block is `10.244.0.0/16`. For information about how to change this default, see [Set IP Address Range for Pods and Services](#set-ip-address-range-for-pods-and-services).</li> + </ul> + <p>To ensure your application's internal cluster communication is not proxied, use fully qualified domain names like `my-service.my-namespace.svc` or `my-service.my-namespace.svc.cluster.local`.</p> + <ProxyRequirements/> + <ProxyLimitations/> + </td> + </tr> + <tr> + <td>`--private-ca`</td> + <td> + <p>The path to trusted certificate authority (CA) certificates. Using the `--private-ca` flag ensures that the CA is trusted by the installation. KOTS writes the CA certificates provided with the `--private-ca` flag to a ConfigMap in the cluster.</p> + <p>The KOTS [PrivateCACert](/reference/template-functions-static-context#privatecacert) template function returns the ConfigMap containing the private CA certificates supplied with the `--private-ca` flag. 
You can use this template function to mount the ConfigMap so your containers trust the CA too.</p> + </td> + </tr> +</table> + +## Examples + +### Air Gap Install + +```bash +sudo ./my-app install --license license.yaml --airgap-bundle myapp.airgap +``` + +### Change the Admin Console and LAM Ports + +```bash +sudo ./my-app install --license license.yaml --admin-console-port=20000 --local-artifact-mirror-port=40000 +``` + +### Change the Data Directory + +```bash +sudo ./my-app install --license license.yaml --data-dir /data/embedded-cluster +``` + +### Headless (Automated) Install + +```bash +sudo ./my-app install --license license.yaml \ + --config-values configvalues.yaml \ + --admin-console-password password +``` + +### Install Behind a Proxy + +```bash +sudo ./APP_SLUG install --license license.yaml \ + --http-proxy=HOST:PORT \ + --https-proxy=HOST:PORT \ + --no-proxy=LIST_OF_HOSTS +``` +Where: + +* `HOST:PORT` is the host and port of the proxy server +* `LIST_OF_HOSTS` is the list of hosts to not proxy. For example, the IP address of the node where you are installing. Or, for multi-node clusters, the list of IP addresses for all nodes in the cluster, typically in CIDR notation. + +### Install Behind an MITM Proxy + +```bash +sudo ./my-app install --license license.yaml --private-ca /path/to/private-ca-bundle \ + --http-proxy=http://10.128.0.0:3300 \ + --https-proxy=http://10.128.0.0:3300 \ + --no-proxy=123.89.46.4,10.96.0.0/16,*.example.com +``` + +### Set Admin Console Password + +```bash +sudo ./my-app install --license license.yaml --admin-console-password password +``` + +### Set IP Address Range for Pods and Services + +```bash +sudo ./my-app install --license license.yaml --cidr 172.16.136.0/16 +``` + +### Use a Specific Network Interface + +```bash +sudo ./my-app install --license license.yaml --network-interface eno167777 +``` + +================ +File: docs/reference/embedded-config.mdx +================ +# Embedded Cluster Config + +This topic is a reference for the Replicated Embedded Cluster Config custom resource. For more information about Embedded Cluster, see [Using Embedded Cluster](/vendor/embedded-overview). + +:::note +Embedded Cluster is in beta. If you are instead looking for information about creating Kubernetes Installers with Replicated kURL, see the [Replicated kURL](/vendor/packaging-embedded-kubernetes) section. +::: + +## Overview + +To install your application with Embedded Cluster, an Embedded Cluster Config must be created in a release. Embedded Cluster installation artifacts are available only for releases that include an Embedded Cluster Config. + +The Embedded Cluster Config lets you define several aspects of the Kubernetes cluster that will be created. + +### Limitations + +* The Embedded Cluster Config does not support the use of Go template functions, including [KOTS template functions](/reference/template-functions-about). + +For additional property-specific limitations, see the sections below. 
+ +### Example + +```yaml +apiVersion: embeddedcluster.replicated.com/v1beta1 +kind: Config +spec: + version: 2.1.3+k8s-1.30 + roles: + controller: + name: management + labels: + management: "true" + custom: + - name: app + labels: + app: "true" + extensions: + helm: + repositories: + - name: ingress-nginx + url: https://kubernetes.github.io/ingress-nginx + charts: + - name: ingress-nginx + chartname: ingress-nginx/ingress-nginx + namespace: ingress-nginx + version: "4.8.3" + values: | + controller: + service: + type: NodePort + nodePorts: + http: "80" + https: "443" + # Known issue: Only use image tags for multi-architecture images. + # Set digest to empty string to ensure the air gap builder uses + # single-architecture images. + image: + digest: "" + digestChroot: "" + admissionWebhooks: + patch: + image: + digest: "" +``` + +## version + +You must specify which version of Embedded Cluster to install. Each version of Embedded Cluster includes particular versions of components like KOTS (Admin Console) and OpenEBS. + +For a full list of versions, see the Embedded Cluster [releases page](https://github.com/replicatedhq/embedded-cluster/releases) in GitHub. It's recommended to keep this version as up to date as possible because Embedded Cluster is changing rapidly. + +## roles + +You can optionally customize node roles in the Embedded Cluster Config using the `roles` key. + +If the `roles` key is configured, users select one or more roles to assign to a node when it is joined to the cluster. A single node can be assigned: +* The `controller` role, which designates nodes that run the Kubernetes control plane +* One or more `custom` roles +* Both the `controller` role _and_ one or more `custom` roles + +For more information about how to assign node roles in the Admin Console, see [Managing Multi-Node Clusters with Embedded Cluster](/enterprise/embedded-manage-nodes). + +If the `roles` key is _not_ configured, all nodes joined to the cluster are assigned the `controller` role. The `controller` role designates nodes that run the Kubernetes control plane. Controller nodes can also run other workloads, such as application or Replicated KOTS workloads. + +For more information, see the sections below. + +### controller + +By default, all nodes joined to a cluster are assigned the `controller` role. + +You can customize the `controller` role in the following ways: +* Change the `name` that is assigned to controller nodes. By default, controller nodes are named “controller”. If you plan to create any `custom` roles, Replicated recommends that you change the default name for the `controller` role to a term that is easy to understand, such as "management". This is because, when you add `custom` roles, both the name of the `controller` role and the names of any `custom` roles are displayed to the user when they join a node. +* Add one or more `labels` to be assigned to all controller nodes. See [labels](#labels). + +#### Example + +```yaml +apiVersion: embeddedcluster.replicated.com/v1beta1 +kind: Config +spec: + roles: + controller: + name: management + labels: + management: "true" # Label applied to "management" nodes +``` + +### custom + +You can add `custom` roles that users can assign to one or more nodes in the cluster. Each `custom` role that you add must have a `name` and can also have one or more `labels`. See [labels](#labels). + +Adding `custom` node roles is useful if you need to assign application workloads to specific nodes in multi-node clusters. 
For example, if your application has graphics processing unit (GPU) workloads, you could create a `custom` role that adds a `gpu=true` label to any node that is assigned the role. This allows you to schedule GPU workloads on nodes labeled `gpu=true`. Or, if your application includes any resource-intensive workloads (such as a database) that must run on dedicated nodes, you could create a `custom` role that adds a `db=true` label to the node, so that the database workload can be scheduled on a specific node or nodes.
+
+#### Example
+
+```yaml
+apiVersion: embeddedcluster.replicated.com/v1beta1
+kind: Config
+spec:
+  roles:
+    custom:
+      - name: app
+        labels:
+          app: "true" # Label applied to "app" nodes
+```
+
+### labels
+
+You can define Kubernetes labels for the default `controller` role and any `custom` roles that you add. When `labels` are defined, Embedded Cluster applies the label to any node in the cluster that is assigned the given role. Labels are useful for tasks like assigning workloads to nodes, as shown in the scheduling sketch after the following example.
+
+#### Example
+
+```yaml
+apiVersion: embeddedcluster.replicated.com/v1beta1
+kind: Config
+spec:
+  roles:
+    controller:
+      name: management
+      labels:
+        management: "true" # Label applied to "management" nodes
+    custom:
+      - name: db
+        labels:
+          db: "true" # Label applied to "db" nodes
+      - name: gpu
+        labels:
+          gpu: "true" # Label applied to "gpu" nodes
+```
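+
+For example, if the `gpu` custom role above is assigned to a node, a workload in your application can target that node with a `nodeSelector`. The following is a minimal sketch; the Deployment name and image are hypothetical placeholders:
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: gpu-worker # Hypothetical GPU workload
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: gpu-worker
+  template:
+    metadata:
+      labels:
+        app: gpu-worker
+    spec:
+      nodeSelector:
+        gpu: "true" # Schedule only on nodes assigned the "gpu" custom role
+      containers:
+        - name: worker
+          image: registry.example.com/gpu-worker:1.0.0 # Placeholder image
+```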
+
+## extensions
+
+If you need to install Helm charts before your application and as part of the Embedded Cluster itself, you can do this with Helm extensions. One situation where this is useful is if you want to ship an ingress controller, because Embedded Cluster does not yet include one.
+
+Helm extensions are updated when new versions of your application are deployed from the Admin Console. So, for example, you can change the values for a Helm extension from one release to another, and those changes will be applied to the cluster when the new release is deployed.
+
+Helm extensions are specified using the same format as k0s Helm extensions in the k0s configuration. For more information about these fields, see the [k0s documentation](https://docs.k0sproject.io/stable/helm-charts/#example).
+
+### Limitation
+
+If a Helm extension is removed from the Embedded Cluster Config, the associated Helm chart is not removed from the cluster.
+
+### Requirements
+
+* The `version` field is required. Failing to specify a chart version will cause problems for upgrades.
+
+* If you need to install multiple charts in a particular order, set the `order` field to a value greater than or equal to 10. Numbers below 10 are reserved for use by Embedded Cluster to deploy things like a storage provider and the Admin Console. If an `order` is not provided, Helm extensions are installed with order 10.
+
+### Example
+
+```yaml
+apiVersion: embeddedcluster.replicated.com/v1beta1
+kind: Config
+spec:
+  extensions:
+    helm:
+      repositories:
+        - name: ingress-nginx
+          url: https://kubernetes.github.io/ingress-nginx
+      charts:
+        - name: ingress-nginx
+          chartname: ingress-nginx/ingress-nginx
+          namespace: ingress-nginx
+          version: "4.8.3"
+          values: |
+            controller:
+              service:
+                type: NodePort
+                nodePorts:
+                  http: "80"
+                  https: "443"
+              # Known issue: Only use image tags for multi-architecture images.
+              # Set digest to empty string to ensure the air gap builder uses
+              # single-architecture images.
+              image:
+                digest: ""
+                digestChroot: ""
+              admissionWebhooks:
+                patch:
+                  image:
+                    digest: ""
+```
+
+## unsupportedOverrides
+
+:::important
+This feature should be used with caution by advanced users who understand the risks and ramifications of changing the default configuration.
+:::
+
+Unsupported overrides allow you to override Embedded Cluster's default configuration, including the k0s config and the Helm values for extensions like KOTS and OpenEBS. This should be used with caution because changes here are untested and can disrupt or break Embedded Clusters. Any issues that are caused by unsupported overrides are not supported.
+
+While they should be used with caution, unsupported overrides are useful if you need to make changes that are not otherwise exposed by Embedded Cluster.
+
+### Override the k0s Config
+
+By default, Embedded Cluster uses a k0s config that is tested and known to work for Embedded Clusters. In some circumstances, you might want to change the k0s config.
+
+For more information on the k0s config, see [Configuration options](https://docs.k0sproject.io/stable/configuration/#configuration-file-reference) in the k0s documentation.
+
+For example, you can do the following to enable WireGuard-based encryption. Note that other configuration might be necessary. See [`spec.network.calico`](https://docs.k0sproject.io/stable/configuration/#specnetworkcalico) in the k0s documentation for more details.
+```yaml
+apiVersion: embeddedcluster.replicated.com/v1beta1
+kind: Config
+spec:
+  unsupportedOverrides:
+    k0s: |
+      config:
+        spec:
+          network:
+            calico:
+              wireguard: true
+```
+
+#### Limitations
+
+* The `spec.api` and `spec.storage` keys in the k0s config cannot be changed after installation. Whereas most keys in the k0s config apply to the whole cluster, these two keys are set for each node. Embedded Cluster cannot update these keys on each individual node during updates, so they cannot be changed after installation.
+
+* Overrides overwrite the corresponding fields in the k0s configuration. They are not merged into Embedded Cluster’s default configuration. When using an override to replace a list, for example, ensure that you include the elements in the list that Embedded Cluster includes by default.
+
+### Override the Helm Values for Built-In Extensions
+
+Embedded Cluster deploys built-in extensions like KOTS and OpenEBS to provide capabilities like storage and application management. These extensions are deployed with Helm, and the Helm values for each can be modified if necessary.
+
+To modify these values, you can use the `unsupportedOverrides.builtInExtensions` key of the Embedded Cluster Config. Each chart you want to modify is an item in the array. The `name` key identifies the Helm chart that you want to modify, and the `values` key is a string where you specify your modified Helm values. Your modified values are merged into the values used by Embedded Cluster.
+
+The following are the built-in extensions available for modification:
+
+- `openebs`
+- `admin-console`
+- `velero`
+- `embedded-cluster-operator`
+
+#### Example
+
+```yaml
+apiVersion: embeddedcluster.replicated.com/v1beta1
+kind: Config
+spec:
+  unsupportedOverrides:
+    builtInExtensions:
+      - name: openebs
+        values: |
+          key: value
+```
+
+================
+File: docs/reference/kots-cli-admin-console-garbage-collect-images.md
+================
+# admin-console garbage-collect-images
+
+Starts image garbage collection.
+
+The KOTS Admin Console must be running and an application must be installed in order to use this command.
+
+### Usage
+```bash
+kubectl kots admin-console garbage-collect-images -n <namespace>
+```
+
+This command supports all [global flags](kots-cli-global-flags).
+
+| Flag | Type | Description |
+|:--------------------|--------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `-h, --help` | | help for `garbage-collect-images` |
+| `-n, --namespace` | string | the namespace where the Admin Console is running _(required)_ |
+| `--ignore-rollback` | bool | force image garbage collection even if rollback is enabled for the application (default false). Note: this may impact the ability to roll back the application to a previous version. |
+
+### Examples
+```bash
+kubectl kots admin-console garbage-collect-images -n default
+```
+
+================
+File: docs/reference/kots-cli-admin-console-generate-manifests.mdx
+================
+import KotsadmNamespace from "../partials/kots-cli/_kotsadm-namespace.mdx"
+import KotsadmRegistry from "../partials/kots-cli/_kotsadm-registry.mdx"
+import RegistryPassword from "../partials/kots-cli/_registry-password.mdx"
+import RegistryUsername from "../partials/kots-cli/_registry-username.mdx"
+import StrictSecContext from "../partials/kots-cli/_strict-sec-context-yaml.mdx"
+
+# admin-console generate-manifests
+
+Running this command creates a directory on the workstation containing the Replicated Admin Console manifests. These assets can be used to deploy KOTS to a cluster through other workflows, such as kubectl, and allow additional customization of the Admin Console before deploying.
+
+### Limitations
+
+* `generate-manifests` does not support generating manifests for Red Hat OpenShift clusters or GKE Autopilot clusters if executed without a Kubernetes cluster context.
+
+* To upgrade a KOTS instance that has ever been on version 1.72.0 or earlier, you must run `generate-manifests` with a Kubernetes cluster context.
+
+* The `admin-console generate-manifests` command does not accept the [`--strict-security-context`](/reference/kots-cli-install#usage) flag, which deploys KOTS Pods with a security context. To generate Admin Console manifests with a security context, add the following to the Pod templates for Deployments and StatefulSets deployed by KOTS:
+
+    <StrictSecContext/>
+
+### Usage
+```bash
+kubectl kots admin-console generate-manifests [flags]
+```
+
+This command supports the following flags:
+
+<table>
+  <tr>
+    <td>Flag</td>
+    <td>Type</td>
+    <td>Description</td>
+  </tr>
+  <tr>
+    <td><code>--rootdir</code></td>
+    <td>string</td>
+    <td>Root directory where the YAML will be written (default `${HOME}` or `%USERPROFILE%`)</td>
+  </tr>
+  <tr>
+    <td><code>--namespace</code></td>
+    <td>string</td>
+    <td>Target namespace for the Admin Console</td>
+  </tr>
+  <tr>
+    <td><code>--shared-password</code></td>
+    <td>string</td>
+    <td>Shared password to use when deploying the Admin Console</td>
+  </tr>
+  <tr>
+    <td><code>--http-proxy</code></td>
+    <td>string</td>
+    <td>Sets HTTP_PROXY environment variable in all KOTS Admin Console components</td>
+  </tr>
+  <tr>
+    <td><code>--https-proxy</code></td>
+    <td>string</td>
+    <td>Sets HTTPS_PROXY environment variable in all KOTS Admin Console components</td>
+  </tr>
+  <KotsadmNamespace/>
+  <KotsadmRegistry/>
+  <tr>
+    <td><code>--no-proxy</code></td>
+    <td>string</td>
+    <td>Sets NO_PROXY environment variable in all KOTS Admin Console components</td>
+  </tr>
+  <tr>
+    <td><code>--private-ca-configmap</code></td>
+    <td>string</td>
+    <td>Name of a ConfigMap containing private CAs to add to the kotsadm deployment</td>
+  </tr>
+  <RegistryPassword/>
+  <RegistryUsername/>
+  <tr>
+    <td><code>--with-minio</code></td>
+    <td>bool</td>
+    <td>Set to true to include a local minio instance to be used for storage (default true)</td>
+  </tr>
+  <tr>
+    <td><code>--minimal-rbac</code></td>
+    <td>bool</td>
+    <td>Set to true to limit the generated RBAC resources to the namespace where the Admin Console will be deployed</td>
+  </tr>
+  <tr>
+    <td><code>--additional-namespaces</code></td>
+    <td>string</td>
+    <td>Comma-delimited list of additional namespace(s) managed by KOTS outside of the namespace where it is to be deployed. Ignored unless <code>--minimal-rbac=true</code> is set</td>
+  </tr>
+  <tr>
+    <td><code>--storage-class</code></td>
+    <td>string</td>
+    <td>Sets the storage class to use for the KOTS Admin Console components. <strong>Default:</strong> unset, which means the default storage class will be used</td>
+  </tr>
+</table>
+
+### Examples
+```bash
+kubectl kots admin-console generate-manifests
+kubectl kots admin-console generate-manifests --rootdir ./manifests
+kubectl kots admin-console generate-manifests --namespace kotsadm --minimal-rbac=true --additional-namespaces="app1,app3"
+```
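+
+After generating the manifests, you can customize them as needed and then deploy with a tool like kubectl. The following is a minimal sketch; the output directory layout and the `kotsadm` namespace are illustrative and can vary by KOTS version:
+
+```bash
+kubectl kots admin-console generate-manifests --rootdir ./manifests --namespace kotsadm
+kubectl create namespace kotsadm
+# The generated YAML is written under the root directory specified above;
+# the exact subdirectory name may differ.
+kubectl apply -f ./manifests/admin-console -n kotsadm
+```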
+
+================
+File: docs/reference/kots-cli-admin-console-index.md
+================
+# admin-console
+
+Enables access to the KOTS Admin Console from a local machine.
+
+This command opens localhost port 8800, which forwards to the `kotsadm` service.
+Alternatively, you can use the `--port` flag to specify a port other than 8800.
+
+To access the Admin Console, browse to http://localhost:8800 after running this command.
+
+### Usage
+```bash
+kubectl kots admin-console [flags]
+```
+
+This command supports all [global flags](kots-cli-global-flags) and also:
+
+| Flag | Type | Description |
+|:------------------|--------|---------------------------------------------------------------------------------|
+| `-h, --help` | | Help for admin-console. |
+| `-n, --namespace` | string | The namespace where the Admin Console is running. **Default:** "default" |
+| `--port` | string | Override the local port on which to access the Admin Console. **Default:** 8800 |
+
+### Examples
+```bash
+kubectl kots admin-console --namespace kots-sentry
+```
+
+================
+File: docs/reference/kots-cli-admin-console-push-images.md
+================
+# admin-console push-images
+
+Pushes images from an air gap bundle to a private registry.
+The air gap bundle can be either a KOTS Admin Console release or an application release.
+
+### Usage
+```bash
+kubectl kots admin-console push-images [airgap-bundle] [private-registry] [flags]
+```
+
+This command supports all [global flags](kots-cli-global-flags) and also:
+
+| Flag | Type | Description |
+|:------------------------|--------|-------------------------------------|
+| `-h, --help` | | Help for the command |
+| `--registry-username` | string | Username for the private registry |
+| `--registry-password` | string | Password for the private registry |
+| `--skip-registry-check` | bool | Set to `true` to skip the connectivity test and validation of the provided registry information. **Default:** `false` |
+
+### Examples
+```bash
+kubectl kots admin-console push-images ./kotsadm.tar.gz private.registry.host/app-name \
+  --registry-username rw-username \
+  --registry-password rw-password
+```
+
+================
+File: docs/reference/kots-cli-admin-console-upgrade.mdx
+================
+# admin-console upgrade
+
+import EnsureRBAC from "../partials/kots-cli/_ensure-rbac.mdx"
+import Help from "../partials/kots-cli/_help.mdx"
+import KotsadmNamespace from "../partials/kots-cli/_kotsadm-namespace.mdx"
+import KotsadmRegistry from "../partials/kots-cli/_kotsadm-registry.mdx"
+import RegistryPassword from "../partials/kots-cli/_registry-password.mdx"
+import RegistryUsername from "../partials/kots-cli/_registry-username.mdx"
+import SkipRBACCheck from "../partials/kots-cli/_skip-rbac-check.mdx"
+import StrictSecurityContext from "../partials/kots-cli/_strict-security-context.mdx"
+import WaitDuration from "../partials/kots-cli/_wait-duration.mdx"
+import WithMinIO from "../partials/kots-cli/_with-minio.mdx"
+
+Upgrades the KOTS Admin Console to match the version of the KOTS CLI.
+
+
+### Usage
+```bash
+kubectl kots admin-console upgrade [flags]
+```
+
+This command supports all [global flags](kots-cli-global-flags) and also:
+<table>
+  <tr>
+    <th width="30%">Flag</th>
+    <th>Type</th>
+    <th>Description</th>
+  </tr>
+  <EnsureRBAC/>
+  <Help/>
+  <KotsadmNamespace/>
+  <KotsadmRegistry/>
+  <RegistryPassword/>
+  <RegistryUsername/>
+  <SkipRBACCheck/>
+  <StrictSecurityContext/>
+  <WaitDuration/>
+  <WithMinIO/>
+</table>
+
+### Examples
+```bash
+kubectl kots admin-console upgrade --namespace kots-sentry
+kubectl kots admin-console upgrade --ensure-rbac=false
+```
+
+================
+File: docs/reference/kots-cli-backup-index.md
+================
+# backup
+
+Creates a full instance snapshot for disaster recovery.
+
+### Usage
+
+```bash
+kubectl kots backup [flags]
+```
+
+This command supports the following flags:
+
+| Flag | Type | Description |
+| :---------------- | ------ | ------------------------------------------------------------------------------- |
+| `-h, --help` | | Help for `backup`. |
+| `-n, --namespace` | string | The namespace where the Admin Console is running. **Default:** `default` |
+| `-o, --output` | string | The output format. Supports JSON. Defaults to plain text if not set. |
+| `--wait` | bool | Wait for the backup to finish. **Default:** `true` |
+
+### Example
+
+```bash
+kubectl kots backup --namespace kots-sentry
+```
+
+================
+File: docs/reference/kots-cli-backup-ls.md
+================
+# backup ls
+
+:::note
+This command is deprecated. Use [`kubectl kots get backups`](/reference/kots-cli-get-backups) instead.
+:::
+
+Shows a list of all the available instance snapshots for disaster recovery.
+
+### Usage
+
+```bash
+kubectl kots backup ls [flags]
+```
+
+This command supports the following flags:
+
+| Flag | Type | Description |
+| :---------------- | ------ | ------------------------------------------------------------------- |
+| `-h, --help` | | Help for `backup ls`. |
+| `-n, --namespace` | string | Filter by the namespace the Admin Console was installed in. |
+
+### Example
+
+```bash
+kubectl kots backup ls --namespace kots-sentry
+```
+
+================
+File: docs/reference/kots-cli-docker-ensure-secret.md
+================
+# docker ensure-secret
+
+Creates an image pull secret for Docker Hub that the Admin Console can use to avoid [rate limiting](/enterprise/image-registry-rate-limits).
+The credentials are validated before creating the image pull secret.
+Running this command creates a new application version, based on the latest version, with the new image pull secret added to all Kubernetes manifests that have images.
+For the secret to take effect and avoid rate limiting, the new version must be deployed.
+
+### Usage
+
+```bash
+kubectl kots docker ensure-secret [flags]
+```
+
+- _Provide `[flags]` according to the table below_
+
+| Flag | Type | Description |
+| ----------------- | ------ | ------------------------------------------------------------------- |
+| `-h, --help` | | help for ensure-secret |
+| `--dockerhub-username` | string | DockerHub username to be used _(required)_ |
+| `--dockerhub-password` | string | DockerHub password to be used _(required)_ |
+| `-n, --namespace` | string | the namespace where the Admin Console is running _(required)_ |
+
+### Example
+
+```bash
+kubectl kots docker ensure-secret --dockerhub-username sentrypro --dockerhub-password password --namespace sentry-pro
+```
+
+================
+File: docs/reference/kots-cli-docker-index.md
+================
+# docker
+
+KOTS Docker interface
+
+### Usage
+
+```bash
+kubectl kots docker [command]
+```
+
+This command supports all [global flags](kots-cli-global-flags).
+
+================
+File: docs/reference/kots-cli-download.md
+================
+# download
+
+Retrieves a copy of the application manifests from the cluster, and stores them in a specific directory structure on your workstation.
+Requires a running application with the KOTS Admin Console.
+
+## Usage
+```bash
+kubectl kots download [app-slug] [flags]
+```
+
+* _Replace `[app-slug]` with the application slug provided by your software vendor (required)._ For more information, see [Get the Application Slug](/vendor/vendor-portal-manage-app#slug) in _Managing Applications_.
+* _Provide `[flags]` according to the table below_ + +This command supports all [global flags](kots-cli-global-flags) and also: + + +| Flag | Type | Description | +|:----------------------------|--------|-----------------------------------------------------------------------------------------------------------------------| +| `--decrypt-password-values` | bool | decrypt password values to plaintext | +| `--dest` | string | the directory to store the application in _(defaults to current working dir)_ | +| `--current` | bool | download the archive of the currently deployed app version | +| `--sequence` | int | sequence of the app version to download the archive for (defaults to the latest version unless --current flag is set) | +| `-h, --help` | | help for download | +| `-n, --namespace` | string | the namespace to download from _(default `"default"`)_ | +| `--overwrite` | | overwrite any local files, if present | +| `-o, --output` | string | output format (currently supported: json) _(defaults to plain text if not set)_ | + +## Example +```bash +kubectl kots download kots-sentry --namespace kots-sentry --dest ./manifests --overwrite +``` + +================ +File: docs/reference/kots-cli-enable-ha.md +================ +# enable-ha + +(Deprecated) Runs the rqlite StatefulSet as three replicas for data replication and high availability. + +This command is deprecated and will be removed in a future release. The EKCO add-on for Replicated kURL now scales up the rqlite StatefulSet automatically when three or more nodes are healthy and the OpenEBS localpv storage class is available. For more information, see [EKCO add-on](https://kurl.sh/docs/add-ons/ekco#kotsadm) in the kURL documentation. + +## Usage +```bash +kubectl kots enable-ha [flags] +``` + +* _Provide `[flags]` according to the table below_ + +This command supports all [global flags](kots-cli-global-flags) and also: + + +| Flag | Type | Description | +|:---------------------|--------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `--wait-duration` | string | Timeout used while waiting for individual components to be ready. Must be in Go duration format. For example, `10s` or `2m`. See [func ParseDuration](https://pkg.go.dev/time#ParseDuration) in the Go documentation. | +| `-h, --help` | | Help for `enable-ha`. | +| `-n, --namespace` | string | The namespace where the Admin Console is running _(required)_ | + +## Example +```bash +kubectl kots enable-ha --namespace kots-sentry +``` + +================ +File: docs/reference/kots-cli-get-apps.md +================ +# get apps + +The `kots get apps` command lists installed applications. + +### Usage + +```bash +kubectl kots get apps [flags] +``` + +- _Provide `[flags]` according to the table below_ + +| Flag | Type | Description | +| :---------------- | ------ | ------------------------------------------------------------------- | +| `-h, --help` | | help for get apps | +| `-n, --namespace` | string | the namespace where the Admin Console is running _(required)_ | + +### Example + +```bash +kubectl kots get apps -n default +``` + +================ +File: docs/reference/kots-cli-get-backups.md +================ +# get backups + +The `kots get backups` command lists available full snapshots (instance). 
+ +### Usage + +```bash +kubectl kots get backups [flags] +``` + +- _Provide `[flags]` according to the table below_ + +| Flag | Type | Description | +| :---------------- | ------ | ------------------------------------------------------------------- | +| `-h, --help` | | help for get backups | +| `-n, --namespace` | string | filter by the namespace in which the Admin Console is/was installed | + +### Examples + +Basic + +```bash +kubectl kots get backups +``` + +Filtering by a namespace + +```bash +kubectl kots get backups -n default +``` + +================ +File: docs/reference/kots-cli-get-config.md +================ +# get config + +The `kots get config` command returns the `configValues` file for an application. + +### Usage + +```bash +kubectl kots get config [flags] +``` + +- _Provide `[flags]` according to the table below_ + +| Flag | Type | Description | +| :---------------- | ------ | ------------------------------------------------------------------- | +| `--appslug` | string | The slug of the target application. Required when more than one application is deployed. Your software vendor provides the application slug. For more information, see <a href="/vendor/vendor-portal-manage-app#slug">Get the Application Slug</a> in <em>Managing Applications</em>.| +| `--current` | bool | When set, the `configValues` file for the currently deployed version of the application is retrieved.| +| `--sequence` | int | Retrieves the `configValues` file for the specified application sequence. **Default**: Latest (unless the `--current` flag is set).| +| `--decrypt` | bool | Decrypts password items within the configuration.| +| `-h, --help` | | Help for `get config`.| +| `-n, --namespace` | string | (Required) The namespace where the Admin Console is running.| + +### Example + +```bash +kubectl kots get config -n default --sequence 5 --appslug myapp +``` + +================ +File: docs/reference/kots-cli-get-index.md +================ +# get + +The `kots get` command shows information about one or more resources. + +### Usage +```bash +kubectl kots get [resource] [flags] +``` + +This command supports all [global flags](kots-cli-global-flags) and also: + +| Flag | Type | Description | +|:----------------------|------|-------------| +| `-o, --output` | | Output format. **Supported formats**: `json`. | + +### Resources + +* `apps` lists installed applications. +* `backups` lists available full snapshots (instance). +* `config` lists the **configValues** for an application. +* `restores` lists created full snapshot restores. +* `versions` lists the versions available for a given `app-slug`. + +================ +File: docs/reference/kots-cli-get-restores.md +================ +# get restores + +The `kots get restores` command lists created full snapshot restores. + +### Usage + +```bash +kubectl kots get restores [flags] +``` + +- _Provide `[flags]` according to the table below_ + +| Flag | Type | Description | +| :---------------- | ------ | ------------------------------------------------------------------- | +| `-h, --help` | | help for get restores | +| `-n, --namespace` | string | filter by the namespace in which the Admin Console is/was installed | + +### Examples + +Basic + +```bash +kubectl kots get restores +``` + +Filtering by a namespace + +```bash +kubectl kots get restores -n default +``` + +================ +File: docs/reference/kots-cli-get-versions.md +================ +# get versions + +The `kots get versions` command lists all versions of an application. 
+
+> Introduced in KOTS v1.61.0
+
+### Usage
+
+```bash
+kubectl kots get versions [app-slug] [flags]
+```
+
+- _Replace `[app-slug]` with the app slug for your KOTS application (required)._
+- _Provide `[flags]` according to the table below_
+
+| Flag | Type | Description |
+| :------------------------ | ------ | --------------------------------------------------------------------------------------------------- |
+| `-h, --help` | | Help for `get versions`. |
+| `-n, --namespace` | string | (Required) The namespace where the Admin Console is running. |
+| `--current-page` | int | Offset, by page size, at which to start retrieving versions. **Default:** 0 |
+| `--page-size` | int | Number of versions to return. **Default:** 20 |
+| `--pin-latest` | bool | When set to true, always returns the latest version at the beginning. **Default:** false |
+| `--pin-latest-deployable` | bool | When set to true, always returns the latest version that can be deployed. The latest deployable version can differ from the latest version if a required version, which cannot be skipped, is present. **Default:** false |
+| `-o, --output` | string | Output format. **Supported formats:** `json`. **Default:** Plain text |
+
+### Example
+
+```bash
+kubectl kots get versions kots-sentry -n default
+```
+
+================
+File: docs/reference/kots-cli-getting-started.md
+================
+# Installing the KOTS CLI
+
+Users can interact with the Replicated KOTS CLI to install and manage applications with Replicated KOTS. The KOTS CLI is a kubectl plugin that runs locally on any computer.
+
+
+## Prerequisite
+
+Install kubectl, the Kubernetes command-line tool. See [Install Tools](https://kubernetes.io/docs/tasks/tools/) in the Kubernetes documentation.
+
+:::note
+If you are using a cluster created with Replicated kURL, kURL installs both kubectl and the KOTS CLI when provisioning the cluster. For more information, see [Online Installation with kURL](/enterprise/installing-kurl) and [Air Gap Installation with kURL](/enterprise/installing-kurl-airgap).
+:::
+
+## Install
+
+To install the latest version of the KOTS CLI to `/usr/local/bin`, run:
+
+```bash
+curl https://kots.io/install | bash
+```
+
+To install to a directory other than `/usr/local/bin`, run:
+
+```bash
+curl https://kots.io/install | REPL_INSTALL_PATH=/path/to/cli bash
+```
+
+To install a specific version of the KOTS CLI, run:
+
+```bash
+curl https://kots.io/install/<version> | bash
+```
+
+To verify your installation, run:
+
+```bash
+kubectl kots --help
+```
+
+## Install without Root Access
+
+You can install the KOTS CLI on computers without root access or computers that cannot write to the `/usr/local/bin` directory.
+
+To install the KOTS CLI without root access, you can do any of the following:
+
+* (Online Only) [Install to a Different Directory](#install-to-a-different-directory)
+* (Online Only) [Install Using Sudo](#install-using-sudo)
+* (Online or Air Gap) [Manually Download and Install](#manually-download-and-install)
+
+### Install to a Different Directory
+
+You can set the `REPL_INSTALL_PATH` environment variable to install the KOTS CLI to a directory other than `/usr/local/bin` that does not require elevated permissions.
+
+**Example:**
+
+In the following example, the installation script installs the KOTS CLI to `~/bin` in the user's home directory. You can use the user home symbol `~` in the `REPL_INSTALL_PATH` environment variable. The script expands `~` to `$HOME`.
+
+```bash
+curl -L https://kots.io/install | REPL_INSTALL_PATH=~/bin bash
+```
+
+### Install Using Sudo
+
+If you have sudo access to the directory where you want to install the KOTS CLI, you can set the `REPL_USE_SUDO` environment variable so that the installation script prompts you for your sudo password.
+
+When you set the `REPL_USE_SUDO` environment variable to any value, the installation script uses sudo to create and write to the installation directory as needed. The script prompts for a sudo password if it is required for the user executing the script in the specified directory.
+
+**Example:**
+
+In the following example, the script uses sudo to install the KOTS CLI to the default `/usr/local/bin` directory.
+
+```bash
+curl -L https://kots.io/install | REPL_USE_SUDO=y bash
+```
+
+**Example:**
+
+In the following example, the script uses sudo to install the KOTS CLI to the `/replicated/bin` directory.
+
+```bash
+curl -L https://kots.io/install | REPL_INSTALL_PATH=/replicated/bin REPL_USE_SUDO=y bash
+```
+
+### Manually Download and Install
+
+Rather than using the installation script, you can manually download and install the KOTS CLI binary to install without root access.
+
+Users in air gap environments can also follow this procedure to install the KOTS CLI.
+
+To manually download and install the KOTS CLI:
+
+1. Download the KOTS CLI release for your operating system.
+
+   You can run one of the following commands to download the latest version of the KOTS CLI from the [Releases](https://github.com/replicatedhq/kots/releases/latest) page in the KOTS GitHub repository:
+
+   * **MacOS (AMD and ARM)**:
+
+      ```bash
+      curl -LO https://github.com/replicatedhq/kots/releases/latest/download/kots_darwin_all.tar.gz
+      ```
+
+   * **Linux (AMD)**:
+
+      ```bash
+      curl -LO https://github.com/replicatedhq/kots/releases/latest/download/kots_linux_amd64.tar.gz
+      ```
+
+   * **Linux (ARM)**:
+
+      ```bash
+      curl -LO https://github.com/replicatedhq/kots/releases/latest/download/kots_linux_arm64.tar.gz
+      ```
+
+1. Unarchive the `.tar.gz` file that you downloaded:
+
+   * **MacOS (AMD and ARM)**:
+
+      ```bash
+      tar xvf kots_darwin_all.tar.gz
+      ```
+   * **Linux (AMD)**:
+
+      ```bash
+      tar xvf kots_linux_amd64.tar.gz
+      ```
+   * **Linux (ARM)**:
+
+      ```bash
+      tar xvf kots_linux_arm64.tar.gz
+      ```
+
+1. Rename the `kots` executable to `kubectl-kots` and move it to one of the directories that is in your PATH environment variable. This ensures that the system can access the executable when you run KOTS CLI commands.
+
+   :::note
+   You can run `echo $PATH` to view the list of directories in your PATH.
+   :::
+
+   Run one of the following commands, depending on whether you have write access to the target directory:
+
+   * **You have write access to the directory**:
+
+      ```bash
+      mv kots /PATH_TO_TARGET_DIRECTORY/kubectl-kots
+      ```
+      Replace `PATH_TO_TARGET_DIRECTORY` with the path to a directory that is in your PATH environment variable. For example, `/usr/local/bin`.
+
+   * **You do _not_ have write access to the directory**:
+
+      ```bash
+      sudo mv kots /PATH_TO_TARGET_DIRECTORY/kubectl-kots
+      ```
+      Replace `PATH_TO_TARGET_DIRECTORY` with the path to a directory that is in your PATH environment variable. For example, `/usr/local/bin`.
+
+1. Verify the installation:
+
+   ```
+   kubectl kots --help
+   ```
+
+## Uninstall
+
+The KOTS CLI is a plugin for the Kubernetes kubectl command line tool. The KOTS CLI plugin is named `kubectl-kots`.
+
+For more information about working with kubectl, see [Command line tool (kubectl)](https://kubernetes.io/docs/reference/kubectl/) in the Kubernetes documentation.
+
+To uninstall the KOTS CLI:
+
+1. Find the location where the `kubectl-kots` plugin is installed on your `PATH`:
+
+   ```
+   kubectl plugin list
+   ```
+
+2. Delete `kubectl-kots`:
+
+   ```
+   sudo rm PATH_TO_KOTS
+   ```
+   Replace `PATH_TO_KOTS` with the location where `kubectl-kots` is installed.
+
+   **Example**:
+
+   ```
+   sudo rm /usr/local/bin/kubectl-kots
+   ```
+
+================
+File: docs/reference/kots-cli-global-flags.md
+================
+# Global flags
+
+All KOTS CLI commands support a set of global flags that are used to connect to the cluster.
+
+| Flag | Type | Description |
+|---|---|---|
+| `--as` | string | Username to impersonate for the operation |
+| `--as-group` | stringArray | Group to impersonate for the operation. This flag can be repeated to specify multiple groups. |
+| `--cache-dir` | string | Default HTTP cache directory (default "~/.kube/http-cache") |
+| `--certificate-authority` | string | Path to a cert file for the certificate authority |
+| `--client-certificate` | string | Path to a client certificate file for TLS |
+| `--client-key` | string | Path to a client key file for TLS |
+| `--cluster` | string | The name of the kubeconfig cluster to use |
+| `--context` | string | The name of the kubeconfig context to use |
+| `--insecure-skip-tls-verify` | bool | If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure |
+| `--kubeconfig` | string | Path to the kubeconfig file to use for CLI requests. |
+| `-n, --namespace` | string | If present, the namespace scope for this CLI request |
+| `--request-timeout` | string | The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") |
+| `-s, --server` | string | The address and port of the Kubernetes API server |
+| `--token` | string | Bearer token for authentication to the API server |
+| `--user` | string | The name of the kubeconfig user to use |
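+
+### Example
+
+Global flags can be combined with any KOTS CLI command. The kubeconfig path and namespace below are illustrative:
+
+```bash
+kubectl kots get apps --kubeconfig ~/.kube/prod-config -n kotsadm
+```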
+
+================
+File: docs/reference/kots-cli-identity-service-enable-shared-password.md
+================
+# identity-service enable-shared-password
+
+Enables the shared password login option in the KOTS Admin Console.
+
+### Usage
+
+```bash
+kubectl kots identity-service enable-shared-password [flags]
+```
+
+This command supports all [global flags](kots-cli-global-flags) and also:
+
+| Flag | Type | Description |
+| :---------------- | ------ | ------------------------------------------------------------- |
+| `-n, --namespace` | string | the namespace where the Admin Console is running _(required)_ |
+
+### Examples
+
+```bash
+kubectl kots identity-service enable-shared-password --namespace kots-sentry
+```
+
+================
+File: docs/reference/kots-cli-identity-service-index.md
+================
+# identity-service
+
+KOTS Identity Service
+
+### Usage
+
+```bash
+kubectl kots identity-service [command]
+```
+
+This command supports all [global flags](kots-cli-global-flags).
+
+================
+File: docs/reference/kots-cli-install.mdx
+================
+import StrictSecurityContext from "../partials/kots-cli/_strict-security-context.mdx"
+import KotsadmNamespace from "../partials/kots-cli/_kotsadm-namespace.mdx"
+import KotsadmRegistry from "../partials/kots-cli/_kotsadm-registry.mdx"
+import RegistryPassword from "../partials/kots-cli/_registry-password.mdx"
+import RegistryUsername from "../partials/kots-cli/_registry-username.mdx"
+import Help from "../partials/kots-cli/_help.mdx"
+
+# install
+
+Installs the application and the KOTS Admin Console directly to a cluster.
+The `kots install` command pulls Kubernetes manifests from the remote upstream, deploys the manifests to the specified cluster, installs the Admin Console, and sets up port forwarding to make the Admin Console accessible on port 8800.
+Alternatively, you can specify the `--port` flag to override the default port.
+
+### Usage
+
+```bash
+kubectl kots install [upstream-uri] [flags]
+```
+
+- _Replace `[upstream-uri]` with the URI for your KOTS application (required)._
+- _If the KOTS application has been packaged by Replicated Vendor, the `--license-file` flag must be provided._
+- _Provide `[flags]` according to the table below_
+
+This command supports all [global flags](kots-cli-global-flags) and also:
+
+<table>
+  <tr>
+    <td>Flag</td>
+    <td>Type</td>
+    <td>Description</td>
+  </tr>
+  <tr>
+    <td><code>--additional-annotations</code></td>
+    <td>string</td>
+    <td>Additional annotations to add to kotsadm pods.</td>
+  </tr>
+  <tr>
+    <td><code>--additional-labels</code></td>
+    <td>string</td>
+    <td>Additional labels to add to kotsadm pods.</td>
+  </tr>
+  <tr>
+    <td><code>--airgap</code></td>
+    <td>bool</td>
+    <td>Set to <code>true</code> to run install in air gapped mode. Setting <code>--airgap-bundle</code> implies <code>--airgap=true</code>. <strong>Default:</strong> <code>false</code>. For more information, see <a href="/enterprise/installing-existing-cluster-airgapped">Air Gap Installation in Existing Clusters with KOTS</a>.</td>
+  </tr>
+  <tr>
+    <td><code>--airgap-bundle</code></td>
+    <td>string</td>
+    <td>Path to the application air gap bundle where application metadata will be loaded from. Setting <code>--airgap-bundle</code> implies <code>--airgap=true</code>. For more information, see <a href="/enterprise/installing-existing-cluster-airgapped">Air Gap Installation in Existing Clusters with KOTS</a>.</td>
+  </tr>
+  <tr>
+    <td><code>--app-version-label</code></td>
+    <td>string</td>
+    <td>The application version label to install. If not specified, the latest version is installed.</td>
+  </tr>
+  <tr>
+    <td><code>--config-values</code></td>
+    <td>string</td>
+    <td>Path to a manifest file containing configuration values. This manifest must be <code>apiVersion: kots.io/v1beta1</code> and <code>kind: ConfigValues</code>. For more information, see <a href="/enterprise/installing-existing-cluster-automation">Installing with the KOTS CLI</a>.</td>
+  </tr>
+  <tr>
+    <td><code>--copy-proxy-env</code></td>
+    <td>bool</td>
+    <td>Copy proxy environment variables from current environment into all Admin Console components. <strong>Default:</strong> <code>false</code></td>
+  </tr>
+  <tr>
+    <td><code>--disable-image-push</code></td>
+    <td>bool</td>
+    <td>Set to <code>true</code> to disable images from being pushed to private registry. 
<strong>Default:</strong> <code>false</code></td> + </tr> + <tr> + <td><code>--ensure-rbac</code></td> + <td>bool</td> + <td>When <code>false</code>, KOTS does not attempt to create the RBAC resources necessary to manage applications. <strong>Default:</strong> <code>true</code>. If a role specification is needed, use the [generate-manifests](kots-cli-admin-console-generate-manifests) command.</td> + </tr> + <Help/> + <tr> + <td><code>--http-proxy</code></td> + <td>string</td> + <td>Sets HTTP_PROXY environment variable in all Admin Console components.</td> + </tr> + <tr> + <td><code>--https-proxy</code></td> + <td>string</td> + <td>Sets HTTPS_PROXY environment variable in all Admin Console components.</td> + </tr> + <KotsadmNamespace/> + <KotsadmRegistry/> + <tr> + <td><code>--license-file</code></td> + <td>string</td> + <td>Path to a license file.</td> + </tr> + <tr> + <td><code>--local-path</code></td> + <td>string</td> + <td>Specify a local-path to test the behavior of rendering a Replicated application locally. Only supported on Replicated application types.</td> + </tr> + <tr> + <td><code>--name</code></td> + <td>string</td> + <td>Name of the application to use in the Admin Console.</td> + </tr> + <tr> + <td><code>--no-port-forward</code></td> + <td>bool</td> + <td>Set to <code>true</code> to disable automatic port forward. <strong>Default:</strong> <code>false</code></td> + </tr> + <tr> + <td><code>--no-proxy</code></td> + <td>string</td> + <td>Sets NO_PROXY environment variable in all Admin Console components.</td> + </tr> + <tr> + <td><code>--port</code></td> + <td>string</td> + <td>Override the local port to access the Admin Console. <strong>Default:</strong> 8800</td> + </tr> + <tr> + <td><code>--private-ca-configmap</code></td> + <td>string</td> + <td>Name of a ConfigMap containing private CAs to add to the kotsadm deployment.</td> + </tr> + <tr> + <td><code>--preflights-wait-duration</code></td> + <td>string</td> + <td>Timeout to be used while waiting for preflights to complete. Must be in [Go duration](https://pkg.go.dev/time#ParseDuration) format. For example, 10s, 2m. <strong>Default:</strong> 15m</td> + </tr> + <RegistryPassword/> + <RegistryUsername/> + <tr> + <td><code>--repo</code></td> + <td>string</td> + <td>Repo URI to use when installing a Helm chart.</td> + </tr> + <tr> + <td><code>--shared-password</code></td> + <td>string</td> + <td>Shared password to use when deploying the Admin Console.</td> + </tr> + <tr> + <td><code>--skip-compatibility-check</code></td> + <td>bool</td> + <td>Set to <code>true</code> to skip compatibility checks between the current KOTS version and the application. <strong>Default:</strong> <code>false</code></td> + </tr> + <tr> + <td><code>--skip-preflights</code></td> + <td>bool</td> + <td>Set to <code>true</code> to skip preflight checks. <strong>Default:</strong> <code>false</code>. If any strict preflight checks are configured, the <code>--skip-preflights</code> flag is not honored because strict preflight checks must run and contain no failures before the application is deployed. For more information, see [Defining Preflight Checks](/vendor/preflight-defining).</td> + </tr> + <tr> + <td><code>--skip-rbac-check</code></td> + <td>bool</td> + <td>Set to <code>true</code> to bypass RBAC check. <strong>Default:</strong> <code>false</code></td> + </tr> + <tr> + <td><code>--skip-registry-check</code></td> + <td>bool</td> + <td>Set to <code>true</code> to skip the connectivity test and validation of the provided registry information. 
<strong>Default:</strong> <code>false</code></td>
+  </tr>
+  <StrictSecurityContext/>
+  <tr>
+    <td><code>--use-minimal-rbac</code></td>
+    <td>bool</td>
+    <td>When set to <code>true</code>, KOTS RBAC permissions are limited to the namespace where it is installed. To use <code>--use-minimal-rbac</code>, the application must support namespace-scoped installations and the user must have the minimum RBAC permissions required by KOTS in the target namespace. For a complete list of requirements, see [Namespace-scoped RBAC Requirements​](/enterprise/installing-general-requirements#namespace-scoped) in _Installation Requirements_. <strong>Default:</strong> <code>false</code></td>
+  </tr>
+  <tr>
+    <td><code>--wait-duration</code></td>
+    <td>string</td>
+    <td>Timeout to be used while waiting for individual components to be ready. Must be in [Go duration](https://pkg.go.dev/time#ParseDuration) format. For example, 10s, 2m. <strong>Default:</strong> 2m</td>
+  </tr>
+  <tr>
+    <td><code>--with-minio</code></td>
+    <td>bool</td>
+    <td>When set to <code>true</code>, KOTS deploys a local MinIO instance for storage and uses MinIO for host path and NFS snapshot storage. <strong>Default:</strong> <code>true</code></td>
+  </tr>
+  <tr>
+    <td><code>--storage-class</code></td>
+    <td>string</td>
+    <td>Sets the storage class to use for the KOTS Admin Console components. <strong>Default:</strong> unset, which means the default storage class will be used</td>
+  </tr>
+</table>
+
+
+### Examples
+
+```bash
+kubectl kots install sentry/unstable --license-file ~/license.yaml
+kubectl kots install kots-sentry/stable --shared-password IgqG5OBc9Gp --license-file ~/sentry-license.yaml --namespace sentry-namespace --config-values ~/config-values.yaml
+kubectl kots install --ensure-rbac=false
+```
+
+================
+File: docs/reference/kots-cli-pull.md
+================
+# pull
+
+Running this command creates a directory on the workstation containing the application and Kubernetes manifests. These assets can be used to deploy KOTS to a cluster through other workflows, such as kubectl. This command is necessary when managing an application without the use of the Admin Console.
+ +### Usage +```bash +kubectl kots pull [upstream uri] [flags] +``` +* _Replace `[upstream-uri]` with the URI for your KOTS application (required)._ +* _If the KOTS application has been packaged by Replicated Vendor, the `--license-file` flag must be provided._ +* _Provide `[flags]` according to the table below_ + +This command supports all [global flags](kots-cli-global-flags) and also: + +| Flag | Type | Description | +|:-----|------|-------------| +| `--downstream` | strings | the list of any downstreams to create/update | +| `--exclude-admin-console` | bool | set to true to exclude the Admin Console _(only valid when `[upstream-uri]` points to a replicated app)_ | +| `--exclude-kots-kinds` | bool | set to true to exclude rendering KOTS custom objects to the base directory _(default `true`)_ | +| `-h, --help` | | help for pull | +| `--image-namespace` | string | the namespace/org in the docker registry to push images to _(required when `--rewrite-images` is set)_ | +| `--license-file` | string | path to a license file _(required when `[upstream-uri]` points to a replicated app)_ | +| `--local-path` | string | specify a local-path to pull a locally available replicated app _(only valid when `[upstream-uri]` points to a replicated app)_ | +| `-n, --namespace` | string | namespace to render the upstream to in the base _(default `"default"`)_ | +| `--private-ca-configmap` | string | name of a ConfigMap containing private CAs to add to the kotsadm deployment. +| `--registry-endpoint` | string | the endpoint of the local docker registry to use when pushing images _(required when `--rewrite-images` is set)_ | +| `--rewrite-images` | bool | set to true to force all container images to be rewritten and pushed to a local registry | +| `--rootdir` | string | root directory that will be used to write the yaml to _(default `${HOME}` or `%USERPROFILE%`)_ | +| `--shared-password` | string | shared password to use when deploying the Admin Console | +| `--http-proxy` | string | sets HTTP_PROXY environment variable in all KOTS Admin Console components | +| `--https-proxy` | string | sets HTTPS_PROXY environment variable in all KOTS Admin Console components | +| `--no-proxy` | string | sets NO_PROXY environment variable in all KOTS Admin Console components | +| `--copy-proxy-env` | bool | copy proxy environment variables from current environment into all KOTS Admin Console components | +| `--config-values` | string | path to a manifest containing config values (must be apiVersion: kots.io/v1beta1, kind: ConfigValues) | +| `--with-minio` | bool | set to true to include a local minio instance to be used for storage _(default true)_ | +| `--storage-class` | string | sets the storage class to use for the KOTS Admin Console components. _(default unset, which means the default storage class will be used)_ | + +### Example +```bash +kubectl kots pull sentry/unstable --license-file ~/license.yaml +``` + +================ +File: docs/reference/kots-cli-remove.md +================ +# remove + +Remove application reference from the KOTS Admin Console. + +You can use the `kots remove` command to remove one or more installed applications from the Admin Console. +By default, the deployed application is not removed from the cluster. Only the reference for the application is removed from the Admin Console. To completely remove the application and delete its resources from the cluster, use the `--undeploy` flag. 
+
+### Usage
+```bash
+kubectl kots remove [app-slug] -n [namespace]
+```
+* _`[app-slug]` is the slug of the installed application to be removed (required)_
+* _Provide `[flags]` according to the table below_
+
+This command supports all [global flags](kots-cli-global-flags) and also:
+
+<table>
+  <tr>
+    <th width="20%">Flag</th>
+    <th width="10%">Type</th>
+    <th width="70%">Description</th>
+  </tr>
+  <tr>
+    <td><code>--force</code></td>
+    <td><code>bool</code></td>
+    <td>
+      <p>Removes the reference even if the application has already been deployed.</p>
+    </td>
+  </tr>
+  <tr>
+    <td><code>--undeploy</code></td>
+    <td><code>bool</code></td>
+    <td>
+      <p>Un-deploys the application by deleting all its resources from the cluster. When <code>--undeploy</code> is set, the <code>--force</code> flag is set automatically.</p>
+      <p><strong>Note:</strong> <code>--undeploy</code> can remove application resources only from the namespace where KOTS is installed and from any namespaces provided in the <a href="custom-resource-application#additionalnamespaces">additionalNamespaces</a> field in the Application custom resource.</p>
+      <p>The following describes how <code>--undeploy</code> removes application resources:</p>
+      <ul>
+        <li>For applications deployed with <code>kubectl apply</code> (including standalone manifest files and Helm charts deployed with <a href="/vendor/helm-native-about#replicated">Replicated Helm</a>), <code>--undeploy</code> identifies and removes resources based on a <code>kots.io/app-slug: <app_slug></code> annotation that is applied to all application resources during deployment. </li>
+        <li>For Helm chart applications deployed with HelmChart custom resources with <code>apiVersion: kots.io/v1beta2</code> or <code>apiVersion: kots.io/v1beta1</code> and <code>useHelmInstall: true</code>, <code>--undeploy</code> runs <code>helm uninstall</code>.</li>
+      </ul>
+    </td>
+  </tr>
+  <tr>
+    <td><code>-n</code></td>
+    <td><code>string</code></td>
+    <td><p>The namespace where the target application is deployed. Use <code>default</code> for the default namespace.</p></td>
+  </tr>
+</table>
+
+### Example
+```bash
+kubectl kots remove sentry -n default
+```
+
+================
+File: docs/reference/kots-cli-reset-password.md
+================
+# reset-password
+
+If you deployed an application with the KOTS Admin Console, the `kots reset-password` command changes the bcrypt password hash in the cluster, allowing you to log in again.
+
+### Usage
+```bash
+kubectl kots reset-password [namespace] [flags]
+```
+* _Replace `[namespace]` with the namespace where the Admin Console and your KOTS application reside (required)._
+* _Provide `[flags]` according to the table below_
+
+This command supports all [global flags](kots-cli-global-flags) and also:
+
+
+| Flag | Type | Description |
+|:----------------------|------|-------------|
+| `-h, --help` | | help for reset-password |
+| `-n, --namespace`| string | the namespace where the Admin Console is running |
+
+### Examples
+```bash
+kubectl kots reset-password sentry-namespace
+```
+
+================
+File: docs/reference/kots-cli-reset-tls.md
+================
+# reset-tls
+
+If a bad TLS certificate is uploaded to the KOTS Admin Console or the kotsadm-tls secret is missing, the `kots reset-tls` command reapplies a default self-signed TLS certificate.
+For more information about the certificates stored in this secret, see [Setting up TLS Certificates](https://kurl.sh/docs/install-with-kurl/setup-tls-certs) in the open source kURL documentation.
+
+### Usage
+```bash
+kubectl kots reset-tls [namespace] [flags]
+```
+* _Replace `[namespace]` with the namespace where the Admin Console and your KOTS application reside (required)._
+* _Provide `[flags]` according to the table below_
+
+This command supports all [global flags](kots-cli-global-flags) and also:
+
+
+| Flag | Type | Description |
+|:----------------------|------|-------------|
+| `-h, --help` | | Help for `reset-tls`. |
+| `-n, --namespace`| string | The namespace where the Admin Console is running. |
+| `--accept-anonymous-uploads`| bool | Allow uploading a new certificate prior to authenticating. |
+
+### Examples
+```bash
+kubectl kots reset-tls sentry-namespace
+```
+
+================
+File: docs/reference/kots-cli-restore-index.md
+================
+# restore
+
+Restores full snapshots for disaster recovery, or performs a partial restore of the application only or the Replicated Admin Console only.
+
+### Usage
+
+```bash
+kubectl kots restore --from-backup [flags]
+```
+
+This command supports the following flags:
+
+| Flag | Type | Description |
+| :-------------------------- | ------ | --------------------------------------------------------------------------------------------- |
+| `--exclude-admin-console` | bool | Exclude restoring the Admin Console and only restore the applications. **Default:** false |
+| `--exclude-apps` | bool | Exclude restoring the applications and only restore the Admin Console. **Default:** false |
+| `--from-backup` | string | (Required) The name of the backup to restore from. |
+| `-h, --help` | | Help for `restore`. |
+| `-o, --output` | string | The output format. Supports JSON. Defaults to plain text if not set. |
+| `--velero-namespace` | string | (Required for minimal RBAC installations) The namespace where Velero is installed. |
+| `--wait-for-apps` | bool | Wait for all applications to be restored. **Default:** true |
+
+### Example
+
+```bash
+kubectl kots restore --from-backup instance-942kf
+```
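+
+To perform a partial restore, combine `--from-backup` with one of the exclude flags described above. For example, the following command restores only the applications and skips the Admin Console (the backup name is illustrative):
+
+```bash
+kubectl kots restore --from-backup instance-942kf --exclude-admin-console
+```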
+
+================
+File: docs/reference/kots-cli-restore-ls.md
+================
+# restore ls
+
+:::note
+This command is deprecated. Use [`kubectl kots get restores`](/reference/kots-cli-get-restores) instead.
+:::
+
+Shows a list of all the available full snapshot restores for disaster recovery.
+
+### Usage
+
+```bash
+kubectl kots restore ls [flags]
+```
+
+This command supports the following flags:
+
+| Flag | Type | Description |
+| :---------------- | ------ | ------------------------------------------------------------------- |
+| `-h, --help` | | Help for `restore ls`. |
+| `-n, --namespace` | string | Filter by the namespace the Admin Console was installed in.|
+
+### Example
+
+```bash
+kubectl kots restore ls --namespace kots-sentry
+```
+
+================
+File: docs/reference/kots-cli-set-config.mdx
+================
+import PreflightsStrict from "../partials/preflights/_preflights-strict.mdx"
+
+
+# set config
+
+The `kots set config` command allows setting values for application config items in the latest release version.
+
+> Introduced in KOTS v1.31.0
+
+## Usage
+
+```bash
+kubectl kots set config [appSlug] [KEY_1=VAL_1 ... 
KEY_N=VAL_N] [flags] +``` + +- _Provide `[flags]` according to the table below_ + +| Flag | Type | Description | +| :-------------------| ------ | ------------------------------------------------------------------------------------------------------------------------------------- | +| `--config-file` | string | path to a manifest containing config values (must be `apiVersion: kots.io/v1beta1, kind: ConfigValues`) | +| `--merge` | bool | when set to true, only keys specified in config file will be updated. This flag can only be used when `--config-file` flag is used. | +|`--key` | string | name of a single key to set. This flag requires `--value` or `--value-from-file` flags | +| `--value` | string | the value to set for the key specified in the `--key` flag. This flag cannot be used with `--value-from-file` flag. | +| `--value-from-file` | string | path to the file containing the value to set for the key specified in the `--key` flag. This flag cannot be used with `--value` flag. | +| `--deploy` | bool | when set, automatically deploy the latest version with the new configuration | +| `--skip-preflights` | bool | set to true to skip preflight checks when deploying new version | +| `--current` | bool | set to true to use the currently deployed version of the app as the base for the new version | +| `--sequence` | int | sequence of the app version to use as the base for the new version (defaults to the latest version unless --current flag is set) | +| `-n, --namespace` | string | the namespace where the Admin Console is running _(required)_ | + + +#### About Strict Preflight Checks + +<PreflightsStrict/> + + +## Examples + +```bash +kubectl kots set config myapp -n default --config-file /path/to/local/config.yaml +``` + +```bash +kubectl kots set config myapp -n default --key config-item-name --value-from-file /path/to/config/file/value.txt +``` + +```bash +kubectl kots set config myapp -n default config-item-name="config item value" +``` + +```bash +kubectl kots set config myapp -n default --key config-item-name --value "config item value" +``` + +================ +File: docs/reference/kots-cli-set-index.md +================ +# set + +Configure KOTS resources. + +### Usage + +```bash +kubectl kots set [resource] [flags] +``` + +This command supports all [global flags](kots-cli-global-flags). + +### Resources + +* `config` set config items for application. + +================ +File: docs/reference/kots-cli-upload.mdx +================ +import PreflightsStrict from "../partials/preflights/_preflights-strict.mdx" + +# upload + +Upload Kubernetes manifests from the local filesystem, creating a new version of the application that can be deployed. +When you have a copy of an application that was created with `kots pull` or `kots download`, you can upload it back to the Admin Console using the `kots upload` command. + +## Usage +```bash +kubectl kots upload [source] [flags] +``` +* _Replace `[source]` with a directory containing the manifests of your KOTS application (required)._ +* _Provide `[flags]` according to the table below_ + +This command supports all [global flags](kots-cli-global-flags) and also: + + +| Flag | Type | Description | +|:----------------------|------|-------------| +| `-h, --help` | | help for upload | +| `--name`| string | the name of the kotsadm application to create | +| `-n, --namespace`| string | the namespace to upload to _(default `"default"`)_ | +| `--slug`| string | the application slug to use. 
If not present, a new one will be created |
+| `--upstream-uri`| string | the upstream URI that can be used to check for updates |
+| `--deploy`| bool | when set, automatically deploy the uploaded version |
+| `--skip-preflights`| bool | set to true to skip preflight checks |
+| `-o, --output` | string | output format (currently supported: json) _(defaults to plain text if not set)_ |
+
+
+Any `plainText` values in the `upstream/userdata/config.yaml` file will be re-encrypted using the application cipher automatically, if the matching config item is a password type.
+If both an encrypted and a plainText value are provided on a single item, the plainText value will overwrite the encrypted value if they differ.
+
+#### About Strict Preflight Checks
+
+<PreflightsStrict/>
+
+
+## Examples
+
+```bash
+kubectl kots upload ./manifests --name kots-sentry --namespace kots-sentry --slug kots-sentry --upstream-uri kots-sentry/unstable
+```
+
+================
+File: docs/reference/kots-cli-upstream-download.md
+================
+# upstream download
+
+The `kots upstream download` command retries downloading a failed update of the upstream application.
+
+### Usage
+```bash
+kubectl kots upstream download [app-slug] [flags]
+```
+* _Replace `[app-slug]` with the app slug for your KOTS application (required)._
+* _Provide `[flags]` according to the table below._
+
+| Flag | Type | Description |
+|:----------------------------------|--------|--------------------------------------------------------------------------------------------------|
+| `-h, --help` | | Help for `upstream download`. |
+| `--kubeconfig` | string | The kubeconfig to use. **Default**: `$KUBECONFIG`. If unset, then `$HOME/.kube/config`. |
+| `-n, --namespace` | string | (Required) The namespace where the Admin Console is running. |
+| `--sequence` | int | (Required) The local app sequence for the version to retry downloading. |
+| `--skip-preflights` | bool | Set to `true` to skip preflight checks. |
+| `--skip-compatibility-check` | bool | Set to `true` to skip compatibility checks between the current KOTS version and the update. |
+| `--wait` | bool | Set to `false` to download the update in the background. **Default**: `true`. |
+| `-o, --output` | string | Output format. **Supported formats**: `json`. **Default**: Plain text. |
+
+### Example
+```bash
+kubectl kots upstream download kots-sentry --namespace kots-sentry --sequence 8
+```
+
+================
+File: docs/reference/kots-cli-upstream-upgrade.mdx
+================
+import PreflightsStrict from "../partials/preflights/_preflights-strict.mdx"
+
+# upstream upgrade
+
+The `kots upstream upgrade` command fetches the latest version of the upstream application.
+It is functionally equivalent to clicking **Check For Updates** in the Admin Console.
+
+## Usage
+```bash
+kubectl kots upstream upgrade [app-slug] [flags]
+```
+* _Replace `[app-slug]` with the app slug for your KOTS application (required)._
+* _Provide `[flags]` according to the table below_
+
+| Flag | Type | Description |
+|:-------------------------|--------|--------------------------------------------------------------------------------------------------|
+| `-h, --help` | | help for upstream |
+| `--kubeconfig` | string | the kubeconfig to use. **Default:** `$KUBECONFIG`.
If unset, then `$HOME/.kube/config` |
+| `-n, --namespace` | string | (Required) the namespace where the Admin Console is running |
+| `--deploy` | bool | ensures the latest available release is deployed |
+| `--deploy-version-label` | string | ensures the release with the provided version label is deployed |
+| `--skip-preflights` | bool | set to true to skip preflight checks |
+| `--airgap-bundle` | string | path to the application airgap bundle where application images and metadata will be loaded from |
+| `--kotsadm-namespace` | string | the registry namespace to use for application images |
+| `--kotsadm-registry` | string | the registry endpoint where application images will be pushed |
+| `--registry-password` | string | the password to use to authenticate with the registry |
+| `--registry-username` | string | the username to use to authenticate with the registry |
+| `--disable-image-push` | bool | set to true to disable images from being pushed to the private registry. **Default:** `false` |
+| `--skip-registry-check` | bool | Set to `true` to skip the connectivity test and validation of the provided registry information. **Default:** `false` |
+| `--wait` | bool | set to false to download the updates in the background. **Default:** `true` |
+| `-o, --output` | string | output format (currently supported: json). **Default:** Plain text if not set |
+
+
+#### About Strict Preflight Checks
+
+<PreflightsStrict/>
+
+
+## Example
+```bash
+kubectl kots upstream upgrade kots-sentry --namespace kots-sentry
+```
+
+================
+File: docs/reference/kots-cli-upstream.md
+================
+# upstream
+
+KOTS Upstream interface.
+
+### Usage
+```bash
+kubectl kots upstream [command] [flags]
+```
+
+This command supports all [global flags](kots-cli-global-flags).
+
+================
+File: docs/reference/kots-cli-velero-configure-aws-s3.md
+================
+# velero configure-aws-s3
+
+Configures snapshots to use an AWS S3 Bucket as a storage destination.
+This command supports auth via [IAM User Access Keys](https://github.com/vmware-tanzu/velero-plugin-for-aws#option-1-set-permissions-with-an-iam-user) and IAM Instance Roles for the velero-plugin-for-aws.
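+
+If you need to create an access key for the IAM user that Velero will use, a minimal sketch with the AWS CLI is shown below. It assumes the AWS CLI is configured and that an IAM user named `velero` already exists with the bucket permissions described in the plugin documentation linked above; the user name is illustrative only.
+
+```bash
+# Create an access key pair for the (assumed) velero IAM user.
+# The output contains an AccessKeyId and SecretAccessKey, which map to the
+# --access-key-id and --secret-access-key flags of the access-key subcommand.
+aws iam create-access-key --user-name velero
+```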
+
+Valid Subcommands:
+* `access-key`
+* `instance-role`
+
+### Usage
+
+```bash
+kubectl kots velero configure-aws-s3 [subcommand]
+```
+
+| Flag | Type | Description |
+|--------------|------|--------------------------|
+| `-h, --help` | | help for configure-aws-s3 |
+
+### access-key
+
+```bash
+kubectl kots velero configure-aws-s3 access-key [flags]
+```
+
+- _Provide `[flags]` according to the table below_
+
+| Flag | Type | Description |
+|------------------------|--------|-------------------------------------------------------------------------------|
+| `-h, --help` | | help for access-key |
+| `-n, --namespace` | string | the namespace of the Admin Console _(required)_ |
+| `--access-key-id` | string | the AWS access key ID to use for accessing the bucket _(required)_ |
+| `--bucket` | string | name of the object storage bucket where backups should be stored _(required)_ |
+| `--path` | string | path to a subdirectory in the object store bucket |
+| `--region` | string | the region where the bucket exists _(required)_ |
+| `--secret-access-key` | string | the AWS secret access key to use for accessing the bucket _(required)_ |
+| `--skip-validation` | bool | skip the validation of the S3 bucket _(default `false`)_ |
+
+#### Example
+
+```bash
+kubectl kots velero configure-aws-s3 access-key --namespace default --region us-east-1 --bucket kots-snaps --access-key-id XXXXXXXJTJB7M2XZUV7D --secret-access-key <secret access key here>
+```
+
+### instance-role
+
+```bash
+kubectl kots velero configure-aws-s3 instance-role [flags]
+```
+
+- _Provide `[flags]` according to the table below_
+
+| Flag | Type | Description |
+|------------------------|--------|-------------------------------------------------------------------------------|
+| `-h, --help` | | help for instance-role |
+| `-n, --namespace` | string | the namespace of the Admin Console _(required)_ |
+| `--bucket` | string | name of the object storage bucket where backups should be stored _(required)_ |
+| `--path` | string | path to a subdirectory in the object store bucket |
+| `--region` | string | the region where the bucket exists _(required)_ |
+| `--skip-validation` | bool | skip the validation of the S3 bucket _(default `false`)_ |
+
+#### Example
+
+```bash
+kubectl kots velero configure-aws-s3 instance-role --namespace default --region us-east-1 --bucket kots-snaps
+```
+
+================
+File: docs/reference/kots-cli-velero-configure-azure.md
+================
+# velero configure-azure
+
+Configures snapshots to use an Azure Blob Storage Container as a storage destination.
+Currently, only the [Service Principal authentication method](https://github.com/vmware-tanzu/velero-plugin-for-microsoft-azure#option-1-create-service-principal) of the velero-plugin-for-microsoft-azure is supported.
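+
+If you still need to create a service principal for Velero, a minimal sketch with the Azure CLI is shown below. It assumes you are logged in with `az login`; the service principal name and role are assumptions for illustration, so grant the narrower permissions described in the plugin documentation linked above where possible.
+
+```bash
+# Create a service principal. The output contains appId (client ID),
+# password (client secret), and tenant (tenant ID), which map to the
+# --client-id, --client-secret, and --tenant-id flags below.
+az ad sp create-for-rbac --name velero --role Contributor --scopes /subscriptions/<subscription-id>
+```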
+
+Valid Subcommands:
+* `service-principle`
+
+### Usage
+
+```bash
+kubectl kots velero configure-azure [subcommand]
+```
+
+| Flag | Type | Description |
+|--------------|------|--------------------------|
+| `-h, --help` | | help for configure-azure |
+
+### service-principle
+
+```bash
+kubectl kots velero configure-azure service-principle [flags]
+```
+
+- _Provide `[flags]` according to the table below_
+
+| Flag | Type | Description |
+|---------------------|--------|---------------------------------------------------------------------------------------------------------------------------------------------|
+| `-h, --help` | | help for service-principle |
+| `-n, --namespace` | string | the namespace of the Admin Console _(required)_ |
+| `--client-id` | string | the client ID of a Service Principal with access to the blob storage container _(required)_ |
+| `--client-secret` | string | the client secret of a Service Principal with access to the blob storage container _(required)_ |
+| `--cloud-name` | string | the Azure cloud target. Options: AzurePublicCloud, AzureUSGovernmentCloud, AzureChinaCloud, AzureGermanCloud _(default `AzurePublicCloud`)_ |
+| `--container` | string | name of the Azure blob storage container where backups should be stored _(required)_ |
+| `--path` | string | path to a subdirectory in the blob storage container |
+| `--resource-group` | string | the resource group name of the blob storage container _(required)_ |
+| `--skip-validation` | bool | skip the validation of the blob storage container _(default `false`)_ |
+| `--storage-account` | string | the storage account name of the blob storage container _(required)_ |
+| `--subscription-id` | string | the subscription ID associated with the blob storage container _(required)_ |
+| `--tenant-id` | string | the tenant ID associated with the blob storage container _(required)_ |
+
+#### Example
+
+```bash
+kubectl kots velero configure-azure service-principle --namespace default --container velero --resource-group Velero_Backups --storage-account velero1111362eb32b --subscription-id "1111111-1111-47a7-9671-c904d681c2b2" --tenant-id "1111111-1111-42e1-973b-ad2efc689308" --client-id "1111111-1111-4ac3-9e2b-bbea61392432" --client-secret "<secret here>"
+```
+
+================
+File: docs/reference/kots-cli-velero-configure-gcp.md
+================
+# velero configure-gcp
+
+Configures snapshots to use a Google Cloud Platform Object Storage Bucket as a storage destination.
+This command supports auth via [Service Account Credentials](https://github.com/vmware-tanzu/velero-plugin-for-gcp#option-1-set-permissions-with-a-service-account) or [Workload Identity](https://github.com/vmware-tanzu/velero-plugin-for-gcp#option-2-set-permissions-with-using-workload-identity-optional).
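+
+If you plan to use the service account method and do not yet have a credentials file, a minimal sketch with the gcloud CLI is shown below. The service account email is an assumption for illustration; create the account and grant it permissions as described in the plugin documentation linked above.
+
+```bash
+# Download a JSON key for the (assumed) velero service account.
+# The resulting file is passed to the --json-file flag of the service-account subcommand.
+gcloud iam service-accounts keys create sa-creds.json \
+  --iam-account=velero@my-project.iam.gserviceaccount.com
+```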
+
+Valid Subcommands:
+* `service-account`
+* `workload-identity`
+
+### Usage
+
+```bash
+kubectl kots velero configure-gcp [subcommand]
+```
+
+| Flag | Type | Description |
+|--------------|------|--------------------------|
+| `-h, --help` | | help for configure-gcp |
+
+### service-account
+
+```bash
+kubectl kots velero configure-gcp service-account [flags]
+```
+
+- _Provide `[flags]` according to the table below_
+
+| Flag | Type | Description |
+|---------------------|--------|-------------------------------------------------------------------------------|
+| `-h, --help` | | help for service-account |
+| `-n, --namespace` | string | the namespace of the Admin Console _(required)_ |
+| `--bucket` | string | name of the object storage bucket where backups should be stored _(required)_ |
+| `--json-file` | string | path to the JSON credentials file for Velero _(required)_ |
+| `--path` | string | path to a subdirectory in the object store bucket |
+| `--skip-validation` | bool | skip the validation of the GCP bucket _(default `false`)_ |
+
+#### Example
+
+```bash
+kubectl kots velero configure-gcp service-account --namespace default --bucket velero-backups --json-file sa-creds.json
+```
+
+### workload-identity
+
+```bash
+kubectl kots velero configure-gcp workload-identity [flags]
+```
+
+- _Provide `[flags]` according to the table below_
+
+| Flag | Type | Description |
+|---------------------|--------|-------------------------------------------------------------------------------|
+| `-h, --help` | | help for workload-identity |
+| `-n, --namespace` | string | the namespace of the Admin Console _(required)_ |
+| `--bucket` | string | name of the object storage bucket where backups should be stored _(required)_ |
+| `--path` | string | path to a subdirectory in the object store bucket |
+| `--service-account` | string | the service account to use if using Google Cloud instance role _(required)_ |
+| `--skip-validation` | bool | skip the validation of the GCP bucket _(default `false`)_ |
+
+#### Example
+
+```bash
+kubectl kots velero configure-gcp workload-identity --namespace default --bucket velero-backups --service-account ss-velero@gcp-project.iam.gserviceaccount.com
+```
+
+================
+File: docs/reference/kots-cli-velero-configure-hostpath.mdx
+================
+import KotsadmNamespace from "../partials/kots-cli/_kotsadm-namespace.mdx"
+import KotsadmRegistry from "../partials/kots-cli/_kotsadm-registry.mdx"
+import RegistryPassword from "../partials/kots-cli/_registry-password.mdx"
+import RegistryUsername from "../partials/kots-cli/_registry-username.mdx"
+import Help from "../partials/kots-cli/_help.mdx"
+
+# velero configure-hostpath
+
+Configures snapshots to use a host path as a storage destination.
+
+### Usage
+
+```bash
+kubectl kots velero configure-hostpath [flags]
+```
+
+- _Provide `[flags]` according to the table below_
+
+<table>
+  <tr>
+    <td width="30%">Flag</td>
+    <td>Type</td>
+    <td>Description</td>
+  </tr>
+  <Help/>
+  <tr>
+    <td>`-n, --namespace`</td>
+    <td>string</td>
+    <td>The namespace of the Admin Console (required)</td>
+  </tr>
+  <tr>
+    <td>`--hostpath`</td>
+    <td>string</td>
+    <td>A local host path on the node</td>
+  </tr>
+  <KotsadmNamespace/>
+  <KotsadmRegistry/>
+  <RegistryPassword/>
+  <RegistryUsername/>
+  <tr>
+    <td>`--force-reset`</td>
+    <td>bool</td>
+    <td>Bypass the reset prompt and force resetting the host path. (default `false`)</td>
+  </tr>
+  <tr>
+    <td>`--output`</td>
+    <td>string</td>
+    <td>Output format.
Supported values: `json`</td>
+  </tr>
+</table>
+
+### Examples
+
+Basic
+
+```bash
+kubectl kots velero configure-hostpath --hostpath /mnt/kots-sentry-snapshots --namespace kots-sentry
+```
+
+Using a registry for airgapped installations
+
+```bash
+kubectl kots velero configure-hostpath \
+  --hostpath /mnt/kots-sentry-snapshots \
+  --namespace kots-sentry \
+  --kotsadm-registry private.registry.host/kots-sentry \
+  --registry-username ro-username \
+  --registry-password ro-password
+```
+
+================
+File: docs/reference/kots-cli-velero-configure-internal.md
+================
+# velero configure-internal
+
+:::important
+The following command is applicable only to embedded clusters created by Replicated kURL and is _not_ recommended for production usage.
+Consider configuring one of the other available storage destinations. See [Configuring Other Storage Destinations](/enterprise/snapshots-storage-destinations).
+:::
+
+Configures snapshots to use the internal object store in embedded clusters as a storage destination.
+
+### Usage
+
+```bash
+kubectl kots velero configure-internal [flags]
+```
+
+- _Provide `[flags]` according to the table below_
+
+| Flag | Type | Description |
+|------------------------|--------|-------------------------------------------------------------------------------|
+| `-h, --help` | | help for configure-internal |
+| `--skip-validation` | bool | skip the validation of the S3 bucket _(default `false`)_ |
+
+#### Example
+
+```bash
+kubectl kots velero configure-internal
+```
+
+================
+File: docs/reference/kots-cli-velero-configure-nfs.mdx
+================
+import KotsadmNamespace from "../partials/kots-cli/_kotsadm-namespace.mdx"
+import KotsadmRegistry from "../partials/kots-cli/_kotsadm-registry.mdx"
+import RegistryPassword from "../partials/kots-cli/_registry-password.mdx"
+import RegistryUsername from "../partials/kots-cli/_registry-username.mdx"
+import Help from "../partials/kots-cli/_help.mdx"
+
+# velero configure-nfs
+
+Configures snapshots to use NFS as a storage destination.
+
+### Usage
+
+```bash
+kubectl kots velero configure-nfs [flags]
+```
+
+- _Provide `[flags]` according to the table below_
+
+<table>
+  <tr>
+    <td width="30%">Flag</td>
+    <td>Type</td>
+    <td>Description</td>
+  </tr>
+  <Help/>
+  <tr>
+    <td>`-n, --namespace`</td>
+    <td>string</td>
+    <td>The namespace of the Admin Console (required)</td>
+  </tr>
+  <tr>
+    <td>`--nfs-server`</td>
+    <td>string</td>
+    <td>The hostname or IP address of the NFS server (required)</td>
+  </tr>
+  <tr>
+    <td>`--nfs-path`</td>
+    <td>string</td>
+    <td>The path that is exported by the NFS server (required)</td>
+  </tr>
+  <KotsadmNamespace/>
+  <KotsadmRegistry/>
+  <RegistryPassword/>
+  <RegistryUsername/>
+  <tr>
+    <td>`--force-reset`</td>
+    <td>bool</td>
+    <td>Bypass the reset prompt and force resetting the NFS path. (default `false`)</td>
+  </tr>
+  <tr>
+    <td>`--output`</td>
+    <td>string</td>
+    <td>Output format.
Supported values: `json`</td>
+  </tr>
+</table>
+
+### Examples
+
+Basic
+
+```bash
+kubectl kots velero configure-nfs --nfs-server 10.128.0.32 --nfs-path /mnt/nfs_share --namespace kots-sentry
+```
+
+Using a registry for airgapped installations
+
+```bash
+kubectl kots velero configure-nfs \
+  --nfs-server 10.128.0.32 \
+  --nfs-path /mnt/nfs_share \
+  --namespace kots-sentry \
+  --kotsadm-registry private.registry.host/kots-sentry \
+  --registry-username ro-username \
+  --registry-password ro-password
+```
+
+================
+File: docs/reference/kots-cli-velero-configure-other-s3.mdx
+================
+import KotsadmNamespace from "../partials/kots-cli/_kotsadm-namespace.mdx"
+import KotsadmRegistry from "../partials/kots-cli/_kotsadm-registry.mdx"
+import RegistryPassword from "../partials/kots-cli/_registry-password.mdx"
+import RegistryUsername from "../partials/kots-cli/_registry-username.mdx"
+import Help from "../partials/kots-cli/_help.mdx"
+
+# velero configure-other-s3
+
+Configures snapshots to use an S3-compatible storage provider, such as Minio, as a storage destination.
+
+### Usage
+
+```bash
+kubectl kots velero configure-other-s3 [flags]
+```
+
+- _Provide `[flags]` according to the table below_
+
+<table>
+  <tr>
+    <td width="30%">Flag</td>
+    <td>Type</td>
+    <td>Description</td>
+  </tr>
+  <Help/>
+  <tr>
+    <td>`-n, --namespace`</td>
+    <td>string</td>
+    <td>The namespace of the Admin Console (required)</td>
+  </tr>
+  <tr>
+    <td>`--access-key-id`</td>
+    <td>string</td>
+    <td>The AWS access key ID to use for accessing the bucket (required)</td>
+  </tr>
+  <tr>
+    <td>`--bucket`</td>
+    <td>string</td>
+    <td>Name of the object storage bucket where backups should be stored (required)</td>
+  </tr>
+  <tr>
+    <td>`--endpoint`</td>
+    <td>string</td>
+    <td>The S3 endpoint (for example, http://some-other-s3-endpoint) (required)</td>
+  </tr>
+  <tr>
+    <td>`--path`</td>
+    <td>string</td>
+    <td>Path to a subdirectory in the object store bucket</td>
+  </tr>
+  <tr>
+    <td>`--region`</td>
+    <td>string</td>
+    <td>The region where the bucket exists (required)</td>
+  </tr>
+  <tr>
+    <td>`--secret-access-key`</td>
+    <td>string</td>
+    <td>The AWS secret access key to use for accessing the bucket (required)</td>
+  </tr>
+  <tr>
+    <td>`--cacert`</td>
+    <td>string</td>
+    <td>File containing a certificate bundle to use when verifying TLS connections to the object store</td>
+  </tr>
+  <tr>
+    <td>`--skip-validation`</td>
+    <td>bool</td>
+    <td>Skip the validation of the S3 bucket (default `false`)</td>
+  </tr>
+  <KotsadmNamespace/>
+  <KotsadmRegistry/>
+  <RegistryPassword/>
+  <RegistryUsername/>
+</table>
+
+#### Example
+
+```bash
+kubectl kots velero configure-other-s3 --namespace default --endpoint http://minio --region us-east-1 --bucket kots-snaps --access-key-id XXXXXXXJTJB7M2XZUV7D --secret-access-key mysecretkey
+```
+
+================
+File: docs/reference/kots-cli-velero-ensure-permissions.md
+================
+# velero ensure-permissions
+
+Ensures that the necessary permissions are in place for Replicated KOTS to access Velero.
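+
+This is typically required when Velero is installed in a different namespace than the Admin Console. If you are not sure which namespace Velero is running in, you can look it up first, as sketched below (assumes standard `kubectl` access to the cluster):
+
+```bash
+# Find the namespace of the Velero deployment.
+kubectl get deployments --all-namespaces | grep velero
+```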
+
+### Usage
+
+```bash
+kubectl kots velero ensure-permissions [flags]
+```
+
+- _Provide `[flags]` according to the table below_
+
+| Flag | Type | Description |
+| ----------------- | ------ | ------------------------------------------------------------------- |
+| `-h, --help` | | help for ensure-permissions |
+| `-n, --namespace` | string | the namespace where the Admin Console is running _(required)_ |
+| `--velero-namespace` | string | the namespace where Velero is running _(required)_ |
+
+### Example
+
+```bash
+kubectl kots velero ensure-permissions --namespace kots-sentry --velero-namespace velero
+```
+
+================
+File: docs/reference/kots-cli-velero-index.md
+================
+# velero
+
+The KOTS Velero interface, which configures storage destinations for backups (snapshots), configures permissions, and prints setup instructions.
+
+### Usage
+
+```bash
+kubectl kots velero [command] [global flags]
+```
+
+This command supports all [global flags](kots-cli-global-flags).
+
+The following `kots velero` commands are supported:
+
+- [`configure-aws-s3`](kots-cli-velero-configure-aws-s3): Configures an AWS S3 bucket as the storage destination.
+- [`configure-azure`](kots-cli-velero-configure-azure): Configures an Azure Blob Storage Container as the storage destination.
+- [`configure-gcp`](kots-cli-velero-configure-gcp): Configures a Google Cloud Platform Object Storage Bucket as the storage destination.
+- [`configure-internal`](kots-cli-velero-configure-internal): (Embedded clusters only) Configures the internal object store in the cluster as the storage destination.
+- [`configure-other-s3`](kots-cli-velero-configure-other-s3): Configures an S3-compatible storage provider as the storage destination.
+- [`configure-nfs`](kots-cli-velero-configure-nfs): Configures NFS as the storage destination.
+- [`configure-hostpath`](kots-cli-velero-configure-hostpath): Configures a host path as the storage destination.
+- [`ensure-permissions`](kots-cli-velero-ensure-permissions): Allows the KOTS Admin Console to access Velero.
+
+================
+File: docs/reference/kots-cli-velero-print-fs-instructions.md
+================
+# velero print-fs-instructions
+
+:::note
+This command is deprecated. Use [`kubectl kots velero configure-hostpath`](/reference/kots-cli-velero-configure-hostpath) or [`kubectl kots velero configure-nfs`](/reference/kots-cli-velero-configure-nfs) instead.
+:::
+
+Prints instructions for setting up a file system as the snapshots storage destination (such as NFS or host path).
+
+### Usage
+
+```bash
+kubectl kots velero print-fs-instructions [flags]
+```
+
+- _Provide `[flags]` according to the table below_
+
+| Flag | Type | Description |
+| ----------------- | ------ | ------------------------------------------------------------------- |
+| `-h, --help` | | help for print-fs-instructions |
+| `-n, --namespace` | string | the namespace of the Admin Console _(required)_ |
+
+### Example
+
+Basic
+
+```bash
+kubectl kots velero print-fs-instructions --namespace kots-sentry
+```
+
+================
+File: docs/reference/linter.mdx
+================
+import MissingKindField from "../partials/linter-rules/_missing-kind-field.mdx"
+import MissingAPIVersionField from "../partials/linter-rules/_missing-api-version-field.mdx"
+import PreflightSpec from "../partials/linter-rules/_preflight-spec.mdx"
+import ConfigSpec from "../partials/linter-rules/_config-spec.mdx"
+import TroubleshootSpec from "../partials/linter-rules/_troubleshoot-spec.mdx"
+import ApplicationSpec from "../partials/linter-rules/_application-spec.mdx"
+import ApplicationIcon from "../partials/linter-rules/_application-icon.mdx"
+import ApplicationStatusInformers from "../partials/linter-rules/_application-statusInformers.mdx"
+import InvalidTargetKOTS from "../partials/linter-rules/_invalid-target-kots-version.mdx"
+import InvalidMinKOTS from "../partials/linter-rules/_invalid-min-kots-version.mdx"
+import InvalidKubernetesInstaller from "../partials/linter-rules/_invalid-kubernetes-installer.mdx"
+import DeprecatedKubernetesInstallerVersion from "../partials/linter-rules/_deprecated-kubernetes-installer-version.mdx"
+import InvalidHelmReleaseName from "../partials/linter-rules/_invalid-helm-release-name.mdx"
+import Replicas1 from "../partials/linter-rules/_replicas-1.mdx"
+import Privileged from "../partials/linter-rules/_privileged.mdx"
+import AllowPrivilegeEscalation from "../partials/linter-rules/_allow-privilege-escalation.mdx"
+import ContainerImageLatestTag from "../partials/linter-rules/_container-image-latest-tag.mdx"
+import ContainerImageLocalImageName from "../partials/linter-rules/_container-image-local-image-name.mdx"
+import ContainerResources from "../partials/linter-rules/_container-resources.mdx"
+import ContainerResourceLimits from "../partials/linter-rules/_container-resource-limits.mdx"
+import ContainerResourceRequests from "../partials/linter-rules/_container-resource-requests.mdx"
+import ResourceLimitsCPU from "../partials/linter-rules/_resource-limits-cpu.mdx"
+import ResourceLimitsMemory from "../partials/linter-rules/_resource-limits-memory.mdx"
+import ResourceRequestsCPU from "../partials/linter-rules/_resource-requests-cpu.mdx"
+import ResourceRequestsMemory from "../partials/linter-rules/_resource-requests-memory.mdx"
+import VolumesHostPaths from "../partials/linter-rules/_volumes-host-paths.mdx"
+import VolumeDockerSock from "../partials/linter-rules/_volume-docker-sock.mdx"
+import HardcodedNamespace from "../partials/linter-rules/_hardcoded-namespace.mdx"
+import ConfigOptionInvalidType from "../partials/linter-rules/_config-option-invalid-type.mdx"
+import ConfigOptionInvalidRegexValidator from "../partials/linter-rules/_config-option-invalid-regex-validator.mdx"
+import ConfigOptionRegexValidatorInvalidType from "../partials/linter-rules/_config-option-regex-validator-invalid-type.mdx"
+import RepeatOptionMissingTemplate from "../partials/linter-rules/_repeat-option-missing-template.mdx"
+import RepeatOptionMissingValuesByGroup from
"../partials/linter-rules/_repeat-option-missing-valuesByGroup.mdx" +import RepeatOptionMalformedYAMLPath from "../partials/linter-rules/_repeat-option-malformed-yamlpath.mdx" +import ConfigOptionPasswordType from "../partials/linter-rules/_config-option-password-type.mdx" +import ConfigOptionIsCircular from "../partials/linter-rules/_config-option-is-circular.mdx" +import InvalidRenderedYaml from "../partials/linter-rules/_invalid-rendered-yaml.mdx" +import InvalidType from "../partials/linter-rules/_invalid_type.mdx" +import InvalidYaml from "../partials/linter-rules/_invalid-yaml.mdx" +import LinterDefinition from "../partials/linter-rules/_linter-definition.mdx" +import MayContainSecrets from "../partials/linter-rules/_may-contain-secrets.mdx" + +# Linter Rules + +This topic describes the release linter and the linter rules. + +## Overview + +<LinterDefinition/> + +The linter runs automatically against KOTS releases that you create in the Replicated vendor portal, and displays any error or warning messages in the vendor portal UI. + +To lint manifest files from the command line, you can run the Replicated CLI `replicated release lint` command against the root directory of your application manifest files. You can also use the `--lint` flag when you create a release with the `replicated release create` command. For more information, see [release lint](/reference/replicated-cli-release-lint) and [release create](/reference/replicated-cli-release-create) in the _Replicated CLI_ section. + +## Linter Rules + +This section lists the linter rules and the default rule levels (Info, Warn, Error). You can customize the default rule levels in the Replicated LinterConfig custom resource. +For more information, see [LintConfig](custom-resource-lintconfig). + +### allow-privilege-escalation + +<table> + <tr> + <th>Description</th> + <td>Notifies if any manifest file has <code>allowPrivilegeEscalation</code> set to <code>true</code>.</td> + </tr> + <tr> + <th>Level</th> + <td>Info</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><AllowPrivilegeEscalation/></td> + </tr> +</table> + +### application-icon + +<table> + <tr> + <th>Description</th> + <td> + Requires an application icon. + </td> + </tr> + <tr> + <th>Level</th> + <td>Warn</td> + </tr> + <tr> + <th>Applies To</th> + <td> + Files with <code>kind: Application</code> and <code>apiVersion: kots.io/v1beta1</code>. + </td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of correct YAML for this rule:</p><ApplicationIcon/></td> + </tr> +</table> + +### application-spec + +<table> + <tr> + <th>Description</th> + <td> + <p>Requires an Application custom resource manifest file.</p> + <p>Accepted value for <code>kind</code>: <code>Application</code></p> + </td> + </tr> + <tr> + <th>Level</th> + <td>Warn</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><ApplicationSpec/></td> + </tr> +</table> + +### application-statusInformers + +<table> + <tr> + <th>Description</th> + <td> + Requires <code>statusInformers</code>. + </td> + </tr> + <tr> + <th>Level</th> + <td>Warn</td> + </tr> + <tr> + <th>Applies To</th> + <td> + Files with <code>kind: Application</code> and <code>apiVersion: kots.io/v1beta1</code>. 
+ </td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of correct YAML for this rule:</p><ApplicationStatusInformers/></td> + </tr> +</table> + +### config-option-invalid-type + +<table> + <tr> + <th>Description</th> + <td> + <p>Enforces valid types for Config items.</p> + <p>For more information, see <a href="/reference/custom-resource-config#items">Items</a> in <em>Config</em>.</p> + </td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><ConfigOptionInvalidType/></td> + </tr> +</table> + +### config-option-is-circular + +<table> + <tr> + <th>Description</th> + <td>Enforces that all ConfigOption items do not reference themselves.</td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td> + Files with <code>kind: Config</code> and <code>apiVersion: kots.io/v1beta1</code>. + </td> + </tr> + <tr> + <th>Example</th> + <td> <ConfigOptionIsCircular/> </td> + </tr> +</table> + + +### config-option-not-found + +<table> + <tr> + <th>Description</th> + <td> + Requires all ConfigOption items to be defined in the <code>Config</code> custom resource manifest file. + </td> + </tr> + <tr> + <th>Level</th> + <td>Warn</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> +</table> + + +### config-option-not-repeatable + +<table> + <tr> + <th>Description</th> + <td> + Enforces that sub-templated ConfigOption items must be repeatable. + </td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> +</table> + +### config-option-password-type + +<table> + <tr> + <th>Description</th> + <td> + <p>Requires ConfigOption items with any of the following names to have <code>type</code> set to <code>password</code>:</p> + <ul> + <li><code>password</code></li> + <li><code>secret</code></li> + <li><code>token</code></li> + </ul> + </td> + </tr> + <tr> + <th>Level</th> + <td>Warn</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of correct YAML for this rule:</p><ConfigOptionPasswordType/></td> + </tr> +</table> + +### config-option-when-is-invalid + +<table> + <tr> + <th>Description</th> + <td> + <p>Enforces valid <code>ConfigOption.when</code>.</p> + <p>For more information, see <a href="/reference/custom-resource-config#when">when</a> in <em>Config</em>.</p> + </td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td>Files with <code>kind: Config</code> and <code>apiVersion: kots.io/v1beta1</code>.</td> + </tr> +</table> + +### config-option-invalid-regex-validator + +<table> + <tr> + <th>Description</th> + <td> + <p>Enforces valid <a href="https://github.com/google/re2/wiki/Syntax">RE2 regular expressions</a> pattern when regex validation is present.</p> + <p>For more information, see <a href="/reference/custom-resource-config#validation">Validation</a> in <em>Config</em>.</p> + </td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td>Files with <code>kind: Config</code> and <code>apiVersion: kots.io/v1beta1</code>.</td> + </tr> + <tr> + <th>Example</th> + <td><ConfigOptionInvalidRegexValidator/></td> + </tr> +</table> + +### config-option-regex-validator-invalid-type + +<table> + <tr> + <th>Description</th> + <td> + <p>Enforces valid item type when regex validation is present.</p> + <p>Item type should be 
<code>text</code>|<code>textarea</code>|<code>password</code>|<code>file</code></p> + <p>For more information, see <a href="/reference/custom-resource-config#validation">Validation</a> in <em>Config</em>.</p> + </td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td>Files with <code>kind: Config</code> and <code>apiVersion: kots.io/v1beta1</code>.</td> + </tr> + <tr> + <th>Example</th> + <td><ConfigOptionRegexValidatorInvalidType/></td> + </tr> +</table> + +### config-spec + +<table> + <tr> + <th>Description</th> + <td> + <p>Requires a Config custom resource manifest file.</p> + <p>Accepted value for <code>kind</code>: <code>Config</code></p> + <p>Accepted value for <code>apiVersion</code>: <code>kots.io/v1beta1</code></p> + </td> + </tr> + <tr> + <th>Level</th> + <td>Warn</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><ConfigSpec/></td> + </tr> +</table> + +### container-image-latest-tag + +<table> + <tr> + <th>Description</th> + <td>Notifies if any manifest file has a container image tag appended with + <code>:latest</code>.</td> + </tr> + <tr> + <th>Level</th> + <td>Info</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><ContainerImageLatestTag/></td> + </tr> +</table> + +### container-image-local-image-name + +<table> + <tr> + <th>Description</th> + <td>Disallows any manifest file having a container image tag that includes <code>LocalImageName</code>.</td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><ContainerImageLocalImageName/></td> + </tr> +</table> + +### container-resource-limits + +<table> + <tr> + <th>Description</th> + <td>Notifies if a <code>spec.container</code> has no <code>resources.limits</code> field.</td> + </tr> + <tr> + <th>Level</th> + <td>Info</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><ContainerResourceLimits/></td> + </tr> +</table> + + +### container-resource-requests + +<table> + <tr> + <th>Description</th> + <td>Notifies if a <code>spec.container</code> has no <code>resources.requests</code> field.</td> + </tr> + <tr> + <th>Level</th> + <td>Info</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><ContainerResourceRequests/></td> + </tr> +</table> + +### container-resources + +<table> + <tr> + <th>Description</th> + <td>Notifies if a manifest file has no <code>resources</code> field.</td> + </tr> + <tr> + <th>Level</th> + <td>Info</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><ContainerResources/></td> + </tr> +</table> + +### deprecated-kubernetes-installer-version + +<table> + <tr> + <th>Description</th> + <td> + <p>Disallows using the deprecated kURL installer <code>apiVersion</code>.</p> + <p><code>kurl.sh/v1beta1</code> is deprecated. Use <code>cluster.kurl.sh/v1beta1</code> instead.</p> + </td> + </tr> + <tr> + <th>Level</th> + <td>Warn</td> + </tr> + <tr> + <th>Applies To</th> + <td> + Files with <code>kind: Installer</code> and <code>apiVersion: kurl.sh/v1beta1</code>. 
+ </td> + </tr> + <tr> + <th>Example</th> + <td><DeprecatedKubernetesInstallerVersion/></td> + </tr> +</table> + +### duplicate-helm-release-name + +<table> + <tr> + <th>Description</th> + <td> + <p>Enforces unique <code>spec.chart.releaseName</code> across all HelmChart custom resource manifest files.</p> + </td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td> + Files with <code>kind: HelmChart</code> and <code>apiVersion: kots.io/v1beta1</code>. + </td> + </tr> +</table> + +### duplicate-kots-kind + +<table> + <tr> + <th>Description</th> + <td> + <p>Disallows duplicate Replicated custom resources. + A release can only include one of each <code>kind</code> of custom resource.</p> + <p>This rule disallows inclusion of more than one file with:</p> + <ul> + <li>The same <code>kind</code> and <code>apiVersion</code></li> + <li><code>kind: Troubleshoot</code> and any Troubleshoot <code>apiVersion</code></li> + <li><code>kind: Installer</code> and any Installer <code>apiVersion</code></li> + </ul> + </td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td> + All files + </td> + </tr> +</table> + +### hardcoded-namespace + +<table> + <tr> + <th>Description</th> + <td> + <p>Notifies if any manifest file has a <code>metadata.namespace</code> set + to a static field.</p> + <p>Replicated strongly recommends not specifying a namespace to allow + for flexibility when deploying into end user environments.</p> + <p>For more information, see <a href="/vendor/namespaces">Managing Application Namespaces</a>.</p> + </td> + </tr> + <tr> + <th>Level</th> + <td>Info</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><HardcodedNamespace/></td> + </tr> +</table> + +### helm-archive-missing + +<table> + <tr> + <th>Description</th> + <td><p>Requires that a <code>*.tar.gz</code> file is present that matches what is in the HelmChart custom resource manifest file.</p></td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td> + Releases with a HelmChart custom resource manifest file containing <code>kind: HelmChart</code> and <code>apiVersion: kots.io/v1beta1</code>. + </td> + </tr> +</table> + +### helm-chart-missing + +<table> + <tr> + <th>Description</th> + <td><p>Enforces that a HelmChart custom resource manifest file with <code>kind: HelmChart</code> is present if there is a <code>*.tar.gz</code> archive present.</p></td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td> + Releases with a <code>*.tar.gz</code> archive file present. + </td> + </tr> +</table> + +### invalid-helm-release-name + +<table> + <tr> + <th>Description</th> + <td> + <p>Enforces valid <code>spec.chart.releaseName</code> in the HelmChart custom resource manifest file.</p> + <p><code>spec.chart.releaseName</code> must meet the following requirements:</p> + <ul> + <li>Begin and end with a lowercase letter or number</li> + <li>Contain only lowercase letters, numbers, periods, and hyphens (<code>-</code>)</li> + <li>Contain a lowercase letter or number between any two symbols (periods or hyphens)</li> + </ul> + </td> + </tr> + <tr> + <th>Level</th> + <td>Warn</td> + </tr> + <tr> + <th>Applies To</th> + <td> + Files with <code>kind: HelmChart</code> and <code>apiVersion: kots.io/v1beta1</code>. 
+ </td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of correct YAML for this rule:</p><InvalidHelmReleaseName/></td> + </tr> +</table> + +### invalid-kubernetes-installer + +<table> + <tr> + <th>Description</th> + <td> + <p>Enforces valid Replicated kURL add-on versions.</p> + <p>kURL add-ons included in the kURL installer must pin specific versions rather than <code>latest</code> or x-ranges (1.2.x).</p> + </td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td> + <p>Files with <code>kind: Installer</code> and one of the following values for <code>apiVersion</code>:</p> + <ul> + <li><code>cluster.kurl.sh/v1beta1</code></li> + <li><code>kurl.sh/v1beta1</code></li> + </ul> + </td> + </tr> + <tr> + <th>Example</th> + <td><InvalidKubernetesInstaller/></td> + </tr> +</table> + +### invalid-min-kots-version + +<table> + <tr> + <th>Description</th> + <td> + <p>Requires <code>minKotsVersion</code> in the Application custom resource to use valid Semantic Versioning. + See <a href="https://semver.org/">Semantic Versioning 2.0.0</a>.</p> + <p>Accepts a <code>v</code> as an optional prefix, so both <code>1.0.0</code> and <code>v1.0.0</code> are valid.</p> + </td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td> + Files with <code>kind: Application</code> and <code>apiVersion: kots.io/v1beta1</code>. + </td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of correct YAML for this rule:</p><InvalidMinKOTS/></td> + </tr> +</table> + +### invalid-rendered-yaml + +<table> + <tr> + <th>Description</th> + <td><p>Enforces valid YAML after rendering the manifests using the Config spec.</p></td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td> + YAML files + </td> + </tr> + <tr> + <th>Example</th> + <td><InvalidRenderedYaml/></td> + </tr> +</table> + +### invalid-target-kots-version + +<table> + <tr> + <th>Description</th> + <td> + <p>Requires <code>targetKotsVersion</code> in the Application custom resource to use valid Semantic Versioning. 
+ See <a href="https://semver.org/">Semantic Versioning 2.0.0</a>.</p> + <p>Accepts a <code>v</code> as an optional prefix, so both <code>1.0.0</code> and <code>v1.0.0</code> are valid.</p> + </td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td> + Files with <code>kind: Application</code> and <code>apiVersion: kots.io/v1beta1</code> + </td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of correct YAML for this rule:</p><InvalidTargetKOTS/></td> + </tr> +</table> + +### invalid-type + +<table> + <tr> + <th>Description</th> + <td><p>Requires that the value of a property matches that property's expected type.</p></td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td> + All files + </td> + </tr> + <tr> + <th>Example</th> + <td><InvalidType/></td> + </tr> +</table> + +### invalid-yaml + +<table> + <tr> + <th>Description</th> + <td><p>Enforces valid YAML.</p></td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td> + YAML files + </td> + </tr> + <tr> + <th>Example</th> + <td><InvalidYaml/></td> + </tr> +</table> + +### may-contain-secrets + +<table> + <tr> + <th>Description</th> + <td> Notifies if any manifest file may contain secrets.</td> + </tr> + <tr> + <th>Level</th> + <td>Info</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><MayContainSecrets/></td> + </tr> +</table> + +### missing-api-version-field + +<table> + <tr> + <th>Description</th> + <td>Requires the <code>apiVersion:</code> field in all files.</td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of correct YAML for this rule:</p><MissingAPIVersionField/></td> + </tr> +</table> + +### missing-kind-field + +<table> + <tr> + <th>Description</th> + <td>Requires the <code>kind:</code> field in all files.</td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of correct YAML for this rule:</p><MissingKindField/></td> + </tr> +</table> + +### nonexistent-status-informer-object + +<table> + <tr> + <th>Description</th> + <td> + <p>Requires that each <code>statusInformers</code> entry references an existing Kubernetes workload.</p> + <p>The linter cannot evaluate <code>statusInformers</code> for Helm-managed resources because it does not template Helm charts during analysis.</p> + <p>If you configure status informers for Helm-managed resources, you can ignore <code>nonexistent-status-informer-object</code> warnings for those workloads. To disable <code>nonexistent-status-informer-object</code> warnings, change the level for this rule to <code>info</code> or <code>off</code> in the LintConfig custom resource manifest file. 
See <a href="custom-resource-lintconfig">LintConfig</a> in <em>Custom Resources</em>.</p>
+    </td>
+  </tr>
+  <tr>
+    <th>Level</th>
+    <td>Warn</td>
+  </tr>
+  <tr>
+    <th>Applies To</th>
+    <td>
+      <p>Compares <code>statusInformer</code> values in files with <code>kind: Application</code> and <code>apiVersion: kots.io/v1beta1</code> to all manifests in the release.</p>
+    </td>
+  </tr>
+</table>
+
+### preflight-spec
+
+<table>
+  <tr>
+    <th>Description</th>
+    <td>
+      <p>Requires a Preflight custom resource manifest file with:</p>
+      <p><code>kind: Preflight</code></p>
+      <p>and one of the following:</p>
+      <ul>
+        <li><code>apiVersion: troubleshoot.replicated.com/v1beta1</code></li>
+        <li><code>apiVersion: troubleshoot.sh/v1beta2</code></li>
+      </ul>
+    </td>
+  </tr>
+  <tr>
+    <th>Level</th>
+    <td>Warn</td>
+  </tr>
+  <tr>
+    <th>Example</th>
+    <td><p>Example of matching YAML for this rule:</p><PreflightSpec/></td>
+  </tr>
+</table>
+
+### privileged
+
+<table>
+  <tr>
+    <th>Description</th>
+    <td>Notifies if any manifest file has <code>privileged</code> set to <code>true</code>.</td>
+  </tr>
+  <tr>
+    <th>Level</th>
+    <td>Info</td>
+  </tr>
+  <tr>
+    <th>Applies To</th>
+    <td>All files</td>
+  </tr>
+  <tr>
+    <th>Example</th>
+    <td><p>Example of matching YAML for this rule:</p><Privileged/></td>
+  </tr>
+</table>
+
+### repeat-option-malformed-yamlpath
+
+<table>
+  <tr>
+    <th>Description</th>
+    <td>
+      <p>Enforces ConfigOption <code>yamlPath</code> ending with square brackets denoting index position.</p>
+      <p>For more information, see <a href="/reference/custom-resource-config#template-targets">Repeatable Item Template Targets</a> in <em>Config</em>.</p>
+    </td>
+  </tr>
+  <tr>
+    <th>Level</th>
+    <td>Error</td>
+  </tr>
+  <tr>
+    <th>Applies To</th>
+    <td>All files</td>
+  </tr>
+  <tr>
+    <th>Example</th>
+    <td><p>Example of correct YAML for this rule:</p><RepeatOptionMalformedYAMLPath/></td>
+  </tr>
+</table>
+
+### repeat-option-missing-template
+
+<table>
+  <tr>
+    <th>Description</th>
+    <td>
+      <p>Disallows a repeatable Config item with an undefined <code>item.templates</code>.</p>
+      <p>For more information, see <a href="/reference/custom-resource-config#template-targets">Repeatable Item Template Targets</a> in <em>Config</em>.</p>
+    </td>
+  </tr>
+  <tr>
+    <th>Level</th>
+    <td>Error</td>
+  </tr>
+  <tr>
+    <th>Applies To</th>
+    <td>All files</td>
+  </tr>
+  <tr>
+    <th>Example</th>
+    <td><p>Example of correct YAML for this rule:</p><RepeatOptionMissingTemplate/></td>
+  </tr>
+</table>
+
+
+### repeat-option-missing-valuesByGroup
+
+<table>
+  <tr>
+    <th>Description</th>
+    <td>
+      <p>Disallows a repeatable Config item with an undefined <code>item.valuesByGroup</code>.</p>
+      <p>For more information, see <a href="/reference/custom-resource-config#repeatable-items">Repeatable Items</a> in <em>Config</em>.</p>
+    </td>
+  </tr>
+  <tr>
+    <th>Level</th>
+    <td>Error</td>
+  </tr>
+  <tr>
+    <th>Applies To</th>
+    <td>All files</td>
+  </tr>
+  <tr>
+    <th>Example</th>
+    <td><p>Example of correct YAML for this rule:</p><RepeatOptionMissingValuesByGroup/></td>
+  </tr>
+</table>
+
+### replicas-1
+
+<table>
+  <tr>
+    <th>Description</th>
+    <td>Notifies if any manifest file has <code>replicas</code> set to <code>1</code>.</td>
+  </tr>
+  <tr>
+    <th>Level</th>
+    <td>Info</td>
+  </tr>
+  <tr>
+    <th>Applies To</th>
+    <td>All files</td>
+  </tr>
+  <tr>
+    <th>Example</th>
+    <td><p>Example of matching YAML for this rule:</p><Replicas1/></td>
+  </tr>
+</table>
+
+### resource-limits-cpu
+
+<table>
+  <tr>
+    <th>Description</th>
+    <td>Notifies if a
<code>spec.container</code> has no <code>resources.limits.cpu</code> field.</td> + </tr> + <tr> + <th>Level</th> + <td>Info</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><ResourceLimitsCPU/></td> + </tr> +</table> + +### resource-limits-memory + +<table> + <tr> + <th>Description</th> + <td>Notifies if a <code>spec.container</code> has no <code>resources.limits.memory</code> field.</td> + </tr> + <tr> + <th>Level</th> + <td>Info</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><ResourceLimitsMemory/></td> + </tr> +</table> + +### resource-requests-cpu + +<table> + <tr> + <th>Description</th> + <td>Notifies if a <code>spec.container</code> has no <code>resources.requests.cpu</code> field.</td> + </tr> + <tr> + <th>Level</th> + <td>Info</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><ResourceRequestsCPU/></td> + </tr> +</table> + +### resource-requests-memory + +<table> + <tr> + <th>Description</th> + <td>Notifies if a <code>spec.container</code> has no <code>resources.requests.memory</code> field.</td> + </tr> + <tr> + <th>Level</th> + <td>Info</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><ResourceRequestsMemory/></td> + </tr> +</table> + +### troubleshoot-spec + +<table> + <tr> + <th>Description</th> + <td> + <p>Requires a Troubleshoot manifest file.</p> + <p>Accepted values for <code>kind</code>:</p> + <ul> + <li><code>Collector</code></li> + <li><code>SupportBundle</code></li> + </ul> + <p>Accepted values for <code>apiVersion</code>:</p> + <ul> + <li><code>troubleshoot.replicated.com/v1beta1</code></li> + <li><code>troubleshoot.sh/v1beta2</code></li> + </ul> + </td> + </tr> + <tr> + <th>Level</th> + <td>Warn</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><TroubleshootSpec/></td> + </tr> +</table> + +### volume-docker-sock + +<table> + <tr> + <th>Description</th> + <td>Notifies if a <code>spec.volumes</code> has <code>hostPath</code> + set to <code>/var/run/docker.sock</code>.</td> + </tr> + <tr> + <th>Level</th> + <td>Info</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><VolumeDockerSock/></td> + </tr> +</table> + +### volumes-host-paths + +<table> + <tr> + <th>Description</th> + <td>Notifies if a <code>spec.volumes</code> has defined a <code>hostPath</code>.</td> + </tr> + <tr> + <th>Level</th> + <td>Info</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><VolumesHostPaths/></td> + </tr> +</table> + +================ +File: docs/reference/replicated-cli-api-get.mdx +================ +# replicated api get + +Make ad-hoc GET API calls to the Replicated API + +### Synopsis + +This is essentially like curl for the Replicated API, but +uses your local credentials and prints the response unmodified. + +We recommend piping the output to jq for easier reading. + +Pass the PATH of the request as the final argument. Do not include the host or version. 
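+
+For example, to fetch and pretty-print the list of apps (assumes `jq` is installed):
+
+```
+replicated api get /v3/apps | jq .
+```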
+ +``` +replicated api get [flags] +``` + +### Examples + +``` +replicated api get /v3/apps +``` + +### Options + +``` + -h, --help help for get +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated api](replicated-cli-api) - Make ad-hoc API calls to the Replicated API + +================ +File: docs/reference/replicated-cli-api-patch.mdx +================ +# replicated api patch + +Make ad-hoc PATCH API calls to the Replicated API + +### Synopsis + +This is essentially like curl for the Replicated API, but +uses your local credentials and prints the response unmodified. + +We recommend piping the output to jq for easier reading. + +Pass the PATH of the request as the final argument. Do not include the host or version. + +``` +replicated api patch [flags] +``` + +### Examples + +``` +replicated api patch /v3/customer/2VffY549paATVfHSGpJhjh6Ehpy -b '{"name":"Valuable Customer"}' +``` + +### Options + +``` + -b, --body string JSON body to send with the request + -h, --help help for patch +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated api](replicated-cli-api) - Make ad-hoc API calls to the Replicated API + +================ +File: docs/reference/replicated-cli-api-post.mdx +================ +# replicated api post + +Make ad-hoc POST API calls to the Replicated API + +### Synopsis + +This is essentially like curl for the Replicated API, but +uses your local credentials and prints the response unmodified. + +We recommend piping the output to jq for easier reading. + +Pass the PATH of the request as the final argument. Do not include the host or version. + +``` +replicated api post [flags] +``` + +### Examples + +``` +replicated api post /v3/app/2EuFxKLDxKjPNk2jxMTmF6Vxvxu/channel -b '{"name":"marc-waz-here"}' +``` + +### Options + +``` + -b, --body string JSON body to send with the request + -h, --help help for post +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated api](replicated-cli-api) - Make ad-hoc API calls to the Replicated API + +================ +File: docs/reference/replicated-cli-api-put.mdx +================ +# replicated api put + +Make ad-hoc PUT API calls to the Replicated API + +### Synopsis + +This is essentially like curl for the Replicated API, but +uses your local credentials and prints the response unmodified. + +We recommend piping the output to jq for easier reading. + +Pass the PATH of the request as the final argument. Do not include the host or version. 
+ +``` +replicated api put [flags] +``` + +### Examples + +``` +replicated api put /v3/app/2EuFxKLDxKjPNk2jxMTmF6Vxvxu/channel/2QLPm10JPkta7jO3Z3Mk4aXTPyZ -b '{"name":"marc-waz-here2"}' +``` + +### Options + +``` + -b, --body string JSON body to send with the request + -h, --help help for put +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated api](replicated-cli-api) - Make ad-hoc API calls to the Replicated API + +================ +File: docs/reference/replicated-cli-api.mdx +================ +# replicated api + +Make ad-hoc API calls to the Replicated API + +### Options + +``` + -h, --help help for api +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated +* [replicated api get](replicated-cli-api-get) - Make ad-hoc GET API calls to the Replicated API +* [replicated api patch](replicated-cli-api-patch) - Make ad-hoc PATCH API calls to the Replicated API +* [replicated api post](replicated-cli-api-post) - Make ad-hoc POST API calls to the Replicated API +* [replicated api put](replicated-cli-api-put) - Make ad-hoc PUT API calls to the Replicated API + +================ +File: docs/reference/replicated-cli-app-create.mdx +================ +# replicated app create + +Create a new application + +### Synopsis + +Create a new application in your Replicated account. + +This command allows you to initialize a new application that can be distributed +and managed using the KOTS platform. When you create a new app, it will be set up +with default configurations, which you can later customize. + +The NAME argument is required and will be used as the application's name. + +``` +replicated app create NAME [flags] +``` + +### Examples + +``` +# Create a new app named "My App" +replicated app create "My App" + +# Create a new app and output the result in JSON format +replicated app create "Another App" --output json + +# Create a new app with a specific name and view details in table format +replicated app create "Custom App" --output table +``` + +### Options + +``` + -h, --help help for create + --output string The output format to use. One of: json|table (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated app](replicated-cli-app) - Manage applications + +================ +File: docs/reference/replicated-cli-app-ls.mdx +================ +# replicated app ls + +List applications + +### Synopsis + +List all applications in your Replicated account, +or search for a specific application by name or ID. + +This command displays information about your applications, including their +names, IDs, and associated channels. If a NAME argument is provided, it will +filter the results to show only applications that match the given name or ID. + +The output can be customized using the --output flag to display results in +either table or JSON format. 
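+
+The JSON output is convenient for scripting. For example, you can pretty-print it with `jq` before extracting fields (assumes `jq` is installed; inspect the response shape first, since the field layout is not documented here):
+
+```
+replicated app ls --output json | jq .
+```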
+

```
replicated app ls [NAME] [flags]
```

### Aliases

```
ls, list
```

### Examples

```
# List all applications
replicated app ls

# Search for a specific application by name
replicated app ls "My App"

# List applications and output in JSON format
replicated app ls --output json

# Search for an application and display results in table format
replicated app ls "App Name" --output table
```

### Options

```
 -h, --help help for ls
 --output string The output format to use. One of: json|table (default: table) (default "table")
```

### Options inherited from parent commands

```
 --app string The app slug or app id to use in all calls
 --token string The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated app](replicated-cli-app) - Manage applications

================
File: docs/reference/replicated-cli-app-rm.mdx
================
# replicated app rm

Delete an application

### Synopsis

Delete an application from your Replicated account.

This command allows you to permanently remove an application from your account.
Once deleted, the application and all associated data will be irretrievably lost.

Use this command with caution as there is no way to undo this operation.

```
replicated app rm NAME [flags]
```

### Aliases

```
rm, delete
```

### Examples

```
# Delete an app named "My App"
replicated app delete "My App"

# Delete an app and skip the confirmation prompt
replicated app delete "Another App" --force

# Delete an app and output the result in JSON format
replicated app delete "Custom App" --output json
```

### Options

```
 -f, --force Skip confirmation prompt. There is no undo for this action.
 -h, --help help for rm
 --output string The output format to use. One of: json|table (default: table) (default "table")
```

### Options inherited from parent commands

```
 --app string The app slug or app id to use in all calls
 --token string The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated app](replicated-cli-app) - Manage applications

================
File: docs/reference/replicated-cli-app.mdx
================
# replicated app

Manage applications

### Synopsis

The app command allows you to manage applications in your Replicated account.

This command provides a suite of subcommands for creating, listing, updating, and
deleting applications. You can perform operations such as creating new apps,
viewing app details, modifying app settings, and removing apps from your account.
+

Use the various subcommands to:
- Create new applications
- List all existing applications
- View details of a specific application
- Update application settings
- Delete applications from your account

### Examples

```
# List all applications
replicated app ls

# Create a new application
replicated app create "My New App"

# View details of a specific application
replicated app inspect "My App Name"

# Delete an application
replicated app delete "App to Remove"

# Update an application's settings
replicated app update "My App" --channel stable

# List applications with custom output format
replicated app ls --output json
```

### Options

```
 -h, --help help for app
```

### Options inherited from parent commands

```
 --app string The app slug or app id to use in all calls
 --token string The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated
* [replicated app create](replicated-cli-app-create) - Create a new application
* [replicated app ls](replicated-cli-app-ls) - List applications
* [replicated app rm](replicated-cli-app-rm) - Delete an application

================
File: docs/reference/replicated-cli-channel-create.mdx
================
# replicated channel create

Create a new channel in your app

### Synopsis

Create a new channel in your app and print the channel on success.

```
replicated channel create [flags]
```

### Examples

```
replicated channel create --name Beta --description 'New features subject to change'
```

### Options

```
 --description string A longer description of this channel
 -h, --help help for create
 --name string The name of this channel
 --output string The output format to use. One of: json|table (default: table) (default "table")
```

### Options inherited from parent commands

```
 --app string The app slug or app id to use in all calls
 --token string The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated channel](replicated-cli-channel) - List channels

================
File: docs/reference/replicated-cli-channel-demote.mdx
================
# replicated channel demote

Demote a release from a channel

### Synopsis

Demote a channel release from a channel using a channel sequence or release sequence.

```
replicated channel demote CHANNEL_ID_OR_NAME [flags]
```

### Examples

```
 # Demote a release from a channel by channel sequence
 replicated channel demote Beta --channel-sequence 15

 # Demote a release from a channel by release sequence
 replicated channel demote Beta --release-sequence 12
```

### Options

```
 --channel-sequence int The channel sequence to demote
 -h, --help help for demote
 --release-sequence int The release sequence to demote
```

### Options inherited from parent commands

```
 --app string The app slug or app id to use in all calls
 --token string The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated channel](replicated-cli-channel) - List channels

================
File: docs/reference/replicated-cli-channel-disable-semantic-versioning.mdx
================
# replicated channel disable-semantic-versioning

Disable semantic versioning for CHANNEL_ID

### Synopsis

Disable semantic versioning for the CHANNEL_ID.
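
The CHANNEL_ID can be looked up first with 'replicated channel ls'; a minimal sketch (the ID below is illustrative):

```
# Find the channel ID, then disable semantic versioning on that channel
replicated channel ls
replicated channel disable-semantic-versioning 2QLPm10JPkta7jO3Z3Mk4aXTPyZ
```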
+ +``` +replicated channel disable-semantic-versioning CHANNEL_ID [flags] +``` + +### Examples + +``` +replicated channel disable-semantic-versioning CHANNEL_ID +``` + +### Options + +``` + -h, --help help for disable-semantic-versioning +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated channel](replicated-cli-channel) - List channels + +================ +File: docs/reference/replicated-cli-channel-enable-semantic-versioning.mdx +================ +# replicated channel enable-semantic-versioning + +Enable semantic versioning for CHANNEL_ID + +### Synopsis + +Enable semantic versioning for the CHANNEL_ID. + +``` +replicated channel enable-semantic-versioning CHANNEL_ID [flags] +``` + +### Examples + +``` +replicated channel enable-semantic-versioning CHANNEL_ID +``` + +### Options + +``` + -h, --help help for enable-semantic-versioning +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated channel](replicated-cli-channel) - List channels + +================ +File: docs/reference/replicated-cli-channel-inspect.mdx +================ +# replicated channel inspect + +Show full details for a channel + +### Synopsis + +Show full details for a channel + +``` +replicated channel inspect CHANNEL_ID [flags] +``` + +### Options + +``` + -h, --help help for inspect + --output string The output format to use. One of: json|table (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated channel](replicated-cli-channel) - List channels + +================ +File: docs/reference/replicated-cli-channel-ls.mdx +================ +# replicated channel ls + +List all channels in your app + +### Synopsis + +List all channels in your app + +``` +replicated channel ls [flags] +``` + +### Aliases + +``` +ls, list +``` + +### Options + +``` + -h, --help help for ls + --output string The output format to use. 
One of: json|table (default: table) (default "table")
```

### Options inherited from parent commands

```
 --app string The app slug or app id to use in all calls
 --token string The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated channel](replicated-cli-channel) - List channels

================
File: docs/reference/replicated-cli-channel-rm.mdx
================
# replicated channel rm

Remove (archive) a channel

### Synopsis

Remove (archive) a channel

```
replicated channel rm CHANNEL_ID [flags]
```

### Aliases

```
rm, delete
```

### Options

```
 -h, --help help for rm
```

### Options inherited from parent commands

```
 --app string The app slug or app id to use in all calls
 --token string The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated channel](replicated-cli-channel) - List channels

================
File: docs/reference/replicated-cli-channel-un-demote.mdx
================
# replicated channel un-demote

Un-demote a release from a channel

### Synopsis

Un-demote a channel release from a channel using a channel sequence or release sequence.

```
replicated channel un-demote CHANNEL_ID_OR_NAME [flags]
```

### Examples

```
 # Un-demote a release from a channel by channel sequence
 replicated channel un-demote Beta --channel-sequence 15

 # Un-demote a release from a channel by release sequence
 replicated channel un-demote Beta --release-sequence 12
```

### Options

```
 --channel-sequence int The channel sequence to un-demote
 -h, --help help for un-demote
 --release-sequence int The release sequence to un-demote
```

### Options inherited from parent commands

```
 --app string The app slug or app id to use in all calls
 --token string The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated channel](replicated-cli-channel) - List channels

================
File: docs/reference/replicated-cli-channel.mdx
================
# replicated channel

List channels

### Synopsis

List channels

### Options

```
 -h, --help help for channel
```

### Options inherited from parent commands

```
 --app string The app slug or app id to use in all calls
 --token string The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated
* [replicated channel create](replicated-cli-channel-create) - Create a new channel in your app
* [replicated channel demote](replicated-cli-channel-demote) - Demote a release from a channel
* [replicated channel disable-semantic-versioning](replicated-cli-channel-disable-semantic-versioning) - Disable semantic versioning for CHANNEL_ID
* [replicated channel enable-semantic-versioning](replicated-cli-channel-enable-semantic-versioning) - Enable semantic versioning for CHANNEL_ID
* [replicated channel inspect](replicated-cli-channel-inspect) - Show full details for a channel
* [replicated channel ls](replicated-cli-channel-ls) - List all channels in your app
* [replicated channel rm](replicated-cli-channel-rm) - Remove (archive) a channel
* [replicated channel un-demote](replicated-cli-channel-un-demote) - Un-demote a release from a channel

================
File: docs/reference/replicated-cli-cluster-addon-create-object-store.mdx
================
# replicated cluster addon create object-store

Create an object store bucket for a cluster.

### Synopsis

Creates an object store bucket for a cluster, requiring a bucket name prefix. The bucket name will be auto-generated using the format "[BUCKET_PREFIX]-[ADDON_ID]-cmx". This feature provisions an object storage bucket that can be used for storage in your cluster environment.

```
replicated cluster addon create object-store CLUSTER_ID --bucket-prefix BUCKET_PREFIX [flags]
```

### Examples

```
# Create an object store bucket with a specified prefix
replicated cluster addon create object-store 05929b24 --bucket-prefix mybucket

# Create an object store bucket and wait for it to be ready (up to 5 minutes)
replicated cluster addon create object-store 05929b24 --bucket-prefix mybucket --wait 5m

# Perform a dry run to validate inputs without creating the bucket
replicated cluster addon create object-store 05929b24 --bucket-prefix mybucket --dry-run

# Create an object store bucket and output the result in JSON format
replicated cluster addon create object-store 05929b24 --bucket-prefix mybucket --output json

# Create an object store bucket with a custom prefix and wait for 10 minutes
replicated cluster addon create object-store 05929b24 --bucket-prefix custom-prefix --wait 10m
```

### Options

```
 --bucket-prefix string A prefix for the bucket name to be created (required)
 --dry-run Simulate creation to verify that your inputs are valid without actually creating an add-on
 -h, --help help for object-store
 --output string The output format to use. One of: json|table|wide (default: table) (default "table")
 --wait duration Wait duration for add-on to be ready before exiting (leave empty to not wait)
```

### Options inherited from parent commands

```
 --app string The app slug or app id to use in all calls
 --token string The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated cluster addon create](replicated-cli-cluster-addon-create) - Create cluster add-ons.

================
File: docs/reference/replicated-cli-cluster-addon-create.mdx
================
# replicated cluster addon create

Create cluster add-ons.

### Synopsis

Create new add-ons for a cluster. This command allows you to add functionality or services to a cluster by provisioning the required add-ons.

### Examples

```
# Create an object store bucket add-on for a cluster
replicated cluster addon create object-store CLUSTER_ID --bucket-prefix mybucket

# Perform a dry run for creating an object store add-on
replicated cluster addon create object-store CLUSTER_ID --bucket-prefix mybucket --dry-run
```

### Options

```
 -h, --help help for create
```

### Options inherited from parent commands

```
 --app string The app slug or app id to use in all calls
 --token string The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated cluster addon](replicated-cli-cluster-addon) - Manage cluster add-ons.
* [replicated cluster addon create object-store](replicated-cli-cluster-addon-create-object-store) - Create an object store bucket for a cluster.

================
File: docs/reference/replicated-cli-cluster-addon-ls.mdx
================
# replicated cluster addon ls

List cluster add-ons for a cluster.

### Synopsis

The 'cluster addon ls' command allows you to list all add-ons for a specific cluster.
This command provides a detailed overview of the add-ons currently installed on the cluster, including their status and any relevant configuration details. + +This can be useful for monitoring the health and configuration of add-ons or performing troubleshooting tasks. + +``` +replicated cluster addon ls CLUSTER_ID [flags] +``` + +### Aliases + +``` +ls, list +``` + +### Examples + +``` +# List add-ons for a cluster with default table output +replicated cluster addon ls CLUSTER_ID + +# List add-ons for a cluster with JSON output +replicated cluster addon ls CLUSTER_ID --output json + +# List add-ons for a cluster with wide table output +replicated cluster addon ls CLUSTER_ID --output wide +``` + +### Options + +``` + -h, --help help for ls + --output string The output format to use. One of: json|table|wide (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster addon](replicated-cli-cluster-addon) - Manage cluster add-ons. + +================ +File: docs/reference/replicated-cli-cluster-addon-rm.mdx +================ +# replicated cluster addon rm + +Remove cluster add-on by ID. + +### Synopsis + +The 'cluster addon rm' command allows you to remove a specific add-on from a cluster by specifying the cluster ID and the add-on ID. + +This command is useful when you want to deprovision an add-on that is no longer needed or when troubleshooting issues related to specific add-ons. The add-on will be removed immediately, and you will receive confirmation upon successful removal. + +``` +replicated cluster addon rm CLUSTER_ID --id ADDON_ID [flags] +``` + +### Aliases + +``` +rm, delete +``` + +### Examples + +``` +# Remove an add-on with ID 'abc123' from cluster 'cluster456' +replicated cluster addon rm cluster456 --id abc123 +``` + +### Options + +``` + -h, --help help for rm + --id string The ID of the cluster add-on to remove (required) +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster addon](replicated-cli-cluster-addon) - Manage cluster add-ons. + +================ +File: docs/reference/replicated-cli-cluster-addon.mdx +================ +# replicated cluster addon + +Manage cluster add-ons. + +### Synopsis + +The 'cluster addon' command allows you to manage add-ons installed on a test cluster. Add-ons are additional components or services that can be installed and configured to enhance or extend the functionality of the cluster. + +You can use various subcommands to create, list, remove, or check the status of add-ons on a cluster. This command is useful for adding databases, object storage, monitoring, security, or other specialized tools to your cluster environment. 
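
Taken together, a typical add-on lifecycle might look like the following sketch (the cluster and add-on IDs are illustrative):

```
# Provision an object store bucket and wait up to 5 minutes for it to become ready
replicated cluster addon create object-store 05929b24 --bucket-prefix mybucket --wait 5m

# Confirm the add-on is installed and healthy
replicated cluster addon ls 05929b24

# Remove the add-on once it is no longer needed
replicated cluster addon rm 05929b24 --id abc123
```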
+ +### Examples + +``` +# List all add-ons installed on a cluster +replicated cluster addon ls CLUSTER_ID + +# Remove an add-on from a cluster +replicated cluster addon rm CLUSTER_ID --id ADDON_ID + +# Create an object store bucket add-on for a cluster +replicated cluster addon create object-store CLUSTER_ID --bucket-prefix mybucket + +# List add-ons with JSON output +replicated cluster addon ls CLUSTER_ID --output json +``` + +### Options + +``` + -h, --help help for addon +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. +* [replicated cluster addon create](replicated-cli-cluster-addon-create) - Create cluster add-ons. +* [replicated cluster addon ls](replicated-cli-cluster-addon-ls) - List cluster add-ons for a cluster. +* [replicated cluster addon rm](replicated-cli-cluster-addon-rm) - Remove cluster add-on by ID. + +================ +File: docs/reference/replicated-cli-cluster-create.mdx +================ +# replicated cluster create + +Create test clusters. + +### Synopsis + +The 'cluster create' command provisions a new test cluster with the specified Kubernetes distribution and configuration. You can customize the cluster's size, version, node groups, disk space, IP family, and other parameters. + +This command supports creating clusters on multiple Kubernetes distributions, including setting up node groups with different instance types and counts. You can also specify a TTL (Time-To-Live) to automatically terminate the cluster after a set duration. + +Use the '--dry-run' flag to simulate the creation process and get an estimated cost without actually provisioning the cluster. 
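
In CI, a common pattern is to capture the new cluster's ID from the JSON output for use in later steps; a sketch, assuming jq is available and that the JSON payload exposes an 'id' field:

```
# Hypothetical: create a cluster, capture its ID, then fetch its kubeconfig
CLUSTER_ID=$(replicated cluster create --distribution eks --version 1.31 --nodes 3 --ttl 4h --output json | jq -r '.id')
replicated cluster kubeconfig "$CLUSTER_ID"
```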
+ +``` +replicated cluster create [flags] +``` + +### Examples + +``` +# Create a new cluster with basic configuration +replicated cluster create --distribution eks --version 1.21 --nodes 3 --instance-type t3.large --disk 100 --ttl 24h + +# Create a cluster with a custom node group +replicated cluster create --distribution eks --version 1.21 --nodegroup name=workers,instance-type=t3.large,nodes=5 --ttl 24h + +# Simulate cluster creation (dry-run) +replicated cluster create --distribution eks --version 1.21 --nodes 3 --disk 100 --ttl 24h --dry-run + +# Create a cluster with autoscaling configuration +replicated cluster create --distribution eks --version 1.21 --min-nodes 2 --max-nodes 5 --instance-type t3.large --ttl 24h + +# Create a cluster with multiple node groups +replicated cluster create --distribution eks --version 1.21 \ +--nodegroup name=workers,instance-type=t3.large,nodes=3 \ +--nodegroup name=cpu-intensive,instance-type=c5.2xlarge,nodes=2 \ +--ttl 24h + +# Create a cluster with custom tags +replicated cluster create --distribution eks --version 1.21 --nodes 3 --tag env=test --tag project=demo --ttl 24h + +# Create a cluster with addons +replicated cluster create --distribution eks --version 1.21 --nodes 3 --addon object-store --ttl 24h +``` + +### Options + +``` + --addon stringArray Addons to install on the cluster (can be specified multiple times) + --bucket-prefix string A prefix for the bucket name to be created (required by '--addon object-store') + --disk int Disk Size (GiB) to request per node (default 50) + --distribution string Kubernetes distribution of the cluster to provision + --dry-run Dry run + -h, --help help for create + --instance-type string The type of instance to use (e.g. m6i.large) + --ip-family string IP Family to use for the cluster (ipv4|ipv6|dual). + --license-id string License ID to use for the installation (required for Embedded Cluster distribution) + --max-nodes string Maximum Node count (non-negative number) (only for EKS, AKS and GKE clusters). + --min-nodes string Minimum Node count (non-negative number) (only for EKS, AKS and GKE clusters). + --name string Cluster name (defaults to random name) + --nodegroup stringArray Node group to create (name=?,instance-type=?,nodes=?,min-nodes=?,max-nodes=?,disk=? format, can be specified multiple times). For each nodegroup, at least one flag must be specified. The flags min-nodes and max-nodes are mutually dependent. + --nodes int Node count (default 1) + --output string The output format to use. One of: json|table|wide (default: table) (default "table") + --tag stringArray Tag to apply to the cluster (key=value format, can be specified multiple times) + --ttl string Cluster TTL (duration, max 48h) + --version string Kubernetes version to provision (format is distribution dependent) + --wait duration Wait duration for cluster to be ready (leave empty to not wait) +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. + +================ +File: docs/reference/replicated-cli-cluster-kubeconfig.mdx +================ +# replicated cluster kubeconfig + +Download credentials for a test cluster. + +### Synopsis + +The 'cluster kubeconfig' command downloads the credentials (kubeconfig) required to access a test cluster. 
You can either merge these credentials into your existing kubeconfig file or save them as a new file. + +This command ensures that the kubeconfig is correctly configured for use with your Kubernetes tools. You can specify the cluster by ID or by name. Additionally, the kubeconfig can be written to a specific file path or printed to stdout. + +You can also use this command to automatically update your current Kubernetes context with the downloaded credentials. + +``` +replicated cluster kubeconfig [ID] [flags] +``` + +### Examples + +``` +# Download and merge kubeconfig into your existing configuration +replicated cluster kubeconfig CLUSTER_ID + +# Save the kubeconfig to a specific file +replicated cluster kubeconfig CLUSTER_ID --output-path ./kubeconfig + +# Print the kubeconfig to stdout +replicated cluster kubeconfig CLUSTER_ID --stdout + +# Download kubeconfig for a cluster by name +replicated cluster kubeconfig --name "My Cluster" + +# Download kubeconfig for a cluster by ID +replicated cluster kubeconfig --id CLUSTER_ID +``` + +### Options + +``` + -h, --help help for kubeconfig + --id string id of the cluster to download credentials for (when name is not provided) + --name string name of the cluster to download credentials for (when id is not provided) + --output-path string path to kubeconfig file to write to, if not provided, it will be merged into your existing kubeconfig + --stdout write kubeconfig to stdout +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. + +================ +File: docs/reference/replicated-cli-cluster-ls.mdx +================ +# replicated cluster ls + +List test clusters. + +### Synopsis + +The 'cluster ls' command lists all test clusters. This command provides information about the clusters, such as their status, name, distribution, version, and creation time. The output can be formatted in different ways, depending on your needs. + +You can filter the list of clusters by time range and status (e.g., show only terminated clusters). You can also watch clusters in real-time, which updates the list every few seconds. + +Clusters that have been deleted will be shown with a 'deleted' status. + +``` +replicated cluster ls [flags] +``` + +### Aliases + +``` +ls, list +``` + +### Examples + +``` +# List all clusters with default table output +replicated cluster ls + +# Show clusters created after a specific date +replicated cluster ls --start-time 2023-01-01T00:00:00Z + +# Watch for real-time updates +replicated cluster ls --watch + +# List clusters with JSON output +replicated cluster ls --output json + +# List only terminated clusters +replicated cluster ls --show-terminated + +# List clusters with wide table output +replicated cluster ls --output wide +``` + +### Options + +``` + --end-time string end time for the query (Format: 2006-01-02T15:04:05Z) + -h, --help help for ls + --output string The output format to use. 
One of: json|table|wide (default: table) (default "table") + --show-terminated when set, only show terminated clusters + --start-time string start time for the query (Format: 2006-01-02T15:04:05Z) + -w, --watch watch clusters +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. + +================ +File: docs/reference/replicated-cli-cluster-nodegroup-ls.mdx +================ +# replicated cluster nodegroup ls + +List node groups for a cluster. + +### Synopsis + +The 'cluster nodegroup ls' command lists all the node groups associated with a given cluster. Each node group defines a specific set of nodes with particular configurations, such as instance types and scaling options. + +You can view information about the node groups within the specified cluster, including their ID, name, node count, and other configuration details. + +You must provide the cluster ID to list its node groups. + +``` +replicated cluster nodegroup ls [ID] [flags] +``` + +### Aliases + +``` +ls, list +``` + +### Examples + +``` +# List all node groups in a cluster with default table output +replicated cluster nodegroup ls CLUSTER_ID + +# List node groups with JSON output +replicated cluster nodegroup ls CLUSTER_ID --output json + +# List node groups with wide table output +replicated cluster nodegroup ls CLUSTER_ID --output wide +``` + +### Options + +``` + -h, --help help for ls + --output string The output format to use. One of: json|table|wide (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster nodegroup](replicated-cli-cluster-nodegroup) - Manage node groups for clusters. + +================ +File: docs/reference/replicated-cli-cluster-nodegroup.mdx +================ +# replicated cluster nodegroup + +Manage node groups for clusters. + +### Synopsis + +The 'cluster nodegroup' command provides functionality to manage node groups within a cluster. This command allows you to list node groups in a Kubernetes or VM-based cluster. + +Node groups define a set of nodes with specific configurations, such as instance types, node counts, or scaling rules. You can use subcommands to perform various actions on node groups. + +### Examples + +``` +# List all node groups for a cluster +replicated cluster nodegroup ls CLUSTER_ID +``` + +### Options + +``` + -h, --help help for nodegroup +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. +* [replicated cluster nodegroup ls](replicated-cli-cluster-nodegroup-ls) - List node groups for a cluster. + +================ +File: docs/reference/replicated-cli-cluster-port-expose.mdx +================ +# replicated cluster port expose + +Expose a port on a cluster to the public internet. + +### Synopsis + +The 'cluster port expose' command is used to expose a specified port on a cluster to the public internet. 
When exposing a port, the command automatically creates a DNS entry and, if using the "https" protocol, provisions a TLS certificate for secure communication. + +You can also create a wildcard DNS entry and TLS certificate by specifying the "--wildcard" flag. Please note that creating a wildcard certificate may take additional time. + +This command supports different protocols including "http", "https", "ws", and "wss" for web traffic and web socket communication. + +NOTE: Currently, this feature only supports VM-based cluster distributions. + +``` +replicated cluster port expose CLUSTER_ID --port PORT [flags] +``` + +### Examples + +``` +# Expose port 8080 with HTTPS protocol and wildcard DNS +replicated cluster port expose CLUSTER_ID --port 8080 --protocol https --wildcard + +# Expose port 3000 with HTTP protocol +replicated cluster port expose CLUSTER_ID --port 3000 --protocol http + +# Expose port 8080 with multiple protocols +replicated cluster port expose CLUSTER_ID --port 8080 --protocol http,https + +# Expose port 8080 and display the result in JSON format +replicated cluster port expose CLUSTER_ID --port 8080 --protocol https --output json +``` + +### Options + +``` + -h, --help help for expose + --output string The output format to use. One of: json|table|wide (default: table) (default "table") + --port int Port to expose (required) + --protocol strings Protocol to expose (valid values are "http", "https", "ws" and "wss") (default [http,https]) + --wildcard Create a wildcard DNS entry and TLS certificate for this port +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster port](replicated-cli-cluster-port) - Manage cluster ports. + +================ +File: docs/reference/replicated-cli-cluster-port-ls.mdx +================ +# replicated cluster port ls + +List cluster ports for a cluster. + +### Synopsis + +The 'cluster port ls' command lists all the ports configured for a specific cluster. You must provide the cluster ID to retrieve and display the ports. + +This command is useful for viewing the current port configurations, protocols, and other related settings of your test cluster. The output format can be customized to suit your needs, and the available formats include table, JSON, and wide views. + +``` +replicated cluster port ls CLUSTER_ID [flags] +``` + +### Aliases + +``` +ls, list +``` + +### Examples + +``` +# List ports for a cluster in the default table format +replicated cluster port ls CLUSTER_ID + +# List ports for a cluster in JSON format +replicated cluster port ls CLUSTER_ID --output json + +# List ports for a cluster in wide format +replicated cluster port ls CLUSTER_ID --output wide +``` + +### Options + +``` + -h, --help help for ls + --output string The output format to use. One of: json|table|wide (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster port](replicated-cli-cluster-port) - Manage cluster ports. + +================ +File: docs/reference/replicated-cli-cluster-port-rm.mdx +================ +# replicated cluster port rm + +Remove cluster port by ID. + +### Synopsis + +The 'cluster port rm' command removes a specific port from a cluster. 
You must provide either the ID of the port or the port number and protocol(s) to remove. + +This command is useful for managing the network settings of your test clusters by allowing you to clean up unused or incorrect ports. After removing a port, the updated list of ports will be displayed. + +Note that you can only use either the port ID or port number when removing a port, not both at the same time. + +``` +replicated cluster port rm CLUSTER_ID --id PORT_ID [flags] +``` + +### Aliases + +``` +rm, delete +``` + +### Examples + +``` +# Remove a port using its ID +replicated cluster port rm CLUSTER_ID --id PORT_ID + +# Remove a port using its number (deprecated) +replicated cluster port rm CLUSTER_ID --port 8080 --protocol http,https + +# Remove a port and display the result in JSON format +replicated cluster port rm CLUSTER_ID --id PORT_ID --output json +``` + +### Options + +``` + -h, --help help for rm + --id string ID of the port to remove (required) + --output string The output format to use. One of: json|table|wide (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster port](replicated-cli-cluster-port) - Manage cluster ports. + +================ +File: docs/reference/replicated-cli-cluster-port.mdx +================ +# replicated cluster port + +Manage cluster ports. + +### Synopsis + +The 'cluster port' command is a parent command for managing ports in a cluster. It allows users to list, remove, or expose specific ports used by the cluster. Use the subcommands (such as 'ls', 'rm', and 'expose') to manage port configurations effectively. + +This command provides flexibility for handling ports in various test clusters, ensuring efficient management of cluster networking settings. + +### Examples + +``` +# List all exposed ports in a cluster +replicated cluster port ls [CLUSTER_ID] + +# Remove an exposed port from a cluster +replicated cluster port rm [CLUSTER_ID] [PORT] + +# Expose a new port in a cluster +replicated cluster port expose [CLUSTER_ID] [PORT] +``` + +### Options + +``` + -h, --help help for port +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. +* [replicated cluster port expose](replicated-cli-cluster-port-expose) - Expose a port on a cluster to the public internet. +* [replicated cluster port ls](replicated-cli-cluster-port-ls) - List cluster ports for a cluster. +* [replicated cluster port rm](replicated-cli-cluster-port-rm) - Remove cluster port by ID. + +================ +File: docs/reference/replicated-cli-cluster-prepare.mdx +================ +# replicated cluster prepare + +Prepare cluster for testing. + +### Synopsis + +The 'cluster prepare' command provisions a Kubernetes cluster and installs an application using a Helm chart or KOTS YAML configuration. + +This command is designed to be used in CI environments to prepare a cluster for testing by deploying a Helm chart or KOTS application with entitlements and custom values. You can specify the cluster configuration, such as the Kubernetes distribution, version, node count, and instance type, and then install your application automatically. 
+

Alternatively, if you prefer deploying KOTS applications, you can specify YAML manifests for the release and use the '--shared-password' flag for the KOTS admin console.

You can also pass entitlement values to configure the cluster's customer entitlements.

Note:
- The '--chart' flag cannot be used with '--yaml', '--yaml-file', or '--yaml-dir'.
- If deploying a Helm chart, use the '--set' flags to pass chart values. When deploying a KOTS application, the '--shared-password' flag is required.

```
replicated cluster prepare [flags]
```

### Examples

```
replicated cluster prepare --distribution eks --version 1.27 --instance-type c6.xlarge --node-count 3 --chart ./your-chart.tgz --values ./values.yaml --set chart-key=value --set chart-key2=value2
```

### Options

```
 --app-ready-timeout duration Timeout to wait for the application to be ready. Must be in Go duration format (e.g., 10s, 2m). (default 5m0s)
 --chart string Path to the helm chart package to deploy
 --cluster-id string The ID of an existing cluster to use instead of creating a new one.
 --config-values-file string Path to a manifest containing config values (must be apiVersion: kots.io/v1beta1, kind: ConfigValues).
 --disk int Disk Size (GiB) to request per node. (default 50)
 --distribution string Kubernetes distribution of the cluster to provision
 --entitlements strings The entitlements to set on the customer. Can be specified multiple times.
 -h, --help help for prepare
 --instance-type string The type of instance to use for clusters (e.g. x5.xlarge)
 --name string Cluster name
 --namespace string The namespace into which to deploy the KOTS application or Helm chart. (default "default")
 --node-count int Node count. (default 1)
 --set stringArray Set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2).
 --set-file stringArray Set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2).
 --set-json stringArray Set JSON values on the command line (can specify multiple or separate values with commas: key1=jsonval1,key2=jsonval2).
 --set-literal stringArray Set a literal STRING value on the command line.
 --set-string stringArray Set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2).
 --shared-password string Shared password for the KOTS admin console.
 --ttl string Cluster TTL (duration, max 48h)
 --values strings Specify values in a YAML file or a URL (can specify multiple).
 --version string Kubernetes version to provision (format is distribution dependent)
 --wait duration Wait duration for cluster to be ready. (default 5m0s)
 --yaml string The YAML config for this release. Use '-' to read from stdin. Cannot be used with the --yaml-file flag.
 --yaml-dir string The directory containing multiple yamls for a KOTS release. Cannot be used with the --yaml flag.
 --yaml-file string The YAML config for this release. Cannot be used with the --yaml flag.
```

### Options inherited from parent commands

```
 --app string The app slug or app id to use in all calls
 --token string The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters.

================
File: docs/reference/replicated-cli-cluster-rm.mdx
================
# replicated cluster rm

Remove test clusters.
+ +### Synopsis + +The 'rm' command removes test clusters immediately. + +You can remove clusters by specifying a cluster ID, or by using other criteria such as cluster names or tags. Alternatively, you can remove all clusters in your account at once. + +This command can also be used in a dry-run mode to simulate the removal without actually deleting anything. + +You cannot mix the use of cluster IDs with other options like removing by name, tag, or removing all clusters at once. + +``` +replicated cluster rm ID [ID …] [flags] +``` + +### Aliases + +``` +rm, delete +``` + +### Examples + +``` +# Remove a specific cluster by ID +replicated cluster rm CLUSTER_ID + +# Remove all clusters +replicated cluster rm --all +``` + +### Options + +``` + --all remove all clusters + --dry-run Dry run + -h, --help help for rm + --name stringArray Name of the cluster to remove (can be specified multiple times) + --tag stringArray Tag of the cluster to remove (key=value format, can be specified multiple times) +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. + +================ +File: docs/reference/replicated-cli-cluster-shell.mdx +================ +# replicated cluster shell + +Open a new shell with kubeconfig configured. + +### Synopsis + +The 'shell' command opens a new shell session with the kubeconfig configured for the specified test cluster. This allows you to have immediate kubectl access to the cluster within the shell environment. + +You can either specify the cluster ID directly or provide the cluster name to resolve the corresponding cluster ID. The shell will inherit your existing environment and add the necessary kubeconfig context for interacting with the Kubernetes cluster. + +Once inside the shell, you can use 'kubectl' to interact with the cluster. To exit the shell, press Ctrl-D or type 'exit'. When the shell closes, the kubeconfig will be reset back to your default configuration. + +``` +replicated cluster shell [ID] [flags] +``` + +### Examples + +``` +# Open a shell for a cluster by ID +replicated cluster shell CLUSTER_ID + +# Open a shell for a cluster by name +replicated cluster shell --name "My Cluster" +``` + +### Options + +``` + -h, --help help for shell + --id string id of the cluster to have kubectl access to (when name is not provided) + --name string name of the cluster to have kubectl access to. +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. + +================ +File: docs/reference/replicated-cli-cluster-update-nodegroup.mdx +================ +# replicated cluster update nodegroup + +Update a nodegroup for a test cluster. + +### Synopsis + +The 'nodegroup' command allows you to update the configuration of a nodegroup within a test cluster. You can update attributes like the number of nodes, minimum and maximum node counts for autoscaling, and more. + +If you do not provide the nodegroup ID, the command will try to resolve it based on the nodegroup name provided. 
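
For example, resolving the nodegroup by its name rather than its ID might look like this (the name and count are illustrative):

```
# Scale the nodegroup named "workers" to 4 nodes, letting the CLI resolve its ID
replicated cluster update nodegroup CLUSTER_ID --nodegroup-name workers --nodes 4
```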
+ +``` +replicated cluster update nodegroup [ID] [flags] +``` + +### Examples + +``` +# Update the number of nodes in a nodegroup +replicated cluster update nodegroup CLUSTER_ID --nodegroup-id NODEGROUP_ID --nodes 3 + +# Update the autoscaling limits for a nodegroup +replicated cluster update nodegroup CLUSTER_ID --nodegroup-id NODEGROUP_ID --min-nodes 2 --max-nodes 5 +``` + +### Options + +``` + -h, --help help for nodegroup + --max-nodes string The maximum number of nodes in the nodegroup + --min-nodes string The minimum number of nodes in the nodegroup + --nodegroup-id string The ID of the nodegroup to update + --nodegroup-name string The name of the nodegroup to update + --nodes int The number of nodes in the nodegroup + --output string The output format to use. One of: json|table|wide (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --id string id of the cluster to update (when name is not provided) + --name string Name of the cluster to update. + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster update](replicated-cli-cluster-update) - Update cluster settings. + +================ +File: docs/reference/replicated-cli-cluster-update-ttl.mdx +================ +# replicated cluster update ttl + +Update TTL for a test cluster. + +### Synopsis + +The 'ttl' command allows you to update the Time-To-Live (TTL) of a test cluster. The TTL represents the duration for which the cluster will remain active before it is automatically terminated. The duration starts from the moment the cluster becomes active. You must provide a valid duration, with a maximum limit of 48 hours. + +``` +replicated cluster update ttl [ID] [flags] +``` + +### Examples + +``` +# Update the TTL for a specific cluster +replicated cluster update ttl CLUSTER_ID --ttl 24h +``` + +### Options + +``` + -h, --help help for ttl + --output string The output format to use. One of: json|table|wide (default: table) (default "table") + --ttl string Update TTL which starts from the moment the cluster is running (duration, max 48h). +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --id string id of the cluster to update (when name is not provided) + --name string Name of the cluster to update. + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster update](replicated-cli-cluster-update) - Update cluster settings. + +================ +File: docs/reference/replicated-cli-cluster-update.mdx +================ +# replicated cluster update + +Update cluster settings. + +### Synopsis + +The 'update' command allows you to update various settings of a test cluster, such as its name or ID. + +You can either specify the cluster ID directly or provide the cluster name, and the command will resolve the corresponding cluster ID. This allows you to modify the cluster's configuration based on the unique identifier or the name of the cluster. + +### Examples + +``` +# Update a cluster using its ID +replicated cluster update --id <cluster-id> [subcommand] + +# Update a cluster using its name +replicated cluster update --name <cluster-name> [subcommand] +``` + +### Options + +``` + -h, --help help for update + --id string id of the cluster to update (when name is not provided) + --name string Name of the cluster to update. 
+``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. +* [replicated cluster update nodegroup](replicated-cli-cluster-update-nodegroup) - Update a nodegroup for a test cluster. +* [replicated cluster update ttl](replicated-cli-cluster-update-ttl) - Update TTL for a test cluster. + +================ +File: docs/reference/replicated-cli-cluster-upgrade.mdx +================ +# replicated cluster upgrade + +Upgrade a test cluster. + +### Synopsis + +The 'upgrade' command upgrades a Kubernetes test cluster to a specified version. You must provide a cluster ID and the version to upgrade to. The upgrade can be simulated with a dry-run option, or you can choose to wait for the cluster to be fully upgraded. + +``` +replicated cluster upgrade [ID] [flags] +``` + +### Examples + +``` +# Upgrade a cluster to a new Kubernetes version +replicated cluster upgrade [CLUSTER_ID] --version 1.31 + +# Perform a dry run of a cluster upgrade without making any changes +replicated cluster upgrade [CLUSTER_ID] --version 1.31 --dry-run + +# Upgrade a cluster and wait for it to be ready +replicated cluster upgrade [CLUSTER_ID] --version 1.31 --wait 30m +``` + +### Options + +``` + --dry-run Dry run + -h, --help help for upgrade + --output string The output format to use. One of: json|table|wide (default: table) (default "table") + --version string Kubernetes version to upgrade to (format is distribution dependent) + --wait duration Wait duration for cluster to be ready (leave empty to not wait) +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. + +================ +File: docs/reference/replicated-cli-cluster-versions.mdx +================ +# replicated cluster versions + +List cluster versions. + +### Synopsis + +The 'versions' command lists available Kubernetes versions for supported distributions. You can filter the versions by specifying a distribution and choose between different output formats. + +``` +replicated cluster versions [flags] +``` + +### Examples + +``` +# List all available Kubernetes cluster versions +replicated cluster versions + +# List available versions for a specific distribution (e.g., eks) +replicated cluster versions --distribution eks + +# Output the versions in JSON format +replicated cluster versions --output json +``` + +### Options + +``` + --distribution string Kubernetes distribution to filter by. + -h, --help help for versions + --output string The output format to use. One of: json|table (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. + +================ +File: docs/reference/replicated-cli-cluster.mdx +================ +# replicated cluster + +Manage test Kubernetes clusters. + +### Synopsis + +The 'cluster' command allows you to manage and interact with Kubernetes clusters used for testing purposes. 
With this command, you can create, list, remove, and manage node groups within clusters, as well as retrieve information about available clusters. + +### Examples + +``` +# Create a single-node EKS cluster +replicated cluster create --distribution eks --version 1.31 + +# List all clusters +replicated cluster ls + +# Remove a specific cluster by ID +replicated cluster rm <cluster-id> + +# List all nodegroups in a specific cluster +replicated cluster nodegroup ls <cluster-id> +``` + +### Options + +``` + -h, --help help for cluster +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated +* [replicated cluster addon](replicated-cli-cluster-addon) - Manage cluster add-ons. +* [replicated cluster create](replicated-cli-cluster-create) - Create test clusters. +* [replicated cluster kubeconfig](replicated-cli-cluster-kubeconfig) - Download credentials for a test cluster. +* [replicated cluster ls](replicated-cli-cluster-ls) - List test clusters. +* [replicated cluster nodegroup](replicated-cli-cluster-nodegroup) - Manage node groups for clusters. +* [replicated cluster port](replicated-cli-cluster-port) - Manage cluster ports. +* [replicated cluster prepare](replicated-cli-cluster-prepare) - Prepare cluster for testing. +* [replicated cluster rm](replicated-cli-cluster-rm) - Remove test clusters. +* [replicated cluster shell](replicated-cli-cluster-shell) - Open a new shell with kubeconfig configured. +* [replicated cluster update](replicated-cli-cluster-update) - Update cluster settings. +* [replicated cluster upgrade](replicated-cli-cluster-upgrade) - Upgrade a test cluster. +* [replicated cluster versions](replicated-cli-cluster-versions) - List cluster versions. + +================ +File: docs/reference/replicated-cli-completion.mdx +================ +# replicated completion + +Generate completion script + +``` +replicated completion [bash|zsh|fish|powershell] +``` + +### Examples + +``` +To load completions: + +Bash: + + This script depends on the 'bash-completion' package. + If it is not installed already, you can install it via your OS's package manager. + + $ source <(replicated completion bash) + + # To load completions for each session, execute once: + # Linux: + $ replicated completion bash > /etc/bash_completion.d/replicated + # macOS: + $ replicated completion bash > $(brew --prefix)/etc/bash_completion.d/replicated + +Zsh: + + # If shell completion is not already enabled in your environment, + # you will need to enable it. You can execute the following once: + + $ echo "autoload -U compinit; compinit" >> ~/.zshrc + + # To load completions for each session, execute once: + $ replicated completion zsh > "${fpath[1]}/_replicated" + + # You will need to start a new shell for this setup to take effect. + +fish: + + $ replicated completion fish | source + + # To load completions for each session, execute once: + $ replicated completion fish > ~/.config/fish/completions/replicated.fish + +PowerShell: + + PS> replicated completion powershell | Out-String | Invoke-Expression + + # To load completions for every new session, run: + PS> replicated completion powershell > replicated.ps1 + # and source this file from your PowerShell profile. 
+ +``` + +### Options + +``` + -h, --help help for completion +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated + +================ +File: docs/reference/replicated-cli-customer-archive.mdx +================ +# replicated customer archive + +Archive a customer + +### Synopsis + +Archive a customer for the current application. + +This command allows you to archive a customer record. Archiving a customer +will make their license inactive and remove them from active customer lists. +This action is reversible - you can unarchive a customer later if needed. + +The customer can be specified by either their name or ID. + +``` +replicated customer archive <customer_name_or_id> [flags] +``` + +### Examples + +``` +# Archive a customer by name +replicated customer archive "Acme Inc" + +# Archive a customer by ID +replicated customer archive cus_abcdef123456 + +# Archive multiple customers by ID +replicated customer archive cus_abcdef123456 cus_xyz9876543210 + +# Archive a customer in a specific app (if you have multiple apps) +replicated customer archive --app myapp "Acme Inc" +``` + +### Options + +``` + --app string The app to archive the customer in (not required when using a customer id) + -h, --help help for archive +``` + +### Options inherited from parent commands + +``` + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated customer](replicated-cli-customer) - Manage customers + +================ +File: docs/reference/replicated-cli-customer-create.mdx +================ +# replicated customer create + +Create a new customer for the current application + +### Synopsis + +Create a new customer for the current application with specified attributes. + +This command allows you to create a customer record with various properties such as name, +custom ID, channels, license type, and feature flags. You can set expiration dates, +enable or disable specific features, and assign the customer to one or more channels. + +The --app flag must be set to specify the target application. + +``` +replicated customer create [flags] +``` + +### Examples + +``` +# Create a basic customer with a name and assigned to a channel +replicated customer create --app myapp --name "Acme Inc" --channel stable + +# Create a customer with multiple channels and a custom ID +replicated customer create --app myapp --name "Beta Corp" --custom-id "BETA123" --channel beta --channel stable + +# Create a paid customer with specific features enabled +replicated customer create --app myapp --name "Enterprise Ltd" --type paid --channel enterprise --airgap --snapshot + +# Create a trial customer with an expiration date +replicated customer create --app myapp --name "Trial User" --type trial --channel stable --expires-in 720h + +# Create a customer with all available options +replicated customer create --app myapp --name "Full Options Inc" --custom-id "FULL001" \ + --channel stable --channel beta --default-channel stable --type paid \ + --email "contact@fulloptions.com" --expires-in 8760h \ + --airgap --snapshot --kots-install --embedded-cluster-download \ + --support-bundle-upload --ensure-channel +``` + +### Options + +``` + --airgap If set, the license will allow airgap installs. 
+ --channel stringArray Release channel to which the customer should be assigned (can be specified multiple times) + --custom-id string Set a custom customer ID to more easily tie this customer record to your external data systems + --default-channel string Which of the specified channels should be the default channel. if not set, the first channel specified will be the default channel. + --developer-mode If set, Replicated SDK installed in dev mode will use mock data. + --email string Email address of the customer that is to be created. + --embedded-cluster-download If set, the license will allow embedded cluster downloads. + --ensure-channel If set, channel will be created if it does not exist. + --expires-in duration If set, an expiration date will be set on the license. Supports Go durations like '72h' or '3600m' + --geo-axis If set, the license will allow Geo Axis usage. + --gitops If set, the license will allow the GitOps usage. + --helm-install If set, the license will allow Helm installs. + --helmvm-cluster-download If set, the license will allow helmvm cluster downloads. + -h, --help help for create + --identity-service If set, the license will allow Identity Service usage. + --installer-support If set, the license will allow installer support. + --kots-install If set, the license will allow KOTS install. Otherwise license will allow Helm CLI installs only. (default true) + --kurl-install If set, the license will allow kURL installs. + --name string Name of the customer + --output string The output format to use. One of: json|table (default: table) (default "table") + --snapshot If set, the license will allow Snapshots. + --support-bundle-upload If set, the license will allow uploading support bundles. + --type string The license type to create. One of: dev|trial|paid|community|test (default: dev) (default "dev") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated customer](replicated-cli-customer) - Manage customers + +================ +File: docs/reference/replicated-cli-customer-download-license.mdx +================ +# replicated customer download-license + +Download a customer's license + +### Synopsis + +The download-license command allows you to retrieve and save a customer's license. + +This command fetches the license for a specified customer and either outputs it +to stdout or saves it to a file. The license contains crucial information about +the customer's subscription and usage rights. + +You must specify the customer using either their name or ID with the --customer flag. + +``` +replicated customer download-license [flags] +``` + +### Examples + +``` +# Download license for a customer by ID and output to stdout +replicated customer download-license --customer cus_abcdef123456 + +# Download license for a customer by name and save to a file +replicated customer download-license --customer "Acme Inc" --output license.yaml + +# Download license for a customer in a specific app (if you have multiple apps) +replicated customer download-license --app myapp --customer "Acme Inc" --output license.yaml +``` + +### Options + +``` + --customer string The Customer Name or ID + -h, --help help for download-license + -o, --output string Path to output license to. 
Defaults to stdout (default "-") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated customer](replicated-cli-customer) - Manage customers + +================ +File: docs/reference/replicated-cli-customer-inspect.mdx +================ +# replicated customer inspect + +Show detailed information about a specific customer + +### Synopsis + +The inspect command provides comprehensive details about a customer. + + This command retrieves and displays full information about a specified customer, + including their assigned channels, registry information, and other relevant attributes. + It's useful for getting an in-depth view of a customer's configuration and status. + + You must specify the customer using either their name or ID with the --customer flag. + +``` +replicated customer inspect [flags] +``` + +### Examples + +``` +# Inspect a customer by ID +replicated customer inspect --customer cus_abcdef123456 + +# Inspect a customer by name +replicated customer inspect --customer "Acme Inc" + +# Inspect a customer and output in JSON format +replicated customer inspect --customer cus_abcdef123456 --output json + +# Inspect a customer for a specific app (if you have multiple apps) +replicated customer inspect --app myapp --customer "Acme Inc" +``` + +### Options + +``` + --customer string The Customer Name or ID + -h, --help help for inspect + --output string The output format to use. One of: json|table (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated customer](replicated-cli-customer) - Manage customers + +================ +File: docs/reference/replicated-cli-customer-ls.mdx +================ +# replicated customer ls + +List customers for the current application + +### Synopsis + +List customers associated with the current application. + +This command displays information about customers linked to your application. +By default, it shows all non-test customers. You can use flags to: +- Filter customers by a specific app version +- Include test customers in the results +- Change the output format (table or JSON) + +The command requires an app to be set using the --app flag. 
+ 

```
replicated customer ls [flags]
```

### Aliases

```
ls, list
```

### Examples

```
# List all customers for the current application
replicated customer ls --app myapp

# Output results in JSON format
replicated customer ls --app myapp --output json

# Combine multiple flags
replicated customer ls --app myapp --include-test --output json
```

### Options

```
      --app-version string   Filter customers by a specific app version
  -h, --help                 help for ls
      --include-test         Include test customers in the results
      --output string        Output format: json|table (default: table) (default "table")
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated customer](replicated-cli-customer) - Manage customers

================
File: docs/reference/replicated-cli-customer-update.mdx
================
# replicated customer update

Update an existing customer

### Synopsis

Update an existing customer's information and settings.

This command allows you to modify various attributes of a customer, including their name,
custom ID, assigned channels, license type, and feature flags. You can update expiration dates,
enable or disable specific features, and change channel assignments.

The --customer flag is required to specify which customer to update.

```
replicated customer update --customer <id> --name <name> [options] [flags]
```

### Examples

```
# Update a customer's name
replicated customer update --customer cus_abcdef123456 --name "New Company Name"

# Change a customer's channel and make it the default
replicated customer update --customer cus_abcdef123456 --channel stable --default-channel stable

# Enable airgap installations for a customer
replicated customer update --customer cus_abcdef123456 --airgap

# Update multiple attributes at once
replicated customer update --customer cus_abcdef123456 --name "Updated Corp" --type paid --channel enterprise --airgap --snapshot

# Set an expiration date for a customer's license
replicated customer update --customer cus_abcdef123456 --expires-in 8760h

# Update a customer and output the result in JSON format
replicated customer update --customer cus_abcdef123456 --name "JSON Corp" --output json
```

### Options

```
      --airgap                      If set, the license will allow airgap installs.
      --channel stringArray         Release channel to which the customer should be assigned (can be specified multiple times)
      --custom-id string            Set a custom customer ID to more easily tie this customer record to your external data systems
      --customer string             The ID of the customer to update
      --default-channel string      Which of the specified channels should be the default channel. if not set, the first channel specified will be the default channel.
      --developer-mode              If set, Replicated SDK installed in dev mode will use mock data.
      --email string                Email address of the customer that is to be updated.
      --embedded-cluster-download   If set, the license will allow embedded cluster downloads.
      --ensure-channel              If set, channel will be created if it does not exist.
      --expires-in duration         If set, an expiration date will be set on the license. Supports Go durations like '72h' or '3600m'
      --geo-axis                    If set, the license will allow Geo Axis usage.
      --gitops                      If set, the license will allow the GitOps usage.
      --helm-install                If set, the license will allow Helm installs. 
+      --helmvm-cluster-download    If set, the license will allow helmvm cluster downloads.
  -h, --help                        help for update
      --identity-service            If set, the license will allow Identity Service usage.
      --kots-install                If set, the license will allow KOTS install. Otherwise license will allow Helm CLI installs only. (default true)
      --kurl-install                If set, the license will allow kURL installs.
      --name string                 Name of the customer
      --output string               The output format to use. One of: json|table (default: table) (default "table")
      --snapshot                    If set, the license will allow Snapshots.
      --support-bundle-upload       If set, the license will allow uploading support bundles.
      --type string                 The license type to update. One of: dev|trial|paid|community|test (default: dev) (default "dev")
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated customer](replicated-cli-customer) - Manage customers

================
File: docs/reference/replicated-cli-customer.mdx
================
# replicated customer

Manage customers

### Synopsis

The customers command allows vendors to create, display, and modify end customer records.

### Options

```
  -h, --help   help for customer
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated
* [replicated customer archive](replicated-cli-customer-archive) - Archive a customer
* [replicated customer create](replicated-cli-customer-create) - Create a new customer for the current application
* [replicated customer download-license](replicated-cli-customer-download-license) - Download a customer's license
* [replicated customer inspect](replicated-cli-customer-inspect) - Show detailed information about a specific customer
* [replicated customer ls](replicated-cli-customer-ls) - List customers for the current application
* [replicated customer update](replicated-cli-customer-update) - Update an existing customer

================
File: docs/reference/replicated-cli-default-clear-all.mdx
================
# replicated default clear-all

Clear all default values

### Synopsis

Clears all default values that are used by other commands.

This command removes all default values that are used by other commands run by the current user.

```
replicated default clear-all [flags]
```

### Examples

```
# Clear all default values
replicated default clear-all
```

### Options

```
  -h, --help   help for clear-all
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated default](replicated-cli-default) - Manage default values used by other commands

================
File: docs/reference/replicated-cli-default-clear.mdx
================
# replicated default clear

Clear default value for a key

### Synopsis

Clears default value for the specified key.

This command removes default values that are used by other commands run by the current user. 
+ 

Supported keys:
- app: the default application to use

```
replicated default clear KEY [flags]
```

### Examples

```
# Clear default application
replicated default clear app
```

### Options

```
  -h, --help   help for clear
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated default](replicated-cli-default) - Manage default values used by other commands

================
File: docs/reference/replicated-cli-default-set.mdx
================
# replicated default set

Set default value for a key

### Synopsis

Sets default value for the specified key.

This command sets default values that will be used by other commands run by the current user.

Supported keys:
- app: the default application to use

The output can be customized using the --output flag to display results in
either table or JSON format.

```
replicated default set KEY VALUE [flags]
```

### Examples

```
# Set default application
replicated default set app my-app-slug
```

### Options

```
  -h, --help            help for set
      --output string   The output format to use. One of: json|table (default: table) (default "table")
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated default](replicated-cli-default) - Manage default values used by other commands

================
File: docs/reference/replicated-cli-default-show.mdx
================
# replicated default show

Show default value for a key

### Synopsis

Shows default values for the specified key.

This command shows default values that will be used by other commands run by the current user.

Supported keys:
- app: the default application to use

The output can be customized using the --output flag to display results in
either table or JSON format.

```
replicated default show KEY [flags]
```

### Examples

```
# Show default application
replicated default show app
```

### Options

```
  -h, --help            help for show
      --output string   The output format to use. 
One of: json|table (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated default](replicated-cli-default) - Manage default values used by other commands + +================ +File: docs/reference/replicated-cli-default.mdx +================ +# replicated default + +Manage default values used by other commands + +### Options + +``` + -h, --help help for default +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated +* [replicated default clear](replicated-cli-default-clear) - Clear default value for a key +* [replicated default clear-all](replicated-cli-default-clear-all) - Clear all default values +* [replicated default set](replicated-cli-default-set) - Set default value for a key +* [replicated default show](replicated-cli-default-show) - Show default value for a key + +================ +File: docs/reference/replicated-cli-installer-create.mdx +================ +# replicated installer create + +Create a new installer spec + +### Synopsis + +Create a new installer spec by providing YAML configuration for a https://kurl.sh cluster. + +``` +replicated installer create [flags] +``` + +### Options + +``` + --auto generate default values for use in CI + -y, --confirm-auto auto-accept the configuration generated by the --auto flag + --ensure-channel When used with --promote <channel>, will create the channel if it doesn't exist + -h, --help help for create + --promote string Channel name or id to promote this installer to + --yaml string The YAML config for this installer. Use '-' to read from stdin. Cannot be used with the --yaml-file flag. + --yaml-file string The file name with YAML config for this installer. Cannot be used with the --yaml flag. +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated installer](replicated-cli-installer) - Manage Kubernetes installers + +================ +File: docs/reference/replicated-cli-installer-ls.mdx +================ +# replicated installer ls + +List an app's Kubernetes Installers + +### Synopsis + +List an app's https://kurl.sh Kubernetes Installers + +``` +replicated installer ls [flags] +``` + +### Aliases + +``` +ls, list +``` + +### Options + +``` + -h, --help help for ls + --output string The output format to use. One of: json|table (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated installer](replicated-cli-installer) - Manage Kubernetes installers + +================ +File: docs/reference/replicated-cli-installer.mdx +================ +# replicated installer + +Manage Kubernetes installers + +### Synopsis + +The installers command allows vendors to create, display, modify and promote kurl.sh specs for managing the installation of Kubernetes. 
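### Examples

The installer subcommands are typically used together; a minimal sketch using only the flags documented above (the file name and channel below are illustrative):

```
# Create an installer spec from a local YAML file
replicated installer create --yaml-file ./kurl-installer.yaml

# Create an installer spec and promote it to the Stable channel, creating the channel if needed
replicated installer create --yaml-file ./kurl-installer.yaml --promote Stable --ensure-channel

# List the app's Kubernetes installers
replicated installer ls
```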
+ +### Options + +``` + -h, --help help for installer +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated +* [replicated installer create](replicated-cli-installer-create) - Create a new installer spec +* [replicated installer ls](replicated-cli-installer-ls) - List an app's Kubernetes Installers + +================ +File: docs/reference/replicated-cli-installing.mdx +================ +import Verify from "../partials/replicated-cli/_verify-install.mdx" +import Sudo from "../partials/replicated-cli/_sudo-install.mdx" +import Login from "../partials/replicated-cli/_login.mdx" +import Logout from "../partials/replicated-cli/_logout.mdx" +import AuthToken from "../partials/replicated-cli/_authorize-with-token-note.mdx" + +# Installing the Replicated CLI + +Vendors can use the Replicated CLI to manage their applications with Replicated programmatically, rather than using the Replicated vendor portal. + +## Prerequisites + +Complete the following prerequisites before installing the Replicated CLI: + +- Create a vendor account. See [Creating a Vendor Account](/vendor/vendor-portal-creating-account). +- To run on Linux or Mac, install [curl](https://curl.haxx.se/). +- To run through a Docker container, install [docker](https://www.docker.com). + +## Install and Run + +You can install and run the Replicated CLI in the following environments: + +* Directly on MacOS +* Directly on Linux +* Through Docker (Useful for Windows, GitHub Actions, or computers without sufficient access) + +### MacOS + +To install and run the latest Replicated CLI on MacOS: + +1. Run one of the following commands: + + - With Brew: + + ```shell + brew install replicatedhq/replicated/cli + ``` + + - Without Brew: + + ```shell + curl -s https://api.github.com/repos/replicatedhq/replicated/releases/latest \ + | grep "browser_download_url.*darwin_all.tar.gz" \ + | cut -d : -f 2,3 \ + | tr -d \" \ + | wget -O replicated.tar.gz -qi - + tar xf replicated.tar.gz replicated && rm replicated.tar.gz + mv replicated /usr/local/bin/replicated + ``` + + <Sudo/> + +1. <Verify/> + +1. <Login/> + + <AuthToken/> + +1. <Logout/> + +### Linux + +To install and run the latest Replicated CLI on Linux: + +1. Run the following command: + + ```shell + curl -s https://api.github.com/repos/replicatedhq/replicated/releases/latest \ + | grep "browser_download_url.*linux_amd64.tar.gz" \ + | cut -d : -f 2,3 \ + | tr -d \" \ + | wget -O replicated.tar.gz -qi - + tar xf replicated.tar.gz replicated && rm replicated.tar.gz + mv replicated /usr/local/bin/replicated + ``` + + <Sudo/> + +1. <Verify/> + +1. <Login/> + + <AuthToken/> + +1. <Logout/> + +### Docker / Windows + +Installing in Docker environments requires that you set the `REPLICATED_API_TOKEN` environment variable to authorize the Replicated CLI with an API token. For more information, see [(Optional) Set Environment Variables](#env-var) below. + +To install and run the latest Replicated CLI in Docker environments: + +1. Generate a service account or user API token in the vendor portal. To create new releases, the token must have `Read/Write` access. See [Generating API Tokens](/vendor/replicated-api-tokens). + +1. 
Get the latest Replicated CLI installation files from the [replicatedhq/replicated repository](https://github.com/replicatedhq/replicated/releases) on GitHub.

   Download and install the files. For simplicity, the usage in the next step is represented assuming that the CLI is downloaded and installed to the desktop.

1. Authorize the Replicated CLI:

   - Through a Docker container:

     ```shell
     docker run \
       -e REPLICATED_API_TOKEN=$TOKEN \
       replicated/vendor-cli --help
     ```
     Replace `TOKEN` with your API token.

   - On Windows:

     ```dos
     docker.exe run \
       -e REPLICATED_API_TOKEN=%TOKEN% \
       replicated/vendor-cli --help
     ```

     Replace `TOKEN` with your API token.

   For more information about the `docker run` command, see [docker run](https://docs.docker.com/engine/reference/commandline/run/) in the Docker documentation.

## (Optional) Set Environment Variables {#env-var}

The Replicated CLI supports setting the following environment variables:

* **`REPLICATED_API_TOKEN`**: A service account or user API token generated from a vendor portal team or individual account. The `REPLICATED_API_TOKEN` environment variable has the following use cases:

   * To use Replicated CLI commands as part of automation (such as from continuous integration and continuous delivery pipelines), authenticate by providing the `REPLICATED_API_TOKEN` environment variable.

   * To authorize the Replicated CLI when installing and running the CLI in Docker containers.

   * Optionally set the `REPLICATED_API_TOKEN` environment variable instead of using the `replicated login` command to authorize the Replicated CLI in MacOS or Linux environments.

* **`REPLICATED_APP`**: The slug of the target application.

   When using the Replicated CLI to manage applications through your vendor account (including channels, releases, customers, or other objects associated with an application), you can set the `REPLICATED_APP` environment variable to avoid passing the application slug with each command.

### `REPLICATED_API_TOKEN`

To set the `REPLICATED_API_TOKEN` environment variable:

1. Generate a service account or user API token in the vendor portal. To create new releases, the token must have `Read/Write` access. See [Generating API Tokens](/vendor/replicated-api-tokens).

1. Set the environment variable, replacing `TOKEN` with the token you generated in the previous step:

   * **MacOS or Linux**:

     ```
     export REPLICATED_API_TOKEN=TOKEN
     ```

   * **Docker**:

     ```
     docker run \
       -e REPLICATED_API_TOKEN=$TOKEN \
       replicated/vendor-cli --help
     ```

   * **Windows**:

     ```
     docker.exe run \
       -e REPLICATED_API_TOKEN=%TOKEN% \
       replicated/vendor-cli --help
     ```

### `REPLICATED_APP`

To set the `REPLICATED_APP` environment variable:

1. In the [vendor portal](https://vendor.replicated.com), go to the **Application Settings** page and copy the slug for the target application. For more information, see [Get the Application Slug](/vendor/vendor-portal-manage-app#slug) in _Managing Applications_.

1. 
Set the environment variable, replacing `APP_SLUG` with the slug for the target application that you retrieved in the previous step:

   * **MacOS or Linux**:

     ```
     export REPLICATED_APP=APP_SLUG
     ```

   * **Docker**:

     ```
     docker run \
       -e REPLICATED_APP=$APP_SLUG \
       replicated/vendor-cli --help
     ```

   * **Windows**:

     ```
     docker.exe run \
       -e REPLICATED_APP=%APP_SLUG% \
       replicated/vendor-cli --help
     ```

================
File: docs/reference/replicated-cli-instance-inspect.mdx
================
# replicated instance inspect

Show full details for a customer instance

### Synopsis

Show full details for a customer instance

```
replicated instance inspect [flags]
```

### Options

```
      --customer string   Customer Name or ID
  -h, --help              help for inspect
      --instance string   Instance Name or ID
      --output string     The output format to use. One of: json|table (default: table) (default "table")
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated instance](replicated-cli-instance) - Manage instances

================
File: docs/reference/replicated-cli-instance-ls.mdx
================
# replicated instance ls

list customer instances

### Synopsis

list customer instances

```
replicated instance ls [flags]
```

### Aliases

```
ls, list
```

### Options

```
      --customer string   Customer Name or ID
  -h, --help              help for ls
      --output string     The output format to use. One of: json|table (default: table) (default "table")
      --tag stringArray   Tags to use to filter instances (key=value format, can be specified multiple times). Only one tag needs to match (an OR operation)
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated instance](replicated-cli-instance) - Manage instances

================
File: docs/reference/replicated-cli-instance-tag.mdx
================
# replicated instance tag

tag an instance

### Synopsis

remove or add instance tags

```
replicated instance tag [flags]
```

### Options

```
      --customer string   Customer Name or ID
  -h, --help              help for tag
      --instance string   Instance Name or ID
      --output string     The output format to use. One of: json|table (default: table) (default "table")
      --tag stringArray   Tags to apply to instance. Leave value empty to remove tag. Tags not specified will not be removed.
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated instance](replicated-cli-instance) - Manage instances

================
File: docs/reference/replicated-cli-instance.mdx
================
# replicated instance

Manage instances

### Synopsis

The instance command allows vendors to display and tag customer instances. 
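### Examples

A minimal sketch of a common workflow using only the flags documented on the subcommand pages above (the customer name, instance ID, and tag below are illustrative):

```
# List instances for a customer
replicated instance ls --customer "Acme Inc"

# Show full details for one instance
replicated instance inspect --customer "Acme Inc" --instance inst_abcdef123456

# Add a tag to the instance (leave the value empty to remove a tag)
replicated instance tag --customer "Acme Inc" --instance inst_abcdef123456 --tag env=production
```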
+ 

### Options

```
  -h, --help   help for instance
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated
* [replicated instance inspect](replicated-cli-instance-inspect) - Show full details for a customer instance
* [replicated instance ls](replicated-cli-instance-ls) - list customer instances
* [replicated instance tag](replicated-cli-instance-tag) - tag an instance

================
File: docs/reference/replicated-cli-login.mdx
================
# replicated login

Log in to Replicated

### Synopsis

This command will open your browser to ask you for authentication details and create or retrieve an API token for the CLI to use.

```
replicated login [flags]
```

### Options

```
  -h, --help   help for login
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated

================
File: docs/reference/replicated-cli-logout.mdx
================
# replicated logout

Logout from Replicated

### Synopsis

This command will remove any stored credentials from the CLI.

```
replicated logout [flags]
```

### Options

```
  -h, --help   help for logout
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated

================
File: docs/reference/replicated-cli-registry-add-dockerhub.mdx
================
# replicated registry add dockerhub

Add a DockerHub registry

### Synopsis

Add a DockerHub registry using a username/password or an account token

```
replicated registry add dockerhub [flags]
```

### Options

```
      --authtype string   Auth type for the registry (default "password")
  -h, --help              help for dockerhub
      --output string     The output format to use. One of: json|table (default: table) (default "table")
      --password string   The password to authenticate to the registry with
      --password-stdin    Take the password from stdin
      --token string      The token to authenticate to the registry with
      --token-stdin       Take the token from stdin
      --username string   The username to authenticate to the registry with
```

### Options inherited from parent commands

```
      --app string        The app slug or app id to use in all calls
      --skip-validation   Skip validation of the registry (not recommended)
```

### SEE ALSO

* [replicated registry add](replicated-cli-registry-add) - add

================
File: docs/reference/replicated-cli-registry-add-ecr.mdx
================
# replicated registry add ecr

Add an ECR registry

### Synopsis

Add an ECR registry using an Access Key ID and Secret Access Key

```
replicated registry add ecr [flags]
```

### Options

```
      --accesskeyid string   The access key id to authenticate to the registry with
      --endpoint string      The ECR endpoint
  -h, --help                 help for ecr
      --output string        The output format to use. 
One of: json|table (default: table) (default "table") + --secretaccesskey string The secret access key to authenticate to the registry with + --secretaccesskey-stdin Take the secret access key from stdin +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --skip-validation Skip validation of the registry (not recommended) + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated registry add](replicated-cli-registry-add) - add + +================ +File: docs/reference/replicated-cli-registry-add-gar.mdx +================ +# replicated registry add gar + +Add a Google Artifact Registry + +### Synopsis + +Add a Google Artifact Registry using a service account key + +``` +replicated registry add gar [flags] +``` + +### Options + +``` + --authtype string Auth type for the registry (default "serviceaccount") + --endpoint string The GAR endpoint + -h, --help help for gar + --output string The output format to use. One of: json|table (default: table) (default "table") + --serviceaccountkey string The service account key to authenticate to the registry with + --serviceaccountkey-stdin Take the service account key from stdin + --token string The token to use to auth to the registry with + --token-stdin Take the token from stdin +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --skip-validation Skip validation of the registry (not recommended) +``` + +### SEE ALSO + +* [replicated registry add](replicated-cli-registry-add) - add + +================ +File: docs/reference/replicated-cli-registry-add-gcr.mdx +================ +# replicated registry add gcr + +Add a Google Container Registry + +### Synopsis + +Add a Google Container Registry using a service account key + +``` +replicated registry add gcr [flags] +``` + +### Options + +``` + --endpoint string The GCR endpoint + -h, --help help for gcr + --output string The output format to use. One of: json|table (default: table) (default "table") + --serviceaccountkey string The service account key to authenticate to the registry with + --serviceaccountkey-stdin Take the service account key from stdin +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --skip-validation Skip validation of the registry (not recommended) + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated registry add](replicated-cli-registry-add) - add + +================ +File: docs/reference/replicated-cli-registry-add-ghcr.mdx +================ +# replicated registry add ghcr + +Add a GitHub Container Registry + +### Synopsis + +Add a GitHub Container Registry using a username and personal access token (PAT) + +``` +replicated registry add ghcr [flags] +``` + +### Options + +``` + -h, --help help for ghcr + --output string The output format to use. 
One of: json|table (default: table) (default "table")
      --token string      The token to use to auth to the registry with
      --token-stdin       Take the token from stdin
```

### Options inherited from parent commands

```
      --app string        The app slug or app id to use in all calls
      --skip-validation   Skip validation of the registry (not recommended)
```

### SEE ALSO

* [replicated registry add](replicated-cli-registry-add) - add

================
File: docs/reference/replicated-cli-registry-add-other.mdx
================
# replicated registry add other

Add a generic registry

### Synopsis

Add a generic registry using a username/password

```
replicated registry add other [flags]
```

### Options

```
      --endpoint string   endpoint for the registry
  -h, --help              help for other
      --output string     The output format to use. One of: json|table (default: table) (default "table")
      --password string   The password to authenticate to the registry with
      --password-stdin    Take the password from stdin
      --username string   The username to authenticate to the registry with
```

### Options inherited from parent commands

```
      --app string        The app slug or app id to use in all calls
      --skip-validation   Skip validation of the registry (not recommended)
      --token string      The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated registry add](replicated-cli-registry-add) - add

================
File: docs/reference/replicated-cli-registry-add-quay.mdx
================
# replicated registry add quay

Add a quay.io registry

### Synopsis

Add a quay.io registry using a username/password (or a robot account)

```
replicated registry add quay [flags]
```

### Options

```
  -h, --help              help for quay
      --output string     The output format to use. 
One of: json|table (default: table) (default "table")
      --password string   The password to authenticate to the registry with
      --password-stdin    Take the password from stdin
      --username string   The username to authenticate to the registry with
```

### Options inherited from parent commands

```
      --app string        The app slug or app id to use in all calls
      --skip-validation   Skip validation of the registry (not recommended)
      --token string      The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated registry add](replicated-cli-registry-add) - add

================
File: docs/reference/replicated-cli-registry-add.mdx
================
# replicated registry add

add

### Synopsis

add

### Options

```
  -h, --help              help for add
      --skip-validation   Skip validation of the registry (not recommended)
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated registry](replicated-cli-registry) - Manage registries
* [replicated registry add dockerhub](replicated-cli-registry-add-dockerhub) - Add a DockerHub registry
* [replicated registry add ecr](replicated-cli-registry-add-ecr) - Add an ECR registry
* [replicated registry add gar](replicated-cli-registry-add-gar) - Add a Google Artifact Registry
* [replicated registry add gcr](replicated-cli-registry-add-gcr) - Add a Google Container Registry
* [replicated registry add ghcr](replicated-cli-registry-add-ghcr) - Add a GitHub Container Registry
* [replicated registry add other](replicated-cli-registry-add-other) - Add a generic registry
* [replicated registry add quay](replicated-cli-registry-add-quay) - Add a quay.io registry

================
File: docs/reference/replicated-cli-registry-ls.mdx
================
# replicated registry ls

list registries

### Synopsis

list registries, or a single registry by name

```
replicated registry ls [NAME] [flags]
```

### Aliases

```
ls, list
```

### Options

```
  -h, --help            help for ls
      --output string   The output format to use. 
One of: json|table (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated registry](replicated-cli-registry) - Manage registries + +================ +File: docs/reference/replicated-cli-registry-rm.mdx +================ +# replicated registry rm + +remove registry + +### Synopsis + +remove registry by endpoint + +``` +replicated registry rm [ENDPOINT] [flags] +``` + +### Aliases + +``` +rm, delete +``` + +### Options + +``` + -h, --help help for rm +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated registry](replicated-cli-registry) - Manage registries + +================ +File: docs/reference/replicated-cli-registry-test.mdx +================ +# replicated registry test + +test registry + +### Synopsis + +test registry + +``` +replicated registry test HOSTNAME [flags] +``` + +### Options + +``` + -h, --help help for test + --image string The image to test pulling +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated registry](replicated-cli-registry) - Manage registries + +================ +File: docs/reference/replicated-cli-registry.mdx +================ +# replicated registry + +Manage registries + +### Synopsis + +registry can be used to manage existing registries and add new registries to a team + +### Options + +``` + -h, --help help for registry +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated +* [replicated registry add](replicated-cli-registry-add) - add +* [replicated registry ls](replicated-cli-registry-ls) - list registries +* [replicated registry rm](replicated-cli-registry-rm) - remove registry +* [replicated registry test](replicated-cli-registry-test) - test registry + +================ +File: docs/reference/replicated-cli-release-compatibility.mdx +================ +# replicated release compatibility + +Report release compatibility + +### Synopsis + +Report release compatibility for a kubernetes distribution and version + +``` +replicated release compatibility SEQUENCE [flags] +``` + +### Options + +``` + --distribution string Kubernetes distribution of the cluster to report on. + --failure If set, the compatibility will be reported as a failure. + -h, --help help for compatibility + --notes string Additional notes to report. + --success If set, the compatibility will be reported as a success. 
+      --version string        Kubernetes version of the cluster to report on (format is distribution dependent)
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated release](replicated-cli-release) - Manage app releases

================
File: docs/reference/replicated-cli-release-create.mdx
================
# replicated release create

Create a new release

### Synopsis

Create a new release by providing application manifests for the next release in
your sequence.

```
replicated release create [flags]
```

### Options

```
      --auto                   generate default values for use in CI
  -y, --confirm-auto           auto-accept the configuration generated by the --auto flag
      --ensure-channel         When used with --promote <channel>, will create the channel if it doesn't exist
      --fail-on string         The minimum severity to cause the command to exit with a non-zero exit code. Supported values are [info, warn, error, none]. (default "error")
  -h, --help                   help for create
      --lint                   Lint a manifests directory prior to creation of the KOTS Release.
      --promote string         Channel name or id to promote this release to
      --release-notes string   When used with --promote <channel>, sets the **markdown** release notes
      --version string         When used with --promote <channel>, sets the version label for the release in this channel
      --yaml-dir string        The directory containing multiple yamls for a Kots release. Cannot be used with the --yaml flag.
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated release](replicated-cli-release) - Manage app releases

================
File: docs/reference/replicated-cli-release-download.mdx
================
# replicated release download

Download application manifests for a release.

### Synopsis

Download application manifests for a release to a specified directory.

For non-KOTS applications, this is equivalent to the 'release inspect' command.

```
replicated release download RELEASE_SEQUENCE [flags]
```

### Examples

```
replicated release download 1 --dest ./manifests
```

### Options

```
  -d, --dest string   Directory to which release manifests should be downloaded
  -h, --help          help for download
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated release](replicated-cli-release) - Manage app releases

================
File: docs/reference/replicated-cli-release-inspect.mdx
================
# replicated release inspect

Show information about a release

### Synopsis

Show information about the specified application release.

This command displays detailed information about a specific release of an application.

The output can be customized using the --output flag to display results in
either table or JSON format. 
+ + +``` +replicated release inspect RELEASE_SEQUENCE [flags] +``` + +### Examples + +``` +# Display information about a release +replicated release inspect 123 + +# Display information about a release in JSON format +replicated release inspect 123 --output json +``` + +### Options + +``` + -h, --help help for inspect + --output string The output format to use. One of: json|table (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated release](replicated-cli-release) - Manage app releases + +================ +File: docs/reference/replicated-cli-release-lint.mdx +================ +# replicated release lint + +Lint a directory of KOTS manifests + +### Synopsis + +Lint a directory of KOTS manifests + +``` +replicated release lint [flags] +``` + +### Options + +``` + --fail-on string The minimum severity to cause the command to exit with a non-zero exit code. Supported values are [info, warn, error, none]. (default "error") + -h, --help help for lint + --output string The output format to use. One of: json|table (default: table) (default "table") + --yaml-dir yaml The directory containing multiple yamls for a Kots release. Cannot be used with the yaml flag. +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated release](replicated-cli-release) - Manage app releases + +================ +File: docs/reference/replicated-cli-release-ls.mdx +================ +# replicated release ls + +List all of an app's releases + +### Synopsis + +List all of an app's releases + +``` +replicated release ls [flags] +``` + +### Aliases + +``` +ls, list +``` + +### Options + +``` + -h, --help help for ls + --output string The output format to use. 
One of: json|table (default: table) (default "table")
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated release](replicated-cli-release) - Manage app releases

================
File: docs/reference/replicated-cli-release-promote.mdx
================
# replicated release promote

Set the release for a channel

### Synopsis

Set the release for a channel

```
replicated release promote SEQUENCE CHANNEL_ID [flags]
```

### Examples

```
replicated release promote 15 fe4901690971757689f022f7a460f9b2
```

### Options

```
  -h, --help                   help for promote
      --optional               If set, this release can be skipped
      --release-notes string   The **markdown** release notes
      --required               If set, this release can't be skipped
      --version string         A version label for the release in this channel
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated release](replicated-cli-release) - Manage app releases

================
File: docs/reference/replicated-cli-release-test.mdx
================
# replicated release test

Test the application release

### Synopsis

Test the application release

```
replicated release test SEQUENCE [flags]
```

### Options

```
  -h, --help   help for test
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated release](replicated-cli-release) - Manage app releases

================
File: docs/reference/replicated-cli-release-update.mdx
================
# replicated release update

Update a release's YAML config

### Synopsis

Update a release's YAML config

```
replicated release update SEQUENCE [flags]
```

### Options

```
  -h, --help               help for update
      --yaml string        The new YAML config for this release. Use '-' to read from stdin. Cannot be used with the --yaml-file flag.
      --yaml-dir string    The directory containing multiple yamls for a Kots release. Cannot be used with the --yaml flag.
      --yaml-file string   The file name with YAML config for this release. Cannot be used with the --yaml flag.
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated release](replicated-cli-release) - Manage app releases

================
File: docs/reference/replicated-cli-release.mdx
================
# replicated release

Manage app releases

### Synopsis

The release command allows vendors to create, display, and promote their releases. 
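### Examples

A minimal sketch of a common release workflow using only the flags documented on the subcommand pages above (the directory, channel, and version label below are illustrative):

```
# Create a release from a directory of manifests and promote it to the Unstable channel
replicated release create --yaml-dir ./manifests --promote Unstable

# List all of the app's releases
replicated release ls

# Promote release sequence 15 to a channel by ID, setting a version label
replicated release promote 15 fe4901690971757689f022f7a460f9b2 --version 1.0.1
```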
+ 

### Options

```
  -h, --help   help for release
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated
* [replicated release compatibility](replicated-cli-release-compatibility) - Report release compatibility
* [replicated release create](replicated-cli-release-create) - Create a new release
* [replicated release download](replicated-cli-release-download) - Download application manifests for a release.
* [replicated release inspect](replicated-cli-release-inspect) - Show information about a release
* [replicated release lint](replicated-cli-release-lint) - Lint a directory of KOTS manifests
* [replicated release ls](replicated-cli-release-ls) - List all of an app's releases
* [replicated release promote](replicated-cli-release-promote) - Set the release for a channel
* [replicated release test](replicated-cli-release-test) - Test the application release
* [replicated release update](replicated-cli-release-update) - Update a release's YAML config

================
File: docs/reference/replicated-cli-version-upgrade.mdx
================
# replicated version upgrade

Upgrade the replicated CLI to the latest version

### Synopsis

Download, verify, and upgrade the Replicated CLI to the latest version

```
replicated version upgrade [flags]
```

### Options

```
  -h, --help   help for upgrade
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated version](replicated-cli-version) - Print the current version and exit

================
File: docs/reference/replicated-cli-version.mdx
================
# replicated version

Print the current version and exit

### Synopsis

Print the current version and exit

```
replicated version [flags]
```

### Options

```
  -h, --help   help for version
      --json   output version info in json
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated
* [replicated version upgrade](replicated-cli-version-upgrade) - Upgrade the replicated CLI to the latest version

================
File: docs/reference/replicated-cli-vm-create.mdx
================
# replicated vm create

Create one or more test VMs with specified distribution, version, and configuration options.

### Synopsis

Create one or more test VMs with a specified distribution, version, and a variety of customizable configuration options.

This command allows you to provision VMs with different distributions (e.g., Ubuntu, RHEL), versions, instance types, and more. You can set the number of VMs to create, disk size, and specify the network to use. If no network is provided, a new network will be created automatically. You can also assign tags to your VMs and use a TTL (Time-To-Live) to define how long the VMs should live.

By default, the command provisions one VM, but you can customize the number of VMs to create by using the "--count" flag. 
Additionally, you can use the "--dry-run" flag to simulate the creation without actually provisioning the VMs.

The command also supports a "--wait" flag to wait for the VMs to be ready before returning control, with a customizable timeout duration.

```
replicated vm create [flags]
```

### Examples

```
# Create a single Ubuntu 20.04 VM
replicated vm create --distribution ubuntu --version 20.04

# Create 3 Ubuntu 22.04 VMs
replicated vm create --distribution ubuntu --version 22.04 --count 3

# Create 5 Ubuntu VMs with a custom instance type and disk size
replicated vm create --distribution ubuntu --version 20.04 --count 5 --instance-type r1.medium --disk 100
```

### Options

```
      --count int              Number of matching VMs to create (default 1)
      --disk int               Disk Size (GiB) to request per node (default 50)
      --distribution string    Distribution of the vm to provision
      --dry-run                Dry run
  -h, --help                   help for create
      --instance-type string   The type of instance to use (e.g. r1.medium)
      --name string            VM name (defaults to random name)
      --network string         The network to use for the VM(s). If not supplied, create a new network
      --output string          The output format to use. One of: json|table|wide (default: table) (default "table")
      --tag stringArray        Tag to apply to the VM (key=value format, can be specified multiple times)
      --ttl string             VM TTL (duration, max 48h)
      --version string         Version to provision (format is distribution dependent)
      --wait duration          Wait duration for VM(s) to be ready (leave empty to not wait)
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated vm](replicated-cli-vm) - Manage test virtual machines.

================
File: docs/reference/replicated-cli-vm-ls.mdx
================
# replicated vm ls

List test VMs and their status, with optional filters for start/end time and terminated VMs.

### Synopsis

List all test VMs in your account, including their current status, distribution, version, and more. You can use optional flags to filter the output based on VM termination status, start time, or end time. This command can also watch the VM status in real-time.

By default, the command will return a table of all VMs, but you can switch to JSON or wide output formats for more detailed information. The command supports filtering to show only terminated VMs or to specify a time range for the query.

You can use the '--watch' flag to monitor VMs continuously. This will refresh the list of VMs every 2 seconds, displaying any updates in real-time, such as new VMs being created or existing VMs being terminated.

The command also allows you to customize the output format, supporting 'json', 'table', and 'wide' views for flexibility based on your needs.

```
replicated vm ls [flags]
```

### Aliases

```
ls, list
```

### Examples

```
# List all active VMs
replicated vm ls

# List all VMs that were created after a specific start time
replicated vm ls --start-time 2024-10-01T00:00:00Z

# Show only terminated VMs
replicated vm ls --show-terminated

# Watch VM status changes in real-time
replicated vm ls --watch
```

### Options

```
      --end-time string   end time for the query (Format: 2006-01-02T15:04:05Z)
  -h, --help              help for ls
      --output string     The output format to use. 
One of: json|table|wide (default: table) (default "table") + --show-terminated when set, only show terminated vms + --start-time string start time for the query (Format: 2006-01-02T15:04:05Z) + -w, --watch watch vms +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated vm](replicated-cli-vm) - Manage test virtual machines. + +================ +File: docs/reference/replicated-cli-vm-port-expose.mdx +================ +# replicated vm port expose + +Expose a port on a vm to the public internet. + +### Synopsis + +The 'vm port expose' command is used to expose a specified port on a vm to the public internet. When exposing a port, the command automatically creates a DNS entry and, if using the "https" protocol, provisions a TLS certificate for secure communication. + +You can also create a wildcard DNS entry and TLS certificate by specifying the "--wildcard" flag. Please note that creating a wildcard certificate may take additional time. + +This command supports different protocols including "http", "https", "ws", and "wss" for web traffic and web socket communication. + +``` +replicated vm port expose VM_ID --port PORT [flags] +``` + +### Examples + +``` +# Expose port 8080 with HTTPS protocol and wildcard DNS +replicated vm port expose VM_ID --port 8080 --protocol https --wildcard + +# Expose port 3000 with HTTP protocol +replicated vm port expose VM_ID --port 3000 --protocol http + +# Expose port 8080 with multiple protocols +replicated vm port expose VM_ID --port 8080 --protocol http,https + +# Expose port 8080 and display the result in JSON format +replicated vm port expose VM_ID --port 8080 --protocol https --output json +``` + +### Options + +``` + -h, --help help for expose + --output string The output format to use. One of: json|table|wide (default: table) (default "table") + --port int Port to expose (required) + --protocol strings Protocol to expose (valid values are "http", "https", "ws" and "wss") (default [http,https]) + --wildcard Create a wildcard DNS entry and TLS certificate for this port +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated vm port](replicated-cli-vm-port) - Manage VM ports. + +================ +File: docs/reference/replicated-cli-vm-port-ls.mdx +================ +# replicated vm port ls + +List vm ports for a vm. + +### Synopsis + +The 'vm port ls' command lists all the ports configured for a specific vm. You must provide the vm ID to retrieve and display the ports. + +This command is useful for viewing the current port configurations, protocols, and other related settings of your test vm. The output format can be customized to suit your needs, and the available formats include table, JSON, and wide views. + +``` +replicated vm port ls VM_ID [flags] +``` + +### Examples + +``` +# List ports for a vm in the default table format +replicated vm port ls VM_ID + +# List ports for a vm in JSON format +replicated vm port ls VM_ID --output json + +# List ports for a vm in wide format +replicated vm port ls VM_ID --output wide +``` + +### Options + +``` + -h, --help help for ls + --output string The output format to use. 
One of: json|table|wide (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated vm port](replicated-cli-vm-port) - Manage VM ports. + +================ +File: docs/reference/replicated-cli-vm-port-rm.mdx +================ +# replicated vm port rm + +Remove vm port by ID. + +### Synopsis + +The 'vm port rm' command removes a specific port from a vm. You must provide the ID of the port to remove. + +This command is useful for managing the network settings of your test vms by allowing you to clean up unused or incorrect ports. After removing a port, the updated list of ports will be displayed. + +``` +replicated vm port rm VM_ID --id PORT_ID [flags] +``` + +### Examples + +``` +# Remove a port using its ID +replicated vm port rm VM_ID --id PORT_ID + +# Remove a port and display the result in JSON format +replicated vm port rm VM_ID --id PORT_ID --output json +``` + +### Options + +``` + -h, --help help for rm + --id string ID of the port to remove (required) + --output string The output format to use. One of: json|table|wide (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated vm port](replicated-cli-vm-port) - Manage VM ports. + +================ +File: docs/reference/replicated-cli-vm-port.mdx +================ +# replicated vm port + +Manage VM ports. + +### Synopsis + +The 'vm port' command is a parent command for managing ports in a vm. It allows users to list, remove, or expose specific ports used by the vm. Use the subcommands (such as 'ls', 'rm', and 'expose') to manage port configurations effectively. + +This command provides flexibility for handling ports in various test vms, ensuring efficient management of vm networking settings. + +### Examples + +``` +# List all exposed ports in a vm +replicated vm port ls [VM_ID] + +# Remove an exposed port from a vm +replicated vm port rm [VM_ID] [PORT] + +# Expose a new port in a vm +replicated vm port expose [VM_ID] [PORT] +``` + +### Options + +``` + -h, --help help for port +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated vm](replicated-cli-vm) - Manage test virtual machines. +* [replicated vm port expose](replicated-cli-vm-port-expose) - Expose a port on a vm to the public internet. +* [replicated vm port ls](replicated-cli-vm-port-ls) - List vm ports for a vm. +* [replicated vm port rm](replicated-cli-vm-port-rm) - Remove vm port by ID. + +================ +File: docs/reference/replicated-cli-vm-rm.mdx +================ +# replicated vm rm + +Remove test VM(s) immediately, with options to filter by name, tag, or remove all VMs. + +### Synopsis + +The 'rm' command allows you to remove test VMs from your account immediately. You can specify one or more VM IDs directly, or use flags to filter which VMs to remove based on their name, tags, or simply remove all VMs at once. + +This command supports multiple filtering options, including removing VMs by their name, by specific tags, or by specifying the '--all' flag to remove all VMs in your account. 
+ +You can also use the '--dry-run' flag to simulate the removal without actually deleting the VMs. + +``` +replicated vm rm ID [ID …] [flags] +``` + +### Aliases + +``` +rm, delete +``` + +### Examples + +``` +# Remove a VM by ID +replicated vm rm aaaaa11 + +# Remove multiple VMs by ID +replicated vm rm aaaaa11 bbbbb22 ccccc33 + +# Remove all VMs with a specific name +replicated vm rm --name test-vm + +# Remove all VMs with a specific tag +replicated vm rm --tag env=dev + +# Remove all VMs +replicated vm rm --all + +# Perform a dry run of removing all VMs +replicated vm rm --all --dry-run +``` + +### Options + +``` + --all remove all vms + --dry-run Dry run + -h, --help help for rm + --name stringArray Name of the vm to remove (can be specified multiple times) + --tag stringArray Tag of the vm to remove (key=value format, can be specified multiple times) +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated vm](replicated-cli-vm) - Manage test virtual machines. + +================ +File: docs/reference/replicated-cli-vm-update-ttl.mdx +================ +# replicated vm update ttl + +Update TTL for a test VM. + +### Synopsis + +The 'ttl' command allows you to update the Time to Live (TTL) for a test VM. This command modifies the lifespan of a running VM by updating its TTL, which is a duration starting from the moment the VM is provisioned. + +The TTL specifies how long the VM will run before it is automatically terminated. You can specify a duration up to a maximum of 48 hours. + +The command accepts a VM ID as an argument and requires the '--ttl' flag to specify the new TTL value. + +You can also specify the output format (json, table, wide) using the '--output' flag. + +``` +replicated vm update ttl [ID] [flags] +``` + +### Examples + +``` +# Update the TTL of a VM to 2 hours +replicated vm update ttl aaaaa11 --ttl 2h + +# Update the TTL of a VM to 30 minutes +replicated vm update ttl aaaaa11 --ttl 30m +``` + +### Options + +``` + -h, --help help for ttl + --output string The output format to use. One of: json|table|wide (default: table) (default "table") + --ttl string Update TTL which starts from the moment the vm is running (duration, max 48h). +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --id string id of the vm to update (when name is not provided) + --name string Name of the vm to update. + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated vm update](replicated-cli-vm-update) - Update VM settings. + +================ +File: docs/reference/replicated-cli-vm-update.mdx +================ +# replicated vm update + +Update VM settings. + +### Synopsis + +The 'vm update' command allows you to modify the settings of a virtual machine. You can update a VM either by providing its ID or by specifying its name. This command supports updating various VM settings, which will be handled by specific subcommands. + +- To update the VM by its ID, use the '--id' flag. +- To update the VM by its name, use the '--name' flag. 

Subcommands allow for more specific updates, such as the TTL.

### Examples

```
# Update a VM by specifying its ID
replicated vm update --id aaaaa11 --ttl 12h

# Update a VM by specifying its name
replicated vm update --name test-vm --ttl 12h
```

### Options

```
  -h, --help          help for update
      --id string     id of the vm to update (when name is not provided)
      --name string   Name of the vm to update.
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated vm](replicated-cli-vm) - Manage test virtual machines.
* [replicated vm update ttl](replicated-cli-vm-update-ttl) - Update TTL for a test VM.

================
File: docs/reference/replicated-cli-vm-versions.mdx
================
# replicated vm versions

List available VM versions.

### Synopsis

The 'vm versions' command lists all the available versions of virtual machines that can be provisioned. This includes the available distributions and their respective versions.

- You can filter the list by a specific distribution using the '--distribution' flag.
- The output can be formatted as a table or in JSON format using the '--output' flag.

```
replicated vm versions [flags]
```

### Examples

```
# List all available VM versions
replicated vm versions

# List VM versions for a specific distribution (e.g., Ubuntu)
replicated vm versions --distribution ubuntu

# Display the output in JSON format
replicated vm versions --output json
```

### Options

```
      --distribution string   Distribution to filter by.
  -h, --help                  help for versions
      --output string         The output format to use. One of: json|table (default: table) (default "table")
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated vm](replicated-cli-vm) - Manage test virtual machines.

================
File: docs/reference/replicated-cli-vm.mdx
================
# replicated vm

Manage test virtual machines.

### Synopsis

The 'vm' command allows you to manage and interact with virtual machines (VMs) used for testing purposes. With this command, you can create, list, remove, update, and manage VMs, as well as retrieve information about available VM versions.

### Examples

```
# Create a single Ubuntu VM
replicated vm create --distribution ubuntu --version 20.04

# List all VMs
replicated vm ls

# Remove a specific VM by ID
replicated vm rm <vm-id>

# Update TTL for a specific VM
replicated vm update ttl <vm-id> --ttl 24h
```

### Options

```
  -h, --help   help for vm
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated
* [replicated vm create](replicated-cli-vm-create) - Create one or more test VMs with specified distribution, version, and configuration options.
* [replicated vm ls](replicated-cli-vm-ls) - List test VMs and their status, with optional filters for start/end time and terminated VMs.
* [replicated vm port](replicated-cli-vm-port) - Manage VM ports.
+* [replicated vm rm](replicated-cli-vm-rm) - Remove test VM(s) immediately, with options to filter by name, tag, or remove all VMs. +* [replicated vm update](replicated-cli-vm-update) - Update VM settings. +* [replicated vm versions](replicated-cli-vm-versions) - List available VM versions. + +================ +File: docs/reference/replicated-sdk-apis.md +================ +# Replicated SDK API + +The Replicated SDK provides an API that you can use to embed Replicated functionality in your Helm chart application. + +For example, if your application includes a UI where users manage their application instance, then you can use the `/api/v1/app/updates` endpoint to include messages in the UI that encourage users to upgrade when new versions are available. You could also revoke access to the application during runtime when a license expires using the `/api/v1/license/fields` endpoint. + +For more information about how to get started with the Replicated SDK, see [About the Replicated SDK](/vendor/replicated-sdk-overview). + +For information about how to develop against the Replicated SDK API with mock data, see [Developing Against the Replicated SDK](/vendor/replicated-sdk-development). + +## app + +### GET /app/info + +List details about an application instance, including the app name, location of the Helm chart in the Replicated OCI registry, and details about the current application release that the instance is running. + +```bash +GET http://replicated:3000/api/v1/app/info +``` + +Response: + +```json +{ + "instanceID": "8dcdb181-5cc4-458c-ad95-c0a1563cb0cb", + "appSlug": "my-app", + "appName": "My App", + "appStatus": "ready", + "helmChartURL": "oci://registry.replicated.com/my-app/beta/my-helm-chart", + "currentRelease": { + "versionLabel": "0.1.72", + "channelID": "2CBDxNwDH1xyYiIXRTjiB7REjKX", + "channelName": "Beta", + "createdAt": "2023-05-28T16:31:21Z", + "releaseNotes": "", + "helmReleaseName": "my-helm-chart", + "helmReleaseRevision": 5, + "helmReleaseNamespace": "my-helm-chart" + }, + "channelID": "2CBDxNwDH1xyYiIXRTjiB7REjKX", + "channelName": "Beta", + "channelSequence": 4, + "releaseSequence": 30 +} +``` + +### GET /app/status + +List details about an application status, including the list of individual resource states and the overall application state. + +```bash +GET http://replicated:3000/api/v1/app/status +``` + +Response: + +```json +{ + "appStatus": { + "appSlug": "my-app", + "resourceStates": [ + { + "kind": "deployment", + "name": "api", + "namespace": "default", + "state": "ready" + } + ], + "updatedAt": "2024-12-19T23:01:52.207162284Z", + "state": "ready", + "sequence": 268 + } +} +``` + +### GET /app/updates + +List details about the releases that are available to an application instance for upgrade, including the version label, created timestamp, and release notes. + +```bash +GET http://replicated:3000/api/v1/app/updates +``` + +Response: + +```json +[ + { + "versionLabel": "0.1.15", + "createdAt": "2023-05-12T15:48:45.000Z", + "releaseNotes": "Awesome new features!" + } +] +``` + +### GET /app/history + +List details about the releases that an application instance has installed previously. 

```bash
GET http://replicated:3000/api/v1/app/history
```

Response:

```json
{
  "releases": [
    {
      "versionLabel": "0.1.70",
      "channelID": "2CBDxNwDH1xyYiIXRTjiB7REjKX",
      "channelName": "Stable",
      "createdAt": "2023-05-12T17:43:51Z",
      "releaseNotes": "",
      "helmReleaseName": "echo-server",
      "helmReleaseRevision": 2,
      "helmReleaseNamespace": "echo-server-helm"
    }
  ]
}
```

### POST /app/custom-metrics

Send custom application metrics. For more information and examples see [Configuring Custom Metrics](/vendor/custom-metrics).

### PATCH /app/custom-metrics

Send partial custom application metrics for upserting.

```bash
PATCH http://replicated:3000/api/v1/app/custom-metrics
```
Request:

```json
{
  "data": {
    "numProjects": 20
  }
}
```

Response: Status `200` OK

### DELETE /app/custom-metrics/\{metric_name\}

Delete an application custom metric.

```bash
DELETE http://replicated:3000/api/v1/app/custom-metrics/numProjects
```

Response: Status `204` No Content

### POST /app/instance-tags

Programmatically set new instance tags or overwrite existing tags. Instance tags are key-value pairs, where the key and the value are strings.

Setting a tag with the `name` key will set the instance's name in the vendor portal.

The `force` parameter defaults to `false`. If `force` is `false`, conflicting pre-existing tags will not be overwritten and the existing tags take precedence. If the `force` parameter is set to `true`, any conflicting pre-existing tags will be overwritten.

To delete a particular tag, set the key's value to an empty string `""`.

```bash
POST http://replicated:3000/api/v1/app/instance-tags
```
Request:

```json
{
  "data": {
    "force": false,
    "tags": {
      "name": "my-instance-name",
      "preExistingKey": "will-not-be-overwritten",
      "cpuCores": "10",
      "supportTier": "basic"
    }
  }
}
```

Response: Status `200` OK

## license

### GET /license/info

List details about the license that was used to install, including the license ID, type, the customer name, and the channel the customer is assigned.

```bash
GET http://replicated:3000/api/v1/license/info
```

Response:

```json
{
  "licenseID": "YiIXRTjiB7R...",
  "appSlug": "my-app",
  "channelID": "2CBDxNwDH1xyYiIXRTjiB7REjKX",
  "channelName": "Stable",
  "customerName": "Example Customer",
  "customerEmail": "username@example.com",
  "licenseType": "dev",
  "licenseSequence": 1,
  "isAirgapSupported": false,
  "isGitOpsSupported": false,
  "isIdentityServiceSupported": false,
  "isGeoaxisSupported": false,
  "isSnapshotSupported": false,
  "isSupportBundleUploadSupported": false,
  "isSemverRequired": true,
  "endpoint": "https://replicated.app",
  "entitlements": {
    "expires_at": {
      "title": "Expiration",
      "description": "License Expiration",
      "value": "",
      "valueType": "String"
    },
    "numSeats": {
      "title": "Number of Seats",
      "value": 10,
      "valueType": "Integer"
    }
  }
}
```

### GET /license/fields

List details about all the fields in the license that was used to install, including the field names, descriptions, values, and signatures.

```bash
GET http://replicated:3000/api/v1/license/fields
```

Response:

```json
{
  "expires_at": {
    "name": "expires_at",
    "title": "Expiration",
    "description": "License Expiration",
    "value": "2023-05-30T00:00:00Z",
    "valueType": "String",
    "signature": {
      "v1": "Vs+W7+sF0RA6UrFEJcyHAbC5YCIT67hdsDdqtJTRBd4ZitTe4pr1D/SZg2k0NRIozrBP1mXuTgjQgeI8PyQJc/ctQwZDikIEKFW0sVv0PFPQV7Uf9fy7wRgadfUxkagcCS8O6Tpcm4WqlhEcgiJGvPBki3hZLnMO9Ol9yOepZ7UtrUMVsBUKwcTJWCytpFpvvOLfSNoHxMnPuSgpXumbHZjvdXrJoJagoRDXPiXXKGh02DOr58ncLofYqPzze+iXWbE8tqdFBZc72lLayT1am3MN0n3ejCNWNeX9+CiBJkqMqLLkjN4eugUmU/gBiDtJgFUB2gq8ejVVcohqos69WA=="
    }
  },
  "numSeats": {
    "name": "numSeats",
    "title": "Number of Seats",
    "value": 10,
    "valueType": "Integer",
    "signature": {
      "v1": "UmsYlVr4+Vg5TWsJV6goagWUM4imdj8EUUcdau7wIzfcU0MuZnv3UNVlwVE/tCuROCMcbei6ygjm4j5quBdkAGUyq86BCtohg/SqRsgVoNV6BN0S+tnqJ7w4/nqRVBc2Gsn7wTYNXiszLMkmfeNOrigLgsrtaGJmZ4IsczwI1V5Tr+AMAgrACL/UyLg78Y6EitKFW4qvJ9g5Q8B3uVmT+h9xTBxJFuKTQS6qFcDx9XCu+bKqoSmJDZ8lwgwpJDAiBzIhxiAd66lypHX9cpOg5A7cKEW+FLdaBKQdNRcPHQK2O9QwFj/NKEeCJEufuD3OeV8MSbN2PCehMzbj7tXSww=="
    }
  }
}
```

### GET /license/fields/\{field_name\}

List details about one of the fields in the license that was used to install, including the field name, description, value, and signature.

```bash
GET http://replicated:3000/api/v1/license/fields/{field_name}
```

Example request:

```bash
curl replicated:3000/api/v1/license/fields/expires_at
```

Response:

```json
{
  "name": "expires_at",
  "title": "Expiration",
  "description": "License Expiration",
  "value": "2023-05-30T00:00:00Z",
  "valueType": "String",
  "signature": {
    "v1": "c6rsImpilJhW0eK+Kk37jeRQvBpvWgJeXK2MD0YBlIAZEs1zXpmvwLdfcoTsZMOj0lZbxkPN5dPhEPIVcQgrzfzwU5HIwQbwc2jwDrLBQS4hGOKdxOWXnBUNbztsHXMqlAYQsmAhspRLDhBiEoYpFV/8oaaAuNBrmRu/IVAW6ahB4KtP/ytruVdBup3gn1U/uPAl5lhzuBifaW+NDFfJxAXJrhdTxMBxzfdKa6dGmlGu7Ou/xqDU1bNF3AuWoP3C78GzSBQrD1ZPnu/d+nuEjtakKSX3EK6VUisNucm8/TFlEVKUuX7hex7uZ9Of+UgS1GutQXOhXzfMZ7u+0zHXvQ=="
  }
}
```

## Integration

### GET /api/v1/integration/status

Get the status of Development Mode. When this mode is enabled, the `app` API will use mock data. This value cannot be set programmatically. It is controlled by the installed license.

Response:

```json
{
  "isEnabled": true
}
```

### GET /api/v1/integration/mock-data

Get mock data that is used when Development Mode is enabled.
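
```bash
GET http://replicated:3000/api/v1/integration/mock-data
```

Response: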

```json
{
  "appStatus": "ready",
  "helmChartURL": "oci://registry.replicated.com/dev-app/dev-channel/dev-parent-chart",
  "currentRelease": {
    "versionLabel": "0.1.3",
    "releaseNotes": "release notes 0.1.3",
    "createdAt": "2023-05-23T20:58:07Z",
    "deployedAt": "2023-05-23T21:58:07Z",
    "helmReleaseName": "dev-parent-chart",
    "helmReleaseRevision": 3,
    "helmReleaseNamespace": "default"
  },
  "deployedReleases": [
    {
      "versionLabel": "0.1.1",
      "releaseNotes": "release notes 0.1.1",
      "createdAt": "2023-05-21T20:58:07Z",
      "deployedAt": "2023-05-21T21:58:07Z",
      "helmReleaseName": "dev-parent-chart",
      "helmReleaseRevision": 1,
      "helmReleaseNamespace": "default"
    },
    {
      "versionLabel": "0.1.2",
      "releaseNotes": "release notes 0.1.2",
      "createdAt": "2023-05-22T20:58:07Z",
      "deployedAt": "2023-05-22T21:58:07Z",
      "helmReleaseName": "dev-parent-chart",
      "helmReleaseRevision": 2,
      "helmReleaseNamespace": "default"
    },
    {
      "versionLabel": "0.1.3",
      "releaseNotes": "release notes 0.1.3",
      "createdAt": "2023-05-23T20:58:07Z",
      "deployedAt": "2023-05-23T21:58:07Z",
      "helmReleaseName": "dev-parent-chart",
      "helmReleaseRevision": 3,
      "helmReleaseNamespace": "default"
    }
  ],
  "availableReleases": [
    {
      "versionLabel": "0.1.4",
      "releaseNotes": "release notes 0.1.4",
      "createdAt": "2023-05-24T20:58:07Z",
      "deployedAt": "2023-05-24T21:58:07Z",
      "helmReleaseName": "",
      "helmReleaseRevision": 0,
      "helmReleaseNamespace": ""
    },
    {
      "versionLabel": "0.1.5",
      "releaseNotes": "release notes 0.1.5",
      "createdAt": "2023-06-01T20:58:07Z",
      "deployedAt": "2023-06-01T21:58:07Z",
      "helmReleaseName": "",
      "helmReleaseRevision": 0,
      "helmReleaseNamespace": ""
    }
  ]
}
```

### POST /api/v1/integration/mock-data

Programmatically set mock data that is used when Development Mode is enabled. The payload will overwrite the existing mock data. Any data that is not included in the payload will be removed. For example, to remove release data, simply include empty arrays:

```bash
POST http://replicated:3000/api/v1/integration/mock-data
```

Request:

```json
{
  "appStatus": "ready",
  "helmChartURL": "oci://registry.replicated.com/dev-app/dev-channel/dev-parent-chart",
  "currentRelease": {
    "versionLabel": "0.1.3",
    "releaseNotes": "release notes 0.1.3",
    "createdAt": "2023-05-23T20:58:07Z",
    "deployedAt": "2023-05-23T21:58:07Z",
    "helmReleaseName": "dev-parent-chart",
    "helmReleaseRevision": 3,
    "helmReleaseNamespace": "default"
  },
  "deployedReleases": [],
  "availableReleases": []
}
```

Response: Status `201` Created

## Examples

This section provides example use cases for the Replicated SDK API.

### Support Update Checks in Your Application

The `api/v1/app/updates` endpoint returns details about new releases that are available to an instance for upgrade. You could use the `api/v1/app/updates` endpoint to allow your users to easily check for available updates from your application.

Additionally, to make it easier for users to upgrade to new versions of your application, you could provide customer-specific upgrade instructions in your application by injecting values returned by the `/api/v1/license/info` and `/api/v1/app/info` endpoints.
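
For example, the following is a minimal shell sketch of an in-app update check against the `/api/v1/app/updates` endpoint. It assumes the SDK service is reachable at `replicated:3000` from within the cluster, and that `curl` and `jq` are available in the container image:

```bash
#!/bin/sh
# Fetch the list of releases available to this instance from the SDK API.
updates=$(curl -s http://replicated:3000/api/v1/app/updates)

# Count the available updates and print each version label with its release date.
count=$(echo "$updates" | jq 'length')
if [ "$count" -gt 0 ]; then
  echo "$count update(s) available:"
  echo "$updates" | jq -r '.[] | "\(.versionLabel) (released \(.createdAt))"'
else
  echo "Application is up to date."
fi
```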

The following examples show how you could include a page in your application that lists available updates and also provides customer-specific upgrade instructions:

![a user interface showing a list of available releases](/images/slackernews-update-page.png)
[View a larger version of this image](/images/slackernews-update-page.png)

![user-specific application upgrade instructions displayed in a dialog](/images/slackernews-update-instructions.png)
[View a larger version of this image](/images/slackernews-update-instructions.png)

To use the SDK API to check for available application updates and provide customer-specific upgrade instructions:

1. From your application, call the `api/v1/app/updates` endpoint to return available updates for the application instance. Use the response to display available upgrades for the customer.

   ```bash
   curl replicated:3000/api/v1/app/updates
   ```

   **Example response**:

   ```json
   [
     {
       "versionLabel": "0.1.15",
       "createdAt": "2023-05-12T15:48:45.000Z",
       "releaseNotes": "Awesome new features!"
     }
   ]
   ```

1. For each available release, add logic that displays the required upgrade commands with customer-specific values. To upgrade, users must first run `helm registry login` to authenticate to the Replicated registry. Then, they can run `helm upgrade`:

   1. Inject customer-specific values into the `helm registry login` command:

      ```bash
      helm registry login REGISTRY_DOMAIN --username EMAIL --password LICENSE_ID
      ```

      The `helm registry login` command requires the following components:

      * `REGISTRY_DOMAIN`: The domain for the registry where your Helm chart is pushed. The registry domain is either `registry.replicated.com` or a custom domain that you added.

      * `EMAIL`: The customer email address is available from the `/api/v1/license/info` endpoint in the `customerEmail` field.

      * `LICENSE_ID`: The customer license ID is available from the `/api/v1/license/info` endpoint in the `licenseID` field.

   1. Inject customer-specific values into the `helm upgrade` command:

      ```bash
      helm upgrade -n NAMESPACE RELEASE_NAME HELM_CHART_URL
      ```

      The following describes where the values in the `helm upgrade` command are available:

      * `NAMESPACE`: The release namespace is available from the `/api/v1/app/info` endpoint in the `currentRelease.helmReleaseNamespace` field.

      * `RELEASE_NAME`: The release name is available from the `/api/v1/app/info` endpoint in the `currentRelease.helmReleaseName` field.

      * `HELM_CHART_URL`: The URL of the Helm chart at the OCI registry is available from the `/api/v1/app/info` endpoint in the `helmChartURL` field.

### Revoke Access at Runtime When a License Expires

You can use the Replicated SDK API `/api/v1/license/fields/{field_name}` endpoint to revoke a customer's access to your application during runtime when their license expires.

To revoke access to your application when a license expires:

1. In the vendor portal, click **Customers**. Select the target customer and click the **Manage customer** tab. Alternatively, click **+ Create customer** to create a new customer.

1. Under **Expiration policy**:

   1. Enable **Customer's license has an expiration date**.

   1. For **When does this customer expire?**, use the calendar to set an expiration date for the license.

      <img alt="expiration policy field in the manage customer page" src="/images/customer-expiration-policy.png" width="500px"/>

      [View a larger version of this image](/images/customer-expiration-policy.png)

1. Install the Replicated SDK as a standalone component in your cluster. This is called _integration mode_. Installing in integration mode allows you to develop locally against the SDK API without needing to create releases for your application in the vendor portal. See [Developing Against the SDK API](/vendor/replicated-sdk-development).

1. In your application, use the `/api/v1/license/fields/expires_at` endpoint to get the `expires_at` field that you defined in the previous step.

   **Example:**

   ```bash
   curl replicated:3000/api/v1/license/fields/expires_at
   ```

   ```json
   {
     "name": "expires_at",
     "title": "Expiration",
     "description": "License Expiration",
     "value": "2023-05-30T00:00:00Z",
     "valueType": "String",
     "signature": {
       "v1": "c6rsImpilJhW0eK+Kk37jeRQvBpvWgJeXK2M..."
     }
   }
   ```

1. Add logic to your application to revoke access if the current date and time is more recent than the expiration date of the license.

1. (Recommended) Use signature verification in your application to ensure the integrity of the license field. See [Verifying License Field Signatures with the Replicated SDK API](/vendor/licenses-verify-fields-sdk-api).

================
File: docs/reference/replicated.mdx
================
# replicated

Manage your Commercial Software Distribution Lifecycle using Replicated

### Synopsis

The 'replicated' CLI allows Replicated customers (vendors) to manage their Commercial Software Distribution Lifecycle (CSDL) using the Replicated API.

### Options

```
      --app string     The app slug or app id to use in all calls
  -h, --help           help for replicated
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated api](replicated-cli-api) - Make ad-hoc API calls to the Replicated API
* [replicated app](replicated-cli-app) - Manage applications
* [replicated channel](replicated-cli-channel) - List channels
* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters.
* [replicated completion](replicated-cli-completion) - Generate completion script
* [replicated customer](replicated-cli-customer) - Manage customers
* [replicated default](replicated-cli-default) - Manage default values used by other commands
* [replicated installer](replicated-cli-installer) - Manage Kubernetes installers
* [replicated instance](replicated-cli-instance) - Manage instances
* [replicated login](replicated-cli-login) - Log in to Replicated
* [replicated logout](replicated-cli-logout) - Logout from Replicated
* [replicated registry](replicated-cli-registry) - Manage registries
* [replicated release](replicated-cli-release) - Manage app releases
* [replicated version](replicated-cli-version) - Print the current version and exit
* [replicated vm](replicated-cli-vm) - Manage test virtual machines.

================
File: docs/reference/template-functions-about.mdx
================
import UseCases from "../partials/template-functions/_use-cases.mdx"

# About Template Functions

This topic describes Replicated KOTS template functions, including information about use cases, template function contexts, and syntax.
+ +## Overview + +For Kubernetes manifest files for applications deployed by Replicated KOTS, Replicated provides a set of custom template functions based on the Go text/template library. + +<UseCases/> + +All functionality of the Go templating language, including if statements, loops, and variables, is supported with KOTS template functions. For more information about the Go library, see [text/template](https://golang.org/pkg/text/template/) in the Go documentation. + +### Supported File Types + +You can use KOTS template functions in Kubernetes manifest files for applications deployed by KOTS, such as: +* Custom resources in the `kots.io` API group like Application, Config, or HelmChart +* Custom resources in other API groups like Preflight, SupportBundle, or Backup +* Kubernetes objects like Deployments, Services, Secrets, or ConfigMaps +* Kubernetes Operators + +### Limitations + +* Not all fields in the Config and Application custom resources support templating. For more information, see [Application](/reference/custom-resource-application) and [Item Properties](/reference/custom-resource-config#item-properties) in _Config_. + +* Templating is not supported in the [Embedded Cluster Config](/reference/embedded-config) resource. + +* KOTS template functions are not directly supported in Helm charts. For more information, see [Helm Charts](#helm-charts) below. + +### Helm Charts + +KOTS template functions are _not_ directly supported in Helm charts. However, the HelmChart custom resource provides a way to map values rendered by KOTS template functions to Helm chart values. This allows you to use KOTS template functions with Helm charts without making changes to those Helm charts. + +For information about how to map values from the HelmChart custom resource to Helm chart `values.yaml` files, see [Setting Helm Chart Values with KOTS](/vendor/helm-optional-value-keys). + +### Template Function Rendering + +During application installation and upgrade, KOTS templates all Kubernetes manifest files in a release (except for the Config custom resource) at the same time during a single process. + +For the [Config](/reference/custom-resource-config) custom resource, KOTS templates each item separately so that config items can be used in templates for other items. For examples of this, see [Using Conditional Statements in Configuration Fields](/vendor/config-screen-conditional) and [Template Function Examples](/reference/template-functions-examples). + +## Syntax {#syntax} + +The KOTS template function syntax supports the following functionally equivalent delimiters: +* [`repl{{ ... }}`](#syntax-integer) +* [`{{repl ... }}`](#syntax-string) + +### Syntax Requirements + +KOTS template function syntax has the following requirements: +* In both the `repl{{ ... }}` and `{{repl ... }}` syntaxes, there must be no whitespace between `repl` and the `{{` delimiter. +* The manifests where KOTS template functions are used must be valid YAML. This is because the YAML manifests are linted before KOTS template functions are rendered. + +### `repl{{ ... }}` {#syntax-integer} + +This syntax is recommended for most use cases. + +Any quotation marks wrapped around this syntax are stripped during rendering. If you need the rendered value to be quoted, you can pipe into quote (`| quote`) or use the [`{{repl ... }}`](#syntax-string) syntax instead. 
+ +#### Integer Example + +```yaml +http: + port: repl{{ ConfigOption "load_balancer_port" }} +``` +```yaml +http: + port: 8888 +``` + +#### Example with `| quote` + +```yaml +customTag: repl{{ ConfigOption "tag" | quote }} +``` +```yaml +customTag: 'key: value' +``` + +#### If-Else Example + +```yaml +http: + port: repl{{ if ConfigOptionEquals "ingress_type" "load_balancer" }}repl{{ ConfigOption "load_balancer_port" }}repl{{ else }}8081repl{{ end }} +``` +```yaml +http: + port: 8081 +``` + +For more examples, see [Template Function Examples](/reference/template-functions-examples). + +### `{{repl ... }}` {#syntax-string} + +This syntax can be useful when having the delimiters outside the template function improves readability of the YAML, such as in multi-line statements or if-else statements. + +To use this syntax at the beginning of a value in YAML, it _must_ be wrapped in quotes because you cannot start a YAML value with the `{` character and manifests consumed by KOTS must be valid YAML. When this syntax is wrapped in quotes, the rendered value is also wrapped in quotes. + +#### Example With Quotes + +The following example is wrapped in quotes because it is used at the beginning of a statement in YAML: + +```yaml +customTag: '{{repl ConfigOption "tag" }}' +``` +```yaml +customTag: 'key: value' +``` + +#### If-Else Example +```yaml +my-service: + type: '{{repl if ConfigOptionEquals "ingress_type" "load_balancer" }}LoadBalancer{{repl else }}ClusterIP{{repl end }}' +``` +```yaml +my-service: + type: 'LoadBalancer' +``` + +For more examples, see [Template Function Examples](/reference/template-functions-examples). + +## Contexts {#contexts} + +KOTS template functions are grouped into different contexts, depending on the phase of the application lifecycle when the function is available and the context of the data that is provided. + +### Static Context + +The context necessary to render the static template functions is always available. + +The static context also includes the Masterminds Sprig function library. For more information, see [Sprig Function Documentation](http://masterminds.github.io/sprig/) on the sprig website. + +For a list of all KOTS template functions available in the static context, see [Static context](template-functions-static-context). + +### Config Context + +Template functions in the config context are available when rendering an application that includes a Config custom resource. +At execution time, template functions in the config context also can use the static context functions. + +For a list of all KOTS template functions available in the config context, see [Config context](template-functions-config-context). + +### License Context + +Template functions in the license context have access to customer license and version data. + +For a list of all KOTS template functions available in the license context, see [License context](template-functions-license-context). + +### kURL Context + +Template functions in the kURL context have access to information about applications installed in embedded clusters created by Replicated kURL. + +For a list of all KOTS template functions available in the kURL context, see [kURL context](template-functions-kurl-context). + +### Identity Context + +Template functions in the Identity context have access to Replicated identity service information. + +For a list of all KOTS template functions available in the identity context, see [Identity context](template-functions-identity-context). 

================
File: docs/reference/template-functions-config-context.md
================
# Config Context

## ConfigOption

```go
func ConfigOption(optionName string) string
```

Returns the value of the config option as a string.

For information about the config screen and associated options, see [Config](custom-resource-config) in the _Custom Resources_ section.

```yaml
'{{repl ConfigOption "hostname" }}'
```

For config options of type `file`, `ConfigOption` returns the base64 **encoded** value of the file.

```yaml
'{{repl ConfigOption "ssl_key"}}'
```

To use files in a Secret, use `ConfigOption`:
```yaml
apiVersion: v1
kind: Secret
metadata:
  name: tls-secret
type: kubernetes.io/tls
data:
  tls.crt: '{{repl ConfigOption "tls_certificate_file" }}'
  tls.key: '{{repl ConfigOption "tls_private_key_file" }}'
```

For more information about using TLS certificates, see [Using TLS Certificates](../vendor/packaging-using-tls-certs).

## ConfigOptionData

```go
func ConfigOptionData(optionName string) string
```

`ConfigOptionData` returns the base64 **decoded** value of a `file` config option.

```yaml
'{{repl ConfigOptionData "ssl_key"}}'
```

To use files in a ConfigMap, use `ConfigOptionData`:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: tls-config
data:
  tls.crt: |
    repl{{- ConfigOptionData "tls_certificate_file" | nindent 4 }}

  tls.key: |
    repl{{- ConfigOptionData "tls_private_key_file" | nindent 4 }}
```

## ConfigOptionFilename

```go
func ConfigOptionFilename(optionName string) string
```

`ConfigOptionFilename` returns the filename associated with a `file` config option.
It returns an empty string if used with config options of other types.

```yaml
'{{repl ConfigOptionFilename "pom_file"}}'
```

As an example, if you have the following Config Spec defined:

```yaml
apiVersion: kots.io/v1beta1
kind: Config
metadata:
  name: my-application
spec:
  groups:
    - name: java_settings
      title: Java Settings
      description: Configures the Java Server build parameters
      items:
        - name: pom_file
          type: file
          required: true
```

You can use `ConfigOptionFilename` in a Pod Spec to mount a file like so:
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: configmap-demo-pod
spec:
  containers:
    - name: some-java-app
      image: busybox
      command: ["sh"]
      args:
        - "-c"
        - 'cat /config/{{repl ConfigOptionFilename "pom_file"}}'
      volumeMounts:
        - name: config
          mountPath: "/config"
          readOnly: true
  volumes:
    - name: config
      configMap:
        name: demo-configmap
        items:
          - key: data_key_one
            path: repl{{ ConfigOptionFilename "pom_file" }}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: demo-configmap
data:
  data_key_one: repl{{ ConfigOptionData "pom_file" }}
```

## ConfigOptionEquals

```go
func ConfigOptionEquals(optionName string, expectedValue string) bool
```

Returns true if the configuration option value is equal to the supplied value.

```yaml
'{{repl ConfigOptionEquals "http_enabled" "1" }}'
```

## ConfigOptionNotEquals

```go
func ConfigOptionNotEquals(optionName string, expectedValue string) bool
```

Returns true if the configuration option value is not equal to the supplied value.

```yaml
'{{repl ConfigOptionNotEquals "http_enabled" "1" }}'
```

## LocalRegistryAddress

```go
func LocalRegistryAddress() string
```

Returns the local registry host or host/namespace that's configured.
This will always return everything before the image name and tag.
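
For example, if the user configures a local registry at `registry.somebigbank.com` with the namespace `my-app`, then `LocalRegistryAddress` returns `registry.somebigbank.com/my-app`:

```yaml
'{{repl LocalRegistryAddress }}'
```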
+ +## LocalRegistryHost + +```go +func LocalRegistryHost() string +``` + +Returns the host of the local registry that the user configured. Alternatively, for air gap installations with Replicated Embedded Cluster or Replicated kURL, LocalRegistryHost returns the host of the built-in registry. + +Includes the port if one is specified. + +## LocalRegistryNamespace + +```go +func LocalRegistryNamespace() string +``` + +Returns the namespace of the local registry that the user configured. Alternatively, for air gap installations with Embedded Cluster or kURL, LocalRegistryNamespace returns the namespace of the built-in registry. + +## LocalImageName + +```go +func LocalImageName(remoteImageName string) string +``` + +Given a `remoteImageName`, rewrite the `remoteImageName` so that it can be pulled to local hosts. + +A common use case for the `LocalImageName` function is to ensure that a Kubernetes Operator can determine the names of container images on Pods created at runtime. For more information, see [Referencing Images](/vendor/operator-referencing-images) in the _Packaging a Kubernetes Operator Application_ section. + +`LocalImageName` rewrites the `remoteImageName` in one of the following ways, depending on if a private registry is configured and if the image must be proxied: + +* If there is a private registry configured in the customer's environment, such as in air gapped environments, rewrite `remoteImageName` to reference the private registry locally. For example, rewrite `elasticsearch:7.6.0` as `registry.somebigbank.com/my-app/elasticsearch:7.6.0`. + +* If there is no private registry configured in the customer's environment, but the image must be proxied, rewrite `remoteImageName` so that the image can be pulled through the proxy registry. For example, rewrite `"quay.io/orgname/private-image:v1.2.3"` as `proxy.replicated.com/proxy/app-name/quay.io/orgname/private-image:v1.2.3`. + +* If there is no private registry configured in the customer's environment and the image does not need to be proxied, return `remoteImageName` without changes. + +For more information about the Replicated proxy registry, see [About the Proxy Registry](/vendor/private-images-about). + +## LocalRegistryImagePullSecret + +```go +func LocalRegistryImagePullSecret() string +``` + +Returns the base64 encoded local registry image pull secret value. +This is often needed when an operator is deploying images to a namespace that is not managed by Replicated KOTS. +Image pull secrets must be present in the namespace of the pod. + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: my-image-pull-secret + namespace: my-namespace +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: '{{repl LocalRegistryImagePullSecret }}' +--- +apiVersion: v1 +kind: Pod +metadata: + name: dynamic-pod + namespace: my-namespace +spec: + containers: + - image: '{{repl LocalImageName "registry.replicated.com/my-app/my-image:abcdef" }}' + name: my-container + imagePullSecrets: + - name: my-image-pull-secret +``` + +## ImagePullSecretName + +```go +func ImagePullSecretName() string +``` + +Returns the name of the image pull secret that can be added to pod specs that use private images. +The secret will be automatically created in all application namespaces. +It will contain authentication information for any private registry used with the application. 
+ +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-deployment +spec: + template: + spec: + imagePullSecrets: + - name: repl{{ ImagePullSecretName }} +``` + +## HasLocalRegistry + +```go +func HasLocalRegistry() bool +``` + +Returns true if the environment is configured to rewrite images to a local registry. +HasLocalRegistry is always true for air gap installations. HasLocalRegistry is true in online installations if the user pushed images to a local registry. + +================ +File: docs/reference/template-functions-examples.mdx +================ +import IntegerComparison from "../partials/template-functions/_integer-comparison.mdx" +import StringComparison from "../partials/template-functions/_string-comparison.mdx" +import NeComparison from "../partials/template-functions/_ne-comparison.mdx" +import GoSprig from "../partials/template-functions/_go-sprig.mdx" +import UseCases from "../partials/template-functions/_use-cases.mdx" + +# Template Function Examples + +This topic provides examples of how to use Replicated KOTS template functions in various common use cases. For more information about working with KOTS template functions, including the supported syntax and the types of files where KOTS template functions can be used, see [About Template Functions](template-functions-about). + +## Overview + +<GoSprig/> + +<UseCases/> + +For examples demonstrating these use cases and more, see the sections below. + +## Comparison Examples + +This section includes examples of how to use KOTS template functions to compare different types of data. + +### Boolean Comparison + +Boolean values can be used in comparisons to evaluate if a given statement is true or false. Because many KOTS template functions return string values, comparing boolean values often requires using the KOTS [ParseBool](/reference/template-functions-static-context#parsebool) template function to return the boolean represented by the string. + +One common use case for working with boolean values is to check that a given field is present in the customer's license. For example, you might need to show a configuration option on the KOTS Admin Console **Config** page only when the customer's license has a certain entitlement. + +The following example creates a conditional statement in the KOTS Config custom resource that evaluates to true when a specified license field is present in the customer's license _and_ the customer enables a specified configuration option on the Admin Console **Config** page. + +```yaml +# KOTS Config custom resource +apiVersion: kots.io/v1beta1 +kind: Config +metadata: + name: config-sample +spec: + groups: + - name: example_group + title: Example Config + items: + - name: radio_example + title: Select One + type: radio + items: + - name: option_one + title: Option One + - name: option_two + title: Option Two + - name: conditional_item + title: Conditional Item + type: text + # Display this item only when the customer enables the option_one config field *and* + # has the feature-1 entitlement in their license + when: repl{{ and (LicenseFieldValue "feature-1" | ParseBool) (ConfigOptionEquals "radio_example" "option_one")}} +``` + +This example uses the following KOTS template functions: +* [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue) to return the string value of a boolean type license field named `feature-1` + :::note + The LicenseFieldValue template function always returns a string, regardless of the license field type. 
+ ::: +* [ParseBool](/reference/template-functions-static-context#parsebool) to convert the string returned by the LicenseFieldValue template function to a boolean +* [ConfigOptionEquals](/reference/template-functions-config-context#configoptionequals) to return a boolean that evaluates to true if the configuration option value is equal to the supplied value + +### Integer Comparison + +Integer values can be compared using operators such as greater than, less than, equal to, and so on. Because many KOTS template functions return string values, working with integer values often requires using another function to return the integer represented by the string, such as: +* KOTS [ParseInt](/reference/template-functions-static-context#parseint), which returns the integer value represented by the string with the option to provide a `base` other than 10 +* Sprig [atoi](https://masterminds.github.io/sprig/conversion.html), which is equivalent to ParseInt(s, 10, 0), converted to type integer + +A common use case for comparing integer values with KOTS template functions is to display different configuration options on the KOTS Admin Console **Config** page depending on integer values from the customer's license. For example, licenses might include an entitlement that defines the number of seats the customer is entitled to. In this case, it can be useful to conditionally display or hide certain fields on the **Config** page depending on the customer's team size. + +<IntegerComparison/> + +### String Comparison + +A common use case for string comparison is to compare the rendered value of a KOTS template function against a string to conditionally show or hide fields on the KOTS Admin Console **Config** page depending on details about the customer's environment. For example, a string comparison can be used to check the Kubernetes distribution of the cluster where an application is deployed. + +<StringComparison/> + +### Not Equal To Comparison + +It can be useful to compare the rendered value of a KOTS template function against another value to check if the two values are different. For example, you can conditionally show fields on the KOTS Admin Console **Config** page only when the Kubernetes distribution of the cluster where the application is deployed is _not_ [Replicated embedded cluster](/vendor/embedded-overview). + +<NeComparison/> + +### Logical AND Comparison + +Logical comparisons such as AND, OR, and NOT can be used with KOTS template functions. A common use case for logical AND comparisons is to construct more complex conditional statements where it is necessary that two different conditions are both true. + +The following example shows how to use an `and` operator that evaluates to true when two different configuration options on the Admin Console **Config** page are both enabled. This example uses the KOTS [ConfigOptionEquals](/reference/template-functions-config-context#configoptionequals) template function to return a boolean that evaluates to true if the configuration option value is equal to the supplied value. 
+ +```yaml +# KOTS Config custom resource +apiVersion: kots.io/v1beta1 +kind: Config +metadata: + name: config-sample +spec: + groups: + - name: example_group + title: Example Config + items: + - name: radio_example + title: Select One Example + type: radio + items: + - name: option_one + title: Option One + - name: option_two + title: Option Two + - name: boolean_example + title: Boolean Example + type: bool + default: "0" + - name: conditional_item + title: Conditional Item + type: text + # Display this item only when *both* specified config options are enabled + when: repl{{ and (ConfigOptionEquals "radio_example" "option_one") (ConfigOptionEquals "boolean_example" "1")}} +``` + +As shown below, when both `Option One` and `Boolean Example` are selected, the conditional statement evaluates to true and the `Conditional Item` field is displayed: + +<img alt="Conditional item displayed" src="/images/conditional-item-true.png" width="550px"/> + +[View a larger version of this image](/images/conditional-item-true.png) + +Alternatively, if either `Option One` or `Boolean Example` is not selected, then the conditional statement evaluates to false and the `Conditional Item` field is not displayed: + +<img alt="Option two selected" src="/images/conditional-item-false-option-two.png" width="550px"/> + +[View a larger version of this image](/images/conditional-item-false-option-two.png) + +<img alt="Boolean field deselected" src="/images/conditional-item-false-boolean.png" width="550px"/> + +[View a larger version of this image](/images/conditional-item-false-boolean.png) + +## Conditional Statement Examples + +This section includes examples of using KOTS template functions to construct conditional statements. Conditional statements can be used with KOTS template functions to render different values depending on a given condition. + +### If-Else Statements + +A common use case for if-else statements with KOTS template functions is to set values for resources or objects deployed by your application, such as custom annotations or service types, based on user-specific data. + +This section includes examples of both single line and multi-line if-else statements. Using multi-line formatting can be useful to improve the readability of YAML files when longer or more complex if-else statements are needed. + +Multi-line if-else statements can be constructed using YAML block scalars and block chomping characters to ensure the rendered result is valid YAML. A _folded_ block scalar style is denoted using the greater than (`>`) character. With the folded style, single line breaks in the string are treated as a space. Additionally, the block chomping minus (`-`) character is used to remove all the line breaks at the end of a string. For more information about working with these characters, see [Block Style Productions](https://yaml.org/spec/1.2.2/#chapter-8-block-style-productions) in the YAML documentation. + +:::note +For Helm-based applications that need to use more complex or nested if-else statements, you can alternatively use templating within your Helm chart `templates` rather than in the KOTS HelmChart custom resource. For more information, see [If/Else](https://helm.sh/docs/chart_template_guide/control_structures/#ifelse) in the Helm documentation. 
+::: + +#### Single Line + +The following example shows if-else statements used in the KOTS HelmChart custom resource `values` field to render different values depending on if the user selects a load balancer or an ingress controller as the ingress type for the application. This example uses the KOTS [ConfigOptionEquals](/reference/template-functions-config-context#configoptionequals) template function to return a boolean that evaluates to true if the configuration option value is equal to the supplied value. + +```yaml +# KOTS HelmChart custom resource +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: my-app +spec: + chart: + name: my-app + chartVersion: 0.23.0 + values: + services: + my-service: + enabled: true + appName: ["my-app"] + # Render the service type based on the user's selection + # '{{repl ...}}' syntax is used for `type` to improve readability of the if-else statement and render a string + type: '{{repl if ConfigOptionEquals "ingress_type" "load_balancer" }}LoadBalancer{{repl else }}ClusterIP{{repl end }}' + ports: + http: + enabled: true + # Render the HTTP port for the service depending on the user's selection + # repl{{ ... }} syntax is used for `port` to render an integer value + port: repl{{ if ConfigOptionEquals "ingress_type" "load_balancer" }}repl{{ ConfigOption "load_balancer_port" }}repl{{ else }}8081repl{{ end }} + protocol: HTTP + targetPort: 8081 +``` + +#### Multi-Line in KOTS HelmChart Values + +The following example uses a multi-line if-else statement in the KOTS HelmChart custom resource to render the path to the Replicated SDK image depending on if the user pushed images to a local private registry. + +This example uses the following KOTS template functions: +* [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry) to return true if the environment is configured to rewrite images to a local registry +* [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost) to return the local registry host configured by the user +* [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace) to return the local registry namespace configured by the user + +:::note +This example uses the `{{repl ...}}` syntax rather than the `repl{{ ... }}` syntax to improve readability in the YAML file. However, both syntaxes are supported for this use case. For more information, see [Syntax](/reference/template-functions-about#syntax) in _About Template Functions_. +::: + +```yaml +# KOTS HelmChart custom resource +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: samplechart +spec: + values: + images: + replicated-sdk: >- + {{repl if HasLocalRegistry -}} + {{repl LocalRegistryHost }}/{{repl LocalRegistryNamespace }}/replicated-sdk:1.0.0-beta.29 + {{repl else -}} + docker.io/replicated/replicated-sdk:1.0.0-beta.29 + {{repl end}} +``` + +Given the example above, if the user is _not_ using a local registry, then the `replicated-sdk` value in the Helm chart is set to the location of the image on the default docker registry, as shown below: + +```yaml +# Helm chart values file + +images: + replicated-sdk: 'docker.io/replicated/replicated-sdk:1.0.0-beta.29' +``` + +#### Multi-Line in Secret Object + +The following example uses multi-line if-else statements in a Secret object deployed by KOTS to conditionally set the database hostname, port, username, and password depending on if the customer uses the database embedded with the application or brings their own external database. 
+

This example uses the following KOTS template functions:
* [ConfigOptionEquals](/reference/template-functions-config-context#configoptionequals) to return a boolean that evaluates to true if the configuration option value is equal to the supplied value
* [ConfigOption](/reference/template-functions-config-context#configoption) to return the user-supplied value for the specified configuration option
* [Base64Encode](/reference/template-functions-static-context#base64encode) to encode the string with base64

:::note
This example uses the `{{repl ...}}` syntax rather than the `repl{{ ... }}` syntax to improve readability in the YAML file. However, both syntaxes are supported for this use case. For more information, see [Syntax](/reference/template-functions-about#syntax) in _About Template Functions_.
:::

```yaml
# Postgres Secret
apiVersion: v1
kind: Secret
metadata:
  name: postgres
data:
  # Render the value for the database hostname depending on if an embedded or
  # external db is used.
  # Also, base64 encode the rendered value.
  DB_HOST: >-
    {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}}
    {{repl Base64Encode "postgres" }}
    {{repl else -}}
    {{repl ConfigOption "external_postgres_host" | Base64Encode }}
    {{repl end}}
  DB_PORT: >-
    {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}}
    {{repl Base64Encode "5432" }}
    {{repl else -}}
    {{repl ConfigOption "external_postgres_port" | Base64Encode }}
    {{repl end}}
  DB_USER: >-
    {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}}
    {{repl Base64Encode "postgres" }}
    {{repl else -}}
    {{repl ConfigOption "external_postgres_user" | Base64Encode }}
    {{repl end}}
  DB_PASSWORD: >-
    {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}}
    {{repl ConfigOption "embedded_postgres_password" | Base64Encode }}
    {{repl else -}}
    {{repl ConfigOption "external_postgres_password" | Base64Encode }}
    {{repl end}}
```

### Ternary Operators

Ternary operators are useful for templating strings where certain values within the string must be rendered differently depending on a given condition. Compared to if-else statements, ternary operators are a better fit when only a small portion of a string, rather than the entire value, needs to be rendered conditionally. For example, a common use case for ternary operators is to template the path to an image repository based on user-supplied values.

The following example uses ternary operators to render the registry and repository for a private nginx image depending on if a local image registry is used. This example uses the following KOTS template functions:
* [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry) to return true if the environment is configured to rewrite images to a local registry
* [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost) to return the local registry host configured by the user
* [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace) to return the local registry namespace configured by the user

```yaml
# KOTS HelmChart custom resource
apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: samplechart
spec:
  values:
    image:
      # If a local registry is configured, use the local registry host.
      # Otherwise, use proxy.replicated.com
      registry: repl{{ HasLocalRegistry | ternary LocalRegistryHost "proxy.replicated.com" }}
      # If a local registry is configured, use the local registry's namespace.
      # Otherwise, use proxy/my-app/quay.io/my-org
      repository: repl{{ HasLocalRegistry | ternary LocalRegistryNamespace "proxy/my-app/quay.io/my-org" }}/nginx
      tag: v1.0.1
```

## Formatting Examples

This section includes examples of how to format the rendered output of KOTS template functions.

In addition to the examples in this section, KOTS template functions in the Static context include several options for formatting values, such as converting strings to upper or lower case and trimming leading and trailing space characters. For more information, see [Static Context](/reference/template-functions-static-context).

### Indentation

When using template functions within nested YAML, it is important that the rendered template functions are indented correctly so that the rendered YAML is valid. A common use case for adding indentation to KOTS template functions is when templating annotations in the metadata of resources or objects deployed by your application based on user-supplied values.

The [nindent](https://masterminds.github.io/sprig/strings.html) function can be used to prepend a new line to the beginning of the string and indent the string by a specified number of spaces.

#### Indent Templated Helm Chart Values

The following example shows templating a Helm chart value that sets annotations for an Ingress object. This example uses the KOTS [ConfigOption](/reference/template-functions-config-context#configoption) template function to return user-supplied annotations from the Admin Console **Config** page. It also uses [nindent](https://masterminds.github.io/sprig/strings.html) to indent the rendered value ten spaces.

```yaml
# KOTS HelmChart custom resource

apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: myapp
spec:
  values:
    services:
      myservice:
        annotations: repl{{ ConfigOption "additional_annotations" | nindent 10 }}
```

#### Indent Templated Annotations in Manifest Files

The following example shows templating annotations for an Ingress object. This example uses the KOTS [ConfigOption](/reference/template-functions-config-context#configoption) template function to return user-supplied annotations from the Admin Console **Config** page. It also uses [nindent](https://masterminds.github.io/sprig/strings.html) to indent the rendered value four spaces.

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example-ingress
  annotations:
    kots.io/placeholder: |-
      repl{{ ConfigOption "ingress_annotations" | nindent 4 }}
```

### Render Quoted Values

To wrap a rendered value in quotes, you can pipe the result from KOTS template functions with the `repl{{ ... }}` syntax into quotes using `| quote`. Or, you can use the `'{{repl ... }}'` syntax instead.

One use case for quoted values in YAML is when indicator characters are included in values. In YAML, indicator characters (`-`, `?`, `:`) have special semantics and must be escaped if used in values. For more information, see [Indicator Characters](https://yaml.org/spec/1.2.2/#53-indicator-characters) in the YAML documentation.

#### Example with `'{{repl ... }}'` Syntax

```yaml
customTag: '{{repl ConfigOption "tag" }}'
```
#### Example with `| quote`

```yaml
customTag: repl{{ ConfigOption "tag" | quote }}
```

The result for both examples is:

```yaml
customTag: 'key: value'
```

## Variables Example

This section includes an example of using variables with KOTS template functions. For more information, see [Variables](https://pkg.go.dev/text/template#hdr-Variables) in the Go documentation.

### Using Variables to Generate TLS Certificates in JSON

You can use the Sprig [genCA](https://masterminds.github.io/sprig/crypto.html) and [genSignedCert](https://masterminds.github.io/sprig/crypto.html) functions with KOTS template functions to generate certificate authorities (CAs) and signed certificates in JSON. One use case for this is to generate default CAs, certificates, and keys that users can override with their own values on the Admin Console **Config** page.

The Sprig [genCA](https://masterminds.github.io/sprig/crypto.html) and [genSignedCert](https://masterminds.github.io/sprig/crypto.html) functions require the subject's common name and the certificate's validity duration in days. The `genSignedCert` function also requires the CA that will sign the certificate. You can use variables and KOTS template functions to provide the necessary parameters when calling these functions.

The following example shows how to use variables and KOTS template functions in the `default` property of a [`hidden`](/reference/custom-resource-config#hidden) item to pass parameters to the `genCA` and `genSignedCert` functions and generate a CA, certificate, and key. This example uses a `hidden` item (which is an item that is not displayed on the **Config** page) to generate the certificate chain because variables used in the KOTS Config custom resource can only be accessed from the same item where they were declared. For this reason, `hidden` items can be useful for evaluating complex templates.

This example uses the following:
* KOTS [ConfigOption](/reference/template-functions-config-context#configoption) template function to render the user-supplied value for the ingress hostname. This is passed as a parameter to the [genCA](https://masterminds.github.io/sprig/crypto.html) and [genSignedCert](https://masterminds.github.io/sprig/crypto.html) functions
* Sprig [genCA](https://masterminds.github.io/sprig/crypto.html) and [genSignedCert](https://masterminds.github.io/sprig/crypto.html) functions to generate a CA and a certificate signed by the CA
* Sprig [dict](https://masterminds.github.io/sprig/dicts.html), [set](https://masterminds.github.io/sprig/dicts.html), and [dig](https://masterminds.github.io/sprig/dicts.html) dictionary functions to create a dictionary with entries for both the CA and the certificate, then traverse the dictionary to return the values of the CA, certificate, and key
* [toJson](https://masterminds.github.io/sprig/defaults.html) and [fromJson](https://masterminds.github.io/sprig/defaults.html) Sprig functions to encode the CA and certificate into a JSON string, then decode the JSON for the purpose of displaying the values on the **Config** page as defaults

:::important
Default values are treated as ephemeral. The following certificate chain is recalculated each time the application configuration is modified. Before using this example with your application, be sure that your application can handle updating these parameters dynamically.
+::: + +```yaml +apiVersion: kots.io/v1beta1 +kind: Config +metadata: + name: config-sample +spec: + groups: + - name: example_settings + title: My Example Config + items: + - name: ingress_hostname + title: Ingress Hostname + help_text: Enter a DNS hostname to use as the cert's CN. + type: text + - name: tls_json + title: TLS JSON + type: textarea + hidden: true + default: |- + repl{{ $ca := genCA (ConfigOption "ingress_hostname") 365 }} + repl{{ $tls := dict "ca" $ca }} + repl{{ $cert := genSignedCert (ConfigOption "ingress_hostname") (list ) (list (ConfigOption "ingress_hostname")) 365 $ca }} + repl{{ $_ := set $tls "cert" $cert }} + repl{{ toJson $tls }} + - name: tls_ca + title: Signing Authority + type: textarea + default: repl{{ fromJson (ConfigOption "tls_json") | dig "ca" "Cert" "" }} + - name: tls_cert + title: TLS Cert + type: textarea + default: repl{{ fromJson (ConfigOption "tls_json") | dig "cert" "Cert" "" }} + - name: tls_key + title: TLS Key + type: textarea + default: repl{{ fromJson (ConfigOption "tls_json") | dig "cert" "Key" "" }} +``` + +The following image shows how the default values for the CA, certificate, and key are displayed on the **Config** page: + +<img alt="Default values for CA, certificate, and key on the Config page" src="/images/certificate-chain-default-values.png" width="550px"/> + +[View a larger version of this image](/images/certificate-chain-default-values.png) + +## Additional Examples + +The following topics include additional examples of using KOTS template functions in Kubernetes manifests deployed by KOTS or in KOTS custom resources: + +* [Add Status Informers](/vendor/admin-console-display-app-status#add-status-informers) in _Adding Resource Status Informers_ +* [Conditionally Including or Excluding Resources](/vendor/packaging-include-resources) +* [Example: Including Optional Helm Charts](/vendor/helm-optional-charts) +* [Example: Adding Database Configuration Options](/vendor/tutorial-adding-db-config) +* [Templating Annotations](/vendor/resources-annotations-templating) +* [Tutorial: Set Helm Chart Values with KOTS](/vendor/tutorial-config-setup) + +================ +File: docs/reference/template-functions-identity-context.md +================ +# Identity Context + +## IdentityServiceEnabled + +```go +func IdentityServiceEnabled() bool +``` + +Returns true if the Replicated identity service has been enabled and configured by the end customer. + +```yaml +apiVersion: apps/v1 +kind: Deployment +... + env: + - name: IDENTITY_ENABLED + value: repl{{ IdentityServiceEnabled }} +``` + + +## IdentityServiceClientID + +```go +func IdentityServiceClientID() string +``` + +Returns the client ID required for the application to connect to the identity service OIDC server. + +```yaml +apiVersion: apps/v1 +kind: Deployment +... + env: + - name: CLIENT_ID + value: repl{{ IdentityServiceClientID }} +``` + + +## IdentityServiceClientSecret + +```go +func IdentityServiceClientSecret() (string, error) +``` + +Returns the client secret required for the application to connect to the identity service OIDC server. + +```yaml +apiVersion: v1 +kind: Secret +... +data: + CLIENT_SECRET: repl{{ IdentityServiceClientSecret | b64enc }} +``` + + +## IdentityServiceRoles + +```go +func IdentityServiceRoles() map[string][]string +``` + +Returns a list of groups specified by the customer mapped to a list of roles as defined in the Identity custom resource manifest file. 
+

For more information about roles in the Identity custom resource, see [Identity](custom-resource-identity#roles) in the _Custom resources_ section.

```yaml
apiVersion: apps/v1
kind: Deployment
...
  env:
    - name: RESTRICTED_GROUPS
      value: repl{{ IdentityServiceRoles | keys | toJson }}
```

## IdentityServiceName

```go
func IdentityServiceName() string
```

Returns the Service name for the identity service OIDC server.

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
...
  - path: /dex
    backend:
      service:
        name: repl{{ IdentityServiceName }}
        port:
          number: repl{{ IdentityServicePort }}
```

## IdentityServicePort

```go
func IdentityServicePort() string
```

Returns the Service port number for the identity service OIDC server.

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
...
  - path: /dex
    backend:
      service:
        name: repl{{ IdentityServiceName }}
        port:
          number: repl{{ IdentityServicePort }}
```

================
File: docs/reference/template-functions-kurl-context.md
================
# kURL Context

## kURL Context Functions

For applications installed in embedded clusters created with Replicated kURL, you can use template functions to show all options the cluster was installed with.

The Installer custom resource reflects both install script changes made by posting YAML to the kURL API and changes made with `-s` flags at runtime. These functions are not available on the config page.

KurlBool, KurlInt, KurlString, and KurlOption all take a string yamlPath as a parameter. This path is the path from the manifest file, delimited between add-on and subfield by a period (`.`). For example, the kURL Kubernetes version can be accessed as `{{repl KurlString "Kubernetes.Version" }}`.

KurlBool, KurlInt, and KurlString return a bool, an integer, and a string value, respectively. If used on a valid field but with the wrong type, these functions return the falsy value for their type: `false`, `0`, and `""`, respectively. The `KurlOption` function converts all bool, int, and string fields to string. All functions return falsy values if there is nothing at the yamlPath specified, or if these functions are run in a cluster with no Installer custom resource (that is, not a cluster created by kURL).

The following sections describe each of the kURL context functions:

## KurlBool

```go
func KurlBool(yamlPath string) bool
```

Returns the value at the yamlPath if there is a valid boolean there, or false if there is not.

```yaml
'{{repl KurlBool "Docker.NoCEonEE" }}'
```

## KurlInt

```go
func KurlInt(yamlPath string) int
```

Returns the value at the yamlPath if there is a valid integer there, or 0 if there is not.

```yaml
'{{repl KurlInt "Rook.CephReplicaCount" }}'
```

## KurlString

```go
func KurlString(yamlPath string) string
```

Returns the value at the yamlPath if there is a valid string there, or "" if there is not.

```yaml
'{{repl KurlString "Kubernetes.Version" }}'
```

## KurlOption

```go
func KurlOption(yamlPath string) string
```

Returns the value at the yamlPath if there is a valid string, int, or bool value there, or "" if there is not. Int and Bool values are converted to string values.

```yaml
'{{repl KurlOption "Rook.CephReplicaCount" }}'
```

## KurlAll

```go
func KurlAll() string
```

Returns all values in the Installer custom resource as key:value pairs, sorted by key.
+

```yaml
'{{repl KurlAll }}'
```

================
File: docs/reference/template-functions-license-context.md
================
# License Context

## LicenseFieldValue
```go
func LicenseFieldValue(name string) string
```
LicenseFieldValue returns the value of the specified license field. LicenseFieldValue accepts custom license fields and all built-in license fields. For a list of all built-in fields, see [Built-In License Fields](/vendor/licenses-using-builtin-fields).

LicenseFieldValue always returns a string, regardless of the license field type. To return integer or boolean values, you need to use the [ParseInt](/reference/template-functions-static-context#parseint) or [ParseBool](/reference/template-functions-static-context#parsebool) template function to convert the string value.

#### String License Field

The following example returns the value of the built-in `customerName` license field:

```yaml
customerName: '{{repl LicenseFieldValue "customerName" }}'
```
#### Integer License Field

The following example returns the value of a custom integer license field named `numSeats`:

```yaml
numSeats: repl{{ LicenseFieldValue "numSeats" | ParseInt }}
```
This example uses [ParseInt](/reference/template-functions-static-context#parseint) to convert the returned value to an integer.

#### Boolean License Field

The following example returns the value of a custom boolean license field named `feature-1`:

```yaml
feature-1: repl{{ LicenseFieldValue "feature-1" | ParseBool }}
```
This example uses [ParseBool](/reference/template-functions-static-context#parsebool) to convert the returned value to a boolean.

## LicenseDockerCfg
```go
func LicenseDockerCfg() string
```
LicenseDockerCfg returns a value that can be written to an image pull secret if you need to deploy manually. Replicated KOTS creates and injects this secret automatically under normal conditions, but some deployments (for example, deployments with static, additional namespaces) may need to include it explicitly.

```yaml
apiVersion: v1
kind: Secret
type: kubernetes.io/dockerconfigjson
metadata:
  name: myapp-registry
  namespace: my-other-namespace
data:
  .dockerconfigjson: repl{{ LicenseDockerCfg }}
```

## Sequence

```go
func Sequence() int64
```
Sequence is the sequence number of the deployed application. This starts at 0 for each installation, and increases with every app update, config change, license update, and registry setting change.

```yaml
'{{repl Sequence }}'
```

## Cursor

```go
func Cursor() string
```
Cursor is the channel sequence of the app. For instance, if 5 releases have been promoted to the channel that the app is running on, then this returns the string `5`.

```yaml
'{{repl Cursor }}'
```

## ChannelName

```go
func ChannelName() string
```
ChannelName is the name of the deployed channel of the app.

```yaml
'{{repl ChannelName }}'
```

## VersionLabel

```go
func VersionLabel() string
```
VersionLabel is the semantic version of the app, as specified when promoting a release to a channel.

```yaml
'{{repl VersionLabel }}'
```

## ReleaseNotes

```go
func ReleaseNotes() string
```
ReleaseNotes is the release notes of the current version of the app.

```yaml
'{{repl ReleaseNotes }}'
```

## IsAirgap

```go
func IsAirgap() bool
```
IsAirgap is `true` when the app is installed by uploading an airgap package, and false otherwise.
+ +```yaml +'{{repl IsAirgap }}' +``` + +================ +File: docs/reference/template-functions-static-context.md +================ +# Static Context + +## About Mastermind Sprig + +Many of the utility functions provided come from sprig, a third-party library of Go template functions. +For more information, see [Sprig Function Documentation](https://masterminds.github.io/sprig/) on the sprig website. + +## Certificate Functions + +### PrivateCACert + +>Introduced in KOTS v1.117.0 + +```yaml +func PrivateCACert() string +``` + +PrivateCACert returns the name of a ConfigMap that contains private CA certificates provided by the end user. For Embedded Cluster installations, these certificates are provided with the `--private-ca` flag for the `install` command. For KOTS installations, the user provides the ConfigMap using the `--private-ca-configmap` flag for the `install` command. + +You can use this template function to mount the specified ConfigMap so your containers can access the internet through enterprise proxies that issue their own TLS certificates in order to inspect traffic. + +:::note +This function will return the name of the ConfigMap even if the ConfigMap has no entries. If no ConfigMap exists, this function returns the empty string. +::: + +## Cluster Information Functions + +### Distribution +```go +func Distribution() string +``` +Distribution returns the Kubernetes distribution detected. The possible return values are: + +* aks +* digitalOcean +* dockerDesktop +* eks +* embedded-cluster +* gke +* ibm +* k0s +* k3s +* kind +* kurl +* microk8s +* minikube +* oke +* openShift +* rke2 + +:::note +[IsKurl](#iskurl) can also be used to detect kURL instances. +::: + +#### Detect the Distribution +```yaml +repl{{ Distribution }} +``` +#### Equal To Comparison +```yaml +repl{{ eq Distribution "gke" }} +``` +#### Not Equal To Comparison +```yaml +repl{{ ne Distribution "embedded-cluster" }} +``` +See [Functions](https://pkg.go.dev/text/template#hdr-Functions) in the Go documentation. + +### IsKurl +```go +func IsKurl() bool +``` +IsKurl returns true if running within a kurl-based installation. +#### Detect kURL Installations +```yaml +repl{{ IsKurl }} +``` +#### Detect Non-kURL Installations +```yaml +repl{{ not IsKurl }} +``` +See [Functions](https://pkg.go.dev/text/template#hdr-Functions) in the Go documentation. + +### KotsVersion + +```go +func KotsVersion() string +``` + +KotsVersion returns the current version of KOTS. + +```yaml +repl{{ KotsVersion }} +``` + +You can compare the KOTS version as follows: +```yaml +repl{{KotsVersion | semverCompare ">= 1.19"}} +``` + +This returns `true` if the KOTS version is greater than or equal to `1.19`. + +For more complex comparisons, see [Semantic Version Functions](https://masterminds.github.io/sprig/semver.html) in the sprig documentation. + +### KubernetesMajorVersion + +> Introduced in KOTS v1.92.0 + +```go +func KubernetesMajorVersion() string +``` + +KubernetesMajorVersion returns the Kubernetes server *major* version. + +```yaml +repl{{ KubernetesMajorVersion }} +``` + +You can compare the Kubernetes major version as follows: +```yaml +repl{{lt (KubernetesMajorVersion | ParseInt) 2 }} +``` + +This returns `true` if the Kubernetes major version is less than `2`. + +### KubernetesMinorVersion + +> Introduced in KOTS v1.92.0 + +```go +func KubernetesMinorVersion() string +``` + +KubernetesMinorVersion returns the Kubernetes server *minor* version. 
+

```yaml
repl{{ KubernetesMinorVersion }}
```

You can compare the Kubernetes minor version as follows:
```yaml
repl{{gt (KubernetesMinorVersion | ParseInt) 19 }}
```

This returns `true` if the Kubernetes minor version is greater than `19`.

### KubernetesVersion

> Introduced in KOTS v1.92.0

```go
func KubernetesVersion() string
```

KubernetesVersion returns the Kubernetes server version.

```yaml
repl{{ KubernetesVersion }}
```

You can compare the Kubernetes version as follows:
```yaml
repl{{KubernetesVersion | semverCompare ">= 1.19"}}
```

This returns `true` if the Kubernetes version is greater than or equal to `1.19`.

For more complex comparisons, see [Semantic Version Functions](https://masterminds.github.io/sprig/semver.html) in the sprig documentation.

### Namespace
```go
func Namespace() string
```
Namespace returns the Kubernetes namespace that the application belongs to.
```yaml
'{{repl Namespace}}'
```

### NodeCount
```go
func NodeCount() int
```
NodeCount returns the number of nodes detected within the Kubernetes cluster.
```yaml
repl{{ NodeCount }}
```

### Lookup

> Introduced in KOTS v1.103.0

```go
func Lookup(apiversion string, resource string, namespace string, name string) map[string]interface{}
```

Lookup searches resources in a running cluster and returns a resource or resource list.

Lookup uses the Helm lookup function to search resources and has the same functionality as the Helm lookup function. For more information, see [lookup](https://helm.sh/docs/chart_template_guide/functions_and_pipelines/#using-the-lookup-function) in the Helm documentation.

```yaml
repl{{ Lookup "API_VERSION" "KIND" "NAMESPACE" "NAME" }}
```

Both `NAME` and `NAMESPACE` are optional and can be passed as an empty string ("").

The following combinations of parameters are possible:

<table>
  <tr>
    <th>Behavior</th>
    <th>Lookup function</th>
  </tr>
  <tr>
    <td style={{ fontSize: 14 }}><code>kubectl get pod mypod -n mynamespace</code></td>
    <td style={{ fontSize: 14 }}><code>repl{{ Lookup "v1" "Pod" "mynamespace" "mypod" }}</code></td>
  </tr>
  <tr>
    <td style={{ fontSize: 14 }}><code>kubectl get pods -n mynamespace</code></td>
    <td style={{ fontSize: 14 }}><code>repl{{ Lookup "v1" "Pod" "mynamespace" "" }}</code></td>
  </tr>
  <tr>
    <td style={{ fontSize: 14 }}><code>kubectl get pods --all-namespaces</code></td>
    <td style={{ fontSize: 14 }}><code>repl{{ Lookup "v1" "Pod" "" "" }}</code></td>
  </tr>
  <tr>
    <td style={{ fontSize: 14 }}><code>kubectl get namespace mynamespace</code></td>
    <td style={{ fontSize: 14 }}><code>repl{{ Lookup "v1" "Namespace" "" "mynamespace" }}</code></td>
  </tr>
  <tr>
    <td style={{ fontSize: 14 }}><code>kubectl get namespaces</code></td>
    <td style={{ fontSize: 14 }}><code>repl{{ Lookup "v1" "Namespace" "" "" }}</code></td>
  </tr>
</table>

The following describes working with values returned by the Lookup function:

* When Lookup finds an object, it returns a dictionary with the key-value pairs from the object. This dictionary can be navigated to extract specific values. For example, the following returns the annotations for the `mynamespace` object:

  ```
  repl{{ (Lookup "v1" "Namespace" "" "mynamespace").metadata.annotations }}
  ```

* When Lookup returns a list of objects, it is possible to access the object list through the `items` field. For example:

  ```
  services: |
    repl{{- range $index, $service := (Lookup "v1" "Service" "mynamespace" "").items }}
      - repl{{ $service.metadata.name }}
    repl{{- end }}
  ```

  For an array value type, omit the `|`. For example:

  ```
  services:
    repl{{- range $index, $service := (Lookup "v1" "Service" "mynamespace" "").items }}
      - repl{{ $service.metadata.name }}
    repl{{- end }}
  ```

* When no object is found, Lookup returns an empty value. This can be used to check for the existence of an object.

## Date Functions

### Now
```go
func Now() string
```
Returns the current timestamp as an RFC3339 formatted string.
```yaml
'{{repl Now }}'
```

### NowFmt
```go
func NowFmt(format string) string
```
Returns the current timestamp as a formatted string. For information about Go time formatting guidelines, see [Constants](https://golang.org/pkg/time/#pkg-constants) in the Go documentation.
```yaml
'{{repl NowFmt "20060102" }}'
```

## Encoding Functions

### Base64Decode
```go
func Base64Decode(stringToDecode string) string
```
Returns the decoded string from a Base64-encoded value.
```yaml
'{{repl ConfigOption "base_64_encoded_name" | Base64Decode }}'
```

### Base64Encode
```go
func Base64Encode(stringToEncode string) string
```
Returns a Base64 encoded string.
```yaml
'{{repl ConfigOption "name" | Base64Encode }}'
```

### UrlEncode
```go
func UrlEncode(stringToEncode string) string
```
Returns the string, url encoded. Equivalent to the `QueryEscape` function within the golang `net/url` library. For more information, see [func QueryEscape](https://godoc.org/net/url#QueryEscape) in the Go documentation.
```yaml
'{{repl ConfigOption "smtp_email" | UrlEncode }}:{{repl ConfigOption "smtp_password" | UrlEncode }}@smtp.example.com:587'
```

### UrlPathEscape

```go
func UrlPathEscape(stringToEncode string) string
```
Returns the string, url *path* encoded. Equivalent to the `PathEscape` function within the golang `net/url` library. For more information, see [func PathEscape](https://godoc.org/net/url#PathEscape) in the Go documentation.
```yaml
'{{repl ConfigOption "smtp_email" | UrlPathEscape }}:{{repl ConfigOption "smtp_password" | UrlPathEscape }}@smtp.example.com:587'
```

## Encryption Functions

### KubeSeal
```go
func KubeSeal(certData string, namespace string, name string, value string) string
```

## Integer and Float Functions

### HumanSize
```go
func HumanSize(size interface{}) string
```
HumanSize returns a human-readable approximation of a size in bytes, capped at 4 significant digits (for example, "2.746 MB", "796 KB"). The size must be an integer or floating point number.
```yaml
'{{repl ConfigOption "min_size_bytes" | HumanSize }}'
```

## Proxy Functions

### HTTPProxy

```go
func HTTPProxy() string
```
HTTPProxy returns the address of the HTTP proxy that the Admin Console is configured to use.
```yaml
repl{{ HTTPProxy }}
```

### HTTPSProxy

```go
func HTTPSProxy() string
```
HTTPSProxy returns the address of the HTTPS proxy that the Admin Console is configured to use.
```yaml
repl{{ HTTPSProxy }}
```

### NoProxy

```go
func NoProxy() string
```
NoProxy returns the comma-separated list of no-proxy addresses that the Admin Console is configured to use.
```yaml
repl{{ NoProxy }}
```

## Math Functions
### Add
```go
func Add(x interface{}, y interface{}) interface{}
```
Adds x and y.
+ +If at least one of the operands is a floating point number, the result will be a floating point number. + +If both operands are integers, the result will be an integer. +```yaml +'{{repl Add (ConfigOption "maximum_users") 1}}' +``` + +### Div +```go +func Div(x interface{}, y interface{}) interface{} +``` +Divides x by y. + +If at least one of the operands is a floating point number, the result will be a floating point number. + +If both operands are integers, the result will be an integer and will be rounded down. +```yaml +'{{repl Div (ConfigOption "maximum_users") 2.0}}' +``` + +### Mult +```go +func Mult(x interface{}, y interface{}) interface{} +``` +Multiplies x and y. + +Both operands must be either an integer or a floating point number. + +If at least one of the operands is a floating point number, the result will be a floating point number. + +If both operands are integers, the result will be an integer. +```yaml +'{{repl Mult (NodePrivateIPAddressAll "DB" "redis" | len) 2}}' +``` + +If a template function returns a string, the value must be converted to an integer or a floating point number first: +```yaml +'{{repl Mult (ConfigOption "session_cookie_age" | ParseInt) 86400}}' +``` + +### Sub +```go +func Sub(x interface{}, y interface{}) interface{} +``` +Subtracts y from x. + +If at least one of the operands is a floating point number, the result will be a floating point number. + +If both operands are integers, the result will be an integer. +```yaml +'{{repl Sub (ConfigOption "maximum_users") 1}}' +``` + +## String Functions + +### ParseBool +```go +func ParseBool(str string) bool +``` +ParseBool returns the boolean value represented by the string. +```yaml +'{{repl ConfigOption "str_value" | ParseBool }}' +``` + +### ParseFloat +```go +func ParseFloat(str string) float64 +``` +ParseFloat returns the float value represented by the string. +```yaml +'{{repl ConfigOption "str_value" | ParseFloat }}' +``` + +### ParseInt +```go +func ParseInt(str string, args ...int) int64 +``` +ParseInt returns the integer value represented by the string with optional base (default 10). +```yaml +'{{repl ConfigOption "str_value" | ParseInt }}' +``` + +### ParseUint +```go +func ParseUint(str string, args ...int) uint64 +``` +ParseUint returns the unsigned integer value represented by the string with optional base (default 10). +```yaml +'{{repl ConfigOption "str_value" | ParseUint }}' +``` + +### RandomString +```go +func RandomString(length uint64, providedCharset ...string) string +``` +Returns a random string with the desired length and charset. +Provided charsets must be Perl formatted and match individual characters. +If no charset is provided, `[_A-Za-z0-9]` will be used. + +#### Examples + +The following example generates a 64-character random string: + +```yaml +'{{repl RandomString 64}}' +``` +The following example generates a 64-character random string that contains `a`s and `b`s: + +```yaml +'{{repl RandomString 64 "[ab]" }}' +``` +#### Generating Persistent and Ephemeral Strings + +When you assign the RandomString template function to a `value` key in the Config custom resource, you can use the `hidden` and `readonly` properties to control the behavior of the RandomString function each time it is called. The RandomString template function is called each time the user deploys a change to the configuration settings for the application. 
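For example, the following sketch assigns RandomString to the `value` key of a `password` item (the `pg_password` item name and `database` group are illustrative only, not from this documentation):

```yaml
# KOTS Config custom resource (illustrative sketch)
apiVersion: kots.io/v1beta1
kind: Config
metadata:
  name: config-sample
spec:
  groups:
    - name: database
      title: Database
      items:
        - name: pg_password
          title: Postgres Password
          type: password
          # hidden and readonly control whether this generated value is
          # persistent or ephemeral across config changes, as described below
          hidden: false
          readonly: false
          value: repl{{ RandomString 32 }}
```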
+

Depending on if the `hidden` and `readonly` properties are `true` or `false`, the random string generated by a RandomString template function in a `value` key is either ephemeral or persistent between configuration changes:

* **Ephemeral**: The value of the random string _changes_ when the user deploys a change to the configuration settings for the application.
* **Persistent**: The value of the random string does _not_ change when the user deploys a change to the configuration settings for the application.

For more information about these properties, see [`hidden`](custom-resource-config#hidden) and [`readonly`](custom-resource-config#readonly) in _Config_.

:::note
If you assign the RandomString template function to a `default` key in the Config custom resource rather than a `value` key, then the `hidden` and `readonly` properties do _not_ affect the behavior of the RandomString template function. For more information about the behavior of the `default` key in the Config custom resource, see [`default`](custom-resource-config#default) in _Config_.
:::

The following table describes the behavior of the RandomString template function when it is assigned to a `value` key in the Config custom resource and the `hidden` and `readonly` properties are `true` or `false`:

<table>
  <tr>
    <th width="15%">readonly</th>
    <th width="15%">hidden</th>
    <th width="15%">Outcome</th>
    <th width="55%">Use Case</th>
  </tr>
  <tr>
    <td>false</td>
    <td>true</td>
    <td>Persistent</td>
    <td>
      <p>Set <code>readonly</code> to <code>false</code> and <code>hidden</code> to <code>true</code> if:</p>
      <ul>
        <li>The random string must <em>not</em> change each time the user deploys a change to the application's configuration settings.</li>
        <li>The user does <em>not</em> need to see or change, or must be prevented from seeing or changing, the value of the random string.</li>
      </ul>
    </td>
  </tr>
  <tr>
    <td>true</td>
    <td>false</td>
    <td>Ephemeral</td>
    <td>
      <p>Set <code>readonly</code> to <code>true</code> and <code>hidden</code> to <code>false</code> if:</p>
      <ul>
        <li>The random string <em>must</em> change each time the user deploys a change to the application's configuration settings.</li>
        <li>The user does <em>not</em> need to change, or must be prevented from changing, the value of the random string.</li>
        <li>The user <em>must</em> be able to see the value of the random string.</li>
      </ul>
    </td>
  </tr>
  <tr>
    <td>true</td>
    <td>true</td>
    <td>Ephemeral</td>
    <td>
      <p>Set <code>readonly</code> to <code>true</code> and <code>hidden</code> to <code>true</code> if:</p>
      <ul>
        <li>The random string <em>must</em> change each time the user deploys a change to the application's configuration settings.</li>
        <li>The user does <em>not</em> need to see or change, or must be prevented from seeing or changing, the value of the random string.</li>
      </ul>
    </td>
  </tr>
  <tr>
    <td>false</td>
    <td>false</td>
    <td>Persistent</td>
    <td>
      <p>Set <code>readonly</code> to <code>false</code> and <code>hidden</code> to <code>false</code> if:</p>
      <ul>
        <li>The random string must <em>not</em> change each time the user deploys a change to the application's configuration settings.</li>
        <li>The user <em>must</em> be able to see and change the value of the random string.</li>
      </ul>
      <p>For example, set both <code>readonly</code> and <code>hidden</code> to <code>false</code> to generate a random password that users must be able to see and then change to a different value that they choose.</p>
    </td>
  </tr>
</table>

### Split
```go
func Split(s string, sep string) []string
```
Split slices s into all substrings separated by sep and returns an array of the substrings between those separators.
```yaml
'{{repl Split "A,B,C" "," }}'
```

Combining `Split` and `index`: Assuming the `github_url` param is set to `https://github.mycorp.internal:3131`, the following would set `GITHUB_HOSTNAME` to `github.mycorp.internal`.
```yaml
'{{repl index (Split (index (Split (ConfigOption "github_url") "/") 2) ":") 0}}'
```

### ToLower
```go
func ToLower(stringToAlter string) string
```
Returns the string, in lowercase.
```yaml
'{{repl ConfigOption "company_name" | ToLower }}'
```

### ToUpper
```go
func ToUpper(stringToAlter string) string
```
Returns the string, in uppercase.
```yaml
'{{repl ConfigOption "company_name" | ToUpper }}'
```

### Trim
```go
func Trim(s string, args ...string) string
```
Trim returns a string with all leading and trailing strings contained in the optional args removed (default space).
```yaml
'{{repl Trim (ConfigOption "str_value") "." }}'
```

### TrimSpace
```go
func TrimSpace(s string) string
```
TrimSpace returns a string with all leading and trailing spaces removed.
```yaml
'{{repl ConfigOption "str_value" | TrimSpace }}'
```

### YamlEscape
```go
func YamlEscape(input string) string
```

YamlEscape returns an escaped and quoted version of the input string, suitable for use within a YAML document. This can be useful when dealing with user-uploaded files that may include null bytes and other nonprintable characters. For more information about printable characters, see [Character Set](https://yaml.org/spec/1.2.2/#51-character-set) in the YAML documentation.

```yaml
repl{{ ConfigOptionData "my_file_upload" | YamlEscape }}
```

================
File: docs/reference/vendor-api-using.md
================
import ApiAbout from "../partials/vendor-api/_api-about.mdx"

# Using the Vendor API v3

This topic describes how to use Replicated Vendor API authentication tokens to make API calls.

## About the Vendor API

<ApiAbout/>

## API Token Requirement

To use the Vendor API v3, you need a token for authorization. You provide the token as the value of the `Authorization` header of Vendor API calls. For example, to pass a token as the authorization header in a request:

```
curl --request GET \
  --url https://api.replicated.com/vendor/v3/customers \
  --header 'Accept: application/json' \
  --header 'Authorization: my-token'
```

Generate a service account or user API token in the Vendor Portal. The token must have `Read/Write` access to create new releases. See [Generating API Tokens](/vendor/replicated-api-tokens).

## Vendor API v3 Documentation

For Vendor API documentation and an interactive API console, see [Vendor API v3 Reference](https://replicated-vendor-api.readme.io/v3/reference/createapp).

For the Vendor API swagger specification, see [vendor-api-v3.json](https://api.replicated.com/vendor/v3/spec/vendor-api-v3.json).
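For example, you can download the specification locally for use with OpenAPI tooling (a minimal sketch; this assumes the spec endpoint is publicly readable):

```
curl -o vendor-api-v3.json https://api.replicated.com/vendor/v3/spec/vendor-api-v3.json
```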
+

![vendor api documentation page](/images/vendor-api-docs.png)

[View a larger version of this image](/images/vendor-api-docs.png)

================
File: docs/release-notes/rn-app-manager.md
================
---
toc_max_heading_level: 2
pagination_next: null
pagination_prev: null
---

import KubernetesCompatibility from "../partials/install/_kubernetes-compatibility.mdx"

# KOTS Release Notes

This topic contains release notes for the [Replicated KOTS](../intro-kots) installer. The release notes list new features, improvements, bug fixes, known issues, and breaking changes.

## Kubernetes Compatibility

The following table lists the versions of Kubernetes that are compatible with each version of KOTS:

<KubernetesCompatibility/>

<!--RELEASE_NOTES_PLACEHOLDER-->

## 1.124.4

Released on February 14, 2025

Support for Kubernetes: 1.29, 1.30, and 1.31

### Improvements {#improvements-1-124-4}
* Improves error visibility by displaying the actual error message in the UI instead of a generic one when an upgrade fails to start.

### Bug Fixes {#bug-fixes-1-124-4}
* Fixes an issue where installing with a provided license could cause the installer not to respect the custom domain in the license and instead make a request to https://replicated.app.

## 1.124.3

Released on February 5, 2025

Support for Kubernetes: 1.29, 1.30, and 1.31

### Improvements {#improvements-1-124-3}
* Updates images to resolve CVE-2024-45337 with critical severity and CVE-2024-45338 with high severity.

## 1.124.2

Released on February 4, 2025

Support for Kubernetes: 1.29, 1.30, and 1.31

### Improvements {#improvements-1-124-2}
* Improvements for Embedded Cluster upgrades.

## 1.124.1

Released on January 29, 2025

Support for Kubernetes: 1.29, 1.30, and 1.31

### Bug Fixes {#bug-fixes-1-124-1}
* Improves an unclear error message that could occur when rewriting private images.

## 1.124.0

Released on January 24, 2025

Support for Kubernetes: 1.29, 1.30, and 1.31

### New Features {#new-features-1-124-0}
* You can migrate Helm charts that were installed with HelmChart `v1beta1` and `useHelmInstall: false` to HelmChart `v1beta2` by adding the `--take-ownership` flag to the `helmUpgradeFlags` field in the HelmChart custom resource as shown below:

  ```yaml
  # HelmChart v1beta2
  apiVersion: kots.io/v1beta2
  kind: HelmChart
  metadata:
    name: samplechart
  spec:
    helmUpgradeFlags:
      - --take-ownership
  ```

  This flag allows Helm to take ownership of existing resources that were installed without Helm, like resources deployed with HelmChart v1beta1 and `useHelmInstall: false`.

  For information about how to migrate an existing installation to KOTS HelmChart `v1beta2`, see [Migrating Existing Installations to HelmChart v2](/vendor/helm-v2-migrate).

## 1.123.1

Released on January 13, 2025

Support for Kubernetes: 1.29, 1.30, and 1.31

### Bug Fixes {#bug-fixes-1-123-1}
* Fixes an issue where the navigation menu was not visible on the Config page.

## 1.123.0

Released on January 2, 2025

Support for Kubernetes: 1.29, 1.30, and 1.31

### New Features {#new-features-1-123-0}
* Adds the `--tolerations` flag to `kots install` to supply additional tolerations to the KOTS pods, as sketched below.
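  A hypothetical invocation is shown below (the toleration string format here is an assumption for illustration; see the KOTS CLI reference for the supported syntax):

  ```
  kubectl kots install app-name/unstable \
    --tolerations "key=value:NoSchedule"
  ```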
+

## 1.122.1

Released on December 20, 2024

Support for Kubernetes: 1.29, 1.30, and 1.31

### Bug Fixes {#bug-fixes-1-122-1}
* Fixes a bug that could result in an instance being reported as unavailable if the application includes an Ingress resource.

## 1.122.0

Released on December 12, 2024

Support for Kubernetes: 1.29, 1.30, and 1.31

### New Features {#new-features-1-122-0}
* Adds support for the `kots.io/keep` annotation, which prevents KOTS from deleting resources during an upgrade if the resource is no longer present in the new release. This annotation is useful when migrating existing KOTS installations to the KOTS HelmChart v2 installation method. For more information, see [Migrating Existing Installations to HelmChart v2](/vendor/helm-v2-migrate).

## 1.121.2

Released on November 27, 2024

Support for Kubernetes: 1.29, 1.30, and 1.31

### Improvements {#improvements-1-121-2}
* Various updates for Embedded Cluster.

## 1.121.1

Released on November 26, 2024

Support for Kubernetes: 1.29, 1.30, and 1.31

### Bug Fixes {#bug-fixes-1-121-1}
* Various fixes for Embedded Cluster.

## 1.121.0

Released on November 12, 2024

Support for Kubernetes: 1.29, 1.30, and 1.31

### New Features {#new-features-1-121-0}
* Adds the ability to download the application archive for any version, including the currently deployed version, by using the `--sequence` and `--current` flags for the `kots download` command.

## 1.120.3

Released on November 7, 2024

Support for Kubernetes: 1.29, 1.30, and 1.31

### Improvements {#improvements-1-120-3}
* Various updates for Embedded Cluster.

## 1.120.2

Released on November 5, 2024

Support for Kubernetes: 1.29, 1.30, and 1.31

### Bug Fixes {#bug-fixes-1-120-2}
* Fixes an issue where generating a support bundle in air gap kURL environments took an excessively long time.

## 1.120.1

Released on November 4, 2024

Support for Kubernetes: 1.29, 1.30, and 1.31

### Bug Fixes {#bug-fixes-1-120-1}
* Fixes an issue where generating support bundles failed in air gap and minimal RBAC installations.
* Fixes an issue where pushing images from an air gap bundle could time out due to the host's environment variables interfering with the temporary registry used by the KOTS CLI.

## 1.120.0

Released on October 30, 2024

Support for Kubernetes: 1.29, 1.30, and 1.31

### New Features {#new-features-1-120-0}
* Various new features to support Replicated Embedded Cluster.

## 1.119.1

Released on October 22, 2024

Support for Kubernetes: 1.29, 1.30, and 1.31

### Bug Fixes {#bug-fixes-1-119-1}

* Fixes an issue where proxy settings were removed when upgrading the Admin Console with `kubectl kots admin-console upgrade`.
* Fixes an issue where `--strict-security-context` was removed when upgrading the Admin Console with `kubectl kots admin-console upgrade`.

## 1.119.0

Released on October 18, 2024

Support for Kubernetes: 1.29, 1.30, and 1.31

### Bug Fixes {#bug-fixes-1-119-0}
* Fixes an issue where the Replicated SDK failed to deploy if a private CA was provided to the installation but the SDK was installed into a different namespace than KOTS.
* If an application includes the Replicated SDK, the SDK will be deployed with the same ClusterRole as the Admin Console.
+

## 1.118.0

Released on October 15, 2024

Support for Kubernetes: 1.29, 1.30, and 1.31

### Improvements {#improvements-1-118-0}
* Improves the flexibility of configuring the [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) collector in support bundle specs by limiting KOTS's default collection to its own namespace.

### Bug Fixes {#bug-fixes-1-118-0}
* Fixes an issue where you could not upgrade Embedded Cluster instances if the new version didn't include config and preflight checks.

## 1.117.5

Released on October 8, 2024

Support for Kubernetes: 1.29, 1.30, and 1.31

### Improvements {#improvements-1-117-5}
* Adds support for specifying an alternative data directory in Embedded Cluster.

## 1.117.4

Released on October 8, 2024

Support for Kubernetes: 1.29, 1.30, and 1.31

### Improvements {#improvements-1-117-4}
* Various improvements for surfacing errors in Embedded Cluster upgrades.

## 1.117.3

Released on September 23, 2024

Support for Kubernetes: 1.29, 1.30, and 1.31

### New Features {#new-features-1-117-3}
* If the Replicated SDK is deployed by KOTS as part of an application, the SDK will automatically be configured with any additional CA certificates provided to the `--private-ca-configmap` flag for the `kots install` command.

## 1.117.2

Released on September 20, 2024

Support for Kubernetes: 1.29, 1.30, and 1.31

### Improvements {#improvements-1-117-2}
* Improvements to support specifying ports in Embedded Cluster.

## 1.117.1

Released on September 17, 2024

Support for Kubernetes: 1.29, 1.30, and 1.31

### Bug Fixes {#bug-fixes-1-117-1}
* Fixes an issue where the values provided to the `--http-proxy`, `--https-proxy`, and `--no-proxy` flags for the `kots install` command were not propagated to the Replicated SDK when running an automated install.

## 1.117.0

Released on September 13, 2024

Support for Kubernetes: 1.29, 1.30, and 1.31

### New Features {#new-features-1-117-0}
* Adds the `--private-ca-configmap` flag to the `install` and `generate-manifests` commands. The contents of the provided ConfigMap are used as additional trusted certificate authorities.
* Adds the [`PrivateCACert` template function](/reference/template-functions-static-context#privatecacert) to return the name of a ConfigMap containing additional trusted CA certificates provided by the end user.

### Bug Fixes {#bug-fixes-1-117-0}
* Fixes an issue where `dropdown` Config items did not respect the `when` property.

## 1.116.1

Released on September 12, 2024

Support for Kubernetes: 1.28, 1.29, and 1.30

### Bug Fixes {#bug-fixes-1-116-1}
* KOTS now uses the fully qualified `.svc.cluster.local` address when making requests to the `kotsadm-rqlite` and `kotsadm-minio` services for simplified HTTP proxy configuration using `NO_PROXY=.cluster.local`.

## 1.116.0

Released on September 9, 2024

Support for Kubernetes: 1.28, 1.29, and 1.30

### New Features {#new-features-1-116-0}
* Adds the ability to specify additional annotations (`--additional-annotations`) and labels (`--additional-labels`) that will be applied to kotsadm pods.

## 1.115.2

Released on September 5, 2024

Support for Kubernetes: 1.28, 1.29, and 1.30

### Improvements {#improvements-1-115-2}
* Available updates and the check for updates button are shown on the **Dashboard** page of the Admin Console for Embedded Cluster. These were removed in a previous version.
+* When nodes need to be added to the cluster during an Embedded Cluster restore operation, the `join` command is more clearly shown in the Admin Console. +* Improves messaging when the requested channel slug is not allowed by the provided license. + +### Bug Fixes {#bug-fixes-1-115-2} +* Fixes an issue where the values provided to the `--http-proxy`, `--https-proxy`, and `--no-proxy` flags for the `kots install` command were not propagated to the Replicated SDK. +* Hides a banner on the **View Files** page in Embedded Cluster that told users to use `kubectl kots` commands that are not intended for Embedded Cluster. + +## 1.115.1 + +Released on August 22, 2024 + +Support for Kubernetes: 1.28, 1.29, and 1.30 + +### Bug Fixes {#bug-fixes-1-115-1} +* Fixes an issue where the default `nodeMetrics` analyzer did not run. + +## 1.115.0 + +Released on August 20, 2024 + +Support for Kubernetes: 1.28, 1.29, and 1.30 + +### Improvements {#improvements-1-115-0} +* The **Nodes** page displays guidance and easier access to the node join command during initial install of Embedded Cluster. +* Adds back the check for updates button on the **Version history** page in Embedded Cluster, so you can check for updates without refreshing the page. + +## 1.114.0 + +Released on August 12, 2024 + +Support for Kubernetes: 1.28, 1.29, and 1.30 + +### New Features {#new-features-1-114-0} +* Adds support for the `dropdown` config item type, which creates a dropdown on the config screen. See [dropdown](/reference/custom-resource-config#dropdown) in _Config_. +* Adds the `radio` config item type, which is functionally equivalent to the `select_one` item type but is more clearly named. The `select_one` config item type is deprecated in favor of `radio` but is still fully functional. See [radio](/reference/custom-resource-config#radio) in _Config_. + +## 1.113.0 + +:::important +In KOTS 1.113.0 and later, an installation error can occur if you use the `kots install` command without specifying a channel slug _and_ the license used to install does not have access to the Stable channel. For more information, see [Breaking Change](#breaking-changes-1-113-0) below. +::: + +Released on August 9, 2024 + +Support for Kubernetes: 1.28, 1.29, and 1.30 + +### New Features {#new-features-1-113-0} +* Adds support for multi-channel licenses. This allows each license to be assigned to more than one channel. + + With the introduction of multi-channel licenses, an installation error can occur if you use the `kots install` command without specifying a channel slug _and_ the license used to install does not have access to the Stable channel. For more information, see [Breaking Change](#breaking-changes-1-113-0) below. + +### Bug Fixes {#bug-fixes-1-113-0} +* Fixes an issue in Embedded Cluster where going back to the Nodes page during the installation and then clicking continue did not work. + +### Breaking Change {#breaking-changes-1-113-0} + +In KOTS 1.113.0 and later, the following error will occur during installation if the `kots install` command lacks a channel slug _and_ the license does not have access to the Stable channel: `"failed to verify and update license: requested channel not found in latest license"`. This can break existing automation and documentation that includes a `kots install` command without a channel slug. 
+

This error occurs because, when the channel slug is omitted from the `kots install` command (for example, `kots install app`), KOTS defaults to pulling metadata like the application icon and minimal RBAC configurations from the Stable channel. With the introduction of multi-channel licenses in KOTS 1.113.0, only licenses with access to a channel can pull metadata and download releases from that channel. This means that only licenses with access to the Stable channel can install without specifying the channel slug in the `kots install` command.

Previously, any license, regardless of its assigned channel, could install by excluding the channel slug from the `kots install` command. This could cause mismatches in deployment settings such as icons and minimal RBAC configurations because KOTS would pull metadata from the Stable channel and then install the release from the channel where the license was assigned.

**Solution:** To install a release from a channel other than Stable, specify the channel slug in the `kots install` command (for example, `kots install app/beta`). Also, ensure that the license has access to the specified channel. Refer to the Vendor Portal installation instructions or use the `replicated channel inspect CHANNEL_ID` command in the Replicated CLI for the correct commands.

To avoid breaking changes, update automation that uses the `kots install` command accordingly. Also, update documentation as needed so that the documented installation commands include the channel slug.

If you cannot update your KOTS installation command immediately, temporarily revert to KOTS 1.112.4 or earlier.

## 1.112.4

Released on July 31, 2024

Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30

### Bug Fixes {#bug-fixes-1-112-4}
* Fixes an issue in Embedded Cluster upgrades where preflights did not rerun when the config was re-edited.
* Fixes an issue that caused K8s minor version parsing errors to be logged repeatedly.

## 1.112.3

Released on July 30, 2024

Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30

### Bug Fixes {#bug-fixes-1-112-3}
* Fixes an issue where the Admin Console **Version history** page for Embedded Cluster had to be refreshed to show a newly available version after uploading an air gap bundle.

## 1.112.2

Released on July 26, 2024

Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30

### Bug Fixes {#bug-fixes-1-112-2}
* Fixes an issue in Embedded Cluster upgrades where rendering Helm charts that utilize Helm capabilities could fail.

## 1.112.1

Released on July 16, 2024

Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30

### Bug Fixes {#bug-fixes-1-112-1}
* Fixes an issue where reporting information wasn't sent for Embedded Cluster in some cases.

## 1.112.0

Released on June 27, 2024

Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30

### New Features {#new-features-1-112-0}
* Changes the workflow for upgrading to newly available versions in the Admin Console for Embedded Cluster only. When upgrading to new versions, users are taken through a wizard where the license is synced, config can be edited, and preflight checks are run before deploying.

## 1.111.0

Released on July 9, 2024

Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30

### Bug Fixes {#bug-fixes-1-111-0}
* Fixes an issue where the Troubleshoot button on the **Resource status** modal didn't take you to the Troubleshoot page.
+
+## 1.110.0
+
+Released on June 27, 2024
+
+Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30
+
+### New Features {#new-features-1-110-0}
+* Adds the ability to specify a custom storage class for the KOTS Admin Console components when installing to an existing cluster.
+
+## 1.109.14
+
+Released on June 21, 2024
+
+Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30
+
+### Bug Fixes {#bug-fixes-1-109-14}
+* Fixes an issue where required releases were enforced in air gapped mode when changing between channels that didn't have semantic versioning enabled.
+
+## 1.109.13
+
+Released on June 20, 2024
+
+Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30
+
+### Improvements {#improvements-1-109-13}
+* Changes to support Embedded Cluster.
+
+## 1.109.12
+
+Released on June 10, 2024
+
+Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30
+
+### Improvements {#improvements-1-109-12}
+* Updates to enable high availability support for Embedded Cluster.
+
+### Bug Fixes {#bug-fixes-1-109-12}
+* Fixes an issue where air gap uploads could incorrectly fail with an error indicating that the version being uploaded matches the current version. This occurred because the version labels matched and were valid semantic versions. Comparing version labels is intentional for channels with semantic versioning enabled, but was unintentional for channels without semantic versioning enabled.
+
+## 1.109.11
+
+Released on June 7, 2024
+
+Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30
+
+### Improvements {#improvements-1-109-11}
+* Minor UI improvements for the air gap bundle upload progress bar.
+
+### Bug Fixes {#bug-fixes-1-109-11}
+* Fixes an issue where the `--skip-preflights` flag would not work if all strict preflights passed but a non-strict preflight failed.
+
+## 1.109.10
+
+Released on June 6, 2024
+
+Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30
+
+### Improvements {#improvements-1-109-10}
+* Various updates to enable high availability support for Embedded Cluster.
+
+## 1.109.9
+
+Released on May 31, 2024
+
+Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30
+
+### Improvements {#improvements-1-109-9}
+* Various updates to enable high availability support for Embedded Cluster.
+
+### Bug Fixes {#bug-fixes-1-109-9}
+* An incorrect CLI command for generating support bundles is no longer shown on the Troubleshoot page in embedded clusters.
+
+## 1.109.8
+
+Released on May 30, 2024
+
+Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30
+
+### Improvements {#improvements-1-109-8}
+* Updates to enable high availability support for Embedded Cluster.
+
+## 1.109.7
+
+Released on May 29, 2024
+
+Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30
+
+### Improvements {#improvements-1-109-7}
+* Updates to enable high availability support for Embedded Cluster.
+
+## 1.109.6
+
+Released on May 24, 2024
+
+Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30
+
+### Improvements {#improvements-1-109-6}
+* Updates to enable disaster recovery support for Embedded Cluster.
+
+## 1.109.5
+
+Released on May 23, 2024
+
+Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30
+
+### Improvements {#improvements-1-109-5}
+* Updates to enable disaster recovery support for Embedded Cluster.
+
+## 1.109.4
+
+Released on May 21, 2024
+
+Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30
+
+### Bug Fixes {#bug-fixes-1-109-4}
+* Fixes `kubectl kots port-forward` for high-latency network connections.
+
+## 1.109.3
+
+Released on May 15, 2024
+
+Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30
+
+### Bug Fixes {#bug-fixes-1-109-3}
+* Fixes an issue where the [Distribution](/reference/template-functions-static-context#distribution) template function returned `k0s` instead of `embedded-cluster` for embedded clusters. (A usage sketch appears after the 1.108.6 entry below.)
+
+## 1.109.2
+
+Released on May 15, 2024
+
+Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30
+
+### Improvements {#improvements-1-109-2}
+* Updates images to resolve CVE-2024-33599 with high severity; and CVE-2024-33600, CVE-2024-33601, CVE-2024-33602 with medium severity.
+
+## 1.109.1
+
+Released on May 15, 2024
+
+Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30
+
+### Improvements {#improvements-1-109-1}
+* Displays the volume name, Pod name, and namespace of snapshotted volumes on the snapshot details page.
+
+### Bug Fixes {#bug-fixes-1-109-1}
+* Fixes an issue where the **Config** and **View files** tabs did not display as active when clicked.
+* Fixes an issue where KOTS failed to process Helm charts with required values that were configured with the v1beta2 HelmChart custom resource.
+
+## 1.109.0
+
+Released on May 9, 2024
+
+Support for Kubernetes: 1.27, 1.28, and 1.29
+
+### New Features {#new-features-1-109-0}
+* Adds the ability to detect Embedded Cluster with the [Distribution](/reference/template-functions-static-context#distribution) template function.
+
+## 1.108.13
+
+Released on May 6, 2024
+
+Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29
+
+### Improvements {#improvements-1-108-13}
+* Updates the snapshot settings page to clarify that the retention policy applies to all snapshots, not just scheduled snapshots.
+
+## 1.108.12
+
+Released on May 3, 2024
+
+Support for Kubernetes: 1.27, 1.28, and 1.29
+
+### Bug Fixes {#bug-fixes-1-108-12}
+* Fixes an issue where the snapshot settings card on the admin console dashboard contained an extra slash in the object store bucket path.
+
+## 1.108.11
+
+Released on May 1, 2024
+
+Support for Kubernetes: 1.27, 1.28, and 1.29
+
+### Improvements {#improvements-1-108-11}
+* Various updates to enable disaster recovery support for Embedded Cluster.
+* Updates Troubleshoot to v0.91.0.
+
+## 1.108.10
+
+Released on April 26, 2024
+
+Support for Kubernetes: 1.27, 1.28, and 1.29
+
+### Improvements {#improvements-1-108-10}
+* Various updates to enable disaster recovery support for Embedded Cluster.
+
+## 1.108.9
+
+Released on April 24, 2024
+
+Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29
+
+### Improvements {#improvements-1-108-9}
+* Updates images to resolve CVE-2024-3817 with critical severity.
+
+### Bug Fixes {#bug-fixes-1-108-9}
+* Fixes an issue where the **Edit config** link on the dashboard didn't work.
+
+## 1.108.8
+
+Released on April 18, 2024
+
+Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29
+
+### Improvements {#improvements-1-108-8}
+* Various updates to improve air gap and multi-node support for Embedded Cluster.
+
+## 1.108.7
+
+Released on April 16, 2024
+
+Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29
+
+### Improvements {#improvements-1-108-7}
+* Various updates to enable air gap and multi-node support for Embedded Cluster.
+
+## 1.108.6
+
+Released on April 11, 2024
+
+Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29
+
+### Improvements {#improvements-1-108-6}
+* Provides a progress indicator when pushing images and Embedded Cluster artifacts during an installation.
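+
+As a minimal sketch of the [Distribution](/reference/template-functions-static-context#distribution) detection mentioned in the 1.109.3 and 1.109.0 entries above, a Config item can surface the detected distribution; the group and item names here are hypothetical:
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Config
+metadata:
+  name: config
+spec:
+  groups:
+    - name: cluster_info
+      title: Cluster Info
+      items:
+        - name: detected_distribution
+          title: Detected Distribution
+          type: text
+          # Renders as "embedded-cluster" on Embedded Cluster installs (KOTS 1.109.3 and later).
+          value: repl{{ Distribution }}
+```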
+
+## 1.108.5
+
+Released on April 8, 2024
+
+Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29
+
+### Improvements {#improvements-1-108-5}
+* Various updates to enable air gap support for Embedded Cluster.
+
+## 1.108.4
+
+Released on April 3, 2024
+
+Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29
+
+### Improvements {#improvements-1-108-4}
+* Re-builds the kotsadm image with the latest Wolfi base image to mitigate CVE-2024-3094.
+
+## 1.108.3
+
+Released on March 26, 2024
+
+Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29
+
+### Improvements {#improvements-1-108-3}
+* Updates to enable air gap support for Embedded Cluster.
+
+## 1.108.2
+
+Released on March 25, 2024
+
+Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29
+
+### Improvements {#improvements-1-108-2}
+* Various updates to enable air gap support for Embedded Cluster.
+
+## 1.108.1
+
+Released on March 19, 2024
+
+Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29
+
+### Improvements {#improvements-1-108-1}
+* Prevents application rollback in Embedded Cluster installations.
+
+### Bug Fixes {#bug-fixes-1-108-1}
+* Fixes an issue in Embedded Cluster where forward slashes were replaced with dashes in custom role labels.
+
+## 1.108.0
+
+Released on March 5, 2024
+
+Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29
+
+### New Features {#new-features-1-108-0}
+* Adds the ability to get the config values of the currently deployed app version via the CLI by passing the `--current` flag to the [kubectl kots get config](/reference/kots-cli-get-config) CLI command.
+* Adds the ability to update the config values of the currently deployed app version via the CLI by passing the `--current` flag to the [kubectl kots set config](/reference/kots-cli-set-config) CLI command.
+* Adds the ability to update the config values of any app version via the CLI by providing the target sequence with the `--sequence` flag in the [kubectl kots set config](/reference/kots-cli-set-config) CLI command. (A usage sketch appears after the 1.107.7 entries below.)
+* Adds the ability to update the config values for any app version using the admin console.
+
+### Improvements {#improvements-1-108-0}
+* Hides the **Application** and **Cluster Management** tabs on the admin console navbar during the initial installation flow with Replicated embedded cluster (Beta). For more information, see [Using Embedded Cluster](/vendor/embedded-overview).
+
+### Bug Fixes {#bug-fixes-1-108-0}
+* Fixes an issue where the license upload page flashed briefly before being redirected to the login page.
+* Fixes an issue in embedded cluster (Beta) where the cluster upgrade modal occasionally failed to display during upgrades.
+
+## 1.107.8
+
+Released on February 27, 2024
+
+Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29
+
+### Improvements {#improvements-1-107-8}
+* Resolves the false positive CVEs with critical severity in the `kotsadm` image, which stemmed from the Dex Go library.
+
+## 1.107.7
+
+Released on February 23, 2024
+
+Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29
+
+### Bug Fixes {#bug-fixes-1-107-7}
+* Fixes an issue where the "Ignore Preflights" button was not displayed on the preflights page when preflights were running.
+* Fixes an issue where the [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue) template function did not return the new value when syncing the license.
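+
+A usage sketch for the 1.108.0 config commands above; the app slug `app`, the namespace `app-namespace`, and the `hostname` key are hypothetical, and the exact flags are documented in the linked CLI references:
+
+```bash
+# Read the config values of the currently deployed app version.
+kubectl kots get config --namespace app-namespace --appslug app --current
+
+# Update a config value on the currently deployed app version.
+kubectl kots set config app --namespace app-namespace --current hostname=example.com
+
+# Update a config value for a specific app version by sequence.
+kubectl kots set config app --namespace app-namespace --sequence 5 hostname=example.com
+```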
+
+## 1.107.6
+
+Released on February 22, 2024
+
+Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29
+
+### Improvements {#improvements-1-107-6}
+* UI improvements when running in an embedded cluster (Alpha).
+
+## 1.107.5
+
+Released on February 20, 2024
+
+Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29
+
+### Bug Fixes {#bug-fixes-1-107-5}
+* Fixes an issue in kURL clusters where images from Helm charts configured using the v1beta2 HelmChart custom resource were incorrectly removed from the in-cluster registry, potentially leading to failed deployments.
+
+## 1.107.4
+
+Released on February 16, 2024
+
+Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29
+
+### Bug Fixes {#bug-fixes-1-107-4}
+* Fixes an issue where processing images from Helm charts configured using the v1beta2 HelmChart custom resource could fail in air gapped mode.
+
+## 1.107.3
+
+Released on February 12, 2024
+
+Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29
+
+### Bug Fixes {#bug-fixes-1-107-3}
+* Fixes an issue where the preflights page was not displayed during initial installation if the preflight spec was included in a Secret or ConfigMap in the Helm chart templates.
+
+## 1.107.2
+
+Released on February 2, 2024
+
+Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29
+
+### Improvements {#improvements-1-107-2}
+* Removes the rqlite DB data dump from support bundles generated by KOTS.
+* Updates the `minio`, `rqlite`, `dex`, and `local-volume-provider` images to resolve CVE-2023-6779, CVE-2023-6246, CVE-2024-21626 with high severity; and CVE-2023-6780 with medium severity.
+
+## 1.107.1
+
+Released on February 1, 2024
+
+Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29
+
+### Improvements {#improvements-1-107-1}
+
+* Updates the `kotsadm`, `kotsadm-migrations`, and `kurl-proxy` images to resolve CVE-2023-6779, CVE-2023-6246, CVE-2024-21626 with high severity; and CVE-2023-6780 with medium severity.
+
+## 1.107.0
+
+Released on January 30, 2024
+
+Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29
+
+### New Features {#new-features-1-107-0}
+* Adds support for running KOTS on ARM64 nodes. For air gap installations, the KOTS air gap bundle has an updated format and also now includes images for both AMD64 and ARM64 architectures. When updating KOTS in air gap environments, ensure the CLI version you use matches the version of the KOTS air gap bundle because earlier KOTS versions are not compatible with the new air gap bundle format. For more information about KOTS installation requirements, see [Installation Requirements](/enterprise/installing-general-requirements).
+
+### Improvements {#improvements-1-107-0}
+* Removes support for the `kubectlVersion` and `kustomizeVersion` fields in the KOTS Application custom resource. One version of kubectl and one version of kustomize are now included in KOTS and will always be used.
+
+## 1.106.0
+
+Released on January 23, 2024
+
+Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29
+
+### New Features {#new-features-1-106-0}
+* Adds support for an experimental air gap bundle feature that allows KOTS to process partial air gap bundles that only include the images needed to update to the desired version.
+
+## 1.105.5
+
+Released on January 18, 2024
+
+Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29
+
+### Improvements {#improvements-1-105-5}
+* Adds the namespace to the password reset command that is displayed when the admin console is locked after reaching the limit of unsuccessful login attempts.
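+
+A sketch of the displayed password reset command with its namespace, assuming the Admin Console runs in a hypothetical `app-namespace` namespace; see the KOTS CLI reference for the exact invocation:
+
+```bash
+# Reset the Admin Console password; the namespace shown is a placeholder.
+kubectl kots reset-password -n app-namespace
+```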
+
+## 1.105.4
+
+Released on January 16, 2024
+
+Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29
+
+### Bug Fixes {#bug-fixes-1-105-4}
+* Fixes an issue where Pods associated with KOTS components could be incorrectly scheduled on a non-AMD64 node.
+* Fixes an issue where configuring snapshots to use internal storage failed in kURL clusters with HA MinIO and OpenEBS.
+
+## 1.105.3
+
+Released on January 10, 2024
+
+Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29
+
+### Improvements {#improvements-1-105-3}
+* Upgrades the github.com/cloudflare/circl go module from 1.3.3 to 1.3.7 to resolve GHSA-9763-4f94-gfch with high severity.
+
+## 1.105.2
+
+Released on January 9, 2024
+
+Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29
+
+### Bug Fixes {#bug-fixes-1-105-2}
+* Fixes an issue where rendering KOTS custom resources could fail if there are required configuration items that don't have defaults.
+* Fixes an issue where the `kotsadm-rqlite` and `kotsadm-minio` Pods could be incorrectly scheduled on Arm nodes.
+
+## 1.105.1
+
+Released on December 29, 2023
+
+Support for Kubernetes: 1.26, 1.27, and 1.28
+
+### Bug Fixes {#bug-fixes-1-105-1}
+* Fixes an issue where the `minKotsVersion` and `targetKotsVersion` fields in the Application custom resource were not enforced if they were part of a multi-doc YAML file. (A sketch of these fields appears after the 1.104.7 entries below.)
+
+## 1.105.0
+
+Released on December 28, 2023
+
+Support for Kubernetes: 1.26, 1.27, and 1.28
+
+### New Features {#new-features-1-105-0}
+* Adds the ability to template the entire [values](/reference/custom-resource-helmchart-v2#values) field in the HelmChart custom resource.
+
+### Bug Fixes {#bug-fixes-1-105-0}
+* Fixes an issue where the [namespace](/reference/custom-resource-helmchart-v2#namespace) field in HelmChart custom resources was not rendered when uninstalling the corresponding chart.
+* Fixes an issue where KOTS failed to parse the Preflight custom resource if template functions were used for non-string fields.
+
+## 1.104.7
+
+Released on December 14, 2023
+
+Support for Kubernetes: 1.26, 1.27, and 1.28
+
+### Improvements {#improvements-1-104-7}
+* Uses Chainguard to build the local-volume-provider image to resolve CVE-2019-8457 and CVE-2023-45853 with critical severity; and CVE-2022-3715, CVE-2021-33560, CVE-2022-4899, CVE-2022-1304, CVE-2020-16156, CVE-2023-31484, CVE-2023-47038 with high severity; and CVE-2023-4806, CVE-2023-4813, CVE-2023-5981, CVE-2023-5678, CVE-2023-4039, CVE-2023-50495, CVE-2023-4641 with medium severity; and TEMP-0841856-B18BAF, CVE-2016-2781, CVE-2017-18018, CVE-2022-3219, CVE-2011-3374, CVE-2010-4756, CVE-2018-20796, CVE-2019-1010022, CVE-2019-1010023, CVE-2019-1010024, CVE-2019-1010025, CVE-2019-9192, CVE-2018-6829, CVE-2011-3389, CVE-2018-5709, CVE-2022-41409, CVE-2017-11164, CVE-2017-16231, CVE-2017-7245, CVE-2017-7246, CVE-2019-20838, CVE-2021-36084, CVE-2021-36085, CVE-2021-36086, CVE-2021-36087, CVE-2007-6755, CVE-2010-0928, CVE-2013-4392, CVE-2020-13529, CVE-2023-31437, CVE-2023-31438, CVE-2023-31439, CVE-2007-5686, CVE-2013-4235, CVE-2019-19882, CVE-2023-29383, TEMP-0628843-DBAD28, CVE-2011-4116, CVE-2023-31486, TEMP-0517018-A83CE6, CVE-2005-2541, CVE-2022-48303, CVE-2023-39804, TEMP-0290435-0B57B5, CVE-2022-0563 with low severity.
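+
+A minimal sketch of the `minKotsVersion` and `targetKotsVersion` fields referenced in the 1.105.1 fix above; the application name and version values are hypothetical:
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Application
+metadata:
+  name: my-application
+spec:
+  # Installation is blocked if the installed KOTS version is older than this.
+  minKotsVersion: "1.100.0"
+  # The latest KOTS version that this release is intended to run with.
+  targetKotsVersion: "1.105.1"
+```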
+
+## 1.104.6
+
+Released on December 13, 2023
+
+Support for Kubernetes: 1.26, 1.27, and 1.28
+
+### Improvements {#improvements-1-104-6}
+* Uses Chainguard to build the kotsadm-migrations image to resolve CVE-2023-45853 with critical severity; CVE-2023-31484, CVE-2023-47038, and CVE-2023-39325 with high severity; CVE-2023-5981, CVE-2023-4039, CVE-2023-5678, CVE-2023-4641, and CVE-2023-44487 with medium severity; and TEMP-0841856-B18BAF, CVE-2022-0563, CVE-2016-2781, CVE-2017-18018, CVE-2022-27943, CVE-2022-3219, CVE-2011-3374, CVE-2010-4756, CVE-2018-20796, CVE-2019-1010022, CVE-2019-1010023, CVE-2019-1010024, CVE-2019-1010025, CVE-2019-9192, CVE-2018-6829, CVE-2011-3389, CVE-2013-4392, CVE-2023-31437, CVE-2023-31438, CVE-2023-31439, CVE-2007-6755, CVE-2010-0928, CVE-2007-5686, CVE-2019-19882, CVE-2023-29383, TEMP-0628843-DBAD28, CVE-2011-4116, CVE-2023-31486, TEMP-0517018-A83CE6, CVE-2005-2541, CVE-2022-48303, CVE-2023-39804, and TEMP-0290435-0B57B5 with low severity.
+* Uses Chainguard to build the rqlite image to resolve CVE-2023-5363, CVE-2023-39325, and GHSA-m425-mq94-257g with high severity; and CVE-2023-5678, CVE-2023-3978, and CVE-2023-44487 with medium severity.
+* Uses Chainguard to build the MinIO image to resolve CVE-2022-27943 and CVE-2022-29458 with low severity.
+* Uses Chainguard to build the dex image to resolve CVE-2022-48174 with critical severity; CVE-2023-5363, CVE-2023-39325, and GHSA-m425-mq94-257g with high severity; and CVE-2023-2975, CVE-2023-3446, CVE-2023-3817, CVE-2023-5678, GHSA-2c7c-3mj9-8fqh, CVE-2023-3978, and CVE-2023-44487 with medium severity.
+
+### Bug Fixes {#bug-fixes-1-104-6}
+* Fixes an issue where preflights could hang indefinitely when rerun, if the sequence was created by KOTS versions earlier than 1.96.0.
+
+## 1.104.5
+
+Released on December 8, 2023
+
+Support for Kubernetes: 1.26, 1.27, and 1.28
+
+### Improvements {#improvements-1-104-5}
+* Uses Chainguard to build the kurl-proxy image to resolve CVE-2023-45853 with critical severity; CVE-2023-25652, CVE-2023-29007, CVE-2023-5981, CVE-2023-2953, CVE-2023-44487, CVE-2023-31484, and CVE-2023-47038 with high severity; CVE-2023-4039, CVE-2023-5678, and CVE-2023-4641 with medium severity; CVE-2011-3374, TEMP-0841856-B18BAF, CVE-2022-0563, CVE-2016-2781, CVE-2017-18018, CVE-2022-27943, CVE-2018-1000021, CVE-2022-24975, CVE-2023-25815, CVE-2022-3219, CVE-2010-4756, CVE-2018-20796, CVE-2019-1010022, CVE-2019-1010023, CVE-2019-1010024, CVE-2019-1010025, CVE-2019-9192, CVE-2018-6829, CVE-2011-3389, CVE-2018-5709, CVE-2015-3276, CVE-2017-14159, CVE-2017-17740, CVE-2020-15719, CVE-2011-4116, CVE-2023-31486, CVE-2007-6755, CVE-2010-0928, CVE-2013-4392, CVE-2023-31437, CVE-2023-31438, CVE-2023-31439, CVE-2007-5686, CVE-2019-19882, CVE-2023-29383, TEMP-0628843-DBAD28, TEMP-0517018-A83CE6, CVE-2005-2541, CVE-2022-48303, and TEMP-0290435-0B57B5 with low severity.
+
+### Bug Fixes {#bug-fixes-1-104-5}
+* Fixes an issue that stripped the port from the argument passed to the `--kotsadm-registry` flag. This could result in a registry validation error when installing, upgrading, or pushing admin console images.
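+
+A sketch of passing a registry that includes a port, per the 1.104.5 fix above; the registry host, port, app slug, and credentials are placeholders:
+
+```bash
+kubectl kots install app/stable \
+  --kotsadm-registry registry.example.com:5000 \
+  --registry-username REGISTRY_USERNAME \
+  --registry-password REGISTRY_PASSWORD
+```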
+
+## 1.104.4
+
+Released on December 1, 2023
+
+Support for Kubernetes: 1.26, 1.27, and 1.28
+
+### Improvements {#improvements-1-104-4}
+* Uses Chainguard to build the `kotsadm` image to resolve CVE-2023-45853 with critical severity; CVE-2023-25652, CVE-2023-29007, CVE-2023-5981, CVE-2023-2953, CVE-2023-44487, CVE-2023-31484, CVE-2023-47038, CVE-2023-24329, CVE-2023-41105, and CVE-2023-2253 with high severity; CVE-2023-4039, CVE-2023-27043, CVE-2023-40217, CVE-2023-5678, and CVE-2023-4641 with medium severity; and CVE-2011-3374, TEMP-0841856-B18BAF, CVE-2022-0563, CVE-2016-2781, CVE-2017-18018, CVE-2022-3219, CVE-2022-27943, CVE-2018-1000021, CVE-2022-24975, CVE-2023-25815, CVE-2010-4756, CVE-2018-20796, CVE-2019-1010022, CVE-2019-1010023, CVE-2019-1010024, CVE-2019-1010025, CVE-2019-9192, CVE-2018-6829, CVE-2011-3389, CVE-2018-5709, CVE-2015-3276, CVE-2017-14159, CVE-2017-17740, CVE-2020-15719, CVE-2011-4116, CVE-2023-31486, CVE-2023-24535, CVE-2021-45346, CVE-2007-6755, CVE-2010-0928, CVE-2013-4392, CVE-2023-31437, CVE-2023-31438, CVE-2023-31439, CVE-2007-5686, CVE-2019-19882, CVE-2023-29383, TEMP-0628843-DBAD28, TEMP-0517018-A83CE6, CVE-2005-2541, CVE-2022-48303, and TEMP-0290435-0B57B5 with low severity.
+
+### Bug Fixes {#bug-fixes-1-104-4}
+* Fixes an issue on the admin console Cluster Management page where a secondary node join command was displayed when the primary node type was selected.
+
+## 1.104.3
+
+Released on November 29, 2023
+
+Support for Kubernetes: 1.26, 1.27, and 1.28
+
+### Improvements {#improvements-1-104-3}
+* Upgrades the github.com/go-jose/go-jose/v3 go module to 3.0.1 to resolve GHSA-2c7c-3mj9-8fqh with medium severity.
+
+## 1.104.2
+
+Released on November 17, 2023
+
+Support for Kubernetes: 1.26, 1.27, and 1.28
+
+### Improvements {#improvements-1-104-2}
+* Upgrades the golang.org/x/net go module to 0.17.0 in kurl_proxy to resolve CVE-2023-39325 with high severity, and CVE-2023-3978 and CVE-2023-44487 with medium severity.
+* Upgrades the minio/minio image to RELEASE.2023-11-11T08-14-41Z to resolve CVE-2023-46129 and GHSA-m425-mq94-257g with high severity, and CVE-2023-44487 with medium severity.
+
+## 1.104.1
+
+Released on November 10, 2023
+
+Support for Kubernetes: 1.25, 1.26, 1.27, and 1.28
+
+### Improvements {#improvements-1-104-1}
+* Adds support for OKE (Oracle Kubernetes Engine) to the [Distribution](/reference/template-functions-static-context#distribution) template function.
+* The CLI now surfaces preflight check errors that are due to insufficient RBAC permissions.
+* Upgrades the kotsadm base image to `debian:bookworm-slim` to resolve CVE-2023-23914 with critical severity, and CVE-2022-42916 and CVE-2022-43551 with high severity.
+* Upgrades the Helm binary in the kotsadm image to 3.13.2 to resolve CVE-2023-39325 and GHSA-m425-mq94-257g with high severity and CVE-2023-44487 and GHSA-jq35-85cj-fj4p with medium severity.
+* Upgrades the google.golang.org/grpc go module to v1.59.0 to resolve GHSA-m425-mq94-257g with high severity and CVE-2023-44487 with medium severity.
+* Upgrades the github.com/docker/docker go module to v24.0.7 to resolve GHSA-jq35-85cj-fj4p with medium severity.
+
+### Bug Fixes {#bug-fixes-1-104-1}
+* Fixes an issue where the reporting data stored in Secrets in air gapped installations could exceed the maximum Secret size of 1MB.
+
+## 1.104.0
+
+Released on November 6, 2023
+
+Support for Kubernetes: 1.25, 1.26, 1.27, and 1.28
+
+### New Features {#new-features-1-104-0}
+* Releases that include version 1.0.0-beta.12 or later of the Replicated SDK can now be installed by KOTS. When KOTS deploys a release that includes the SDK, the SDK and KOTS both operate in the environment and independently report telemetry. Replicated recommends that all vendors, not just those that support Helm CLI installations, include the SDK in their application for access to the latest features from Replicated.
+
+### Improvements {#improvements-1-104-0}
+* Upgrades the replicated/local-volume-provider image to v0.5.5 to resolve CVE-2023-45128 with critical severity, CVE-2023-4911, CVE-2023-29491, CVE-2023-45141, and GHSA-m425-mq94-257g with high severity, and CVE-2023-36054, CVE-2023-3446, CVE-2023-3817, CVE-2023-41338, CVE-2023-39325, CVE-2023-3978, and CVE-2023-44487 with medium severity.
+* Upgrades the replicated/schemahero image to 0.16.0 to resolve CVE-2023-4911 with high severity, CVE-2023-2603, CVE-2023-29491, CVE-2023-2650, CVE-2023-31484, and CVE-2023-3978 with medium severity.
+* Upgrades the minio/minio image to RELEASE.2023-10-25T06-33-25Z to resolve CVE-2023-4911 and CVE-2023-44487 with high severity, CVE-2023-4527, CVE-2023-4806, CVE-2023-4813, CVE-2023-39325, and CVE-2023-44487 with medium severity.
+* Upgrades the minio/mc image to RELEASE.2023-10-14T01-57-03Z to resolve CVE-2023-4911 with high severity, and CVE-2023-4527, CVE-2023-4806, CVE-2023-4813, and CVE-2023-39325 with medium severity.
+
+### Bug Fixes {#bug-fixes-1-104-0}
+* Fixes an issue where KOTS didn't discover specs with the `troubleshoot.sh/kind=support-bundle` label when generating support bundles.
+
+## 1.103.3
+
+Released on October 25, 2023
+
+Support for Kubernetes: 1.25, 1.26, 1.27, and 1.28
+
+### Improvements {#improvements-1-103-3}
+* Updates the kubectl binary in the kotsadm image to resolve CVE-2023-39325, CVE-2023-3978, and CVE-2023-44487 with medium severity.
+* Updates the golang.org/x/net go module to version 0.17.0 to resolve CVE-2023-39325 and CVE-2023-44487 with medium severity.
+
+## 1.103.2
+
+Released on October 9, 2023
+
+Support for Kubernetes: 1.25, 1.26, 1.27, and 1.28
+
+### Improvements {#improvements-1-103-2}
+* Upgrades the minio/minio and minio/mc images to versions RELEASE.2023-09-23T03-47-50Z and RELEASE.2023-09-22T05-07-46Z, respectively, to resolve CVE-2023-29491 with high severity.
+* Upgrades the Helm binary in the kotsadm image to 3.13.0 to resolve CVE-2023-28840 with high severity and CVE-2023-28841, CVE-2023-28842, and GHSA-6xv5-86q9-7xr8 with medium severity.
+* Logs preflight checks to the CLI and the kotsadm logs whenever failing checks lead to a failed deployment.
+
+### Bug Fixes {#bug-fixes-1-103-2}
+* Fixes a bug where no status code was returned from custom metrics API requests.
+
+## 1.103.1
+
+Released on September 29, 2023
+
+Support for Kubernetes: 1.25, 1.26, 1.27, and 1.28
+
+### Improvements {#improvements-1-103-1}
+* Adds clarifying language noting that configured automatic update checks use the local server time.
+
+### Bug Fixes {#bug-fixes-1-103-1}
+* Fixes an issue where Helm charts that were previously deployed with the Replicated HelmChart kots.io/v1beta2 installation method were not uninstalled as expected after making configuration changes to exclude the chart.
+* Fixes an issue where image pull secrets and hook informers were not applied for dynamically created namespaces if the `kotsadm` pod/API restarted.
+* Fixes an issue where the applications dropdown for automatic partial snapshots settings showed no options or applications to select.
+
+## 1.103.0
+
+Released on September 20, 2023
+
+Support for Kubernetes: 1.25, 1.26, 1.27, and 1.28
+
+### New Features {#new-features-1-103-0}
+* Adds support for the [Lookup](/reference/template-functions-static-context#lookup) template function.
+
+## 1.102.2
+
+Released on September 15, 2023
+
+Support for Kubernetes: 1.25, 1.26, 1.27, and 1.28
+
+### Improvements {#improvements-1-102-2}
+* The [custom metrics](/vendor/custom-metrics#configure-custom-metrics) API no longer requires an authorization header.
+
+### Bug Fixes {#bug-fixes-1-102-2}
+* Fixes an issue where updating the registry settings would not always display the loading indicator and status messages in the UI.
+* Fixes an issue where deployments or diffing could fail after upgrading from KOTS 1.95 or earlier to KOTS 1.101.2-1.102.1 if versions contained empty Kustomize bases.
+
+## 1.102.1
+
+Released on September 8, 2023
+
+Support for Kubernetes: 1.25, 1.26, 1.27, and 1.28
+
+### Bug Fixes {#bug-fixes-1-102-1}
+* Fixes an issue where uploading the application air gap bundle could fail due to a permissions issue when creating files under the `/tmp` directory inside the `kotsadm` pod/container. This is only applicable to embedded cluster installations with Replicated kURL.
+
+## 1.102.0
+
+Released on September 6, 2023
+
+Support for Kubernetes: 1.24, 1.25, 1.26, and 1.27
+
+### New Features {#new-features-1-102-0}
+* Adds support for sending custom application metrics using the `/api/v1/app/custom-metrics` endpoint. For more information, see [Configuring Custom Metrics (Beta)](/vendor/custom-metrics). (A usage sketch appears after the 1.101.1 entries below.)
+
+## 1.101.3
+
+Released on August 18, 2023
+
+Support for Kubernetes: 1.24, 1.25, 1.26, and 1.27
+
+### Improvements {#improvements-1-101-3}
+* Updates the MinIO image to RELEASE.2023-08-09T23-30-22Z to resolve CVE-2023-27536, CVE-2023-28321, CVE-2023-34969, CVE-2023-2603, CVE-2023-28484, CVE-2023-29469 with medium severity and CVE-2023-2602 with low severity.
+
+### Bug Fixes {#bug-fixes-1-101-3}
+* Removes the distinction between `gke` and `gke-autopilot` from Kubernetes distribution reporting, because this check was not working as intended and could display inconsistent information. All Standard and Autopilot GKE clusters are now reported as `gke`.
+* Fixes an issue where the admin console was not correctly processing multi-doc YAML files containing Windows line endings.
+
+## 1.101.2
+
+Released on August 4, 2023
+
+Support for Kubernetes: 1.24, 1.25, 1.26, and 1.27
+
+### Improvements {#improvements-1-101-2}
+* Upgrades the Helm binary in the kotsadm image to 3.12.2 to resolve CVE-2023-2253 with high severity.
+
+### Bug Fixes {#bug-fixes-1-101-2}
+* Fixes an issue where parsing invalid KOTS kinds failed silently.
+
+## 1.101.1
+
+Released on July 31, 2023
+
+Support for Kubernetes: 1.24, 1.25, 1.26, and 1.27
+
+### Bug Fixes {#bug-fixes-1-101-1}
+* Fixes an issue where defaults were not used for [repeatable config items](/reference/custom-resource-config#repeatable-items) when doing an automated install with the kots CLI.
+* Fixes an issue where processing Helm charts or sub-charts that have `-charts` as a suffix failed.
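+
+A sketch of the custom metrics endpoint from the 1.102.0 entry above, sent from a workload inside the cluster; the service address, port, and metric names are illustrative:
+
+```bash
+# POST custom application metrics to the Admin Console API
+# (no authorization header required as of 1.102.2).
+curl -X POST http://kotsadm.app-namespace.svc.cluster.local:3000/api/v1/app/custom-metrics \
+  -H "Content-Type: application/json" \
+  -d '{"data": {"active_users": 42, "projects_created": 7}}'
+```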
+
+## 1.101.0
+
+Released on July 19, 2023
+
+Support for Kubernetes: 1.24, 1.25, 1.26, and 1.27
+
+### New Features {#new-features-1-101-0}
+* KOTS now supports running preflight checks defined in a Helm chart. If any Helm charts in a release contain preflight specifications, KOTS runs those. If no Helm charts exist or no preflights are defined in any Helm charts, KOTS uses the previous behavior and runs any preflights defined in a `kind: Preflight` file in the root of the release. For more information about preflights in Helm charts, see [Define Preflight Checks for Helm Installations](/vendor/preflight-helm-defining). (A sketch appears after the 1.100.1 entries below.)
+
+### Improvements {#improvements-1-101-0}
+* Updates the replicated/local-volume-provider image to v0.5.4 to resolve CVE-2023-0464 with high severity.
+* Updates the kotsadm/dex image to v2.37.0 to resolve CVE-2022-4450, CVE-2023-0215, CVE-2023-0464, CVE-2023-2650 with high severity and CVE-2022-4304, CVE-2023-0465, CVE-2023-0466, CVE-2023-1255 with medium severity.
+* Updates the MinIO image to RELEASE.2023-06-29T05-12-28Z to resolve CVE-2020-24736, CVE-2023-1667, CVE-2023-2283, and CVE-2023-26604 with medium severity.
+* Upgrades webpack to 5.88.1 to resolve CVE-2023-28154 with critical severity.
+
+### Bug Fixes {#bug-fixes-1-101-0}
+* Fixes an issue where the `rendered` directory was not created for air gap application updates.
+
+## 1.100.3
+
+Released on June 20, 2023
+
+Support for Kubernetes: 1.24, 1.25, 1.26, and 1.27
+
+### Improvements {#improvements-1-100-3}
+* Updates the github.com/dexidp/dex module to v2.36.0 (pseudo-version v0.0.0-20230320125501-2bb4896d120e) to resolve CVE-2020-26290 with critical severity.
+* Updates the github.com/sigstore/rekor module to v1.2.0 to resolve CVE-2023-30551 with high severity and CVE-2023-33199 with medium severity.
+* Updates the github.com/gin-gonic/gin module to v1.9.1 in the kurl-proxy to resolve CVE-2023-26125 and CVE-2023-29401 with medium severity.
+
+### Bug Fixes {#bug-fixes-1-100-3}
+* Fixes an issue where [repeatable items](/reference/custom-resource-config#repeatable-items) did not work as expected on the Config page.
+
+## 1.100.2
+
+Released on June 7, 2023
+
+Support for Kubernetes: 1.24, 1.25, 1.26, and 1.27
+
+### Bug Fixes {#bug-fixes-1-100-2}
+* Fixes an issue where the Config values were not saved when a release contained a multiple-document YAML file.
+* Fixes an issue where the Config specification was missing from the rendered release in the kotsKinds folder if the release contained a multiple-document YAML file.
+* Fixes an issue that allowed users to edit `readonly` Config items.
+
+## 1.100.1
+
+Released on June 2, 2023
+
+Support for Kubernetes: 1.24, 1.25, 1.26, and 1.27
+
+### Improvements {#improvements-1-100-1}
+* Updates the way custom domains for the Replicated registry and proxy registry are used. If a default or channel-specific custom domain is configured, that custom domain is associated with a release when it is promoted to a channel. KOTS will rewrite images using that custom domain. The `replicatedRegistryDomain` and `proxyRegistryDomain` fields in the Application custom resource are deprecated but continue to work to allow time to migrate to the new mechanism.
+* Updates the rqlite/rqlite image to 7.19.0 to resolve CVE-2023-1255 with medium severity.
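+
+A minimal sketch of the Helm chart preflight support from the 1.101.0 entry above, using a labeled Secret in the chart templates as described in the linked documentation; the Secret name and the analyzer are illustrative:
+
+```yaml
+# templates/preflight-secret.yaml in the Helm chart
+apiVersion: v1
+kind: Secret
+metadata:
+  name: my-app-preflight
+  labels:
+    troubleshoot.sh/kind: preflight
+type: Opaque
+stringData:
+  preflight.yaml: |
+    apiVersion: troubleshoot.sh/v1beta2
+    kind: Preflight
+    metadata:
+      name: my-app-preflight
+    spec:
+      analyzers:
+        - clusterVersion:
+            outcomes:
+              - fail:
+                  when: "< 1.24.0"
+                  message: This application requires Kubernetes 1.24.0 or later.
+              - pass:
+                  message: The cluster meets the Kubernetes version requirement.
+```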
+
+## 1.100.0
+
+Released on May 26, 2023
+
+Support for Kubernetes: 1.24, 1.25, 1.26, and 1.27
+
+### New Features {#new-features-1-100-0}
+* Adds support for `kots.io/creation-phase` and `kots.io/deletion-phase` annotations to control the order in which native Kubernetes resources are created and deleted, respectively. See [Deployment Phases](/vendor/orchestrating-resource-deployment#deployment-phases) in _Orchestrating Resource Deployment_.
+* Adds support for a `kots.io/wait-for-ready` annotation, which causes KOTS to wait for a native Kubernetes resource to be ready before continuing with the deployment. See [Waiting for a Resource to be Ready](/vendor/orchestrating-resource-deployment#wait-for-a-resource-to-be-ready) in _Orchestrating Resource Deployment_.
+* Adds support for a `kots.io/wait-for-properties` annotation, which causes KOTS to wait for one or more properties to match a desired value before continuing with the deployment. See [Wait for Resource Properties](/vendor/orchestrating-resource-deployment#wait-for-resource-properties) in _Orchestrating Resource Deployment_.
+
+### Improvements {#improvements-1-100-0}
+* Updates the github.com/cloudflare/circl module to v1.3.3 to resolve CVE-2023-1732 with medium severity.
+
+### Bug Fixes {#bug-fixes-1-100-0}
+* Fixes an issue where Helm charts deployed using the native Helm installation method were uninstalled and then reinstalled when the chart version changed or was updated.
+* Fixes an issue in embedded clusters where images from native Helm v2 (Beta) charts were incorrectly removed from the in-cluster registry, potentially leading to failed deployments.
+* Bumps the Helm version used by KOTS to 3.12.0 to fix an issue where native Helm installations were failing on Kubernetes 1.27.
+
+## 1.99.0
+
+Released on May 18, 2023
+
+Support for Kubernetes: 1.24, 1.25, and 1.26
+
+### New Features {#new-features-1-99-0}
+* Adds a new native Helm v2 installation method (Beta) that leverages the `kots.io/v1beta2` HelmChart custom resource. This v2 installation method does a Helm installation or upgrade of your Helm chart without modifying the chart with Kustomize. This is an improvement over the v1 installation method because it results in Helm installations that can be reproduced outside of the app manager, and it enables the use of additional Helm functionality that was not available in v1. See [HelmChart v2 (Beta)](/reference/custom-resource-helmchart-v2) in the _Custom Resources_ section.
+
+### Improvements {#improvements-1-99-0}
+* Applies application status informers before deploying the actual resources. This is helpful in cases where deployments take a long time, because the statuses are now available while the deployment happens.
+* Updates the replicated/local-volume-provider image to v0.5.3 to resolve CVE-2022-4415 and CVE-2022-3821 with high severity.
+* Replaces the misleading call-to-action button on the instance snapshot restore modal, which could have mistakenly led the user to believe the instance restore was initiated.
+* Enhances formatting to accommodate lengthy strings for fields such as the application name and config item names.
+
+### Bug Fixes {#bug-fixes-1-99-0}
+* Fixes a bug where the rqlite collector was unable to collect a data dump if the name of the rqlite container changed.
+* Fixes an issue where re-running preflights during the initial installation could cause the UI to incorrectly show a status of "Currently pending version".
+* Fixes an issue where re-running preflights during the initial installation could cause the application to be re-deployed.
+
+## 1.98.3
+
+Released on May 5, 2023
+
+Support for Kubernetes: 1.24, 1.25, and 1.26
+
+### Improvements {#improvements-1-98-3}
+* The JSON Web Token (JWT) is stored in an HttpOnly cookie to prevent cross-site scripting (XSS) attacks.
+* By default, the **Cluster Management** page shows the command for joining a primary node instead of a secondary node for high availability clusters.
+* The resource status modal automatically displays the time when the data was last fetched.
+* Introduces a deterministic order for applying and deleting Kubernetes manifests based on the resource kind.
+* Uses the [weight](https://docs.replicated.com/reference/custom-resource-helmchart#weight) field from the HelmChart custom resource to determine the order in which to uninstall charts that have `useHelmInstall: true`. Charts are uninstalled by weight in descending order, with higher weights uninstalled first.
+* Application Helm charts are uninstalled first, then other Kubernetes manifests are uninstalled.
+* Improvements to the **Version history** page include truncating long version labels, removing unnecessary preflight icons, and improving the content layout.
+* The `kots admin-console push-images` command now returns an error if the provided air gap bundle file is missing.
+* Adds a **Back** button to the **Preflights** page.
+
+### Bug Fixes {#bug-fixes-1-98-3}
+* Fixes an issue where snapshot restores hung if RabbitMQ cluster custom resources were used.
+* Fixes an issue where Helm releases were not uninstalled when undeploying an application using the [kots remove](/reference/kots-cli-remove) command and passing the `--undeploy` flag.
+* Fixes an issue where Helm charts that were deployed with native Helm to a different namespace than KOTS were not uninstalled when they were removed from subsequent application releases.
+* Fixes an issue where uploading an air gap bundle through the admin console might have failed due to issues getting layers for OCI images.
+* Fixes an issue where canceling a restore of an application (partial) snapshot sometimes did not work if multiple applications were installed in the same admin console.
+* The **Config** page now shows the correct error message when errors other than regex validation errors occur.
+* Fixes an issue where the Config page incorrectly displayed "Edit the currently deployed config" when there was no application deployed.
+* Fixes an issue where installations and upgrades could fail when checking if the cluster was a kURL cluster, if the user running the command was not authorized to list ConfigMaps in the `kube-system` namespace.
+* Fixes an issue where air gapped application pods could fail to pull images from the kURL registry due to the image names being rewritten incorrectly, if the application was upgraded using the [`kots upstream upgrade`](/reference/kots-cli-upstream-upgrade) command.
+* Fixes an issue where the **Version history** page could incorrectly show a **Deployed** button if an application version was deployed while preflight checks were running.
+
+## 1.98.2
+
+Released on April 26, 2023
+
+Support for Kubernetes: 1.24, 1.25, and 1.26
+
+### Bug Fixes {#bug-fixes-1-98-2}
+* Fixes an issue where quotes were stripped from fields in HelmChart custom resources, which led to unexpected behavior and failed deployments.
+* Fixes an issue where invalid Kustomize patches were generated for Helm charts with deeply nested dependencies.
+* Fixes an issue where processing application manifests occasionally failed if null values were encountered after rendering.
+
+## 1.98.1
+
+Released on April 21, 2023
+
+Support for Kubernetes: 1.24, 1.25, and 1.26
+
+### Bug Fixes {#bug-fixes-1-98-1}
+* Fixes an issue where multiple copies of the same Kubernetes resource (for example, the same `kind` and `name`) were deduplicated even if they had different namespaces. This deduplication resulted in the app manager deploying only one of the resources to the cluster.
+* Fixes an issue that caused config updates to fail when the user did not provide a value for a required config item with a default value, even if the item was hidden.
+* Fixes an issue where switching the license to a different channel did not fetch the current release on that channel if the number of releases was the same on both channels.
+
+## 1.98.0
+
+Released on April 19, 2023
+
+Support for Kubernetes: 1.24, 1.25, and 1.26
+
+### New Features {#new-features-1-98-0}
+* Adds support for validating config items with type `text`, `textarea`, `password`, or `file` by matching the item's values against a regex pattern. For more information, see [validation](/reference/custom-resource-config#validation) in _Config_.
+* Adds a new `kotsKinds` directory to the application archive that includes the rendered KOTS custom resources.
+
+### Improvements {#improvements-1-98-0}
+* Sorts multi-application installations in the admin console by their creation date with the most recently installed application at the top.
+* Updates spacing and font sizes to improve visual grouping of items on the admin console Config page.
+* Updates Kustomize from v4.5.7 to v5.0.1, which resolves CVE-2022-27664, CVE-2022-41723, and CVE-2022-28948 with high severity and CVE-2022-41717 with medium severity.
+* Updates the Helm binary included in the kotsadm image from 3.11.0 to 3.11.3 to resolve CVE-2022-41723 and CVE-2023-25173 with high severity and CVE-2023-25153 with medium severity.
+* Updates the github.com/opencontainers/runc module to v1.1.5 to resolve CVE-2023-27561 with high severity.
+* Updates the minio/minio image to RELEASE.2023-04-13T03-08-07Z to resolve CVE-2023-0361 with medium severity.
+* Updates the minio/mc image to RELEASE.2023-04-12T02-21-51Z to resolve CVE-2023-0361 with medium severity.
+* Adds support for template functions to the `namespace` and `helmUpgradeFlags` fields of the [HelmChart](/reference/custom-resource-helmchart) custom resource.
+
+### Bug Fixes {#bug-fixes-1-98-0}
+* Fixes an issue where strict security context configurations were not applied in OpenShift environments when the `--strict-security-context` flag was passed to the [kots install](https://docs.replicated.com/reference/kots-cli-install) or [kots admin-console upgrade](https://docs.replicated.com/reference/kots-cli-admin-console-upgrade) commands.
+
+## 1.97.0
+
+Released on April 7, 2023
+
+Support for Kubernetes: 1.24, 1.25, and 1.26
+
+### New Features {#new-features-1-97-0}
+* Allows users to unmask passwords on various forms in the admin console.
+
+### Improvements {#improvements-1-97-0}
+* Simplifies the wording on the air gap bundle upload page.
+* Updates the login page to say **Log in to APP_NAME admin console** instead of **Log in to APP_NAME**.
+* Upgrades the MinIO image to RELEASE.2023-03-24T21-41-23Z to resolve CVE-2023-0286 with high severity, and CVE-2022-4304, CVE-2022-4450, and CVE-2023-0215 with medium severity.
+
+## 1.96.3
+
+Released on March 29, 2023
+
+Support for Kubernetes: 1.24, 1.25, and 1.26
+
+### Improvements {#improvements-1-96-3}
+* Wraps the logs in the deploy logs modal to increase readability by eliminating the need to scroll horizontally.
+* Removes support for cipher suites that use the CBC encryption algorithm or SHA-1 from the kurl_proxy service that runs in embedded cluster installations.
+
+### Bug Fixes {#bug-fixes-1-96-3}
+* Fixes a bug that caused application upgrades to fail because the app manager attempted to migrate the Helm release secret when the release secret already existed in the release namespace.
+
+## 1.96.2
+
+Released on March 24, 2023
+
+Support for Kubernetes: 1.23, 1.24, 1.25, and 1.26
+
+### Improvements {#improvements-1-96-2}
+* Updates the kotsadm/dex image to v2.36.0 to resolve CVE-2022-4450, CVE-2023-0215, CVE-2023-0286, CVE-2022-41721, CVE-2022-41723, and CVE-2022-32149 with high severity, and CVE-2022-4304 and CVE-2022-41717 with medium severity.
+* Updates the MinIO image to RELEASE.2023-03-13T19-46-17Z to resolve CVE-2023-23916 with medium severity.
+* Updates the kubectl binary in the kotsadm image to resolve CVE-2022-41723 with high severity and CVE-2022-41717 with medium severity.
+* Updates the golang.org/x/net module in the kurl-proxy to resolve CVE-2022-41723 with high severity.
+* Upgrades the schemahero image tag to v0.14.0 and the replicated/local-volume-provider image to v0.5.2 to resolve CVE-2022-41723 with high severity.
+
+### Bug Fixes {#bug-fixes-1-96-2}
+* Fixes a bug where multi-node embedded cluster installations hung indefinitely with the KOTS add-on.
+* Increases the time for displaying the slow loading indicator to two minutes to prevent the admin console from continuously reloading when the internet connection is slow.
+* Removes a hardcoded application name on the Troubleshoot page when a community license is used.
+* Fixes a known issue that was introduced in v1.95.0 that causes application upgrades to fail for Helm charts that are deployed using the native Helm installation method. See the [known issue](/release-notes/rn-app-manager#known-issues-1-95-0) in the v1.95.0 release notes, and see [useHelmInstall](/reference/custom-resource-helmchart#usehelminstall) in the _HelmChart_ reference.
+
+## 1.96.1
+
+:::important
+The app manager v1.96.1 has a known issue that causes application upgrades to fail for Helm charts that are deployed using the native Helm installation method.
+This issue is resolved in the app manager v1.96.2. See [Known Issue](#known-issues-1-95-0) in _1.95.0_ below.
+:::
+
+Released on March 16, 2023
+
+Support for Kubernetes: 1.23, 1.24, 1.25, and 1.26
+
+### Improvements {#improvements-1-96-1}
+* Refreshes the design of the preflights page in the admin console to improve usability and match the style of other pages.
+* Updates the helm.sh/helm/v3 module to v3.11.2 to resolve CVE-2023-25165 with medium severity.
+* If the application's port is not available when the user enables access to the admin console with `kubectl kots admin-console`, the failure message now prints only once and subsequent retries happen silently.
+
+## 1.96.0
+
+:::important
+The app manager v1.96.0 has a known issue that causes application upgrades to fail for Helm charts that are deployed using the native Helm installation method.
+This issue is resolved in the app manager v1.96.2.
+See [Known Issue](#known-issues-1-95-0) in _1.95.0_ below.
+:::
+
+Released on March 9, 2023
+
+Support for Kubernetes: 1.23, 1.24, 1.25, and 1.26
+
+### New Features {#new-features-1-96-0}
+* Adds the deployable, rendered application manifests to the version archive. This increases the transparency of what KOTS deploys by showing the exact manifests that are deployed as part of this version on the **View Files** page. For more information, see [Rendered](/enterprise/updating-patching-with-kustomize#rendered) in _Patching with Kustomize_.
+
+### Improvements {#improvements-1-96-0}
+* Updates the replicated/local-volume-provider image to v0.5.1 to resolve CVE-2023-0361, CVE-2022-4450, CVE-2023-0215, and CVE-2023-0286 with high severity, and CVE-2022-2097 and CVE-2022-4304 with medium severity.
+* Improves the performance of creating, diffing, configuring, and deploying application versions by retrieving the rendered application manifests when they are available, instead of rendering them on the fly.
+* Improves the performance of creating application versions by running private image checks concurrently.
+
+### Bug Fixes {#bug-fixes-1-96-0}
+* Resolves a clickjacking vulnerability that was present in the kurl_proxy service that runs in embedded cluster installations.
+* Adds a **Rerun** button on the preflights page when an application is initially installed.
+* Fixes an issue where the selected subnavigation tab was not underlined.
+* Fixes an issue where CRDs from subcharts were included in the Secret that Helm stores the release information in. In some cases, this issue could dramatically increase the Secret's size.
+
+## 1.95.0
+
+:::important
+The app manager v1.95.0 has a known issue that causes application upgrades to fail for Helm charts that are deployed using the native Helm installation method.
+This issue is resolved in the app manager v1.96.2. See [Known Issue](#known-issues-1-95-0) below.
+:::
+
+Released on March 1, 2023
+
+Support for Kubernetes: 1.23, 1.24, 1.25, and 1.26
+
+### New Features {#new-features-1-95-0}
+* Adds an `--undeploy` flag to the [kots remove](/reference/kots-cli-remove) command that allows you to completely undeploy the application and delete its resources from the cluster.
+* Adds support for Azure Container Registry (ACR). For a full list of supported registries, see [Private Registry Requirements](/enterprise/installing-general-requirements#private-registry-requirements).
+* Status informers now support DaemonSets. See [Resource Statuses](/vendor/admin-console-display-app-status#resource-statuses).
+* When using custom branding, you can more easily change the color of groups of elements in the admin console (Beta).
+
+### Improvements {#improvements-1-95-0}
+* The [kots install](/reference/kots-cli-install), [kots upstream upgrade](/reference/kots-cli-upstream-upgrade), and [kots admin-console push-images](/reference/kots-cli-admin-console-push-images) commands now validate the provided registry information before processing the air gap bundle.
+* Upgrades the MinIO image to RELEASE.2023-02-22T18-23-45Z to resolve CVE-2022-42898, CVE-2022-47629, and CVE-2022-41721 with high severity and CVE-2022-2509, CVE-2022-1304, CVE-2021-46848, CVE-2016-3709, CVE-2022-40303, CVE-2022-40304, CVE-2020-35527, CVE-2022-35737, CVE-2022-3821, CVE-2022-4415, CVE-2022-37434, and CVE-2022-41717 with medium severity.
+* The [kots admin-console generate-manifests](/reference/kots-cli-admin-console-generate-manifests) command now supports OpenShift and GKE Autopilot, if it is executed with a Kubernetes cluster context.
+* Support bundles generated from the admin console include a copy of rqlite data for debugging purposes.
+
+### Bug Fixes {#bug-fixes-1-95-0}
+* Fixes an issue where the [namespace](/reference/custom-resource-helmchart#namespace) field in the HelmChart custom resource was not respected when [useHelmInstall](/reference/custom-resource-helmchart#usehelminstall) was set to `true`.
+
+### Known Issue {#known-issues-1-95-0}
+
+There is a known issue in the app manager v1.95.0 that causes application upgrades to fail for Helm charts that are deployed using the native Helm installation method. For more information about native Helm, see [How Replicated Deploys Helm Charts](/vendor/helm-overview#how-replicated-deploys-helm-charts) in _About Packaging with Helm_.
+
+The upgrade failure occurs for a Helm chart when the following conditions are met:
+- The Helm chart in the application has been installed previously using the app manager v1.94.2 or earlier.
+- In the HelmChart custom resource for the Helm chart:
+  - `useHelmInstall` is set to `true`. See [useHelmInstall](/reference/custom-resource-helmchart#usehelminstall) in _HelmChart_.
+  - `namespace` is set to a value different than the namespace where the app manager is installed. See [namespace](/reference/custom-resource-helmchart#namespace) in _HelmChart_.
+
+To avoid this known issue, Replicated recommends that you do not upgrade to v1.95.0. To work around this issue in v1.95.0, manually uninstall the affected Helm chart using the Helm CLI, and then redeploy the application using the app manager. See [Helm Uninstall](https://helm.sh/docs/helm/helm_uninstall/) in the Helm documentation.
+
+## 1.94.2
+
+Released on February 17, 2023
+
+Support for Kubernetes: 1.23, 1.24, 1.25, and 1.26
+
+### Improvements {#improvements-1-94-2}
+* Updates the kurl_proxy go module gopkg.in/yaml.v3 to resolve CVE-2022-28948 with high severity.
+* Support bundles generated from the admin console now include collectors and analyzers from all support bundle specifications found in the cluster. This includes support bundle specifications found in Secret and ConfigMap objects. For more information about how to generate support bundles using discovery, see [Generating Support Bundles](/vendor/support-bundle-generating#generate-a-bundle).
+
+### Bug Fixes {#bug-fixes-1-94-2}
+* Fixes a bug where config items with the `affix` property were not displayed properly.
+* Fixes an issue where the button to rerun preflights did not show if preflights failed during an air gapped installation.
+* Fixes a bug where Velero backups failed due to pods in the Shutdown state.
+
+## 1.94.1
+
+Released on February 14, 2023
+
+Support for Kubernetes: 1.23, 1.24, 1.25, and 1.26
+
+### Improvements {#improvements-1-94-1}
+* Adds support for Velero 1.10.
+
+### Bug Fixes {#bug-fixes-1-94-1}
+* Fixes an issue where errors related to parsing and rendering HelmChart custom resources were silently ignored.
+
+## 1.94.0
+
+Released on February 7, 2023
+
+Support for Kubernetes: 1.23, 1.24, 1.25, and 1.26
+
+### New Features {#new-features-1-94-0}
+* Updates the [kots velero configure-nfs](/reference/kots-cli-velero-configure-nfs) and [kots velero configure-hostpath](/reference/kots-cli-velero-configure-hostpath) commands to remove required manual steps and better automate the workflow.
+ Users are now given a command to install Velero without a backup storage location, and then rerun the configure command to automatically configure the storage destination. (A sketch appears after the 1.93.1 entries below.)
+* Updates the [kots velero subcommands](/reference/kots-cli-velero-index) for configuring storage destinations, with instructions on how to install Velero if it is not yet installed.
+* The instructions displayed in the admin console for configuring an NFS or host path snapshot storage destination no longer use the `kots velero print-fs-instructions` command. Instead, they use the [kots velero configure-nfs](/reference/kots-cli-velero-configure-nfs) and [kots velero configure-hostpath](/reference/kots-cli-velero-configure-hostpath) commands to instruct the user to install Velero and configure the storage destination.
+
+### Improvements {#improvements-1-94-0}
+* Updates the golang.org/x/net module in the kurl-proxy image to resolve CVE-2022-41721 with high severity.
+* Updates the github.com/dexidp/dex go module to resolve CVE-2022-39222 with medium severity.
+* Updates the rqlite/rqlite image to 7.13.1 to resolve CVE-2022-41721 with high severity and CVE-2022-41717 with medium severity.
+* Updates the replicated/local-volume-provider image to v0.4.4 to resolve CVE-2022-41721 with high severity.
+* Deprecates the [kots velero print-fs-instructions](/reference/kots-cli-velero-print-fs-instructions) command because its functionality is replaced by the improved [kots velero configure-hostpath](/reference/kots-cli-velero-configure-hostpath) and [kots velero configure-nfs](/reference/kots-cli-velero-configure-nfs) commands.
+* Improves the layout of deploy and redeploy network errors.
+
+### Bug Fixes {#bug-fixes-1-94-0}
+* Fixes an issue where the Edit Config icon was visible on the dashboard for application versions that did not include config.
+* Fixes an issue where a user had to refresh the page to generate a new support bundle after deleting a support bundle that was still being generated.
+* Fixes a regression where the text wasn't colored for certain status informer states.
+* Fixes a bug where the app icon for the latest version was shown instead of the icon for the currently deployed version.
+* Fixes an issue where backup logs failed to download if a log line exceeded the default `bufio.Scanner` buffer size of 64KB. This limit is increased to 1MB in the admin console.
+
+## 1.93.1
+
+Released on January 27, 2023
+
+Support for Kubernetes: 1.23, 1.24, 1.25, and 1.26
+
+### Improvements {#improvements-1-93-1}
+* Updates the Helm binary included in the kotsadm image from 3.9.3 to 3.11.0 to resolve CVE-2022-27664 and CVE-2022-32149 with high severity.
+* Updates the golang.org/x/net module to resolve CVE-2022-41721 with high severity.
+* Public and private SSH keys are deleted when GitOps is disabled and the keys are not in use by another application.
+
+### Bug Fixes {#bug-fixes-1-93-1}
+* Fixes a bug where the snapshots page showed no snapshots for a moment after starting a snapshot.
+* Fixes a bug where a warning related to `kubectl apply` was displayed during embedded cluster installations.
+* Fixes an issue where registry.replicated.com images were rewritten to proxy.replicated.com when the application version specified a custom domain for the Replicated registry.
+* Fixes an issue where the Edit Config icon was visible on the version history page for application versions that did not include config.
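+
+A sketch of the two-step snapshots flow from the 1.94.0 entry above, using a hypothetical namespace and host path:
+
+```bash
+# Step 1: Prints a command for installing Velero without a backup storage
+# location when Velero is not yet installed.
+kubectl kots velero configure-hostpath --namespace app-namespace --hostpath /var/lib/snapshots
+
+# Step 2: After installing Velero with the printed command, rerun the same
+# command to finish configuring the storage destination.
+kubectl kots velero configure-hostpath --namespace app-namespace --hostpath /var/lib/snapshots
+```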
+
+## 1.93.0
+
+Released on January 19, 2023
+
+Support for Kubernetes: 1.21, 1.22, 1.23, 1.24, and 1.25
+
+### New Features {#new-features-1-93-0}
+* Adds the ability to delete support bundles from the Troubleshoot page of the admin console.
+* Config navigation links are highlighted as the user scrolls.
+
+### Improvements {#improvements-1-93-0}
+* Updates the helm.sh/helm/v3 module to v3.10.3 to resolve CVE-2022-23524, CVE-2022-23525, and CVE-2022-23526 with high severity.
+
+### Bug Fixes {#bug-fixes-1-93-0}
+* Fixes an issue where the Cluster Management tab did not show up in Kubernetes installer clusters.
+* Fixes an issue where the description for generating a support bundle used a hardcoded application name.
+* Fixes an issue on the Version History page where the row layout broke when displaying preflight check warnings.
+* Fixes an issue where an error occurred when uploading a PKCS #12 certificate with the private key listed first.
+
+## 1.92.1
+
+Released on December 29, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, 1.24, and 1.25
+
+### Improvements {#improvements-1-92-1}
+* Preflight checks run and support bundles are generated at least twice as fast as before.
+* Updates the kubectl binary in the kotsadm image to resolve CVE-2022-27664 and CVE-2022-32149 with high severity.
+* Updates the replicated/local-volume-provider image to v0.4.3 to resolve CVE-2021-46848 with critical severity.
+
+### Bug Fixes {#bug-fixes-1-92-1}
+* Fixes an issue that caused the license upload to fail for applications that include Helm charts with [required](https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-required-function) values missing from configuration.
+
+## 1.92.0
+
+Released on December 16, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, 1.24, and 1.25
+
+### New Features {#new-features-1-92-0}
+* When the `replicatedRegistryDomain` field is provided in the Application custom resource, the app manager uses that domain to rewrite images stored in the Replicated registry.
+* Adds the [KubernetesVersion](/reference/template-functions-static-context#kubernetesversion), [KubernetesMajorVersion](/reference/template-functions-static-context#kubernetesmajorversion), and [KubernetesMinorVersion](/reference/template-functions-static-context#kubernetesminorversion) template functions.
+
+### Improvements {#improvements-1-92-0}
+* Standardizes classes used for branding the admin console.
+* Pins the config navigation so that it does not disappear when scrolling.
+* The [`LicenseDockerCfg`](/reference/template-functions-license-context#licensedockercfg) template function in the License Context now utilizes the `replicatedRegistryDomain` and `proxyRegistryDomain` values from the Application custom resource, if specified.
+
+### Bug Fixes {#bug-fixes-1-92-0}
+* Disables image garbage collection when an external registry is enabled.
+* Fixes a bug where the rqlite headless service manifest was not generated.
+* Fixes an issue where labels displayed as config items in the config navigation.
+* Fixes a bug where the `kots get config` command always decrypted passwords, even when the `--decrypt` flag wasn't passed.
+
+## 1.91.3
+
+Released on December 10, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, 1.24, and 1.25
+
+### Bug Fixes {#bug-fixes-1-91-3}
+* Fixes an issue where air gap uploads failed for applications containing required configuration without default values (see the sketch below).
+* Fixes errors when generating support bundles in existing clusters via the CLI.
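+
+As a hypothetical illustration of the required-configuration case in the fix above, and of the workaround for the v1.91.2 known issue below, a Config custom resource item can pair `required: true` with a `default` value. Group and item names and the default value here are examples only:
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Config
+metadata:
+  name: config-sample
+spec:
+  groups:
+    - name: settings
+      title: Settings
+      items:
+        - name: hostname
+          title: Hostname
+          type: text
+          required: true
+          # A default keeps required items satisfied during air gap
+          # uploads, per the workaround described below.
+          default: "app.example.com"
+```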
+
+## 1.91.2
+
+:::important
+The app manager v1.91.2 has a known issue that affects the use of
+required configuration items in air gapped environments.
+See [Known Issue](#known-issues-1-91-2) below.
+:::
+
+Released on December 8, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, 1.24, and 1.25
+
+### Improvements {#improvements-1-91-2}
+* Improves the TLS certificate flow to make it clearer which fields are needed when using a self-signed certificate or uploading your own.
+* Adds the `proxyRegistryDomain` field to the Application custom resource. When this field is provided, the app manager rewrites proxied private images using that domain instead of proxy.replicated.com.
+
+### Bug Fixes {#bug-fixes-1-91-2}
+* Fixes overlapping labels on the TLS configuration page.
+* Fixes an issue that caused the login button to be stuck in the "Logging in" state in Helm-managed mode (Beta). For more information on Helm-managed mode, see [Supporting helm CLI Installations (Beta)](/vendor/helm-install).
+* Fixes an issue where snapshots to NFS storage locations failed due to file permission issues in environments running without MinIO.
+* Fixes an issue that caused the license upload to fail for applications that include Helm charts with [`required`](https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-required-function) values missing from configuration.
+* Fixes an issue where release notes did not display when the release notes icon was clicked on the dashboard.
+* Fixes an issue where no tab was selected by default when opening the View Logs modal in Helm-managed mode.
+* Fixes an issue that prevented image garbage collection from being enabled or disabled.
+* Fixes an issue where DockerHub credentials provided to the admin console via the [kots docker ensure-secret](/reference/kots-cli-docker-ensure-secret) CLI command did not increase the rate limit.
+* Fixes an issue that prevented Helm render errors from being surfaced to the user when running [`kots upload`](/reference/kots-cli-upload) commands.
+* Fixes leaked goroutines.
+* Increases the memory limit for rqlite to 1Gi to fix an issue where rqlite was OOM killed during the migration from Postgres when there was a very large number of versions available in the admin console.
+
+### Known Issue {#known-issues-1-91-2}
+
+There is a known issue in the app manager v1.91.2 that causes air gap uploads to fail when there are configuration items with the `required` property set to `true` and no default value specified.
+
+To avoid this known issue, Replicated recommends that you do not upgrade to v1.91.2. To work around this issue in v1.91.2, ensure that all required configuration items in the Config custom resource have a default value. For more information about adding default values to configuration items, see [`default` and `value`](/reference/custom-resource-config#default-and-value) in _Config_.
+
+## 1.91.1
+
+Released on November 18, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, 1.24, and 1.25
+
+### Improvements {#improvements-1-91-1}
+* Updates the Snapshots page to standardize the look of the admin console.
+* Updates the schemahero image to v0.13.8 to resolve CVE-2022-32149 with high severity.
+* Updates the kotsadm-migrations base image to `debian:bullseye` to resolve CVE-2022-29458 with high severity.
+* Updates the kurl-proxy base image to `debian:bullseye-slim` to resolve CVE-2022-29458 with high severity.
+* Updates the github.com/mholt/archiver module to v3.5.1 to resolve CVE-2019-10743 with medium severity.
+* Updates the replicated/local-volume-provider image to v0.4.1 to resolve CVE-2022-29458 with high severity.
+* Updates the Helm dependency from 3.9.0 to 3.9.4 to resolve CVE-2022-36055 with medium severity.
+
+### Bug Fixes {#bug-fixes-1-91-1}
+* Fixes a bug that could result in an `invalid status code from registry 400` error when pushing images from an air gap bundle into a private registry.
+* Fixes an issue where configuring snapshot schedules in Firefox didn't work.
+* Fixes an issue where installing or upgrading the app manager failed for GKE Autopilot clusters.
+* Fixes an issue where the existing cluster snapshot onboarding flow did not work when using the local volume provider plugin.
+
+## 1.91.0
+
+Released on November 14, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, 1.24, and 1.25
+
+### New Features {#new-features-1-91-0}
+* Updates the Troubleshoot and Config pages to standardize the look of the admin console.
+
+### Improvements {#improvements-1-91-0}
+* Updates the kotsadm base image to `debian:bullseye-slim` to resolve CVE-2022-29458 with high severity.
+* Shows password complexity rules when setting the admin console password with the CLI. Passwords must be at least six characters long.
+
+### Bug Fixes {#bug-fixes-1-91-0}
+* Fixes an issue where the admin console automatically redirected to the login page after a snapshot was restored successfully, which could have prevented users from knowing the outcome of the restore.
+
+## 1.90.0
+
+Released on November 4, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, 1.24, and 1.25
+
+### New Features {#new-features-1-90-0}
+* Adds the ability to remove registry info from the **Registry settings** page.
+* Adds the ability to use status informers for Helm charts when running in Helm-managed mode (Beta). For more information on Helm-managed mode, see [Supporting helm CLI Installations (Beta)](/vendor/helm-install).
+
+### Improvements {#improvements-1-90-0}
+* Updates the golang.org/x/text module in the kurl-proxy image used for embedded cluster installations, to resolve CVE-2022-32149 with high severity.
+* The file explorer now includes rendered `values.yaml` files for each Helm chart that is deployed by the app manager.
+
+### Bug Fixes {#bug-fixes-1-90-0}
+* Updates the Prometheus query to show disk usage by instance and mount point.
+* Fixes an issue where checking for updates failed with the message "License is expired", but the **License** tab indicated that the license was not expired.
+* Fixes an issue where the admin console could restart during the migration from Postgres to rqlite due to a short timeout.
+
+## 1.89.0
+
+Released on October 28, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, 1.24, and 1.25
+
+### New Features {#new-features-1-89-0}
+* Automatically migrates data from Postgres to rqlite and removes Postgres. Also introduces a new [kubectl kots enable-ha](/reference/kots-cli-enable-ha) command that runs rqlite as three replicas for higher availability. This command should only be run on clusters with at least three nodes. Multi-node clusters deployed with the Kubernetes installer can now use the OpenEBS local PV, because data is replicated across all three rqlite replicas, allowing the app manager to run on any node in the cluster without requiring distributed storage such as Rook.
+
+### Bug Fixes {#bug-fixes-1-89-0}
+* Fixes an issue that caused the Released timestamp to be the same for all releases on the [version history](/enterprise/updating-apps#update-an-application-in-the-admin-console) page in [Helm-managed mode (Alpha)](/vendor/helm-install).
+* Allows kots CLI commands to use the kubeconfig namespace by default if a flag is not provided.
+* Fixes an issue where installing, updating, or configuring applications that have many images defined in KOTS custom resources (such as collectors, preflights, and analyzers) hung or took a long time.
+* Fixes an issue that could cause the preflight progress bar to be stuck at nearly 100% but never complete.
+* Fixes an issue where unused Host Path and NFS volumes were not being cleaned up when changing snapshot storage locations in clusters without MinIO.
+* Fixes the issue that caused the [`Sequence`](/reference/template-functions-license-context#sequence) template function to return 1 instead of 0 during initial configuration.
+
+## 1.88.0
+
+Released on October 19, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, 1.24, and 1.25
+
+### New Features {#new-features-1-88-0}
+* Adds the ability to deploy an application with new values after syncing the license from the admin console in Helm-managed mode (Alpha). For more information on Helm-managed mode, see [Supporting helm CLI Installations (Alpha)](/vendor/helm-install).
+
+### Improvements {#improvements-1-88-0}
+* Updates the kotsadm/dex image to v2.35.3 to resolve CVE-2022-27664 with high severity.
+* Updates the golang.org/x/net module to resolve CVE-2022-27664 with high severity.
+* Updates the schemahero image to v0.13.5 to resolve CVE-2022-37434 with critical severity and CVE-2022-27664 with high severity.
+* Updates the replicated/local-volume-provider image to v0.3.10 to resolve CVE-2022-37434 with critical severity and CVE-2022-27664 with high severity.
+
+### Bug Fixes {#bug-fixes-1-88-0}
+* Fixes an issue where the cluster management page was blank when the pod capacity for a node was defined with an SI prefix (e.g., `1k`).
+* Fixes an issue where the admin console occasionally would not redirect to the dashboard after preflight checks were skipped.
+* Fixes an issue where the application icon did not show on the login page until the application was deployed.
+
+## 1.87.0
+
+Released on October 12, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, 1.24, and 1.25
+
+### New Features {#new-features-1-87-0}
+* Uses Ed25519 SSH keys for GitOps when integrating with GitHub Enterprise. See [Pushing Updates to a GitOps Workflow](/enterprise/gitops-workflow).
+
+### Improvements {#improvements-1-87-0}
+* Adds support for template functions to the `spec.graphs` field of the Application custom resource. See [Application](/reference/custom-resource-application).
+
+### Bug Fixes {#bug-fixes-1-87-0}
+* Fixes an issue where log tabs for Helm installs were hidden.
+* Fixes a bug that caused pre-existing rows on the version history page in Helm-managed mode (Alpha) to be highlighted as newly available versions when the page is opened. For more information on Helm-managed mode, see [Supporting helm CLI Installations (Alpha)](/vendor/helm-install).
+* Fixes an issue that could cause embedded installations to fail with error "yaml: did not find expected node content" when installing behind an `HTTP_PROXY`.
+* Fixes an issue where APIs that require an auth token were called while the client was logged out.
+* Fixes an issue that caused the Troubleshoot page to display the support bundle collection progress bar even when a support bundle was not being collected.
+* Sorts the entitlements returned in the `/license` endpoint to ensure that they display consistently in the admin console.
+
+### Known Issue {#known-issues-1-87-0}
+
+There is a known issue in the app manager v1.87.0 that causes a KOTS icon, instead of the application icon, to display on the login page before the application is deployed. After the application is deployed, the application icon shows on the login screen.
+
+## 1.86.2
+
+Released on October 7, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+
+### Improvements {#improvements-1-86-2}
+* Changes the way CSS and font files are included for custom admin console branding (Alpha). If you have early access to this feature, see the Alpha documentation for more information.
+
+### Bug Fixes {#bug-fixes-1-86-2}
+* Fixes an issue where large font files for custom admin console branding (Alpha) caused the admin console to fail to create a new application version.
+* Fixes an issue where the identity service login redirected to the login page after a successful login.
+* Fixes an issue in the **Cluster Management** tab where the button for adding a primary node stopped working if the original join token expired.
+* Fixes a bug that allowed the identity service route to be accessed even if the feature was not enabled.
+* Fixes a bug that caused the admin console Pod to terminate with an error due to a panic when checking for application updates in Helm-managed mode (Alpha). For more information on Helm-managed mode, see [Supporting helm CLI Installations (Alpha)](/vendor/helm-install).
+
+## 1.86.1
+
+Released on September 30, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+
+### Improvements {#improvements-1-86-1}
+* Shows only the relevant tabs on the deployment logs modal, depending on whether the admin console is in Helm-managed mode.
+* Standardizes all page titles using the format **Page Name | App Slug | Admin Console**. The page title is the text that shows in the browser tab.
+
+### Bug Fixes {#bug-fixes-1-86-1}
+* Fixes an issue where automatic update checks failed when the interval was too short for pending updates to be fetched.
+* Fixes an issue where the automatic update checks modal didn't show custom schedules after they were saved. See [Configure Automatic Updates](/enterprise/updating-apps#configure-automatic-updates).
+* Fixes an issue in Helm-managed mode where checking for updates from the version history page did not show the "License is expired" error when the check failed due to an expired license. For more information on Helm-managed mode, see [Supporting helm CLI Installations (Alpha)](/vendor/helm-install).
+* Fixes an issue where some icons displayed in a very large size on Firefox. See [Known Issue](#known-issues-1-86-0) under _1.86.0_.
+* Fixes an issue where the specified registry namespace was sometimes ignored for KOTS images if the specified registry hostname already included a namespace.
+
+## 1.86.0
+
+:::important
+The app manager v1.86.0 contains a known issue that affects the use of
+the Replicated admin console in Firefox browsers. This issue is resolved
+in the app manager v1.86.1.
+See [Known Issue](#known-issues-1-86-0) below.
+:::
+
+Released on September 27, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+
+### New Features {#new-features-1-86-0}
+* Allows icon colors to be changed with CSS when branding the admin console (Alpha). To enable this feature on your account, log in to your vendor portal account. Select **Support** > **Request a feature**, and submit a feature request for "admin console branding".
+
+### Improvements {#improvements-1-86-0}
+* Removes the license upload page when the admin-console Helm chart is installed without installing a Replicated application.
+* Makes port forward reconnections faster.
+
+### Bug Fixes {#bug-fixes-1-86-0}
+* Fixes the message alignment when a strict preflight check fails.
+* Fixes a bug where versions with `pending_download` status were shown incorrectly on the version history page.
+* Fixes a bug where versions with `pending_download` status caused the `View files` tab to navigate to a version that had not been downloaded yet, resulting in a UI error.
+* Fixes a bug where downloading an application version that was incompatible with the current admin console version made it impossible to check for updates until the admin console pod was restarted.
+* Fixes a bug that caused CLI feedback spinners to spin indefinitely.
+* Fixes an issue that caused config templates to be applied to the wrong values.yaml file in Helm-managed mode (Alpha). For more information about Helm-managed mode, see [Supporting helm CLI Installations (Alpha)](/vendor/helm-install).
+* Fixes an issue where the license was not synced when checking for application updates in Helm-managed mode (Alpha).
+* Fixes a bug in Helm-managed mode (Alpha) that required you to visit the config screen to deploy a new version with required config items, even if all of the config values had been set in a previously deployed version.
+* Fixes a bug that caused the currently deployed version to temporarily appear as a newly available version when an update check ran in Helm-managed mode (Alpha).
+* Fixes styling on `<pre>` elements in the Helm install modals (Alpha) so that their heights match the content.
+
+### Known Issue {#known-issues-1-86-0}
+
+This issue is resolved in the app manager v1.86.1.
+
+There is a known issue in the app manager v1.86.0 that causes certain icons in the Replicated admin console to display incorrectly in Firefox browsers. The icons display in a very large size, making it difficult for users to access the fields on several of the admin console screens.
+
+To use the admin console on v1.86.0, users should open the admin console in a supported browser other than Firefox, such as Google Chrome. For more information about supported browsers, see [Supported Browsers](/enterprise/installing-general-requirements#supported-browsers) in _Installation Requirements_.
+
+If users are unable to use a browser other than Firefox to access the admin console, Replicated recommends that they do not upgrade to the app manager v1.86.0.
+
+## 1.85.0
+
+Released on September 19, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+
+### New Features {#new-features-1-85-0}
+* Adds the ability to automatically check for new chart versions that are available when running in Helm-managed mode (Alpha). For more information about Helm-managed mode, see [Supporting helm CLI Installations (Alpha)](/vendor/helm-install).
+* In Helm-managed mode, new Helm chart versions that introduce a required configuration value must be configured before they can be deployed.
+
+### Improvements {#improvements-1-85-0}
+* Improves how license fields display in the admin console, especially when there are multiple license fields or when the value of a field is long.
+* Updates the replicated/local-volume-provider image to v0.3.8 to resolve CVE-2022-2509 with high severity.
+* Updates the github.com/open-policy-agent/opa module to resolve CVE-2022-36085 with critical severity.
+* Updates the kotsadm/dex image to v2.34.0 to resolve CVE-2022-37434 with critical severity and CVE-2021-43565, CVE-2022-27191, and CVE-2021-44716 with high severity.
+
+### Bug Fixes {#bug-fixes-1-85-0}
+* Fixes an issue in embedded clusters where image garbage collection deleted images that were still in use by the application.
+* Increases the memory limit for the `kotsadm-minio` StatefulSet from 200Mi to 512Mi.
+* Fixes an issue where headless/unattended installations hung in embedded clusters with recent Kubernetes versions.
+* Fixes an issue that caused values to be missing on the Config page for pending updates in Helm-managed mode (Alpha).
+* Fixes checkbox alignment on the Config page.
+* Fixes a bug where errors were not displayed on the Config page when values for required config items were missing in Helm-managed mode (Alpha).
+
+## 1.84.0
+
+Released on September 12, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+
+### New Features {#new-features-1-84-0}
+* Adds the ability to configure and deploy new Helm chart versions when the admin console is running in Helm-managed mode (Alpha).
+* Adds support for including custom font files in an application release, which can be used when branding the admin console (Alpha). To enable this feature on your account, log in to your vendor portal account. Select **Support** > **Request a feature**, and submit a feature request for "admin console branding".
+
+### Improvements {#improvements-1-84-0}
+* Updates the MinIO image to address CVE-2022-2526 with high severity.
+* Updates the github.com/gin-gonic/gin module in the kurl-proxy image used for embedded cluster installations, to resolve CVE-2020-28483 with high severity.
+* Updates SchemaHero to v0.13.2 to resolve CVE-2022-21698.
+
+### Bug Fixes {#bug-fixes-1-84-0}
+* Updates the `support-bundle` CLI command provided in the admin console to use the generated Kubernetes resources instead of the raw upstream specification when running in Helm-managed mode (Alpha).
+* Fixes an issue that caused Secrets and ConfigMaps created by the admin console to be left in the namespace after a Helm chart was uninstalled in Helm-managed mode (Alpha).
+* Fixes an issue where application status informers did not update if the admin console Pod was restarted.
+* Fixes an issue where a user who was logged in could navigate to the login page instead of being redirected to the application dashboard.
+* Fixes an issue where the app manager failed to render Helm charts that have subcharts referenced as local file repositories.
+
+## 1.83.0
+
+Released on September 1, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+
+### New Features {#new-features-1-83-0}
+* Adds support for custom branding of the admin console using CSS (Alpha). To enable this feature on your account, log in to your vendor portal account. Select **Support** > **Request a feature**, and submit a feature request for "admin console branding".
+
+### Improvements {#improvements-1-83-0}
+* Icons supplied in the `icon` field of the Application custom resource can be square or circular (see the sketch below).
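+
+A minimal, hypothetical sketch of the `icon` field referenced above; the URL is an example only:
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Application
+metadata:
+  name: my-app
+spec:
+  title: My App
+  # Square or circular images are both acceptable here.
+  icon: https://example.com/images/app-icon.png
+```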
+
+### Bug Fixes {#bug-fixes-1-83-0}
+* Fixes an issue that could cause inadvertent application upgrades when redeploying or updating the config of the currently installed revision in Helm-managed mode (Alpha). For more information about Helm-managed mode, see [Supporting helm CLI Installations (Alpha)](/vendor/helm-install).
+* Fixes an issue where the namespace was omitted from `helm upgrade` commands displayed in the admin console in Helm-managed mode (Alpha). For more information about Helm-managed mode, see [Supporting helm CLI Installations (Alpha)](/vendor/helm-install).
+* Removes the checkbox to automatically deploy updates in Helm-managed mode, because this is unsupported. For more information about Helm-managed mode, see [Supporting helm CLI Installations (Alpha)](/vendor/helm-install).
+* Fixes an issue where updating the registry settings failed due to permission issues even when the provided credentials had access to the registry.
+* Fixes an issue in Helm-managed mode that could cause Replicated templates to show on the config page instead of the rendered values. For more information about Helm-managed mode, see [Supporting helm CLI Installations (Alpha)](/vendor/helm-install).
+* Fixes an issue where trailing line breaks were removed during Helm chart rendering.
+
+## 1.82.0
+
+Released on August 25, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+
+### New Features {#new-features-1-82-0}
+* Adds support for a new air gap bundle format that supports image digests and deduplication of image layers shared across images in the bundle. The new air gap bundle format is in Beta. To enable this feature on your account, log in to your vendor portal account. Select **Support** > **Request a feature**, and submit a feature request for "new air gap bundle format".
+* Adds support for deploying images that are referenced by digest or by digest and tag, rather than by tag alone, in online installations that have a private registry configured.
+* Adds support for displaying the config values for each revision deployed in Helm-managed mode (Alpha). For more information about Helm-managed mode, see [Supporting helm CLI Installations (Alpha)](/vendor/helm-install).
+
+### Improvements {#improvements-1-82-0}
+* Updates the `local-volume-provider` image to address CVE-2021-44716, CVE-2021-33194, and CVE-2022-21221 with high severity.
+* Updates the configuration pages for the GitOps workflow, making it easier to set up.
+
+### Bug Fixes {#bug-fixes-1-82-0}
+* Fixes an issue that prevented you from typing in the **Path** field when **Other S3-Compatible Storage** was set as the snapshot storage destination.
+* Fixes an issue where the `LicenseFieldValue` template function always returned an empty string for the `isSnapshotSupported` value. For more information about the `LicenseFieldValue` template function, see [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue).
+
+## 1.81.1
+
+Released on August 22, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+
+### Improvements {#improvements-1-81-1}
+* Shows deploy logs for Helm charts when running in Helm-managed mode (Alpha). For more information about Helm-managed mode, see [Supporting helm CLI Installations (Alpha)](/vendor/helm-install).
+* Updates the Helm binary included in the kotsadm image from 3.8.2 to 3.9.3 to resolve CVE-2022-21698 and CVE-2022-27191 with high severity.
+* Updates the golang.org/x/net module in the kurl-proxy image used for embedded cluster installations, to resolve CVE-2021-44716 with high severity.
+* Updates the dex image from 2.32.0 to 2.33.0 to resolve CVE-2022-30065, CVE-2022-2097, and CVE-2022-27191 with high severity.
+
+### Bug Fixes {#bug-fixes-1-81-1}
+* Fixes an issue where starting a manual snapshot resulted in an error dialog when using Firefox or Safari.
+* Fixes an issue that caused images formatted as `docker.io/image:tag` to not be rewritten when upgrading applications in airgapped environments. For more information about rewriting images, see [Patching the Image Location with Kustomize](/vendor/packaging-private-images#patching-the-image-location-with-kustomize) in _Connecting to an Image Registry_.
+
+## 1.81.0
+
+Released on August 12, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+
+### New Features {#new-features-1-81-0}
+* Adds support for the `alias` field in Helm chart dependencies.
+* Adds support for image tags and digests to be used together for most online installations. For more information, see [Support for Image Tags and Digests](/vendor/packaging-private-images#support-for-image-tags-and-digests) in *Connecting to an Image Registry*.
+
+### Improvements {#improvements-1-81-0}
+* Helm v2 will only be used if `helmVersion` is set to `v2` in the HelmChart custom resource. Support for Helm v2, including security patches, ended on November 13, 2020, and support for Helm v2 in the app manager will be removed in the near future. For more information about the HelmChart custom resource, see [HelmChart](/reference/custom-resource-helmchart).
+* Improves the UI responsiveness on the Config page.
+
+### Bug Fixes {#bug-fixes-1-81-0}
+* Fixes an issue where the license tab did not show for Helm-managed installations.
+* Fixes an issue that could cause `Namespace` manifests packaged in Helm charts to be excluded from deployment, causing namespaces to not be created when `useHelmInstall` is set to `true` and `namespace` is an empty string. For more information about these fields, see [useHelmInstall](/reference/custom-resource-helmchart#usehelminstall) and [namespace](/reference/custom-resource-helmchart#namespace) in *HelmChart*.
+* Fixes an issue where GitOps was enabled before the deploy key was added to the git provider.
+* Hides copy commands on modals in the admin console when clipboard is not available.
+
+## 1.80.0
+
+Released on August 8, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+
+### New Features {#new-features-1-80-0}
+* Displays the `helm rollback` command when deploying previous revisions from the version history page in Helm-managed mode (Alpha). For more information about Helm-managed mode, see [Using Helm to Install an Application (Alpha)](/vendor/helm-install).
+
+### Improvements {#improvements-1-80-0}
+* Password complexity rules are now shown when changing the password in the admin console.
+* Updates Kustomize from 3.5.4 to 4.5.7. Note that Kustomize v4.5.7 does not allow duplicate YAML keys to be present in your application manifests, whereas v3.5.4 did (see the sketch below). Kustomize v4.5.7 is slightly slower than v3.5.4, so fetching and deploying new versions takes slightly longer. Our benchmarking did not show this performance degradation to be significant. Updating Kustomize resolves several critical and high severity CVEs, and unblocks additional feature work in the app manager.
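+
+To illustrate the duplicate-key behavior change noted above, a hypothetical manifest like the following was tolerated by Kustomize v3.5.4 but is rejected by v4.5.7:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: example-config
+data:
+  LOG_LEVEL: info
+  # Duplicate key: Kustomize v4.5.7 fails on manifests like this,
+  # while v3.5.4 tolerated the duplicate.
+  LOG_LEVEL: debug
+```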
+
+### Bug Fixes {#bug-fixes-1-80-0}
+* Fixes an issue where an ambiguous error message was shown when the endpoint field was modified in the license.
+* Fixes a bug that caused values from the HelmChart custom resource that did not use KOTS template functions to be rendered into the downloaded values.yaml file after updating the configuration in Helm-managed mode. For more information about Helm-managed mode, see [Using Helm to Install an Application (Alpha)](/vendor/helm-install).
+* Fixes an issue in Helm-managed mode that caused an error when clicking the **Analyze application** button on the Troubleshoot page in the admin console for an application that did not include a support bundle specification. For more information about Helm-managed mode, see [Helm-managed mode (Alpha)](/vendor/helm-install). For more information about analyzing an application, see [Create a Support Bundle Using the Admin Console](/enterprise/troubleshooting-an-app#create-a-support-bundle-using-the-admin-console) in *Troubleshooting an Application*.
+
+## 1.79.0
+
+Released on August 4, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+
+### New Features {#new-features-1-79-0}
+* Adds an [HTTPSProxy](/reference/template-functions-static-context#httpsproxy) template function to return the address of the proxy that the Replicated admin console is configured to use.
+* Dynamically adds collectors, analyzers, and custom redactors when collecting support bundles from the [troubleshoot](/enterprise/troubleshooting-an-app#create-a-support-bundle-using-the-admin-console) page in [Helm-managed mode (Alpha)](/vendor/helm-install).
+
+### Improvements {#improvements-1-79-0}
+* Removes the "Add new application" option when running the admin console in [Helm-managed mode (Alpha)](/vendor/helm-install).
+
+### Bug Fixes {#bug-fixes-1-79-0}
+* Fixes an issue that caused the [affix](/reference/custom-resource-config#affix) property of config items to be ignored.
+* Fixes an issue that caused the [help_text](/reference/custom-resource-config#help_text) property of config items to be ignored.
+* Fixes an issue that caused the license card to not be updated when switching applications in the admin console.
+* Fixes the ordering of versions on the [version history](/enterprise/updating-apps#update-an-application-in-the-admin-console) page in [Helm-managed mode (Alpha)](/vendor/helm-install).
+* Fixes the display of node statistics in the Cluster Management tab.
+* Fixes an issue where legacy encryption keys were not loaded properly during snapshot restores.
+* Fixes an issue where snapshots would fail if a wildcard (`"*"`) was listed in the `additionalNamespaces` field of an Application manifest.
+* Fixes an issue where the diff failed to generate for a version that excludes a Helm chart that was previously included.
+
+## 1.78.0
+
+Released on July 28, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+
+### New Features {#new-features-1-78-0}
+* The **Analyze application** button on the [Troubleshoot tab](/enterprise/troubleshooting-an-app) now works in [Helm-managed mode (Alpha)](/vendor/helm-install).
+* Adds a deploy modal for versions on the [version history](/enterprise/updating-apps#update-an-application-in-the-admin-console) page in [Helm-managed mode (Alpha)](/vendor/helm-install).
+
+### Improvements {#improvements-1-78-0}
+* Upgrades the internal database (Postgres) used by the admin console from `10.21-alpine` to `14.4-alpine`.
+
+### Bug Fixes {#bug-fixes-1-78-0}
+* Fixes an issue where all [dashboard links](/vendor/admin-console-adding-buttons-links) were rewritten to use the admin console hostname instead of the hostname provided in the application manifest.
+* Fixes a bug that caused errors when trying to generate `helm upgrade` commands from the [config page](/vendor/config-screen-about#admin-console-config-tab) in [Helm-managed mode (Alpha)](/vendor/helm-install).
+* Fixes a bug where the same version could be listed twice on the [version history](/enterprise/updating-apps#update-an-application-in-the-admin-console) page in [Helm-managed mode (Alpha)](/vendor/helm-install).
+
+## 1.77.0
+
+Released on July 22, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+
+### New Features {#new-features-1-77-0}
+* Displays version history information for Helm charts when running in Helm-managed mode (Alpha). For more information, see [Using Helm to Install an Application (Alpha)](/vendor/helm-install).
+* License information can now be synced from the admin console's Dashboard and License pages for Helm charts when running in Helm-managed mode (Alpha). For more information, see [Using Helm to Install an Application (Alpha)](/vendor/helm-install).
+* The admin console now supports limited RBAC mode when running in Helm-managed mode (Alpha). For more information, see [Using Helm to Install an Application (Alpha)](/vendor/helm-install).
+
+### Improvements {#improvements-1-77-0}
+* Better handling for network errors on the Helm install modal in Helm-managed mode (Alpha).
+* Helm install command now includes authentication in Helm-managed mode (Alpha).
+* Addresses the following high severity CVEs: CVE-2022-28946, CVE-2022-29162, and CVE-2022-1996.
+
+### Bug Fixes {#bug-fixes-1-77-0}
+* Fixes an issue that caused automatic deployments not to work on channels where semantic versioning was disabled, unless the version labels were valid [semantic versions](https://semver.org/).
+* Fixes an issue that caused errors after the admin console pod restarted, until the Dashboard tab was visited, in Helm-managed mode (Alpha).
+* Begins using a temp directory instead of the current directory, to avoid file permissions issues when generating the `helm upgrade` command after editing the config. For more information, see [Using Helm to Install an Application (Alpha)](/vendor/helm-install).
+
+## 1.76.1
+
+Released on July 15, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+
+### Bug Fixes {#bug-fixes-1-76-1}
+* Fixes an issue that caused private images in some collectors to not be rewritten during preflight checks.
+* Fixes an issue where the [Distribution](/reference/template-functions-static-context#distribution) template function returned an empty string in minimal RBAC installations running on OpenShift clusters.
+* Updates the golang.org/x/text go module to address CVE-2021-38561 with high severity.
+* Updates the local-volume-provider image to address CVE-2021-38561 with high severity.
+* Updates the MinIO image to address CVE-2022-1271 with high severity.
+
+## 1.76.0
+
+Released on July 12, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+
+### New Features {#new-features-1-76-0}
+* Displays license information on the admin console Dashboard and License page for Helm charts when running in Helm-managed mode (Alpha).
For more information, see [Using Helm to Install an Application (Alpha)](/vendor/helm-install).
+
+### Bug Fixes {#bug-fixes-1-76-0}
+* Fixes a bug that caused links defined in the [SIG Application custom resource](/reference/custom-resource-sig-application) to not be rewritten to the hostname used in the browser.
+
+## 1.75.0
+
+Released on July 5, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+
+### New Features {#new-features-1-75-0}
+* Adds a `helmUpgradeFlags` parameter to the [HelmChart custom resource](/reference/custom-resource-helmchart) when [Installing with Native Helm](/vendor/helm-overview). The specified flags are passed to the `helm upgrade` command. Note that the Replicated app manager uses `helm upgrade` for all installations, including initial installations, and not just when the application is upgraded. A combined sketch with the `releaseName` parameter from v1.73.0 follows the v1.72.2 notes below.
+
+### Bug Fixes {#bug-fixes-1-75-0}
+* Addresses the following critical severity CVEs: CVE-2022-26945, CVE-2022-30321, CVE-2022-30322, and CVE-2022-30323.
+* Fixes a bug that caused the [`push-images`](/reference/kots-cli-admin-console-push-images) command to fail when `--registry-password` and `--registry-username` are not specified for use with anonymous registries.
+
+## 1.74.0
+
+Released on July 1, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+
+### New Features {#new-features-1-74-0}
+* Adds the ability to use a preflight check to compare the Kubernetes installer included in a particular application version against the installer that is currently deployed. For more information, see [Include a Supporting Preflight Check](/vendor/packaging-embedded-kubernetes#include-a-supporting-preflight-check) in _Creating a Kubernetes Installer Specification_.
+
+### Bug Fixes {#bug-fixes-1-74-0}
+* Fixes an issue where you could not deploy valid application releases if the previously deployed version resulted in a kustomize error.
+* Fixes an issue where kustomize would fail if a Helm chart and one of its sub-charts had the same name.
+* Fixes an issue that caused Velero pods to be stuck in a Pending state when using the Internal Storage snapshot setting in Kubernetes installer-created clusters.
+* Fixes an issue where the admin console would crash if a Helm chart with optional values but no values provided was included in a release.
+
+## 1.73.0
+
+Released on June 24, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+
+### New Features {#new-features-1-73-0}
+* Adds a `releaseName` parameter to the [HelmChart custom resource](/reference/custom-resource-helmchart) when [Installing with Native Helm](/vendor/helm-overview). Defaults to the chart name. Specifying a `releaseName` also allows you to deploy multiple instances of the same Helm chart, which was previously impossible.
+
+### Improvements {#improvements-1-73-0}
+* Improves the UX on the version history page when the application is up to date or when there are new available versions.
+
+### Bug Fixes {#bug-fixes-1-73-0}
+* Fixes an issue where the preflight screen was displayed even if no analyzers were run.
+* Fixes an issue that prevented you from excluding a Helm chart that was previously included when [Installing with Native Helm](/vendor/helm-overview).
+
+## 1.72.2
+
+Released on June 22, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+
+### Bug Fixes {#bug-fixes-1-72-2}
+* Fixes a bug that caused duplicate Helm installations to be shown when running in Helm-managed mode in clusters with open permissions.
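+
+As noted under v1.75.0 and v1.73.0 above, here is a hypothetical HelmChart custom resource combining the `releaseName` and `helmUpgradeFlags` parameters. Chart names, versions, and flag values are examples only:
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: HelmChart
+metadata:
+  name: samplechart
+spec:
+  chart:
+    name: samplechart
+    chartVersion: 3.1.7
+  # Overrides the default release name (the chart name), which also
+  # makes it possible to deploy multiple instances of the same chart.
+  releaseName: samplechart-instance-1
+  # Passed to `helm upgrade`, which the app manager also uses for
+  # initial installations.
+  helmUpgradeFlags:
+    - --timeout
+    - 1200s
+```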
+
+## 1.72.1
+
+Released on June 17, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+
+### Improvements {#improvements-1-72-1}
+* Config values are now stored in a secret when the admin console runs in Helm-managed mode (Alpha), so that the values can be rerendered when a user returns to the Config page.
+
+### Bug Fixes {#bug-fixes-1-72-1}
+* The dashboard "Disk Usage" graph now reports metrics for Prometheus deployments using the `kubernetes-service-endpoints` job.
+* The configured Prometheus address now shows as the placeholder in the "Configure Prometheus address" modal.
+* Fixes a bug that prevented an application from being deployed if a strict preflight check existed but was excluded.
+* Fixes a bug that occurred when a top-level `templates` folder was not present in a Helm chart that also had subcharts.
+* Fixes a bug where Kubernetes installer manifests included as part of an application release were applied when deploying the release.
+* Updates the MinIO image to address the following critical and high severity CVEs: CVE-2021-42836, CVE-2021-41266, CVE-2020-26160, CVE-2018-25032, CVE-2022-0778, CVE-2022-25235, CVE-2022-25236, CVE-2022-25315, CVE-2022-24407.
+* Updates the Dex image to address the following critical and high severity CVEs: CVE-2020-14040, CVE-2021-42836, CVE-2020-36067, CVE-2020-36066, CVE-2020-35380, CVE-2020-26521, CVE-2020-26892, CVE-2021-3121, CVE-2020-26160, CVE-2021-28831, CVE-2020-11080, CVE-2021-3450, CVE-2021-23840, CVE-2020-1967, CVE-2020-8286, CVE-2020-8285, CVE-2020-8231, CVE-2020-8177, CVE-2020-8169, CVE-2021-30139, CVE-2021-36159.
+* Updates the local-volume-provider image to address CVE-2022-1664 with critical severity.
+
+## 1.72.0
+
+Released on June 14, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+
+### New Features {#new-features-1-72-0}
+* The admin console now shows the chart version and icon for the currently deployed Helm chart when running in Helm-managed mode (Alpha).
+
+### Improvements {#improvements-1-72-0}
+* Moves **Change password**, **Add new application**, and **Log out** functionality into a new menu in the top right of the navigation bar.
+* Shows a meaningful error message when the license is expired on the dashboard version card.
+
+### Bug Fixes {#bug-fixes-1-72-0}
+* Fixes a bug that caused the deploy confirmation modal on the dashboard to always show "Redeploy" even if the version was not already deployed.
+* Fixes a discrepancy between the license expiry date in the vendor portal and the expiry date in the admin console.
+* Sets the User-Agent header to the KOTS version string in outgoing HTTP requests where it was missing.
+* Removes the **Registry settings** tab when running in Helm-managed mode (Alpha).
+* Removes **Diff versions** links from the application dashboard and version history page when running in Helm-managed mode (Alpha).
+* Removes the instructions on how to edit files on the **View files** tab when running in Helm-managed mode (Alpha).
+
+## 1.71.0
+
+Released on June 1, 2022
+
+Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+
+### New Features
+* Adds a `--port` flag to the `kots install` and `kots admin-console` commands to allow for overriding the local port on which to access the admin console.
+
+### Improvements
+* A temporary success message is displayed if preflight checks pass for a version.
+
+### Bug Fixes
+* Fixes a nil pointer panic when checking for updates if a file in the new release contains incomplete metadata information.
+
+## 1.70.1
+
+Released on May 19, 2022
+
+Support for Kubernetes: 1.21, 1.22, and 1.23
+
+### Improvements
+* When enabling GitOps, the initial commit properly translates all labeled secrets to SealedSecrets.
+* Improves the application dashboard and version history pages when GitOps is enabled.
+* Prevents a user from generating a support bundle while another support bundle is being generated, and lets the user return to the `/troubleshoot/generate` route to see the progress of the current support bundle generation.
+* Improves editing for scheduling automatic snapshots by making the cron expression input always visible.
+* Adds a collector and analyzer for cases when NFS configuration fails because the `mount.nfs` binary is missing on the host.
+* Cleans up failed `kotsadm-fs-minio-check` pods after the NFS backend for snapshots has been configured successfully.
+* Supports Helm v3.8.2 in the app manager.
+* Shows Helm installations when running in Helm-managed mode (Alpha).
+
+### Bug Fixes
+* Fixes an issue where uploading the airgap bundle using the admin console hung at 0%.
+* Fixes an issue where applications using semantic versioning did not receive updates when `--app-version-label` was used in the [kots install](/reference/kots-cli-install) command.
+* Fixes an issue where the application was re-deployed when the admin console restarted.
+* Fixes an issue where existing Host Path and NFS snapshots did not show up after migrating away from MinIO. Note that this fix is only applicable to new migrations. Users who have already migrated away from MinIO can continue to take new snapshots, but pre-migration snapshots will be missing.
+* Fixes an issue where changing the API version for a native Kubernetes object caused that object to be deleted and recreated instead of updated.
+* Fixes an issue where image pull secrets were not created in additional namespaces when only Helm charts were used by the application.
+* Fixes an issue where custom icons did not show on the TLS/cert page on Safari and Chrome.
+* Fixes an issue where the admin console loaded resources from the internet.
+* Fixes critical and high CVEs found in the KOTS Go binaries.
+
+## 1.70.0
+
+Released on May 2, 2022
+
+Support for Kubernetes: 1.21, 1.22, and 1.23
+
+### New Features
+* Adds a `weight` parameter to the [HelmChart custom resource](/reference/custom-resource-helmchart) when [Installing with Native Helm](/vendor/helm-overview). Charts are applied by weight in ascending order, with lower numbered weights applied first (see the sketch below).
+* Adds the ability to change the admin console password from the **Change Password** link in the admin console page footer.
+* Adds the ability to download `Config` file types for a given application sequence.
+* Adds a template function `YamlEscape` to escape a string for inclusion in a YAML file.
+* Adds the ability to upload new TLS certificates used by the kURL proxy using the [`reset-tls`](/reference/kots-cli-reset-tls) command.
+* Adds the ability to dynamically set the number of results per page when browsing the application version history.
+
+### Improvements
+* When preflight checks are skipped during an initial installation, the application is still deployed.
+* License and preflight errors are now displayed when performing an automated installation using the CLI.
+* When changing the password using the `kubectl kots reset-password` command, all active sessions are terminated and new sessions can be established with the new password.
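+
+A minimal, hypothetical sketch of the `weight` parameter described in the v1.70.0 New Features above; the chart name, version, and weight are examples only:
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: HelmChart
+metadata:
+  name: postgresql
+spec:
+  chart:
+    name: postgresql
+    chartVersion: 11.6.2
+  # Applied before charts with higher weights, since charts deploy
+  # in ascending weight order.
+  weight: 1
+```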
+
+### Bug Fixes
+* Fixes an issue where ingress status informers always reported as "Missing" in Kubernetes 1.22+.
+* Fixes an issue that caused image garbage collection in Kubernetes installer-created clusters (embedded clusters) to remove images outside of the application's dedicated registry namespace.
+* Fixes an issue where a newer version might not have a **Deploy** button after the configuration is updated for the currently deployed version.
+* Fixes an issue where the legends on the dashboard graphs were blank.
+* Fixes an issue where, when hovering over a graph, the tooltip showed "LLL" instead of a formatted date.
+
+## 1.69.1
+
+Released on April 19, 2022
+
+Support for Kubernetes: 1.21, 1.22, and 1.23
+
+### Improvements
+* Updates `local-volume-provider` to v0.3.3.
+
+### Bug Fixes
+* Fixes an issue where links and text within the `app.k8s.io/v1beta1` `Application` kind were not templated.
+
+## 1.69.0
+
+Released on April 8, 2022
+
+Support for Kubernetes: 1.21, 1.22, and 1.23
+
+### New Features
+* Adds the ability to switch from a community license to a different license for the same application. See [Changing a Community License](/enterprise/updating-licenses#change-community-licenses).
+
+### Improvements
+* The [ensure-secret](/reference/kots-cli-docker-ensure-secret) command now creates a new application version, based on the latest version, that adds the Docker Hub image pull secret to all Kubernetes manifests that have images. This avoids Docker Hub's rate limiting.
+* CA certificates for snapshot storage endpoints can now be uploaded on the snapshot page of the admin console.
+* User sessions expire after 12 hours of inactivity.
+* Removes expired sessions from the store in a daily cleanup job.
+* Adds a Beta option for vendors to exclude MinIO images from the app manager air gap bundles available from the download portal. For more information, see [Excluding MinIO from Air Gap Bundles](/vendor/packaging-air-gap-excluding-minio) in the documentation.
+
+### Bug Fixes
+* Fixes an issue where the registry image pull secrets were not applied in the additional namespaces specified by the application in minimal RBAC installations.
+* Fixes an issue where some releases could be missed if they were promoted while other releases were being downloaded and semantic versioning was enabled.
+* Fixes an issue where the "Select a different file" link did not allow the user to change the selected file on the config page.
+
+## 1.68.0
+
+Released on April 4, 2022
+
+Support for Kubernetes: 1.21, 1.22, and 1.23
+
+### New Features
+* Adds the ability to make a KOTS application version required. Required versions cannot be skipped during upgrades. See [Managing Releases with the Vendor Portal](/vendor/releases-creating-releases).
+* Adds the `supportMinimalRBACPrivileges` field to the Application custom resource, and adds the `--use-minimal-rbac` flag to the `kots install` command. `supportMinimalRBACPrivileges` indicates that the application supports minimal RBAC, but it will not be used unless the `--use-minimal-rbac` flag is passed to the `kots install` command. See [`supportMinimalRBACPrivileges`](/reference/custom-resource-application#supportminimalrbacprivileges) in the Application custom resource.
+
+### Improvements
+* Adds pagination to the version history page and improves the admin console API performance.
+* Displays the labels applied to nodes in a Kubernetes installer-created cluster on the cluster management page of the admin console.
+* The default Troubleshoot analyzers will now specifically call out issues with Envoy/Contour if detected.
+
+### Bug Fixes
+* Fixes a bug with automatic updates where new versions would be deployed automatically regardless of preflight outcomes. When automatic updates are configured, new versions will now only be deployed automatically if the preflights succeed.
+* Fixes an issue where NFS snapshots could not be configured when MinIO was enabled in the cluster.
+* Fixes an issue where updating the snapshot storage location to NFS or Host Path would incorrectly display a dialog indicating that Velero was not installed and configured properly.
+* Fixes an issue that caused incorrect metadata to be used at application install time when installing a specific version of an application with the `--app-version-label` flag.
+* Fixes an issue that caused the support bundle analysis and/or redactions to not show up in the Troubleshoot page in the admin console in some cases.
+* Fixes an issue where deployments weren't blocked when strict preflight analyzers failed due to parse/process errors.
+* Fixes a style bug that caused the grid of metric graphs to be broken when there were more than three graphs.
+* Fixes an issue on the config editor page that caused an element to be hidden under the navbar when the corresponding config item was clicked on from the sidebar.
+* Fixes an issue where a version that was pulled in via automatic checks and deployed via automatic deployments would not be properly updated on the dashboard version card.
+* Fixes an issue where two versions could show as being currently deployed on the version history page when using automatic deployments.
+* Fixes an issue where AWS IAM instance roles could not be used when configuring the snapshot storage destination.
+
+## 1.67.0
+
+Released on March 21, 2022
+
+Support for Kubernetes: 1.21, 1.22, and 1.23
+
+### New Features
+* Adds support for installing a specific application version. For more information about installing a specific application version, see [Online Installation in Existing Clusters](/enterprise/installing-existing-cluster) and [Online Installation with the Kubernetes Installer](/enterprise/installing-embedded-cluster).
+* Extends the ability of status informers to detect if the application is being updated.
+* Adds the ability to provide a strict preflight, which cannot be skipped and must not have any failure outcomes. Any failure outcomes will prevent the user from deploying the application. For more information on strict preflights, see [Define KOTS Preflight Checks](/vendor/preflight-kots-defining).
+* New versions can automatically be deployed in the admin console, regardless of whether the vendor uses semantic versioning. For more information about automatically deploying new versions, see [Configure Automatic Updates](/enterprise/updating-apps#configure-automatic-updates) in _Updating an Application_.
+
+### Bug Fixes
+* Fixes an issue that could cause images that are still used by the application to be deleted from the private registry in a Kubernetes installer-created cluster during image garbage collection.
+* Fixes an issue where the same license could be installed more than once in some cases.
+* Fixes an issue where the Cluster Management tab was not always initially present for Kubernetes installer-created clusters.
+* Fixes an issue where attempting to re-download a pending application version would fail after upgrading the admin console from KOTS 1.65.
+* Fixes an issue where the application icon in the metadata did not show as the favicon on the TLS pages.
+
+## 1.66.0
+
+Released on March 8, 2022
+
+Support for Kubernetes: 1.21, 1.22, and 1.23
+
+### New Features
+* Adds the ability to exclude the applications or the admin console from full snapshot restores using the [`kots restore`](/reference/kots-cli-restore-index) command.
+* Adds the ability to display the command to restore only the admin console from a [full snapshot](/enterprise/snapshots-understanding#full-snapshots-recommended) on the Full Snapshots page in the admin console.
+
+### Improvements
+* Adds the [`--no-port-forward`](/reference/kots-cli-install#usage) flag to the `kots install` command to disable automatic port-forwarding. The old `--port-forward` flag has been deprecated.
+
+### Bug Fixes
+* Corrects the placeholder Prometheus URL in the admin console dashboard so that it is accurate for embedded installations.
+* Fixes a bug where the warning message sometimes printed incorrectly when a mismatch was detected between the kots CLI version and the version of the admin console that was running in the cluster.
+* Fixes a bug where the **See details** button on the support bundle analysis page did not show any information about an unhealthy pod.
+* Allows a user to re-upload a license if the application is not yet installed.
+* Allows GitOps to be disabled when it is enabled but has an invalid configuration. Previously, you were required to fix the configuration before disabling GitOps.
+
+## 1.65.0
+
+Released on February 25, 2022
+
+Support for Kubernetes: 1.20, 1.21, 1.22, and 1.23
+
+### New Features
+* Permanently enables the redesigned admin console app dashboard and version history pages introduced in [KOTS 1.60.0](#1600).
+* Application versions that fail to download now appear in the version history. A new button also appears with the version, allowing the download to be retried. Previously, these failures were lost when a newer version was downloaded successfully.
+* Introduces the [`kots upstream download`](../reference/kots-cli-upstream-download) command to retry downloading a failed update of the upstream application.
+
+### Improvements
+* The port forward initiated to access the admin console will continually retry when it is disconnected. If a new kotsadm pod comes up, the port forward will switch and forward to the new pod.
+* If the `kots` CLI version doesn't match the KOTS API version in the cluster, a warning message is displayed advising the user to update the `kots` CLI to the appropriate version.
+
+### Bug Fixes
+* Fixes uploading preflight results from the CLI.
+* Fixes a bug where the app icon in the metadata would not show as the favicon in Google Chrome.
+
+## 1.64.0
+
+Released on February 18, 2022
+
+Support for Kubernetes: 1.20, 1.21, 1.22, and 1.23
+
+### Improvements
+* A MinIO image will no longer be present in new deployments when MinIO is not specified as an add-on in the Kubernetes installer specification.
+* Enables an alternative that does not use MinIO for `hostPath` snapshots if the MinIO image is not present on the instance.
+
+### Bug Fixes
+* Fixes a bug that showed an incorrect diff on the version history page.
+* Fixes deploy log errors for PVCs when using OpenEBS with Kubernetes 1.19 through 1.21.
+
+## 1.63.0
+
+Released on February 11, 2022
+
+Supported on Kubernetes: 1.20, 1.21, 1.22, and 1.23
+
+### New Features
+* Changes the [`kots upstream upgrade`](../reference/kots-cli-upstream-upgrade) command to be synchronous by default and exposes error messages for it.
+
+### Improvements
+* Sets the Native Helm timeout to 60 minutes instead of 5 minutes.
+
+## 1.62.0
+
+Released on February 4, 2022
+
+Supported on Kubernetes: 1.20, 1.21, 1.22, and 1.23
+
+### New Features
+* Adds [`targetKotsVersion`](../reference/custom-resource-application#targetkotsversion) as a field in the application spec. This field allows you to set a target version of KOTS for a release. The initial installation of an application will fail if the currently installed KOTS version is greater than the target version. When a target version is set, end users will receive a notification in the admin console if their currently deployed version of KOTS is less than the target version. For more information, see the [`targetKotsVersion` documentation](../reference/custom-resource-application#targetkotsversion).
+
+* Adds [`minKotsVersion`](../reference/custom-resource-application/#minkotsversion-beta) (Beta) as a field in the application spec. This allows you to specify the minimum supported KOTS version for a release. An application cannot be installed if the currently deployed KOTS version is less than the minimum KOTS version specified for a release. See the [`minKotsVersion` documentation](../reference/custom-resource-application/#minkotsversion-beta) for caveats since this is a Beta feature.
+
+### Improvements
+* Defaults [`kubectl kots get config --appslug`](../reference/kots-cli-get-config) to the app slug of the deployed application if there is only one in the namespace.
+* Defaults [`kubectl kots get config --sequence`](../reference/kots-cli-get-config) to the sequence of the latest available version.
+
+### Bug Fixes
+* Fixes a bug that caused the "Details" link, which shows the [application status](../vendor/admin-console-display-app-status), to not be visible in the new dashboard UI.
+* Fixes the omission of certain password values from the rendered YAML file when using [`kubectl kots pull`](../reference/kots-cli-get-config).
+* Fixes an issue that caused the license file included in a support bundle to contain a long array of integers instead of a string in the signature field.
+* Fixes an issue that caused setting up a host path as a snapshot storage destination to fail.
+
+## 1.61.0
+
+Released on February 1, 2022
+
+Supported on Kubernetes: 1.20, 1.21, 1.22, and 1.23
+
+### New Features
+* Adds a CLI command to [get all available versions for an application](../reference/kots-cli-get-versions) from the app manager (see the example below).
+* Adds the ability to block installing or upgrading an application if the current KOTS version is incompatible with the KOTS version required by the application. This feature is experimental and is only available to vendors who have requested access.
+
+### Bug Fixes
+* Fixes a bug that caused images to be pushed to a private registry multiple times during an air gap installation.
+* Fixes a bug that erroneously displays a message to edit the current config when performing a new installation.
+* Fixes an issue that caused [image garbage collection](../enterprise/image-registry-embedded-cluster#enable-and-disable-image-garbage-collection) to only remove images with the "latest" tag.
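+
+A minimal sketch of the new command for listing versions (the app slug and namespace are example values):
+
+```bash
+# List all available versions for the application "my-app".
+kubectl kots get versions my-app --namespace my-app-namespace
+```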
+
+## 1.60.0
+
+Released on January 25, 2022
+
+Supported on Kubernetes: 1.20, 1.21, and 1.22
+
+### New Features
+* The admin console app dashboard and version history pages have been redesigned! This redesign improves the aesthetics of these pages and brings key functionality directly to the app dashboard. See [this blog](https://www.replicated.com/blog/new-features-announced-improvements-to-ux-host-preflights/) for more details.
+
+### Improvements
+* Updates MinIO to RELEASE.2022-01-08T03-11-54Z (resolves CVE-2021-43858).
+* Updates Postgres to version 10.19.
+
+### Bug Fixes
+* Fixes an issue that caused images to be pushed multiple times during an [airgap installation](/enterprise/installing-existing-cluster-airgapped) when the [Native Helm](/vendor/helm-overview#native) feature is enabled.
+* Fixes an issue that prevented the deployment status labels from breaking into multiple lines on small displays.
+
+## 1.59.3
+
+Released on January 21, 2022
+
+Supported on Kubernetes: 1.20, 1.21, and 1.22
+
+### Improvements
+* Updates the [kubectl](../reference/custom-resource-application#kubectlversion) patch versions and adds kubectl version 1.22.x.
+
+### Bug Fixes
+* Fixes an issue that caused the load balancer services to regenerate, resulting in downtime.
+
+## 1.59.2
+
+Released on January 18, 2022
+
+Supported on Kubernetes: 1.19, 1.20, and 1.21
+
+### Bug Fixes
+* Adds a more descriptive error message to the KOTS CLI when the provided host path does not exist for snapshots storage.
+* Fixes a bug that caused the "Send bundle to vendor" link to display when this feature is not enabled.
+* Resolves CSS style issues.
+* Fixes a bug where excluded Helm charts could not change between `UseHelmInstall: true` and `UseHelmInstall: false` without errors.
+* Fixes a problem where the "Internal Storage" option was not selected by default in kURL clusters with the `disableS3` option set.
+* Fixes a bug that occurred when Helm dependencies were aliased in Native Helm releases.
+
+## 1.59.1
+
+Released on December 29, 2021
+
+Supported on Kubernetes: 1.19, 1.20, and 1.21
+
+### Bug Fixes
+* Fixes a `panic: runtime error` that occurs when the [`kots upstream upgrade`](../reference/kots-cli-upstream-upgrade) command is run.
+
+## 1.59.0
+
+Released on December 22, 2021
+
+Supported on Kubernetes: 1.19, 1.20, and 1.21
+
+### New Features
+* Adds the `kubectl kots get config` command to export config values. This includes a `--decrypt` flag to decrypt sensitive values.
+* The internal storage location for snapshots now uses a persistent volume instead of object storage when the `disableS3` flag is set to `true` for embedded clusters. For more information about removing KOTS use of object storage, see the [kURL add-on documentation](https://kurl.sh/docs/add-ons/kotsadm).
+
+### Improvements
+* Adds version output for current and new releases to the [`upstream upgrade`](../reference/kots-cli-upstream-upgrade) CLI command.
+
+### Bug Fixes
+* Fixes a bug that caused analyzers to surface errors in namespaces not used by the application when the admin console has cluster access in existing cluster installations.
+* Fixes an issue that caused image pull secrets to be rendered in the admin console namespace instead of the `namespace` specified in the `kots.io/v1beta1` HelmChart custom resource when using `useHelmInstall`.
+* Fixes the `kots pull` CLI command to properly inject `imagePullSecrets` when using Helm Charts with `useHelmInstall` set to `true`.
+* Fixes a bug that causes application images to not be deleted from a [private registry](../enterprise/image-registry-embedded-cluster).
+* Fixes a bug that causes images included in a support bundle's [`run` collector](https://troubleshoot.sh/docs/collect/run/#image-required) to not be deleted from a private registry.
+
+## 1.58.2
+
+Released on December 14, 2021
+
+Supported on Kubernetes: 1.19, 1.20, and 1.21
+
+### Bug Fixes
+* Fixes a bug that caused config updates to take a long time.
+
+## 1.58.1
+
+Released on December 1, 2021
+
+Supported on Kubernetes: 1.19, 1.20, and 1.21
+
+### Bug Fixes
+* Fixes a bug that caused Native Helm to skip deploying some Helm resources on automated installations.
+
+## 1.58.0
+
+Released on December 1, 2021
+
+Supported on Kubernetes: 1.19, 1.20, and 1.21
+
+### New Features
+* Adds support for the semantic versioning of releases when the version labels are [valid](https://semver.org/). To use this feature, [enable semantic versioning for the channel](/vendor/releases-about#semantic-versioning) that the license is currently on.
+* Adds the ability to automatically deploy new patch, minor, or major [valid](https://semver.org/) semantic versions when [semantic versioning is enabled](/vendor/releases-about#semantic-versioning). This new capability can be configured from the **Version History** page under the **Configure automatic updates** option.
+
+## 1.57.0 and earlier
+
+For release notes for app manager versions earlier than 1.58.0, see the [Replicated App Manager Release Notes v1.9.0 - v1.65.0](../pdfs/app-manager-release-notes.pdf) PDF.
+
+================
+File: docs/release-notes/rn-embedded-cluster.md
+================
+---
+toc_max_heading_level: 2
+pagination_next: null
+pagination_prev: null
+---
+
+# Embedded Cluster Release Notes
+
+This topic contains release notes for the [Replicated Embedded Cluster](/vendor/embedded-overview) installer. The release notes list new features, improvements, bug fixes, known issues, and breaking changes.
+
+Additionally, these release notes list the versions of Kubernetes and Replicated KOTS that are available with each version of Embedded Cluster.
+
+## 2.1.3
+
+Released on February 19, 2025
+
+<table>
+  <tr>
+    <th>Version</th>
+    <td id="center">2.1.3+k8s-1.30</td>
+    <td id="center">2.1.3+k8s-1.29</td>
+  </tr>
+  <tr>
+    <th>Kubernetes Version</th>
+    <td id="center">1.30.9</td>
+    <td id="center">1.29.13</td>
+  </tr>
+  <tr>
+    <th>KOTS Version</th>
+    <td id="center" colspan="2">1.124.4</td>
+  </tr>
+</table>
+
+### Improvements {#improvements-2-1-3}
+* During `install` and `join`, permissions for the data directory are set to 755 to ensure successful operation.
+* Adds a preflight check to verify execute permissions on the data directory and its parent directories. This prevents installation issues, including etcd permissions issues.
+* The following kernel parameters are configured automatically: `fs.inotify.max_user_instances = 1024` and `fs.inotify.max_user_watches = 65536`.
+* Adds a preflight check to ensure the following kernel parameters are set correctly: `fs.inotify.max_user_instances = 1024` and `fs.inotify.max_user_watches = 65536`.
+* Surfaces better error messages during the installation if the node is not ready.
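+
+For reference, a sketch of checking and setting these inotify parameters manually on a host (the file name under `/etc/sysctl.d` is arbitrary):
+
+```bash
+# Check the current values that the new preflight verifies.
+sysctl fs.inotify.max_user_instances fs.inotify.max_user_watches
+# Persist the values that the installer configures automatically.
+cat <<'EOF' | sudo tee /etc/sysctl.d/99-inotify.conf
+fs.inotify.max_user_instances = 1024
+fs.inotify.max_user_watches = 65536
+EOF
+sudo sysctl --system
+```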
+
+## 2.1.2
+
+Released on February 19, 2025
+
+<table>
+  <tr>
+    <th>Version</th>
+    <td id="center">2.1.2+k8s-1.30</td>
+    <td id="center">2.1.2+k8s-1.29</td>
+  </tr>
+  <tr>
+    <th>Kubernetes Version</th>
+    <td id="center">1.30.9</td>
+    <td id="center">1.29.13</td>
+  </tr>
+  <tr>
+    <th>KOTS Version</th>
+    <td id="center" colspan="2">1.124.4</td>
+  </tr>
+</table>
+
+### Improvements {#improvements-2-1-2}
+* The preflight check that ensures the system clock is synchronized no longer requires NTP to be active. This accommodates systems where the clock is managed by alternative protocols (e.g., PTP).
+* If firewalld is enabled, it is now automatically configured at install time to allow required network traffic in the cluster.
+
+### Bug Fixes {#bug-fixes-2-1-2}
+* Fixes host preflight failures for kernel modules in environments where kernel modules are built in.
+
+## 2.1.1
+
+Released on February 18, 2025
+
+<table>
+  <tr>
+    <th>Version</th>
+    <td id="center">2.1.1+k8s-1.30</td>
+    <td id="center">2.1.1+k8s-1.29</td>
+  </tr>
+  <tr>
+    <th>Kubernetes Version</th>
+    <td id="center">1.30.9</td>
+    <td id="center">1.29.13</td>
+  </tr>
+  <tr>
+    <th>KOTS Version</th>
+    <td id="center" colspan="2">1.124.4</td>
+  </tr>
+</table>
+
+### Bug Fixes {#bug-fixes-2-1-1}
+* Installing now waits for the Local Artifact Mirror systemd service to be healthy before proceeding, and any errors are reported. Previously, the install appeared successful even if the Local Artifact Mirror failed to start.
+* Fixes host preflight failures for kernel modules in environments where kernel modules are built in.
+
+## 2.1.0
+
+Released on February 14, 2025
+
+<table>
+  <tr>
+    <th>Version</th>
+    <td id="center">2.1.0+k8s-1.30</td>
+    <td id="center">2.1.0+k8s-1.29</td>
+  </tr>
+  <tr>
+    <th>Kubernetes Version</th>
+    <td id="center">1.30.9</td>
+    <td id="center">1.29.13</td>
+  </tr>
+  <tr>
+    <th>KOTS Version</th>
+    <td id="center" colspan="2">1.124.4</td>
+  </tr>
+</table>
+
+### Improvements {#improvements-2-1-0}
+* The following kernel parameters are configured automatically: `net.ipv4.conf.all.forwarding = 1`, `net.ipv4.conf.default.forwarding = 1`, `net.bridge.bridge-nf-call-iptables = 1`, `net.ipv4.conf.default.rp_filter = 0`, and `net.ipv4.conf.all.rp_filter = 0`.
+* The following kernel modules are configured automatically: `overlay`, `ip_tables`, `br_netfilter`, and `nf_conntrack`.
+* Adds a preflight check to ensure the following kernel parameters are set correctly: `net.ipv4.conf.all.forwarding = 1`, `net.ipv4.conf.default.forwarding = 1`, `net.bridge.bridge-nf-call-iptables = 1`, `net.ipv4.conf.default.rp_filter = 0`, and `net.ipv4.conf.all.rp_filter = 0`.
+* Adds a preflight check to ensure the `overlay`, `ip_tables`, `br_netfilter`, and `nf_conntrack` kernel modules are configured correctly (a manual spot check is sketched below).
+* Adds a preflight check to ensure a node's IP address is not within the Pod and Service CIDR ranges that will be used by Kubernetes. If a conflict exists, a different CIDR block can be specified with `--cidr` or a different network interface can be specified with `--network-interface`.
+* Adds a preflight check to ensure that SELinux is not running in enforcing mode.
+
+### Bug Fixes {#bug-fixes-2-1-0}
+* Fixes an issue when installing on Amazon Linux 2 and other older Linux distributions that causes the installation to time out waiting for storage to be ready.
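+
+A hedged spot check for the host requirements that the 2.1.0 preflights enforce (the CIDR shown with `--cidr` is an example value, and `APP_SLUG` stands in for your installer binary):
+
+```bash
+# Confirm the kernel modules the installer configures can be loaded.
+for mod in overlay ip_tables br_netfilter nf_conntrack; do
+  sudo modprobe "$mod" && echo "$mod: ok"
+done
+# SELinux must not be in enforcing mode.
+getenforce
+# If the node IP conflicts with the default Pod/Service CIDRs,
+# pass a different range at install time, for example:
+# sudo ./APP_SLUG install --license license.yaml --cidr 172.16.0.0/16
+```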
+ +## 2.0.0 + +Released on February 7, 2025 + +<table> + <tr> + <th>Version</th> + <td id="center">2.0.0+k8s-1.30</td> + <td id="center">2.0.0+k8s-1.29</td> + </tr> + <tr> + <th>Kubernetes Version</th> + <td id="center">1.30.9</td> + <td id="center">1.29.13</td> + </tr> + <tr> + <th>KOTS Version</th> + <td id="center" colspan="2">1.124.3</td> + </tr> +</table> + +### New Features {#new-features-2-0-0} +* The 2.0 release of Embedded Cluster introduces architecture changes that improve the reliability of the upgrade process, particularly the upgrade of Helm extensions like the Admin Console, OpenEBS, and vendor-supplied Helm extensions. As part of these improvements, upgrades from Embedded Cluster versions earlier than 1.8 are not supported. Online instances running Embedded Cluster versions earlier than 1.8.0 must upgrade to an Embedded Cluster version from 1.8.0 to 1.22.0 before upgrading to 2.0.0. Air gap instances running Embedded Cluster versions earlier than 1.8.0 must upgrade to version 1.8.0 before upgrading to later versions, including 2.0.0. If you have customers running these earlier versions, Replicated recommends using a [required release](https://docs.replicated.com/vendor/releases-about#properties) to ensure your customers upgrade to a supported version first. + +### Improvements {#improvements-2-0-0} +* If you don't provide a new Admin Console password to `admin-console reset-password`, you'll be prompted for one. This prevents the password from ending up in your terminal history. +* If there is no TTY (like in CI), the CLI suppresses repeated log lines when there is a spinner, making output more readable. + +## 1.22.0 + +Released on January 24, 2025 + +<table> + <tr> + <th>Version</th> + <td id="center">1.22.0+k8s-1.30</td> + <td id="center">1.22.0+k8s-1.29</td> + </tr> + <tr> + <th>Kubernetes Version</th> + <td id="center">1.30.9</td> + <td id="center">1.29.13</td> + </tr> + <tr> + <th>KOTS Version</th> + <td id="center" colspan="2">1.124.0</td> + </tr> +</table> + +### New Features {#new-features-1-22-0} +* Updates the disaster recovery alpha feature so that rather than having to apply specific labels to all the resources you want backed up, you now have full control over how your application is backed up and restored. Specifically, you now provide a Velero Backup resource and a Restore resource in your application release. These resources are used to back up and restore your application, separate from the Embedded Cluster infrastructure. For more information, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery). + +## 1.21.0 + +Released on January 22, 2025 + +<table> + <tr> + <th>Version</th> + <td id="center">1.21.0+k8s-1.30</td> + <td id="center">1.21.0+k8s-1.29</td> + </tr> + <tr> + <th>Kubernetes Version</th> + <td id="center">1.30.6</td> + <td id="center">1.29.10</td> + </tr> + <tr> + <th>KOTS Version</th> + <td id="center" colspan="2">1.123.1</td> + </tr> +</table> + +### New Features {#new-features-1-21-0} +* The `--no-prompt` flag is deprecated and replaced with the `--yes` flag. `--no-prompt` will be removed in a future release. +* The `--skip-host-preflights` flag is deprecated and replaced with `--ignore-host-preflights`. When `--ignore-host-preflights` is passed, the host preflights are still executed, but the user is prompted and can choose to continue if failures occur. This new behavior ensures that users see any incompatibilities in their environment, while still enabling them to bypass failures if absolutely necessary. 
To ignore host preflight failures in automation, use both the `--ignore-host-preflights` and `--yes` flags to address the prompt for `--ignore-host-preflights`. `--skip-host-preflights` will be removed in a future release.
+
+### Improvements {#improvements-1-21-0}
+* Adds preflight checks to ensure nodes joining the cluster can communicate with all other nodes in the cluster on ports 6443, 9443, 2380, and 10250.
+* Adds a preflight check to ensure that communication can occur between the Pod and Service CIDRs that Kubernetes will use. When this preflight fails, it's often because of a firewall configuration that blocks communication between the Pod and Service CIDRs.
+* Adds a preflight check to ensure IP forwarding is enabled (`net.ipv4.ip_forward = 1`). Many machines have IP forwarding disabled by default. As of 1.19.0, Embedded Cluster uses a sysctl configuration file to enable IP forwarding, so this preflight should only fail if Embedded Cluster couldn't enable IP forwarding.
+* Adds a preflight check to ensure that a nameserver is configured in `/etc/resolv.conf`.
+* If a network interface is not specified with the `--network-interface` flag, Embedded Cluster will use improved logic to determine which interface to use.
+* The license file is now stored in the data directory and is included in host support bundles.
+* Host support bundles now include whether `/etc/resolv.conf` has at least one nameserver configured.
+* Host support bundles now include the output of `firewall-cmd --list-all`.
+* Potentially sensitive CLI flag values are no longer included in metrics reporting.
+* Usage and error messages have been improved for clarity.
+* `kubernetes.default.svc.cluster.local` has been added as a Kubernetes API server SAN.
+
+### Bug Fixes {#bug-fixes-1-21-0}
+* Support bundles now check that `modprobe`, `mount`, and `umount` exist in `PATH` rather than at hardcoded locations.
+* Fixes an issue where `reset` commands run on partially-installed clusters could fail with errors like `no matches for kind "Installation"`.
+
+## 1.19.0
+
+Released on November 14, 2024
+
+<table>
+  <tr>
+    <th>Version</th>
+    <td id="center">1.19.0+k8s-1.30</td>
+    <td id="center">1.19.0+k8s-1.29</td>
+  </tr>
+  <tr>
+    <th>Kubernetes Version</th>
+    <td id="center">1.30.5</td>
+    <td id="center">1.29.9</td>
+  </tr>
+  <tr>
+    <th>KOTS Version</th>
+    <td id="center" colspan="2">1.121.0</td>
+  </tr>
+</table>
+
+### New Features {#new-features-1-19-0}
+* Adds preflight checks to ensure that the following kernel parameters are set: `net.ipv4.conf.default.arp_filter = 0`, `net.ipv4.conf.default.arp_ignore = 0`, `net.ipv4.conf.all.arp_filter = 0`, and `net.ipv4.conf.all.arp_ignore = 0`.
+* The following kernel parameters will be written to `/etc/sysctl.d/99-embedded-cluster.conf` and configured automatically during installation: `net.ipv4.ip_forward = 1`, `net.ipv4.conf.default.arp_filter = 0`, `net.ipv4.conf.default.arp_ignore = 0`, `net.ipv4.conf.all.arp_filter = 0`, and `net.ipv4.conf.all.arp_ignore = 0`. An error will not occur if Embedded Cluster fails to set these kernel parameters at install time. Instead, the aforementioned preflight checks will instruct the user to set these parameters.
+
+### Improvements {#improvements-1-19-0}
+* If a user downloads an air gap bundle but attempts to install without it, the user will be instructed how to pass the air gap bundle to `install`. They will then be asked if they want to continue with an online installation anyway.
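+
+For reference, a sketch of passing the air gap bundle to `install` so the installer does not fall back to an online installation (the binary and file names are example values):
+
+```bash
+# Provide the air gap bundle explicitly at install time.
+sudo ./APP_SLUG install \
+  --license license.yaml \
+  --airgap-bundle APP_SLUG.airgap
+```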
+
+## 1.18.0
+
+Released on November 8, 2024
+
+<table>
+  <tr>
+    <th>Version</th>
+    <td id="center">1.18.0+k8s-1.30</td>
+    <td id="center">1.18.0+k8s-1.29</td>
+  </tr>
+  <tr>
+    <th>Kubernetes Version</th>
+    <td id="center">1.30.5</td>
+    <td id="center">1.29.9</td>
+  </tr>
+  <tr>
+    <th>KOTS Version</th>
+    <td id="center" colspan="2">1.120.3</td>
+  </tr>
+</table>
+
+### New Features {#new-features-1-18-0}
+* Adds support for passing ConfigValues using the `--config-values` flag for the `install` command. This also enables automated installations of both Embedded Cluster and the application.
+
+### Improvements {#improvements-1-18-0}
+* When the Admin Console URL is printed at the end of the `install` command, it will now use the public IP address instead of the private IP address for AWS EC2 instances that use IMDSv2.
+* During setup of the Admin Console when a self-signed certificate is used, the instructions are updated to better inform users how to ignore the warning on different browsers.
+
+### Bug Fixes {#bug-fixes-1-18-0}
+* Fixes an issue where registry logs weren't included in support bundles.
+* Fixes an issue when installing on Azure that caused the Admin Console URL shown at the end of the `install` command to use the private IP address rather than the public IP address.
+* Fixes an issue that prevented you from updating an application if the new version contained a required config item without a `default` or `value` set.
+* The copy button now works for the command to validate the authenticity of the self-signed certificate during Admin Console setup.
+* Fixes an issue where the **Config** page showed an error and wouldn't load.
+
+## 1.17.0
+
+Released on November 4, 2024
+
+<table>
+  <tr>
+    <th>Version</th>
+    <td id="center">1.17.0+k8s-1.30</td>
+    <td id="center">1.17.0+k8s-1.29</td>
+  </tr>
+  <tr>
+    <th>Kubernetes Version</th>
+    <td id="center">1.30.5</td>
+    <td id="center">1.29.9</td>
+  </tr>
+  <tr>
+    <th>KOTS Version</th>
+    <td id="center" colspan="2">1.120.1</td>
+  </tr>
+</table>
+
+### New Features {#new-features-1-17-0}
+* Adds support for partial rollbacks. Partial rollbacks are supported only when rolling back to a version where there is no change to the Embedded Cluster Config compared to the currently-installed version. For example, users can roll back to release version 1.0.0 after upgrading to 1.1.0 only if both 1.0.0 and 1.1.0 use the same [Embedded Cluster Config](/reference/embedded-config). For more information about how to enable rollbacks for your application in the KOTS Application custom resource, see [allowRollback](/reference/custom-resource-application#allowrollback) in _Application_.
+* Introduces a new landing page and guided installation workflow for the Admin Console.
+
+### Improvements {#improvements-1-17-0}
+* Removes unused infrastructure images from the data directory on upgrades to free up storage space.
+* Adds additional host collectors and analyzers to improve troubleshooting with support bundles.
+* Support bundles now include information on connectivity between Pods and nodes to help resolve networking issues more quickly.
+* The preflight check for connectivity to replicated.app and proxy.replicated.com now uses any private CAs provided with `--private-ca`, in case a man-in-the-middle proxy is in use (see the sketch below).
+
+### Bug Fixes {#bug-fixes-1-17-0}
+* Fixes a panic that occurred when prompted to proceed after preflight warnings.
+* Fixes an issue where `troubleshoot.sh/v1beta2` was erroneously printed to the screen during installation.
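+
+A sketch of providing a private CA at install time so these connectivity preflights trust a TLS-intercepting proxy (the binary name and certificate path are example values):
+
+```bash
+sudo ./APP_SLUG install \
+  --license license.yaml \
+  --private-ca /etc/ssl/certs/corp-root-ca.pem
+```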
+
+## 1.16.0
+
+Released on October 23, 2024
+
+<table>
+  <tr>
+    <th>Version</th>
+    <td id="center">1.16.0+k8s-1.30</td>
+    <td id="center">1.16.0+k8s-1.29</td>
+  </tr>
+  <tr>
+    <th>Kubernetes Version</th>
+    <td id="center">1.30.5</td>
+    <td id="center">1.29.9</td>
+  </tr>
+  <tr>
+    <th>KOTS Version</th>
+    <td id="center" colspan="2">1.119.0</td>
+  </tr>
+</table>
+
+### New Features {#new-features-1-16-0}
+* Adds support for Kubernetes 1.30 and removes support for 1.28.
+* Adds a `--data-dir` flag to the `install` and `restore` commands so the data directory can be specified. By default, the data directory is `/var/lib/embedded-cluster`. If the `--data-dir` flag was provided at install time, then the same data directory must be provided when restoring. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install) and [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery).
+* Adds an `admin-console reset-password` command that allows resetting the password for the Admin Console.
+* Adds a `--cidr` flag to the `install` command that replaces the `--pod-cidr` and `--service-cidr` flags. The CIDR range specified with the `--cidr` flag is split and used for both the Pod and Service CIDRs. See [Embedded Cluster Install Command Options](/reference/embedded-cluster-install).
+  :::note
+  The `--pod-cidr` and `--service-cidr` flags are hidden, but still functional. Replicated recommends that you update any automation that uses the `--pod-cidr` and
+  `--service-cidr` flags to use the `--cidr` flag instead.
+  :::
+* Adds the following preflight checks:
+  * Verify that the CIDR range used for the cluster does not overlap with existing routes.
+  * Verify the CPU supports x86-64-v2.
+  * Verify the data directory (`/var/lib/embedded-cluster` by default) is not symlinked.
+
+### Improvements {#improvements-1-16-0}
+* For new installations, the `k0s` and `openebs-local` directories are now subdirectories of `/var/lib/embedded-cluster`. With this change, Embedded Cluster now only documents and includes preflight checks for `/var/lib/embedded-cluster`.
+* Adds the `support-bundle` command to make it easier to generate support bundles. For more information, see [Generating Support Bundles for Embedded Cluster](/vendor/support-bundle-embedded).
+* Improves the reliability of waiting for the Kubernetes server to start.
+* Collects more information about the cluster in support bundles, including the Local Artifact Mirror and Kubernetes API Server logs.
+* Requires that the Admin Console password is at least six characters.
+* Improves the flexibility of configuring the Cluster Resources collector in support bundle specs by limiting KOTS's default collection to its own namespace.
+
+### Bug Fixes {#bug-fixes-1-16-0}
+* Fixes an issue that could occur when resetting a worker node that used a custom data directory.
+* Fixes an issue where k0s images were not updated within the cluster when k0s was upgraded.
+* Fixes an issue where upgrading a cluster with a worker node that used a version of Embedded Cluster earlier than 1.15 would fail.
+* Fixes an issue that prevented you from upgrading to an application version that didn't have a Config or preflights.
+* Fixes an issue where the Admin Console could reach out to the internet when generating a support bundle in air gap environments.
+* Fixes an issue that prevented you from installing Embedded Cluster using a multi-channel license and a channel other than the license's default.
+* Fixes an issue that could cause the registry to fail to upgrade in air gap installations.
+* Fixes an issue where the Replicated SDK failed to deploy if a private CA was provided to the installation but the SDK was installed into a different namespace than KOTS.
+* If an application includes the Replicated SDK, the SDK will be deployed with the same ClusterRole as the Admin Console.
+* Fixes an issue where node joins failed because of a version mismatch, even though the versions were the same.
+
+## 1.15.0 - Removed
+
+:::important
+Embedded Cluster 1.15.0 has been removed and is not available for use because of issues with upgrades. It continues to work for anyone already using it.
+:::
+
+Released on October 10, 2024
+
+<table>
+  <tr>
+    <th>Version</th>
+    <td id="center">1.15.0+k8s-1.29</td>
+    <td id="center">1.15.0+k8s-1.28</td>
+  </tr>
+  <tr>
+    <th>Kubernetes Version</th>
+    <td id="center">1.29.9</td>
+    <td id="center">1.28.11</td>
+  </tr>
+  <tr>
+    <th>KOTS Version</th>
+    <td id="center" colspan="2">1.117.5</td>
+  </tr>
+</table>
+
+### New Features {#new-features-1-15-0}
+* Adds the `--data-dir` flag to the `install` command so the data directory can be specified. By default, the data directory is `/var/lib/embedded-cluster`.
+
+### Improvements {#improvements-1-15-0}
+* Adds a preflight check to ensure the CPU supports x86-64-v2.
+* Adds a preflight check to ensure the data directory (`/var/lib/embedded-cluster` by default) is not symlinked.
+* Adds the `--data-dir` flag to the `restore` command. When restoring a backup that used a non-default data directory (i.e., the `--data-dir` flag was provided at install time), the same data directory must be provided when restoring.
+* For new installations, the `k0s` and `openebs-local` directories are now subdirectories of `/var/lib/embedded-cluster`. Embedded Cluster now only documents and includes preflight checks for `/var/lib/embedded-cluster`.
+* The Admin Console password must be at least six characters.
+
+### Bug Fixes {#bug-fixes-1-15-0}
+* Fixes an issue that prevented you from installing Embedded Cluster using a multi-channel license and a channel other than the license's default.
+* Fixes an issue that could cause the registry to fail to upgrade in air gap installations.
+* Fixes an issue where node joins failed because of a version mismatch, even though the versions were the same.
+
+## 1.14.2
+
+Released on September 26, 2024
+
+<table>
+  <tr>
+    <th>Version</th>
+    <td id="center">1.14.2+k8s-1.29</td>
+    <td id="center">1.14.2+k8s-1.28</td>
+  </tr>
+  <tr>
+    <th>Kubernetes Version</th>
+    <td id="center">1.29.8</td>
+    <td id="center">1.28.11</td>
+  </tr>
+  <tr>
+    <th>KOTS Version</th>
+    <td id="center" colspan="2">1.117.3</td>
+  </tr>
+</table>
+
+### Improvements {#improvements-1-14-2}
+
+* Preflight checks for the Admin Console and local artifact mirror ports now take into consideration ports specified by the user with the `--admin-console-port` and `--local-artifact-mirror-port` flags.
+* Improves the display of preflight failures so they're more readable.
+
+## 1.14.1
+
+Released on September 26, 2024
+
+<table>
+  <tr>
+    <th>Version</th>
+    <td id="center">1.14.1+k8s-1.29</td>
+    <td id="center">1.14.1+k8s-1.28</td>
+  </tr>
+  <tr>
+    <th>Kubernetes Version</th>
+    <td id="center">1.29.8</td>
+    <td id="center">1.28.11</td>
+  </tr>
+  <tr>
+    <th>KOTS Version</th>
+    <td id="center" colspan="2">1.117.3</td>
+  </tr>
+</table>
+
+### New Features {#new-features-1-14-1}
+
+* Adds host preflight checks to ensure that the required ports are open and available.
For more information, see [Port Requirements](/vendor/embedded-overview#port-requirements).
+
+### Improvements {#improvements-1-14-1}
+
+* Adds the `--network-interface` flag for the `join` command so a network interface can optionally be selected when joining nodes. If this flag is not provided, the first valid, non-local network interface is used.
+* The `reset` command now automatically reboots the machine, and the optional `--reboot` flag is no longer available. A reboot is required to reset iptables.
+
+### Bug Fixes {#bug-fixes-1-14-1}
+
+* Fixes an issue where nodes could fail to join with the error "unable to get network interface for address."
+
+## 1.14.0
+
+Released on September 24, 2024
+
+<table>
+  <tr>
+    <th>Version</th>
+    <td id="center">1.14.0+k8s-1.29</td>
+    <td id="center">1.14.0+k8s-1.28</td>
+  </tr>
+  <tr>
+    <th>Kubernetes Version</th>
+    <td id="center">1.29.8</td>
+    <td id="center">1.28.11</td>
+  </tr>
+  <tr>
+    <th>KOTS Version</th>
+    <td id="center" colspan="2">1.117.3</td>
+  </tr>
+</table>
+
+### New Features {#new-features-1-14-0}
+
+* Introduces the `--admin-console-port` and `--local-artifact-mirror-port` flags to the `install` command so the ports for the Admin Console (default 30000) and the local artifact mirror (default 50000) can be chosen.
+* Introduces the `--local-artifact-mirror-port` flag to the `restore` command so the port used for the local artifact mirror can be selected during the restore. If no port is provided, the port in use when the backup was taken will be used.
+* Introduces the `--network-interface` flag to the `install` command so a network interface can be selected. If a network interface is not provided, the first valid, non-local network interface is used.
+
+### Improvements {#improvements-1-14-0}
+
+* When a proxy server is configured, the default network interface's subnet will automatically be added to the no-proxy list if the node's IP address isn't already included.
+* When joining nodes to an Embedded Cluster, the correct network interface is chosen based on the node IP address in the join command.
+* The static IP addresses for replicated.app and proxy.replicated.com are now included in the failure messages for the preflight checks that verify connectivity to those endpoints, making it easier for end users to allowlist those endpoints.
+* If the Replicated SDK is deployed by KOTS as part of an application, the SDK will automatically be configured with any additional CA certificates provided to the `--private-ca` flag for the `install` command.
+
+## 1.13.1
+
+Released on September 20, 2024
+
+<table>
+  <tr>
+    <th>Version</th>
+    <td id="center">1.13.1+k8s-1.29</td>
+    <td id="center">1.13.1+k8s-1.28</td>
+  </tr>
+  <tr>
+    <th>Kubernetes Version</th>
+    <td id="center">1.29.8</td>
+    <td id="center">1.28.11</td>
+  </tr>
+  <tr>
+    <th>KOTS Version</th>
+    <td id="center" colspan="2">1.117.1</td>
+  </tr>
+</table>
+
+### Bug Fixes {#bug-fixes-1-13-1}
+
+* Fixes an issue where you could not upgrade to a version that had special characters like `+` in the version label.
+
+## 1.13.0
+
+Released on September 17, 2024
+
+<table>
+  <tr>
+    <th>Version</th>
+    <td id="center">1.13.0+k8s-1.29</td>
+    <td id="center">1.13.0+k8s-1.28</td>
+  </tr>
+  <tr>
+    <th>Kubernetes Version</th>
+    <td id="center">1.29.8</td>
+    <td id="center">1.28.11</td>
+  </tr>
+  <tr>
+    <th>KOTS Version</th>
+    <td id="center" colspan="2">1.117.0</td>
+  </tr>
+</table>
+
+### New Features {#new-features-1-13-0}
+
+* Adds the [`PrivateCACert` template function](/reference/template-functions-static-context#privatecacert) to return the name of a ConfigMap containing additional trusted CA certificates provided by the end user with the `--private-ca` flag for the `install` command.
+
+### Bug Fixes {#bug-fixes-1-13-0}
+
+* Fixes an issue where user-provided proxy configuration was removed during upgrades.
+* Fixes an issue where the disk performance preflight failed on certain architectures where `fio` was unable to run.
+
+## 1.12.1
+
+Released on September 13, 2024
+
+<table>
+  <tr>
+    <th>Version</th>
+    <td id="center">1.12.1+k8s-1.29</td>
+    <td id="center">1.12.1+k8s-1.28</td>
+  </tr>
+  <tr>
+    <th>Kubernetes Version</th>
+    <td id="center">1.29.8</td>
+    <td id="center">1.28.11</td>
+  </tr>
+  <tr>
+    <th>KOTS Version</th>
+    <td id="center" colspan="2">1.116.1</td>
+  </tr>
+</table>
+
+### New Features {#new-features-1-12-1}
+
+* Adds the ability to provide additional trusted certificate authority certificates with the `install` command's `--private-ca` flag. This is useful when Embedded Cluster is installed behind an enterprise proxy that intercepts traffic and issues its own certificates.
+
+### Bug Fixes {#bug-fixes-1-12-1}
+
+* Removes unnecessary values that were previously added to the no-proxy list automatically.
+* KOTS now uses the fully qualified `.svc.cluster.local` address when making requests to the `kotsadm-rqlite` service to simplify HTTP proxy configuration.
+
+## 1.12.0
+
+Released on September 11, 2024
+
+<table>
+  <tr>
+    <th>Version</th>
+    <td id="center">1.12.0+k8s-1.29</td>
+    <td id="center">1.12.0+k8s-1.28</td>
+  </tr>
+  <tr>
+    <th>Kubernetes Version</th>
+    <td id="center">1.29.8</td>
+    <td id="center">1.28.11</td>
+  </tr>
+  <tr>
+    <th>KOTS Version</th>
+    <td id="center" colspan="2">1.116.0</td>
+  </tr>
+</table>
+
+### Improvements {#improvements-1-12-0}
+
+* Available updates and the check for updates button, both of which had been removed in a previous version, are shown again on the **Dashboard** page of the Admin Console. The check for updates button is now also shown on the **Version history** page.
+* The **Nodes** page displays guidance and easier access to the node join command during initial install.
+* When nodes need to be added to the cluster during a restore operation, the `join` command is more clearly shown in the Admin Console.
+* Hides a banner on the **View Files** page that told users to use `kubectl kots` commands that are not intended for Embedded Cluster.
+* KOTS now uses the fully qualified `.svc.cluster.local` address when making requests to the `kotsadm-rqlite` and `kotsadm-minio` services for simplified HTTP proxy configuration using `NO_PROXY=.cluster.local`.
+
+### Bug Fixes {#bug-fixes-1-12-0}
+
+* Fixes an issue where the values provided to the `--http-proxy`, `--https-proxy`, and `--no-proxy` flags for the `kots install` command were not propagated to the Replicated SDK.
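+
+For reference, a sketch of an installation behind an HTTP proxy using these flags (the binary name, proxy addresses, and no-proxy list are example values):
+
+```bash
+# Proxy settings provided here are also propagated to the Replicated SDK.
+sudo ./APP_SLUG install \
+  --license license.yaml \
+  --http-proxy http://proxy.internal:3128 \
+  --https-proxy http://proxy.internal:3128 \
+  --no-proxy 10.0.0.0/8,.cluster.local
+```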
+
+## 1.11.1
+
+Released on August 30, 2024
+
+<table>
+  <tr>
+    <th>Version</th>
+    <td id="center">1.11.1+k8s-1.29</td>
+    <td id="center">1.11.1+k8s-1.28</td>
+  </tr>
+  <tr>
+    <th>Kubernetes Version</th>
+    <td id="center">1.29.7</td>
+    <td id="center">1.28.11</td>
+  </tr>
+  <tr>
+    <th>KOTS Version</th>
+    <td id="center" colspan="2">1.114.0</td>
+  </tr>
+</table>
+
+### Improvements {#improvements-1-11-1}
+
+* Adds a host preflight check to ensure that disk performance is sufficient for etcd. Specifically, the P99 write latency must be less than 10 ms.
+
+## 1.11.0
+
+Released on August 23, 2024
+
+<table>
+  <tr>
+    <th>Version</th>
+    <td id="center">1.11.0+k8s-1.29</td>
+    <td id="center">1.11.0+k8s-1.28</td>
+  </tr>
+  <tr>
+    <th>Kubernetes Version</th>
+    <td id="center">1.29.7</td>
+    <td id="center">1.28.11</td>
+  </tr>
+  <tr>
+    <th>KOTS Version</th>
+    <td id="center" colspan="2">1.114.0</td>
+  </tr>
+</table>
+
+### Improvements {#improvements-1-11-0}
+
+* The default range available for NodePorts is now 80-32767 instead of 30000-32767. Many customers used [`unsupportedOverrides`](/reference/embedded-config#unsupportedoverrides) to configure this wider range for use with things like an ingress controller, so we have adjusted the default range accordingly. Changes to this range are not applied on upgrades, so existing installations will not be changed.
+* Adds host preflight checks for connecting to replicated.app and proxy.replicated.com. If you use a custom domain for replicated.app, the custom domain will be used in the preflight check.
+* Adds a host preflight check to ensure that neither `nameserver localhost` nor `nameserver 127.0.0.1` is present in `resolv.conf`.
+
+### Bug Fixes {#bug-fixes-1-11-0}
+
+* Fixes several issues that caused node resets to fail. Single-node clusters are no longer drained before being reset. Resets will no longer fail with the error `unable to get installation` if the installation failed early on. And node resets will now work if bind mounts are used for `/var/lib/embedded-cluster`, `/var/lib/k0s`, and `/var/openebs`.
+* Fixes an issue where preflight checks for `modprobe`, `mount`, and `umount` in `PATH` did not use absolute paths.
+* Fixes an issue where restoring did not work with S3-compatible object stores other than AWS S3.
+
+## 1.10.0
+
+Released on August 13, 2024
+
+<table>
+  <tr>
+    <th>Version</th>
+    <td id="center">1.10.0+k8s-1.29</td>
+    <td id="center">1.10.0+k8s-1.28</td>
+  </tr>
+  <tr>
+    <th>Kubernetes Version</th>
+    <td id="center">1.29.7</td>
+    <td id="center">1.28.11</td>
+  </tr>
+  <tr>
+    <th>KOTS Version</th>
+    <td id="center" colspan="2">1.114.0</td>
+  </tr>
+</table>
+
+### New Features {#new-features-1-10-0}
+
+* Adds support for the `dropdown` config item type, which creates a dropdown on the config screen. See [`dropdown`](/reference/custom-resource-config#dropdown) in _Config_.
+* Adds the `radio` config item type, which is functionally equivalent to the `select_one` item type but is more clearly named. The `select_one` config item type is deprecated in favor of `radio` but is still fully functional. See [`radio`](/reference/custom-resource-config#radio) in _Config_.
+
+:::note
+For release notes for Embedded Cluster versions earlier than 1.10.0, see the [Embedded Cluster GitHub releases page](https://github.com/replicatedhq/embedded-cluster/releases).
+:::
+
+================
+File: docs/release-notes/rn-kubernetes-installer.md
+================
+---
+toc_max_heading_level: 2
+pagination_next: null
+pagination_prev: null
+---
+
+# kURL Release Notes
+
+This topic contains release notes for the [Replicated kURL](/vendor/kurl-about) installer. The release notes list new features, improvements, bug fixes, known issues, and breaking changes.
+
+<!--RELEASE_NOTES_PLACEHOLDER-->
+
+## v2025.02.26-0
+
+Released on February 26, 2025
+
+### New Features {#new-features-v2025-02-26-0}
+* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) versions 1.13.10, 1.14.12, 1.15.8, 1.16.3, and 1.16.4.
+* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) versions RELEASE.2025-02-07T23-21-09Z and RELEASE.2025-02-18T16-25-55Z.
+* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.80.0-69.3.3.
+* Adds [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.57.3.
+
+### Bug Fixes {#bug-fixes-v2025-02-26-0}
+* Increases the Grafana pod limits to 200m CPU and 128Mi memory.
+* Fixes Prometheus reporting for Rook 1.13 and later.
+
+## v2025.02.14-0
+
+Released on February 14, 2025
+
+### New Features {#new-features-v2025-02-14-0}
+* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.32.2, 1.31.6, 1.30.10, and 1.29.14.
+* Adds [Metrics Server add-on](https://kurl.sh/docs/add-ons/metrics-server) version 0.7.2.
+* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.15.2.
+* Adds [Cert Manager add-on](https://kurl.sh/docs/add-ons/cert-manager) version 1.17.1.
+
+### Bug Fixes {#bug-fixes-v2025-02-14-0}
+* Fixes [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.7.25 to install the correct version on RHEL 8, CentOS 8, and Oracle Linux 8.
+
+## v2025.02.12-0
+
+Released on February 12, 2025
+
+### New Features {#new-features-v2025-02-12-0}
+* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.31.5, 1.30.9, and 1.29.13.
+* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.7.25.
+* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) versions 0.26.3 and 0.26.4.
+* Adds [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs) versions 4.1.2 and 4.2.0.
+* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.80.0-69.2.0.
+
+## v2024.12.31-0
+
+Released on December 31, 2024
+
+### New Features {#new-features-v2024-12-31-0}
+* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.31.4, 1.30.8, and 1.29.12.
+* Updates [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) to include runc v1.2.3.
+* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.26.2.
+* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.79.2-67.5.0.
+
+## v2024.12.04-0
+
+Released on December 4, 2024
+
+### New Features {#new-features-v2024-12-04-0}
+* Adds support for RHEL 9.5 and Rocky Linux 9.5.
+* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.31.3, 1.30.7, 1.29.11, and 1.28.15.
+* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.26.1.
+* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.78.2-66.2.2.
+* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2024-11-07T00-52-20Z.
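+
+As a reference for adopting these versions, a minimal sketch of pinning some of them in a kURL Installer spec, written from the shell (the spec name and add-on choices are example values):
+
+```bash
+# Pin specific add-on versions from this release in an Installer spec.
+cat <<'EOF' > installer.yaml
+apiVersion: cluster.kurl.sh/v1beta1
+kind: Installer
+metadata:
+  name: my-app
+spec:
+  kubernetes:
+    version: 1.31.3
+  flannel:
+    version: 0.26.1
+  prometheus:
+    version: 0.78.2-66.2.2
+  minio:
+    version: RELEASE.2024-11-07T00-52-20Z
+EOF
+```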
+
+## v2024.11.08-0
+
+Released on November 8, 2024
+
+### Bug Fixes {#bug-fixes-v2024-11-08-0}
+* Fixes an issue where the `public-address` flag provided to the install script was ignored and not included in the API server certificate SANs.
+
+## v2024.11.07-0
+
+Released on November 7, 2024
+
+### New Features {#new-features-v2024-11-07-0}
+* Adds support for discovering the EC2 instance public IP address using AWS IMDSv2.
+* Updates [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) to include runc v1.2.1.
+* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.26.0.
+* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2024-10-29T16-01-48Z.
+* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.77.2-65.8.0.
+
+## v2024.10.24-0
+
+Released on October 24, 2024
+
+### New Features {#new-features-v2024-10-24-0}
+* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.25.7.
+* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.31.2, 1.30.6, 1.29.10, 1.28.15, and 1.27.16.
+
+### Bug Fixes {#bug-fixes-v2024-10-24-0}
+* Fixes an issue that could cause the Velero add-on to fail to install on Ubuntu 22.04.
+
+## v2024.09.26-0
+
+Released on September 26, 2024
+
+### New Features {#new-features-v2024-09-26-0}
+* Adds [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.57.2.
+* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.76.1-62.6.0.
+* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.31.1, 1.30.5, 1.29.9, 1.28.14, and 1.27.16.
+
+### Bug Fixes {#bug-fixes-v2024-09-26-0}
+* Fixes master CIS benchmark checks 1.1.13 and 1.1.14 for `/etc/kubernetes/super-admin.conf` file permissions.
+
+## v2024.09.06-0
+
+Released on September 6, 2024
+
+### Improvements {#improvements-v2024-09-06-0}
+* Improves preflight checks for Amazon Linux 2023 and Ubuntu 24.04.
+
+## v2024.09.03-0
+
+Released on September 3, 2024
+
+### New Features {#new-features-v2024-09-03-0}
+* Adds support for Amazon Linux 2023 and Ubuntu 24.04.
+* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.76.0-62.3.0.
+* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.25.6.
+* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2024-08-26T15-33-07Z.
+
+## v2024.08.26-0
+
+Released on August 26, 2024
+
+### New Features {#new-features-v2024-08-26-0}
+* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.27.16, 1.28.13, 1.29.8, 1.30.4, and 1.31.0.
+* Adds support for CentOS Stream 9.
+* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2024-08-17T01-24-54Z.
+
+### Bug Fixes {#bug-fixes-v2024-08-26-0}
+* Fixes an issue where [Flannel](https://kurl.sh/docs/add-ons/flannel) versions older than 0.24.2 failed to install on instances with VMware NICs.
+
+## v2024.08.12-0
+
+Released on August 12, 2024
+
+### New Features {#new-features-v2024-08-12-0}
+* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2024-08-03T04-33-23Z.
+* Updates the included kustomize binary to v5.4.3.
+
+## v2024.08.07-0
+
+Released on August 7, 2024
+
+### New Features {#new-features-v2024-08-07-0}
+* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.30.3, 1.29.7, 1.28.12, and 1.27.16.
+* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.25.5.
+* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.33.
+* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.14.0.
+* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.75.2-61.6.0.
+* Adds [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs) version 4.1.0.
+* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.30.0.
+* Updates crictl in [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) to version 1.30.2.
+
+### Removals {#removals-v2024-08-07-0}
+* Removes all [Docker add-on](https://kurl.sh/docs/add-ons/docker) versions. Use the [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) instead. The Docker add-on was previously deprecated in March 2023.
+* Removes [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs) versions 1.6.0, 1.12.0, 2.6.0, and 2.12.9.
+* Removes [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) versions 0.33.0, 0.44.1, 0.46.0, 0.46.0-14.9.0, 0.47.0-15.2.0, 0.47.0-15.2.1, 0.47.0-16.0.1, 0.48.0-16.1.2, 0.48.0-16.10.0, 0.48.0-16.12.1, 0.49.0-17.0.0, 0.49.0-17.1.1, and 0.49.0-17.1.3.
+
+## v2024.07.02-0
+
+Released on July 2, 2024
+
+### New Features {#new-features-v2024-07-02-0}
+* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.30.2, 1.29.6, 1.28.11, and 1.27.15.
+* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.25.4.
+* Removes `/var/lib/containerd` and `/var/lib/docker` as part of `tasks.sh reset`.
+
+### Bug Fixes {#bug-fixes-v2024-07-02-0}
+* Fixes cluster subnets being changed on upgrade in some instances.
+
+## v2024.06.12-0
+
+Released on June 12, 2024
+
+### New Features {#new-features-v2024-06-12-0}
+* Updates [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.28.7 to use haproxy:2.9.7-alpine3.20.
+* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.32.
+* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.74.0-59.0.0.
+* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) versions 0.25.2 and 0.25.3.
+* Adds support for RHEL 8.10 and 9.4.
+* Adds support for Oracle Linux 8.10.
+* Adds support for Rocky Linux 9.4.
+
+## v2024.05.17-0
+
+Released on May 17, 2024
+
+### New Features {#new-features-v2024-05-17-0}
+* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.30.1, 1.29.5, 1.28.10, and 1.27.14.
+* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.73.2-58.5.2.
+* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2024-05-10T01-41-38Z.
+* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.29.0.
+* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.28.7.
+* Updates [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.13.2 to use local-volume-provider:v0.6.4.
+
+## v2024.05.03-0
+
+Released on May 3, 2024
+
+### New Features {#new-features-v2024-05-03-0}
+* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) version 1.30.0.
+
+### Bug Fixes {#bug-fixes-v2024-05-03-0}
+* Fixes the list of host package dependencies for RHEL 9.
+* Stops using the default yum repos if all dependencies are already installed on RHEL 9.
+* Stops installing sub-dependencies on RHEL 9 systems.
+
+## v2024.04.19-0
+
+Released on April 19, 2024
+
+### New Features {#new-features-v2024-04-19-0}
+* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.31.
+* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.29.4, 1.28.9, 1.27.13, and 1.26.15.
+* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.13.2.
+
+## v2024.04.16-0
+
+Released on April 16, 2024
+
+### New Features {#new-features-v2024-04-16-0}
+* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.28.6.
+* Adds [Goldpinger add-on](https://kurl.sh/docs/add-ons/goldpinger) version 3.10.0-6.2.0.
+* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.28.3.
+* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.25.1.
+* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2024-04-06T05-26-02Z.
+* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.73.1-58.1.1.
+
+## v2024.04.11-0
+
+Released on April 11, 2024
+
+### Bug Fixes {#bug-fixes-v2024-04-11-0}
+* Fixes an issue where dependencies for the `fio` package caused Amazon Linux 2 to become CentOS 7.
+
+## v2024.04.03-1
+
+Released on April 3, 2024
+
+### New Features {#new-features-v2024-04-03-1}
+* Adds [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs) version 4.0.0.
+* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.28.2.
+* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.13.1.
+
+## v2024.04.03-0
+
+Released on April 3, 2024
+
+### New Features {#new-features-v2024-04-03-0}
+* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.29.3, 1.28.8, 1.27.12, and 1.26.15.
+* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.24.4.
+* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2024-03-26T22-10-45Z.
+* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.72.0-57.2.0.
+* Updates [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.28.4 to use haproxy:2.9.6.
+
+## v2024.02.23-0
+
+Released on February 23, 2024
+
+### New Features {#new-features-v2024-02-23-0}
+* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.29.2, 1.28.7, 1.27.11, and 1.26.14.
+* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2024-02-17T01-15-57Z.
+* Updates [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.28.4 to use haproxy 2.9.5.
+
+## v2024.02.05-0
+
+Released on February 5, 2024
+
+### New Features {#new-features-v2024-02-05-0}
+* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.29.1, 1.28.6, 1.27.10, and 1.26.13.
+* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) versions 0.24.1 and 0.24.2.
+* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) versions 1.6.27 and 1.6.28.
+* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.12.3.
+* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.71.2-56.6.0.
+* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2024-02-04T22-36-13Z.
+* Updates [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.28.4 to use haproxy 2.9.4.
+* Users of VMware clusters using the VMXNET3 NIC driver will see a new systemd `.service` file included that disables TCP checksum offloading on the Flannel interface. This fixes an issue we have seen with dropped packets under certain combinations of VMware NIC and cluster configurations (see the sketch below).
+
+### Improvements {#improvements-v2024-02-05-0}
+* Installs an OpenEBS support bundle spec whenever the OpenEBS add-on is added to a kURL spec.
+* Installs a Velero support bundle spec whenever the Velero add-on is added to a kURL spec.
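+
+For context on the VMXNET3 note above, a hedged sketch of what the checksum-offload workaround amounts to when applied manually (the interface name `flannel.1` is an assumption):
+
+```bash
+# Disable TX checksum offloading on the Flannel interface; the new
+# systemd unit automates this on affected VMware configurations.
+sudo ethtool -K flannel.1 tx off
+# Confirm the setting.
+ethtool -k flannel.1 | grep tx-checksumming
+```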
+
+## v2024.01.09-0
+
+Released on January 9, 2024
+
+### New Features {#new-features-v2024-01-09-0}
+* Updates [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) to use runc v1.1.11.
+* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2024-01-01T16-36-33Z.
+* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.28.4.
+
+## v2024.01.02-0
+
+Released on January 2, 2024
+
+### New Features {#new-features-v2024-01-02-0}
+* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.29.0, 1.28.5, 1.28.4, 1.27.9, 1.27.8, 1.26.12, and 1.26.11.
+* Updates [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.28.3 to use HAProxy 2.9.1.
+* Adds [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs) version 3.10.0.
+* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.24.0.
+* Adds [Goldpinger add-on](https://kurl.sh/docs/add-ons/goldpinger) version 3.9.0-6.1.2.
+
+### Bug Fixes {#bug-fixes-v2024-01-02-0}
+* Fixes an issue where the `minimum-node-count` parameter for Rook storage would require port 31880 to be opened between the node joining the cluster and a primary node.
+* Adds a preflight check to Kubernetes 1.29.x and later to prevent installing KOTS versions earlier than 1.96.2 due to version incompatibilities.
+
+## v2023.12.14-0
+
+Released on December 14, 2023
+
+### New Features {#new-features-v2023-12-14-0}
+* Adds support for RHEL 9.3 and Rocky Linux 9.3.
+* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) versions 1.6.25 and 1.6.26.
+* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.12.2.
+* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.70.0-55.0.0.
+* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) versions RELEASE.2023-12-02T10-51-33Z and RELEASE.2023-12-09T18-17-51Z.
+* Updates [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.28.3 to use HAProxy 2.9.0.
+
+### Bug Fixes {#bug-fixes-v2023-12-14-0}
+* Fixes an issue where Kubernetes 1.27 or later could prune the pause image being used, causing pods to fail.
+
+## v2023.11.20-0
+
+Released on November 20, 2023
+
+### New Features {#new-features-v2023-11-20-0}
+* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.12.8.
+* Updates [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.28.3 to use HAProxy 2.8.4.
+* Adds support for RHEL and Oracle Linux 8.9.
+
+### Bug Fixes {#bug-fixes-v2023-11-20-0}
+* Improves error reporting during the Weave-to-Flannel migration.
+
+## v2023.11.17-0
+
+Released on November 17, 2023
+
+### New Features {#new-features-v2023-11-17-0}
+* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-11-15T20-43-25Z.
+* Adds [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.57.1.
+
+## v2023.11.16-0
+
+Released on November 16, 2023
+
+### New Features {#new-features-v2023-11-16-0}
+* Adds [Cert Manager add-on](https://kurl.sh/docs/add-ons/cert-manager) version 1.13.2.
+* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.69.1-53.0.0.
+* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-11-11T08-14-41Z.
+* Updates [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) to use runc v1.1.10.
+
+## v2023.11.02-0
+
+Released on November 2, 2023
+
+### New Features {#new-features-v2023-11-02-0}
+* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.12.7.
+* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.27.0.
+* Updates [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.12.1 to use local-volume-provider v0.5.5.
* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.23.0.
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-11-01T18-37-25Z.

## v2023.10.26-0

Released on October 26, 2023

### New Features {#new-features-v2023-10-26-0}
* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.26.1.
* Adds [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.57.0.
* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.12.6.
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-10-16T04-13-43Z.
* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.12.1.

### Bug Fixes {#bug-fixes-v2023-10-26-0}
* Improves the reliability of the reset task by adding directory removal retry logic.
* If the `fio` host package cannot be installed, installation will continue without host filesystem performance metrics.

## v2023.10.19-0

Released on October 19, 2023

### Bug Fixes {#bug-fixes-v2023-10-19-0}
* Fixes a bug where having multiple volumes attached to the same pod would cause some volumes not to be created on the correct node when migrating to OpenEBS.

## v2023.10.12-0

Released on October 12, 2023

### New Features {#new-features-v2023-10-12-0}
* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.12.0.
* Updates [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.26.0 to use Envoy v1.27.1.
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-10-07T15-07-38Z.

### Bug Fixes {#bug-fixes-v2023-10-12-0}
* Fixes a bug introduced in v2023.10.11-0 that prevented migrating from Longhorn to OpenEBS or Rook-Ceph storage when Prometheus was installed.
* Fixes a race condition that could prevent Prometheus from being upgraded from very old versions.

## v2023.10.11-0

Released on October 11, 2023

### New Features {#new-features-v2023-10-11-0}
* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.12.5.

### Improvements {#improvements-v2023-10-11-0}
* The filesystem performance preflight check uses the `fio` package instead of an internal implementation. To support the filesystem performance preflight check, the `fio` package is installed as part of the installation script.

  :::note
  The `fio` package is not automatically installed on Ubuntu 18.04 operating systems. This means that the filesystem performance preflight check does not run on Ubuntu 18.04 unless `fio` has been installed through other means.
  :::

### Bug Fixes {#bug-fixes-v2023-10-11-0}
* When migrating from Longhorn to OpenEBS storage, PVCs remain on the node where the pod that uses the PVC was previously running.

## v2023.10.09-0

Released on October 9, 2023

### Bug Fixes {#bug-fixes-v2023-10-09-0}
* Files in `/var/lib/kurl` are now properly owned by root:root and not the uid `1001`.
* The kURL reset script now removes `/var/lib/cni` files.

## v2023.10.04-0

Released on October 4, 2023

### New Features {#new-features-v2023-10-04-0}
* Adds [Registry add-on](https://kurl.sh/docs/add-ons/registry) version 2.8.3.

## v2023.10.03-0

Released on October 3, 2023

### New Features {#new-features-v2023-10-03-0}
* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.26.0.
* Adds [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs) version 3.9.0.
+* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-09-30T07-02-29Z.

## v2023.09.26-0

Released on September 26, 2023

### New Features {#new-features-v2023-09-26-0}
* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.12.4.
* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.22.3.
* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.24.
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-09-23T03-47-50Z.

### Bug Fixes {#bug-fixes-v2023-09-26-0}
* Fixes an issue where the weave-to-flannel migration prompts for remote nodes incorrectly included the 'airgap' flag on online systems, and omitted it on air gap systems.

## v2023.09.15-0

Released on September 15, 2023

### New Features {#new-features-v2023-09-15-0}
* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.28.2, 1.27.6, 1.26.9, and 1.25.14.
* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.68.0-51.0.0.

## v2023.09.12-0

Released on September 12, 2023

### Bug Fixes {#bug-fixes-v2023-09-12-0}
* Docker 20.10.x will properly use RHEL 8 packages when installing on RHEL 8 based operating systems. This is still considered an unsupported configuration.

## v2023.09.07-0

Released on September 7, 2023

### New Features {#new-features-v2023-09-07-0}
* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) version 1.24.17.
* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.12.3.
* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.67.1-50.3.1.
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-09-04T19-57-37Z.

### Bug Fixes {#bug-fixes-v2023-09-07-0}
* Velero 1.11.1 and later properly removes the 'restic' daemonset when upgrading. This is replaced by a new daemonset named 'node-agent'.
* Velero 1.11.1 and later running with OpenEBS with no object storage creates no default backup location instead of a broken one.
* Fixes an issue when merging an Installer patch file containing HostPreflights definitions.

## v2023.08.28-0

Released on August 28, 2023

### New Features {#new-features-v2023-08-28-0}
* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.28.1, 1.27.5, 1.26.8, and 1.25.13.
* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.12.2.
* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.11.1.
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-08-23T10-07-06Z.

## v2023.08.23-0

Released on August 23, 2023

### New Features {#new-features-v2023-08-23-0}
* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) version 1.28.0.
* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.22.2.
* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.22. As this package has not been published for Ubuntu 18.04, 1.6.21 will be installed there when 1.6.22 is selected.

### Improvements {#improvements-v2023-08-23-0}
* When PVC storage migrations from Rook or Longhorn to OpenEBS 3.7.0+ are required, the migrations will be performed before upgrading Kubernetes (see the example spec after this list).
* When object storage migrations from Rook to MinIO 2023-08-04T17-40-21Z+ are required, the migrations will be performed before upgrading Kubernetes.
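
To make the two migration items above concrete: these storage migrations are driven by the contents of the kURL spec itself, for example by removing the `longhorn` add-on and including OpenEBS 3.7.0+ (plus MinIO where an object store is still required). The fragment below is a sketch only; the metadata name and versions are illustrative.

```
apiVersion: cluster.kurl.sh/v1beta1
kind: Installer
metadata:
  name: migrate-to-openebs   # illustrative name
spec:
  kubernetes:
    version: 1.27.5
  openebs:                   # replaces the removed longhorn add-on
    version: 3.8.0
    isLocalPVEnabled: true
    localPVStorageClassName: local
  minio:                     # object storage target for migrated object store data
    version: 2023-08-04T17-40-21Z
```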
+ 
## v2023.08.15-0

Released on August 15, 2023

### New Features {#new-features-v2023-08-15-0}
* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.28.3.

### Bug Fixes {#bug-fixes-v2023-08-15-0}
* Fixes an issue where EKCO-created Rook-Ceph clusters would not mount volumes on RHEL 7 based operating systems.

## v2023.08.10-0

Released on August 10, 2023

### New Features {#new-features-v2023-08-10-0}
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-08-04T17-40-21Z.
* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.66.0-48.3.1.
* Adds [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs) version 3.8.0.
* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.22.1.
* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.28.2.

### Bug Fixes {#bug-fixes-v2023-08-10-0}
* Fixes an issue where the control plane would not get upgraded on remote primary nodes due to a missing file `/etc/kubernetes/audit.yaml`.
* Fixes an error `failed to pull image registry.k8s.io/coredns:v1.8.6` when upgrading from Kubernetes version 1.23.15 to 1.24.4.

## v2023.08.07-0

Released on August 7, 2023

### New Features {#new-features-v2023-08-07-0}
* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) versions 1.12.0 and 1.12.1.

### Bug Fixes {#bug-fixes-v2023-08-07-0}
* Fixes an issue where storage could not be moved from Longhorn to OpenEBS at the same time as Kubernetes was upgraded to 1.25 or later.

## v2023.07.31-0

Released on July 31, 2023

### New Features {#new-features-v2023-07-31-0}
* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.66.0-48.1.2.
* Adds [Metrics Server add-on](https://kurl.sh/docs/add-ons/metrics-server) version 0.6.4.
* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.28.2.
* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.25.2.

### Bug Fixes {#bug-fixes-v2023-07-31-0}
* Resolves an issue for the OpenEBS to Rook storage migration feature that caused a delay in storage availability when using the feature on new installations. See [Known Issues](#known-issues-v2023-07-06-0) in _v2023.07.06-0_.

## v2023.07.21-0

Released on July 21, 2023

:::important
kURL v2023.07.21-0 has a known issue for the OpenEBS to Rook storage migration feature. This issue is resolved in [v2023.07.31-0](#bug-fixes-v2023-07-31-0).
:::

### New Features {#new-features-v2023-07-21-0}
* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.27.4, 1.26.7, 1.25.12, and 1.24.16.
* Updates [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) 1.27.3 to use crictl v1.27.1.
* Updates [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) with runc v1.1.8.

### Known Issues {#known-issues-v2023-07-21-0}

kURL v2023.07.21-0 has a known issue for the OpenEBS to Rook storage migration feature. This issue is resolved in [v2023.07.31-0](#bug-fixes-v2023-07-31-0).

## v2023.07.11-0

Released on July 11, 2023

:::important
kURL v2023.07.11-0 has a known issue for the OpenEBS to Rook storage migration feature. This issue is resolved in [v2023.07.31-0](#bug-fixes-v2023-07-31-0).
:::

### Bug Fixes {#bug-fixes-v2023-07-11-0}
* Fixes support for RHEL 9.2.
* Fixes adding the Registry add-on to multi-node clusters using Containerd.
+ 
### Known Issues {#known-issues-v2023-07-11-0}

kURL v2023.07.11-0 has a known issue for the OpenEBS to Rook storage migration feature that causes a delay in storage availability when using the feature on new installations. This issue is resolved in [v2023.07.31-0](#bug-fixes-v2023-07-31-0).

## v2023.07.10-0

Released on July 10, 2023

:::important
kURL v2023.07.10-0 has a known issue for the OpenEBS to Rook storage migration feature. This issue is resolved in [v2023.07.31-0](#bug-fixes-v2023-07-31-0).
:::

### Improvements {#improvements-v2023-07-10-0}
* Clarifies the error message when installing on RHEL 9 variants without containerd.

### Bug Fixes {#bug-fixes-v2023-07-10-0}
* Improves messaging when a subnet cannot be automatically discovered.

### Known Issues {#known-issues-v2023-07-10-0}

kURL v2023.07.10-0 has a known issue for the OpenEBS to Rook storage migration feature that causes a delay in storage availability when using the feature on new installations. This issue is resolved in [v2023.07.31-0](#bug-fixes-v2023-07-31-0).

## v2023.07.06-0

Released on July 6, 2023

:::important
kURL v2023.07.06-0 has a known issue for the OpenEBS to Rook storage migration feature. This issue is resolved in [v2023.07.31-0](#bug-fixes-v2023-07-31-0).
:::

### New Features {#new-features-v2023-07-06-0}
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) versions RELEASE.2023-06-23T20-26-00Z and RELEASE.2023-06-29T05-12-28Z.
* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.27.3.
* Adds the ability to start a cluster with OpenEBS and MinIO, and then migrate data to Rook-Ceph after three or more nodes are ready. For more information, see [Migrating CSI](https://kurl.sh/docs/install-with-kurl/migrating-csi#automated-local-to-distributed-storage-migrations) in the kURL documentation.

### Known Issues {#known-issues-v2023-07-06-0}

kURL v2023.07.06-0 has a known issue for the OpenEBS to Rook storage migration feature that causes a delay in storage availability when using the feature on new installations. This issue is resolved in [v2023.07.31-0](#bug-fixes-v2023-07-31-0).

## v2023.06.27-0

Released on June 27, 2023

### New Features {#new-features-v2023-06-27-0}
* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.27.3, 1.26.6, 1.25.11, and 1.24.15.
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-06-19T19-52-50Z.
* Adds support for RHEL 8.8.
* Adds a preflight check to require an object store or storage provider when using the Registry add-on.
* Updates the Velero add-on version 1.11.0 with new replicated/local-volume-provider image version v0.5.4.

### Bug Fixes {#bug-fixes-v2023-06-27-0}
* Fixes an issue that prevented migrating away from Rook-Ceph when the `dashboard` or `prometheus` modules were unhealthy.
* Fixes an issue preventing Velero deployment rollout when using kURL version `v2023.06.20-0`.

## v2023.06.20-0

Released on June 20, 2023

:::important
Versions earlier than v2023.06.20-0 contain a known issue that might put the system in a critical state when migrating from Weave to Flannel. Use v2023.06.20-0 or later when migrating from Weave to Flannel.
:::

### New Features {#new-features-v2023-06-20-0}
* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.22.0.
* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.11.8.
* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.25.0.
+* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.65.2-46.8.0.
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-06-09T07-32-12Z.
* Adds [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.56.17.

### Improvements {#improvements-v2023-06-20-0}
* Enhances the migration from Weave to Flannel to preserve custom IP Tables rules.

### Bug Fixes {#bug-fixes-v2023-06-20-0}
* Backports the Weave to Flannel migration fixes introduced in the latest release, v2023.06.09-0, to all previous Flannel versions.
* Fixes a MinIO PVC resizing race condition for versions equal to or greater than `2023-06-09T07-32-12Z`.
* Fixes the migration from Weave to Flannel to allow the installer to retry the migration when an error occurs.

## v2023.06.09-0

Released on June 9, 2023

### New Features {#new-features-v2023-06-09-0}
* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.65.1-46.5.0.
* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.27.1.
* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.11.7.
* Adds [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs) version 3.7.0.
* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.65.1-46.6.0.

### Improvements {#improvements-v2023-06-09-0}
* Enhances proxy installations by enabling the use of HTTP_PROXY and HTTPS_PROXY environment variables during the configuration of the container runtime.

### Bug Fixes {#bug-fixes-v2023-06-09-0}
* Fixes an issue where pods got stuck when migrating from Weave to Flannel. This fix also ensures that Weave is properly removed during the migration. This bug fix applies to Flannel versions 0.21.5 and later.
* Fixes an issue that could cause Rook upgrades from version 1.0.4 to 1.7.x to fail with `error rook-ceph-migrator pod not found`.

## v2023.05.30-0

Released on May 30, 2023

### Improvements {#improvements-v2023-05-30-0}
* Adds a check to ensure connectivity to the registry with Containerd. This check helps identify misconfigurations, including invalid proxy settings.
* Adds a message informing the user of preflight check results when preflight checks have been ignored using the `host-preflight-ignore` flag.

### Bug Fixes {#bug-fixes-v2023-05-30-0}
* Fixes an issue where the Longhorn to OpenEBS migration preparation preflight check failed due to the following error: `error scaling down pods using longhorn volumes: error scaling down *v1.statefulset default/kotsadm-rqlite: Operation cannot be fulfilled on statefulsets.apps "kotsadm-rqlite": the object has been modified; please apply your changes to the latest version and try again`.
* Fixes an issue where Longhorn volumes could not be rolled back after a storage migration, which failed with the error: `error rolling back volume ... replicas: Operation cannot be fulfilled on volumes.longhorn.io ...`.
* Fixes an issue uninstalling Weave by removing interfaces first and then removing lib/weave for Weave versions `0.21.5` and later.

## v2023.05.25-0

Released on May 25, 2023

### New Features {#new-features-v2023-05-25-0}
* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.11.6.
* Adds support for Oracle Linux 8.8.
* Adds support for Rocky Linux 9.2 and RHEL 9.2.

### Improvements {#improvements-v2023-05-25-0}
* Improves logs for the Registry add-on.
+ 
### Bug Fixes {#bug-fixes-v2023-05-25-0}
* Fixes an issue where the `additionalNoProxyAddresses` value was not properly propagated through the `additional-no-proxy-addresses` flag in the output commands.
* Fixes OpenSSL calls used to configure the Rook add-on by explicitly specifying the digest method in order to support RHEL 9.2.
* Fixes OpenSSL calls used to configure the Registry add-on by explicitly specifying the digest method in order to support RHEL 9.2.

## v2023.05.22-0

Released on May 22, 2023

### New Features {#new-features-v2023-05-22-0}
* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.27.2, 1.26.5, 1.25.10, and 1.24.14.
* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) versions 0.65.1-45.27.2 and 0.65.1-45.28.0.
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-05-18T00-05-36Z.
* Enables the [Collectd add-on](https://kurl.sh/docs/add-ons/collectd) for Ubuntu 22.04.

### Improvements {#improvements-v2023-05-22-0}
* Adds further log information for Docker proxy settings configuration.
* Adds further log information for containerd installations and configuration on version 1.5.10 or later.

### Bug Fixes {#bug-fixes-v2023-05-22-0}
* Fixes an issue with the [Weave add-on](https://kurl.sh/docs/add-ons/weave) for version 2.8.1-20230417 that prevented symbolic links to /opt/cni/bin from working.
* Fixes an issue that caused Rook upgrades from 1.0.4 to 1.8.x or later to fail with the error "pod has unsupported owner ReplicaSet".
* Improves stability of upgrades to Rook version 1.5.12.
* Updates the [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.11.0 with new replicated/local-volume-provider image version v0.5.3 to address the following high severity CVE: CVE-2022-29458.

## v2023.05.15-0

Released on May 15, 2023

### New Features {#new-features-v2023-05-15-0}
* Adds [Registry add-on](https://kurl.sh/docs/add-ons/registry) version 2.8.2.

### Bug Fixes {#bug-fixes-v2023-05-15-0}
* Ensures that the Firewalld preflight check verifies whether Firewalld is both enabled and active, and provides more comprehensive information about the check results.

## v2023.05.11-0

Released on May 11, 2023

### New Features {#new-features-v2023-05-11-0}
* Adds the ability to have fine-grained control over the Rook-Ceph node and device storage configuration through the [`rook.nodes`](https://kurl.sh/docs/add-ons/rook#per-node-storage-configuration) property of the specification (see the sketch after this release's notes).
* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.21.
* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.21.5.
* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.11.5.
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-05-04T21-44-30Z.
* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) versions 1.24.4 and 1.25.0.
* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) versions 0.65.1-45.26.0 and 0.65.1-45.27.1.
* Upgrades [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) to version 0.27.1.

### Bug Fixes {#bug-fixes-v2023-05-11-0}
* Fixes an issue that causes installations to fail when running preflight checks and the file `/etc/kubernetes/admin.conf` is not found due to a previous failed Kubernetes installation.
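
As referenced in the v2023.05.11-0 notes above, the `rook.nodes` property gives per-node control over which devices Rook-Ceph uses for storage. The fragment below is a sketch of what such a configuration might look like; the node and device names are illustrative, and the authoritative schema is in the linked Rook add-on documentation.

```
spec:
  rook:
    version: 1.11.5
    nodes:                 # per-node storage configuration (names are illustrative)
      - name: node-1
        devices:
          - name: sdb
      - name: node-2
        devices:
          - name: sdb
          - name: sdc
```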
+ 
## v2023.05.08-0

Released on May 8, 2023

### New Features {#new-features-v2023-05-08-0}
* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.27.1, 1.27.0, 1.26.4, 1.25.9, and 1.24.13.
* Adds [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs) version 3.6.0.
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-04-20T17-56-55Z.
* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) versions 0.63.0-45.19.0, 0.63.0-45.20.0, and 0.63.0-45.21.0.
* Adds [Goldpinger add-on](https://kurl.sh/docs/add-ons/goldpinger) version 3.7.0-6.0.1.
* Updates [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) runc version from v1.1.5 to v1.1.7.
* Adds the ability to bypass kubeadm upgrade preflight errors and warnings using the spec property [`kubernetes.upgradeIgnorePreflightErrors`](https://kurl.sh/docs/add-ons/kubernetes#advanced-install-options:~:text=upgradeIgnorePreflightErrors) or the flag [`--kubernetes-upgrade-ignore-preflight-errors=`](https://kurl.sh/docs/install-with-kurl/advanced-options#:~:text=internal%2Dload%2Dbalancer-,kubernetes%2Dupgrade%2Dignore%2Dpreflight%2Derrors,-Bypass%20kubeadm%20upgrade).
* Adds the ability to configure the maximum number of Pods that can run on each node (default 110) using the spec property [`kubernetes.maxPodsPerNode`](https://kurl.sh/docs/add-ons/kubernetes#advanced-install-options:~:text=the%20Kubernetes%20documentation.-,maxPodsPerNode,-The%20maximum%20number) or the flag [`--kubernetes-max-pods-per-node=`](https://kurl.sh/docs/install-with-kurl/advanced-options#:~:text=preflight%2Derrors%3DCoreDNSUnsupportedPlugins-,kubernetes%2Dmax%2Dpods%2Dper%2Dnode,-The%20maximum%20number).

### Improvements {#improvements-v2023-05-08-0}
* Reduces OpenEBS resource usage by removing NDM.
* Removes the `rook-upgrade` task.

### Bug Fixes {#bug-fixes-v2023-05-08-0}
* Fixes an issue on RHEL 7 based distributions that caused the script to improperly calculate the bundle size when upgrading multiple Kubernetes versions and print the message 'total_archive_size + "935": syntax error: operand expected (error token is ""935"")'.
* Fixes an issue where high availability MinIO deployments were not migrated to Rook's object store.
* Fixes an issue that caused Rook upgrades of more than one minor version to upgrade to the latest patch version for the target minor version rather than to the specified patch version.
* Fixes an issue when upgrading Rook from v1.4.x or later in an air gap environment that caused the script to fail with ImagePullBackoff errors due to the failure to prompt the user to load images on remote nodes.

## v2023.04.24-0

Released on April 24, 2023

### New Features {#new-features-v2023-04-24-0}
* Updates the [Kubernetes add-on](https://kurl.sh/docs/add-ons/kubernetes) to support upgrading Kubernetes by more than two minor versions at the same time using a single spec. For air gap instances, users must provide a package with the required assets during upgrade. For more information, see [Upgrading](https://kurl.sh/docs/install-with-kurl/upgrading#kubernetes) in the kURL documentation.
* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.11.4.
* Adds [Weave add-on](https://kurl.sh/docs/add-ons/weave) versions 2.8.1-20230417 and 2.6.5-20230417 to address the following high and critical severity CVEs: CVE-2023-27536, CVE-2023-27533, CVE-2023-27534, CVE-2023-27535.
+* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) versions 0.63.0-45.10.1 and 0.63.0-45.15.0.
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-04-13T03-08-07Z.

### Improvements {#improvements-v2023-04-24-0}
* Updates kURL to use the `kurl-install-directory` specified for host OS repositories. Previously, this was hardcoded to `/var/lib/kurl`.

### Bug Fixes {#bug-fixes-v2023-04-24-0}
* Fixes an issue to ensure that the tasks.sh reset script respects the `kurl-install-directory` flag or discovers the directory from the cluster.
* Fixes an issue that caused the installation script to prompt for a load balancer address when running the installer with `ekco-enable-internal-load-balancer`.

## v2023.04.13-0

Released on April 13, 2023

### New Features {#new-features-v2023-04-13-0}
* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.11.3.
* Adds [Weave add-on](https://kurl.sh/docs/add-ons/weave) version 2.8.1-20230406 to address the following high severity CVE: CVE-2023-0464.
* Updates the [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.10.2 with new kurlsh/s3cmd image to address the following high severity CVE: CVE-2023-0464.

### Bug Fixes {#bug-fixes-v2023-04-13-0}
* Fixes an issue that causes migrations from Docker to containerd on multi-node clusters to fail with the error "Downgrading containerd is not supported".
* Fixes an issue that could cause installations to fail with the error "/var/lib/kurl does not exist" when using the `kurl-install-directory` flag.

## v2023.04.11-0

Released on April 11, 2023

### New Features {#new-features-v2023-04-11-0}
* Adds support for RHEL and Rocky Linux 9.
* Makes the [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) mandatory.
* Updates kURL to always install the latest version of the [EKCO add-on](https://kurl.sh/docs/add-ons/ekco), even if the EKCO add-on is not specified or if a different version is specified.
* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.26.5, and removes all versions earlier than 0.26.5.
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-03-24T21-41-23Z.
* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) versions 0.63.0-45.8.0, 0.63.0-45.8.1, and 0.63.0-45.9.1.
* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.24.3.
* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.20.
* Updates the [Registry add-on](https://kurl.sh/docs/add-ons/registry) version 2.8.1 with new kurlsh/s3cmd image to address the following high severity CVE: CVE-2023-0464.

### Improvements {#improvements-v2023-04-11-0}
* Adds a preflight check to ensure sufficient disk space is available for the Containerd, Rook, and OpenEBS add-ons.
* Adds a preflight check to ensure the Kubernetes API Server is healthy prior to Kubernetes upgrades.
* Adds a preflight check to ensure Kubernetes API Server load balancer health prior to Kubernetes upgrades.
* Adds a preflight check to ensure Kubernetes API and etcd certificates are present and valid prior to Kubernetes upgrades.
* Adds a preflight check to ensure nodes are healthy prior to Kubernetes upgrades.
* Adds a preflight check to ensure that kURL Pod(s) are running prior to Kubernetes upgrades.
* Adds a preflight check to ensure that MinIO pods are running prior to migrating object store data from Rook.
+* Adds a preflight check to ensure that OpenEBS and Rook-Ceph are healthy prior to migrating from Rook to OpenEBS.
* Adds a preflight check to ensure that Longhorn and OpenEBS are healthy prior to migrating from Longhorn to OpenEBS.
* Adds a preflight check to ensure that Longhorn and Rook-Ceph are healthy prior to migrating from Longhorn to Rook-Ceph.
* Adds a preflight check to prevent unsupported migrations from Longhorn to OpenEBS versions earlier than 3.3.0 and without an object store when Registry is present.
* Adds the ability to upgrade the containerd add-on in a kURL cluster by two minor versions at the same time.

### Bug Fixes {#bug-fixes-v2023-04-11-0}
* Fixes an issue that could cause rerunning the install script to fail if the Kubernetes binaries are installed but the cluster was never installed or configured.

## v2023.03.28-0

Released on March 28, 2023

### New Features {#new-features-v2023-03-28-0}
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-03-20T20-16-18Z.
* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.11.2.
* Adds a new [Weave add-on](https://kurl.sh/docs/add-ons/weave) version.
* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.21.4.
* Adds [Metrics Server add-on](https://kurl.sh/docs/add-ons/metrics-server) version 0.6.3.

### Improvements {#improvements-v2023-03-28-0}
* Adds preflight checks to prevent installations without the `kotsadm.disableS3` option set to `true` from continuing without an object store.
* Adds preflight checks to prevent migrating from Rook to OpenEBS without MinIO when the Registry add-on is included in the spec.
* Removes the optional flag `force-reapply-addons` and makes it the default behavior to reapply all add-ons regardless of whether or not they change.

### Bug Fixes {#bug-fixes-v2023-03-28-0}
* Fixes an issue when upgrading from certain Kubernetes releases that caused the script to fail with the error "connection refused" and the message "couldn't retrieve DNS addon deployments".
* Fixes an issue that could cause the installation script to exit with an error when running preflights if kubectl is installed but Kubernetes is not installed or the cluster is down.
* Fixes an issue that prevented Rook from being fully removed after a migration to another PV provisioner.
* Fixes an issue that allowed the object store to be migrated more than one time during a storage migration.

## v2023.03.21-0

Released on March 21, 2023

### New Features {#new-features-v2023-03-21-0}
* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.26.3, 1.25.8, 1.24.12, 1.23.17, and 1.22.17.
* Adds a preflight check to ensure that a host is not updated with a version of the Kubernetes installer that is earlier than the version currently running in the cluster.
* Adds better logging information that highlights failures and warnings when migrating from Rook.

### Bug Fixes {#bug-fixes-v2023-03-21-0}
* Fixes an issue when migrating from Rook that caused the Rook Ceph preflight health check to incorrectly report that Ceph was unhealthy because Ceph version information could not be found. This issue was caused by a bug in Rook Ceph versions earlier than 1.4.8.
* Fixes broken upgrades caused by not being able to uninstall Rook. Upgrade failures are highlighted in the console with further information.
* Fixes an issue where the installation script got stuck when migrating from Rook. Added timeouts with further information displayed in the console.
+* Fixes a bug where Rook data was not removed after Rook Ceph was removed from the cluster.
* Fixes a bug in the Kubernetes installer v2023.03.20-0 where the registry add-on failed to create the object store.

## v2023.03.20-0

Released on March 20, 2023

### New Features {#new-features-v2023-03-20-0}
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-03-13T19-46-17Z.
* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.24.2.
* Adds [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs) version 3.5.0.
* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.10.2.

### Improvements {#improvements-v2023-03-20-0}
* Adds checks to ensure that Rook Ceph and its object store are healthy before migrating from Rook to OpenEBS and MinIO.
* Adds checks and better log information when removing Rook or Longhorn to notify users of the reasons for a failure.

### Bug Fixes {#bug-fixes-v2023-03-20-0}
* Fixes an issue where the weave-to-flannel-\{primary,secondary\} tasks fail with "Flannel images not present...".

## v2023.03.13-0

Released on March 13, 2023

### New Features {#new-features-v2023-03-13-0}
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-03-09T23-16-13Z.
* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.21.3.
* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.63.0-45.7.1.

### Bug Fixes {#bug-fixes-v2023-03-13-0}
* Fixes upgrade timeouts by adding a check to wait for the Rook rollout from 1.5.9 to 1.10.8, as is done for Rook 1.10.11.
* Adds a preflight check to prevent unsupported migrations from Rook to OpenEBS versions earlier than 3.3.0.
* Fixes an issue where MinIO failed to update when running in high availability mode.
* Fixes the issue `failed to find plugin /opt/cni/bin/weave-net` seen when the installer checks cluster networking: when the binary is not found, the weave-net pod is now deleted so that it can be re-created successfully.
* Increases the timeout from 5 to 10 minutes when waiting for the sync-object-store pod to complete as part of the object store migration from Rook to OpenEBS.

## v2023.03.07-0

Released on March 7, 2023

### New Features {#new-features-v2023-03-07-0}
* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.26.2, 1.25.7, 1.24.11, 1.23.17, and 1.22.17.
* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) versions 0.63.0-45.3.0, 0.63.0-45.4.0, and 0.63.0-45.5.0.
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-02-27T18-10-45Z.
* Adds [Goldpinger add-on](https://kurl.sh/docs/add-ons/goldpinger) version 3.7.0-5.6.0.
* Adds [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.56.16.

### Improvements {#improvements-v2023-03-07-0}
* Adds colors to the preflight check results to improve the user experience.

### Bug Fixes {#bug-fixes-v2023-03-07-0}
* Fixes an issue when migrating from Weave to Flannel that incorrectly prompted to load images with the airgap flag on online systems and without it on air gap systems.
* Fixes an issue that causes an HA install to fail after a node has been reset, with the error "stat: cannot stat '/etc/kubernetes/manifests/haproxy.yaml': No such file or directory".

## v2023.02.23-0

Released on February 23, 2023

### New Features {#new-features-v2023-02-23-0}
* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.63.0-45.2.0.
+* Adds [Weave add-on](https://kurl.sh/docs/add-ons/weave) versions 2.6.5-20230222 and 2.8.1-20230222 to address the following high severity CVEs: CVE-2022-4450, CVE-2023-0215, CVE-2023-0286.
* Updates [Registry add-on](https://kurl.sh/docs/add-ons/registry) version 2.8.1 with new kurlsh/s3cmd image to address the following high severity CVEs: CVE-2022-4450, CVE-2023-0215, CVE-2023-0286.
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-02-22T18-23-45Z.
* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.26.4.
* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.10.1.

### Improvements {#improvements-v2023-02-23-0}
* kURL no longer chooses the node name and instead defers to kubeadm to infer the node name from the hostname.

### Bug Fixes {#bug-fixes-v2023-02-23-0}
* Fixes an issue where EKCO serialized an incorrect kubeadm `ClusterStatus(kubeadm.k8s.io/v1beta2)` config when purging a node with [`ekco-purge-node.sh`](https://kurl.sh/docs/add-ons/ekco#purge-nodes) for Kubernetes version 1.21 and earlier. Moreover, this bug prevented adding new nodes to the Kubernetes cluster.

## v2023.02.21-0

Released on February 21, 2023

### New Features {#new-features-v2023-02-21-0}
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-02-17T17-52-43Z.

### Bug Fixes {#bug-fixes-v2023-02-21-0}
* Fixes an issue that causes the install script to fail with the error "ctr: flags --detach and --rm cannot be specified together" when using Containerd 1.6.18 and the EKCO internal load balancer.

## v2023.02.17-0 - Withdrawn

Released on February 17, 2023

:::important
v2023.02.17-0 has been removed because Containerd 1.6.18 is incompatible with high availability installations using the EKCO internal load balancer.
:::

### New Features {#new-features-v2023-02-17-0}
* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) versions 0.63.0-45.1.0 and 0.63.0-45.1.1.
* Adds [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs) version 3.4.0.
* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.18.

### Bug Fixes {#bug-fixes-v2023-02-17-0}
* Fixes an issue that causes Rook multi-version upgrades to fail if add-on airgap packages exist on the server prior to upgrading.
* Fixes a rare race condition that could cause data loss when migrating between storage providers.

## v2023.02.16-0

Released on February 16, 2023

### New Features {#new-features-v2023-02-16-0}
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-02-10T18-48-39Z.
* Warns the user if there is likely to be insufficient space to upgrade Rook multiple versions.

### Bug Fixes {#bug-fixes-v2023-02-16-0}
* Fixes a misconfiguration in the kubelet that caused Kubernetes to garbage collect the pause image, which caused new containers to fail to start and get stuck in ContainerCreating.

## v2023.02.14-0

Released on February 14, 2023

### New Features {#new-features-v2023-02-14-0}
* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.10.1.
* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.24.1.
* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.21.1.
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-02-09T05-16-53Z.

### Bug Fixes {#bug-fixes-v2023-02-14-0}
* Fixes a broken link to the Rook zapping procedure in the output of the installation script.
+* Changes the kubelet service file permissions to 600 to fix CIS benchmark failure 4.1.1: "Ensure that the kubelet service file permissions are set to 600 or more restrictive".
* Fixes an issue where containers were stuck in a ContainerCreating state after a Kubernetes upgrade.

## v2023.02.06-1

Released on February 6, 2023

### Bug Fixes {#bug-fixes-v2023-02-06-1}
* Fixes an issue in [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.9.5 where restores fail to pull the `velero-restic-restore-helper` image in air gapped environments.

## v2023.02.06-0

Released on February 6, 2023

### New Features {#new-features-v2023-02-06-0}
* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.21.0.

### Improvements {#improvements-v2023-02-06-0}
* If there are multiple network interfaces on a single host, the [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) prompts users to choose an interface or use the interface of the [private-address](https://kurl.sh/docs/install-with-kurl/advanced-options#reference) flag when specified, instead of using the default gateway interface.
* Prompts users when preflight warnings occur, and allows users to cancel the installation and fix the root cause before resuming the installation.

### Bug Fixes {#bug-fixes-v2023-02-06-0}
* Fixes an issue where the Prometheus adapter was not able to install custom metrics due to an incorrect URL to the Prometheus service.
* Fixes an issue where running kubectl commands with Kubernetes version 1.26 generated the warning "Got empty response for: custom.metrics.k8s.io/v1beta1".

## v2023.02.02-0

Released on February 2, 2023

### New Features {#new-features-v2023-02-02-0}
* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.24.0.
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-01-31T02-24-19Z.
* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.26.3.
* Flannel CNI is no longer supported with the Docker container runtime. Containerd is required.

### Improvements {#improvements-v2023-02-02-0}
* When upgrading multiple versions of Rook, users can download a single air gap bundle containing all versions of the Rook air gap packages, instead of downloading each version package separately.

## v2023.01.31-0

Released on January 31, 2023

### New Features {#new-features-v2023-01-31-0}
* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.26.1, 1.25.6, 1.24.10, and 1.23.16.
* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.16.
* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.9.5.
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-01-25T00-19-54Z.
* Adds [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.56.15.
* Adds a `serverFlags` configuration option to the [Velero add-on](https://kurl.sh/docs/add-ons/velero) to allow users to pass additional flags to the `velero server` command in the Velero pod. This can also be set using the [velero-server-flags](https://kurl.sh/docs/install-with-kurl/advanced-options#reference) CLI flag when running the install script.

### Improvements {#improvements-v2023-01-31-0}
* Adds TCP connection host preflight checks for ports 2379 and 6443.
* Adds [Weave add-on](https://kurl.sh/docs/add-ons/weave) version 2.8.1-20230130 to address the following high severity CVE: [CVE-2022-43551](https://avd.aquasec.com/nvd/cve-2022-43551).
+* Adds a warning message when Flannel is the cluster CNI suggesting the user check that UDP port 8472 is open when joining a node or migrating from Weave to Flannel.
* Adds a Flannel UDP port 8472 status preflight check.

### Bug Fixes {#bug-fixes-v2023-01-31-0}
* Fixes an error due to missing images from registry.k8s.io when updating Kubernetes from 1.21 to 1.23.{0-14} and 1.22 to 1.24.{0-8} in airgapped environments.
* Fixes an issue that could cause Flannel pods on remote airgapped nodes to fail with ImagePullBackoff errors.
* Fixes an issue that could cause single node upgrades to Rook add-on version 1.6.11 with Ceph filesystem enabled to fail with the error "filesystem-singlenode.yaml: No such file or directory".

## v2023.01.23-0

Released on January 23, 2023

### New Features {#new-features-v2023-01-23-0}
* Allows migrating multi-node [Weave](https://kurl.sh/docs/add-ons/weave) installations to [Flannel](https://kurl.sh/docs/add-ons/flannel).
* The [Rook add-on](https://kurl.sh/docs/add-ons/rook) can now be upgraded from version 1.0.x to 1.10.8, the latest supported Rook version, as part of the installation script.
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) versions RELEASE.2023-01-12T02-06-16Z, RELEASE.2023-01-18T04-36-38Z, and RELEASE.2023-01-20T02-05-44Z.
* Adds [metrics-server add-on](https://kurl.sh/docs/add-ons/metrics-server) version 0.6.2.

### Bug Fixes {#bug-fixes-v2023-01-23-0}
* Creates .kube/config for installations where .kube/config was not created.

## v2023.01.13-1

Released on January 13, 2023

### Bug Fixes {#bug-fixes-v2023-01-13-1}
* Reverts a bug fix made in v2023.01.03-0 which caused `.kube/config` to not be created. For more information, see [Known Issue](#known-issues-v2023-01-13-0) below.

## v2023.01.13-0

:::important
The Kubernetes installer v2023.01.13-0 has a known issue that affects the creation of .kube/config in the home directory. See [Known Issue](#known-issues-v2023-01-13-0) below. This issue is resolved in v2023.01.13-1.
:::

Released on January 13, 2023

### New Features {#new-features-v2023-01-13-0}
* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.9.5.
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) versions RELEASE.2023-01-02T09-40-09Z and RELEASE.2023-01-06T18-11-18Z.
* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.10.8.
* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.15.
* Adds automatic data migration from Longhorn to OpenEBS.
* Adds a migration path for Weave to Flannel on single-node Kubernetes clusters. This migration requires downtime.
* Adds logs for kURL execution, which can be found under `/var/log/kurl/`.

### Bug Fixes {#bug-fixes-v2023-01-13-0}
* Fixes an issue where the process got stuck in failure scenarios when upgrading Rook from `1.0.4` to `1.4.9`, by adding timeouts and improving log information.
* Fixes upgrading Rook from `1.0.4-14.2.21` to `1.4.9`.
* Fixes a bug on Ubuntu where the installer would sometimes remove packages when attempting to install Kubernetes.
* Fixes a timeout waiting for new versions of Rook and Ceph to roll out on upgrades by increasing wait timeouts from 10 to 20 minutes.

### Known Issue {#known-issues-v2023-01-13-0}

This issue is resolved in v2023.01.13-1.

v2023.01.13-0 has a known issue where the .kube/config might not be created in the home directory. This does not affect the ability to access the cluster when you run `bash -l` with kubectl.
+ 
If you cannot connect to the cluster with kubectl or did not find the .kube/config file, Replicated recommends that you copy .kube/config to your home directory and grant your user permissions to it. For example:

```
cp /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
```

## v2023.01.03-0

Released on January 3, 2023

:::important
v2023.01.03-0 has a known issue that can cause critical system packages to be removed from Ubuntu machines. This known issue is resolved in v2023.01.13-1. To avoid this known issue, do not upgrade to v2023.01.03-0, and instead upgrade directly to v2023.01.13-1.
:::

### New Features {#new-features-v2023-01-03-0}
* The [Rook add-on](https://kurl.sh/docs/add-ons/rook) can now be upgraded and migrated from version 1.4.3 up to version 1.7.x as part of the installation script.
* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.26.2.
* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.23.2.
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2022-12-12T19-27-27Z.
* Adds [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) versions 0.56.13 and 0.56.14.
* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.26.0, 1.25.5, 1.24.9, 1.23.15, and 1.22.17.
* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) versions 1.6.13 and 1.6.14.

### Improvements {#improvements-v2023-01-03-0}
* Disk and volume validation checks now run prior to migrating from Rook to OpenEBS. A failed validation check aborts the upgrade.

### Bug Fixes {#bug-fixes-v2023-01-03-0}
* Fixes installation conflicts when installing the containerd add-on and Docker is already installed on the host. Now the installation checks to see if Docker is installed and provides users with the option to automatically remove Docker.
* Fixes an issue where EKCO's provisioned HAProxy load balancer pod crashed when it did not have access to the config file.
* Fixes an issue that causes air gapped upgrades to Rook add-on version 1.7.11 to fail with ImagePullBackoff errors.
* Fixes an issue with the Docker preflight check not failing on some unsupported operating systems.
* Fixes an issue that could cause Rook upgrades to fail if EKCO is scaled down, due to failures to recreate the Rook OSD deployments when the rook-priority.kurl.sh MutatingAdmissionWebhook is unreachable.

## v2022.12.12-0

Released on December 12, 2022

### New Features {#new-features-v2022-12-12-0}
* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.25.5, 1.24.9, 1.23.15, and 1.22.17.
* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) versions 1.6.11 and 1.6.12.
* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) versions 0.26.0 and 0.26.1.
* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.9.4.
* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.20.2.
* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.23.1.

### Bug Fixes {#bug-fixes-v2022-12-12-0}
* Fixes an issue that prevented upgrading from Rook 1.0.4 to 1.4.9 due to the error "pool(s) have non-power-of-two pg_num".
* Fixes an issue that caused Rook add-on upgrades from 1.0.4 to 1.4.9 to hang indefinitely with 50% pgs degraded when the EKCO add-on is included in the upgrade spec.
+* Fixes an issue that prevented containerd.io from being installed or upgraded when the host has the docker.io package installed on Ubuntu.
* Fixes preflight checks to only recommend Docker Enterprise Edition to RHEL installs when containerd is not selected.
* Fixes an issue where a deprecated version of Docker was being installed when Docker or containerd add-on versions were not explicitly set.

## v2022.11.29-0

Released on November 29, 2022

### New Features {#new-features-v2022-11-29-0}
* Adds [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.56.12.
* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.10.
* Adds [Weave add-on](https://kurl.sh/docs/add-ons/weave) version 2.8.1-20221122 to address the following high and critical severity CVEs: CVE-2022-42915, CVE-2022-42916.
* Adds [Weave add-on](https://kurl.sh/docs/add-ons/weave) version 2.6.5-20221122 to address the following high and critical severity CVEs: CVE-2022-42915, CVE-2022-42916.

### Improvements {#improvements-v2022-11-29-0}
* Binaries installed by kURL into /usr/local/bin are now owned by root.
* Containerd add-on versions are now shipped with the respective supported runc version. Containerd add-on versions 1.6.4 and later are built with runc version `v1.1.4` instead of `v1.0.0-rc95`.

### Bug Fixes {#bug-fixes-v2022-11-29-0}
* Fixes an issue that causes Rook add-on version 1.0.4-14.2.21 to fail to install on Oracle Linux 7 with host dependency resolution errors.
* Fixes an issue that causes Rook upgrades to unnecessarily pause for an extended period of time, with the message "failed to wait for Rook", before proceeding with the upgrade.
* Fixes an issue that leaves the EKCO operator scaled down to 0 replicas when upgrading a cluster with Rook add-on versions 1.8.10 and 1.9.12.

## v2022.11.16-1

Released on November 16, 2022

### Bug Fixes {#bug-fixes-v2022-11-16-1}
* Fixes a bug that blocked installations.

## v2022.11.16-0

Released on November 16, 2022

### New Features {#new-features-v2022-11-16-0}
* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.25.4, 1.24.8, 1.23.14, and 1.22.16.
* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.9.3.
* PVMigrate now checks for available disk space before starting to migrate volumes.
* RHEL 8.7 and Oracle Linux 8.7 are now supported.

## v2022.11.10-1

Released on November 10, 2022

### Bug Fixes {#bug-fixes-v2022-11-10-1}
* Fixes an issue where [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.89.0+ fails to install when a proxy is configured.

## v2022.11.10-0

Released on November 10, 2022

### Improvements {#improvements-v2022-11-10-0}
* For OpenEBS add-on versions 3.3.0 and above, the OpenEBS Local PV storage class is now the default if no other storage class is specified. Previously, OpenEBS was only the default if `openebs.localPVStorageClassName` was set to `"default"` (see the sketch below).

### Bug Fixes {#bug-fixes-v2022-11-10-0}
* Fixes an issue that could cause installations or upgrades to fail with the error "syntax error: operand expected (error token is ""0" + "1"")" on RHEL 7 based distributions.
* Fixes an issue that causes installations to fail with no default storage class for specs with `openebs.localPVStorageClassName` set to anything other than `"default"` and no other CSI add-on specified.
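
To illustrate the v2022.11.10-0 default storage class change noted above, the sketch below shows the relevant OpenEBS fragment of a kURL spec; the class name is illustrative. With OpenEBS add-on 3.3.0 and later, this class becomes the cluster default even though it is not named `default`.

```
spec:
  openebs:
    version: 3.3.0
    isLocalPVEnabled: true
    localPVStorageClassName: local   # any name; now becomes the default storage class
```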
+ 
## v2022.11.09-0

Released on November 9, 2022

### New Features {#new-features-v2022-11-09-0}
* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.25.0. This version adds management of the rqlite StatefulSet deployed by the app manager. If a Kubernetes installer cluster has at least three healthy nodes and the OpenEBS localpv storage class is available, rqlite is scaled up to three replicas for data replication and high availability.

## v2022.11.07-0

Released on November 7, 2022

### New Features {#new-features-v2022-11-07-0}
* Removes support for the BETA K3s add-on and the BETA RKE2 add-on. It is recommended to use the [OpenEBS add-on](https://kurl.sh/docs/add-ons/openEBS#localpv) for the single-node LocalPV use case with kURL. For more information about this decision, see the [ADR](https://github.com/replicatedhq/kURL/blob/main/docs/arch/adr-007-deprecate-k3s-and-rke2.md).
* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.24.1.
* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.20.1.
* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.60.1-41.7.3.

### Bug Fixes {#bug-fixes-v2022-11-07-0}
* Fixes CRD errors when updating from Prometheus 0.49.0-17.1.3 on Kubernetes versions that do not support Server-Side Apply.

## v2022.11.02-0

Released on November 2, 2022

### New Features {#new-features-v2022-11-02-0}
* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.9.
* Adds [Goldpinger add-on](https://kurl.sh/docs/add-ons/goldpinger) version 3.7.0-5.5.0.
* Adds [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.56.11.

### Improvements {#improvements-v2022-11-02-0}
* Prompts and warns users of downtime before migrating from Rook-backed PersistentVolumeClaims to OpenEBS Local PV when OpenEBS is included in the specification and Rook is removed. For migration information, see [Migrating to Change kURL CSI Add-ons](https://kurl.sh/docs/install-with-kurl/migrating-csi).
* Updates the kurlsh/s3cmd image to tag 20221029-37473ee for [Registry add-on](https://kurl.sh/docs/add-ons/registry) version 2.8.1 and [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.9.2, to address the high severity CVE: CVE-2022-43680.

### Bug Fixes {#bug-fixes-v2022-11-02-0}
* Fixes an issue that could cause the migration from Rook-backed PersistentVolumeClaims to unnecessarily hang for 5 minutes. For migration information, see [Migrating to Change kURL CSI Add-ons](https://kurl.sh/docs/install-with-kurl/migrating-csi).
* Fixes an issue that could cause kURL to attempt to migrate the CRI from Docker to Containerd when the CRI is already Containerd.
* Fixes an issue with [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs) versions 1.12.0 and 2.6.0 that could cause installations to fail with the error `failed calling webhook "admission-webhook.openebs.io"`.
* Fixes an issue that could cause the kURL installer to disable EKCO management of the internal load balancer during an upgrade. See [Internal Load Balancer](https://kurl.sh/docs/add-ons/ekco#internal-load-balancer).
* Fixes an issue where the [Weave add-on](https://kurl.sh/docs/add-ons/weave) "latest" version resolved to 2.6.5-20221006 instead of 2.6.5-20221025.
* Fixes an issue where kURL would migrate to both OpenEBS Local PV and Longhorn from Rook-backed PersistentVolumeClaims when both add-ons are included in the specification and Rook is removed. kURL now prefers to migrate to OpenEBS.
+ 
## v2022.10.28-1

Released on October 28, 2022

### Bug Fixes {#bug-fixes-v2022-10-28-1}
* Fixes an issue that causes kURL to erroneously prompt the end user for a Rook to OpenEBS Local PV migration when upgrading with OpenEBS version 3.3.0 included in the spec.

## v2022.10.28-0

Released on October 28, 2022

### New Features {#new-features-v2022-10-28-0}
* When Rook is installed on the cluster but not included in the kURL spec, the OpenEBS add-on version 3.3.0 and later automatically migrates any Rook-backed PersistentVolumeClaims (PVCs) to OpenEBS Local PV.

### Improvements {#improvements-v2022-10-28-0}
* The replicatedhq/local-volume-provider image has been updated to v0.4.0 for [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.9.2, which addresses the following high and critical CVEs: CVE-2021-33574, CVE-2021-35942, CVE-2022-23218, CVE-2022-23219, CVE-2020-1752, CVE-2020-6096, CVE-2021-3326, CVE-2021-3999.

## v2022.10.26-0

Released on October 26, 2022

### New Features {#new-features-v2022-10-26-0}
* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.24.0.
* MinIO deploys a highly available StatefulSet with EKCO when the OpenEBS localpv storage class is enabled and at least three nodes are available. For more information, see [Manage MinIO with EKCO](https://kurl.sh/docs/add-ons/ekco#manage-minio-with-ekco) in _EKCO Add-on_ in the kURL documentation.

### Improvements {#improvements-v2022-10-26-0}
* Adds [Weave add-on](https://kurl.sh/docs/add-ons/weave) versions 2.6.5-20221025 and 2.8.1-20221025 to address the following high severity CVEs: CVE-2022-40303, CVE-2022-40304.

## v2022.10.24-0

Released on October 24, 2022

### New Features {#new-features-v2022-10-24-0}
* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.20.0.
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2022-10-20T00-55-09Z.

## v2022.10.21-0

Released on October 21, 2022

### New Features {#new-features-v2022-10-21-0}
* [Rook add-on](https://kurl.sh/docs/add-ons/rook) versions 1.9.12 and later are now supported on Kubernetes 1.25.
* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.25.3, 1.24.7, and 1.23.13.
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2022-10-15T19-57-03Z.
* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.23.0.
* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.23.2, which addresses the following high and critical CVEs: CVE-2021-33574, CVE-2021-35942, CVE-2022-23218, CVE-2022-23219, CVE-2020-1752, CVE-2020-6096, CVE-2021-3326, CVE-2021-3999.
* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.23.1.
* For [Rook add-on](https://kurl.sh/docs/add-ons/rook) versions 1.9.12 and later, [Ceph metrics collection and a Ceph Grafana dashboard](https://kurl.sh/docs/add-ons/rook#monitor-rook-ceph) are now enabled when the Prometheus add-on is installed.
* The replicatedhq/local-volume-provider image has been updated to v0.3.10 for [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.9.2 to address CVE-2022-37434 with critical severity.

### Bug Fixes {#bug-fixes-v2022-10-21-0}
* Fixes an issue that causes the .kube/config to get removed on a Kubernetes upgrade.
* With the release of [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.23.1, fixes an issue that could cause EKCO to fail to perform operations that depend on the Rook version during Rook upgrades, including maintaining CSI Pod resources and scaling the ceph-mgr Pod replica count.
* With the release of [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.23.2, fixes an issue that causes upgrades of Kubernetes to fail on secondary nodes when the EKCO [Internal Load Balancer](https://kurl.sh/docs/add-ons/ekco#internal-load-balancer) is enabled.
* Fixes an issue that causes EKCO to log RBAC errors when the Rook add-on is not installed.

## v2022.10.13-0

Released on October 13, 2022

### New Features {#new-features-v2022-10-13-0}
* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.9.12.
* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.23.0 with support for Rook 1.9.

### Bug Fixes {#bug-fixes-v2022-10-13-0}
* Fixes an issue that could prevent the EKCO deployment from scaling back up from zero replicas after running the Kubernetes installer script.

## v2022.10.10-0

Released on October 10, 2022

### New Features {#new-features-v2022-10-10-0}
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2022-10-08T20-11-00Z.
* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.8.10.

### Bug Fixes {#bug-fixes-v2022-10-10-0}
* Fixes an issue that could cause installations to fail with the error "yaml: did not find expected node content" when installing behind an HTTP_PROXY.

## v2022.10.07-0

Released on October 7, 2022

### New Features {#new-features-v2022-10-07-0}

* New KOTS add-on versions are now automatically added to the Kubernetes installer upon each new release of KOTS.

  This means that a new Kubernetes installer release is no longer needed to make a new version of KOTS available, so the addition of new KOTS add-on versions is no longer stated in the Kubernetes installer release notes.
  For information about the features, improvements, and bug fixes included in each new version of KOTS, see the [App Manager Release Notes](https://docs.replicated.com/release-notes/rn-app-manager).
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) versions RELEASE.2022-10-05T14-58-27Z and RELEASE.2022-10-02T19-29-29Z.
* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.22.0.

### Improvements {#improvements-v2022-10-07-0}
* Adds [Weave add-on](https://kurl.sh/docs/add-ons/weave) versions 2.6.5-20221006 and 2.8.1-20221006 to address the following critical CVEs: CVE-2022-2795, CVE-2022-2881, CVE-2022-2906, CVE-2022-3080, CVE-2022-38177, CVE-2022-38178.
* Updates the kurlsh/s3cmd image to tag 20221006-27d5371 for the latest [Registry](https://kurl.sh/docs/add-ons/registry) and [Velero](https://kurl.sh/docs/add-ons/velero) add-on versions to address the following critical CVE: CVE-2022-40674.

## v2022.09.30-0

Released on September 30, 2022

### New Features {#new-features-v2022-09-30-0}
* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.21.1.
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2022-09-25T15-44-53Z.
* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.9.2.
* Adds [Goldpinger add-on](https://kurl.sh/docs/add-ons/goldpinger) version 3.6.1-5.4.2.
* Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.86.1.

## v2022.09.28-0

Released on September 28, 2022

### New Features {#new-features-v2022-09-28-0}
* Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.86.0.

## v2022.09.23-0

Released on September 23, 2022

### New Features {#new-features-v2022-09-23-0}
* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.59.1-40.1.0.
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2022-09-17T00-09-45Z.
* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.25.2, 1.24.6, 1.23.12, and 1.22.15.

### Improvements {#improvements-v2022-09-23-0}
* Improves messaging while upgrading the Rook-Ceph add-on to newer versions.
* When run on an unsupported operating system, kURL now links to the [list of supported systems](https://kurl.sh/docs/install-with-kurl/system-requirements#supported-operating-systems).
* Online installations now download files from kurl.sh instead of Amazon S3.

## v2022.09.19-0

Released on September 19, 2022

### New Features {#new-features-v2022-09-19-0}
* Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.85.0.

### Bug Fixes {#bug-fixes-v2022-09-19-0}
* Fixes an issue that could cause air gapped Kubernetes upgrades to fail Sonobuoy tests with a missing image.

## v2022.09.16-0

Released on September 16, 2022

### New Features {#new-features-v2022-09-16-0}
* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.25.1, 1.25.0, 1.24.5, 1.23.11, and 1.22.14.
* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.58.0-39.12.1.
* Improves output when waiting for rook-ceph to become healthy.

### Improvements {#improvements-v2022-09-16-0}
* Updates the replicatedhq/local-volume-provider image to v0.3.8 for [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.9.1 to address CVE-2022-2509 with high severity.

### Bug Fixes {#bug-fixes-v2022-09-16-0}
* Fixes an issue that prevents upgrading Kubernetes to 1.24.x if the CRI has previously been migrated from Docker to Containerd.
* Fixes an issue that causes stateful pods mounting Persistent Volumes to get stuck in a `Terminating` state when upgrading single node Kubernetes clusters and using the [Longhorn add-on](https://kurl.sh/docs/add-ons/longhorn).

## v2022.09.12-0

Released on September 12, 2022

### New Features {#new-features-v2022-09-12-0}
* Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.84.0.
* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.21.0.
* [Rook 1.0.x to 1.4.9 upgrades](https://kurl.sh/docs/add-ons/rook#upgrades) can now be completed in airgapped clusters.

### Bug Fixes {#bug-fixes-v2022-09-12-0}
* [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) versions 0.21.0 and greater now forcefully delete Envoy pods that change from a ready state to one where the Envoy container is not ready and have been in that state for at least 5 minutes. This was added as a workaround for a [known issue](https://github.com/projectcontour/contour/issues/3192) that may be caused by resource contention.

## Release v2022.09.08-1

Released on September 8, 2022

### New Features {#new-features-v2022-09-08-1}
* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.22.1.
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2022-09-07T22-25-02Z.

### Improvements {#improvements-v2022-09-08-1}
* The [Cert Manager add-on](https://kurl.sh/docs/add-ons/cert-manager) now supports upgrading from 1.0.3 to 1.9.1.
* The Rook 1.0 to 1.4 migration now prompts the user to load images used by the migration on other nodes before starting.

## Release v2022.09.08-0

Released on September 8, 2022

### New Features {#new-features-v2022-09-08-0}
* Adds support for the [Docker add-on](https://kurl.sh/docs/add-ons/docker) on Ubuntu version 22.04.
* Adds [Cert Manager add-on](https://kurl.sh/docs/add-ons/cert-manager) version 1.9.1.
* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.8.
* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2022-09-01T23-53-36Z.
* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.58.0-39.11.0.

## Release v2022.09.01-1

Released on September 1, 2022

### New Features {#new-features-v2022-09-01-1}

* Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.83.0.

## Release v2022.09.01-0

Released on September 1, 2022

### New Features {#new-features-v2022-09-01-0}
* The [Rook add-on](https://kurl.sh/docs/add-ons/rook) can now be upgraded from version 1.0.x to 1.4.x or 1.5.x as part of the installation script, for internet-connected installations only.
  Upgrading from version 1.0.x to 1.4.x or 1.5.x migrates data off of any hostpath-based OSDs in favor of block device-based OSDs, and performs a rolling upgrade through Rook versions 1.1.9, 1.2.7, and 1.3.11 before installing 1.4.9 (and 1.5.12 if applicable).
  The upstream Rook project introduced a requirement for block storage in versions 1.3.x and later.
* Adds [Docker add-on](https://kurl.sh/docs/add-ons/docker) version 20.10.17.
  Note that Ubuntu version 22.04 only supports Docker version 20.10.17 and later.

### Bug Fixes {#bug-fixes-v2022-09-01-0}
* Fixes an issue that causes migrations from Docker to containerd to fail due to an uninstalled `docker-scan-plugin` package.
* Fixes an issue that causes migrations from Rook to Longhorn 1.3.1 to fail due to two conflicting default storage classes.

## Release v2022.08.25-0

Released on August 25, 2022

### New Features {#new-features-v2022-08-25-0}

- Adds [Kubernetes add-on](https://kurl.sh/docs/add-ons/kubernetes) versions 1.24.4, 1.23.10, 1.22.13, and 1.21.14.
- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kots) version 1.82.0.
- Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2022-08-22T23-53-06Z.
- Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.58.0-39.9.0.
- Adds [Weave add-on](https://kurl.sh/docs/add-ons/weave) versions 2.6.5-20220825 and 2.8.1-20220825 to address the following critical severity CVE: CVE-2022-37434.

### Improvements {#improvements-v2022-08-25-0}

- Removes support for the BETA Local Path Provisioner add-on. It is recommended to use the [OpenEBS](https://kurl.sh/docs/add-ons/openEBS#localpv) add-on for the LocalPV use case.
- The Rook [1.0 to 1.4 task](https://kurl.sh/docs/add-ons/rook#upgrades) now prints new lines when waiting for pods to be rolled out, OSDs to be added, or certain migrations to complete. Previously, one line was printed and then overwritten with updates.
- Updates the kurlsh/s3cmd image to tag 20220825-237c19d for the latest [Registry](https://kurl.sh/docs/add-ons/registry) and [Velero](https://kurl.sh/docs/add-ons/velero) add-on versions to address the following critical severity CVE: CVE-2022-37434.

### Bug Fixes {#bug-fixes-v2022-08-25-0}

- Fixes the [reset task](https://kurl.sh/docs/install-with-kurl/managing-nodes#reset-a-node), which failed when unable to find the kurlsh/weaveexec image.
- Fixes the [Rook 1.0 to 1.4 task](https://kurl.sh/docs/add-ons/rook#upgrades), which would wait for health indefinitely after upgrading to 1.4.9 on single-node installations.

## Release v2022.08.23-0

Released on August 23, 2022

### New Features {#new-features-v2022-08-23-0}

- Adds new [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs) version 3.3.0.

### Bug Fixes {#bug-fixes-v2022-08-23-0}

- Fixes an issue that causes Weave versions 2.6.x and 2.8.x to resolve to incorrect versions without the latest CVE fixes.
- Updates the replicatedhq/local-volume-provider image to v0.3.7 for [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.9.1 to address CVE-2021-44716, CVE-2021-33194, and CVE-2022-21221 with high severity.

## Release v2022.08.22-0

Released on August 22, 2022

### New Features {#new-features-v2022-08-22-0}

- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kots) version 1.81.1.
- Adds [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.56.10.

## Release v2022.08.19-0

Released on August 19, 2022

### New Features {#new-features-v2022-08-19-0}

- Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.7.11.
  - Upgrades the Ceph cluster from Octopus to [Pacific](https://docs.ceph.com/en/quincy/releases/pacific/).
- Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.20.0 with support for [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.7.11.
- Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.9.1.
- Adds a new tasks.sh command, [`rook-10-to-14`](https://kurl.sh/docs/add-ons/rook#upgrades), that upgrades Rook 1.0 installations to Rook 1.4.9. This command only works for online installations.

### Improvements {#improvements-v2022-08-19-0}

- The [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) auto-upgrades experimental feature is no longer supported as of EKCO version 0.20.0.

### Bug Fixes {#bug-fixes-v2022-08-19-0}

- Fixes an issue that causes Rook upgrades to fail on single node installations because of Rook MDS pod anti-affinity rules.
- Fixes an issue that can cause a migration from Docker to Containerd to fail due to listing nodes using the incorrect Kubernetes API resource group.

## Release v2022.08.16-0

Released on August 16, 2022

### New Features {#new-features-v2022-08-16-0}

- Adds [Longhorn add-on](https://kurl.sh/docs/add-ons/longhorn) version 1.3.1.
- Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.7.

## Release v2022.08.12-1

Released on August 12, 2022

### New Features {#new-features-v2022-08-12-1}

- Adds KOTS add-on version 1.81.0. See [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm).

## Release v2022.08.12-0

Released on August 12, 2022

### Bug Fixes {#bug-fixes-v2022-08-12-0}

- Fixes an issue that causes snapshots to fail after a Rook to MinIO migration.

## Release v2022.08.11-0

Released on August 11, 2022

### Improvements {#improvements-v2022-08-11-0}

- Adds Collectd Ubuntu 22.04 compatibility to host preflight checks.
- Adds additional no-proxy addresses to the join command.

## Release v2022.08.08-0

Released on August 8, 2022

### New Features {#new-features-v2022-08-08-0}

- Adds Ubuntu 22.04 support.
- Adds KOTS add-on version 1.80.0. See [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm).

### Improvements {#improvements-v2022-08-08-0}

- Adds a new preflight check to disallow the Docker add-on installation on Ubuntu 22.04.

### Bug Fixes {#bug-fixes-v2022-08-08-0}

- Fixes an issue that could cause downloading add-on packages to fail with a TAR error.

## Release v2022.08.04-0

Released on August 4, 2022

### New Features {#new-features-v2022-08-04-0}

- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.79.0.
- Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.22.0.
- Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.58.0-39.4.0.
- Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2022-08-02T23-59-16Z.

### Improvements {#improvements-v2022-08-04-0}

- The install script now retries add-on package downloads for some failure scenarios.

### Bug Fixes {#bug-fixes-v2022-08-04-0}

- Fixes an issue introduced in kURL version v2022.08.03-0 that improperly sets `auth_allow_insecure_global_id_reclaim` to true for new installations.

## Release v2022.08.03-0

Released on August 3, 2022

### New Features {#new-features-v2022-08-03-0}

- Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.6.11.
- Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.19.9.

### Bug Fixes {#bug-fixes-v2022-08-03-0}

- Fixes an issue in [Rook add-on](https://kurl.sh/docs/add-ons/rook) versions 1.5.11 and 1.5.12 that could cause Rook upgrades to fail from versions prior to 1.5.11 due to `auth_allow_insecure_global_id_reclaim` improperly set to `false` for [unpatched Ceph versions](https://docs.ceph.com/en/quincy/security/CVE-2021-20288/).
- Fixes an issue in [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) versions prior to 0.19.9 that could cause Ceph to remain in a `HEALTH_WARN` state for as long as an hour on a new installation.

## Release v2022.07.29-0

Released on July 29, 2022

### New Features {#new-features-v2022-07-29-0}

- Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.19.6.

### Improvements {#improvements-v2022-07-29-0}

- kURL is now [CIS Kubernetes Benchmark](https://www.cisecurity.org/benchmark/kubernetes) compliant using the latest [github.com/aquasecurity/kube-bench](https://github.com/aquasecurity/kube-bench) version v0.6.8 when the property `kubernetes.cisCompliance` is set to `true` (see the sketch after this entry).

### Bug Fixes {#bug-fixes-v2022-07-29-0}

- Fixes an issue in [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) versions prior to 0.19.6 that causes unnecessary downtime when adding additional primary nodes and using the EKCO [internal load balancer](https://kurl.sh/docs/add-ons/ekco#internal-load-balancer).
- Fixes an issue in [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) versions prior to 0.19.6 that causes long running kubectl commands such as `kubectl logs` or `kubectl exec` to time out after 20 seconds of inactivity when using the EKCO [internal load balancer](https://kurl.sh/docs/add-ons/ekco#internal-load-balancer).
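As a rough illustration of the `kubernetes.cisCompliance` property referenced above, a minimal installer spec might look like the following sketch; the metadata name and Kubernetes version are placeholders.

```yaml
# Illustrative spec only: enabling CIS hardening via the installer spec.
apiVersion: cluster.kurl.sh/v1beta1
kind: Installer
metadata:
  name: cis-compliant # placeholder name
spec:
  kubernetes:
    version: 1.23.x # placeholder version
    cisCompliance: true # configures the instance to meet the CIS benchmark
```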

## Release v2022.07.28-0

Released on July 28, 2022

### New Features {#new-features-v2022-07-28-0}

- Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.19.3.
- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.78.0.

### Improvements {#improvements-v2022-07-28-0}

- Updates the haproxy image to tag 2.6.2-alpine3.16 for [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.19.3 to address the following critical severity CVEs: CVE-2022-1586, CVE-2022-1587.
- The property `kubernetes.loadBalancerUseFirstPrimary`, and the equivalent flag `kubernetes-load-balancer-use-first-primary`, has been added to automatically use the first primary address as the cluster control plane endpoint. This setting is not recommended. Enable the [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) and use the property `ekco.enableInternalLoadBalancer` instead (see the sketch after the v2022.07.15-1 entry below).

### Bug Fixes {#bug-fixes-v2022-07-28-0}

- Fixes an issue with [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) versions prior to 0.19.3 that caused registry certificates to be generated already expired upon renewal.

## Release v2022.07.22-0

Released on July 22, 2022

### New Features {#new-features-v2022-07-22-0}

- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.77.0.

### Improvements {#improvements-v2022-07-22-0}

- Updates the kurlsh/s3cmd image to tag 20220722-4585dda for the latest [Registry](https://kurl.sh/docs/add-ons/registry) and [Velero](https://kurl.sh/docs/add-ons/velero) add-on versions, to address the following high severity CVEs: CVE-2022-30065, CVE-2022-2097.

## Release v2022.07.20-0

Released on July 20, 2022

### New Features {#new-features-v2022-07-20-0}

- Adds [Weave add-on](https://kurl.sh/docs/add-ons/weave) versions 2.6.5-20220720 and 2.8.1-20220720 with a fix for a broken iptables command on RHEL 8-based distributions.
- Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2022-07-17T15-43-14Z.

### Bug Fixes {#bug-fixes-v2022-07-20-0}

- Fixes an issue on RHEL 8-based distributions that causes the iptables command to report the error `table "filter" is incompatible, use 'nft' tool` when using [Weave add-on](https://kurl.sh/docs/add-ons/weave) versions 2.6.5-20220616 and 2.8.1-20220616.

## Release v2022.07.15-2

Released on July 15, 2022

### Improvements {#improvements-v2022-07-15-2}

- Updates the local-volume-provider image to v0.3.6 for the [Velero add-on](https://kurl.sh/docs/add-ons/velero) to address CVE-2021-38561 with high severity.

## Release v2022.07.15-1

Released on July 15, 2022

### New Features {#new-features-v2022-07-15-1}

- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.76.1.
- Adds [Kubernetes add-on](https://kurl.sh/docs/add-ons/kubernetes) versions 1.24.3, 1.23.9, 1.22.12, and 1.21.14.
- Adds [Weave add-on](https://kurl.sh/docs/add-ons/weave) versions 2.6.5-20220616 and 2.8.1-20220616 with Replicated-created security patches.

### Improvements {#improvements-v2022-07-15-1}

- Changes Weave versions 2.6.5 and 2.8.1 to once again use upstream Weave images.

### Bug Fixes {#bug-fixes-v2022-07-15-1}

- Fixes an issue that caused Rook to Longhorn migration failures due to Ceph claiming Longhorn devices.
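To illustrate the recommendation in the v2022.07.28-0 improvements above, the following is a minimal sketch of an installer spec that enables the EKCO internal load balancer instead of `kubernetes.loadBalancerUseFirstPrimary`; the EKCO version and metadata name are placeholders.

```yaml
# Illustrative spec only: prefer the EKCO internal load balancer for the
# control plane endpoint over kubernetes.loadBalancerUseFirstPrimary.
apiVersion: cluster.kurl.sh/v1beta1
kind: Installer
metadata:
  name: ekco-internal-lb # placeholder name
spec:
  ekco:
    version: latest # placeholder version
    enableInternalLoadBalancer: true
```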

## Release v2022.07.15-0

Released on July 15, 2022

### Improvements {#improvements-v2022-07-15-0}

- Improves health checks for the [MinIO](https://kurl.sh/docs/add-ons/minio), [OpenEBS](https://kurl.sh/docs/add-ons/openebs), and [GoldPinger](https://kurl.sh/docs/add-ons/goldpinger) add-ons.

## Release v2022.07.12-0

Released on July 12, 2022

### New Features {#new-features-v2022-07-12-0}

- Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2022-07-06T20-29-49Z to address the following high severity CVE: CVE-2022-1271.
- Adds [Docker Registry add-on](https://kurl.sh/docs/add-ons/registry) version 2.8.1.
- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.76.0.

### Improvements {#improvements-v2022-07-12-0}
- Updates the kurlsh/s3cmd image to tag 20220711-9578884 for the latest [Registry](https://kurl.sh/docs/add-ons/registry) and [Velero](https://kurl.sh/docs/add-ons/velero) add-on versions to address the following critical and high severity CVEs: CVE-2018-25032, CVE-2021-30139, CVE-2021-36159, CVE-2021-3711, CVE-2021-3712, CVE-2021-42378, CVE-2021-42379, CVE-2021-42380, CVE-2021-42381, CVE-2021-42382, CVE-2021-42383, CVE-2021-42384, CVE-2021-42385, CVE-2021-42386, CVE-2021-45960, CVE-2021-46143, CVE-2022-0778, CVE-2022-1271, CVE-2022-22822, CVE-2022-22823, CVE-2022-22824, CVE-2022-22825, CVE-2022-22826, CVE-2022-22827, CVE-2022-23852, CVE-2022-23990, CVE-2022-25235, CVE-2022-25236, CVE-2022-25314, CVE-2022-25315, CVE-2022-28391.

## Release v2022.07.07-0

Released on July 7, 2022

### Improvements {#improvements-v2022-07-07-0}

- Adds [containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.6.
- Adds [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.56.8.

## Release v2022.07.05-0

Released on July 5, 2022

### New Features {#new-features-v2022-07-05-0}

- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.75.0.

## Release v2022.07.01-1

Released on July 1, 2022

### New Features {#new-features-v2022-07-01-1}

- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.74.0.

## Release v2022.07.01-0

Released on July 1, 2022

### New Features {#new-features-v2022-07-01-0}

- Adds [Goldpinger add-on](https://kurl.sh/docs/add-ons/goldpinger) version 3.5.1.
- Adds support for RHEL and Oracle Linux 8.6.
- Adds support for upgrading OpenEBS 1.x directly to 2.12+ or 3.2+.
- The default [RKE2](https://kurl.sh/docs/add-ons/rke2) spec now includes the latest version of the [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs).

## Release v2022.06.29-0

Released on June 29, 2022

### New Features {#new-features-v2022-06-29-0}

- Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.9.0.
- Adds [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs) versions 2.12.9 and 3.2.0. Only localpv volumes are supported.

## Release v2022.06.24-0

Released on June 24, 2022

### New Features {#new-features-v2022-06-24-0}

- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.73.0.
- Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.57.0-36.2.0 to address the following critical and high severity CVEs: CVE-2022-28391, CVE-2022-0778, CVE-2022-1271, CVE-2018-25032.

## Release v2022.06.22-0

Released on June 22, 2022

### Improvements {#improvements-v2022-06-22-0}

- Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.21.1.
- Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.57.0-36.0.3.
- Adds [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.56.7.

### Bug Fixes {#bug-fixes-v2022-06-22-0}

- Fixes CVEs for [Weave add-on](https://kurl.sh/docs/add-ons/weave) version 2.8.1. CVEs addressed: CVE-2021-36159, CVE-2021-25216, CVE-2021-30139, CVE-2020-8620, CVE-2020-8621, CVE-2020-8623, CVE-2020-8625, CVE-2021-25215, CVE-2021-28831, CVE-2020-8169, CVE-2020-8177, CVE-2020-8231, CVE-2020-8285, CVE-2020-8286, CVE-2020-28196, CVE-2021-23840, CVE-2021-3450, CVE-2021-3517, CVE-2021-3518.
- Updates the local-volume-provider image to v0.3.5 for the [Velero add-on](https://kurl.sh/docs/add-ons/velero) to address CVE-2022-1664 with critical severity.

## Release v2022.06.17-0

Released on June 17, 2022

### Improvements {#improvements-v2022-06-17-0}

- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.72.1.
- Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2022-06-11T19-55-32Z to address the following critical and high severity CVEs: CVE-2020-14040, CVE-2021-42836, CVE-2020-36067, CVE-2020-36066, CVE-2020-35380, CVE-2020-26521, CVE-2020-26892, CVE-2021-3121, CVE-2020-26160, CVE-2021-28831, CVE-2020-11080, CVE-2021-3450, CVE-2021-23840, CVE-2020-1967, CVE-2020-8286, CVE-2020-8285, CVE-2020-8231, CVE-2020-8177, CVE-2020-8169, CVE-2021-30139, CVE-2021-36159.
- Updates the documentation for the [AWS add-on](https://kurl.sh/docs/add-ons/aws) with details on applying the [AWS IAM](https://aws.amazon.com/iam/) roles required for the add-on to function properly, as well as the specific requirements for integrating with the [AWS ELB](https://aws.amazon.com/elasticloadbalancing/) service.

### Bug Fixes {#bug-fixes-v2022-06-17-0}

- Fixes a bug where the [AWS add-on](https://kurl.sh/docs/add-ons/aws) would fail if `latest` or `0.1.x` was used.
- Fixes a bug where setting `excludeStorageClass` to `true` would cause the [AWS add-on](https://kurl.sh/docs/add-ons/aws) to fail.

## Release v2022.06.14-0

Released on June 14, 2022

### New Features {#new-features-v2022-06-14-0}

- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.72.0.
- Adds [Local Path Provisioner add-on](https://kurl.sh/docs/add-ons/local-path-provisioner) (Beta) as an additional storage provisioner.

### Bug Fixes {#bug-fixes-v2022-06-14-0}

- Fixes an issue where the `HTTPS_PROXY` variable was not set properly for the [containerd add-on](https://kurl.sh/docs/add-ons/containerd) service.

## Release v2022.06.01-0

Released on June 1, 2022

### Improvements

- Adds support for Kubernetes versions 1.21.12, 1.22.9, 1.23.6, and 1.24.0.
- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.71.0.
- Adds [containerd add-on](https://kurl.sh/docs/add-ons/containerd) versions 1.5.10, 1.5.11, and 1.6.4.
- Adds [Longhorn add-on](https://kurl.sh/docs/add-ons/longhorn) version 1.2.4.
- Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) versions 0.19.1 and 0.19.2.
- In addition to the `ekco.enableInternalLoadBalancer` parameter in the installer specification, the `ekco-enable-internal-load-balancer` flag can now be specified at install time to enable the EKCO [internal load balancer](https://kurl.sh/docs/add-ons/ekco#internal-load-balancer).
- Upgrades the replicatedhq/local-volume-provider image to v0.3.4 for [Velero add-on](https://kurl.sh/docs/add-ons/velero) v1.8.1.

### Bug Fixes

- Fixes an issue that caused the `less` command to break after installing on Amazon Linux 2.
- Fixes an issue that caused installations with Velero and the `kotsadm.disableS3` flag set to `true` to fail on RHEL-based distributions.
- Fixes an issue that caused `bash: _get_comp_words_by_ref: command not found` to be printed after pressing tab when `bash-completion` is not installed.
- Fixes an issue where migrating the object store from Rook to MinIO would fail due to undefined metrics functions.

## Release v2022.05.19-0

Released on May 19, 2022

### Improvements

- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.70.1.
- No longer installs Helm unless the experimental Helm charts feature is in use.

## Release v2022.05.16-0

Released on May 16, 2022

### Improvements

- Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.21.0.
- Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.56.2-35.2.0.
- Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.8.1.

## Release v2022.05.11-0

Released on May 11, 2022

### Improvements

- The storage class created by the AWS add-on is now named `aws-ebs` instead of `default`.

## Release v2022.05.10-0

Released on May 10, 2022

### New Features

- Introduces the AWS add-on, which sets up the AWS cloud provider in a Kubernetes installer-created cluster. For more information, see [AWS Add-On](https://kurl.sh/docs/add-ons/aws) in the kURL open source documentation.

### Improvements

- OpenEBS is now marked as incompatible with Kubernetes 1.22+.

## Release v2022.05.06-0

Released on May 6, 2022

### New Features

- Adds a `resticTimeout` configuration option to the [Velero add-on](https://kurl.sh/docs/add-ons/velero) to allow users to configure the value that gets passed to the `--restic-timeout` flag in the Velero pod. This can also be set using the [`velero-restic-timeout` flag](https://kurl.sh/docs/install-with-kurl/advanced-options#reference) when running the install script.

### Improvements

- The "latest" version for the [containerd add-on](https://kurl.sh/docs/add-ons/containerd) is no longer pinned to 1.4.6. The "latest" version was pinned to 1.4.6 because later versions of containerd are not supported on Ubuntu 16.04. kURL removed support for Ubuntu 16.04 in [v2022.04.29-0](#release-v20220429-0).
- Adds the `NoExecute` effect to the toleration for the Weave-Net DaemonSet for versions 2.5.2, 2.6.4, and 2.6.5.
- Ensures that OpenEBS pods run with critical priority so that they are not evicted before other pods that depend on them.

### Bug Fixes

- Fixes an issue that could cause a migration from Docker to containerd to fail from a miscalculation of available disk space.
- Fixes an issue that caused an upgrade of Kubernetes to fail when enabling the [EKCO internal load balancer](https://kurl.sh/docs/add-ons/ekco#internal-load-balancer).

## Release v2022.05.02-0

Released on May 2, 2022

### Improvements

- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.70.0.

## Release v2022.04.29-0

Released on April 29, 2022

### Improvements

- Installs an NFS client package as part of the [Velero add-on](https://kurl.sh/docs/add-ons/velero).
- Removes support for Ubuntu 16.04 (end of life April 29, 2021).
- The [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) now recommends that the user change the password after installation.

### Bug Fixes

- Fixes an issue that caused upgrades spanning two versions of Kubernetes on remote masters to fail with the error `docker: executable file not found in $PATH`.
- Fixes an issue that caused a migration from Containerd to Docker to fail on air gapped instances with image pull errors.

## Release v2022.04.19-0

Released on April 19, 2022

### Improvements

- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.69.1.
- Adds [Goldpinger add-on](https://kurl.sh/docs/add-ons/goldpinger) version 3.3.0-5.1.0.

### Bug Fixes

- Fixes a bug where the `installerVersion` in the kURL manifest was not fully applied.

## Release v2022.04.08-1

Released on April 8, 2022

### Improvements

- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.69.0.

## Release v2022.04.08-0

Released on April 8, 2022

### Improvements

- Adds support for Kubernetes versions 1.21.11, 1.22.8, and 1.23.5.
- Adds support for containerd version 1.4.13.

### Bug Fixes

- Fixes a bug that caused cross-cluster restores to fail in some situations.
- Fixes an issue where Contour and Envoy requested too much CPU, causing other pods to not get scheduled in 4-CPU single node installations.
- Fixes a bug where persistent volume migrations sometimes failed due to a nil pointer dereference.
- Fixes a bug where the migration from Rook's object store to MinIO would fail after failing to get the logs of the sync-object-store pod.
- Increases the timeout while waiting for the kotsadm deployment to start, in order to improve the success rate when migrating from Rook to Longhorn.
- Fixes a bug that caused migrating from Docker to containerd to fail when also upgrading Kubernetes by more than one minor version in multi-node clusters.

## Release v2022.04.04-0

Released on April 4, 2022

### New Features

- Adds the `kubeReserved` and `systemReservedResources` options to the [Kubernetes add-on](https://kurl.sh/docs/add-ons/kubernetes) to reserve resources for Kubernetes and OS system daemons. For more information, see [Kube Reserved](https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#kube-reserved) and [System Reserved](https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#system-reserved).
- Adds the `evictionThresholdResources` option to the [Kubernetes add-on](https://kurl.sh/docs/add-ons/kubernetes) to set [Kubernetes eviction thresholds](https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds).

### Improvements

- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.68.0.

## Release v2022.03.23-0

Released on March 23, 2022

### Improvements

- Adds an optional [CIS Compliance](https://kurl.sh/docs/install-with-kurl/cis-compliance) flag to the Kubernetes installer specification that configures the instance to meet the [Center for Internet Security (CIS)](https://www.cisecurity.org/cis-benchmarks/) compliance benchmark.

### Bug Fixes

- Fixes a bug that could cause an unbound variable error when restoring from a backup.

## Release v2022.03.22-0

Released on March 22, 2022

### Bug Fixes

- Fixes a bug that caused installations to fail with the error "incorrect binary usage" for all installers that include KOTS add-on version 1.67.0.

## Release v2022.03.21-0

Released on March 21, 2022

### Improvements

- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.67.0.
- Adds the [`app-version-label` flag](https://kurl.sh/docs/install-with-kurl/advanced-options#reference), which takes a version label as an argument and tells KOTS to install that particular version of an application. If this flag is not passed, the latest version of the application is installed. See [Online Installation with the Kubernetes Installer​](/enterprise/installing-embedded-cluster).

## Release v2022.03.11-0

Released on March 11, 2022

### New Features
* Adds the [labels flag](https://kurl.sh/docs/install-with-kurl/advanced-options), which applies the given labels to the node.

### Bug Fixes
* Fixes false validation errors when creating a new installer that includes one or more of the following fields: `excludeBuiltinHostPreflights`, `hostPreflightIgnore`, `hostPreflightEnforceWarnings`, and `storageOverProvisioningPercentage`.

## Release v2022.03.08-0

Released on March 8, 2022

### Improvements

- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.66.0.

### Bug Fixes

- Fixes a bug where the `installerVersion` field for the [kURL add-on](https://kurl.sh/docs/add-ons/kurl) was stripped when creating or promoting the installer.

## Release v2022.03.04-1

Released on March 4, 2022

### Improvements

- Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.18.0.

## Release v2022.03.04-0

Released on March 4, 2022

### Improvements

- Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.17.0.
- Adds CPU resource requests and limits to the [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.20.1+ to prevent Envoy from becoming unresponsive.

## Release v2022.03.01-0

Released on March 1, 2022

### Improvements

- Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.20.1.

## Release v2022.02.28-0

Released on February 28, 2022

### Improvements

- Adds the [storage over-provisioning percentage](https://longhorn.io/docs/1.2.3/references/settings/#storage-over-provisioning-percentage) option to the [Longhorn add-on](https://kurl.sh/docs/add-ons/longhorn).

### Bug Fixes

- Fixes the KOTS `uiBindPort` for the beta K3s and RKE2 installers so that they do not error on deploy. This port now defaults to 30880, and the allowable range is ports 30000-32767.

## Release v2022.02.25-0

Released on February 25, 2022

### Improvements

- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.65.0.

## Release v2022.02.23-0

Released on February 23, 2022

### Bug Fixes

- Fixes a race condition when migrating from Rook-Ceph to Longhorn with both Prometheus and [EKCO v0.13+](https://kurl.sh/docs/add-ons/ekco#auto-resource-scaling) installed.
- Fixes a bug that caused RHEL 8 installations utilizing the [containerd add-on](https://kurl.sh/docs/add-ons/containerd) to fail because of conflicting dependency package versions.
- Fixes a bug that caused RHEL 7 installations to fail because of conflicting openssl-lib package versions.

## Release v2022.02.18-0

Released on February 18, 2022

### Improvements

- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.64.0.

## Release v2022.02.17-0

Released on February 17, 2022

### New Features
- (Beta) Introduces support for the [K3s](https://kurl.sh/docs/add-ons/k3s) and [RKE2](https://kurl.sh/docs/add-ons/rke2) add-ons.
- (Beta) Introduces support for a [single-node optimized installer specification](https://kurl.sh/docs/create-installer/single-node-optimized), using either [K3s](https://kurl.sh/docs/add-ons/k3s) or [RKE2](https://kurl.sh/docs/add-ons/rke2).
- The [KOTS](https://kurl.sh/docs/add-ons/kotsadm) add-on no longer includes the MinIO image.

### Improvements
- The host's private IP is now automatically detected on subsequent runs of the installation script.

### Bug Fixes
- Fixes an erroneous host preflight failure when using EKCO's [internal load balancer](https://kurl.sh/docs/add-ons/ekco#internal-load-balancer).
- Fixes a bug that caused containerd to fail with x509 errors when pulling images from the local kURL registry.
- Fixes a bug that caused the `kurl-config` ConfigMap to be missing when using the [K3s](https://kurl.sh/docs/add-ons/k3s) and [RKE2](https://kurl.sh/docs/add-ons/rke2) distributions.

## Release v2022.02.11-1

Released on February 11, 2022

### Improvements
- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.63.0.

## Release v2022.02.11-0

Released on February 11, 2022

### Bug Fixes
- Fixes a failing preflight for the TCP load balancer check when EKCO's internal load balancer is enabled.

## Release v2022.02.09-0

Released on February 9, 2022

### Improvements
- Adds support for Kubernetes versions 1.22.6, 1.21.9, and 1.20.15.
- Adds support for Contour version 1.20.0.
- Adds support for K3s version 1.23.3+k3s1. This feature is experimental and is only available to vendors who have requested access.
- Adds support for RKE2 version 1.22.6+rke2r1. This feature is experimental and is only available to vendors who have requested access.
- Updates the latest installer specification (https://kurl.sh/latest) to Kubernetes 1.23.x.

## Release v2022.02.04-0

Released on February 4, 2022

### Improvements
- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.62.0.

### Bug Fixes
- Fixes an installer failure in scenarios where custom host preflights are enabled with other installer flags.
- Fixes a bug that allowed for weak ciphers in etcd, the kubelet, and the Kubernetes API server.

## Release v2022.02.01-0

Released on February 1, 2022

### New Features
- Adds support for RHEL 8.5.

### Improvements
- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.61.0.

### Bug Fixes
- Fixes Velero backup labels not being added to registry secrets when the secrets were already present.
- Fixes restoring registry snapshots taken before IPv6 support onto new clusters.
- Fixes using the `skip-system-package-install` flag with the containerd add-on.

## Release v2022.01.28-2

Released on January 28, 2022

### Bug Fixes

- Changes the [filesystem write latency host preflight for etcd](https://kurl.sh/docs/install-with-kurl/host-preflights#primary-nodes) to warn when greater than or equal to 10ms.

## Release v2022.01.28-1

Released on January 28, 2022

### New Features
- Registry backup and restore scripts now include more user-friendly logging within the container.

### Bug Fixes
- Fixes airgap Postgres images in [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.60.0.

## Release v2022.01.28-0

Released on January 28, 2022

### New Features
- Adds support for Kubernetes version 1.23.x.

### Bug Fixes
- Fixes a bug that caused the installer to exit when installing Antrea version 1.4.0+ with encryption enabled and without the requisite WireGuard kernel module.
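For context on the Antrea fix above, the following sketch shows an installer spec that installs Antrea 1.4.0+ with encryption enabled, which depends on the WireGuard kernel module being present on the host. The `isEncryptionDisabled` field name is an assumption drawn from the kURL Antrea add-on options, not from this release note, and the other values are placeholders.

```yaml
# Illustrative spec only: Antrea 1.4.0+ with traffic encryption enabled,
# which requires the WireGuard kernel module on the host.
apiVersion: cluster.kurl.sh/v1beta1
kind: Installer
metadata:
  name: antrea-encrypted # placeholder name
spec:
  kubernetes:
    version: 1.23.x # placeholder version
  antrea:
    version: 1.4.0
    isEncryptionDisabled: false # assumed field name; encryption stays on
```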

## Release v2022.01.25-0

Released on January 25, 2022

### New Features
- [Host preflight](https://kurl.sh/docs/install-with-kurl/host-preflights/) failures are now blocking, and the installer exits with an error. Warnings do not cause the installer to exit. Warnings can be enforced and errors can be ignored with [`host-preflight-enforce-warnings` and `host-preflight-ignore`](https://kurl.sh/docs/install-with-kurl/advanced-options).

### Improvements
- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.60.0.
- Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.16.0, which does a rollout restart of the Envoy pods after generating the new certificates instead of restarting all pods at once. It also validates and renews certificates on startup.

### Bug Fixes
- Fixes the legacy `apiregistration.k8s.io/v1beta1` resource for Prometheus 0.53.1-30.1.0.

## Release v2022.01.24-0

Released on January 24, 2022

### Bug Fixes
- Reverts an update to React-DOM that was causing the TestGrid UI to fail.

## Release v2022.01.22-0

Released on January 22, 2022

### Bug Fixes
- Changes the default Kubernetes version from 1.22.x to 1.21.x to mitigate an incompatibility with the default Prometheus version.

## Release v2022.01.21-0

Released on January 21, 2022

### Improvements
- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.59.3.
- Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.53.1-30.1.0.
- Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.15.0, which supports auto-renewal of Contour and Envoy certs.
- Moves the [`latest`](https://kurl.sh/latest) installer on kurl.sh to Kubernetes 1.22.5.

### Bug Fixes
- Fixes a bug that caused the **Internal Storage** snapshot option to be missing when an object store is available.
- Fixes random Alertmanager and Grafana NodePorts in the Prometheus add-on for versions 0.53.1-30.1.0+.


## Release v2022.01.18-0

Released on January 18, 2022

### New Features
- Adds the ability to exclude the built-in host preflights during installation.

### Improvements
- Adds support for Kubernetes v1.22.5.
- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.59.2.
- Adds version 0.14.0 of the EKCO add-on, which adds Kubernetes v1.22+ support.

### Bug Fixes
- Fixes a race condition with storage class migration.
- Fixes a bug related to long Persistent Volume Claim (PVC) names when migrating storage classes.
- Fixes some host preflight error messages.

## Release v2022.01.05-0

Released on January 5, 2022

### Improvements
- Adds support for Kubernetes 1.19.16, 1.20.14, and 1.21.8.

### Bug Fixes
- Resolves an error when installing the Velero add-on with Kubernetes 1.21 and `disableS3=true` set for KOTS.
- Fixes an issue with the KOTS URL not printing correctly when performing a reinstall or upgrade.

## Release v2022.01.04-0

Released on January 4, 2022

### Bug Fixes
- Reverts the `latest` version of Kubernetes to v1.19.x.

## Release v2021.12.29-0

Released on December 29, 2021

### New Features
- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.59.1.


## Release v2021.12.23-0

Released on December 23, 2021

### New Features
- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.59.0.
- Adds support for [cluster migration away from object storage](https://kurl.sh/docs/install-with-kurl/removing-object-storage).
  KOTS can now be deployed without an object store with [no loss of snapshot or registry functionality](https://kurl.sh/docs/add-ons/kotsadm).

## Release v2021.12.21-0

Released on December 21, 2021

### Improvements
- Updates front-end dependencies to the latest available versions.

## Release v2021.12.17-0

Released on December 17, 2021

### Bug Fixes
- Improves experimental [IPv6](https://kurl.sh/docs/install-with-kurl/ipv6) support.

## Release v2021.12.14-0

Released on December 14, 2021

### New Features
- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.58.2.

### Bug Fixes
- Adds Contour 1.19.1 images that were missing from airgap bundles.


## Release v2021.12.10-0

Released on December 10, 2021

### New Features
- Adds the ability to skip the installation of system packages by passing the `skip-system-package-install` flag. Using this flag automatically enables a preflight check that detects whether the necessary system packages for the included add-ons are already installed.

### Improvements
- The kURL `latest` installer spec is now pinned to Kubernetes version 1.21.x.
- The kURL `latest` installer spec now pins to add-on minor versions rather than `latest`.

## Release v2021.12.09-0

Released on December 9, 2021

### Improvements
- Adds support for Oracle Linux 8.5.

### Bug Fixes
- Temporarily removes the Prometheus add-on version 0.52.0-22.0.0 due to an [upstream bug](https://github.com/prometheus-community/helm-charts/issues/1500).

## Release v2021.12.08-0

Released on December 8, 2021

### New Features
- Adds [EKCO](https://kurl.sh/docs/add-ons/ekco) version 0.13.0.
- Adds Velero version 1.7.1.
- Adds Longhorn version 1.2.2.
- Adds Sonobuoy version 0.55.1.
- Adds Antrea version 1.4.0.
- Adds Prometheus version 0.52.0-22.0.0.
- Updates pvmigrate to 0.4.1.

### Bug Fixes
- Prevents EKCO from trying to manage Rook when Rook is not installed.
- Fixes missing packages in some Longhorn migration scenarios.

## Release v2021.12.02-0

Released on December 2, 2021

### New Features
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.58.1.

## Release v2021.12.01-0

Released on December 1, 2021

### New Features
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.58.0.

### Bug Fixes
- Host packages installed as DNF modules are now reset after installation to allow running `yum update` without dependency errors.


## Release v2021.11.22-0

Released on November 22, 2021

### New Features
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.57.0.


## Release v2021.11.09-0

Released on November 9, 2021

### Improvements
- kURL now reports when migrations occur between the Rook Ceph and MinIO object stores.
- kURL now reports when migrations occur between the Rook Ceph and Longhorn storage classes.

### Bug Fixes
- Fixed an issue that prevented the versions of Longhorn and MinIO from resolving in kurl.sh/latest.

## Release v2021.11.08-0

Released on November 8, 2021

### Improvements
- The default configuration for https://kurl.sh/latest was updated to replace Rook with Longhorn and MinIO.

## Release v2021.11.05-0

Released on November 5, 2021

### New Features
- Added a mechanism to migrate registry contents from S3 to a persistent volume. Note that this cannot be triggered yet, but will later be used once all object storage-related migrations are available.
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.56.0.

### Bug Fixes
- Reverted changes to https://kurl.sh/latest that were introduced in [v2021.11.04-0](https://kurl.sh/release-notes/v2021.11.04-0). As a result, Rook and Kubernetes 1.19 are once again in the default configuration.

## Release v2021.11.04-0

Released on November 4, 2021

### Improvements
- The default configuration for https://kurl.sh/latest was updated to include Kubernetes 1.21 instead of 1.19, and Rook was replaced with Longhorn and MinIO. Note that using `rook: latest` with `kubernetes: latest` no longer works, as Rook 1.0.4 is not compatible with Kubernetes 1.20+. To avoid this, pin a specific version instead of using `latest`.

## Release v2021.11.02-0

Released on November 2, 2021

### Improvements
- Rook Ceph versions 1.4+ now display an info-level message when trying to mount an external disk, along with some troubleshooting tips.

### Bug Fixes
- kURL [yaml patches](https://kurl.sh/docs/install-with-kurl/#modifying-an-install-using-a-yaml-patch-file-at-runtime) that include non-breaking spaces now cause the installer to fail with a helpful error.
- Null or empty kURL [yaml patches](https://kurl.sh/docs/install-with-kurl/#modifying-an-install-using-a-yaml-patch-file-at-runtime) no longer remove the configuration provided by the kURL spec.

## Release v2021.10.22-0

Released on October 22, 2021

### New Features
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.55.0.

## Release v2021.10.20-0

Released on October 20, 2021

### New Features
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.54.0.

### Bug Fixes
- Fixed a bug where Ceph update versions were not applied to all Ceph components.
- Reverted the ability for the registry add-on to run with two replicas and a RWX volume when used with Longhorn. This was originally released in [v2021.10.01-0](https://kurl.sh/release-notes/v2021.10.01-0).

## Release v2021.10.08-0

Released on October 8, 2021

### New Features
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.53.0.

## Release v2021.10.04-0

Released on October 4, 2021

### New Features
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.52.1.

## Release v2021.10.01-0

Released on October 1, 2021

### New Features
- Containerd is now the default container runtime, replacing the previous default container runtime, Docker (see the sketch after this entry).
- Log rotation is now configured by default for the [Docker add-on](https://kurl.sh/docs/add-ons/docker), where the [max-size](https://docs.docker.com/config/containers/logging/json-file/#options) parameter for the log file is set to `10m`.
- Added the ability to configure log rotation through the kubelet, which helps when using containerd instead of Docker.
- Re-enabled the ability to declare custom host preflight checks in the kURL installer spec.

### Improvements
- When Longhorn is specified in an installer spec but an object store (e.g., MinIO) is not, the [Registry add-on](https://kurl.sh/docs/add-ons/registry) is deployed with two replicas and a ReadWriteMany (RWX) volume for greater availability.

### Bug Fixes
- Fixed a bug that did not allow User and Service Account tokens to authenticate to the kURL API.
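As a minimal sketch of what selecting the new default runtime looks like in an installer spec, the following includes containerd and omits the `docker` block; the metadata name and versions are placeholders.

```yaml
# Illustrative spec only: pinning containerd as the container runtime.
apiVersion: cluster.kurl.sh/v1beta1
kind: Installer
metadata:
  name: containerd-runtime # placeholder name
spec:
  kubernetes:
    version: 1.21.x # placeholder version
  containerd:
    version: latest # containerd is now the default runtime
```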

## Release v2021.09.30-0

Released on September 30, 2021

### Bug Fixes
- Fixed a bug that prevented User and Service Account tokens from authenticating to the API.
- Fixed a bug that could cause upgrades from Rook 1.0.4 to 1.0.4-14.2.21 to fail.
- Fixed a bug that would cause snapshots not to restore after a Rook to Longhorn migration.

### Improvements
- Sysctl parameters required for pod networking are now enabled for all operating systems in `/etc/sysctl.conf`.

## Release v2021.09.27-4

Released on September 27, 2021

### Bug Fixes
- Due to a bug, removed the ability to add custom host preflights in the kURL installer spec. This was initially released in [v2021.09.24-0](https://kurl.sh/release-notes/v2021.09.24-0).

## Release v2021.09.24-0

Released on September 24, 2021

### New Features
- Custom host preflight checks can be declared in the kURL installer spec.
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.52.0.

### Bug Fixes
- Fixed an issue that prevented Rook add-on preflight checks from executing.

## Release v2021.09.20-0

Released on September 20, 2021

### Bug Fixes
- Fixed a bug that could cause the EKCO add-on to fail when mistakenly trying to deploy the `PodImageOverrides` mutating webhook configuration.

## Release v2021.09.17-0

Released on September 17, 2021

### New Features
- Added Kubernetes versions 1.21.5, 1.21.4, 1.21.3, 1.20.11, 1.20.10, and 1.19.15.

## Release v2021.09.16-0

Released on September 16, 2021

### New Features
- Added the [KOTS add-on](https://kurl.sh/docs/add-ons/kots) nightly version.

## Release v2021.09.15-0

Released on September 15, 2021

### New Features
- Added [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.12.0.
- Host preflights check disk space in `/opt/replicated/rook` with Rook 1.0.4.

### Improvements
- Host preflight block device checks run for all versions of Rook 1.4+.

## Release v2021.09.09-0

Released on September 9, 2021

### New Features
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kots) version 1.50.2.

## Release v2021.08.27-0

Released on August 27, 2021

### New Features
- Clusters with containerd enabled are now automatically migrated from Docker when Docker is detected. Previously, containerd would not be installed when Docker was detected.

### Bug Fixes
- Fixed an issue that prevented the [internal load balancer](https://kurl.sh/docs/add-ons/ekco#internal-load-balancer) from being started on remote nodes when not explicitly enabled.
- Fixed an issue that could cause the [MinIO add-on](https://kurl.sh/docs/add-ons/minio) to wait forever when creating a PVC.

## Release v2021.08.20-0

Released on August 20, 2021

### New Features
- Added a new parameter to the [MinIO add-on](https://kurl.sh/docs/add-ons/minio), `claimSize`. This defaults to `10Gi` and allows setting the size of the MinIO PVC.
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kots) version 1.50.1.

## Release v2021.08.16-0

Released on August 16, 2021

### New Features
- A new feature flag, [licenseURL](https://kurl.sh/docs/install-with-kurl/#vendor-licensing-agreement-beta), allows vendors to include a URL to a licensing agreement for non-airgap installs.
- Added [Antrea add-on](https://kurl.sh/docs/add-ons/antrea) version 1.2.1.
- Added [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.49.0-17.1.3.
- Added the [local-volume-provider](https://github.com/replicatedhq/local-volume-provider) plugin to Velero add-on versions 1.5.1 through 1.6.2.
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.50.0.

### Bug Fixes
- Docker preflights no longer run when Docker is not configured within kURL.

## Release v2021.08.09-0

Released on August 9, 2021

### New Features
- Added [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.53.0.
- Added [Goldpinger add-on](https://kurl.sh/docs/add-ons/goldpinger) version 3.2.0-4.2.1.
- Added [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.49.0-17.1.1.

### Bug Fixes
- The [Rook add-on block storage](https://kurl.sh/docs/add-ons/rook#block-storage) flag is no longer required to be set for version 1.4.3+. Instead, it is assumed to be set to `true` for these versions.

## Release v2021.08.06-0

Released on August 6, 2021

### New Features
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.49.0.

## Release v2021.08.04-0

Released on August 4, 2021

### New Features
- The kURL installer can now differentiate between installs and upgrades.
- Added [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.11.0 with support for [internal load balancing with HAProxy on HA installs](https://kurl.sh/docs/install-with-kurl/#highly-available-k8s-ha).

## Release v2021.08.03-0

Released on August 3, 2021

### New Features
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.48.1.

### Bug Fixes
- Fixed an issue where the kotsadm config would be overridden when updating kURL.

## Release v2021.07.30-1

Released on July 30, 2021

### New Features
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.48.0.

## Release v2021.07.30-0

Released on July 30, 2021

### New Features
- Added [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.18.0.
- Added [Antrea add-on](https://kurl.sh/docs/add-ons/antrea) version 1.2.0.
- Longhorn 1.1.2+ automatically migrates Rook-backed PVCs to Longhorn-backed PVCs if Rook is installed but no longer included in the kURL spec.
- MinIO automatically imports Rook-backed object store data if Rook is installed but no longer included in the kURL spec.
- Rook is automatically uninstalled if all data is migrated successfully to both Longhorn and MinIO.


## Release v2021.07.23-1

Released on July 23, 2021

### New Features
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.47.3.
- Added [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.6.2.
- Added [Longhorn add-on](https://kurl.sh/docs/add-ons/longhorn) version 1.1.2.
- Added [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.49.0-17.0.0.
- Added Kubernetes versions 1.21.3, 1.20.9, and 1.19.13.

## Release v2021.07.23-0

Released on July 23, 2021

### New Features
- Host preflight results are now tracked in the directory `/var/lib/kurl/host-preflights`.

### Improvements
- Host preflights can now be run with an installer spec from STDIN, for example `kubectl get installer 6abe39c -oyaml | /var/lib/kurl/bin/kurl host preflight -`.
- Added a host preflight to check disk usage in `/var/lib/docker`.

### Bug Fixes
- Fixed an issue that would cause [.x versions](https://kurl.sh/docs/create-installer/#x-patch-versions) to fail for the kotsadm add-on.
- Fixed an issue where warning messages would be displayed for passing preflight checks.
- Fixed an issue where terminal control characters were erroneously displayed in noninteractive preflight check output.
- Fixed an issue where invalid configurations for Rook version 1.4 or greater would pass validation checks.

## Release v2021.07.20-0

Released on July 20, 2021

### Bug Fixes
- Fixed an issue that would cause the installer to panic when `spec.selinuxConfig` is not empty or the `preserve-selinux-config` flag is specified and `spec.firewalldConfig` is empty.

## Release v2021.07.19-0

Released on July 19, 2021

### New Features
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.47.2.
- The [Rook add-on's](https://kurl.sh/docs/add-ons/rook) object store can be migrated to [MinIO](https://kurl.sh/docs/add-ons/minio) with the `migrate-rgw-to-minio` task.

### Improvements
- The Weave add-on host preflight check will no longer fail on connection timeouts on metrics ports 6781 and 6782.
- The preflight check for ftype on XFS filesystems has been added to all versions of containerd 1.3.7+.

### Bug Fixes
- The [EKCO add-on's](https://kurl.sh/docs/add-ons/ekco) reboot service no longer depends on Docker when using containerd.

## Release v2021.07.16-0

Released on July 16, 2021

### New Features
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.47.1.

### Improvements
- The [containerd add-on](https://kurl.sh/docs/add-ons/containerd) will check that XFS filesystems have ftype enabled before attempting to install.
- The load balancer address preflight check will now validate that a valid address is provided before validating the network.

### Bug Fixes
- The default pass value for the memory preflight check has been changed from 8Gi to 8G.

## Release v2021.07.13-0

Released on July 13, 2021

### New Features
- Preflight results will now be stored on the host in the directory /var/lib/kurl/host-preflights.
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.47.0.

### Improvements
- When downloading a bundle from the kURL server, the bundle creation process will fail early if one of the layers is unavailable, instead of returning a partial bundle.
- Added better messaging for the user when the kurlnet-client pod fails.

## Release v2021.07.09-0

Released on July 9, 2021

### New Features
- All add-ons with versions that conform to semver now support the notation `Major.Minor.x`. When specified using this notation, the version will resolve to the greatest patch version for the specified major and minor version (see the example spec below).
- Added [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.48.1-16.12.1.
- Added [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.52.0.

### Bug Fixes
- The [reset task](https://kurl.sh/docs/install-with-kurl/managing-nodes#reset-a-node) will now properly remove Kubernetes host packages.

## Release v2021.07.02-0

Released on July 2, 2021

### New Features
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.46.0.

### Bug Fixes
- Fixed CVE-2021-20288 in Rook 1.5.11 and 1.0.4-14.2.21.

## Release v2021.06.30-1

Released on June 30, 2021

### Bug Fixes

- Fixed an issue that caused newer versions of kURL to have outdated scripts. This issue affects kURL versions v2021.06.24-0, v2021.06.24-1, v2021.06.25-0, and v2021.06.30-0.
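
For reference, a minimal sketch of an installer spec using the `Major.Minor.x` notation described under v2021.07.09-0 above (the installer name and the particular add-on versions are illustrative):

```yaml
apiVersion: cluster.kurl.sh/v1beta1
kind: Installer
metadata:
  name: my-installer
spec:
  kubernetes:
    version: "1.21.3"
  velero:
    # Resolves to the greatest available 1.6 patch version at install time
    version: "1.6.x"
```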

## Release v2021.06.30-0

Released on June 30, 2021

### New Features
- Added the ability to configure the Kubernetes service type used by the [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) to expose Prometheus, Grafana, and Alertmanager. The currently accepted options are "NodePort" (the default) and "ClusterIP".
- [Migrations](https://kurl.sh/docs/install-with-kurl/migrating) are a supported way to change CSI, CRI, and CNI providers.

### Bug Fixes
- Fixed an issue that would cause Kubernetes upgrades to fail when the hostname of a node contains uppercase characters.
- Fixed an issue that prevented containerd from trusting the registry certificate except on the first primary.

## Release v2021.06.25-0

Released on June 25, 2021

### New Features
- Added support for Kubernetes versions 1.21.2, 1.20.8, 1.19.12, and 1.18.20.
- Added [KOTS](https://kurl.sh/docs/add-ons/kotsadm) add-on version 1.45.0.
- Added [Containerd](https://kurl.sh/docs/add-ons/containerd) add-on version 1.4.6.
- Added [Contour](https://kurl.sh/docs/add-ons/contour) add-on version 1.16.0.
- Added [EKCO](https://kurl.sh/docs/add-ons/ekco) add-on version 0.10.3.
- Added [Rook](https://kurl.sh/docs/add-ons/rook) add-on version 1.5.12.
- Added [Velero](https://kurl.sh/docs/add-ons/velero) add-on version 1.6.1.
- Added [Antrea](https://kurl.sh/docs/add-ons/antrea) add-on version 1.1.0.

### Bug Fixes
- Fixed an issue where upgrading Prometheus from version 0.44.1 to any later version would cause the Contour Pods to crash.
- Fixed an issue in earlier versions of the Prometheus add-on that prevented the Grafana Dashboard from connecting to the Prometheus data store.
- Fixed an issue that could cause a kURL upgrade to fail if new add-ons had been added to kURL (even if they were not used in that installer).

## Release v2021.06.24-1

Released on June 24, 2021

### Bug Fixes
- Fixed a bug in which the [Rook](https://kurl.sh/docs/add-ons/rook) add-on (version 1.0.4-14.2.21) was referencing the incorrect Ceph image.

## Release v2021.06.24-0

Released on June 24, 2021

### New Features
- The [Goldpinger](https://kurl.sh/docs/add-ons/goldpinger) add-on has been added to monitor network connectivity.

### Improvements
- Host packages installed on CentOS, RHEL, and Oracle Linux will now be installed using yum rather than rpm, and will no longer force-overwrite previously installed versions.
- The Prometheus add-on (version 0.48.1-16.10.0+) will now pass the flag [--storage.tsdb.retention.size=9GB](https://prometheus.io/docs/prometheus/latest/storage/#operational-aspects) to avoid filling the PVC completely.

### Bug Fixes
- Fixed a bug with the `kurl-registry-ip` flag that caused errors when restoring airgap clusters while using the Containerd add-on.

## Release v2021.06.22-0

Released on June 22, 2021

### Bug Fixes
- Fixed an issue that caused Rook-Ceph to allow insecure connection claims. See [CVE-2021-20288](https://docs.ceph.com/en/latest/security/CVE-2021-20288/) for details.
- A new [Rook](https://kurl.sh/docs/add-ons/rook) add-on version 1.0.4-14.2.21 has been added with an upgraded Ceph version 14.2.21.

## Release v2021.06.17-0

Released on June 17, 2021

### New Features
- Added support for RHEL 8.4 and CentOS 8.4.

### Improvements
- Added support for [versioned kURL installers](https://kurl.sh/docs/install-with-kurl/#versioned-releases) to the installation spec validator (if an add-on version is not present in the specified version of kURL, an error will be returned).

## Release v2021.06.15-0

Released on June 15, 2021

### New Features
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.44.1.
- Added a new field, `kurl.installerVersion`, that allows [pinning the kURL installer version](https://kurl.sh/docs/install-with-kurl/#versioned-releases) (see the example spec below).

### Improvements
- Containerd configuration will be regenerated when rerunning the installer. New settings have been added to the [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) to allow you to preserve the existing config or to add additional fields.

## Release v2021.06.11-0

Released on June 11, 2021

### New Features
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.44.0.

## Release v2021.06.08-0

Released on June 8, 2021

### New Features
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.43.2.

## Release v2021.06.07-0

Released on June 7, 2021

### Improvements
- Added HTTPS proxy configuration to KOTS (>= v1.43.1).

## Release v2021.06.04-0

Released on June 4, 2021

### New Features
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.43.1.
- Added [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.10.2 with support for Longhorn PVCs in the node shutdown script.
- Added [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.48.0-16.1.2.

### Improvements
- Added HTTPS proxy configuration to Velero.
- Installing the Docker add-on will no longer install additional recommended packages on Ubuntu.
- Added a preinstallation check to the [Longhorn add-on](https://kurl.sh/docs/add-ons/longhorn) that validates that nodes support bidirectional mount propagation.
- The replicated/kurl-util image now includes the Linux command line utilities curl, ipvsadm, netcat, openssl, strace, sysstat, tcpdump, and telnet for debugging purposes.

## Release v2021.05.28-01

Released on May 28, 2021

### New Features
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.43.0.

### Improvements
- A host preflight check for the [Longhorn add-on](https://kurl.sh/docs/add-ons/longhorn) will ensure sufficient disk space is available in /var/lib/longhorn.
- A priority class is now set on the [Longhorn add-on](https://kurl.sh/docs/add-ons/longhorn) to delay its eviction.

## Release v2021.05.28-0

Released on May 28, 2021

### Improvements
- The [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) will include a ServiceMonitor for Longhorn when the [Longhorn add-on](https://kurl.sh/docs/add-ons/longhorn) is installed.
- The [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) will no longer hardcode `storageClassName: default` for better compatibility with PVC Provisioner add-ons.

### Bug Fixes
- Fixed an issue that caused the [versioned](https://kurl.sh/docs/install-with-kurl/#versioned-releases) airgap installer to download incomplete packages for previous versions.

## Release v2021.05.26-2

Released on May 26, 2021

### Bug Fixes
- Fixed an issue that caused installations on Oracle Linux 8.4 to fail.

## Release v2021.05.26-1

Released on May 26, 2021

### Bug Fixes
- Fixed the release generator.
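
For reference, a minimal sketch of pinning the installer version with the field added in v2021.06.15-0 above (the installer name and pinned version are illustrative; the key casing here follows the other `spec.kurl` fields):

```yaml
apiVersion: cluster.kurl.sh/v1beta1
kind: Installer
metadata:
  name: my-installer
spec:
  kurl:
    # Pin the kURL installer to a specific release
    installerVersion: "v2021.06.15-0"
```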

## Release v2021.05.26-0

Released on May 26, 2021

### New Features
- Added Kubernetes versions 1.21.1, 1.20.7, 1.19.11, and 1.18.19.
- Added [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.5.11.
- Added [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.47.1-16.0.1.

### Improvements
- The [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) will now be upgraded to conform to the latest kURL spec installed.
- The version of runC included with Docker and Containerd has been upgraded to [v1.0.0-rc95](https://github.com/opencontainers/runc/releases/tag/v1.0.0-rc95).

### Bug Fixes
- Fixed an issue that caused the Grafana dashboard to fail to show graphs due to a misconfigured Prometheus service URL.

## Release v2021.05.24-0

Released on May 24, 2021

### New Features
- Added the ability to configure proxies for Velero backups.
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.42.1.

## Release v2021.05.21-1

Released on May 21, 2021

### Improvements
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.42.0.

## Release v2021.05.21-0

Released on May 21, 2021

### Improvements
- The [Longhorn](https://kurl.sh/docs/add-ons/longhorn) data directory permissions are now restricted to the root user.

### Bug Fixes
- Fixed an issue that prevented Rook 1.4.9+ from installing on Kubernetes 1.21.

## Release v2021.05.17-0

Released on May 17, 2021

### Improvements
- The following improvements have been made to prompts requiring user feedback:
  - For interactive terminal sessions, all prompts will no longer time out.
  - For non-interactive terminal sessions, all prompts that require user input will now fail.
  - For non-interactive terminal sessions, confirmation prompts will now automatically confirm or deny based on the default.
  - Preflight failures and warnings will no longer prompt to confirm or deny, and instead will continue.
  - Properties [`spec.kurl.ignoreRemoteLoadImagesPrompt`](https://kurl.sh/docs/install-with-kurl/advanced-options) and [`spec.kurl.ignoreRemoteUpgradePrompt`](https://kurl.sh/docs/install-with-kurl/advanced-options) have been added to the `kurl.sh/v1beta1.Installer` spec to bypass prompts for automation purposes.

### Bug Fixes
- Fixed an issue that could cause the node ready check to falsely report as successful, causing unforeseen issues with an installation.

## Release v2021.05.14-1

Released on May 14, 2021

### New Features
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.41.1.

## Release v2021.05.14-0

Released on May 14, 2021

### New Features
- kURL clusters can be configured to use [dedicated primary nodes](https://kurl.sh/docs/install-with-kurl/dedicated-primary) reserved for control-plane components.
- Added [Antrea add-on](https://kurl.sh/docs/add-ons/antrea) version 1.0.1.
- Added [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.15.1.

### Improvements
- The RPM install command will now suppress signature verification errors.

## Release v2021.05.07-1

Released on May 7, 2021

### New Features
- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.41.0.

### Improvements
- Allow the `WEAVE_TAG` environment variable to be specified to pin the Weave version when running the [reset task](https://kurl.sh/docs/install-with-kurl/managing-nodes#reset-a-node).

### Bug Fixes
- Fixed Weave iptables reset when running the [reset task](https://kurl.sh/docs/install-with-kurl/managing-nodes#reset-a-node).

## Release v2021.05.07-0

Released on May 7, 2021

### New Features
- Added the ability to specify a [release version](https://kurl.sh/docs/install-with-kurl/#versioned-releases) when running the kURL installer.
- Added [Longhorn add-on](https://kurl.sh/docs/add-ons/longhorn) version 1.1.1.

### Bug Fixes
- Fixed an issue with the [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) that would cause a node to hang on shutdown if there were any unmounted rbd devices.

================
File: docs/release-notes/rn-replicated-sdk.md
================
---
toc_max_heading_level: 2
pagination_next: null
pagination_prev: null
---

# Replicated SDK Release Notes

This topic contains release notes for the [Replicated SDK](/vendor/replicated-sdk-overview). The release notes list new features, improvements, bug fixes, known issues, and breaking changes.

## 1.1.1

Released on February 19, 2025

### Improvements {#improvements-1-1-1}
* Addresses CVE-2025-0665, CVE-2025-0725, and CVE-2024-12797.

## 1.1.0

Released on February 4, 2025

### New Features {#new-features-1-1-0}
* Adds the ability to pass custom labels to the Replicated SDK Helm chart via the `commonLabels` and `podLabels` Helm values (see the sketch below). For more information, see [Add Custom Labels](/vendor/replicated-sdk-customizing#add-custom-labels) in _Customizing the Replicated SDK_.

## 1.0.0

Released on December 23, 2024

This release removes the pre-release suffix from the version number.

## 1.0.0-beta.33

Released on December 23, 2024

### New Features {#new-features-1-0-0-beta-33}
* Adds support for setting `affinity` for the Replicated SDK deployment.
* Adds an `/app/status` [API](/reference/replicated-sdk-apis) that returns detailed application status information.
* Adds support for mocking `channelID`, `channelName`, `channelSequence`, and `releaseSequence` in the current release info returned by the `/app/info` API.

### Bug Fixes {#bug-fixes-1-0-0-beta-33}
* Fixes a bug that could result in an instance being reported as unavailable if the application includes an Ingress resource.

## 1.0.0-beta.32

Released on December 9, 2024

### Bug Fixes {#bug-fixes-1-0-0-beta-32}
* Fixes an issue that caused [custom metrics](/vendor/custom-metrics#configure-custom-metrics) to not be collected.

## 1.0.0-beta.31

Released on October 17, 2024

### New Features {#new-features-1-0-0-beta-31}
* Adds support for specifying a ClusterRole using the [clusterRole](/vendor/replicated-sdk-customizing#custom-clusterrole) key.

## 1.0.0-beta.30

Released on October 16, 2024

### New Features {#new-features-1-0-0-beta-30}
* Adds support for custom Certificate Authorities using the [privateCASecret](/vendor/replicated-sdk-customizing#custom-certificate-authority) key.

### Improvements {#improvements-1-0-0-beta-30}
* This release addresses CVE-2024-41110.

## 1.0.0-beta.29

Released on October 9, 2024

### New Features {#new-features-1-0-0-beta-29}
* Adds support for setting individual image name component values instead of the entire image: registry, repository, and tag.
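
As a reference for the `commonLabels` and `podLabels` values added in 1.1.0 above, here is a minimal sketch of a parent chart's `values.yaml` (this assumes the SDK subchart is named `replicated`, per the 1.0.0-beta.6 notes below; the label keys and values are illustrative):

```yaml
replicated:
  # Labels applied to the resources created by the SDK chart
  commonLabels:
    app.kubernetes.io/part-of: my-app
  # Labels applied to the SDK pods
  podLabels:
    example.com/monitoring: "enabled"
```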

## 1.0.0-beta.28

Released on September 20, 2024

### New Features {#new-features-1-0-0-beta-28}
* Adds support for custom Certificate Authorities using the [privateCAConfigmap](/vendor/replicated-sdk-customizing#custom-certificate-authority) key.

## 1.0.0-beta.27

Released on August 16, 2024

### Bug Fixes {#bug-fixes-1-0-0-beta-27}
* Fixes an issue that caused k0s to be reported as the distribution for Embedded Clusters.

## 1.0.0-beta.26

Released on July 31, 2024

### Bug Fixes {#bug-fixes-1-0-0-beta-26}
* Fixes an issue that caused k8s minor version parsing errors to be logged repeatedly.

## 1.0.0-beta.25

Released on July 3, 2024

### Bug Fixes {#bug-fixes-1-0-0-beta-25}
* Various bug fixes and refactoring of tests.

## 1.0.0-beta.24

Released on July 2, 2024

### Improvements {#improvements-1-0-0-beta-24}
* Adds caching and rate-limiting to the `/api/v1/app/custom-metrics` and `/api/v1/app/instance-tags` endpoints.
* Adds a ten-second default timeout to the SDK's HTTP client.

## 1.0.0-beta.23

Released on June 21, 2024

### New Features {#new-features-1-0-0-beta-23}
* Adds support for `PATCH` and `DELETE` methods on the [custom application metrics](/vendor/custom-metrics) endpoint: `/api/v1/app/custom-metrics`.

## 1.0.0-beta.22

Released on June 12, 2024

### Improvements {#improvements-1-0-0-beta-22}
* The `/app/info` and `/license/info` endpoints now return additional app and license info, respectively.
* Updates the SDK's support bundle spec to extract license, app, history, and release information with an exec collector.

## 1.0.0-beta.21

Released on June 6, 2024

### Bug Fixes {#bug-fixes-1-0-0-beta-21}
* Fixes an issue where the replicated pod logs collector could fail in environments with namespace-restricted RBAC.

## 1.0.0-beta.20

Released on May 14, 2024

### Bug Fixes {#bug-fixes-1-0-0-beta-20}
* Fixes an issue where the namespace fields in the support bundle spec were not quoted, which caused the linter to show schema warnings.

## 1.0.0-beta.19

Released on April 26, 2024

### New Features {#new-features-1-0-0-beta-19}
* Adds Supply-chain Levels for Software Artifacts (SLSA) attestation generation for the Replicated SDK image.

  For example, you can run the following to validate the attestation for the SDK image:
  ```bash
  cosign download attestation replicated/replicated-sdk:VERSION | jq -r .payload | base64 -d | jq
  ```
  Where `VERSION` is the target version of the SDK.

  You can also search Sigstore using Rekor at https://search.sigstore.dev/

## 1.0.0-beta.18

Released on April 26, 2024

### Improvements {#improvements-1-0-0-beta-18}
* Updates the Replicated SDK image to resolve CVE-2024-2961 with high severity, and CVE-2023-6237, CVE-2024-24557, and CVE-2023-45288 with medium severity.

## 1.0.0-beta.17

Released on April 8, 2024

### New Features {#new-features-1-0-0-beta-17}
* Adds a new [`POST /app/instance-tags`](/reference/replicated-sdk-apis#post-appinstance-tags) endpoint that allows an application to programmatically send instance tags to the vendor portal.

## 1.0.0-beta.16

Released on February 19, 2024

### New Features {#new-features-1-0-0-beta-16}
* Adds support for running the SDK on ARM64 nodes.

## 1.0.0-beta.15

Released on February 15, 2024

### Improvements {#improvements-1-0-0-beta-15}
* Upgrades the helm.sh/helm/v3 go module to 3.14.0 to resolve GHSA-7ww5-4wqc-m92c and GHSA-45x7-px36-x8w8 with medium severity.
* Upgrades the go version used to build the Replicated SDK to 1.21.7 to resolve CVE-2023-45285, CVE-2023-44487, CVE-2023-39325, and CVE-2023-39323 with high severity, and CVE-2023-39326, CVE-2023-39319, and CVE-2023-39318 with medium severity.

## 1.0.0-beta.14

Released on February 5, 2024

### Improvements {#improvements-1-0-0-beta-14}
* Adds `fsGroup` and `supplementalGroups` to the default PodSecurityContext for the Replicated SDK deployment.

## 1.0.0-beta.13

Released on January 2, 2024

### Improvements {#improvements-1-0-0-beta-13}
* Upgrades the helm.sh/helm/v3 go module to v3.13.3 to resolve CVE-2023-39325 and GHSA-m425-mq94-257g with high severity and CVE-2023-44487 and GHSA-jq35-85cj-fj4p with medium severity.

## 1.0.0-beta.12

Released on November 6, 2023

### New Features {#new-features-1-0-0-beta-12}
* Adds support for custom metrics in air gapped installs.

## 1.0.0-beta.11

Released on October 30, 2023

### New Features {#new-features-1-0-0-beta-11}
* Adds support for running in air gapped mode.
* Renames the `images.replicated` Helm value to `images.replicated-sdk`.

## 1.0.0-beta.10

Released on October 13, 2023

### Improvements {#improvements-1-0-0-beta-10}
* Adds support for adding custom tolerations to the SDK deployment via the `tolerations` value.
* Status informers will no longer be automatically generated if the user explicitly passes an empty array for the `statusInformers` value.

### Bug Fixes {#bug-fixes-1-0-0-beta-10}
* Fixes a bug that caused no status code to be returned from custom metrics API requests.

## 1.0.0-beta.9

Released on October 6, 2023

### Improvements {#improvements-1-0-0-beta-9}
* Adds support for setting additional environment variables in the replicated deployment via the `extraEnv` value.
* Updates the helm.sh/helm/v3 go module to v3.13.0 to resolve GHSA-6xv5-86q9-7xr8 with medium severity.

### Bug Fixes {#bug-fixes-1-0-0-beta-9}
* Fixes an issue where data returned from API endpoints and instance reporting was outdated after a chart was upgraded.

## 1.0.0-beta.8

Released on September 19, 2023

### Bug Fixes {#bug-fixes-1-0-0-beta-8}
* Fixes an issue where the `replicated` Pod/API failed to come up due to the inability to generate status informers if the application contains empty YAML documents, or documents that only have comments.

## 1.0.0-beta.7

Released on September 15, 2023

### Improvements {#improvements-1-0-0-beta-7}
* The [custom metrics](/vendor/custom-metrics#configure-custom-metrics) API no longer requires an authorization header.

## 1.0.0-beta.6

Released on September 7, 2023

### New Features {#new-features-1-0-0-beta-6}

Renames the SDK's Kubernetes resources and the library SDK chart from `replicated-sdk` to `replicated` to better align with standard SDK naming conventions.

The `replicated-sdk` naming convention is still supported, and existing integrations can continue to use `replicated-sdk` as the name of the SDK Kubernetes resources and SDK chart. However, Replicated recommends that new integrations use the `replicated` naming convention.

To update the naming convention of an existing integration from `replicated-sdk` to `replicated`, do the following before you upgrade to 1.0.0-beta.6 to avoid breaking changes:

* Update the dependencies entry for the SDK in the parent chart:

  ```yaml
  dependencies:
  - name: replicated
    repository: oci://registry.replicated.com/library
    version: 1.0.0-beta.6
  ```

* Update any requests to the SDK service in the cluster to use `replicated:3000` instead of `replicated-sdk:3000`.

* Update any automation that references the installation command for integration mode to `helm install replicated oci://registry.replicated.com/library/replicated --version 1.0.0-beta.6`.

* If the SDK's values are modified in the `values.yaml` file of the parent chart, change the field name for the SDK subchart in the `values.yaml` file from `replicated-sdk` to `replicated`.

* Change the field name of any values that are provided at runtime to the SDK from `replicated-sdk` to `replicated`. For example, `--set replicated.integration.enabled=false`.

For more information, see [About the Replicated SDK](/vendor/replicated-sdk-overview).

## 1.0.0-beta.5

Released on September 1, 2023

### New Features {#new-features-1-0-0-beta-5}
* Adds support for sending [custom application metrics](/vendor/custom-metrics) via the `/api/v1/app/custom-metrics` endpoint (see the sketch below).
* Adds support for installing the Helm chart by running `helm template` and then applying the generated manifests with `kubectl apply`. Limitations of this approach include:
  - The [app history endpoint](/reference/replicated-sdk-apis#get-apphistory) will always return an empty array because there is no Helm history in the cluster.
  - Status informers will not be automatically generated and would have to be provided via the [replicated-sdk.statusInformers](/vendor/insights-app-status#helm-installations) Helm value.

## 0.0.1-beta.4

Released on August 17, 2023

### New Features {#new-features-0-0-1-beta-4}
* Adds support for OpenShift clusters.

### Improvements {#improvements-0-0-1-beta-4}
* Application updates returned by the `/api/v1/app/updates` endpoint now show in order from newest to oldest.

## 0.0.1-beta.3

Released on August 11, 2023

### Bug Fixes {#bug-fixes-0-0-1-beta-3}
* Fixes an issue where generating a support bundle failed when using the Replicated SDK support bundle Secret in the Helm chart. The failure occurred due to a syntax issue where the `selector` field expected an array of strings instead of a map.

## 0.0.1-beta.2

Released on August 4, 2023

### New Features {#new-features-0-0-1-beta-2}
* Includes the application status as part of the [/app/info](/reference/replicated-sdk-apis#get-appinfo) endpoint response.

### Improvements {#improvements-0-0-1-beta-2}
* The replicated-sdk image is now built using a distroless base image from Chainguard, which significantly reduces the overall size and attack surface.

## 0.0.1-beta.1

Released on July 28, 2023

### Improvements {#improvements-0-0-1-beta-1}
* Renames the SDK's Kubernetes resources and the library SDK chart from `replicated` to `replicated-sdk` to distinguish them from other replicated components.
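
As a reference for the custom metrics endpoint added in 1.0.0-beta.5 above, a minimal sketch of sending metrics to the in-cluster SDK service (the `replicated:3000` service address follows the 1.0.0-beta.6 naming notes; the payload keys and values are illustrative):

```bash
# POST custom application metrics to the SDK running in the cluster
curl -X POST http://replicated:3000/api/v1/app/custom-metrics \
  -H "Content-Type: application/json" \
  -d '{"data": {"num_projects": 5, "active_users": 42}}'
```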

================
File: docs/release-notes/rn-vendor-platform.md
================
---
toc_max_heading_level: 2
pagination_next: null
pagination_prev: null
---

# Vendor Platform Release Notes

This topic contains release notes for the Replicated Vendor Platform, which includes the [Vendor Portal](/vendor/vendor-portal-creating-account), the [Replicated CLI](/reference/replicated-cli-installing), and [Compatibility Matrix](/vendor/testing-about). The release notes list new features, improvements, bug fixes, known issues, and breaking changes.

<!--RELEASE_NOTES_PLACEHOLDER-->

## v2025.03.08-0

Released on March 8, 2025

### Bug Fixes {#bug-fixes-v2025-03-08-0}
* Fixes an issue on the Compatibility Matrix **Usage History** page that caused the `pageSize` parameter to be set to an incorrect value.

## v2025.03.06-1

Released on March 6, 2025

### Bug Fixes {#bug-fixes-v2025-03-06-1}
* Updates the Download Portal to no longer show KOTS pre-releases.

## v2025.02.07-1

Released on February 7, 2025

### Bug Fixes {#bug-fixes-v2025-02-07-1}
* Fixes a bug that caused the behavior of the checkboxes for instance events filters to be reversed.

## v2025.02.06-2

Released on February 6, 2025

### Bug Fixes {#bug-fixes-v2025-02-06-2}
* Fixes a bug when viewing a release that prevented the **Help** sidebar from being reopened after it was closed.

## v2025.02.04-2

Released on February 4, 2025

### Bug Fixes {#bug-fixes-v2025-02-04-2}
* Fixes an issue on the Compatibility Matrix Usage History page that caused items to appear outside the selected date and time range.

## v2025.02.03-4

Released on February 3, 2025

### Bug Fixes {#bug-fixes-v2025-02-03-4}
* Fixes a bug that could cause private application images hosted in Docker Hub to be shown using anonymous commands in the [Download Portal](https://docs.replicated.com/vendor/helm-install-airgap).

## v2025.01.31-2

Released on January 31, 2025

### Bug Fixes {#bug-fixes-v2025-01-31-2}
* Updates the Helm instructions in the Download Portal to use the correct file name for `values.yaml` depending on whether there is more than one Helm chart in the given release. For releases with multiple Helm charts, the values file for each Helm chart is named according to the name of the chart. This avoids file name conflicts for users when downloading and editing each values file.

## v2025.01.31-1

Released on January 31, 2025

### New Features {#new-features-v2025-01-31-1}
* Adds a new `instance_kurl_install_started_at` column to the customer instance exports. `instance_kurl_install_started_at` is the date and time when the install for the given kURL instance was reported to start.

## v2025.01.30-0

Released on January 30, 2025

### New Features {#new-features-v2025-01-30-0}
* Adds a link to download Embedded Cluster installation assets in the Download Portal.
* Adds a button to log out of the Download Portal.

### Bug Fixes {#bug-fixes-v2025-01-30-0}
* Fixes a bug that would prevent demoting a channel release when it was the only release on the channel.
* Fixes a bug that could have marked the wrong release as active if the semantic version for a demoted release was reused by multiple releases on the given channel.

## v2025.01.29-4

Released on January 29, 2025

### Bug Fixes {#bug-fixes-v2025-01-29-4}
* Removes a duplicated section from the Download Portal.
* Fixes a bug where the app name would be missing from the app bundle header in the Download Portal.

## v2025.01.29-1

Released on January 29, 2025

### Bug Fixes {#bug-fixes-v2025-01-29-1}
* Fixes a bug that caused the "Next" button to be hidden on the support form.

## v2025.01.28-1

Released on January 28, 2025

### New Features {#new-features-v2025-01-28-1}
* Adds a `(demoted)` text label to any demoted channel releases in the Embedded Cluster install instructions accessed from the Vendor Portal customer manage page.

## v2025.01.27-0

Released on January 27, 2025

### New Features {#new-features-v2025-01-27-0}
* Adds support for demoting and un-demoting releases from the **Release History** page in the Vendor Portal.

## v2025.01.23-1

Released on January 23, 2025

### New Features {#new-features-v2025-01-23-1}
* Adds pagination and search to the **Channels** page in the Vendor Portal.

## v2025.01.17-3

Released on January 17, 2025

### New Features {#new-features-v2025-01-17-3}
* Compatibility Matrix: Adds `/v3/cmx/stats` to query historical Compatibility Matrix usage data. See [Get CMX usage stats](https://replicated-vendor-api.readme.io/reference/getcmxstats) in the Vendor API v3 documentation.

## v2025.01.15-4

Released on January 15, 2025

### New Features {#new-features-v2025-01-15-4}
* Shows the vendor's GitHub Collab repository in the Vendor Portal.

## v2025.01.06-5

Released on January 6, 2025

### New Features {#new-features-v2025-01-06-5}
* Adds the Vendor API v3 [/cmx/history](https://replicated-vendor-api.readme.io/reference/listcmxhistory) endpoint, which can be used to get historical data on Compatibility Matrix usage (see the example request below).

## v2025.01.06-2

Released on January 6, 2025

### Bug Fixes {#bug-fixes-v2025-01-06-2}
* Fixes a bug that could cause instances to not receive updates on [semver](/vendor/releases-about#semantic-versioning)-enabled channels when the [--app-version-label](/reference/kots-cli-install#usage) flag is used during the install.

## v2025.01.02-1

Released on January 2, 2025

### Bug Fixes {#bug-fixes-v2025-01-02-1}
* Fixes a bug that caused the Download Portal to display a blank screen.

## v2024.12.31-2

Released on December 31, 2024

### New Features {#new-features-v2024-12-31-2}
* Adds the ability to enable and disable [Development Mode](/vendor/replicated-sdk-development) per customer in the Replicated SDK.

## v2024.12.27-1

Released on December 27, 2024

### Bug Fixes {#bug-fixes-v2024-12-27-1}
* Fixes a bug that would cause the configured GitHub username to not show up on the Account Settings page when logging in with Google.

## v2024.12.17-1

Released on December 17, 2024

### New Features {#new-features-v2024-12-17-1}
* Compatibility Matrix: View your remaining credit balance using the `v3/cluster/status` endpoint via the `credit_balance` field. The value is in cents.

## v2024.12.11-5

Released on December 11, 2024

### Bug Fixes {#bug-fixes-v2024-12-11-5}
* Fixes a bug that would hide air gap instances on the **Customer Reporting** page even if they existed.

## v2024.12.11-1

Released on December 11, 2024

### New Features {#new-features-v2024-12-11-1}
* Downloaded support bundle file names will now include the customer name and the instance name or ID, if available.

## v2024.12.10-0

Released on December 10, 2024

### Bug Fixes {#bug-fixes-v2024-12-10-0}
* Compatibility Matrix: Fixes `update ttl` for VM-based clusters (including k3s, OpenShift, rke2, and so on).
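
As a reference for the `/cmx/history` endpoint added in v2025.01.06-5 above, a minimal sketch of a request (this assumes a Vendor API token in the `REPLICATED_API_TOKEN` environment variable and the standard Vendor API v3 base URL):

```bash
# List historical Compatibility Matrix usage for the team
curl -H "Authorization: $REPLICATED_API_TOKEN" \
  "https://api.replicated.com/vendor/v3/cmx/history"
```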

## v2024.12.06-4

Released on December 6, 2024

### Bug Fixes {#bug-fixes-v2024-12-06-4}
* Compatibility Matrix: Fixes `cluster ls` not taking the end time into account when including terminated clusters.

## v2024.12.06-2

Released on December 6, 2024

### Bug Fixes {#bug-fixes-v2024-12-06-2}
* Fixes a bug that could cause the Replicated CLI to fail to promote a new release to a channel.

## v2024.12.05-5

Released on December 5, 2024

### Bug Fixes {#bug-fixes-v2024-12-05-5}
* Compatibility Matrix: Displays time in the local timezone on the **Cluster History** page.

## v2024.12.04-2

Released on December 4, 2024

### Bug Fixes {#bug-fixes-v2024-12-04-2}
* Fixes a bug that could cause the "email is required for customers with helm install enabled" error when creating or updating customers.

## v2024.12.04-1

Released on December 4, 2024

### Bug Fixes {#bug-fixes-v2024-12-04-1}
* Compatibility Matrix: Fixes cluster assignment for EKS, AKS, GKE, and OKE when no warm clusters are available.

## v2024.12.02-2

Released on December 2, 2024

### Bug Fixes {#bug-fixes-v2024-12-02-2}
* Fixes a bug that could cause the [kURL Embedded Cluster](https://docs.replicated.com/vendor/licenses-install-types#install-types) option to be disabled for customers even though there is a kURL Installer spec available.

## v2024.12.02-0

Released on December 2, 2024

### New Features {#new-features-v2024-12-02-0}
* Adds support for the `kots.io/installer-only` annotation on Kubernetes specs (see the sketch below). For more information, see [kots.io/installer-only Annotation](/vendor/licenses-install-types#installer-only-annotation) in _Managing Install Types for a License (Beta)_.

## v2024.11.27-1

Released on November 27, 2024

### Bug Fixes {#bug-fixes-v2024-11-27-1}
* Fixes an issue where a KOTS release was incorrectly identified as a [Helm CLI-only](/vendor/licenses-install-types#install-types) release, preventing it from being promoted.

## v2024.11.27-0

Released on November 27, 2024

### Bug Fixes {#bug-fixes-v2024-11-27-0}
* Fixes a bug where the Helm install instructions in the [Download Portal](/vendor/helm-install-airgap) didn't use custom domains.

## v2024.11.26-6

Released on November 26, 2024

### Bug Fixes {#bug-fixes-v2024-11-26-6}
* Fixes a bug that caused the Customer Portal to show a blank screen when data was missing from an endpoint.

## v2024.11.26-2

Released on November 26, 2024

### Bug Fixes {#bug-fixes-v2024-11-26-2}
* Fixes a bug that caused images to be excluded from the Helm air gap install instructions.

## v2024.11.20-5

Released on November 20, 2024

### New Features {#new-features-v2024-11-20-5}
* Allows the user to edit cluster tags from the **Edit Cluster** page.

## v2024.11.20-2

Released on November 20, 2024

### Bug Fixes {#bug-fixes-v2024-11-20-2}
* Fixes a bug that could cause the [Channel installation command](/vendor/releases-about#channels-page) to use a kURL Installer other than the latest.

## v2024.11.18-0

Released on November 18, 2024

### Bug Fixes {#bug-fixes-v2024-11-18-0}
* Fixes a bug where the Helm install instructions would not appear on the **Customer** pages if the KOTS install license option was not enabled.

## v2024.11.13-0

Released on November 13, 2024

### Bug Fixes {#bug-fixes-v2024-11-13-0}
* Fixes a bug that could cause an error message similar to the following to display when attempting to update an existing customer: "This team cannot create customers with kurl install enabled".
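
As a reference for the annotation added in v2024.12.02-0 above, a minimal sketch of applying it to a spec's metadata (applying it to a kURL Installer spec is an assumption here, and the spec contents are illustrative; see the linked documentation for the exact resources it belongs on):

```yaml
apiVersion: cluster.kurl.sh/v1beta1
kind: Installer
metadata:
  name: my-installer
  annotations:
    # Marks this spec as installer-only, per the v2024.12.02-0 note
    kots.io/installer-only: "true"
```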

## v2024.11.12-4

Released on November 12, 2024

### Bug Fixes {#bug-fixes-v2024-11-12-4}
* Fixes a bug on the **Customer** page that caused the **Embedded Cluster Install Instructions** to be hidden when the Embedded Cluster install type was enabled for the license.

## v2024.11.12-2

Released on November 12, 2024

### Improvements {#improvements-v2024-11-12-2}
* Updates the styles and removes irrelevant content for errored clusters on the Compatibility Matrix Clusters page.

## v2024.11.11-0

Released on November 11, 2024

### Improvements {#improvements-v2024-11-11-0}
* Compatibility Matrix: Clusters in error will remain visible for about 5 minutes before being moved to the cluster history.

### Bug Fixes {#bug-fixes-v2024-11-11-0}
* Fixes an exception raised when submitting a support case without a GitHub username.
* When downloading an Embedded Cluster installation asset, a 400 status code and message will now be returned when an air gap bundle does not exist and `airgap=true` is set in the URL.

## v2024.11.06-1

Released on November 6, 2024

### Bug Fixes {#bug-fixes-v2024-11-06-1}
* Fixes a bug in the Helm Install Instructions modal when entering an email address.

## v2024.11.01-1

Released on November 1, 2024

### Bug Fixes {#bug-fixes-v2024-11-01-1}
* Fixes the default license install options when creating a new license.

## v2024.10.28-3

Released on October 28, 2024

### Bug Fixes {#bug-fixes-v2024-10-28-3}
* Fixes a bug that could cause the **Customer Email** field to be required.

## v2024.10.25-8

Released on October 25, 2024

### Bug Fixes {#bug-fixes-v2024-10-25-8}
* Fixes a bug where users could not create a new customer when there are required license fields.

## v2024.10.25-3

Released on October 25, 2024

### Improvements {#improvements-v2024-10-25-3}
* Adds the GitHub issue URL to the feature request confirmation modal.

## v2024.10.24-2

Released on October 24, 2024

### Improvements {#improvements-v2024-10-24-2}
* Renames "Embedded cluster" to "Embedded Kubernetes" and "Bring my own cluster" to "Bring my own Kubernetes" in the Download Portal side bar.

## v2024.10.23-6

Released on October 23, 2024

### Bug Fixes {#bug-fixes-v2024-10-23-6}
* Compatibility Matrix: Fixes a bug where the `--min-nodes` count could be specified without specifying the `--max-nodes` count (see the sketch below).

## v2024.10.23-3

Released on October 23, 2024

### New Features {#new-features-v2024-10-23-3}
* Compatibility Matrix: Oracle Kubernetes Engine (OKE) is now available in Beta.

## v2024.10.17-1

Released on October 17, 2024

### Improvements {#improvements-v2024-10-17-1}
* Makes the wording and styling consistent on the Helm and Embedded Cluster install instructions modals.
* Presents users a new step to update their GitHub username when creating support cases or feature requests.

## v2024.10.16-0

Released on October 16, 2024

### Improvements {#improvements-v2024-10-16-0}
* Capitalizes "Embedded Cluster" in the support workflow.
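
As a reference for the node-count flags named in the v2024.10.23-6 note above, a minimal sketch of creating a Compatibility Matrix cluster with both bounds set (the distribution and counts are illustrative):

```bash
replicated cluster create --distribution eks --min-nodes 2 --max-nodes 4
```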

## v2024.10.10-5

Released on October 10, 2024

### Bug Fixes {#bug-fixes-v2024-10-10-5}
* Adds the "Copy URL" button for the Download Portal link back into the Download Portal section of the **Customer Reporting** page.

## v2024.10.01-0

Released on October 1, 2024

### New Features {#new-features-v2024-10-01-0}
* Compatibility Matrix: Adds API support for VM `create`, `versions`, `update ttl`, and `ls`.

## v2024.09.30-2

Released on September 30, 2024

### Bug Fixes {#bug-fixes-v2024-09-30-2}
* Fixes a bug that could cause release notes to not be shown on the [Channel History](/vendor/releases-about#properties) page.

## v2024.09.27-4

Released on September 27, 2024

### Bug Fixes {#bug-fixes-v2024-09-27-4}
* Fixes a bug where you could not disable the Embedded Cluster license entitlement once it was enabled.
* Fixes a bug that would show an error when estimating the cost of a Compatibility Matrix cluster, even when the request was successful.

## v2024.09.27-1

Released on September 27, 2024

### Bug Fixes {#bug-fixes-v2024-09-27-1}
* Fixes a bug in the Customer Portal that resulted in an "Unauthorized" message when downloading the Embedded Cluster installer.

## v2024.09.26-4

Released on September 26, 2024

### Improvements {#improvements-v2024-09-26-4}
* Improves styles to make it more obvious that a license ID is required when creating a cluster using Embedded Cluster on the Compatibility Matrix Create Cluster page and modal (see the sketch below).

## v2024.09.26-2

Released on September 26, 2024

### New Features {#new-features-v2024-09-26-2}
* Compatibility Matrix: Alpha support for creating clusters of Ubuntu servers (20.04).

## v2024.09.25-2

Released on September 25, 2024

### New Features {#new-features-v2024-09-25-2}
* Adds the ability to remove a node group in the Compatibility Matrix Create Cluster page and modal.

## v2024.09.25-1

Released on September 25, 2024

### New Features {#new-features-v2024-09-25-1}
* Adds persistent sessions in the Customer Portal.

### Bug Fixes {#bug-fixes-v2024-09-25-1}
* Fixes an issue that caused registry proxy authentication requests to be redirected to proxy.replicated.com instead of the custom hostname when one is configured.

## v2024.09.18-3

Released on September 18, 2024

### New Features {#new-features-v2024-09-18-3}
* Removes the "Helm-only" release option from the releases page.

## v2024.09.18-2

Released on September 18, 2024

### Improvements {#improvements-v2024-09-18-2}
* Compatibility Matrix: Improved error handling when creating clusters for Embedded Cluster.

## v2024.09.17-1

Released on September 17, 2024

### Bug Fixes {#bug-fixes-v2024-09-17-1}
* Adds a PATCH method for updating channels.

## v2024.09.17-0

Released on September 17, 2024

### Bug Fixes {#bug-fixes-v2024-09-17-0}
* Fixes updating the custom domain override on channels in the Vendor Portal.

## v2024.09.13-1

Released on September 13, 2024

### New Features {#new-features-v2024-09-13-1}
* Compatibility Matrix: Adds Alpha support for Embedded Cluster multinode.

## v2024.09.13-0

Released on September 13, 2024

### Bug Fixes {#bug-fixes-v2024-09-13-0}
* The correct error status code is now returned when downloading an Embedded Cluster release fails.

## v2024.09.12-3

Released on September 12, 2024

### New Features {#new-features-v2024-09-12-3}
* Compatibility Matrix: Adds new instance shapes for the OKE (Oracle) distribution.
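
As a reference for the license ID requirement called out in v2024.09.26-4 above, a minimal sketch of creating such a cluster from the CLI (this assumes the `embedded-cluster` distribution name and the `--license-id` flag; the license ID value is a placeholder):

```bash
replicated cluster create --distribution embedded-cluster --license-id LICENSE_ID
```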

## v2024.09.11-2

Released on September 11, 2024

### Bug Fixes {#bug-fixes-v2024-09-11-2}
* Compatibility Matrix: Fixes the "Update TTL" section of the Edit Cluster page and disallows setting a lower TTL than one that was previously configured.
* Compatibility Matrix: Fixes an issue where you could not purchase more usage credits if you had a zero credit balance.

### Improvements {#improvements-v2024-09-11-2}
* Compatibility Matrix: Improves styles of the Edit Tags modal on the Cluster History page.

## v2024.09.09-0

Released on September 9, 2024

### New Features {#new-features-v2024-09-09-0}
* Adds support for setting custom passwords for the customer's Download Portal. See [Sharing Assets Through the Download Portal](/vendor/releases-share-download-portal).

## v2024.09.05-3

Released on September 5, 2024

### New Features {#new-features-v2024-09-05-3}
* Compatibility Matrix: Adds support for EKS EC2 instance types m5 and c5.

## v2024.09.04-0

Released on September 4, 2024

### New Features {#new-features-v2024-09-04-0}
* Compatibility Matrix: Adds the capability to create ws and wss tunnels from the web UI.

## v2024.08.30-0

Released on August 30, 2024

### New Features {#new-features-v2024-08-30-0}
* After uploading a support bundle, if instance insights detects a Kubernetes distribution/version combination, the distribution and version will be preloaded when creating a cluster with Compatibility Matrix.

## v2024.08.28-0

Released on August 28, 2024

### Bug Fixes {#bug-fixes-v2024-08-28-0}
* Clicking the docs link in the Compatibility Matrix versions available modal now opens related documentation in a new window.

## v2024.08.23-2

Released on August 23, 2024

### New Features {#new-features-v2024-08-23-2}
* Adds a new `channels` column containing a basic channels JSON blob to the customers CSV export.
* Adds a new `customer_channels` object containing a basic channels JSON blob to the customer instances CSV exports.
* Adds a `channels` object to the customer instances JSON export.

## v2024.08.20-5

Released on August 20, 2024

### New Features {#new-features-v2024-08-20-5}
* Adds support for the [`dropdown`](/reference/custom-resource-config#dropdown) and [`radio`](/reference/custom-resource-config#radio) Config item types in the Config preview.

## v2024.08.20-4

Released on August 20, 2024

### Bug Fixes {#bug-fixes-v2024-08-20-4}
* Fixes a bug that caused Embedded Cluster installation artifacts to not be shown in the Download Portal.

## v2024.08.19-1

Released on August 19, 2024

### New Features {#new-features-v2024-08-19-1}
* Updates the Embedded Cluster install instructions to use the custom domain when applicable.

## v2024.08.15-2

Released on August 15, 2024

### New Features {#new-features-v2024-08-15-2}
* Adds a PATCH method for the `/v3/customer/:customerId` path that allows updating a customer without having to resend the entire customer object (see the example request below).

## v2024.08.13-1

Released on August 13, 2024

### Bug Fixes {#bug-fixes-v2024-08-13-1}
* The Proxy Registry now includes scope in the WWW-Authenticate auth challenge response header. This fixes support for the Registry Proxy as a Sonatype Nexus Docker proxy.

## v2024.08.12-0

Released on August 12, 2024

### Improvements {#improvements-v2024-08-12-0}
* Streamlines the design of the rows on the **Customers** page hybrid view, as well as the customer info header on the **Manage Customer** and **Customer Reporting** pages.
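
As a reference for the PATCH method added in v2024.08.15-2 above, a minimal sketch of updating a single customer field (this assumes a Vendor API token in `REPLICATED_API_TOKEN`; the customer ID and the field being changed are illustrative):

```bash
curl -X PATCH \
  -H "Authorization: $REPLICATED_API_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"name": "Example Customer"}' \
  "https://api.replicated.com/vendor/v3/customer/CUSTOMER_ID"
```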

## v2024.08.09-5

Released on August 9, 2024

### Bug Fixes {#bug-fixes-v2024-08-09-5}
* Fixes an issue that could cause anonymous image pulls from proxy.replicated.com to fail to resume when interrupted.

## v2024.08.09-0

Released on August 9, 2024

### New Features {#new-features-v2024-08-09-0}
* The Compatibility Matrix cluster usage endpoint now also includes `channel_id`, `channel_sequence`, and `version_label`.

## v2024.08.06-0

Released on August 6, 2024

### Bug Fixes {#bug-fixes-v2024-08-06-0}
* Fixes a bug that caused the /require-2fa page to render blank.

## v2024.08.01-0

Released on August 1, 2024

### Improvements {#improvements-v2024-08-01-0}
* Updates the Embedded Cluster install instructions to include relevant flags for showing server errors if the release download fails.

## v2024.07.24-0

Released on July 24, 2024

### New Features {#new-features-v2024-07-24-0}
* Adds an "Estimate cluster cost" button on the **Create a cluster** page for Compatibility Matrix.

### Bug Fixes {#bug-fixes-v2024-07-24-0}
* Fixes inconsistent lint results when editing KOTS releases.

## v2024.07.23-1

Released on July 23, 2024

### New Features {#new-features-v2024-07-23-1}
* Adds the `--dry-run` flag for Compatibility Matrix, which shows the estimated cost of a cluster before you create the cluster (see the sketch below).

## v2024.07.22-2

Released on July 22, 2024

### Bug Fixes {#bug-fixes-v2024-07-22-2}
* Fixes a bug where customer channels were not being updated when using the Replicated CLI.

## v2024.07.22-0

Released on July 22, 2024

### Improvements {#improvements-v2024-07-22-0}
* Improves responsive styles on the Compatibility Matrix **Create Cluster** page and on the **Troubleshoot** page.

## v2024.07.19-4

Released on July 19, 2024

### New Features {#new-features-v2024-07-19-4}
* Adds Compatibility Matrix support for port exposure using websockets.

## v2024.07.19-3

Released on July 19, 2024

### New Features {#new-features-v2024-07-19-3}
* Enables the "Buy $500 additional credits" button on the **Compatibility Matrix** page for any vendor with a valid contract.

## v2024.07.19-0

Released on July 19, 2024

### New Features {#new-features-v2024-07-19-0}
* Adds Compatibility Matrix support for ARM-based nodes in Oracle OKE.

## v2024.07.15-0

Released on July 15, 2024

### New Features {#new-features-v2024-07-15-0}
* Adds a dropdown to select a specific release in the Embedded Cluster installation instructions dialog.

## v2024.07.09-0

Released on July 9, 2024

### Improvements {#improvements-v2024-07-09-0}
* UI improvements for the Embedded Cluster installation instructions.

## v2024.07.08-0

Released on July 8, 2024

### Bug Fixes {#bug-fixes-v2024-07-08-0}
* Fixes Oracle Compatibility Matrix pricing, which now follows Oracle's cost estimator.

## v2024.06.26-4

Released on June 26, 2024

### New Features {#new-features-v2024-06-26-4}
* Adds a new "Upcoming license expiration" section to the Dashboard page.

## v2024.06.25-1

Released on June 25, 2024

### Bug Fixes {#bug-fixes-v2024-06-25-1}
* Uses the correct Embedded Cluster icon on the customer page.
* The Release API now returns a 400 with a more descriptive error message when a release includes duplicate chart names.

## v2024.06.24-1

Released on June 24, 2024

### Bug Fixes {#bug-fixes-v2024-06-24-1}
* The Replicated proxy registry no longer requires access to proxy-auth.replicated.com.
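
As a reference for the `--dry-run` flag added in v2024.07.23-1 above, a minimal sketch of estimating a cluster's cost without creating it (the distribution and instance type are illustrative):

```bash
replicated cluster create --distribution eks --instance-type m5.large --dry-run
```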

## v2024.06.24-0

Released on June 24, 2024

### Improvements {#improvements-v2024-06-24-0}
* The support form product list renames **Troubleshoot** to **Support bundles and preflights**.

## v2024.06.21-2

Released on June 21, 2024

### New Features {#new-features-v2024-06-21-2}
* Adds the ability to pull public images through the proxy registry without credentials using the prefix `proxy.replicated.com/anon`. For example, `docker pull proxy.replicated.com/anon/docker.io/library/mysql:latest`.

## v2024.06.17-1

Released on June 17, 2024

### New Features {#new-features-v2024-06-17-1}
* Replicated SDK support bundle details are now visible in Troubleshoot.

## v2024.06.13-0

Released on June 13, 2024

### New Features {#new-features-v2024-06-13-0}
* Adds a direct link to the **License Fields** page from the **Manage Customer** and **Create New Customer** pages if the user has no custom license fields configured under the "Custom fields" section.

## v2024.06.12-0

Released on June 12, 2024

### Improvements {#improvements-v2024-06-12-0}
* Improves mobile styles on the table views on the **Customers** and **Channels** pages, as well as some mobile styles on the **Releases** page.

## v2024.05.30-7

Released on May 30, 2024

### Bug Fixes {#bug-fixes-v2024-05-30-7}
* Fixes an incorrectly displayed "No records to display" message, which appeared on the **Cluster History** page while loading data.

## v2024.05.30-5

Released on May 30, 2024

### New Features {#new-features-v2024-05-30-5}
* Adds Sonatype Nexus Repository to the list of providers on the **Images** page.
* Adds support for linking and proxying images from anonymous registries.

## v2024.05.28-3

Released on May 28, 2024

### New Features {#new-features-v2024-05-28-3}
* Adds support for Oracle OKE 1.29.

### Bug Fixes {#bug-fixes-v2024-05-28-3}
* Fixes Compatibility Matrix available credits rounding.

## v2024.05.28-0

Released on May 28, 2024

### Bug Fixes {#bug-fixes-v2024-05-28-0}
* Users can create GitHub support tickets with large support bundle analysis results.

## v2024.05.24-6

Released on May 24, 2024

### New Features {#new-features-v2024-05-24-6}
* Adds support for the Sonatype Nexus registry.

## v2024.05.24-2

Released on May 24, 2024

### Bug Fixes {#bug-fixes-v2024-05-24-2}
* Fixes a bug that caused the version string for the Replicated SDK chart to have an invalid "v" prefix.

## v2024.05.23-2

Released on May 23, 2024

### Bug Fixes {#bug-fixes-v2024-05-23-2}
* Adds validation to the Compatibility Matrix object-store add-on bucket prefix input.

## v2024.05.21-1

Released on May 21, 2024

### New Features {#new-features-v2024-05-21-1}
* Adds API support for Oracle Cloud Infrastructure Container Engine for Kubernetes (OKE) to Compatibility Matrix.

### Bug Fixes {#bug-fixes-v2024-05-21-1}
* Fixes a bug where users could not restore password policies to default.
* Disables the edit and archive channel options and displays helpful hover text on the **Channels** page table view when the user does not have permission to edit channels.
* Fixes a bug that caused "airgap:true" or "airgap:false" customer searches to fail with error 500.

## v2024.05.21-0

Released on May 21, 2024

### New Features {#new-features-v2024-05-21-0}
* Compatibility Matrix automatically sends an email notification to team admins when a team is low on credits.

## v2024.05.20-1

Released on May 20, 2024

### New Features {#new-features-v2024-05-20-1}
* Adds support for IP dual-stack Kind clusters to Compatibility Matrix.

## v2024.05.16-3

Released on May 16, 2024

### Bug Fixes {#bug-fixes-v2024-05-16-3}
* Fixes an issue that would cause Embedded Cluster installs to fail with error 404 when downloading public files.

## v2024.05.14-2

Released on May 14, 2024

### New Features {#new-features-v2024-05-14-2}
* Adds Beta support for collecting telemetry from instances running in air gap environments with no outbound internet access. For more information, see [Collecting Telemetry for Air Gap Instances](/vendor/telemetry-air-gap).

### Improvements {#improvements-v2024-05-14-2}
* Allows installations with the Helm CLI to upload a support bundle on the **Customer Reporting** page.
* Improves mobile responsiveness of the sign up and login flow in the vendor portal.

## v2024.05.14-1

Released on May 14, 2024

### Bug Fixes {#bug-fixes-v2024-05-14-1}
* Fixes a bug that would cause downloaded licenses to not include the custom hostname in the `endpoint` field.

## v2024.05.10-1

Released on May 10, 2024

### New Features {#new-features-v2024-05-10-1}
* Adds support for creating Compatibility Matrix ports with wildcard domains and TLS certificates.

## v2024.05.10-0

Released on May 10, 2024

### Improvements {#improvements-v2024-05-10-0}
* Moves release information for the bundle under "Versions Behind" on the **Support Bundle Analysis** page.

### Bug Fixes {#bug-fixes-v2024-05-10-0}
* Fixes a bug where product options were not updated correctly when changing the installation type in the create-a-support-issue modal.

## v2024.05.08-0

Released on May 8, 2024

### New Features {#new-features-v2024-05-08-0}
* Adds a "Not Recommended" label to the "GitOps Enabled" option on the **Manage Customer** and **Create New Customer** pages.
* Improves the Airgap Bundle Contents modal size for long image names.
* Shows the Replicated domain next to the headers on the **Custom Domains** page.

### Bug Fixes {#bug-fixes-v2024-05-08-0}
* Removes native sorting on the Customers and Instances tables.

## v2024.05.06-2

Released on May 6, 2024

### Bug Fixes {#bug-fixes-v2024-05-06-2}
* Adds validation when creating and deleting license fields.

## v2024.05.06-1

Released on May 6, 2024

### New Features {#new-features-v2024-05-06-1}
* Adds additional bundle validation when uploading a support bundle for air gap telemetry (alpha feature).

## v2024.05.03-1

Released on May 3, 2024

### Bug Fixes {#bug-fixes-v2024-05-03-1}
* Fixes an issue that caused Compatibility Matrix add-ons to stay in a pending state for multi-node clusters.

## v2024.05.01-2

Released on May 1, 2024

### New Features {#new-features-v2024-05-01-2}
* Adds support for creating RKE2 clusters with Compatibility Matrix using the vendor portal UI.

## v2024.04.29-0

Released on April 29, 2024

### New Features {#new-features-v2024-04-29-0}
* Adds support for creating RKE2 clusters with Compatibility Matrix using the Vendor API v3.

## v2024.04.26-5

Released on April 26, 2024

### Bug Fixes {#bug-fixes-v2024-04-26-5}
* Fixes Embedded Cluster support on the Compatibility Matrix create cluster page.

## v2024.04.26-3

Released on April 26, 2024

### Bug Fixes {#bug-fixes-v2024-04-26-3}
* Displays an error when creating an embedded cluster with Compatibility Matrix and the `--version` flag is a non-numeric string.
+
+## v2024.04.26-1
+
+Released on April 26, 2024
+
+### Bug Fixes {#bug-fixes-v2024-04-26-1}
+* Only users with the `team/support-issues/write` RBAC policy can submit support tickets on the **Support Bundle Analysis** page.
+
+## v2024.04.25-0
+
+Released on April 25, 2024
+
+### Bug Fixes {#bug-fixes-v2024-04-25-0}
+* Users can sort customers by the date they were created on the **Customers** page.
+
+## v2024.04.23-1
+
+Released on April 23, 2024
+
+### Bug Fixes {#bug-fixes-v2024-04-23-1}
+* When a user selects a customer-supplied Kubernetes cluster in the support form, the end of life (EOL) alert about the deprecated Docker and Weave kURL add-ons no longer applies for the latest channel kURL installer.
+
+## v2024.04.22-1
+
+Released on April 22, 2024
+
+### Bug Fixes {#bug-fixes-v2024-04-22-1}
+* Fixes a bug with the 'Reset' filters button on the **Customers** page.
+
+## v2024.04.18-2
+
+Released on April 18, 2024
+
+### Bug Fixes {#bug-fixes-v2024-04-18-2}
+* Fixes styling on the 'Reset password' modal and 'Trial expired' modal on the **Login** page.
+* Fixes a stray '0' rendering under the "Latest Release" sections on the **Channels** page for Builders Plan users.
+
+
+## v2024.04.16-1
+
+Released on April 16, 2024
+
+### New Features {#new-features-v2024-04-16-1}
+* Adds support for Postgres as an add-on for EKS clusters in the compatibility matrix.
+
+## v2024.04.12-5
+
+Released on April 12, 2024
+
+### New Features {#new-features-v2024-04-12-5}
+* Adds the ability to expose NodePorts on VM clusters in the compatibility matrix.
+* Adds the ability to attach new S3 buckets to EKS clusters in the compatibility matrix.
+
+## v2024.04.11-2
+
+Released on April 11, 2024
+
+### Bug Fixes {#bug-fixes-v2024-04-11-2}
+* Eliminates excessive page reloads on the **Support Bundle Analysis** page that would cause users to lose their place.
+
+## v2024.04.11-1
+
+Released on April 11, 2024
+
+### Bug Fixes {#bug-fixes-v2024-04-11-1}
+* Fixes the selected default instance type on Compatibility Matrix.
+
+## v2024.04.11-0
+
+Released on April 11, 2024
+
+### Bug Fixes {#bug-fixes-v2024-04-11-0}
+* Fixes an issue that prevented add-ons on multi-node compatibility matrix clusters from working properly.
+
+## v2024.04.10-0
+
+Released on April 10, 2024
+
+### New Features {#new-features-v2024-04-10-0}
+* Allows severity 1 and 2 support issues to be submitted for Replicated hosted services, including the compatibility matrix and vendor portal.
+
+### Improvements {#improvements-v2024-04-10-0}
+* Highlights required fields that are not filled on the support issue form on the **Support** page.
+
+## v2024.04.09-2
+
+Released on April 9, 2024
+
+### New Features {#new-features-v2024-04-09-2}
+* Adds an advanced cluster creation form for the compatibility matrix.
+
+## v2024.04.04-0
+
+Released on April 4, 2024
+
+### New Features {#new-features-v2024-04-04-0}
+* Adds channel sequence and updates row styles on the **Release History** page.
+
+## v2024.04.02-2
+
+Released on April 2, 2024
+
+### Bug Fixes {#bug-fixes-v2024-04-02-2}
+* Fixes an issue that caused collisions in kubeconfig context naming when using the `replicated cluster kubeconfig` command, resulting in contexts being overwritten.
+
+## v2024.04.01-3
+
+Released on April 1, 2024
+
+### New Features {#new-features-v2024-04-01-3}
+* Makes the granular resource status view generally available (GA). For more information, see [Instance Details](/vendor/instance-insights-details#current-state).
+
+## v2024.03.27-3
+
+Released on March 27, 2024
+
+### Improvements {#improvements-v2024-03-27-3}
+* Moves the **Audit Log** page to be nested under the **Team** section. Shows a message to users who visit the **Audit Log** from the account dropdown in the top right, informing them that the **Audit Log** will be moving permanently to the **Team** section in the near future.
+
+## v2024.03.27-1
+
+Released on March 27, 2024
+
+### New Features {#new-features-v2024-03-27-1}
+* Allows users to attach both existing support bundles and upload new bundles on the support request form on the **Support** page.
+* Displays the latest release in the channel at the time of bundle collection and the release sequence that was installed at the time of bundle collection on **Support Bundle Analysis** pages.
+
+## v2024.03.27-0
+
+Released on March 27, 2024
+
+### Bug Fixes {#bug-fixes-v2024-03-27-0}
+* Shows certificate errors on the **Custom Domains** page if certificates cannot be renewed.
+
+## v2024.03.26-5
+
+Released on March 26, 2024
+
+### New Features {#new-features-v2024-03-26-5}
+* Compatibility matrix supports Standard_DS and GPU-based instance types for AKS clusters.
+
+### Improvements {#improvements-v2024-03-26-5}
+* Removes the "Download license" and "Install Instructions" buttons from the **Instance Details** page, as they are not relevant on that page.
+
+## v2024.03.26-1
+
+Released on March 26, 2024
+
+### Improvements {#improvements-v2024-03-26-1}
+* Changes the **Instances** option in the **Download CSV** dropdown on the **Customers** page to **Customers + Instances** to better communicate that it is a superset that contains both customers *and* instances.
+
+## v2024.03.25-0
+
+Released on March 25, 2024
+
+### New Features {#new-features-v2024-03-25-0}
+* Adds a **View bundle contents** link on airgap bundles that have a warning status on the vendor portal **Release History** page.
+
+## v2024.03.22-1
+
+Released on March 22, 2024
+
+### Improvements {#improvements-v2024-03-22-1}
+* Hides the "View bundle contents" link on the **Release History** page if an airgap bundle contains no images. To view image lists, rebuild your bundle.
+
+## v2024.03.21-8
+
+Released on March 21, 2024
+
+### Bug Fixes {#bug-fixes-v2024-03-21-8}
+* Fixes an issue where online embedded cluster downloads failed if airgap download was not enabled for the customer/license.
+
+## v2024.03.21-5
+
+Released on March 21, 2024
+
+### New Features {#new-features-v2024-03-21-5}
+* Adds the ability to view more granular app status updates in the Instance Activity section on the **Instance Details** page via a tooltip. To get access to this feature, log in to your vendor portal account, select **Support > Request a feature**, and submit a feature request for "granular app status view".
+* Adds a **View bundle contents** link on the **Release History** page to view a list of images in a given airgap bundle.
+
+  :::note
+  This link appears only for releases built or rebuilt after this implementation.
+  :::
+
+## v2024.03.21-3
+
+Released on March 21, 2024
+
+### Bug Fixes {#bug-fixes-v2024-03-21-3}
+* Fixes pagination on the compatibility matrix **Cluster History** page.
+
+## v2024.03.21-1
+
+Released on March 21, 2024
+
+### Bug Fixes {#bug-fixes-v2024-03-21-1}
+* Fixes a bug that could cause the **Channels** page table view to fail to load.
+
+## v2024.03.21-0
+
+Released on March 21, 2024
+
+### Bug Fixes {#bug-fixes-v2024-03-21-0}
+* Fixes a bug that could cause the compatibility matrix **Cluster History** page to fail to load.
+
+## v2024.03.20-0
+
+Released on March 20, 2024
+
+### New Features {#new-features-v2024-03-20-0}
+* Adds a new cluster add-on API.
+
+### Bug Fixes {#bug-fixes-v2024-03-20-0}
+* Fixes a bug where users with a "proton.me" email domain could enable auto-join for their team.
+
+## v2024.03.18-1
+
+Released on March 18, 2024
+
+### Bug Fixes {#bug-fixes-v2024-03-18-1}
+* Adds a **Helm CLI** option to the **Install Commands** modal on the **Release History** page.
+* Fixes an issue that could cause a draft KOTS release to not contain KOTS specs by default.
+
+## v2024.03.15-2
+
+Released on March 15, 2024
+
+### Bug Fixes {#bug-fixes-v2024-03-15-2}
+* Fixes a styling bug in the granular app status tooltip.
+
+## v2024.03.14-2
+
+Released on March 14, 2024
+
+### Bug Fixes {#bug-fixes-v2024-03-14-2}
+* Corrects the `helm package` command provided in the **Add the Replicated SDK to your Helm Chart** dialog.
+
+## v2024.03.14-1
+
+Released on March 14, 2024
+
+### New Features {#new-features-v2024-03-14-1}
+* Adds the ability to view a more granular app status via a tooltip on the **Instance Details** page. To get access to this feature, log in to your vendor portal account, select **Support > Request a feature**, and submit a feature request for "granular app status view".
+
+  :::note
+  Due to a backend API fix, if the application's status informers are templatized, there might be formatting issues until another app release is promoted.
+  :::
+
+## v2024.03.14-0
+
+Released on March 14, 2024
+
+### Improvements {#improvements-v2024-03-14-0}
+* Returns a friendly error message when attempting to download an embedded cluster release with an unknown version.
+
+## v2024.03.13-0
+
+Released on March 13, 2024
+
+### New Features {#new-features-v2024-03-13-0}
+* Adds the ability to search customers by their email address. For more information, see [Filter and Search Customers](/vendor/releases-creating-customer#filter-and-search-customers) in _Creating and Managing Customers_.
+
+## v2024.03.12-1
+
+Released on March 12, 2024
+
+### Improvements {#improvements-v2024-03-12-1}
+* Makes the **Gitops Enabled** entitlement false by default when creating a customer. Also updates the description of the **Gitops Enabled** entitlement.
+
+## v2024.03.11-0
+
+Released on March 11, 2024
+
+### Bug Fixes {#bug-fixes-v2024-03-11-0}
+* Fixes a bug that could result in a bad URL when downloading an airgap bundle for Replicated kURL from the download portal.
+
+## v2024.03.08-3
+
+Released on March 8, 2024
+
+### Bug Fixes {#bug-fixes-v2024-03-08-3}
+* Fixes a bug in the vendor portal UI related to allowing license download when a channel does not have a release.
+
+## v2024.03.08-2
+
+Released on March 8, 2024
+
+### New Features {#new-features-v2024-03-08-2}
+* Adds support for the E2 family and GPU Tesla T4 on GKE clusters created with the compatibility matrix.
+
+## v2024.03.07-5
+
+Released on March 7, 2024
+
+### Bug Fixes {#bug-fixes-v2024-03-07-5}
+* Fixes a bug that caused the "An unknown actor performed the action" message to be shown in the Audit Log.
+
+## v2024.03.07-0
+
+Released on March 7, 2024
+
+### New Features {#new-features-v2024-03-07-0}
+* Adds the Replicated embedded cluster (Beta) distribution to the compatibility matrix. For more information, see [Using Embedded Cluster](/vendor/embedded-overview).
+
+## v2024.03.06-3
+
+Released on March 6, 2024
+
+### New Features {#new-features-v2024-03-06-3}
+* Adds node autoscaling for EKS, GKE, and AKS clusters created with the compatibility matrix.
+
+## v2024.02.29-3
+
+Released on February 29, 2024
+
+### New Features {#new-features-v2024-02-29-3}
+* Adds support for nodegroups to compatibility matrix clusters that use VM-based Kubernetes distributions and support multinode.
+
+## v2024.02.29-0
+
+Released on February 29, 2024
+
+### New Features {#new-features-v2024-02-29-0}
+* Enables the Embedded Cluster option on the customer license page. For more information, see [Using Embedded Cluster](/vendor/embedded-overview).
+
+
+## v2024.02.27-1
+
+Released on February 27, 2024
+
+### New Features {#new-features-v2024-02-27-1}
+* Adds ARM support for Compatibility Matrix GKE clusters.
+
+## v2024.02.26-0
+
+Released on February 26, 2024
+
+### New Features {#new-features-v2024-02-26-0}
+* The v3 API `/customer_instances` endpoint supports filtering with the `customerIds=".."` query parameter.
+
+## v2024.02.23-2
+
+Released on February 23, 2024
+
+### New Features {#new-features-v2024-02-23-2}
+* Adds the ability to pin a license to a specific release sequence. To get access to this feature, log in to your vendor portal account. Select **Support > Request a feature**, and submit a feature request for "license release pinning".
+
+## v2024.02.21-1
+
+Released on February 21, 2024
+
+### New Features {#new-features-v2024-02-21-1}
+* Adds the EKS g4dn instance types to Compatibility Matrix.
+* Adds the AKS Standard_D2ps_v5 and higher instance types to Compatibility Matrix.
+* Labels and comments on support cases with End of Life (EOL) add-ons in kURL installer specs embedded in application releases.
+
+## v2024.02.21-0
+
+Released on February 21, 2024
+
+### New Features {#new-features-v2024-02-21-0}
+* Adds release info to the **Support bundle analysis** page.
+
+## v2024.02.19-0
+
+Released on February 19, 2024
+
+### New Features {#new-features-v2024-02-19-0}
+* Adds support for Node Groups on the **Cluster History** page.
+
+## v2024.02.14-0
+
+Released on February 14, 2024
+
+### New Features {#new-features-v2024-02-14-0}
+* Adds the ability to add a Custom ID to a Customer through the vendor portal.
+* Shows the Custom ID and License ID on the Customers and Instances table views on the **Customers** page.
+
+## v2024.02.13-3
+
+Released on February 13, 2024
+
+### New Features {#new-features-v2024-02-13-3}
+* Adds support for creating multiple nodegroups in compatibility matrix EKS clusters.
+
+## v2024.02.09-3
+
+Released on February 9, 2024
+
+### New Features {#new-features-v2024-02-09-3}
+* Adds support for Google Artifact Registry.
+
+### Improvements {#improvements-v2024-02-09-3}
+* Adds pagination to the list of customer instances on the customer details page.
+
+### Bug Fixes {#bug-fixes-v2024-02-09-3}
+* The `pageSize` and `offset` properties are no longer required for the `/v3/customers/search` Vendor API endpoint. API consumers must provide at least one inclusion criterion for a valid customer search.
+
+## v2024.02.08-2
+
+Released on February 8, 2024
+
+### Bug Fixes {#bug-fixes-v2024-02-08-2}
+* Replaces the GMT timezone value with a UTC label.
+
+## v2024.02.08-1
+
+Released on February 8, 2024
+
+### New Features {#new-features-v2024-02-08-1}
+* Updates the pricing for compatibility matrix clusters that use Amazon Elastic Kubernetes Service (EKS) versions with extended support. For more information, see [Compatibility Matrix Platform Pricing](https://www.replicated.com/matrix/pricing) on the Replicated website.
+
+## v2024.02.07-7
+
+Released on February 7, 2024
+
+### Bug Fixes {#bug-fixes-v2024-02-07-7}
+* The Custom Metrics chart tooltip displays two digits for the minutes field. Also adds the GMT time zone for clarity.
+
+## v2024.02.05-1
+
+Released on February 5, 2024
+
+### New Features {#new-features-v2024-02-05-1}
+* Adds a status indicator to Customer rows on the **Customers** page Hybrid view.
+* Adds entitlement badges to Customer rows on the **Customers** page Hybrid view.
+
+## v2024.02.05-0
+
+Released on February 5, 2024
+
+### New Features {#new-features-v2024-02-05-0}
+* Labels and comments on support cases with End of Life (EOL) add-ons in Installer specs pinned to channels.
+
+## v2024.02.01-4
+
+Released on February 1, 2024
+
+### Improvements {#improvements-v2024-02-01-4}
+* Improves the display of large quantities of Custom Metrics on the **Instance Reporting** page.
+
+## v2024.01.29-0
+
+Released on January 29, 2024
+
+### Improvements {#improvements-v2024-01-29-0}
+* Adds a link to documentation for updating team member email addresses.
+
+## v2024.01.26-3
+
+Released on January 26, 2024
+
+### Bug Fixes {#bug-fixes-v2024-01-26-3}
+* Displays an accurate active instance count on the **Customers** page.
+
+## v2024.01.25-4
+
+Released on January 25, 2024
+
+### New Features {#new-features-v2024-01-25-4}
+* Adds the ability to filter customers by channel version on the **Customers** page.
+* Adds links to filter customers by adopted version from the **Channels** page.
+
+## v2024.01.25-0
+
+Released on January 25, 2024
+
+### Improvements {#improvements-v2024-01-25-0}
+* Adds more helpful messaging on the **Support Bundle Analysis** page if your bundle does not contain an instance ID.
+
+## v2024.01.23-1
+
+Released on January 23, 2024
+
+### Improvements {#improvements-v2024-01-23-1}
+* Application release information is extracted from an attached support bundle and displayed in the GitHub support case for better reference.
+
+## v2024.01.19-1
+
+Released on January 19, 2024
+
+### Bug Fixes {#bug-fixes-v2024-01-19-1}
+* Adds the ability to scroll on the **License Fields** page.
+
+
+## v2024.01.18-3
+
+Released on January 18, 2024
+
+### Improvements {#improvements-v2024-01-18-3}
+* Displays air gap build status on the **Channels** page.
+
+## v2024.01.18-2
+
+Released on January 18, 2024
+
+### Bug Fixes {#bug-fixes-v2024-01-18-2}
+* The Instances CSV export shows the relevant `.airgap` bundle downloaded timestamp, channel_id, and channel_sequence data.
+
+## v2024.01.17-1
+
+Released on January 17, 2024
+
+### New Features {#new-features-v2024-01-17-1}
+* Adds support to the compatibility matrix for running OpenShift clusters with multiple nodes.
+
+## v2024.01.11-1
+
+Released on January 11, 2024
+
+### Bug Fixes {#bug-fixes-v2024-01-11-1}
+* Fixes a bug in the **Customers** page search feature, where it would not display the 'not found' state if no results were found.
+
+## v2024.01.10-2
+
+Released on January 10, 2024
+
+### Bug Fixes {#bug-fixes-v2024-01-10-2}
+* Adds an error state for the **Support Bundle Analysis** page if there is an invalid bundle slug in the URL.
+
+## v2024.01.10-1
+
+Released on January 10, 2024
+
+### Improvements {#improvements-v2024-01-10-1}
+* Adds pagination to the **Kubernetes Installers** page.
+
+## v2024.01.10-0
+
+Released on January 10, 2024
+
+### Improvements {#improvements-v2024-01-10-0}
+* Improves refetching on the **Customers** page.
+
+## v2024.01.09-4
+
+Released on January 9, 2024
+
+### Bug Fixes {#bug-fixes-v2024-01-09-4}
+* Fixes the install links on the **Channels** page for Native applications.
+
+## v2024.01.09-3
+
+Released on January 9, 2024
+
+### Improvements {#improvements-v2024-01-09-3}
+* Adds pagination for the **Customers** page table view.
+
+## v2024.01.08-6
+
+Released on January 8, 2024
+
+### Bug Fixes {#bug-fixes-v2024-01-08-6}
+* Fixes back button behavior when navigating to the **Customers** page from a link on the **Channels** page.
+
+## v2024.01.08-5
+
+Released on January 8, 2024
+
+### Improvements {#improvements-v2024-01-08-5}
+* Adds an 'Add support bundle' button to the **Customer Support Bundles** page.
+* Adds an error state when a user visits an invalid release.
+* Simplifies the search design on the **Troubleshoot** pages.
+* Adds an empty state when there are no search results on the **Troubleshoot** pages.
+* Persists the search query and shows correct results when switching between the application-level **Troubleshoot** page and the top-level **Troubleshoot** page.
+
+### Bug Fixes {#bug-fixes-v2024-01-08-5}
+* Fixes a bug where the search box would disappear on the top-level **Troubleshoot** page if the query returned no results.
+
+## v2024.01.08-1
+
+Released on January 8, 2024
+
+### New Features {#new-features-v2024-01-08-1}
+* Adds both TTL and Duration to the **Cluster History** page.
+* Fixes sort by TTL and sort by duration to work with paginated results.
+* Adds a filter by Kubernetes distribution to the **Cluster History** page.
+* Adds a filter by Cost to the **Cluster History** page.
+* Adds a filter by Node Count to the **Cluster History** page.
+
+## v2024.01.08-0
+
+Released on January 8, 2024
+
+### Bug Fixes {#bug-fixes-v2024-01-08-0}
+* Fixes a bug where the support bundle and customer name would not be prefilled on the support request form if you navigated there from one of the "Submit support ticket" links on the **Troubleshoot** or **Dashboard** pages.
+
+## v2024.01.04-2
+
+Released on January 4, 2024
+
+### Improvements {#improvements-v2024-01-04-2}
+* Adds the ability to edit the instance name on the **Customers** page.
+
+### Bug Fixes {#bug-fixes-v2024-01-04-2}
+* Shows an error state when you visit a customer page with an invalid app slug or customer ID in the URL.
+
+## v2024.01.03-3
+
+Released on January 3, 2024
+
+### Improvements {#improvements-v2024-01-03-3}
+* Improves the wording and styling of the Adoption Rate section of the channels on the **Channels** page.
+
+### Bug Fixes {#bug-fixes-v2024-01-03-3}
+* Fixes the filtering for the active/inactive customer links on the **Channels** page.
+
+## v2024.01.03-2
+
+Released on January 3, 2024
+
+### Improvements {#improvements-v2024-01-03-2}
+* Includes the instance name on the **Support Bundle Analysis** page.
+
+## v2024.01.03-0
+
+Released on January 3, 2024
+
+### Improvements {#improvements-v2024-01-03-0}
+* Displays instance tags in the Instance table view.
+
+## v2024.01.02-0
+
+Released on January 2, 2024
+
+### Improvements {#improvements-v2024-01-02-0}
+* Displays the instance name on the **Customers** page hybrid view.
+
+## v2023.12.30-0
+
+Released on December 30, 2023
+
+### Bug Fixes {#bug-fixes-v2023-12-30-0}
+* Fixes an issue where the instance name failed to render after creating an instance tag with the key "name".
+
+## v2023.12.29-5
+
+Released on December 29, 2023
+
+### New Features {#new-features-v2023-12-29-5}
+* Adds the ability to add a custom name to a given instance along with other vendor-defined instance tags.
+
+## v2023.12.28-0
+
+Released on December 28, 2023
+
+### Bug Fixes {#bug-fixes-v2023-12-28-0}
+* Removes references to the deprecated support@replicated.com email address.
+
+## v2023.12.27-1
+
+Released on December 27, 2023
+
+### New Features {#new-features-v2023-12-27-1}
+* Adds additional bundle and instance metadata to the **Support Bundle Analysis** page.
+
+## v2023.12.21-3
+
+Released on December 21, 2023
+
+### Bug Fixes {#bug-fixes-v2023-12-21-3}
+* Fixes an incorrect link for releases and customers created by Service Accounts.
+
+## v2023.12.20-1
+
+Released on December 20, 2023
+
+### Bug Fixes {#bug-fixes-v2023-12-20-1}
+* Improves error messaging for the **Instance Details** page when there is an invalid app slug, customer ID, or instance ID in the URL.
+* Fixes installation failures for applications with Helm charts that contain empty files.
+
+## v2023.12.19-3
+
+Released on December 19, 2023
+
+### Bug Fixes {#bug-fixes-v2023-12-19-3}
+* Allows the user to press 'Enter' to submit when logging in to the download portal.
+
+## v2023.12.19-2
+
+Released on December 19, 2023
+
+### Bug Fixes {#bug-fixes-v2023-12-19-2}
+* Fixes scrolling on the **Kubernetes Installers** teaser page.
+
+## v2023.12.19-1
+
+Released on December 19, 2023
+
+### Improvements {#improvements-v2023-12-19-1}
+* Redesigns the **Customers** page search to make it more streamlined.
+
+## v2023.12.19-0
+
+Released on December 19, 2023
+
+### New Features {#new-features-v2023-12-19-0}
+* Releases Embedded Cluster v1.28.4+ec.5, replacing v1.28.4+ec.4.
+* Shows the max disk size on the create cluster form (CMX) based on the entitlement value.
+
+### Bug Fixes {#bug-fixes-v2023-12-19-0}
+* Disables the create cluster button when loading the team entitlement.
+
+## v2023.12.18-0
+
+Released on December 18, 2023
+
+### New Features {#new-features-v2023-12-18-0}
+* Adds the ability to extend cluster Time to Live (TTL) after creation with the compatibility matrix.
+
+### Improvements {#improvements-v2023-12-18-0}
+* Adds Embedded Cluster `v1.28.4+ec.4` as the default version.
+* Removes the 'NEW' badge from the Instances CSV download.
+
+## v2023.12.14-4
+
+Released on December 14, 2023
+
+### Improvements {#improvements-v2023-12-14-4}
+* Persists inputs on the **Compatibility Matrix > Create Cluster** dialog when there is an error.
+
+## v2023.12.14-3
+
+Released on December 14, 2023
+
+### Improvements {#improvements-v2023-12-14-3}
+* Displays maintenance notifications per distro in the create cluster form.
+* Adds the ability to select a date-time range filter on the **Cluster History** page. Cluster stats can be filtered by `start-time` and `end-time`.
+
+## v2023.12.14-0
+
+Released on December 14, 2023
+
+### Bug Fixes {#bug-fixes-v2023-12-14-0}
+* Fixes the default product options on the support request form. These are generated based on enabled entitlements.
+
+## v2023.12.13-1
+
+Released on December 13, 2023
+
+### Improvements {#improvements-v2023-12-13-1}
+* Uses `sortColumn=tag` and `tag-sort-key` to sort clusters on the values for a tag key.
+
+### Bug Fixes {#bug-fixes-v2023-12-13-1}
+* Shows an error message when updating Compatibility Matrix quotas to a value equal to or less than the current value.
+
+## v2023.12.13-0
+
+Released on December 13, 2023
+
+### Improvements {#improvements-v2023-12-13-0}
+* Adds "Created By" and "Updated By" columns to the Customers and Instances table views.
+
+## v2023.12.11-3
+
+Released on December 11, 2023
+
+### Improvements {#improvements-v2023-12-11-3}
+* Adds "Last Airgap Download Version" and "Last Airgap Download Date" columns to the Customers and Instances table views.
+
+### Bug Fixes {#bug-fixes-v2023-12-11-3}
+* Fixes issues with customer instances CSV row repetition.
+
+## v2023.12.11-2
+
+Released on December 11, 2023
+
+### Improvements {#improvements-v2023-12-11-2}
+* Improves usability of the Download Portal by providing descriptions, better button names, and improved styles.
+* Improves messaging when RBAC prevents requesting more credits in CMX.
+
+### Bug Fixes {#bug-fixes-v2023-12-11-2}
+* Fixes the version label on the customer instances table.
+
+## v2023.12.11-1
+
+Released on December 11, 2023
+
+### Improvements {#improvements-v2023-12-11-1}
+* Shows the release version that was most recently downloaded from the Download Portal on the **Customer Reporting** page.
+
+## v2023.12.11-0
+
+Released on December 11, 2023
+
+### Improvements {#improvements-v2023-12-11-0}
+* Reorders the support request form to ensure that the customer (or "no customer") is selected before the product area, and auto-fills the form accordingly.
+
+## v2023.12.09-0
+
+Released on December 9, 2023
+
+### Improvements {#improvements-v2023-12-09-0}
+* Adds the ability to upload multiple support bundles when opening a support issue on the **Troubleshoot** or **Support** page.
+
+## v2023.12.08-4
+
+Released on December 8, 2023
+
+### Bug Fixes {#bug-fixes-v2023-12-08-4}
+* Persists column visibility on the Compatibility Matrix cluster history.
+
+## v2023.12.08-1
+
+Released on December 8, 2023
+
+### Bug Fixes {#bug-fixes-v2023-12-08-1}
+* Fixes a bug where the selected file in the editor would be reset after saving changes to a KOTS release.
+
+## v2023.12.08-0
+
+Released on December 8, 2023
+
+### Improvements {#improvements-v2023-12-08-0}
+* Adds the ability to upload multiple support bundles when opening a support issue on the **Troubleshoot** or **Support** pages.
+
+## v2023.12.07-2
+
+Released on December 7, 2023
+
+### Improvements {#improvements-v2023-12-07-2}
+* Adds the ability to specify tags at cluster creation with the compatibility matrix.
+
+## v2023.12.07-1
+
+Released on December 7, 2023
+
+### Bug Fixes {#bug-fixes-v2023-12-07-1}
+* Fixes a bug that prompted the user about unsaved changes when clicking "Create release" on the Draft Release page.
+
+## v2023.12.06-2
+
+Released on December 6, 2023
+
+### Improvements {#improvements-v2023-12-06-2}
+* Shows 'Created by' and 'Last modified by' information on the **Customers**, **Reporting**, and **Customer details** pages.
+
+## v2023.12.06-0
+
+Released on December 6, 2023
+
+### Bug Fixes {#bug-fixes-v2023-12-06-0}
+* Fixes a bug that could occur when generating the embedded cluster binary for channels where semantic versioning was not enabled.
+* Fixes a bug in the **Channel Settings** modal where the user could not return custom domains to the Replicated default.
+
+## v2023.12.05-1
+
+Released on December 5, 2023
+
+### Improvements {#improvements-v2023-12-05-1}
+* Shows 'Created by' and 'Last modified by' on the **Releases**, **View Release**, **Edit Release**, and **Release History** pages.
+
+## v2023.12.04-4
+
+Released on December 4, 2023
+
+### Bug Fixes {#bug-fixes-v2023-12-04-4}
+* Fixes the **Copy download URL** button for airgap builds on the **Release History** page in Safari.
+
+## v2023.12.04-3
+
+Released on December 4, 2023
+
+### Improvements {#improvements-v2023-12-04-3}
+* Adds the ability to update a test cluster TTL.
+
+## v2023.12.04-1
+
+Released on December 4, 2023
+
+### New Features {#new-features-v2023-12-04-1}
+* Adds the "installer support enabled" license option to the customer create and manage pages. This option is only visible to vendors with the associated entitlement enabled.
+
+## v2023.12.01-4
+
+Released on December 1, 2023
+
+### Improvements {#improvements-v2023-12-01-4}
+* Unifies the Customers page search, sort, and filter results across all tabs.
+
+## v2023.11.29-3
+
+Released on November 29, 2023
+
+### Improvements {#improvements-v2023-11-29-3}
+* Adds the ability to subscribe to custom metrics notifications.
+* Splits notifications for "All" events into "App Status" and "System Events".
+
+## v2023.11.29-2
+
+Released on November 29, 2023
+
+### New Features {#new-features-v2023-11-29-2}
+* Adds Custom Metrics timeseries graphing on the Instance Details page.
+
+## v2023.11.29-0
+
+Released on November 29, 2023
+
+### Improvements {#improvements-v2023-11-29-0}
+* Adds support for opening a new tab on right click in the Application drop down.
+
+### Bug Fixes {#bug-fixes-v2023-11-29-0}
+* Fixes an issue that could prevent the user from uploading support bundles on the Instance Insights page.
+
+## v2023.11.28-1
+
+Released on November 28, 2023
+
+### Bug Fixes {#bug-fixes-v2023-11-28-1}
+* Aligns the Helm icon with the Helm chart in the release editor.
+
+## v2023.11.28-0
+
+Released on November 28, 2023
+
+### Bug Fixes {#bug-fixes-v2023-11-28-0}
+* Fixes an issue that caused linter results to not be displayed when opening a KOTS release for editing.
+* Fixes the loading state on the Customers table view.
+
+## v2023.11.27-1
+
+Released on November 27, 2023
+
+### Bug Fixes {#bug-fixes-v2023-11-27-1}
+* Fixes a bug with automatic air gap builds in the Channel Settings modal, where the setting would show as false even if automatic air gap builds were enabled.
+
+## v2023.11.23-0
+
+Released on November 23, 2023
+
+### New Features {#new-features-v2023-11-23-0}
+* Supports multi-node kURL clusters up to 10 nodes with the compatibility matrix.
+
+## v2023.11.22-1
+
+Released on November 22, 2023
+
+### Bug Fixes {#bug-fixes-v2023-11-22-1}
+* Fixes an issue where the compatibility matrix kURL version displayed in the `create cluster` command was incorrect.
+
+## v2023.11.20-2
+
+Released on November 20, 2023
+
+### Improvements {#improvements-v2023-11-20-2}
+* Hides inactive instances from the Instances table view by default. Adds a checkbox to show inactive instances in the table.
+
+## v2023.11.17-2
+
+Released on November 17, 2023
+
+### Improvements {#improvements-v2023-11-17-2}
+* Hides the 'NEW' badge on the Instances CSV download after it has been clicked.
+
+## v2023.11.15-0
+
+Released on November 15, 2023
+
+### Improvements {#improvements-v2023-11-15-0}
+* Saves the Channels ordering, sorting, and hidden columns table settings when the user updates them.
+* Standardizes tooltips on the Dashboard, Customers, and Channels pages.
+* Disallows adding a .zip file when uploading a bundle in the support request form.
+
+### Bug Fixes {#bug-fixes-v2023-11-15-0}
+* Fixes button alignment in the empty state on the **Releases** page when the KOTS installer is not enabled.
+
+## v2023.11.13-0
+
+Released on November 13, 2023
+
+### Improvements {#improvements-v2023-11-13-0}
+* Standardizes button styles on the Compatibility Matrix pages.
+
+## v2023.11.10-1
+
+Released on November 10, 2023
+
+### Improvements {#improvements-v2023-11-10-1}
+* Updates button styles on Troubleshoot, License Fields, Images, Kubernetes Installers, and Custom Domains.
+* Standardizes button styles on the Team and Account Settings pages.
+
+## v2023.11.10-0
+
+Released on November 10, 2023
+
+### Improvements {#improvements-v2023-11-10-0}
+* Adds the ability to save table settings (column order, column visibility, sort by, page size) on the Customer and Instances tables.
+* Standardizes button styles on the Releases, Channels, and Customers pages.
+
+### Bug Fixes {#bug-fixes-v2023-11-10-0}
+* Shows promoted channel(s) when viewing a KOTS release.
+
+## v2023.11.06-1
+
+Released on November 6, 2023
+
+### Improvements {#improvements-v2023-11-06-1}
+* Improves the way large amounts of custom metrics display on the Instance Details page, in both the Filters dropdown and the Custom Metrics section on the left.
+
+## v2023.11.03-2
+
+Released on November 3, 2023
+
+### Improvements {#improvements-v2023-11-03-2}
+* Redirects the user to the most recently managed application upon login.
+
+## v2023.11.03-1
+
+Released on November 3, 2023
+
+### Bug Fixes {#bug-fixes-v2023-11-03-1}
+* Filters out "read" events in the audit log initial search query.
+
+## v2023.10.30-3
+
+Released on October 30, 2023
+
+### Bug Fixes {#bug-fixes-v2023-10-30-3}
+* Fixes a style bug on the Audit Log page where the search input border is partially hidden.
+
+## v2023.10.30-2
+
+Released on October 30, 2023
+
+### Improvements {#improvements-v2023-10-30-2}
+* Makes some columns hidden by default in the Instances view on the Customers page and updates column names.
+
+## v2023.10.30-1
+
+Released on October 30, 2023
+
+### Improvements {#improvements-v2023-10-30-1}
+* Updates styles on the **Instance Details** page.
+* Updates tab styles throughout the vendor portal.
+
+## v2023.10.27-2
+
+Released on October 27, 2023
+
+### Improvements {#improvements-v2023-10-27-2}
+* Standardizes breadcrumbs across the site.
+
+## v2023.10.27-1
+
+Released on October 27, 2023
+
+### Improvements {#improvements-v2023-10-27-1}
+* Various style improvements to the **Images**, **Kubernetes Installer**, **Custom Domains**, and **App Settings** pages.
+
+## v2023.10.26-3
+
+Released on October 26, 2023
+
+### Improvements {#improvements-v2023-10-26-3}
+* Various style improvements to the compatibility matrix **Cluster History**, **Customers**, **Troubleshoot**, and **License Fields** pages.
+
+## v2023.10.26-2
+
+Released on October 26, 2023
+
+### Bug Fixes {#bug-fixes-v2023-10-26-2}
+* Fixes query timeout issues with the `/events` API endpoint.
+
+## v2023.10.26-0
+
+Released on October 26, 2023
+
+### Improvements {#improvements-v2023-10-26-0}
+* Allows editing tags in the Cluster History table.
+* Allows adding tags as separate columns in the Cluster History table.
+* Shows some statistics at the top of the Cluster History table.
+
+## v2023.10.24-0
+
+Released on October 24, 2023
+
+### Improvements {#improvements-v2023-10-24-0}
+* Adds links to release notes in the vendor portal.
+
+## v2023.10.23-0
+
+Released on October 23, 2023
+
+### Bug Fixes {#bug-fixes-v2023-10-23-0}
+* Shows multiple instances for a single customer in the customer instance table view.
+
+## v2023.10.18-1
+
+Released on October 18, 2023
+
+### New Features {#new-features-v2023-10-18-1}
+* Compatibility matrix retries on an error provisioning a cluster up to 2 times, for a total of 3 attempts, before returning an error.
+
+## v2023.10.18-0
+
+Released on October 18, 2023
+
+### Improvements {#improvements-v2023-10-18-0}
+* Shows tags on the cluster and cluster history tables.
+
+### Bug Fixes {#bug-fixes-v2023-10-18-0}
+* Limits release size to 16MiB when compressed using the [Vendor API v3 to create a release](https://replicated-vendor-api.readme.io/reference/createrelease).
+* Shows an error message if the user encounters an error during application creation.
+* Fixes a bug that would allow creating accounts using an email address with trailing or leading white spaces.
+
+## v2023.10.16-0
+
+Released on October 16, 2023
+
+### Improvements {#improvements-v2023-10-16-0}
+* Adds table views for customers and instances on the Customers page.
+
+### Bug Fixes {#bug-fixes-v2023-10-16-0}
+* Fixes a bug in the copy create cluster command.
+* Fixes the "by" field in cluster history so that it does not show "web ui" most of the time.
+* Fixes the display of cost in the cluster history table.
+
+## v2023.10.13-0
+
+Released on October 13, 2023
+
+### Improvements {#improvements-v2023-10-13-0}
+* Adds the name of the entity that created the cluster to the cluster page.
+* Various design improvements to the **Cluster History** page to improve the user experience.
+
+## v2023.10.11-1
+
+Released on October 11, 2023
+
+### New Features {#new-features-v2023-10-11-1}
+* Adds a **Settings** page for the Compatibility Matrix, granting users the ability to access quota and capacity information and submit requests for increasing their quotas.
+
+### Improvements {#improvements-v2023-10-11-1}
+* Adds an updated table view for the **Channels** page.
+
+### Bug Fixes {#bug-fixes-v2023-10-11-1}
+* Fixes an issue that could prevent users from logging in because they do not have an RBAC role assigned.
+* Fixes a bug on the Dashboard where the user was unable to delete a support bundle.
+* Fixes a bug on the Kubernetes Installer History page where breadcrumbs were not displaying correctly.
+
+## v2023.10.10-0
+
+Released on October 10, 2023
+
+### Improvements {#improvements-v2023-10-10-0}
+* Adds a verification stage when provisioning bare metal clusters of type Kind, K3s, kURL, and HelmVM to check that the cluster is running and healthy.
+
+## v2023.10.09-1
+
+Released on October 9, 2023
+
+### Bug Fixes {#bug-fixes-v2023-10-09-1}
+* Updates the icon for custom metrics events on the Instance Details pages.
+
+## v2023.10.09-0
+
+Released on October 9, 2023
+
+### Improvements {#improvements-v2023-10-09-0}
+* Sets `false` as the default value for any new boolean license fields.
+* Changes boolean license field options to a "True"/"False" dropdown on the **Customer Manage** and **Create Customer** pages.
+
+================
+File: docs/release-notes/rn-whats-new.md
+================
+---
+pagination_next: null
+pagination_prev: null
+---
+
+# Release Notes Overview
+
+New features and improvements that have been added to Replicated are documented on a per-component basis in the corresponding release notes section. Component updates may be released at any time following a continuous delivery model.
+
+To view the component release notes, see the following:
+* [Embedded Cluster Release Notes](rn-embedded-cluster)
+* [KOTS Release Notes](rn-app-manager)
+* [kURL Release Notes](rn-kubernetes-installer)
+* [Replicated SDK Release Notes](rn-replicated-sdk)
+* [Vendor Platform Release Notes](rn-vendor-platform)
+
+For an overview of recent updates to the Replicated platform, see the monthly [Replicated Release Highlights blog](https://www.replicated.com/blog-tags/replicated-release-highlights).
+
+================
+File: docs/templates/procedure.md
+================
+# Page Title (Use a gerund. For example, "Creating...")
+
+<!-- This template is used for a single procedure. For a workflow that contains multiple procedures/tasks, use the process/multiple procedure template.-->
+
+[Introductory paragraph stating the business reason - what and why - a user would want to do this procedure.]
+
+**Prerequisites**
+
+Complete the following items before you perform this task:
+* First item
+* Second item
+
+To [do this task]: [For example, "To create a customer license:"]
+
+1. Log in to the [vendor portal](https://vendor.replicated.com), and click **Customer > Create Customer**.
+
+   [Optional: Include a results step. Use only when a result is not obvious, such as in a complex UX flow like GitOps. For example, "A list of your applications displays and shows the status of GitOps integration for each application."]
+
+1. Edit the fields:
+
+   <table>
+   <tr>
+     <th width="30%">Name</th>
+     <th width="70%">Description</th>
+   </tr>
+   <tr>
+     <td>Field Name</td>
+     <td>Example description: The type of customer is used solely for reporting purposes. Their access to your app is not affected by the type you assign to them. <strong>Options:</strong> Development, Trial, Paid, Community. <strong>Default:</strong> Trial. For more information, see <a href="releases-creating-customer">Creating a Customer</a>.</td>
+   </tr>
+   <tr>
+     <td></td>
+     <td></td>
+   </tr>
+   </table>
+
+   <!--If you need a resizable screenshot that gives the user the ability to display a larger image if needed, format it like this:
+
+   <img alt="channel settings dialog" src="/images/channel-settings.png" width="500px"/>
+
+   [View a larger version of this image](/images/channel-settings.png)
+
+   -->
+
+1. Run the following command to export the blah blah blah:
+
+   ```
+   kubectl kots pull UPSTREAM_URI --cluster CLUSTER_NAME
+   ```
+
+   Replace:
+
+   - UPSTREAM_URI: With the URI for the application.
+   - CLUSTER_NAME: With the name of the kubeconfig cluster.
+
+
+   [Use a bulleted list for the placeholder text definitions unless you feel the list is too long and that a table would be cleaner. If you need to use a table, use the following table format:
+
+   <table>
+   <tr>
+     <th width="30%">Replace</th>
+     <th width="70%">With</th>
+   </tr>
+   <tr>
+     <td>UPSTREAM_URI</td>
+     <td>The URI for the application.</td>
+   </tr>
+   <tr>
+     <td>CLUSTER_NAME</td>
+     <td>The name of the kubeconfig cluster.</td>
+   </tr>
+   </table>
+
+1. Click **Save Changes**.
+
+## Next Step
+
+[Describe and link to the next task.]
+
+## Related Topics
+
+<!-- Be judicious. Only include this section if the topics are truly related to this procedure and have a specific purpose/goal for including it here instead of as a cross-reference.
+
+* Do not use an intro sentence
+* Should be a bulleted list only if there is more than one link
+* Use the actual topic name with a hyperlink
+* Keep the list short
+* Should only link to topics on docs.replicated.com, or Replicated blogs/articles
+-->
+
+[Example Related Topic Title](https://docs.replicated.com)
+
+================
+File: docs/templates/process-multiple-procedures.md
+================
+# Page Title (Use a gerund. For example, "Creating...")
+
+<!-- This template is used for processes/workflows or tutorials that require multiple procedures/tasks. For single procedures, use the other template.-->
+
+[Introductory paragraph stating the business reason why a user would want to do this process/workflow or tutorial.]
+
+## Prerequisites
+
+These actions or items must be complete before you perform this task:
+* First item
+* Second item
+
+## Task Heading (Start with a verb. For example, "Create a Customer License")
+
+[Introductory sentence or two to explain the "what" and "why" of the task.]
+
+To [do this task]: [For example, "To create a customer license:"]
+
+1. Log in to the [vendor portal](https://vendor.replicated.com), and click **Customer > Create Customer**.
+
+   [Optional: include a results step. Use only when a result is not obvious. For example, "The Create a new customer page opens."]
+
+1. Edit the fields:
+
+   <table>
+   <tr>
+     <th width="30%">Name</th>
+     <th width="70%">Description</th>
+   </tr>
+   <tr>
+     <td>Field Name</td>
+     <td>[Example description: The type of customer is used solely for reporting purposes. Their access to your app is not affected by the type you assign to them. <strong>Options:</strong> Development, Trial, Paid, Community. <strong>Default:</strong> Trial. For more information, see LINK.]</td>
+   </tr>
+   <tr>
+     <td>Field Name</td>
+     <td>Specifies the...</td>
+   </tr>
+   </table>
+
+1. Run the following command to export the blah blah blah:
+
+   ```
+   kubectl kots pull UPSTREAM_URI --cluster CLUSTER_NAME
+   ```
+   Replace:
+
+   - UPSTREAM_URI: With the URI for the application.
+   - CLUSTER_NAME: With the name of the kubeconfig cluster.
+
+
+   [Use a bulleted list for the placeholder text definitions unless you feel the list is too long and that a table would be cleaner. If you need to use a table, use the following table format:
+
+   <table>
+   <tr>
+     <th width="30%">Replace</th>
+     <th width="70%">With</th>
+   </tr>
+   <tr>
+     <td>UPSTREAM_URI</td>
+     <td>The URI for the application.</td>
+   </tr>
+   <tr>
+     <td>CLUSTER_NAME</td>
+     <td>The name of the kubeconfig cluster.</td>
+   </tr>
+   </table>
+
+1. Click **Save Changes**.
+
+## (Optional) Task Heading 2
+
+<!--Separate each task under a new heading. If there are optional tasks that the user can complete as part of the larger procedure, put "(Optional)" in the heading.-->
+
+[Introductory sentence or two to explain the "what" and "why" of the task. You can tell the user that the previous task must be completed first. For example: "After you create a blah blah, you can configure the... This helps you..."]
+
+To [do this task]:
+
+1. Step
+1. Step
+
+
+## Task Heading 3
+
+[Introductory sentence or two to explain the "what" and "why" of the task. You can tell the user that the previous task must be completed first. For example: "After you create a blah blah, you can configure the... This helps you..."]
+
+To [do this task]:
+
+1. Step
+1. Step
+
+## Related Topics
+
+<!-- Be judicious. Only include this section if the topics are truly related to this procedure and have a specific purpose/goal for including it here instead of as a cross-reference.
+
+* Do not use an intro sentence
+* Should be a bulleted list only if there is more than one link
+* Use the actual topic name with a hyperlink
+* Keep the list short
+* Should only link to topics on docs.replicated.com, or Replicated blogs/articles
+-->
+
+[My Topic Title](https://docs.replicated.com)
+
+================
+File: docs/templates/release-notes.md
+================
+# Product Name Release Notes
+
+## vX.X.X
+
+Release Date: Month Day, Year
+
+### Kubernetes Compatibility
+
+This release is compatible with Kubernetes vX.X, vX.Y, and vX.Z.
+
+### Security Fixes
+
+* Fix 1
+* Fix 2
+
+### New Features
+
+* New feature 1
+* New feature 2
+
+### Improvements
+
+* Improvement 1
+* Improvement 2
+
+### Bug Fixes
+
+* Bug 1
+* Bug 2
+
+### Known Issues
+
+* Known issue 1
+* Known issue 2
+
+================
+File: docs/vendor/admin-console-adding-buttons-links.mdx
+================
+# Adding Links to the Dashboard
+
+This topic describes how to use the Kubernetes SIG Application custom resource to add links to the Replicated KOTS Admin Console dashboard.
+
+## Overview
+
+Replicated recommends that every application include a Kubernetes SIG Application custom resource. The Kubernetes SIG Application custom resource provides a standard API for creating, viewing, and managing applications. For more information, see [Kubernetes Applications](https://github.com/kubernetes-sigs/application#kubernetes-applications) in the kubernetes-sigs GitHub repository.
+
+You can include the Kubernetes SIG Application custom resource in your releases to add links to the Admin Console dashboard. Common use cases include adding links to documentation, dashboards, or a landing page for the application.
+
+For example, the following shows an **Open App** button on the dashboard of the Admin Console for an application named Gitea:
+
+<img alt="Admin Console dashboard with Open App link" src="/images/gitea-open-app.png" width="700px"/>
+
+[View a larger version of this image](/images/gitea-open-app.png)
+
+:::note
+KOTS uses the Kubernetes SIG Application custom resource as metadata and does not require or use an in-cluster controller to handle this custom resource. An application that follows best practices does not require cluster admin privileges or any cluster-wide components to be installed.
+:::
+
+## Add a Link
+
+To add a link to the Admin Console dashboard, include a [Kubernetes SIG Application](https://github.com/kubernetes-sigs/application#kubernetes-applications) custom resource in the release with a `spec.descriptor.links` field. The `spec.descriptor.links` field is an array of links that are displayed on the Admin Console dashboard after the application is deployed.
+
+Each link in the `spec.descriptor.links` array contains two fields:
+* `description`: The link text that will appear on the Admin Console dashboard.
+* `url`: The target URL.
+ +For example: + +```yaml +# app.k8s.io/v1beta1 Application Custom resource + +apiVersion: app.k8s.io/v1beta1 +kind: Application +metadata: + name: "gitea" +spec: + descriptor: + links: + - description: About Wordpress + url: "https://wordpress.org/" +``` + +When the application is deployed, the "About Wordpress" link is displayed on the Admin Console dashboard as shown below: + +<img alt="About Wordpress link on the Admin Console dashboard" src="/images/dashboard-link-about-wordpress.png" width="450px"/> + +[View a larger version of this image](/images/dashboard-link-about-wordpress.png) + +For an additional example of a Kubernetes SIG Application custom resource, see [application.yaml](https://github.com/kubernetes-sigs/application/blob/master/docs/examples/wordpress/application.yaml) in the kubernetes-sigs GitHub repository. + +### Create URLs with User-Supplied Values Using KOTS Template Functions {#url-template} + +You can use KOTS template functions to template URLs in the Kubernetes SIG Application custom resource. This can be useful when all or some of the URL is a user-supplied value. For example, an application might allow users to provide their own ingress controller or load balancer. In this case, the URL can be templated to render the hostname that the user provides on the Admin Console Config screen. + +The following examples show how to use the KOTS [ConfigOption](/reference/template-functions-config-context#configoption) template function in the Kubernetes SIG Application custom resource `spec.descriptor.links.url` field to render one or more user-supplied values: + +* In the example below, the URL hostname is a user-supplied value for an ingress controller that the user configures during installation. + + ```yaml + apiVersion: app.k8s.io/v1beta1 + kind: Application + metadata: + name: "my-app" + spec: + descriptor: + links: + - description: Open App + url: 'http://{{repl ConfigOption "ingress_host" }}' + ``` +* In the example below, both the URL hostname and a node port are user-supplied values. It might be necessary to include a user-provided node port if you are exposing NodePort services for installations on VMs or bare metal servers with [Replicated Embedded Cluster](/vendor/embedded-overview) or [Replicated kURL](/vendor/kurl-about). + + ```yaml + apiVersion: app.k8s.io/v1beta1 + kind: Application + metadata: + name: "my-app" + spec: + descriptor: + links: + - description: Open App + url: 'http://{{repl ConfigOption "hostname" }}:{{repl ConfigOption "node_port"}}' + ``` + +For more information about working with KOTS template functions, see [About Template Functions](/reference/template-functions-about). + +================ +File: docs/vendor/admin-console-customize-app-icon.md +================ +# Customizing the Application Icon + +You can add a custom application icon that displays in the Replicated Admin Console and the download portal. Adding a custom icon helps ensure that your brand is reflected for your customers. + +:::note +You can also use a custom domain for the download portal. For more information, see [About Custom Domains](custom-domains). +::: + +## Add a Custom Icon + +For information about how to choose an image file for your custom application icon that displays well in the Admin Console, see [Icon Image File Recommendations](#icon-image-file-recommendations) below. + +To add a custom application icon: + +1. In the [Vendor Portal](https://vendor.replicated.com/apps), click **Releases**. 
Click **Create release** to create a new release, or click **Edit YAML** to edit an existing release.
+1. Create or open the Application custom resource manifest file. An Application custom resource manifest file has `apiVersion: kots.io/v1beta1` and `kind: Application`.
+
+1. In the preview section of the Help pane:
+
+   1. If your Application manifest file is already populated with an `icon` key, the icon displays in the preview. Click **Preview a different icon** to access the preview options.
+
+   1. Drag and drop an icon image file to the drop zone. Alternatively, paste a link or Base64 encoded data URL in the text box. Click **Preview**.
+
+      ![Application icon preview](/images/app-icon-preview.png)
+
+   1. (Air gap only) If you paste a link to the image in the text box, click **Preview** and **Base64 encode icon** to convert the image to a Base64 encoded data URL. An encoded URL displays, which you can copy and paste into the Application manifest. Base64 encoding is required for images used with air gap installations.
+
+      :::note
+      If you pasted a Base64 encoded data URL into the text box, the **Base64 encode icon** button does not display because the image is already encoded. If you drag and drop an icon, the icon is automatically encoded for you.
+      :::
+
+      ![Base64 encode image button](/images/app-icon-preview-base64.png)
+
+   1. Click **Preview a different icon** to preview a different icon if needed.
+
+1. In the Application manifest, under `spec`, add an `icon` key that includes a link or the Base64 encoded data URL to the desired image.
+
+   **Example**:
+
+   ```yaml
+   apiVersion: kots.io/v1beta1
+   kind: Application
+   metadata:
+     name: my-application
+   spec:
+     title: My Application
+     icon: https://kots.io/images/kotsadm-logo-large@2x.png
+   ```
+1. Click **Save Release**.
+
+
+## Icon Image File Recommendations
+
+For your custom application icon to look best in the Admin Console, consider the following recommendations:
+
+* Use a PNG or JPG file.
+* Use an image that is at least 250 by 250 pixels.
+* Export the image file at 2x.
+
+================
+File: docs/vendor/admin-console-customize-config-screen.md
+================
+# Creating and Editing Configuration Fields
+
+This topic describes how to use the KOTS Config custom resource manifest file to add and edit fields in the KOTS Admin Console configuration screen.
+
+## About the Config Custom Resource
+
+Applications distributed with Replicated KOTS can include a configuration screen in the Admin Console to collect required or optional values from your users that are used to run your application. For more information about the configuration screen, see [About the Configuration Screen](config-screen-about).
+
+To include a configuration screen in the Admin Console for your application, you add a Config custom resource manifest file to a release for the application.
+
+You define the fields that appear on the configuration screen as an array of `groups` and `items` in the Config custom resource:
+ * `groups`: A set of `items`. Each group must have a `name`, `title`, `description`, and `items`. For example, you can create a group of several user input fields that are all related to configuring an SMTP mail server.
+ * `items`: An array of user input fields. Each array under `items` must have a `name`, `title`, and `type`. You can also include several optional properties. For example, in a group for configuring an SMTP mail server, you can have user input fields under `items` for the SMTP hostname, port, username, and password.
+
+   There are several types of `items` supported in the Config manifest that allow you to collect different types of user inputs. For example, you can use the `password` input type to create a text field on the configuration screen that hides user input.
+
+For more information about the syntax of the Config custom resource manifest, see [Config](/reference/custom-resource-config).
+
+## About Regular Expression Validation
+
+You can use [RE2 regular expressions](https://github.com/google/re2/wiki/Syntax) (regex) to validate user input for config items, ensuring conformity to certain standards, such as valid email addresses, password complexity rules, IP addresses, and URLs. This prevents users from deploying an application with a verifiably invalid configuration.
+
+You add the `validation`, `regex`, `pattern`, and `message` fields to items in the Config custom resource. Validation is supported for `text`, `textarea`, `password`, and `file` config item types. For more information about regex validation fields, see [Item Validation](/reference/custom-resource-config#item-validation) in _Config_.
+
+The following example shows a common password complexity rule:
+
+```yaml
+- name: smtp-settings
+  title: SMTP Settings
+  items:
+  - name: smtp_password
+    title: SMTP Password
+    type: password
+    help_text: Set SMTP password
+    validation:
+      regex:
+        pattern: ^(?:[\w@#$%^&+=!*()_\-{}[\]:;"'<>,.?\/|]){8,16}$
+        message: The password must be between 8 and 16 characters long and can contain a combination of uppercase letters, lowercase letters, digits, and special characters.
+```
+
+## Add Fields to the Configuration Screen
+
+To add fields to the Admin Console configuration screen:
+
+1. In the [Vendor Portal](https://vendor.replicated.com/apps), click **Releases**. Then, either click **Create release** to create a new release, or click **Edit YAML** to edit an existing release.
+1. Create or open the Config custom resource manifest file in the desired release. A Config custom resource manifest file has `kind: Config`.
+1. In the Config custom resource manifest file, define custom user-input fields in an array of `groups` and `items`.
+
+   **Example**:
+
+   ```yaml
+   apiVersion: kots.io/v1beta1
+   kind: Config
+   metadata:
+     name: my-application
+   spec:
+     groups:
+     - name: smtp_settings
+       title: SMTP Settings
+       description: Configure SMTP Settings
+       items:
+       - name: enable_smtp
+         title: Enable SMTP
+         help_text: Enable SMTP
+         type: bool
+         default: "0"
+       - name: smtp_host
+         title: SMTP Hostname
+         help_text: Set SMTP Hostname
+         type: text
+       - name: smtp_port
+         title: SMTP Port
+         help_text: Set SMTP Port
+         type: text
+       - name: smtp_user
+         title: SMTP User
+         help_text: Set SMTP User
+         type: text
+       - name: smtp_password
+         title: SMTP Password
+         type: password
+         default: 'password'
+   ```
+
+   The example above includes a single group with the name `smtp_settings`.
+
+   The `items` array for the `smtp_settings` group includes the following user-input fields: `enable_smtp`, `smtp_host`, `smtp_port`, `smtp_user`, and `smtp_password`. Additional item properties are available, such as `affix` to make items appear horizontally on the same line. For more information about item properties, see [Item Properties](/reference/custom-resource-config#item-properties) in _Config_.
+
+   The following screenshot shows how the SMTP Settings group from the first example above displays in the Admin Console configuration screen during application installation:
+
+   ![User input fields on the configuration screen for the SMTP settings](/images/config-screen-smtp-example-large.png)
+
+1. (Optional) Add default values for the fields. You can add default values using one of the following properties:
+   * **With the `default` property**: When you include the `default` key, KOTS uses this value when rendering the manifest files for your application. The value then displays as a placeholder on the configuration screen in the Admin Console for your users. KOTS only uses the default value if the user does not provide a different value.
+
+     :::note
+     If you change the `default` value in a later release of your application, installed instances of your application receive the updated value only if your users did not change the default from what it was when they initially installed the application.
+
+     If a user did change a field from its default, the Admin Console does not overwrite the value they provided.
+     :::
+
+   * **With the `value` property**: When you include the `value` key, KOTS does not overwrite this value during an application update. The value that you provide for the `value` key is visually indistinguishable from other values that your user provides on the Admin Console configuration screen. KOTS treats user-supplied values and the value that you provide for the `value` key as the same.
+
+1. (Optional) Add regular expressions to validate user input for the `text`, `textarea`, `password`, and `file` config item types. For more information, see [About Regular Expression Validation](#about-regular-expression-validation).
+
+   **Example**:
+
+   ```yaml
+   - name: smtp_host
+     title: SMTP Hostname
+     help_text: Set SMTP Hostname
+     type: text
+     validation:
+       regex:
+         pattern: ^[a-zA-Z]([a-zA-Z0-9\-]+[\.]?)*[a-zA-Z0-9]$
+         message: A valid hostname starts with a letter (uppercase or lowercase), followed by zero or more groups of letters, digits, or hyphens, optionally followed by a period, and ends with a letter or digit.
+   ```
+1. (Optional) Mark fields as required by including `required: true`. When there are required fields, the user is prevented from proceeding with the installation until they provide a valid value for each required field.
+
+   **Example**:
+
+   ```yaml
+   - name: smtp_password
+     title: SMTP Password
+     type: password
+     required: true
+   ```
+
+1. Save and promote the release to a development environment to test your changes.
+
+## Next Steps
+
+After you add user input fields to the configuration screen, you use template functions to map the user-supplied values to manifest files in your release. If you use a Helm chart for your application, you map the values to the Helm chart `values.yaml` file using the HelmChart custom resource.
+
+For more information, see [Mapping User-Supplied Values](config-screen-map-inputs).
+
+================
+File: docs/vendor/admin-console-display-app-status.md
+================
+import StatusesTable from "../partials/status-informers/_statusesTable.mdx"
+import AggregateStatus from "../partials/status-informers/_aggregateStatus.mdx"
+import AggregateStatusIntro from "../partials/status-informers/_aggregate-status-intro.mdx"
+import SupportedResources from "../partials/instance-insights/_supported-resources-status.mdx"
+
+# Adding Resource Status Informers
+
+This topic describes how to add status informers for your application.
Status informers apply only to applications installed with Replicated KOTS. For information about how to collect application status data for applications installed with Helm, see [Enabling and Understanding Application Status](insights-app-status).
+
+## About Status Informers
+
+_Status informers_ are a feature of KOTS that report on the status of supported Kubernetes resources deployed as part of your application. You enable status informers by listing the target resources under the `statusInformers` property in the Replicated Application custom resource. KOTS watches all of the resources that you add to the `statusInformers` property for changes in state.
+
+Possible resource statuses are Ready, Updating, Degraded, Unavailable, and Missing. For more information, see [Understanding Application Status](#understanding-application-status).
+
+When you add one or more status informers to your application, KOTS automatically does the following:
+
+* Displays application status for your users on the dashboard of the Admin Console. This can help users diagnose and troubleshoot problems with their instance. The following shows an example of how an Unavailable status displays on the Admin Console dashboard:
+
+   <img src="/images/kotsadm-dashboard-appstatus.png" alt="Unavailable status on the Admin Console dashboard" width="500px"/>
+
+* Sends application status data to the Vendor Portal. This is useful for viewing insights on instances of your application running in customer environments, such as the current status and the average uptime. For more information, see [Instance Details](instance-insights-details).
+
+   The following shows an example of the Vendor Portal **Instance details** page with data about the status of an instance over time:
+
+   <img src="/images/instance-details.png" alt="Instance details full page" width="700px"/>
+
+   [View a larger version of this image](/images/instance-details.png)
+
+## Add Status Informers
+
+To create status informers for your application, add one or more supported resource types to the `statusInformers` property in the Application custom resource. See [`statusInformers`](/reference/custom-resource-application#statusinformers) in _Application_.
+
+<SupportedResources/>
+
+You can target resources of the supported types that are deployed in any of the following ways:
+
+* Deployed by KOTS.
+* Deployed by a Kubernetes Operator that is deployed by KOTS. For more information, see [About Packaging a Kubernetes Operator Application](operator-packaging-about).
+* Deployed by Helm. For more information, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about).
+
+### Examples
+
+Status informers are in the format `[namespace/]type/name`, where the namespace is optional and defaults to the current namespace.
+
+**Example**:
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Application
+metadata:
+  name: my-application
+spec:
+  statusInformers:
+    - deployment/my-web-svc
+    - deployment/my-worker
+```
+
+The `statusInformers` property also supports template functions. Using template functions allows you to include or exclude a status informer based on a customer-provided configuration value:
+
+**Example**:
+
+```yaml
+statusInformers:
+  - deployment/my-web-svc
+  - '{{repl if ConfigOptionEquals "option" "value"}}deployment/my-worker{{repl else}}{{repl end}}'
+```
+
+In the example above, the `deployment/my-worker` status informer is excluded unless the statement in the `ConfigOptionEquals` template function evaluates to true.
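+
+Because the format accepts an optional namespace prefix, the same pattern can also target a resource outside the namespace where the application is installed. The following is a minimal sketch (the `my-namespace` namespace, `my-db` StatefulSet, and `external_db` config option are hypothetical):
+
+```yaml
+statusInformers:
+  - deployment/my-web-svc
+  # watch a StatefulSet in another namespace only when the option is enabled
+  - '{{repl if ConfigOptionEquals "external_db" "0"}}my-namespace/statefulset/my-db{{repl else}}{{repl end}}'
+```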
+ +For more information about using template functions in application manifest files, see [About Template Functions](/reference/template-functions-about). + +## Understanding Application Status + +This section provides information about how Replicated interprets and aggregates the status of Kubernetes resources for your application to report an application status. + +### Resource Statuses + +Possible resource statuses are Ready, Updating, Degraded, Unavailable, and Missing. + +The following table lists the supported Kubernetes resources and the conditions that contribute to each status: + +<StatusesTable/> + +### Aggregate Application Status + +<AggregateStatusIntro/> + +<AggregateStatus/> + +================ +File: docs/vendor/admin-console-port-forward.mdx +================ +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import ServicePortNote from "../partials/custom-resource-application/_servicePort-note.mdx" +import GiteaKotsApp from "../partials/getting-started/_gitea-kots-app-cr.mdx" +import GiteaHelmChart from "../partials/getting-started/_gitea-helmchart-cr.mdx" +import GiteaK8sApp from "../partials/getting-started/_gitea-k8s-app-cr.mdx" +import PortsApplicationURL from "../partials/custom-resource-application/_ports-applicationURL.mdx" +import NginxKotsApp from "../partials/application-links/_nginx-kots-app.mdx" +import NginxK8sApp from "../partials/application-links/_nginx-k8s-app.mdx" +import NginxService from "../partials/application-links/_nginx-service.mdx" +import NginxDeployment from "../partials/application-links/_nginx-deployment.mdx" + +# Port Forwarding Services with KOTS + +This topic describes how to add one or more ports to the Replicated KOTS port forward tunnel by configuring the `ports` key in the KOTS Application custom resource. + +The information in this topic applies to existing cluster installations. For information about exposing services for Replicated kURL or Replicated Embedded Cluster installations, see [Exposing Services Using NodePorts](kurl-nodeport-services). + +## Overview + +For installations into existing clusters, KOTS automatically creates a port forward tunnel and exposes the Admin Console on port 8800 where it can be accessed by users. In addition to the 8800 Admin Console port, you can optionally add one or more extra ports to the port forward tunnel. + +Adding ports to the port forward tunnel allows you to port forward application services without needing to manually run the `kubectl port-forward` command. You can also add a link to the Admin Console dashboard that points to port-forwarded services. + +This can be particularly useful when developing and testing KOTS releases for your application, because it provides a quicker way to access an application after installation compared to setting up an ingress controller or adding a load balancer. + +## Port Forward a Service with the KOTS Application `ports` Key + +To port forward a service with KOTS for existing cluster installations: + +1. In a new release, configure the [`ports`](/reference/custom-resource-application#ports) key in the KOTS Application custom resource with details for the target service. For example: + + ```yaml + apiVersion: kots.io/v1beta1 + kind: Application + metadata: + name: my-application + spec: + ports: + - serviceName: my-service + servicePort: 3000 + localPort: 8888 + ``` + + 1. For `ports.serviceName`, add the name of the service. KOTS can create a port forward to ClusterIP, NodePort, or LoadBalancer services. 
For more information about Kubernetes service types, see [Service](https://kubernetes.io/docs/concepts/services-networking/service/) in the Kubernetes documentation. + + 1. For `ports.servicePort`, add the `containerPort` of the Pod where the service is running. This is the port where KOTS forwards traffic. + + <ServicePortNote/> + + 1. For `ports.localPort`, add the port to map on the local workstation. + +1. Promote the release to the channel that you use for internal testing, then install in a development environment to test your changes. + + When the application is in a Ready state and the KOTS port forward is running, you will see output similar to the following: + + ```bash + • Press Ctrl+C to exit + • Go to http://localhost:8800 to access the Admin Console + • Go to http://localhost:8888 to access the application + ``` + Confirm that you can access the service at the URL provided in the KOTS CLI output. + +1. (Optional) Add a link to the service on the Admin Console dashboard. See [Add a Link to a Port-Forwarded Service on the Admin Console Dashboard](#add-link) below. + +## Add a Link to a Port-Forwarded Service on the Admin Console Dashboard {#add-link} + +After you add a service to the KOTS port forward tunnel, you can also optionally add a link to the port-forwarded service on the Admin Console dashboard. + +To add a link to a port-forwarded service, add the _same_ URL in the KOTS Application custom resource `ports.applicationURL` and Kubernetes SIG Application custom resource `spec.descriptor.links.url` fields. When the URLs in these fields match, KOTS adds a link on the Admin Console dashboard where the given service can be accessed. This process automatically links to the hostname in the browser (where the Admin Console is being accessed) and appends the specified `localPort`. + +To add a link to a port-forwarded service on the Admin Console dashboard: + +1. In a new release, open the KOTS Application custom resource and add a URL to the `ports.applicationURL` field. For example: + + ```yaml + apiVersion: kots.io/v1beta1 + kind: Application + metadata: + name: my-application + spec: + ports: + - serviceName: my-service + servicePort: 3000 + localPort: 8888 + applicationUrl: "http://my-service" + ``` + + Consider the following guidelines for this URL: + * Use HTTP instead of HTTPS unless TLS termination takes place in the application Pod. + * KOTS rewrites the URL with the hostname in the browser during deployment. So, you can use any hostname for the URL, such as the name of the service. For example, `http://my-service`. + +1. Add a Kubernetes SIG Application custom resource in the release. For example: + + ```yaml + # app.k8s.io/v1beta1 Application Custom resource + + apiVersion: app.k8s.io/v1beta1 + kind: Application + metadata: + name: "my-application" + spec: + descriptor: + links: + - description: Open App + # url matches ports.applicationURL in the KOTS Application custom resource + url: "http://my-service" + ``` + + 1. For `spec.descriptor.links.description`, add the link text that will appear on the Admin Console dashboard. For example, `Open App`. + + 1. For `spec.descriptor.links.url`, add the _same_ URL that you used in the `ports.applicationURL` in the KOTS Application custom resource. + +1. Promote the release to the channel that you use for internal testing, then install in a development environment to test your changes. + + When the application is in a Ready state, confirm that you can access the service by clicking the link that appears on the dashboard. 
For example: + + <img alt="Admin Console dashboard with Open App link" src="/images/gitea-open-app.png" width="700px"/> + + [View a larger version of this image](/images/gitea-open-app.png) + +## Access Port-Forwarded Services + +This section describes how to access port-forwarded services. + +### Command Line + +Run [`kubectl kots admin-console`](/reference/kots-cli-admin-console-index) to open the KOTS port forward tunnel. + +The `kots admin-console` command runs the equivalent of `kubectl port-forward svc/myapplication-service <local-port>:<remote-port>`, then prints a message with the URLs where the Admin Console and any port-forwarded services can be accessed. For more information about the `kubectl port-forward` command, see [port-forward](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#port-forward) in the Kubernetes documentation. + +For example: + +```bash +kubectl kots admin-console --namespace gitea +``` +```bash +• Press Ctrl+C to exit +• Go to http://localhost:8800 to access the Admin Console +• Go to http://localhost:8888 to access the application +``` + +### Admin Console + +You can optionally add a link to a port-forwarded service from the Admin Console dashboard. This requires additional configuration. For more information, see [Add a Link to a Port-Forwarded Service on the Admin Console Dashboard](#add-link). + +The following example shows an **Open App** link on the dashboard of the Admin Console for an application named Gitea: + +<img alt="Admin Console dashboard with Open App link" src="/images/gitea-open-app.png" width="700px"/> + +[View a larger version of this image](/images/gitea-open-app.png) + +## Examples + +This section provides examples of how to configure the `ports` key to port-forward a service in existing cluster installations and add links to services on the Admin Console dashboard. + +### Example: Bitnami Gitea Helm Chart with LoadBalancer Service + +This example uses a KOTS Application custom resource and a Kubernetes SIG Application custom resource to configure port forwarding for the Bitnami Gitea Helm chart in existing cluster installations, and add a link to the port-forwarded service on the Admin Console dashboard. To view the Gitea Helm chart source, see [bitnami/gitea](https://github.com/bitnami/charts/blob/main/bitnami/gitea) in GitHub. + +To test this example: + +1. Pull version 1.0.6 of the Gitea Helm chart from Bitnami: + + ``` + helm pull oci://registry-1.docker.io/bitnamicharts/gitea --version 1.0.6 + ``` + +1. Add the `gitea-1.0.6.tgz` chart archive to a new, empty release in the Vendor Portal along with the `kots-app.yaml`, `k8s-app.yaml`, and `gitea.yaml` files provided below. Promote to the channel that you use for internal testing. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases). + + <Tabs> + <TabItem value="kots-app" label="kots-app.yaml" default> + <h5>Description</h5> + <p>Based on the <a href="https://github.com/bitnami/charts/blob/main/bitnami/gitea/templates/svc.yaml">templates/svc.yaml</a> and <a href="https://github.com/bitnami/charts/blob/main/bitnami/gitea/values.yaml">values.yaml</a> files in the Gitea Helm chart, the following KOTS Application custom resource adds port 3000 to the port forward tunnel and maps local port 8888. 
Port 3000 is the container port of the Pod where the <code>gitea</code> service runs.</p>
+      <h5>YAML</h5>
+      <GiteaKotsApp/>
+    </TabItem>
+    <TabItem value="k8s-app" label="k8s-app.yaml" default>
+      <h5>Description</h5>
+      <p>The Kubernetes Application custom resource lists the same URL as the `ports.applicationUrl` field in the KOTS Application custom resource (`"http://gitea"`). This adds a link to the port-forwarded service from the Admin Console dashboard. It also triggers KOTS to rewrite the URL to use the hostname in the browser and append the specified `localPort`. The label to be used for the link in the Admin Console is "Open App".</p>
+      <h5>YAML</h5>
+      <GiteaK8sApp/>
+    </TabItem>
+    <TabItem value="helmchart" label="gitea.yaml" default>
+      <h5>Description</h5>
+      <p>The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart. The <code>name</code> and <code>chartVersion</code> listed in the HelmChart custom resource must match the name and version of a Helm chart archive in the release. Each Helm chart archive in a release requires a unique HelmChart custom resource.</p>
+      <h5>YAML</h5>
+      <GiteaHelmChart/>
+    </TabItem>
+  </Tabs>
+
+1. Install the release to confirm that the service was port-forwarded successfully. To test the port forward, click **Open App** on the Admin Console dashboard after the application reaches a Ready state. For more information, see [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster).
+
+### Example: NGINX Application with ClusterIP and NodePort Services
+
+The following example demonstrates how to link to a port-forwarded ClusterIP service for existing cluster installations.
+
+It also shows how to use the `ports` key to add a link to a NodePort service for kURL installations. Although the primary purpose of the `ports` key is to port forward services for existing cluster installations, you can also use it so that links to NodePort services for Embedded Cluster or kURL installations use the hostname in the browser. For information about exposing NodePort services for Embedded Cluster or kURL installations, see [Exposing Services Using NodePorts](kurl-nodeport-services).
+
+To test this example:
+
+1. Add the `example-service.yaml`, `example-deployment.yaml`, `kots-app.yaml`, and `k8s-app.yaml` files provided below to a new, empty release in the Vendor Portal. Promote to the channel that you use for internal testing. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases).
+
+    <Tabs>
+    <TabItem value="service" label="example-service.yaml" default>
+      <h5>Description</h5>
+      <p>The YAML below contains ClusterIP and NodePort specifications for a service named <code>nginx</code>. Each specification uses the <code>kots.io/when</code> annotation with the Replicated IsKurl template function to conditionally include the service based on the installation type (existing cluster or kURL cluster).
For more information, see <a href="/vendor/packaging-include-resources">Conditionally Including or Excluding Resources</a> and <a href="/reference/template-functions-static-context#iskurl">IsKurl</a>.</p> + <p>As shown below, both the ClusterIP and NodePort <code>nginx</code> services are exposed on port 80.</p> + <h5>YAML</h5> + <NginxService/> + </TabItem> + <TabItem value="deployment" label="example-deployment.yaml" default> + <h5>Description</h5> + <p>A basic Deployment specification for the NGINX application.</p> + <h5>YAML</h5> + <NginxDeployment/> + </TabItem> + <TabItem value="kots-app" label="kots-app.yaml" default> + <h5>Description</h5> + <p>The KOTS Application custom resource below adds port 80 to the KOTS port forward tunnel and maps port 8888 on the local machine. The specification also includes <code>applicationUrl: "http://nginx"</code> so that a link to the service can be added to the Admin Console dashboard.</p> + <h5>YAML</h5> + <NginxKotsApp/> + </TabItem> + <TabItem value="k8s-app" label="k8s-app.yaml" default> + <h5>Description</h5> + <p>The Kubernetes Application custom resource lists the same URL as the `ports.applicationUrl` field in the KOTS Application custom resource (`"http://nginx"`). This adds a link to the port-forwarded service on the Admin Console dashboard that uses the hostname in the browser and appends the specified `localPort`. The label to be used for the link in the Admin Console is "Open App".</p> + <h5>YAML</h5> + <NginxK8sApp/> + </TabItem> + </Tabs> + +1. Install the release into an existing cluster and confirm that the service was port-forwarded successfully by clicking **Open App** on the Admin Console dashboard. For more information, see [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster). + +1. If there is not already a kURL installer promoted to the channel, add a kURL installer to the release to support kURL installs. For more information, see [Creating a kURL Installer](/vendor/packaging-embedded-kubernetes). + +1. Install the release on a VM and confirm that the service was exposed successfully. To test the port forward, click **Open App** on the Admin Console dashboard after the application reaches a Ready state. For more information, see [Online Installation with kURL](/enterprise/installing-kurl). + + :::note + Ensure that the VM where you install allows HTTP traffic. + ::: + +================ +File: docs/vendor/admin-console-prometheus-monitoring.mdx +================ +import OverviewProm from "../partials/monitoring/_overview-prom.mdx" +import LimitationEc from "../partials/monitoring/_limitation-ec.mdx" + +# Adding Custom Graphs + +This topic describes how to customize the graphs that are displayed on the Replicated Admin Console dashboard. + +## Overview of Monitoring with Prometheus + +<OverviewProm/> + +## About Customizing Graphs + +If your application exposes Prometheus metrics, you can add custom graphs to the Admin Console dashboard to expose these metrics to your users. You can also modify or remove the default graphs. + +To customize the graphs that are displayed on the Admin Console, edit the [`graphs`](/reference/custom-resource-application#graphs) property in the KOTS Application custom resource manifest file. At a minimum, each graph in the `graphs` property must include the following fields: +* `title`: Defines the graph title that is displayed on the Admin Console. +* `query`: A valid PromQL Prometheus query. 
You can also include a list of multiple queries by using the `queries` property. For more information about querying Prometheus with PromQL, see [Querying Prometheus](https://prometheus.io/docs/prometheus/latest/querying/basics/) in the Prometheus documentation. + +:::note +By default, a kURL cluster exposes the Prometheus expression browser at NodePort 30900. For more information, see [Expression Browser](https://prometheus.io/docs/visualization/browser/) in the Prometheus documentation. +::: + +## Limitation + +<LimitationEc/> + +## Add and Modify Graphs + +To customize graphs on the Admin Console dashboard: + +1. In the [Vendor Portal](https://vendor.replicated.com/), click **Releases**. Then, either click **Create release** to create a new release, or click **Edit YAML** to edit an existing release. + +1. Create or open the [KOTS Application](/reference/custom-resource-application) custom resource manifest file. + +1. In the Application manifest file, under `spec`, add a `graphs` property. Edit the `graphs` property to modify or remove existing graphs or add a new custom graph. For more information, see [graphs](/reference/custom-resource-application#graphs) in _Application_. + + **Example**: + + The following example shows the YAML for adding a custom graph that displays the total number of user signups for an application. + + ```yaml + apiVersion: kots.io/v1beta1 + kind: Application + metadata: + name: my-application + spec: + graphs: + - title: User Signups + query: 'sum(user_signup_events_total)' + ``` + +1. (Optional) Under `graphs`, copy and paste the specs for the default Disk Usage, CPU Usage, and Memory Usage Admin Console graphs provided in the YAML below. + + Adding these default graphs to the Application custom resource manifest ensures that they are not overwritten when you add one or more custom graphs. When the default graphs are included in the Application custom resource, the Admin Console displays them in addition to any custom graphs. + + Alternatively, you can exclude the YAML specs for the default graphs to remove them from the Admin Console dashboard. + + ```yaml + apiVersion: kots.io/v1beta1 + kind: Application + metadata: + name: my-application + spec: + graphs: + - title: User Signups + query: 'sum(user_signup_events_total)' + # Disk Usage, CPU Usage, and Memory Usage below are the default graphs + - title: Disk Usage + queries: + - query: 'sum((node_filesystem_size_bytes{job="node-exporter",fstype!="",instance!=""} - node_filesystem_avail_bytes{job="node-exporter", fstype!=""})) by (instance)' + legend: 'Used: {{ instance }}' + - query: 'sum((node_filesystem_avail_bytes{job="node-exporter",fstype!="",instance!=""})) by (instance)' + legend: 'Available: {{ instance }}' + yAxisFormat: bytes + - title: CPU Usage + query: 'sum(rate(container_cpu_usage_seconds_total{namespace="{{repl Namespace}}",container!="POD",pod!=""}[5m])) by (pod)' + legend: '{{ pod }}' + - title: Memory Usage + query: 'sum(container_memory_usage_bytes{namespace="{{repl Namespace}}",container!="POD",pod!=""}) by (pod)' + legend: '{{ pod }}' + yAxisFormat: bytes + ``` +1. Save and promote the release to a development environment to test your changes. 
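+
+When testing your changes, you can also confirm that a custom query returns data by running it against the Prometheus HTTP API directly. For example, the following sketch queries the `user_signup_events_total` metric from the example above through the expression browser NodePort on a kURL cluster (the node IP is hypothetical):
+
+```bash
+# POST the PromQL expression to the Prometheus query endpoint
+curl 'http://10.0.0.1:30900/api/v1/query' \
+  --data-urlencode 'query=sum(user_signup_events_total)'
+```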
+
+================
+File: docs/vendor/ci-overview.md
+================
+import TestRecs from "../partials/ci-cd/_test-recs.mdx"
+
+# About Integrating with CI/CD
+
+This topic provides an introduction to integrating Replicated CLI commands in your continuous integration and continuous delivery (CI/CD) pipelines, including Replicated's best practices and recommendations.
+
+## Overview
+
+Using CI/CD workflows to automatically compile code and run tests improves the speed at which teams can test, iterate on, and deliver releases to customers. When you integrate Replicated CLI commands into your CI/CD workflows, you can automate the process of deploying your application to clusters for testing, rather than manually creating and then archiving the channels, customers, and environments used for testing.
+
+You can also include continuous delivery workflows to automatically promote a release to a shared channel in your Replicated team. This allows you to more easily share releases with team members for internal testing and iteration, and then promote releases when they are ready to be shared with customers.
+
+## Best Practices and Recommendations
+
+The following are Replicated's best practices and recommendations for CI/CD:
+
+* Include unique workflows for development and for releasing your application. This allows you to run tests on every commit, and then to promote releases to internal and customer-facing channels only when ready. For more information about the workflows that Replicated recommends, see [Recommended CI/CD Workflows](ci-workflows).
+
+* Integrate Replicated Compatibility Matrix into your CI/CD workflows to quickly create multiple different types of clusters where you can deploy and test your application. Supported distributions include OpenShift, GKE, EKS, and more. For more information, see [About Compatibility Matrix](testing-about).
+
+* If you use the GitHub Actions CI/CD platform, integrate the custom GitHub actions that Replicated maintains to replace repetitive tasks related to distributing your application with Replicated or using Compatibility Matrix. For more information, see [Integrating Replicated GitHub Actions](/vendor/ci-workflows-github-actions).
+
+* To help show that you conform to secure supply chain practices, sign all commits and container images. Additionally, provide a verification mechanism for container images.
+
+* Use custom RBAC policies to control the actions that can be performed in your CI/CD workflows. For example, you can create a policy that blocks the ability to promote releases to your production channel. For more information about creating custom RBAC policies in the Vendor Portal, see [Configuring RBAC Policies](/vendor/team-management-rbac-configuring). For a full list of available RBAC resources, see [RBAC Resource Names](/vendor/team-management-rbac-resource-names).
+
+* Incorporating code tests into your CI/CD workflows is important for ensuring that developers receive quick feedback and can make updates in small iterations. Replicated recommends that you create and run all of the following test types as part of your CI/CD workflows:
+  <TestRecs/>
+
+================
+File: docs/vendor/ci-workflows-github-actions.md
+================
+# Integrating Replicated GitHub Actions
+
+This topic describes how to integrate Replicated's custom GitHub actions into continuous integration and continuous delivery (CI/CD) workflows that use the GitHub Actions platform.
+
+## Overview
+
+Replicated maintains a set of custom GitHub actions that are designed to replace repetitive tasks related to distributing your application with Replicated and to using the Compatibility Matrix, such as:
+  * Creating and removing customers, channels, and clusters
+  * Promoting releases
+  * Creating a matrix of clusters for testing based on the Kubernetes distributions and versions where your customers are running application instances
+  * Reporting the success or failure of tests
+
+If you use GitHub Actions as your CI/CD platform, you can include these custom actions in your workflows rather than using Replicated CLI commands. Integrating the Replicated GitHub actions into your CI/CD pipeline helps you quickly build workflows with the required inputs and outputs, without needing to manually create the required CLI commands for each step.
+
+To view all the available GitHub actions that Replicated maintains, see the [replicatedhq/replicated-actions](https://github.com/replicatedhq/replicated-actions/) repository in GitHub.
+
+## GitHub Actions Workflow Examples
+
+The [replicatedhq/replicated-actions](https://github.com/replicatedhq/replicated-actions#examples) repository in GitHub contains example workflows that use the Replicated GitHub actions. You can use these workflows as a template for your own GitHub Actions CI/CD workflows:
+
+* For a simplified development workflow, see [development-helm-prepare-cluster.yaml](https://github.com/replicatedhq/replicated-actions/blob/main/example-workflows/development-helm-prepare-cluster.yaml).
+* For a customizable development workflow for applications installed with the Helm CLI, see [development-helm.yaml](https://github.com/replicatedhq/replicated-actions/blob/main/example-workflows/development-helm.yaml).
+* For a customizable development workflow for applications installed with KOTS, see [development-kots.yaml](https://github.com/replicatedhq/replicated-actions/blob/main/example-workflows/development-kots.yaml).
+* For a release workflow, see [release.yaml](https://github.com/replicatedhq/replicated-actions/blob/main/example-workflows/release.yaml).
+
+## Integrate GitHub Actions
+
+The following table lists the GitHub actions maintained by Replicated that you can integrate into your CI/CD workflows. The table also describes when to use each action in a workflow and indicates the related Replicated CLI command where applicable.
+
+:::note
+For an up-to-date list of the available custom GitHub actions, see the [replicatedhq/replicated-actions](https://github.com/replicatedhq/replicated-actions/) repository in GitHub.
+:::
+
+<table>
+  <tr>
+    <th width="25%">GitHub Action</th>
+    <th width="50%">When to Use</th>
+    <th width="25%">Related Replicated CLI Commands</th>
+  </tr>
+  <tr>
+    <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/archive-channel">archive-channel</a></td>
+    <td>
+      <p>In release workflows, a temporary channel is created to promote a release for testing.
This action archives the temporary channel after tests complete.</p> + <p>See <a href="/vendor/ci-workflows#rel-cleanup">Archive the temporary channel and customer</a> in <em>Recommended CI/CD Workflows</em>.</p> + </td> + <td><a href="/reference/replicated-cli-channel-delete"><code>channel delete</code></a></td> + </tr> + <tr> + <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/archive-customer">archive-customer</a></td> + <td> + <p>In release workflows, a temporary customer is created so that a release can be installed for testing. This action archives the temporary customer after tests complete.</p> + <p>See <a href="/vendor/ci-workflows#rel-cleanup">Archive the temporary channel and customer</a> in <em>Recommended CI/CD Workflows</em>.</p> + </td> + <td>N/A</td> + </tr> + <tr> + <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/create-cluster">create-cluster</a></td> + <td> + <p>In release workflows, use this action to create one or more clusters for testing.</p> + <p>See <a href="/vendor/ci-workflows#rel-deploy">Create cluster matrix, deploy, and test</a> in <em>Recommended CI/CD Workflows</em>.</p> + </td> + <td><a href="/reference/replicated-cli-cluster-create"><code>cluster create</code></a></td> + </tr> + <tr> + <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/create-release">create-release</a></td> + <td> + <p>In release workflows, use this action to create a release to be installed and tested, and optionally to be promoted to a shared channel after tests complete.</p> + <p>See <a href="/vendor/ci-workflows#rel-release">Create a release and promote to a temporary channel</a> in <em>Recommended CI/CD Workflows</em>. </p> + </td> + <td><a href="/reference/replicated-cli-release-create"><code>release create</code></a></td> + </tr> + <tr> + <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/get-customer-instances">get-customer-instances</a></td> + <td> + <p>In release workflows, use this action to create a matrix of clusters for running tests based on the Kubernetes distributions and versions of active instances of your application running in customer environments.</p> + <p>See <a href="/vendor/ci-workflows#rel-deploy">Create cluster matrix, deploy, and test</a> in <em>Recommended CI/CD Workflows</em>.</p> + </td> + <td>N/A</td> + </tr> + <tr> + <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/helm-install">helm-install</a></td> + <td> + <p>In development or release workflows, use this action to install a release using the Helm CLI in one or more clusters for testing.</p> + <p>See <a href="/vendor/ci-workflows#rel-deploy">Create cluster matrix, deploy, and test</a> in <em>Recommended CI/CD Workflows</em>.</p> + </td> + <td>N/A</td> + </tr> + <tr> + <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/kots-install">kots-install</a></td> + <td> + <p>In development or release workflows, use this action to install a release with Replicated KOTS in one or more clusters for testing.</p> + <p>See <a href="/vendor/ci-workflows#rel-deploy">Create cluster matrix, deploy, and test</a> in <em>Recommended CI/CD Workflows</em>.</p> + </td> + <td>N/A</td> + </tr> + <tr> + <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/prepare-cluster">prepare-cluster</a></td> + <td> + <p>In development workflows, use this action to create a cluster, create a temporary customer of type <code>test</code>, and install an application in the cluster.</p> 
+ <p>See <a href="/vendor/ci-workflows#dev-deploy">Prepare clusters, deploy, and test</a> in <em>Recommended CI/CD Workflows</em>.</p> + </td> + <td><a href="/reference/replicated-cli-cluster-prepare"><code>cluster prepare</code></a></td> + </tr> + <tr> + <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/promote-release">promote-release</a></td> + <td> + <p>In release workflows, use this action to promote a release to an internal or customer-facing channel (such as Unstable, Beta, or Stable) after tests pass.</p> + <p>See <a href="/vendor/ci-workflows#rel-promote">Promote to a shared channel</a> in <em>Recommended CI/CD Workflows</em>.</p> + </td> + <td><a href="/reference/replicated-cli-release-promote"><code>release promote</code></a></td> + </tr> + <tr> + <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/remove-cluster">remove-cluster</a></td> + <td> + <p>In development or release workflows, use this action to remove a cluster after running tests if no <code>ttl</code> was set for the cluster.</p> + <p>See <a href="/vendor/ci-workflows#dev-deploy">Prepare clusters, deploy, and test</a> and <a href="/vendor/ci-workflows#rel-deploy">Create cluster matrix, deploy, and test</a> in <em>Recommended CI/CD Workflows</em>.</p> + </td> + <td><a href="/reference/replicated-cli-cluster-rm"><code>cluster rm</code></a></td> + </tr> + <tr> + <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/report-compatibility-result">report-compatibility-result</a></td> + <td>In development or release workflows, use this action to report the success or failure of tests that ran in clusters provisioned by the Compatibility Matrix.</td> + <td><code>release compatibility</code></td> + </tr> + <tr> + <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/upgrade-cluster">upgrade-cluster</a></td> + <td>In release workflows, use this action to test your application's compatibility with Kubernetes API resource version migrations after upgrading.</td> + <td><a href="/reference/replicated-cli-cluster-upgrade"><code>cluster upgrade</code></a></td> + </tr> +</table> + +================ +File: docs/vendor/ci-workflows.mdx +================ +import Build from "../partials/ci-cd/_build-source-code.mdx" + +# Recommended CI/CD Workflows + +This topic provides Replicated's recommended development and release workflows for your continuous integration and continuous delivery (CI/CD) pipelines. + +## Overview + +Replicated recommends that you maintain unique CI/CD workflows for development (continuous integration) and for releasing your software (continuous delivery). The development and release workflows in this topic describe the recommended steps and jobs to include in your own workflows, including how to integrate Replicated Compatibility Matrix into your workflows for testing. For more information about Compatibility Matrix, see [About Compatibility Matrix](testing-about). + +For each step, the corresponding Replicated CLI command is provided. Additionally, for users of the GitHub Actions platform, a corresponding custom GitHub action that is maintained by Replicated is also provided. For more information about using the Replicated CLI, see [Installing the Replicated CLI](/reference/replicated-cli-installing). For more information about the Replicated GitHub actions, see [Integrating Replicated GitHub Actions](ci-workflows-github-actions). 
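+
+For example, the following is a minimal sketch of a setup step that installs and authenticates the Replicated CLI in a CI job before running any of the commands described in this topic (the app slug and the `VENDOR_PORTAL_TOKEN` secret name are hypothetical):
+
+```bash
+# Install the Replicated CLI (Homebrew is one documented install method)
+brew install replicatedhq/replicated/cli
+
+# The CLI reads the API token from this environment variable
+export REPLICATED_API_TOKEN="$VENDOR_PORTAL_TOKEN"
+
+# Verify access by listing releases for the app
+replicated release ls --app my-app
+```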
+
+:::note
+How you implement CI/CD workflows varies depending on the platform, such as GitHub, GitLab, CircleCI, TravisCI, or Jenkins. Refer to the documentation for your CI/CD platform for additional guidance on how to create jobs and workflows.
+:::
+
+## About Creating RBAC Policies for CI/CD
+
+Replicated recommends using custom RBAC policies to control the actions that can be performed in your CI/CD workflows. For example, you can create a policy using the [`kots/app/[]/channel/[]/promote`](/vendor/team-management-rbac-resource-names#kotsappchannelpromote) resource that blocks the ability to promote releases to your production channel. This allows you to use CI/CD for testing without accidentally releasing to customers.
+
+For more information about creating custom RBAC policies in the Vendor Portal, including examples, see [Configuring RBAC Policies](/vendor/team-management-rbac-configuring).
+
+For a full list of available RBAC resources, see [RBAC Resource Names](/vendor/team-management-rbac-resource-names).
+
+## Development Workflow
+
+In a development workflow (which runs multiple times per day and is triggered by a commit to the application code repository), the source code is built and the application is deployed to clusters for testing. Additionally, for applications managed in the Replicated Vendor Portal, a release is created and promoted to a channel where it can be shared with internal teams.
+
+The following diagram shows the recommended development workflow, where a commit to the application code repository triggers the source code to be built and the application to be deployed to clusters for testing:
+
+![Development CI workflow](/images/ci-workflow-dev.png)
+
+[View a larger version of this image](/images/ci-workflow-dev.png)
+
+The following describes the recommended steps to include in development workflows, as shown in the diagram above:
+1. [Define workflow triggers](#dev-triggers)
+1. [Build source code](#dev-build)
+1. [Prepare clusters, deploy, and test](#dev-deploy)
+
+### Define workflow triggers {#dev-triggers}
+
+Run a development workflow on every commit to a branch in your code repository that is _not_ `main`.
+
+The following example shows defining a workflow trigger in GitHub Actions that runs the workflow when a commit is pushed to any branch other than `main`:
+
+```yaml
+name: development-workflow-example
+
+on:
+  push:
+    branches:
+      - '*' # matches every branch that doesn't contain a '/'
+      - '*/*' # matches every branch containing a single '/'
+      - '**' # matches every branch
+      - '!main' # excludes main
+
+jobs:
+  ...
+```
+
+### Build source code {#dev-build}
+
+<Build/>
+
+### Prepare clusters, deploy, and test {#dev-deploy}
+
+Add a job with the following steps to prepare clusters with Replicated Compatibility Matrix, deploy the application, and run tests:
+
+1. Use Replicated Compatibility Matrix to prepare one or more clusters and deploy the application. Consider the following recommendations:
+
+   * For development workflows, Replicated recommends that you use the `cluster prepare` command to provision one or more clusters with Compatibility Matrix. The `cluster prepare` command creates a cluster, creates a release, and installs the release in the cluster, without the need to promote the release to a channel or create a temporary customer. See the [`cluster prepare`](/reference/replicated-cli-cluster-prepare) Replicated CLI command.
Or, for GitHub Actions workflows, see the [prepare-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/prepare-cluster) GitHub action.
+
+     :::note
+     The `cluster prepare` command is Beta. It is recommended for development only and is not recommended for production releases. For production releases, Replicated recommends that you use the `cluster create` command instead. For more information, see [Create cluster matrix, deploy, and test](#rel-deploy) in _Replicated Platform Release Workflow_ below.
+     :::
+
+   * The type and number of clusters that you choose to provision as part of a development workflow depends on how frequently you intend the workflow to run. For example, for workflows that run multiple times a day, you might prefer to provision cluster distributions that can be created quickly, such as kind clusters.
+
+1. Run tests, such as integration, smoke, and canary tests. For more information about recommended types of tests to run, see [Best Practices and Recommendations](/vendor/ci-overview#best-practices-and-recommendations) in _About Integrating with CI/CD_.
+
+1. After the tests complete, remove the cluster. Alternatively, if you used the `--ttl` flag with the `cluster prepare` command, the cluster is automatically removed when the given time period is reached. See the [`cluster rm`](/reference/replicated-cli-cluster-rm) Replicated CLI command. Or, for GitHub Actions workflows, see the [remove-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/remove-cluster) action.
+
+## Compatibility Matrix-Only Development Workflow
+
+In a development workflow (which runs multiple times per day and is triggered by a commit to the application code repository), the source code is built and the application is deployed to clusters for testing.
+
+This example development workflow does _not_ create releases or customers in the Replicated vendor platform. This workflow is useful for applications that are not distributed or managed in the Replicated platform.
+
+The following describes the recommended steps to include in a development workflow using Compatibility Matrix:
+
+1. [Define workflow triggers](#cmx-dev-triggers)
+1. [Build source code](#cmx-dev-build)
+1. [Create cluster matrix, deploy, and test](#cmx-dev-deploy)
+
+### Define workflow triggers {#cmx-dev-triggers}
+
+Run a development workflow on every commit to a branch in your code repository that is _not_ `main`.
+
+The following example shows defining a workflow trigger in GitHub Actions that runs the workflow when a commit is pushed to any branch other than `main`:
+
+```yaml
+name: development-workflow-example
+
+on:
+  push:
+    branches:
+      - '*' # matches every branch that doesn't contain a '/'
+      - '*/*' # matches every branch containing a single '/'
+      - '**' # matches every branch
+      - '!main' # excludes main
+
+jobs:
+  ...
+```
+
+### Build source code {#cmx-dev-build}
+
+<Build/>
+
+### Create cluster matrix, deploy, and test {#cmx-dev-deploy}
+
+Add a job with the following steps to provision clusters with Compatibility Matrix, deploy your application to the clusters, and run tests:
+
+1. Use Compatibility Matrix to create a matrix of different Kubernetes cluster distributions and versions to run tests against. See the [cluster create](/reference/replicated-cli-cluster-create) Replicated CLI command. Or, for GitHub Actions workflows, see the [create-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/create-cluster) action.
+
+   The following example shows creating a matrix of clusters of different distributions and versions using GitHub Actions:
+
+   ```yaml
+   # github actions cluster matrix example
+
+   compatibility-matrix-example:
+     runs-on: ubuntu-22.04
+     strategy:
+       matrix:
+         cluster:
+           - {distribution: kind, version: "1.25"}
+           - {distribution: kind, version: "1.26"}
+           - {distribution: eks, version: "1.26"}
+           - {distribution: gke, version: "1.27"}
+           - {distribution: openshift, version: "4.13.0-okd"}
+   ```
+
+1. For each cluster created, use the cluster's kubeconfig to update the Kubernetes context and then install the target application in the cluster. For more information about accessing the kubeconfig for clusters created with Compatibility Matrix, see [cluster kubeconfig](/reference/replicated-cli-cluster-kubeconfig).
+
+1. Run tests, such as integration, smoke, and canary tests. For more information about recommended types of tests to run, see [Best Practices and Recommendations](/vendor/ci-overview#best-practices-and-recommendations) in _About Integrating with CI/CD_.
+
+1. Delete the cluster when the tests complete. See the [cluster rm](/reference/replicated-cli-cluster-rm) Replicated CLI command. Or, for GitHub Actions workflows, see the [remove-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/remove-cluster) action.
+
+## Replicated Platform Release Workflow
+
+In a release workflow (which is triggered by an action such as a commit to `main` or a tag being pushed to the repository), the source code is built, the application is deployed to clusters for testing, and then the application is made available to customers. In this example release workflow, a release is created and promoted to a channel in the Replicated vendor platform so that it can be installed by internal teams or by customers.
+
+The following diagram demonstrates a release workflow that promotes a release to the Beta channel when a tag with the format `"v*.*.*-beta.*"` is pushed:
+
+![Workflow that promotes to Beta channel](/images/ci-workflow-beta.png)
+
+[View a larger version of this image](/images/ci-workflow-beta.png)
+
+The following describes the recommended steps to include in release workflows, as shown in the diagram above:
+
+1. [Define workflow triggers](#rel-triggers)
+1. [Build source code](#rel-build)
+1. [Create a release and promote to a temporary channel](#rel-release)
+1. [Create cluster matrix, deploy, and test](#rel-deploy)
+1. [Promote to a shared channel](#rel-promote)
+1. [Archive the temporary channel and customer](#rel-cleanup)
+
+### Define workflow triggers {#rel-triggers}
+
+Create unique workflows for promoting releases to your team's internal-only, beta, and stable channels. Define unique event triggers for each of your release workflows so that releases are only promoted to a channel when a given condition is met:
+
+* On every commit to the `main` branch in your code repository, promote a release to the channel that your team uses for internal testing (such as the default Unstable channel).
+
+   The following example shows a workflow trigger in GitHub Actions that runs the workflow on commits to `main`:
+
+   ```yaml
+   name: unstable-release-example
+
+   on:
+     push:
+       branches:
+         - 'main'
+
+   jobs:
+     ...
+   ```
+
+* On pushing a tag that contains a version label with the semantic versioning format `x.y.z-beta.n` (such as `1.0.0-beta.1` or `v1.0.0-beta.2`), promote a release to your team's Beta channel.
+
+   The following example shows a workflow trigger in GitHub Actions that runs the workflow when a tag that matches the format `v*.*.*-beta.*` is pushed:
+
+   ```yaml
+   name: beta-release-example
+
+   on:
+     push:
+       tags:
+         - "v*.*.*-beta.*"
+
+   jobs:
+     ...
+   ```
+
+* On pushing a tag that contains a version label with the semantic versioning format `x.y.z` (such as `1.0.0` or `v1.0.1`), promote a release to your team's Stable channel.
+
+   The following example shows a workflow trigger in GitHub Actions that runs the workflow when a tag that matches the format `v*.*.*` is pushed:
+
+   ```yaml
+   name: stable-release-example
+
+   on:
+     push:
+       tags:
+         - "v*.*.*"
+
+   jobs:
+     ...
+   ```
+
+### Build source code {#rel-build}
+
+<Build/>
+
+### Create a release and promote to a temporary channel {#rel-release}
+
+Add a job that creates and promotes a release to a temporary channel. This allows the release to be installed for testing in the next step. See the [release create](/reference/replicated-cli-release-create) Replicated CLI command. Or, for GitHub Actions workflows, see [create-release](https://github.com/replicatedhq/replicated-actions/tree/main/create-release).
+
+Consider the following requirements and recommendations:
+
+* Use a consistent naming pattern for the temporary channels. Additionally, configure the workflow so that a new temporary channel with a unique name is created each time that the release workflow runs.
+
+* Use semantic versioning for the release version label.
+
+   :::note
+   If semantic versioning is enabled on the channel where you promote the release, then the release version label _must_ be a valid semantic version number. See [Semantic Versioning](releases-about#semantic-versioning) in _About Channels and Releases_.
+   :::
+
+* For Helm chart-based applications, the release version label must match the version in the `version` field of the Helm chart `Chart.yaml` file. To automatically update the `version` field in the `Chart.yaml` file, you can define a step in this job that updates the version label before packaging the Helm chart into a `.tgz` archive.
+
+* For releases that will be promoted to a customer-facing channel such as Beta or Stable, Replicated recommends that the version label for the release match the tag that triggered the release workflow. For example, if the tag `1.0.0-beta.1` was used to trigger the workflow, then the version label for the release is also `1.0.0-beta.1`.
+
+### Create cluster matrix, deploy, and test {#rel-deploy}
+
+Add a job with the following steps to provision clusters with Compatibility Matrix, deploy the release to the clusters, and run tests:
+
+1. Create a temporary customer for installing the release. See the [customer create](/reference/replicated-cli-customer-create) Replicated CLI command. Or, for GitHub Actions workflows, see the [create-customer](https://github.com/replicatedhq/replicated-actions/tree/main/create-customer) action.
+
+1. Use Compatibility Matrix to create a matrix of different Kubernetes cluster distributions and versions to run tests against. See the [cluster create](/reference/replicated-cli-cluster-create) Replicated CLI command. Or, for GitHub Actions workflows, see the [create-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/create-cluster) action.
+
+   Consider the following recommendations:
+
+   * For release workflows, Replicated recommends that you run tests against multiple clusters of different Kubernetes distributions and versions.
To help build the matrix, you can review the most common Kubernetes distributions and versions used by your customers on the **Customers > Reporting** page in the Replicated Vendor Portal. For more information, see [Customer Reporting](/vendor/customer-reporting).
+
+   * When using the Replicated CLI, you can obtain a list of representative customer instances using the `api get` command. For example: `replicated api get /v3/app/[APP_ID]/cluster-usage | jq .`. You can further filter these results by `channel_id`, `channel_sequence`, and `version_label`.
+
+   * GitHub Actions users can also use the `get-customer-instances` action to automate the creation of a cluster matrix based on the distributions of clusters where instances of your application are installed and running. For more information, see the [example workflow](https://github.com/replicatedhq/replicated-actions/blob/main/example-workflows/development-dynamic.yaml) that makes use of [get-customer-instances](https://github.com/replicatedhq/replicated-actions/tree/main/get-customer-instances) in GitHub.
+
+   The following example shows creating a matrix of clusters of different distributions and versions using GitHub Actions:
+
+   ```yaml
+   # github actions cluster matrix example
+
+   compatibility-matrix-example:
+     runs-on: ubuntu-22.04
+     strategy:
+       matrix:
+         cluster:
+           - {distribution: kind, version: "1.25.3"}
+           - {distribution: kind, version: "1.26.3"}
+           - {distribution: eks, version: "1.26"}
+           - {distribution: gke, version: "1.27"}
+           - {distribution: openshift, version: "4.13.0-okd"}
+   ```
+
+1. For each cluster created, use the cluster's kubeconfig to update the Kubernetes context and then install the target application in the cluster. For more information about accessing the kubeconfig for clusters created with Compatibility Matrix, see [cluster kubeconfig](/reference/replicated-cli-cluster-kubeconfig).
+
+   For more information about installing in an existing cluster, see:
+   * [Installing with Helm](/vendor/install-with-helm)
+   * [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster)
+
+1. Run tests, such as integration, smoke, and canary tests. For more information about recommended types of tests to run, see [Best Practices and Recommendations](/vendor/ci-overview#best-practices-and-recommendations) in _About Integrating with CI/CD_.
+
+1. Delete the cluster when the tests complete. See the [cluster rm](/reference/replicated-cli-cluster-rm) Replicated CLI command. Or, for GitHub Actions workflows, see the [remove-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/remove-cluster) action.
+
+### Promote to a shared channel {#rel-promote}
+
+Add a job that promotes the release to a shared internal-only or customer-facing channel, such as the default Unstable, Beta, or Stable channel. See the [release promote](/reference/replicated-cli-release-promote) Replicated CLI command. Or, for GitHub Actions workflows, see the [promote-release](https://github.com/replicatedhq/replicated-actions/tree/main/promote-release) action.
+
+Consider the following requirements and recommendations:
+
+* Replicated recommends that you include the `--version` flag with the `release promote` command to explicitly declare the version label for the release. Use the same version label that was used when the release was created as part of [Create a release and promote to a temporary channel](#rel-release) above.
Although the `--version` flag is not required, declaring the same release version label during promotion provides additional consistency that makes the releases easier to track.

* The channel to which the release is promoted depends on the event triggers that you defined for the workflow. For example, if the workflow runs on every commit to the `main` branch, then promote the release to an internal-only channel, such as Unstable. For more information, see [Define Workflow Triggers](#rel-triggers) above.

* Use the `--release-notes` flag to include detailed release notes in markdown.

### Archive the temporary channel and customer {#rel-cleanup}

Finally, add a job to archive the temporary channel and customer that you created. This ensures that these artifacts are removed from your Replicated team and that they do not have to be manually archived after the release is promoted.

See the [channel rm](/reference/replicated-cli-channel-rm) Replicated CLI command and the [customer/\{customer_id\}/archive](https://replicated-vendor-api.readme.io/reference/archivecustomer) endpoint in the Vendor API v3 documentation. Or, for GitHub Actions workflows, see the [archive-channel](https://github.com/replicatedhq/replicated-actions/tree/main/archive-channel) and [archive-customer](https://github.com/replicatedhq/replicated-actions/tree/main/archive-customer) actions.

================
File: docs/vendor/compatibility-matrix-usage.md
================
# Viewing Compatibility Matrix Usage History
This topic describes using the Replicated Vendor Portal to understand
Compatibility Matrix usage across your team.

## View Historical Usage
The **Compatibility Matrix > History** page provides
historical information about both clusters and VMs, as shown below:

![Compatibility Matrix History Page](/images/compatibility-matrix-history.png)
[View a larger version of this image](/images/compatibility-matrix-history.png)

Only _terminated_ resources (clusters and VMs that have been deleted or that errored) are displayed on the **History** page.

The top of the **History** page displays the total number of terminated clusters and VMs
in the selected time period as well as the total cost and usage time for
the terminated resources.

The table includes cluster and VM entries with the following columns:
- **Name:** The name of the cluster or VM.
- **By:** The actor that created the resource.
- **Cost:** The cost of the resource. This is calculated at termination and is
  based on the time the resource was running.
- **Distribution:** The distribution and version of the resource. For example,
  `kind 1.32.1`.
- **Type:** The distribution type of the resource. Kubernetes clusters
  are listed as `kubernetes` and VMs are listed as `vm`.
- **Status:** The status of the resource. For example, `terminated` or `error`.
- **Instance:** The instance type of the resource. For example, `r1.small`.
- **Nodes:** The node count for `kubernetes` resources. VMs do not use this
  field.
- **Node Groups:** The node group count for `kubernetes` resources. VMs do not
  use this field.
- **Created At:** The time the resource was created.
- **Running At:** The time the resource started running. For billing purposes,
  this is the time when Replicated began charging for the resource.
- **Terminated At:** The time the resource was terminated. For billing
  purposes, this is the time when Replicated stopped charging for the resource.
- **TTL:** The time-to-live for the resource.
  This is the maximum amount of
  time the resource can run before it is automatically terminated.
- **Duration:** The total time the resource was running. This is the time
  between the `running` and `terminated` states.
- **Tag:** Any tags that were applied to the resource.

## Filter and Sort Usage History

Each of the fields on the **History** page can be filtered and sorted. To sort by a specific field, click on the column header.

To filter by a specific field, click on the filter icon in the column header, then use each specific filter input to filter the results, as shown below:

![Compatibility Matrix History Page, filter input](/images/compatibility-matrix-column-filter-input.png)
[View a larger version of this image](/images/compatibility-matrix-column-filter-input.png)

## Get Usage History with the Vendor API v3

For more information about using the Vendor API v3 to get Compatibility Matrix
usage history information, see the following API endpoints within the
Vendor API v3 documentation:

* [/v3/cmx/stats](https://replicated-vendor-api.readme.io/reference/getcmxstats)
* [/v3/vms](https://replicated-vendor-api.readme.io/reference/listvms)
* [/v3/clusters](https://replicated-vendor-api.readme.io/reference/listclusters)
* [/v3/cmx/history](https://replicated-vendor-api.readme.io/reference/listcmxhistory)

For examples of using these endpoints, see the sections below.

### Credit Balance and Summarized Usage
You can use the `/v3/cmx/stats` endpoint to get summarized usage information in addition to your Compatibility Matrix
credit balance.

This endpoint returns:

- **`cluster_count`:** The total number of terminated clusters.
- **`vm_count`:** The total number of terminated VMs.
- **`usage_minutes`:** The total number of billed usage minutes.
- **`cost`:** The total cost of the terminated clusters and VMs in cents.
- **`credit_balance`:** The remaining credit balance in cents.

```shell
curl --request GET \
  --url https://api.replicated.com/vendor/v3/cmx/stats \
  --header 'Accept: application/json' \
  --header 'Authorization: $REPLICATED_API_TOKEN'
{"cluster_count":2,"vm_count":4,"usage_minutes":152,"cost":276,"credit_balance":723}
```

The `v3/cmx/stats` endpoint also supports filtering by `start-time` and
`end-time`.
For example, the following request gets usage information for January 2025:

```shell
curl --request GET \
  --url 'https://api.replicated.com/vendor/v3/cmx/stats?start-time=2025-01-01T00:00:00Z&end-time=2025-01-31T23:59:59Z' \
  --header 'Authorization: $REPLICATED_API_TOKEN' \
  --header 'accept: application/json'
```

### Currently Active Clusters
To get a list of active clusters:

```shell
curl --request GET \
  --url 'https://api.replicated.com/vendor/v3/clusters' \
  --header 'Authorization: $REPLICATED_API_TOKEN' \
  --header 'accept: application/json'
```

You can also use a tool such as `jq` to filter and iterate over the output:

```shell
curl --request GET \
  --url 'https://api.replicated.com/vendor/v3/clusters' \
  --header 'Authorization: $REPLICATED_API_TOKEN' \
  --header 'accept: application/json' | \
  jq '.clusters[] | {name: .name, ttl: .ttl, distribution: .distribution, version: .version}'

{
  "name": "friendly_brown",
  "ttl": "1h",
  "distribution": "kind",
  "version": "1.32.1"
}
```

### Currently Active Virtual Machines
To get a list of active VMs:

```shell
curl --request GET \
  --url 'https://api.replicated.com/vendor/v3/vms' \
  --header 'Authorization: $REPLICATED_API_TOKEN' \
  --header 'accept: application/json'
```

### Historical Usage
To fetch historical usage information:

```shell
curl --request GET \
  --url 'https://api.replicated.com/vendor/v3/cmx/history' \
  --header 'Authorization: $REPLICATED_API_TOKEN' \
  --header 'accept: application/json'
```

You can also filter the response from the `/v3/cmx/history` endpoint by `distribution-type`, which
allows you to get a list of either clusters or VMs:

- **For clusters, use `distribution-type=kubernetes`:**
  ```shell
  curl --request GET \
    --url 'https://api.replicated.com/vendor/v3/cmx/history?distribution-type=kubernetes' \
    --header 'Authorization: $REPLICATED_API_TOKEN' \
    --header 'accept: application/json'
  ```

- **For VMs, use `distribution-type=vm`:**
  ```shell
  curl --request GET \
    --url 'https://api.replicated.com/vendor/v3/cmx/history?distribution-type=vm' \
    --header 'Authorization: $REPLICATED_API_TOKEN' \
    --header 'accept: application/json'
  ```

### Filtering Endpoint Results
Each of these endpoints supports pagination and filtering. You can use the
following query parameters to filter the results.

:::note
Each of the examples below
uses the `v3/cmx/history` endpoint, but the same query parameters can be used
with the other endpoints as well.
:::

- **Pagination:** Use the `pageSize` and `currentPage` query parameters to
  paginate through the results:

  ```shell
  curl --request GET \
    --url 'https://api.replicated.com/vendor/v3/cmx/history?pageSize=10&currentPage=1' \
    --header 'Authorization: $REPLICATED_API_TOKEN' \
    --header 'accept: application/json'
  ```

- **Filter by date:** Use the `start-time` and `end-time` query parameters to
  filter the results by a specific date range:

  ```shell
  curl --request GET \
    --url 'https://api.replicated.com/vendor/v3/cmx/history?start-time=2025-01-01T00:00:00Z&end-time=2025-01-31T23:59:59Z' \
    --header 'Authorization: $REPLICATED_API_TOKEN' \
    --header 'accept: application/json'
  ```

- **Sort by:** Use the `tag-sort-key` query parameter to sort the results by a
  specific field. The field can be any of the fields returned in the response.
  By default, the results are sorted in ascending order. Use
  `sortDesc=true` to sort in descending order:

  ```shell
  curl --request GET \
    --url 'https://api.replicated.com/vendor/v3/cmx/history?tag-sort-key=created_at&sortDesc=true' \
    --header 'Authorization: $REPLICATED_API_TOKEN' \
    --header 'accept: application/json'
  ```

- **Tag filters:** Use the `tag-filter` query parameter to filter the results by
  a specific tag:

  ```shell
  curl --request GET \
    --url 'https://api.replicated.com/vendor/v3/cmx/history?tag-filter=tag1' \
    --header 'Authorization: $REPLICATED_API_TOKEN' \
    --header 'accept: application/json'
  ```

- **Actor filters:** Use the `actor-filter` query parameter to filter by the actor
  that created the resource, or by the type of actor, such as `Web UI` or
  `Replicated CLI`:

  ```shell
  curl --request GET \
    --url 'https://api.replicated.com/vendor/v3/cmx/history?actor-filter=name' \
    --header 'Authorization: $REPLICATED_API_TOKEN' \
    --header 'accept: application/json'
  ```

  :::note
  If any filter is passed for an object that does not exist, no warning is given.
  For example, if you filter by `actor-filter=name` and there are no results,
  the response will be empty.
  :::

================
File: docs/vendor/config-screen-about.md
================
# About the Configuration Screen

This topic describes the configuration screen on the Config tab in the Replicated Admin Console.

## About Collecting Configuration Values

When you distribute your application with Replicated KOTS, you can include a configuration screen in the Admin Console. This configuration screen is used to collect required or optional values from your users that are used to run your application. You can use regular expressions to validate user input for some fields, such as passwords and email addresses. For more information about how to add custom fields to the configuration screen, see [Creating and Editing Configuration Fields](admin-console-customize-config-screen).

If you use a Helm chart for your application, your users provide any values specific to their environment from the configuration screen, rather than in a Helm chart `values.yaml` file. This means that your users can provide configuration values through a user interface, rather than having to edit a YAML file or use `--set` CLI commands. The Admin Console configuration screen also allows you to control which options you expose to your users.

For example, you can use the configuration screen to provide database configuration options for your application. Your users could connect your application to an external database by providing required values in the configuration screen, such as the host, port, and a username and password for the database.

Alternatively, you can use the configuration screen to provide a database option that runs in the cluster as part of your application. For an example of this use case, see [Example: Adding Database Configuration Options](tutorial-adding-db-config).

## Viewing the Configuration Screen

If you include a configuration screen with your application, users of your application can access the configuration screen from the Admin Console:
* During application installation.
* At any time after application installation on the Admin Console Config tab.

### Application Installation

The Admin Console displays the configuration screen when the user installs the application, after they upload their license file.
+ +The following shows an example of how the configuration screen displays during installation: + +![configuration screen that displays during application install](/images/config-screen-sentry-enterprise-app-install.png) + +[View a larger version of this image](/images/config-screen-sentry-enterprise-app-install.png) + +### Admin Console Config Tab + +Users can access the configuration screen any time after they install the application by going to the Config tab in the Admin Console. + +The following shows an example of how the configuration screen displays in the Admin Console Config tab: + +![configuration screen that displays in the Config tab](/images/config-screen-sentry-enterprise.png) + +[View a larger version of this image](/images/config-screen-sentry-enterprise.png) + +================ +File: docs/vendor/config-screen-conditional.mdx +================ +import IntegerComparison from "../partials/template-functions/_integer-comparison.mdx" +import PropertyWhen from "../partials/config/_property-when.mdx" +import DistroCheck from "../partials/template-functions/_string-comparison.mdx" +import NeComparison from "../partials/template-functions/_ne-comparison.mdx" + +# Using Conditional Statements in Configuration Fields + +This topic describes how to use Replicated KOTS template functions in the Config custom resource to conditionally show or hide configuration fields for your application on the Replicated KOTS Admin Console **Config** page. + +## Overview + +The `when` property in the Config custom resource denotes configuration groups or items that are displayed on the Admin Console **Config** page only when a condition evaluates to true. When the condition evaluates to false, the group or item is not displayed. + +<PropertyWhen/> + +For more information about the Config custom resource `when` property, see [when](/reference/custom-resource-config#when) in _Config_. + +## Conditional Statement Examples + +This section includes examples of common types of conditional statements used in the `when` property of the Config custom resource. + +For additional examples of using conditional statements in the Config custom resource, see [Applications](https://github.com/replicatedhq/platform-examples/tree/main/applications) in the platform-examples repository in GitHub. + +### Cluster Distribution Check + +It can be useful to show or hide configuration fields depending on the distribution of the cluster because different distributions often have unique requirements. + +In the following example, the `when` properties use the [Distribution](/reference/template-functions-static-context#distribution) template function to return the Kubernetes distribution of the cluster where Replicated KOTS is running. If the distribution of the cluster matches the specified distribution, then the `when` property evaluates to true. + +<DistroCheck/> + +### Embedded Cluster Distribution Check + +It can be useful to show or hide configuration fields if the distribution of the cluster is [Replicated Embedded Cluster](/vendor/embedded-overview) because you can include extensions in embedded cluster distributions to manage functionality such as ingress and storage. This means that embedded clusters frequently have fewer configuration options for the user. + +<NeComparison/> + +### kURL Distribution Check + +It can be useful to show or hide configuration fields if the cluster was provisioned by Replicated kURL because kURL distributions often include add-ons to manage functionality such as ingress and storage. 
This means that kURL clusters frequently have fewer configuration options for the user.

In the following example, the `when` property of the `not_kurl` group uses the IsKurl template function to evaluate if the cluster was provisioned by kURL. For more information about the IsKurl template function, see [IsKurl](/reference/template-functions-static-context#iskurl) in _Static Context_.

```yaml
# Config custom resource
apiVersion: kots.io/v1beta1
kind: Config
metadata:
  name: config-sample
spec:
  groups:
  - name: all_distributions
    title: Example Group
    description: This group always displays.
    items:
    - name: example_item
      title: This item always displays.
      type: text
  - name: not_kurl
    title: Non-kURL Cluster Group
    description: This group displays only if the cluster is not provisioned by kURL.
    when: 'repl{{ not IsKurl }}'
    items:
    - name: example_item_non_kurl
      title: The cluster is not provisioned by kURL.
      type: label
```

As shown in the image below, both the `all_distributions` and `not_kurl` groups are displayed on the **Config** page when KOTS is _not_ running in a kURL cluster:

![Config page displays both groups from the example](/images/config-example-iskurl-false.png)

[View a larger version of this image](/images/config-example-iskurl-false.png)

However, when KOTS is running in a kURL cluster, only the `all_distributions` group is displayed, as shown below:

![Config page displaying only the first group from the example](/images/config-example-iskurl-true.png)

[View a larger version of this image](/images/config-example-iskurl-true.png)

### License Field Value Equality Check

You can show or hide configuration fields based on the values in a license to ensure that users only see configuration options for the features and entitlements granted by their license.

In the following example, the `when` property of the `new_feature_config` item uses the LicenseFieldValue template function to determine if the user's license contains a `newFeatureEntitlement` field that is set to `true`. For more information about the LicenseFieldValue template function, see [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue) in _License Context_.

```yaml
apiVersion: kots.io/v1beta1
kind: Config
metadata:
  name: config-sample
spec:
  groups:
  - name: example_settings
    title: My Example Config
    description: Example fields for using LicenseFieldValue template function
    items:
    - name: new_feature_config
      type: label
      title: "You have the new feature entitlement"
      when: '{{repl (LicenseFieldValue "newFeatureEntitlement") }}'
```

As shown in the image below, the **Config** page displays the `new_feature_config` item when the user's license contains `newFeatureEntitlement: true`:

![Config page displaying the text "You have the new feature entitlement"](/images/config-example-newfeature.png)

[View a larger version of this image](/images/config-example-newfeature.png)

### License Field Value Integer Comparison

You can show or hide configuration fields based on the values in a license to ensure that users only see configuration options for the features and entitlements granted by their license. You can also compare integer values from license fields to control the configuration experience for your users.
+ 

<IntegerComparison/>

### User-Supplied Value Check

You can show or hide configuration fields based on user-supplied values on the **Config** page to ensure that users only see options that are relevant to their selections.

In the following example, the `database_host` and `database_password` items use the ConfigOptionEquals template function to evaluate if the user selected the `external` database option for the `db_type` item. For more information about the ConfigOptionEquals template function, see [ConfigOptionEquals](/reference/template-functions-config-context#configoptionequals) in _Config Context_.

```yaml
apiVersion: kots.io/v1beta1
kind: Config
metadata:
  name: config-sample
spec:
  groups:
  - name: database_settings_group
    title: Database Settings
    items:
    - name: db_type
      title: Database Type
      type: radio
      default: external
      items:
      - name: external
        title: External Database
      - name: embedded
        title: Embedded Database
    - name: database_host
      title: Database Hostname
      type: text
      when: '{{repl (ConfigOptionEquals "db_type" "external")}}'
    - name: database_password
      title: Database Password
      type: password
      when: '{{repl (ConfigOptionEquals "db_type" "external")}}'
```

As shown in the images below, when the user selects the external database option, the `database_host` and `database_password` items are displayed. Alternatively, when the user selects the embedded database option, the items are _not_ displayed:

![Config page displaying the database host and password fields](/images/config-example-external-db.png)

[View a larger version of this image](/images/config-example-external-db.png)

![Config page with embedded database option selected](/images/config-example-embedded-db.png)

[View a larger version of this image](/images/config-example-embedded-db.png)

## Use Multiple Conditions in the `when` Property

You can use more than one template function in the `when` property to create more complex conditional statements. This allows you to show or hide configuration fields based on multiple conditions being true.

The following example includes `when` properties that use both the ConfigOptionEquals and IsKurl template functions:

```yaml
apiVersion: kots.io/v1beta1
kind: Config
metadata:
  name: config-sample
spec:
  groups:
  - name: ingress_settings
    title: Ingress Settings
    description: Configure Ingress
    items:
    - name: ingress_type
      title: Ingress Type
      help_text: |
        Select how traffic will ingress to the application.
      type: radio
      items:
      - name: ingress_controller
        title: Ingress Controller
      - name: load_balancer
        title: Load Balancer
      default: "ingress_controller"
      required: true
      when: 'repl{{ not IsKurl }}'
    - name: ingress_host
      title: Hostname
      help_text: Hostname used to access the application.
      type: text
      default: "hostname.example.com"
      required: true
      when: 'repl{{ and (not IsKurl) (ConfigOptionEquals "ingress_type" "ingress_controller") }}'
    - name: ingress_annotations
      type: textarea
      title: Ingress Annotations
      help_text: See your ingress controller’s documentation for the required annotations.
+ when: 'repl{{ and (not IsKurl) (ConfigOptionEquals "ingress_type" "ingress_controller") }}' + - name: ingress_tls_type + title: Ingress TLS Type + type: radio + items: + - name: self_signed + title: Self Signed (Generate Self Signed Certificate) + - name: user_provided + title: User Provided (Upload a TLS Certificate and Key Pair) + required: true + default: self_signed + when: 'repl{{ and (not IsKurl) (ConfigOptionEquals "ingress_type" "ingress_controller") }}' + - name: ingress_tls_cert + title: TLS Cert + type: file + when: '{{repl and (ConfigOptionEquals "ingress_type" "ingress_controller") (ConfigOptionEquals "ingress_tls_type" "user_provided") }}' + required: true + - name: ingress_tls_key + title: TLS Key + type: file + when: '{{repl and (ConfigOptionEquals "ingress_type" "ingress_controller") (ConfigOptionEquals "ingress_tls_type" "user_provided") }}' + required: true + - name: load_balancer_port + title: Load Balancer Port + help_text: Port used to access the application through the Load Balancer. + type: text + default: "443" + required: true + when: 'repl{{ and (not IsKurl) (ConfigOptionEquals "ingress_type" "load_balancer") }}' + - name: load_balancer_annotations + type: textarea + title: Load Balancer Annotations + help_text: See your cloud provider’s documentation for the required annotations. + when: 'repl{{ and (not IsKurl) (ConfigOptionEquals "ingress_type" "load_balancer") }}' +``` + +As shown in the image below, the configuration fields that are specific to the ingress controller display only when the user selects the ingress controller option and KOTS is _not_ running in a kURL cluster: + +![Config page displaying the ingress controller options](/images/config-example-ingress-controller.png) + +[View a larger version of this image](/images/config-example-ingress-controller.png) + +Additionally, the options relevant to the load balancer display when the user selects the load balancer option and KOTS is _not_ running in a kURL cluster: + +![Config page displaying the load balancer options](/images/config-example-ingress-load-balancer.png) + +[View a larger version of this image](/images/config-example-ingress-load-balancer.png) + +================ +File: docs/vendor/config-screen-map-inputs.md +================ +# Mapping User-Supplied Values + +This topic describes how to map the values that your users provide in the Replicated Admin Console configuration screen to your application. + +This topic assumes that you have already added custom fields to the Admin Console configuration screen by editing the Config custom resource. For more information, see [Creating and Editing Configuration Fields](admin-console-customize-config-screen). + +## Overview of Mapping Values + +You use the values that your users provide in the Admin Console configuration screen to render YAML in the manifest files for your application. + +For example, if you provide an embedded database with your application, you might add a field on the Admin Console configuration screen where users input a password for the embedded database. You can then map the password that your user supplies in this field to the Secret manifest file for the database in your application. + +For an example of mapping database configuration options in a sample application, see [Example: Adding Database Configuration Options](tutorial-adding-db-config). + +You can also conditionally deploy custom resources depending on the user input for a given field. 
For example, if a customer chooses to use their own database with your application rather than an embedded database option, it is not desirable to deploy the optional database resources such as a StatefulSet and a Service.

For more information about including optional resources conditionally based on user-supplied values, see [Conditionally Including or Excluding Resources](packaging-include-resources).

## About Mapping Values with Template Functions

To map user-supplied values, you use Replicated KOTS template functions. The template functions are based on the Go text/template libraries. To use template functions, you add them as strings in the custom resource manifest files in your application.

For more information about template functions, including use cases and examples, see [About Template Functions](/reference/template-functions-about).

For more information about the syntax of the template functions for mapping configuration values, see [Config Context](/reference/template-functions-config-context) in the _Template Functions_ section.

## Map User-Supplied Values

Follow one of these procedures to map user inputs from the configuration screen, depending on whether you use a Helm chart for your application:

* **Without Helm**: See [Map Values to Manifest Files](#map-values-to-manifest-files).
* **With Helm**: See [Map Values to a Helm Chart](#map-values-to-a-helm-chart).

### Map Values to Manifest Files

To map user-supplied values from the configuration screen to manifest files in your application:

1. In the [Vendor Portal](https://vendor.replicated.com/apps), click **Releases**. Then, click **View YAML** next to the desired release.

1. Open the Config custom resource manifest file that you created in the [Add Fields to the Configuration Screen](admin-console-customize-config-screen#add-fields-to-the-configuration-screen) procedure. The Config custom resource manifest file has `kind: Config`.

1. In the Config manifest file, locate the name of the user-input field that you want to map.

   **Example**:

   ```yaml
   apiVersion: kots.io/v1beta1
   kind: Config
   metadata:
     name: my-application
   spec:
     groups:
     - name: smtp_settings
       title: SMTP Settings
       description: Configure SMTP Settings
       items:
       - name: smtp_host
         title: SMTP Hostname
         help_text: Set SMTP Hostname
         type: text
   ```

   In the example above, the field name to map is `smtp_host`.

1. In the same release in the Vendor Portal, open the manifest file where you want to map the value for the field that you selected.

1. In the manifest file, use the ConfigOption template function to map the user-supplied value in a key-value pair. For example:

   ```yaml
   hostname: '{{repl ConfigOption "smtp_host"}}'
   ```

   For more information about the ConfigOption template function, see [Config Context](../reference/template-functions-config-context#configoption) in the _Template Functions_ section.

   **Example**:

   The following example shows mapping user-supplied TLS certificate and TLS private key files to the `tls.crt` and `tls.key` keys in a Secret custom resource manifest file.

   For more information about working with TLS secrets, including a strategy for re-using the certificates uploaded for the Admin Console itself, see the [Configuring Cluster Ingress](packaging-ingress) example.
+ + ```yaml + apiVersion: v1 + kind: Secret + metadata: + name: tls-secret + type: kubernetes.io/tls + data: + tls.crt: '{{repl ConfigOption "tls_certificate_file" }}' + tls.key: '{{repl ConfigOption "tls_private_key_file" }}' + ``` + +1. Save and promote the release to a development environment to test your changes. + +### Map Values to a Helm Chart + +The `values.yaml` file in a Helm chart defines parameters that are specific to each environment in which the chart will be deployed. With Replicated KOTS, your users provide these values through the configuration screen in the Admin Console. You customize the configuration screen based on the required and optional configuration fields that you want to expose to your users. + +To map the values that your users provide in the Admin Console configuration screen to your Helm chart `values.yaml` file, you create a HelmChart custom resource. + +For a tutorial that shows how to set values in a sample Helm chart during installation with KOTS, see [Set Helm Chart Values with KOTS](/vendor/tutorial-config-setup). + +To map user inputs from the configuration screen to the `values.yaml` file: + +1. In the [Vendor Portal](https://vendor.replicated.com/apps), click **Releases**. Then, click **View YAML** next to the desired release. + +1. Open the Config custom resource manifest file that you created in the [Add Fields to the Configuration Screen](admin-console-customize-config-screen#add-fields-to-the-configuration-screen) procedure. The Config custom resource manifest file has `kind: Config`. + +1. In the Config manifest file, locate the name of the user-input field that you want to map. + + **Example**: + + ```yaml + apiVersion: kots.io/v1beta1 + kind: Config + metadata: + name: my-application + spec: + groups: + - name: smtp_settings + title: SMTP Settings + description: Configure SMTP Settings + items: + - name: smtp_host + title: SMTP Hostname + help_text: Set SMTP Hostname + type: text + ``` + + In the example above, the field name to map is `smtp_host`. + +1. In the same release, create a HelmChart custom resource manifest file. A HelmChart custom resource manifest file has `kind: HelmChart`. + + For more information about the HelmChart custom resource, see [HelmChart](../reference/custom-resource-helmchart) in the _Custom Resources_ section. + +1. In the HelmChart manifest file, copy and paste the name of the property from your `values.yaml` file that corresponds to the field that you selected from the Config manifest file under `values`: + + ```yaml + values: + HELM_VALUE_KEY: + ``` + Replace `HELM_VALUE_KEY` with the property name from the `values.yaml` file. + +1. Use the ConfigOption template function to set the property from the `values.yaml` file equal to the corresponding configuration screen field: + + ```yaml + values: + HELM_VALUE_KEY: '{{repl ConfigOption "CONFIG_SCREEN_FIELD_NAME" }}' + ``` + Replace `CONFIG_SCREEN_FIELD_NAME` with the name of the field that you created in the Config custom resource. + + For more information about the KOTS ConfigOption template function, see [Config Context](../reference/template-functions-config-context#configoption) in the _Template Functions_ section. + + **Example:** + + ```yaml + apiVersion: kots.io/v1beta1 + kind: HelmChart + metadata: + name: samplechart + spec: + chart: + name: samplechart + chartVersion: 3.1.7 + helmVersion: v3 + useHelmInstall: true + values: + hostname: '{{repl ConfigOption "smtp_host" }}' + ``` + +1. 
Save and promote the release to a development environment to test your changes.

================
File: docs/vendor/custom-domains-using.md
================
# Using Custom Domains

This topic describes how to use the Replicated Vendor Portal to add and manage custom domains to alias the Replicated registry, the Replicated proxy registry, the Replicated app service, and the download portal.

For information about adding and managing custom domains with the Vendor API v3, see the [customHostnames](https://replicated-vendor-api.readme.io/reference/createcustomhostname) section in the Vendor API v3 documentation.

For an overview about custom domains and limitations, see [About Custom Domains](custom-domains).

## Configure a Custom Domain

Before you assign a custom domain for a registry or the download portal, you must first configure the domain and verify both domain ownership and TLS certificate creation.

To add and configure a custom domain:

1. In the [Vendor Portal](https://vendor.replicated.com), go to **Custom Domains**.

1. In the **Add custom domain** dropdown, select the target Replicated endpoint.

   The **Configure a custom domain** wizard opens.

   <img src="/images/custom-domains-download-configure.png" alt="custom domain wizard" width="500"/>

   [View a larger version of this image](/images/custom-domains-download-configure.png)

1. For **Domain**, enter the custom domain. Click **Save & continue**.

1. For **Create CNAME**, copy the text string and use it to create a CNAME record in your DNS account. Click **Continue**.

1. For **Verify ownership**, if a text string is displayed, copy it and use it to create a TXT record in your DNS account. If no TXT record is displayed, ownership is validated automatically using an HTTP token. Click **Validate & continue**.

   Your changes can take up to 24 hours to propagate.

1. For **TLS cert creation verification**, if a text string is displayed, copy it and use it to create a TXT record in your DNS account. If no TXT record is displayed, verification happens automatically using an HTTP token. Click **Validate & continue**.

   Your changes can take up to 24 hours to propagate.

   :::note
   If you set up a [CAA record](https://letsencrypt.org/docs/caa/) for this hostname, you must include all Certificate Authorities (CAs) that Cloudflare partners with. The following CAA records are required to ensure proper certificate issuance and renewal:

   ```dns
   @ IN CAA 0 issue "letsencrypt.org"
   @ IN CAA 0 issue "pki.goog; cansignhttpexchanges=yes"
   @ IN CAA 0 issue "ssl.com"
   @ IN CAA 0 issue "amazon.com"
   @ IN CAA 0 issue "cloudflare.com"
   @ IN CAA 0 issue "google.com"
   ```

   Failing to include any of these CAs might prevent certificate issuance or renewal, which can result in downtime for your customers. For additional security, you can add an IODEF record to receive notifications about certificate requests:

   ```dns
   @ IN CAA 0 iodef "mailto:your-security-team@example.com"
   ```
   :::

1. For **Use Domain**, to set the new domain as the default, click **Yes, set as default**. Otherwise, click **Not now**.

   :::note
   Replicated recommends that you do _not_ set a domain as the default until you are ready for it to be used by customers.
   :::

The Vendor Portal marks the domain as **Configured** after the verification checks for ownership and TLS certificate creation are complete.
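
While you wait for the checks to complete, you can confirm from your own machine that the DNS records have propagated. The following is a minimal sketch using `dig`, assuming a hypothetical custom domain `registry.example.com`:

```bash
# Check that the CNAME record has propagated
dig +short CNAME registry.example.com

# Check that a verification TXT record has propagated,
# using the record name shown in the wizard
dig +short TXT <txt-record-name>
```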

## Use Custom Domains

After you configure one or more custom domains in the Vendor Portal, you assign a custom domain by setting it as the default for all channels and customers or by assigning it to an individual release channel.

### Set a Default Domain

Setting a default domain is useful for ensuring that the same domain is used across channels for all your customers.

When you set a custom domain as the default, it is used by default for all new releases promoted to any channel, as long as the channel does not have a different domain assigned in its channel settings.

Only releases that are promoted to a channel _after_ you set a default domain use the new default domain. Any existing releases that were promoted before you set the default continue to use the same domain that they used previously.

To set a custom domain as the default:

1. In the Vendor Portal, go to **Custom Domains**.

1. Next to the target domain, click **Set as default**.

1. In the confirmation dialog that opens, click **Yes, set as default**.

### Assign a Domain to a Channel {#channel-domain}

You can assign a domain to an individual channel by editing the channel settings. When you specify a domain in the channel settings, new releases promoted to the channel use the selected domain even if there is a different domain set as the default on the **Custom Domains** page.

Assigning a domain to a release channel is useful when you need to override either the default Replicated domain or a default custom domain for a specific channel. For example:
* You need to use a different domain for releases promoted to your Beta and Stable channels.
* You need to test a domain in a development environment before you set the domain as the default for all channels.

To assign a custom domain to a channel:

1. In the Vendor Portal, go to **Channels** and click the settings icon for the target channel.

1. Under **Custom domains**, in the drop-down for the target Replicated endpoint, select the domain to use for the channel. For more information about channel settings, see [Settings](releases-about#settings) in _About Channels and Releases_.

   <img alt="channel settings dialog" src="/images/channel-settings.png" width="500px"/>

   [View a larger version of this image](/images/channel-settings.png)

## Reuse a Custom Domain for Another Application

If you have configured a custom domain for one application, you can reuse the custom domain for another application in the same team without going through the ownership and TLS certificate verification process again.

To reuse a custom domain for another application:

1. In the Vendor Portal, select the application from the dropdown list.

1. Click **Custom Domains**.

1. In the section for the target endpoint, click **Add your first custom domain** for your first domain, or click **Add new domain** for additional domains.

   The **Configure a custom domain** wizard opens.

1. In the text box, enter the custom domain name that you want to reuse. Click **Save & continue**.

   The last page of the wizard opens because the custom domain was verified previously.

1. Do one of the following:

   - Click **Set as default**. In the confirmation dialog that opens, click **Yes, set as default**.

   - Click **Not now**. You can come back later to set the domain as the default.
The Vendor Portal shows that the domain has a Configured status because it was configured for a previous application, though it is not yet assigned as the default for this application.

## Remove a Custom Domain

You can remove a custom domain at any time, but you should plan the transition so that you do not break any existing installations or documentation.

Removing a custom domain for the Replicated registry, proxy registry, or Replicated app service will break existing installations that use the custom domain. Existing installations need to be upgraded to a version that does not use the custom domain before it can be removed safely.

If you remove a custom domain for the download portal, it is no longer accessible using the custom URL. You will need to point customers to an updated URL.

To remove a custom domain:

1. Log in to the [Vendor Portal](https://vendor.replicated.com) and click **Custom Domains**.

1. Verify that the domain is neither set as the default nor in use on any channels. You can edit the domains in use on a channel in the channel settings. For more information, see [Settings](releases-about#settings) in _About Channels and Releases_.

   :::important
   When you remove a registry or Replicated app service custom domain, any installations that reference that custom domain will break. Ensure that the custom domain is no longer in use before you remove it from the Vendor Portal.
   :::

1. Click **Remove** next to the unused domain in the list, and then click **Yes, remove domain**.

================
File: docs/vendor/custom-domains.md
================
# About Custom Domains

This topic provides an overview and the limitations of using custom domains to alias the Replicated private registry, Replicated proxy registry, Replicated app service, and the Download Portal.

For information about configuring and managing custom domains, see [Using Custom Domains](custom-domains-using).

## Overview

You can use custom domains to alias Replicated endpoints by creating Canonical Name (CNAME) records for your domains.

Replicated domains are external to your domain and can require additional security reviews by your customer. Using custom domains as aliases can bring the domains inside an existing security review and reduce your exposure.

TXT records must be created to verify:

- Domain ownership: Domain ownership is verified when you initially add a record.
- TLS certificate creation: Each new domain requires a new TLS certificate, which must be verified.

The TXT records can be removed after the verification is complete.

You can configure custom domains for the following services, so that customer-facing URLs reflect your company's brand:

- **Replicated registry:** Images and Helm charts can be pulled from the Replicated registry. By default, this registry uses the domain `registry.replicated.com`. We suggest using a CNAME such as `registry.{your app name}.com`.

- **Proxy registry:** Images can be proxied from external private registries using the Replicated proxy registry. By default, the proxy registry uses the domain `proxy.replicated.com`. We suggest using a CNAME such as `proxy.{your app name}.com`.

- **Replicated app service:** Upstream application YAML and metadata, including a license ID, are pulled from replicated.app. By default, this service uses the domain `replicated.app`. We suggest using a CNAME such as `updates.{your app name}.com`.

- **Download Portal:** The Download Portal can be used to share customer license files, air gap bundles, and so on. By default, the Download Portal uses the domain `get.replicated.com`. We suggest using a CNAME such as `portal.{your app name}.com` or `enterprise.{your app name}.com`.

## Limitations

Using custom domains has the following limitations:

- A single custom domain cannot be used for multiple endpoints. For example, a single domain can map to `registry.replicated.com` for any number of applications, but cannot map to both `registry.replicated.com` and `proxy.replicated.com`, even if the applications are different.

- Custom domains cannot be used to alias api.replicated.com (legacy customer-facing APIs) or kURL.

- Multiple custom domains can be configured, but only one custom domain can be the default for each Replicated endpoint. All configured custom domains work whether or not they are the default.

- A particular custom domain can only be used by one team.

================
File: docs/vendor/custom-metrics.md
================
# Configuring Custom Metrics (Beta)

This topic describes how to configure an application to send custom metrics to the Replicated Vendor Portal.

## Overview

In addition to the built-in insights displayed in the Vendor Portal by default (such as uptime and time to install), you can also configure custom metrics to measure instances of your application running in customer environments. Custom metrics can be collected for application instances running in online or air gap environments.

Custom metrics can be used to generate insights on customer usage and adoption of new features, which can help your team to make more informed prioritization decisions. For example:
* Decreased or plateaued usage for a customer can indicate a potential churn risk
* Increased usage for a customer can indicate the opportunity to invest in growth, co-marketing, and upsell efforts
* Low feature usage and adoption overall can indicate the need to invest in usability, discoverability, documentation, education, or in-product onboarding
* High usage volume for a customer can indicate that the customer might need help in scaling their instance infrastructure to keep up with projected usage

## How the Vendor Portal Collects Custom Metrics

The Vendor Portal collects custom metrics through the Replicated SDK that is installed in the cluster alongside the application.

The SDK exposes an in-cluster API where you can configure your application to POST metric payloads. When an application instance sends data to the API, the SDK sends the data (including any custom and built-in metrics) to the Replicated app service. The app service is located at `replicated.app` or at your custom domain.

If any values in the metric payload are different from the current values for the instance, then a new event is generated and displayed in the Vendor Portal. For more information about how the Vendor Portal generates events, see [How the Vendor Portal Generates Events and Insights](/vendor/instance-insights-event-data#about-events) in _About Instance and Event Data_.
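
For example, the following is a minimal sketch of how an application might report an `activeUsers` metric to the SDK's in-cluster API (the endpoint and payload format are described in Configure Custom Metrics below):

```bash
curl -X POST http://replicated:3000/api/v1/app/custom-metrics \
  -H 'Content-Type: application/json' \
  -d '{"data": {"activeUsers": 10}}'
```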

The following diagram demonstrates how a custom `activeUsers` metric is sent to the in-cluster API and ultimately displayed in the Vendor Portal, as described above:

<img alt="Custom metrics flowing from customer environment to Vendor Portal" src="/images/custom-metrics-flow.png" width="800px"/>

[View a larger version of this image](/images/custom-metrics-flow.png)

## Requirements

To support the collection of custom metrics in online and air gap environments, the Replicated SDK version 1.0.0-beta.12 or later must be running in the cluster alongside the application instance.

The `PATCH` and `DELETE` methods described below are available in the Replicated SDK version 1.0.0-beta.23 or later.

For more information about the Replicated SDK, see [About the Replicated SDK](/vendor/replicated-sdk-overview).

If you have any customers running earlier versions of the SDK, Replicated recommends that you add logic to your application to gracefully handle a 404 from the in-cluster APIs.

## Limitations

Custom metrics have the following limitations:

* The label that is used to display metrics in the Vendor Portal cannot be customized. Metrics are sent to the Vendor Portal with the same name that is sent in the `POST` or `PATCH` payload. The Vendor Portal then converts camel case to title case: for example, `activeUsers` is displayed as **Active Users**.

* The in-cluster APIs accept only JSON scalar values for metrics. Any requests containing nested objects or arrays are rejected.

* When using the `POST` method, any existing keys that are not included in the payload will be deleted. To create new metrics or update existing ones without sending the entire dataset, use the `PATCH` method.

## Configure Custom Metrics

You can configure your application to `POST` or `PATCH` a set of metrics as key-value pairs to the API that is running in the cluster alongside the application instance.

To remove an existing custom metric, use the `DELETE` endpoint with the custom metric name.

The Replicated SDK provides an in-cluster API custom metrics endpoint at `http://replicated:3000/api/v1/app/custom-metrics`.

**Example:**

```bash
POST http://replicated:3000/api/v1/app/custom-metrics
```

```json
{
  "data": {
    "num_projects": 5,
    "weekly_active_users": 10
  }
}
```

```bash
PATCH http://replicated:3000/api/v1/app/custom-metrics
```

```json
{
  "data": {
    "num_projects": 54,
    "num_error": 2
  }
}
```

```bash
DELETE http://replicated:3000/api/v1/app/custom-metrics/num_projects
```

### POST vs PATCH

The `POST` method always replaces the existing data with the most recent payload received. Any existing keys not included in the most recent payload will still be accessible in the instance events API, but they will no longer appear in the instance summary.

The `PATCH` method accepts partial updates, and it adds a new custom metric when the payload includes a key that does not already exist.

In most cases, using the `PATCH` method is recommended.

For example, if a component of your application sends the following via the `POST` method:

```json
{
  "numProjects": 5,
  "activeUsers": 10
}
```

Then, the component later sends the following, also via the `POST` method:

```json
{
  "activeUsers": 10,
  "usingCustomReports": false
}
```

The instance detail will show `Active Users: 10` and `Using Custom Reports: false`, which represents the most recent payload received.
The previously sent `numProjects` value is discarded from the instance summary but remains available in the instance events payload. To preserve `numProjects` from the initial payload and upsert `usingCustomReports` and `activeUsers`, use the `PATCH` method instead of `POST` on subsequent calls to the endpoint.

For example, if a component of your application initially sends the following via the `POST` method:

```json
{
  "numProjects": 5,
  "activeUsers": 10
}
```

Then, the component later sends the following via the `PATCH` method:

```json
{
  "usingCustomReports": false
}
```

The instance detail will show `Num Projects: 5`, `Active Users: 10`, `Using Custom Reports: false`, which represents the merged and upserted payload.

### NodeJS Example

The following example shows a NodeJS application that sends metrics on a weekly interval to the in-cluster API exposed by the SDK:

```javascript
async function sendMetrics(db) {

  const projectsQuery = "SELECT COUNT(*) as num_projects from projects";
  const numProjects = (await db.getConnection().queryOne(projectsQuery)).num_projects;

  const usersQuery =
    "SELECT COUNT(*) as active_users from users where DATEDIFF('day', last_active, CURRENT_TIMESTAMP) < 7";
  const activeUsers = (await db.getConnection().queryOne(usersQuery)).active_users;

  const metrics = { data: { numProjects, activeUsers }};

  // POST the metrics to the SDK's in-cluster API
  const res = await fetch('http://replicated:3000/api/v1/app/custom-metrics', {
    method: 'POST',
    headers: {
      "Content-Type": "application/json",
    },
    body: JSON.stringify(metrics),
  });
  if (res.status !== 200) {
    throw new Error(`Failed to send metrics: ${res.statusText}`);
  }
}

async function startMetricsLoop(db) {

  const ONE_WEEK_IN_MS = 1000 * 60 * 60 * 24 * 7

  // send metrics once on startup
  await sendMetrics(db)
    .catch((e) => { console.log("error sending metrics: ", e) });

  // schedule weekly metrics payload
  setInterval( () => {
    sendMetrics(db)
      .catch((e) => { console.log("error sending metrics: ", e) });
  }, ONE_WEEK_IN_MS);
}

startMetricsLoop(getDatabase());
```

## View Custom Metrics

You can view the custom metrics that you configure for each active instance of your application on the **Instance Details** page in the Vendor Portal.

The following shows an example of an instance with custom metrics:

<img alt="Custom Metrics section of Instance details page" src="/images/instance-custom-metrics.png" width="700px"/>

[View a larger version of this image](/images/instance-custom-metrics.png)

As shown in the image above, the **Custom Metrics** section of the **Instance Details** page includes the following information:
* The timestamp when the custom metric data was last updated.
* Each custom metric that you configured, along with the most recent value for the metric.
* A time-series graph depicting the historical data trends for the selected metric.

Custom metrics are also included in the **Instance activity** stream of the **Instance Details** page. For more information, see [Instance Activity](/vendor/instance-insights-details#instance-activity) in _Instance Details_.

## Export Custom Metrics

You can use the Vendor API v3 `/app/{app_id}/events` endpoint to programmatically access historical timeseries data containing instance-level events, including any custom metrics that you have defined. For more information about the endpoint, see [Export Customer and Instance Data](/vendor/instance-data-export).
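
For example, the following sketch queries the events endpoint using the same authentication pattern as the other Vendor API v3 examples in this documentation. The app ID shown is a placeholder:

```bash
curl --request GET \
  --url 'https://api.replicated.com/vendor/v3/app/APP_ID/events' \
  --header 'Authorization: $REPLICATED_API_TOKEN' \
  --header 'accept: application/json'
```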
+ +================ +File: docs/vendor/customer-adoption.md +================ +# Adoption Report + +This topic describes the insights in the **Adoption** section on the Replicated Vendor Portal **Dashboard** page. + +## About Adoption Rate + +The **Adoption** section on the **Dashboard** provides insights about the rate at which your customers upgrade their instances and adopt the latest versions of your application. As an application vendor, you can use these adoption rate metrics to learn if your customers are completing upgrades regularly, which is a key indicator of the discoverability and ease of application upgrades. + +The Vendor Portal generates adoption rate data from all your customer's application instances that have checked-in during the selected time period. For more information about instance check-ins, see [How the Vendor Portal Collects Instance Data](instance-insights-event-data#about-reporting) in _About Instance and Event Data_. + +The following screenshot shows an example of the **Adoption** section on the **Dashboard**: + +![Adoption report section on dashboard](/images/customer_adoption_rates.png) + +[View a larger version of this image](/images/customer_adoption_rates.png) + +As shown in the screenshot above, the **Adoption** report includes a graph and key adoption rate metrics. For more information about how to interpret this data, see [Adoption Graph](#graph) and [Adoption Metrics](#metrics) below. + +The **Adoption** report also displays the number of customers assigned to the selected channel and a link to the report that you can share with other members of your team. + +You can filter the graph and metrics in the **Adoption** report by: +* License type (Paid, Trial, Dev, or Community) +* Time period (the previous month, three months, six months, or twelve months) +* Release channel to which instance licenses are assigned, such as Stable or Beta + +## Adoption Graph {#graph} + +The **Adoption** report includes a graph that shows the percent of active instances that are running different versions of your application within the selected time period. + +The following shows an example of an adoption rate graph with three months of data: + +![Adoption report graph showing three months of data](/images/adoption_rate_graph.png) + +[View a larger version of this image](/images/adoption_rate_graph.png) + +As shown in the image above, the graph plots the number of active instances in each week in the selected time period, grouped by the version each instance is running. The key to the left of the graph shows the unique color that is assigned to each application version. You can use this color-coding to see at a glance the percent of active instances that were running different versions of your application across the selected time period. + +Newer versions will enter at the bottom of the area chart, with older versions shown higher up. + +You can also hover over a color-coded section in the graph to view the number and percentage of active instances that were running the version in a given period. + +If there are no active instances of your application, then the adoption rate graph displays a "No Instances" message. + +## Adoption Metrics {#metrics} + +The **Adoption** section includes metrics that show how frequently your customers discover and complete upgrades to new versions of your application. It is important that your users adopt new versions of your application so that they have access to the latest features and bug fixes. 
Additionally, when most of your users are on the latest versions, you can also reduce the number of versions for which you provide support and maintain documentation. + +The following shows an example of the metrics in the **Adoption** section: + +![Adoption rate metrics showing](/images/adoption_rate_metrics.png) + +[View a larger version of this image](/images/adoption_rate_metrics.png) + +As shown in the image above, the **Adoption** section displays the following metrics: +* Instances on last three versions +* Unique versions +* Median relative age +* Upgrades completed + +Based on the time period selected, each metric includes an arrow that shows the change in value compared to the previous period. For example, if the median relative age today is 68 days, the selected time period is three months, and three months ago the median relative age was 55 days, then the metric would show an upward-facing arrow with an increase of 13 days. + +The following table describes each metric in the **Adoption** section, including the formula used to calculate its value and the recommended trend for the metric over time: + +<table> + <tbody> + <tr> + <th width="25%">Metric</th> + <th width="45%">Description</th> + <th width="30%">Target Trend</th> + </tr> + <tr> + <td>Instances on last three versions</td> + <td> + <p>Percent of active instances that are running one the latest three versions of your application.</p> + <p><strong>Formula</strong>: <code>count(instances on last 3 versions) / count(instances)</code></p> + </td> + <td>Increase towards 100%</td> + </tr> + <tr> + <td>Unique versions</td> + <td> + <p>Number of unique versions of your application running in active instances.</p> + <p><strong>Formula</strong>: <code>count(distinct instance_version)</code></p> + </td> + <td>Decrease towards less than or equal to three</td> + </tr> + <tr> + <td>Median relative age</td> + <td> + <p>The <em>relative age</em> of a single instance is the number of days between the date that the instance's version was promoted to the channel and the date when the latest available application version was promoted to the channel.</p> + <p><em>Median relative age</em> is the median value across all active instances for the selected time period and channel.</p> + <p><strong>Formula</strong>: <code>median(relative_age(instance_version))</code></p> + </td> + <td><p>Depends on release cadence. For vendors who ship every four to eight weeks, decrease the median relative age towards 60 days or fewer.</p></td> + </tr> + <tr> + <td>Upgrades completed</td> + <td> + <p>Total number of completed upgrades across active instances for the selected time period and channel.</p> + <p>An upgrade is a single version change for an instance. An upgrade is considered complete when the instance deploys the new application version.</p> + <p>The instance does <em>not</em> need to become available (as indicated by reaching a Ready state) after deploying the new version for the upgrade to be counted as complete.</p> + <p><strong>Formula</strong>: <code>sum(instance.upgrade_count) across all instances</code></p> + </td> + <td>Increase compared to any previous period, unless you reduce your total number of live instances.</td> + </tr> + </tbody> +</table> + +================ +File: docs/vendor/customer-reporting.md +================ +# Customer Reporting + +This topic describes the customer and instance data displayed in the **Customers > Reporting** page of the Replicated Vendor Portal. 
+
+## About the Customer Reporting Page {#reporting-page}
+
+The **Customers > Reporting** page displays data about the active application instances associated with each customer. The following shows an example of the **Reporting** page:
+
+![Customer reporting page showing two active instances](/images/customer-reporting-page.png)
+
+[View a larger version of this image](/images/customer-reporting-page.png)
+
+As shown in the image above, the **Reporting** page has the following main sections:
+* [Manage Customer](#manage-customer)
+* [Time to Install](#time-to-install)
+* [Download Portal](#download-portal)
+* [Instances](#instances)
+
+### Manage Customer
+
+The **Manage customer** section displays the following information about the customer:
+
+* The customer name
+* The channel the customer is assigned to
+* Details about the customer license:
+    * The license type
+    * The date the license was created
+    * The expiration date of the license
+* The features the customer has enabled, including:
+    * GitOps
+    * Air gap
+    * Identity
+    * Snapshots
+
+In this section, you can also view the Helm CLI installation instructions for the customer and download the customer license.
+
+### Time to Install
+
+If the customer has one or more application instances that have reached a Ready status at least one time, then the **Time to install** section displays _License time to install_ and _Instance time to install_ metrics:
+
+* **License time to install**: The time between when you create the customer license in the Vendor Portal, and when the application instance reaches a Ready status in the customer environment.
+* **Instance time to install**: The time between when the Vendor Portal records the first event for the application instance in the customer environment, and when the instance reaches a Ready status.
+
+A _Ready_ status indicates that all Kubernetes resources for the application are Ready. For example, a Deployment resource is considered Ready when the number of Ready replicas equals the total desired number of replicas. For more information, see [Enabling and Understanding Application Status](insights-app-status).
+
+If the customer has no application instances that have ever reported a Ready status, or if you have not configured your application to deliver status data to the Vendor Portal, then the **Time to install** section displays a **No Ready Instances** message.
+
+If the customer has more than one application instance that has previously reported a Ready status, then the **Time to install** section displays metrics for the instance that most recently reported a Ready status for the first time.
+
+For example, Instance A reported its first Ready status at 9:00 AM today. Instance B reported its first Ready status at 8:00 AM today, moved to a Degraded status, then reported a Ready status again at 10:00 AM today. In this case, the Vendor Portal displays the time to install metrics for Instance A, which reported its _first_ Ready status most recently.
+
+For more information about how to interpret the time to install metrics, see [Time to Install](instance-insights-details#time-to-install) in _Instance Details_.
+
+### Download Portal
+
+From the **Download portal** section, you can:
+* Manage the password for the Download Portal
+* Access the unique Download Portal URL for the customer
+
+You can use the Download Portal to give your customers access to the files they need to install your application, such as their license file or air gap bundles. For more information, see [Downloading Assets from the Download Portal](releases-share-download-portal).
+
+### Instances
+
+The **Instances** section displays details about the active application instances associated with the customer.
+
+You can click any of the rows in the **Instances** section to open the **Instance details** page. The **Instance details** page displays additional event data and computed metrics to help you understand the performance and status of each active application instance. For more information, see [Instance Details](instance-insights-details).
+
+The following shows an example of a row for an active instance in the **Instances** section:
+
+![Row in the Instances section](/images/instance-row.png)
+
+[View a larger version of this image](/images/instance-row.png)
+
+The **Instances** section displays the following details about each active instance:
+* The first seven characters of the instance ID.
+* The status of the instance. Possible statuses are Missing, Unavailable, Degraded, Ready, and Updating. For more information, see [Enabling and Understanding Application Status](insights-app-status).
+* The application version.
+* Details about the cluster where the instance is installed, including:
+    * The Kubernetes distribution for the cluster, if applicable.
+    * The Kubernetes version running in the cluster.
+    * Whether the instance is installed in a Replicated kURL cluster.
+    * (kURL Clusters Only) The number of ready nodes in the cluster.
+    * (KOTS Only) The KOTS version running in the cluster.
+    * The Replicated SDK version running in the cluster.
+    * The cloud provider and region, if applicable.
+* Instance uptime data, including:
+    * The timestamp of the last recorded check-in for the instance. For more information about what triggers an instance check-in, see [How the Vendor Portal Collects Instance Data](instance-insights-event-data#about-reporting) in _About Instance and Event Data_.
+    * An uptime graph of the previous two weeks. For more information about how the Vendor Portal determines uptime, see [Instance Uptime](instance-insights-details#instance-uptime) in _Instance Details_.
+    * The uptime ratio in the previous two weeks.
+
+================
+File: docs/vendor/data-availability.md
+================
+# Data Availability and Continuity
+
+Replicated uses redundancy and a cloud-native architecture in support of availability and continuity of vendor data.
+
+## Data Storage Architecture
+
+To ensure availability and continuity of necessary vendor data, Replicated uses a cloud-native architecture. This cloud-native architecture includes clustering and network redundancies to eliminate single points of failure.
+
+Replicated stores vendor data in various Amazon Web Services (AWS) S3 buckets and multiple databases. Data stored in the AWS S3 buckets includes registry images and air gap build data.
+
+The following diagram shows the flow of air gap build data and registry images from vendors to enterprise customers.
+
+![Architecture diagram of Replicated vendor data storage](/images/data-storage.png)
+
+[View a larger version of this image](/images/data-storage.png)
+
+As shown in the diagram above, vendors push application images to an image registry. Replicated stores this registry image data in AWS S3 buckets, which are logically isolated by Vendor Portal team. Instances of the vendor's application that are installed by enterprise customers pull data from the image registry.
+
+For more information about how Replicated secures images pushed to the Replicated registry, see [Replicated Registry Security](packaging-private-registry-security).
+
+The diagram also shows how enterprise customers access air gap build data from the customer download portal. Replicated stores this air gap build data in AWS S3 buckets.
+
+## Data Recovery
+
+Our service provider's platform automatically restores customer applications and databases in the case of an outage. The provider's platform is designed to dynamically deploy applications within its cloud, monitor for failures, and recover failed platform components, including customer applications and databases.
+
+For more information, see the [Replicated Security White Paper](https://www.replicated.com/downloads/Replicated-Security-Whitepaper.pdf).
+
+## Data Availability
+
+Replicated availability is continuously monitored. For availability reports, see https://status.replicated.com.
+
+## Offsite Data Backup Add-on
+
+For additional data redundancy, an offsite data backup add-on is available to copy customer data to a separate cloud provider. This add-on mitigates potential data loss by our primary service provider. For more information, see [Offsite Data Backup](offsite-backup).
+
+================
+File: docs/vendor/database-config-adding-options.md
+================
+# About Managing Stateful Services
+
+This topic provides recommendations for managing stateful services that you install into existing clusters.
+
+## Preflight Checks for Stateful Services
+
+If you expect to also install stateful services into existing clusters, you will likely want to expose [preflight analyzers that check for the existence of a storage class](https://troubleshoot.sh/reference/analyzers/storage-class/).
+
+If you are allowing end users to provide connection details for external databases, you can often use a troubleshoot.sh built-in [collector](https://troubleshoot.sh/docs/collect/) and [analyzer](https://troubleshoot.sh/docs/analyze/) to validate the connection details for [Postgres](https://troubleshoot.sh/docs/analyze/postgresql/), [Redis](https://troubleshoot.sh/docs/collect/redis/), and many other common datastores. These can be included in both `Preflight` and `SupportBundle` specifications.
+
+## About Adding Persistent Datastores
+
+You can integrate persistent stores, such as databases, queues, and caches. There are options you can give an end user, such as embedding a datastore instance alongside the application or connecting the application to an external instance that the end user manages.
+
+For an example of integrating persistent datastores, see [Example: Adding Database Configuration Options](tutorial-adding-db-config).
+
+================
+File: docs/vendor/embedded-disaster-recovery.mdx
+================
+# Disaster Recovery for Embedded Cluster (Alpha)
+
+This topic describes the disaster recovery feature for Replicated Embedded Cluster, including how to enable disaster recovery for your application. It also describes how end users can configure disaster recovery in the Replicated KOTS Admin Console and restore from a backup.
+
+:::important
+Embedded Cluster disaster recovery is an Alpha feature. This feature is subject to change, including breaking changes. To get access to this feature, reach out to Alex Parker at [alexp@replicated.com](mailto:alexp@replicated.com).
+:::
+
+:::note
+Embedded Cluster does not support backup and restore with the KOTS snapshots feature. For more information about using snapshots for existing cluster installations with KOTS, see [About Backup and Restore with Snapshots](/vendor/snapshots-overview).
+:::
+
+## Overview
+
+The Embedded Cluster disaster recovery feature allows your customers to take backups from the Admin Console and perform restores from the command line. Disaster recovery for Embedded Cluster is implemented with Velero. For more information about Velero, see the [Velero](https://velero.io/docs/latest/) documentation.
+
+The backups that your customers take from the Admin Console include both the Embedded Cluster infrastructure and the application resources that you specify.
+
+The Embedded Cluster infrastructure that is backed up includes components such as the KOTS Admin Console and the built-in registry that is deployed for air gap installations. No configuration is required to include Embedded Cluster infrastructure in backups. Vendors specify the application resources to include in backups by configuring a Velero Backup resource in the application release.
+
+## Requirements
+
+Embedded Cluster disaster recovery has the following requirements:
+
+* The disaster recovery feature flag must be enabled for your account. To get access to disaster recovery, reach out to Alex Parker at [alexp@replicated.com](mailto:alexp@replicated.com).
+* Embedded Cluster version 1.22.0 or later
+* Backups must be stored in S3-compatible storage
+
+## Limitations and Known Issues
+
+Embedded Cluster disaster recovery has the following limitations and known issues:
+
+* During a restore, the version of the Embedded Cluster installation assets must match the version of the application in the backup. So if version 0.1.97 of your application was backed up, the Embedded Cluster installation assets for 0.1.97 must be used to perform the restore. Use `./APP_SLUG version` to check the version of the installation assets, where `APP_SLUG` is the unique application slug. For example:

    <img alt="version command" src="/images/ec-version-command.png" width="450px"/>

    [View a larger version of this image](/images/ec-version-command.png)
+
+* Any Helm extensions included in the `extensions` field of the Embedded Cluster Config are _not_ included in backups. Helm extensions are reinstalled as part of the restore process. To include Helm extensions in backups, configure the Velero Backup resource to include the extensions using namespace-based or label-based selection. For more information, see [Configure the Velero Custom Resources](#config-velero-resources) below.
+
+* Users can only restore from the most recent backup.
+
+* Velero is installed only during the initial installation process. Enabling the disaster recovery license field for customers after they have already installed has no effect.
+
+* If the `--admin-console-port` flag was used during install to change the port for the Admin Console, note that during a restore the Admin Console port from the backup will be used, and it cannot be changed. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install).
+
+## Configure Disaster Recovery
+
+This section describes how to configure disaster recovery for Embedded Cluster installations. It also describes how to enable access to the disaster recovery feature on a per-customer basis.
+
+### Configure the Velero Custom Resources {#config-velero-resources}
+
+This section describes how to set up Embedded Cluster disaster recovery for your application by configuring Velero [Backup](https://velero.io/docs/latest/api-types/backup/) and [Restore](https://velero.io/docs/latest/api-types/restore/) custom resources in a release.
+
+To configure Velero Backup and Restore custom resources for Embedded Cluster disaster recovery:
+
+1. In a new release containing your application files, add a Velero Backup resource. In the Backup resource, use namespace-based or label-based selection to indicate the application resources that you want to be included in the backup. For more information, see [Backup API Type](https://velero.io/docs/latest/api-types/backup/) in the Velero documentation.
+
+    :::important
+    If you use namespace-based selection to include all of your application resources deployed in the `kotsadm` namespace, ensure that you exclude the Replicated resources that are also deployed in the `kotsadm` namespace. Because the Embedded Cluster infrastructure components are always included in backups automatically, this avoids duplication.
+    :::
+
+    **Example:**
+
+    The following Backup resource uses namespace-based selection to include application resources deployed in the `kotsadm` namespace:
+
+    ```yaml
+    apiVersion: velero.io/v1
+    kind: Backup
+    metadata:
+      name: backup
+    spec:
+      # Back up the resources in the kotsadm namespace
+      includedNamespaces:
+      - kotsadm
+      orLabelSelectors:
+      - matchExpressions:
+        # Exclude Replicated resources from the backup
+        - { key: kots.io/kotsadm, operator: NotIn, values: ["true"] }
+    ```
+
+1. In the same release, add a Velero Restore resource. In the `backupName` field of the Restore resource, include the name of the Backup resource that you created. For more information, see [Restore API Type](https://velero.io/docs/latest/api-types/restore/) in the Velero documentation.
+
+    **Example**:
+
+    ```yaml
+    apiVersion: velero.io/v1
+    kind: Restore
+    metadata:
+      name: restore
+    spec:
+      # the name of the Backup resource that you created
+      backupName: backup
+      includedNamespaces:
+      - '*'
+    ```
+
+1. For any image names that you include in your Backup and Restore resources, rewrite the image name using the Replicated KOTS [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry), [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost), and [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace) template functions. This ensures that the image name is rendered correctly during deployment, allowing the image to be pulled from the user's local image registry (such as in air gap installations) or through the Replicated proxy registry.
+
+    **Example:**
+
+    ```yaml
+    apiVersion: velero.io/v1
+    kind: Restore
+    metadata:
+      name: restore
+    spec:
+      hooks:
+        resources:
+        - name: restore-hook-1
+          includedNamespaces:
+          - kotsadm
+          labelSelector:
+            matchLabels:
+              app: example
+          postHooks:
+          - init:
+              initContainers:
+              - name: restore-hook-init1
+                image:
+                  # Use HasLocalRegistry, LocalRegistryHost, and LocalRegistryNamespace
+                  # to template the image name
+                  registry: '{{repl HasLocalRegistry | ternary LocalRegistryHost "proxy.replicated.com" }}'
+                  repository: '{{repl HasLocalRegistry | ternary LocalRegistryNamespace "proxy/my-app/quay.io/my-org" }}/nginx'
+                  tag: 1.24-alpine
+    ```
+    For more information about how to rewrite image names using the KOTS [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry), [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost), and [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace) template functions, including additional examples, see [Task 1: Rewrite Image Names](helm-native-v2-using#rewrite-image-names) in _Configuring the HelmChart v2 Custom Resource_.
+
+1. If you support air gap installations, add any images that are referenced in your Backup and Restore resources to the `additionalImages` field of the KOTS Application custom resource. This ensures that the images are included in the air gap bundle for the release so they can be used during the backup and restore process in environments with limited or no outbound internet access. For more information, see [additionalImages](/reference/custom-resource-application#additionalimages) in _Application_.
+
+    **Example:**
+
+    ```yaml
+    apiVersion: kots.io/v1beta1
+    kind: Application
+    metadata:
+      name: my-app
+    spec:
+      additionalImages:
+      - elasticsearch:7.6.0
+      - quay.io/orgname/private-image:v1.2.3
+    ```
+
+1. (Optional) Use Velero functionality like [backup](https://velero.io/docs/main/backup-hooks/) and [restore](https://velero.io/docs/main/restore-hooks/) hooks to customize the backup and restore process as needed.
+
+    **Example:**
+
+    A Postgres database might be backed up using `pg_dump` to extract the database into a file as part of a backup hook. It can then be restored using the file in a restore hook:
+
+    ```yaml
+    podAnnotations:
+      backup.velero.io/backup-volumes: backup
+      pre.hook.backup.velero.io/command: '["/bin/bash", "-c", "PGPASSWORD=$POSTGRES_PASSWORD pg_dump -U {{repl ConfigOption "postgresql_username" }} -d {{repl ConfigOption "postgresql_database" }} -h 127.0.0.1 > /scratch/backup.sql"]'
+      pre.hook.backup.velero.io/timeout: 3m
+      post.hook.restore.velero.io/command: '["/bin/bash", "-c", "[ -f \"/scratch/backup.sql\" ] && PGPASSWORD=$POSTGRES_PASSWORD psql -U {{repl ConfigOption "postgresql_username" }} -h 127.0.0.1 -d {{repl ConfigOption "postgresql_database" }} -f /scratch/backup.sql && rm -f /scratch/backup.sql;"]'
+      post.hook.restore.velero.io/wait-for-ready: 'true' # waits for the pod to be ready before running the post-restore hook
+    ```
+
+1. Save and promote the release to a development channel for testing.
+
+### Enable the Disaster Recovery Feature for Your Customers
+
+After configuring disaster recovery for your application, you can enable it on a per-customer basis with the **Allow Disaster Recovery (Alpha)** license field.
+
+To enable disaster recovery for a customer:
+
+1. In the Vendor Portal, go to the [Customers](https://vendor.replicated.com/customers) page and select the target customer.
+
+1. On the **Manage customer** page, under **License options**, enable the **Allow Disaster Recovery (Alpha)** field.
+
+    When your customer installs with Embedded Cluster, Velero will be deployed if the **Allow Disaster Recovery (Alpha)** license field is enabled.
+
+## Take Backups and Restore
+
+This section describes how your customers can configure backup storage, take backups, and restore from backups.
+
+### Configure Backup Storage and Take Backups in the Admin Console
+
+Customers with the **Allow Disaster Recovery (Alpha)** license field enabled can configure their backup storage location and take backups from the Admin Console.
+
+To configure backup storage and take backups:
+
+1. After installing the application and logging in to the Admin Console, click the **Disaster Recovery** tab at the top of the Admin Console.
+
+1. For the desired S3-compatible backup storage location, enter the bucket, prefix (optional), access key ID, access key secret, endpoint, and region. Click **Update storage settings**.
+
+    <img alt="backup storage settings" src="/images/dr-backup-storage-settings.png" width="400px"/>
+
+    [View a larger version of this image](/images/dr-backup-storage-settings.png)
+
+1. (Optional) From this same page, configure scheduled backups and a retention policy for backups.
+
+    <img src="/images/dr-scheduled-backups.png" width="400px" alt="scheduled backups"/>
+
+    [View a larger version of this image](/images/dr-scheduled-backups.png)
+
+1. In the **Disaster Recovery** submenu, click **Backups**. Backups can be taken from this screen.
+
+    <img src="/images/dr-backups.png" alt="backups page" width="600px"/>
+
+    [View a larger version of this image](/images/dr-backups.png)
+
+### Restore from a Backup
+
+To restore from a backup:
+
+1. SSH onto a new machine where you want to restore from a backup.
+
+1. Download the Embedded Cluster installation assets for the version of the application that was included in the backup. You can find the command for downloading Embedded Cluster installation assets in the **Embedded Cluster install instructions** dialog for the customer. For more information, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded).
+
+    :::note
+    The version of the Embedded Cluster installation assets must match the version that is in the backup. For more information, see [Limitations and Known Issues](#limitations-and-known-issues).
+    :::
+
+1. Run the restore command:
+
+    ```bash
+    sudo ./APP_SLUG restore
+    ```
+    Where `APP_SLUG` is the unique application slug.
+
+    Note the following requirements and guidance for the `restore` command:
+
+    * If the installation is behind a proxy, the same proxy settings provided during install must be provided to the restore command using `--http-proxy`, `--https-proxy`, and `--no-proxy`. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install).
+
+    * If the `--cidr` flag was used during install to set the IP address ranges for Pods and Services, this flag must be provided with the same CIDR during the restore. If this flag is not provided or is provided with a different CIDR, the restore will fail with an error message telling you to rerun with the appropriate value. However, it will take some time before that error occurs. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install).
+
+    * If the `--local-artifact-mirror-port` flag was used during install to change the port for the Local Artifact Mirror (LAM), you can optionally use the `--local-artifact-mirror-port` flag to choose a different LAM port during restore. For example, `restore --local-artifact-mirror-port=50000`. If no LAM port is provided during restore, the LAM port that was supplied during installation will be used. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install).
+
+    You will be guided through the process of restoring from a backup.
+
+1. When prompted, enter the information for the backup storage location.
+
+    ![Restore prompts on the command line](/images/dr-restore.png)
+
+    [View a larger version of this image](/images/dr-restore.png)
+
+1. When prompted, confirm that you want to restore from the detected backup.
+
+    ![Restore from detected backup prompt on the command line](/images/dr-restore-from-backup-confirmation.png)
+
+    [View a larger version of this image](/images/dr-restore-from-backup-confirmation.png)
+
+    After some time, the Admin Console URL is displayed:
+
+    ![Admin Console URL displayed on the command line](/images/dr-restore-admin-console-url.png)
+
+    [View a larger version of this image](/images/dr-restore-admin-console-url.png)
+
+1. (Optional) If the cluster should have multiple nodes, go to the Admin Console to get a join command and join additional nodes to the cluster. For more information, see [Managing Multi-Node Clusters with Embedded Cluster](/enterprise/embedded-manage-nodes).
+
+1. Type `continue` when you are ready to proceed with the restore process.
+
+    ![Type continue when you are done adding nodes](/images/dr-restore-continue.png)
+
+    [View a larger version of this image](/images/dr-restore-continue.png)
+
+    After some time, the restore process completes.
+
+    If the `restore` command is interrupted during the restore process, you can resume by rerunning the `restore` command and selecting to resume the previous restore. This is useful if your SSH session is interrupted during the restore.
+
+================
+File: docs/vendor/embedded-overview.mdx
+================
+import EmbeddedCluster from "../partials/embedded-cluster/_definition.mdx"
+import Requirements from "../partials/embedded-cluster/_requirements.mdx"
+import EmbeddedClusterPortRequirements from "../partials/embedded-cluster/_port-reqs.mdx"
+import HaArchitecture from "../partials/embedded-cluster/_multi-node-ha-arch.mdx"
+
+# Embedded Cluster Overview
+
+This topic provides an introduction to Replicated Embedded Cluster, including a description of the built-in extensions installed by Embedded Cluster, an overview of the Embedded Cluster single-node and multi-node architecture, and requirements and limitations.
+
+:::note
+If you are instead looking for information about creating Kubernetes Installers with Replicated kURL, see the [Replicated kURL](/vendor/packaging-embedded-kubernetes) section.
+:::
+
+## Overview
+
+<EmbeddedCluster/>
+
+## Architecture
+
+This section describes the Embedded Cluster architecture, including the built-in extensions deployed by Embedded Cluster.
+
+### Single-Node Architecture
+
+The following diagram shows the architecture of a single-node Embedded Cluster installation for an application named Gitea:
+
+![Embedded Cluster single-node architecture](/images/embedded-architecture-single-node.png)
+
+[View a larger version of this image](/images/embedded-architecture-single-node.png)
+
+As shown in the diagram above, the user downloads the Embedded Cluster installation assets as a `.tgz` in their installation environment. These installation assets include the Embedded Cluster binary, the user's license file, and (for air gap installations) an air gap bundle containing the images needed to install and run the release in an environment with limited or no outbound internet access.
+
+When the user runs the Embedded Cluster install command, the Embedded Cluster binary first installs the k0s cluster as a systemd service.
+
+After all the Kubernetes components for the cluster are available, the Embedded Cluster binary then installs the Embedded Cluster built-in extensions. For more information about these extensions, see [Built-In Extensions](#built-in-extensions) below.
+
+Any Helm extensions that were included in the [`extensions`](/reference/embedded-config#extensions) field of the Embedded Cluster Config are also installed. The namespace or namespaces where Helm extensions are installed are defined by the vendor in the Embedded Cluster Config.
+
+Finally, Embedded Cluster also installs the Local Artifact Mirror (LAM). In air gap installations, LAM is used to store and update images.
+
+### Multi-Node Architecture
+
+The following diagram shows the architecture of a multi-node Embedded Cluster installation:
+
+![Embedded Cluster multi-node architecture](/images/embedded-architecture-multi-node.png)
+
+[View a larger version of this image](/images/embedded-architecture-multi-node.png)
+
+As shown in the diagram above, in multi-node installations, the Embedded Cluster Operator, KOTS, and the image registry for air gap installations are all installed on one controller node.
+
+For installations that include disaster recovery with Velero, the Velero Node Agent runs on each node in the cluster. The Node Agent is a Kubernetes DaemonSet that performs backup and restore tasks such as creating snapshots and transferring data during restores.
+
+Additionally, any Helm [`extensions`](/reference/embedded-config#extensions) that you include in the Embedded Cluster Config are installed in the cluster depending on the given chart and how it is configured to be deployed.
+
+### Multi-Node Architecture with High Availability
+
+:::note
+High availability (HA) for multi-node installations with Embedded Cluster is Alpha and is not enabled by default. For more information about enabling HA, see [Enable High Availability for Multi-Node Clusters (Alpha)](/enterprise/embedded-manage-nodes#ha).
+:::
+
+<HaArchitecture/>
+
+## Built-In Extensions {#built-in-extensions}
+
+Embedded Cluster includes several built-in extensions. The built-in extensions provide capabilities such as application management and storage. Each built-in extension is installed in its own namespace.
+
+The built-in extensions installed by Embedded Cluster include:
+
+* **Embedded Cluster Operator**: The Operator is used for reporting purposes as well as some cleanup operations.
+
+* **KOTS:** Embedded Cluster installs the KOTS Admin Console in the kotsadm namespace. End customers use the Admin Console to configure and install the application. Rqlite is also installed in the kotsadm namespace alongside KOTS. Rqlite is a distributed relational database that uses SQLite as its storage engine. KOTS uses rqlite to store information such as support bundles, version history, application metadata, and other small amounts of data needed to manage the application. For more information about rqlite, see the [rqlite](https://rqlite.io/) website.
+
+* **OpenEBS:** Embedded Cluster uses OpenEBS to provide local PersistentVolume (PV) storage, including the PV storage for rqlite used by KOTS. For more information, see the [OpenEBS](https://openebs.io/docs/) documentation.
+
+* **(Disaster Recovery Only) Velero:** If the installation uses the Embedded Cluster disaster recovery feature, Embedded Cluster installs Velero, which is an open-source tool that provides backup and restore functionality. For more information about Velero, see the [Velero](https://velero.io/docs/latest/) documentation. For more information about the disaster recovery feature, see [Disaster Recovery for Embedded Cluster (Alpha)](/vendor/embedded-disaster-recovery).
+
+* **(Air Gap Only) Image registry:** For air gap installations in environments with limited or no outbound internet access, Embedded Cluster installs an image registry where the images required to install and run the application are pushed. For more information about installing in air-gapped environments, see [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap).
+
+## Comparison to kURL
+
+Embedded Cluster is a successor to Replicated kURL. Compared to kURL, Embedded Cluster offers several improvements, such as:
+* Significantly faster installation, updates, and node joins
+* A redesigned Admin Console UI for managing the cluster
+* Improved support for multi-node clusters
+* One-click updates of both the application and the cluster at the same time
+
+Additionally, Embedded Cluster automatically deploys several built-in extensions like KOTS and OpenEBS to provide capabilities such as application management and storage. This represents an improvement over kURL because vendors distributing their application with Embedded Cluster no longer need to choose and define various add-ons in the installer spec. For additional functionality that is not included in the built-in extensions, such as an ingress controller, vendors can provide their own [`extensions`](/reference/embedded-config#extensions) that will be deployed alongside the application.
+
+## Requirements
+
+### System Requirements
+
+<Requirements/>
+
+### Port Requirements
+
+<EmbeddedClusterPortRequirements/>
+
+## Limitations
+
+Embedded Cluster has the following limitations:
+
+* **Reach out about migrating from kURL**: We are helping several customers migrate from kURL to Embedded Cluster. Reach out to Alex Parker at alexp@replicated.com for more information.
+
+* **Multi-node support is in beta**: Support for multi-node embedded clusters is in beta, and enabling high availability for multi-node clusters is in alpha. Only single-node embedded clusters are generally available. For more information, see [Managing Multi-Node Clusters with Embedded Cluster](/enterprise/embedded-manage-nodes).
+
+* **Disaster recovery is in alpha**: Disaster Recovery for Embedded Cluster installations is in alpha. For more information, see [Disaster Recovery for Embedded Cluster (Alpha)](/vendor/embedded-disaster-recovery).
+
+* **Partial rollback support**: In Embedded Cluster 1.17.0 and later, rollbacks are supported only when rolling back to a version where there is no change to the [Embedded Cluster Config](/reference/embedded-config) compared to the currently-installed version. For example, users can roll back to release version 1.0.0 after upgrading to 1.1.0 only if both 1.0.0 and 1.1.0 use the same Embedded Cluster Config. For more information about how to enable rollbacks for your application in the KOTS Application custom resource, see [allowRollback](/reference/custom-resource-application#allowrollback) in _Application_.
+
+* **Changing node hostnames is not supported**: After a host is added to a Kubernetes cluster, Kubernetes assumes that the hostname and IP address of the host will not change. If you need to change the hostname or IP address of a node, you must first remove the node from the cluster. For more information about the requirements for naming nodes, see [Node name uniqueness](https://kubernetes.io/docs/concepts/architecture/nodes/#node-name-uniqueness) in the Kubernetes documentation.
+
+* **Automatic updates not supported**: Configuring automatic updates from the Admin Console so that new versions are automatically deployed is not supported for Embedded Cluster installations. For more information, see [Configuring Automatic Updates](/enterprise/updating-apps).
+
+* **Embedded Cluster installation assets not available through the Download Portal**: The assets required to install with Embedded Cluster cannot be shared with users through the Download Portal. Users can follow the Embedded Cluster installation instructions to download and extract the installation assets. For more information, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded).
+
+* **`minKotsVersion` and `targetKotsVersion` not supported**: The [`minKotsVersion`](/reference/custom-resource-application#minkotsversion-beta) and [`targetKotsVersion`](/reference/custom-resource-application#targetkotsversion) fields in the KOTS Application custom resource are not supported for Embedded Cluster installations. This is because each version of Embedded Cluster includes a particular version of KOTS. Setting `targetKotsVersion` or `minKotsVersion` to a version of KOTS that does not coincide with the version that is included in the specified version of Embedded Cluster will cause Embedded Cluster installations to fail with an error message like: `Error: This version of App Name requires a different version of KOTS from what you currently have installed`. To avoid installation failures, do not use `targetKotsVersion` or `minKotsVersion` in releases that support installation with Embedded Cluster.
+
+* **Support bundles over 100MB in the Admin Console**: Support bundles are stored in rqlite. Bundles over 100MB could cause rqlite to crash, causing errors in the installation. You can still generate a support bundle from the command line. For more information, see [Generating Support Bundles for Embedded Cluster](/vendor/support-bundle-embedded).
+
+* **Kubernetes version template functions not supported**: The KOTS [KubernetesVersion](/reference/template-functions-static-context#kubernetesversion), [KubernetesMajorVersion](/reference/template-functions-static-context#kubernetesmajorversion), and [KubernetesMinorVersion](/reference/template-functions-static-context#kubernetesminorversion) template functions do not provide accurate Kubernetes version information for Embedded Cluster installations. This is because these template functions are rendered before the Kubernetes cluster has been updated to the intended version. However, `KubernetesVersion` is not necessary for Embedded Cluster because vendors specify the Embedded Cluster version, which includes a known Kubernetes version.
+
+* **Custom domains not supported**: Embedded Cluster does not support the use of custom domains, even if custom domains are configured. We intend to add support for custom domains. For more information about custom domains, see [About Custom Domains](/vendor/custom-domains).
+
+* **KOTS Auto-GitOps workflow not supported**: Embedded Cluster does not support the KOTS Auto-GitOps workflow. If an end user is interested in GitOps, consider the Helm install method instead. For more information, see [Installing with Helm](/vendor/install-with-helm).
+
+* **Downgrading Kubernetes not supported**: Embedded Cluster does not support downgrading Kubernetes. The Admin Console will not prevent end users from attempting to downgrade Kubernetes if a more recent version of your application specifies a previous Embedded Cluster version. You must ensure that you do not promote new versions with previous Embedded Cluster versions.
+
+* **Templating not supported in Embedded Cluster Config**: The [Embedded Cluster Config](/reference/embedded-config) resource does not support the use of Go template functions, including [KOTS template functions](/reference/template-functions-about). This only applies to the Embedded Cluster Config. You can still use template functions in the rest of your release as usual.
+
+* **Policy enforcement on Embedded Cluster workloads is not supported**: Embedded Cluster runs workloads that require higher levels of privilege. If your application installs a policy enforcement engine such as Gatekeeper or Kyverno, ensure that its policies are not enforced in the namespaces used by Embedded Cluster.
+
+* **Installing on STIG- and CIS-hardened OS images is not supported**: Embedded Cluster is not tested on these images, and issues have arisen when trying to install on them.
+
+================
+File: docs/vendor/embedded-using.mdx
+================
+import UpdateOverview from "../partials/embedded-cluster/_update-overview.mdx"
+import SupportBundleIntro from "../partials/support-bundles/_ec-support-bundle-intro.mdx"
+import EmbeddedClusterSupportBundle from "../partials/support-bundles/_generate-bundle-ec.mdx"
+import EcConfig from "../partials/embedded-cluster/_ec-config.mdx"
+
+# Using Embedded Cluster
+
+This topic provides information about using Replicated Embedded Cluster, including how to get started, configure Embedded Cluster, access the cluster using kubectl, and more. For an introduction to Embedded Cluster, see [Embedded Cluster Overview](embedded-overview).
+
+## Quick Start
+
+You can use the following steps to get started quickly with Embedded Cluster. More detailed documentation is available below.
+
+1. Create a new customer or edit an existing customer and select the **Embedded Cluster Enabled** license option. Save the customer.
+
+1. Create a new release that includes your application. In that release, create an Embedded Cluster Config that includes, at minimum, the Embedded Cluster version you want to use. See the Embedded Cluster [GitHub repo](https://github.com/replicatedhq/embedded-cluster/releases) to find the latest version.
+
+    Example Embedded Cluster Config:
+
+    <EcConfig/>
+
+1. Save the release and promote it to the channel the customer is assigned to.
+
+1. Return to the customer page where you enabled Embedded Cluster. At the top right, click **Install instructions** and choose **Embedded Cluster**. A dialog appears with instructions on how to download the Embedded Cluster installation assets and install your application.
+
+    ![Customer install instructions drop down button](/images/customer-install-instructions-dropdown.png)
+
+    [View a larger version of this image](/images/customer-install-instructions-dropdown.png)
+
+1. On your VM, run the commands in the **Embedded Cluster install instructions** dialog.
+
+    <img alt="Embedded cluster install instruction dialog" src="/images/embedded-cluster-install-dialog-latest.png" width="550px"/>
+
+    [View a larger version of this image](/images/embedded-cluster-install-dialog-latest.png)
+
+1. Enter an Admin Console password when prompted.
+
+    The Admin Console URL is printed when the installation finishes. Access the Admin Console to begin installing your application. During the installation process in the Admin Console, you have the opportunity to add nodes if you want a multi-node cluster. Then you can provide application config, run preflights, and deploy your application.
+
+## About Configuring Embedded Cluster
+
+To install an application with Embedded Cluster, an Embedded Cluster Config must be present in the application release. The Embedded Cluster Config lets you define several characteristics about the cluster that will be created.
+
+For more information, see [Embedded Cluster Config](/reference/embedded-config).
+
+## About Installing with Embedded Cluster
+
+This section provides an overview of installing applications with Embedded Cluster.
+
+### Installation Overview
+
+The following diagram demonstrates how Kubernetes and an application are installed into a customer environment using Embedded Cluster:
+
+![Embedded Cluster installs an app in a customer environment](/images/embedded-cluster-install.png)
+
+[View a larger version of this image](/images/embedded-cluster-install.png)
+
+As shown in the diagram above, the Embedded Cluster Config is included in the application release in the Replicated Vendor Portal and is used to generate the Embedded Cluster installation assets. Users can download these installation assets from the Replicated app service (`replicated.app`) on the command line, then run the Embedded Cluster installation command to install Kubernetes and the KOTS Admin Console. Finally, users access the Admin Console to optionally add nodes to the cluster and to configure and install the application.
+
+### Installation Options
+
+Embedded Cluster supports installations in online (internet-connected) environments and air gap environments with no outbound internet access.
+
+For online installations, Embedded Cluster also supports installing behind a proxy server.
+
+For more information about how to install with Embedded Cluster, see:
+* [Online Installation with Embedded Cluster](/enterprise/installing-embedded)
+* [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap)
+
+### Customer-Specific Installation Instructions
+
+To install with Embedded Cluster, you can follow the customer-specific instructions provided on the **Customer** page in the Vendor Portal. For example:
+
+<img alt="Embedded cluster install instruction dialog" src="/images/embedded-cluster-install-dialog.png" width="500px"/>
+
+[View a larger version of this image](/images/embedded-cluster-install-dialog.png)
+
+### (Optional) Serve Installation Assets Using the Vendor API
+
+To install with Embedded Cluster, you need to download the Embedded Cluster installer binary and a license. Air gap installations also require an air gap bundle. Some vendors already have a portal where their customers can log in to access documentation or download artifacts. In cases like this, you can serve the Embedded Cluster installation assets yourself using the Replicated Vendor API, rather than having customers download the assets from the Replicated app service using a curl command during installation.
+
+To serve Embedded Cluster installation assets with the Vendor API:
+
+1. If you have not done so already, create an API token for the Vendor API. See [Using the Vendor API v3](/reference/vendor-api-using#api-token-requirement).
+
+1. Call the [Get an Embedded Cluster release](https://replicated-vendor-api.readme.io/reference/getembeddedclusterrelease) endpoint to download the assets needed to install your application with Embedded Cluster. Your customers must take this binary and their license and copy them to the machine where they will install your application.
+
+    Note the following:
+
+    * (Recommended) Provide the `customerId` query parameter so that the customer's license is included in the downloaded tarball. This mirrors what is returned when a customer downloads the binary directly using the Replicated app service and is the most useful option. Excluding the `customerId` is useful if you plan to distribute the license separately.
+
+    * If you do not provide any query parameters, this endpoint downloads the Embedded Cluster binary for the latest release on the specified channel. You can provide the `channelSequence` query parameter to download the binary for a particular release.
+
+### About Host Preflight Checks
+
+During installation, Embedded Cluster automatically runs a default set of _host preflight checks_. The default host preflight checks are designed to verify that the installation environment meets the requirements for Embedded Cluster, such as:
+* The system has sufficient disk space
+* The system has at least 2G of memory and 2 CPU cores
+* The system clock is synchronized
+
+For Embedded Cluster requirements, see [Embedded Cluster Installation Requirements](/enterprise/installing-embedded-requirements). For the full default host preflight spec for Embedded Cluster, see [`host-preflight.yaml`](https://github.com/replicatedhq/embedded-cluster/blob/main/pkg/preflights/host-preflight.yaml) in the `embedded-cluster` repository in GitHub.
+
+If any of the host preflight checks fail, installation is blocked and a message describing the failure is displayed. For more information about host preflight checks for installations on VMs or bare metal servers, see [About Host Preflights](preflight-support-bundle-about#host-preflights).
+
+#### Limitations
+
+Embedded Cluster host preflight checks have the following limitations:
+
+* The default host preflight checks for Embedded Cluster cannot be modified, and vendors cannot provide their own custom host preflight spec for Embedded Cluster.
+* Host preflight checks do not check that any application-specific requirements are met. For more information about defining preflight checks for your application, see [Defining Preflight Checks](/vendor/preflight-defining).
+
+#### Skip Host Preflight Checks
+
+You can skip host preflight checks by passing the `--skip-host-preflights` flag with the Embedded Cluster `install` command. For example:
+
+```bash
+sudo ./my-app install --license license.yaml --skip-host-preflights
+```
+
+When you skip host preflight checks, the Admin Console still runs any application-specific preflight checks that are defined in the release before the application is deployed.
+
+:::note
+Skipping host preflight checks is _not_ recommended for production installations.
+:::
+
+## About Managing Multi-Node Clusters with Embedded Cluster
+
+This section describes managing nodes in multi-node clusters created with Embedded Cluster.
+
+### Defining Node Roles for Multi-Node Clusters
+
+You can optionally define node roles in the Embedded Cluster Config. For multi-node clusters, roles can be useful for the purpose of assigning specific application workloads to nodes. If node roles are defined, users access the Admin Console to assign one or more roles to a node when it is joined to the cluster.
+
+For more information, see [roles](/reference/embedded-config#roles) in _Embedded Cluster Config_.
+
+### Adding Nodes
+
+Users can add nodes to a cluster with Embedded Cluster from the Admin Console. The Admin Console provides the join command used to add nodes to the cluster.
+
+For more information, see [Managing Multi-Node Clusters with Embedded Cluster](/enterprise/embedded-manage-nodes).
+
+### High Availability for Multi-Node Clusters (Alpha)
+
+Multi-node clusters are not highly available by default. Enabling high availability (HA) requires that at least three controller nodes are present in the cluster. Users can enable HA when joining the third node.
+
+For more information about creating HA multi-node clusters with Embedded Cluster, see [Enable High Availability for Multi-Node Clusters (Alpha)](/enterprise/embedded-manage-nodes#ha) in _Managing Multi-Node Clusters with Embedded Cluster_.
+
+## About Performing Updates with Embedded Cluster
+
+<UpdateOverview/>
+
+For more information about updating, see [Performing Updates with Embedded Cluster](/enterprise/updating-embedded).
+
+## Access the Cluster
+
+With Embedded Cluster, end users should rarely need to use the CLI. Typical workflows, like updating the application and the cluster, are driven through the Admin Console.
+
+Nonetheless, there are times when vendors or their customers need to use the CLI for development or troubleshooting.
+
+To access the cluster and use other included binaries:
+
+1. SSH onto a controller node.
+
+1. Use the Embedded Cluster shell command to start a shell with access to the cluster:
+
+    ```
+    sudo ./APP_SLUG shell
+    ```
+
+    The output looks similar to the following:
+    ```
+        __4___
+     _  \ \ \ \   Welcome to APP_SLUG debug shell.
+    <'\ /_/_/_/   This terminal is now configured to access your cluster.
+    ((____!___/)  Type 'exit' (or CTRL+d) to exit.
+     \0\0\0\0\/   Happy hacking.
+       ~~~~~~~~~~~
+    root@alex-ec-2:/home/alex# export KUBECONFIG="/var/lib/embedded-cluster/k0s/pki/admin.conf"
+    root@alex-ec-2:/home/alex# export PATH="$PATH:/var/lib/embedded-cluster/bin"
+    root@alex-ec-2:/home/alex# source <(kubectl completion bash)
+    root@alex-ec-2:/home/alex# source /etc/bash_completion
+    ```
+
+    The appropriate kubeconfig is exported, and the location of useful binaries like kubectl and Replicated’s preflight and support-bundle plugins is added to PATH.
+
+    :::note
+    You cannot run the `shell` command on worker nodes.
+    :::
+
+1. Use the available binaries as needed.
+
+    **Example**:
+
+    ```bash
+    kubectl version
+    ```
+    ```
+    Client Version: v1.29.1
+    Kustomize Version: v5.0.4-0.20230601165947-6ce0bf390ce3
+    Server Version: v1.29.1+k0s
+    ```
+
+1. Type `exit` or **Ctrl + D** to exit the shell.
+
+    :::note
+    If you encounter a typical workflow where your customers have to use the Embedded Cluster shell, reach out to Alex Parker at alexp@replicated.com. These workflows might be candidates for additional Admin Console functionality.
+    :::
+
+## Reset a Node
+
+Resetting a node removes the cluster and your application from that node. This is useful for iteration and development, or when mistakes are made, because you can reset a machine and reuse it instead of having to procure another machine.
+
+If you want to completely remove a cluster, you need to reset each node individually.
+
+When resetting a node, OpenEBS PVCs on the node are deleted. Only PVCs created as part of a StatefulSet will be recreated automatically on another node. To recreate other PVCs, the application will need to be redeployed.
+
+To reset a node:
+
+1. SSH onto the machine. Ensure that the Embedded Cluster binary is still available on that machine.
+
+1. Run the following command to reset the node and automatically reboot the machine to ensure that transient configuration is also reset:
+
+    ```
+    sudo ./APP_SLUG reset
+    ```
+    Where `APP_SLUG` is the unique slug for the application.
+
+    :::note
+    Pass the `--no-prompt` flag to disable interactive prompts. Pass the `--force` flag to ignore any errors encountered during the reset.
+    :::
+
+## Additional Use Cases
+
+This section outlines some additional use cases for Embedded Cluster. These are not officially supported features from Replicated, but are ways of using Embedded Cluster that we or our customers have experimented with that might be useful to you.
+
+### NVIDIA GPU Operator
+
+The NVIDIA GPU Operator uses the operator framework within Kubernetes to automate the management of all NVIDIA software components needed to provision GPUs. For more information about this operator, see the [NVIDIA GPU Operator](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/overview.html) documentation.
+
+You can include the NVIDIA GPU Operator in your release as an additional Helm chart, or as an Embedded Cluster Helm extension. For information about adding Helm extensions, see [extensions](/reference/embedded-config#extensions) in _Embedded Cluster Config_.
+ +Using the NVIDIA GPU Operator with Embedded Cluster requires configuring the containerd options in the operator as follows: + +```yaml +# Embedded Cluster Config + + extensions: + helm: + repositories: + - name: nvidia + url: https://nvidia.github.io/gpu-operator + charts: + - name: gpu-operator + chartname: nvidia/gpu-operator + namespace: gpu-operator + version: "v24.9.1" + values: | + # configure the containerd options + toolkit: + env: + - name: CONTAINERD_CONFIG + value: /etc/k0s/containerd.d/nvidia.toml + - name: CONTAINERD_SOCKET + value: /run/k0s/containerd.sock +``` +When the containerd options are configured as shown above, the NVIDIA GPU Operator automatically creates the required configurations in the `/etc/k0s/containerd.d/nvidia.toml` file. It is not necessary to create this file manually, or modify any other configuration on the hosts. + +:::note +If you include the NVIDIA GPU Operator as a Helm extension, remove any existing containerd services that are running on the host (such as those deployed by Docker) before attempting to install the release with Embedded Cluster. If there are any containerd services on the host, the NVIDIA GPU Operator will generate an invalid containerd config, causing the installation to fail. +::: + +## Troubleshoot with Support Bundles + +<SupportBundleIntro/> + +<EmbeddedClusterSupportBundle/> + +================ +File: docs/vendor/helm-image-registry.mdx +================ +import StepCreds from "../partials/proxy-service/_step-creds.mdx" +import StepCustomDomain from "../partials/proxy-service/_step-custom-domain.mdx" + +# Using the Proxy Registry with Helm Installations + +This topic describes how to use the Replicated proxy registry to proxy images for installations with the Helm CLI. For more information about the proxy registry, see [About the Replicated Proxy Registry](private-images-about). + +## Overview + +With the Replicated proxy registry, each customer's unique license can grant proxy access to images in an external private registry. To enable the proxy registry for Helm installations, you must create a Secret with `type: kubernetes.io/dockerconfigjson` to authenticate with the proxy registry. + +During Helm installations, after customers provide their license ID, a `global.replicated.dockerconfigjson` field that contains a base64 encoded Docker configuration file is automatically injected in the Helm chart values. You can use this `global.replicated.dockerconfigjson` field to create the required pull secret. + +For information about how Kubernetes uses the `kubernetes.io/dockerconfigjson` Secret type to authenticate to a private image registry, see [Pull an Image from a Private Registry](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) in the Kubernetes documentation. + +## Enable the Proxy Registry + +This section describes how to enable the proxy registry for applications deployed with Helm, including how to use the `global.replicated.dockerconfigjson` field that is injected during application deployment to create the required pull secret. + +To enable the proxy registry: + +1. <StepCreds/> + +1. <StepCustomDomain/> + +1. 
In your Helm chart templates, create a Kubernetes Secret to evaluate if the `global.replicated.dockerconfigjson` value is set, and then write the rendered value into a Secret on the cluster: + + ```yaml + # /templates/replicated-pull-secret.yaml + + {{ if .Values.global.replicated.dockerconfigjson }} + apiVersion: v1 + kind: Secret + metadata: + name: replicated-pull-secret + type: kubernetes.io/dockerconfigjson + data: + .dockerconfigjson: {{ .Values.global.replicated.dockerconfigjson }} + {{ end }} + ``` + + :::note + If you use the Replicated SDK, do not use `replicated` for the name of the image pull secret because the SDK automatically creates a Secret named `replicated`. Using the same name causes an error. + ::: + +1. Ensure that you have a field in your Helm chart values file for your image repository URL, and that any references to the image in your Helm chart access the field from your values file. + + **Example**: + + ```yaml + # values.yaml + ... + dockerconfigjson: '{{ .Values.global.replicated.dockerconfigjson }}' + images: + myapp: + # Add image URL in the values file + apiImageRepository: quay.io/my-org/api + apiImageTag: v1.0.1 + ``` + ```yaml + # /templates/deployment.yaml + + apiVersion: apps/v1 + kind: Deployment + metadata: + name: example + spec: + template: + spec: + containers: + - name: api + # Access the apiImageRepository field from the values file + image: {{ .Values.images.myapp.apiImageRepository }}:{{ .Values.images.myapp.apiImageTag }} + ``` + +1. In your Helm chart templates, add the image pull secret that you created to any manifests that reference the private image: + + ```yaml + # /templates/example.yaml + ... + {{ if .Values.global.replicated.dockerconfigjson }} + imagePullSecrets: + - name: replicated-pull-secret + {{ end }} + ``` + + **Example:** + + ```yaml + # /templates/deployment.yaml + ... + image: "{{ .Values.images.myapp.apiImageRepository }}:{{ .Values.images.myapp.apiImageTag }}" + {{ if .Values.global.replicated.dockerconfigjson }} + imagePullSecrets: + - name: replicated-pull-secret + {{ end }} + name: myapp + ports: + - containerPort: 3000 + name: http + ``` + +1. Package your Helm chart and add it to a release. Promote the release to a development channel. See [Managing Releases with Vendor Portal](releases-creating-releases). + +1. Install the chart in a development environment to test your changes: + + 1. Create a local `values.yaml` file to override the default external registry image URL with the URL for the image on `proxy.replicated.com`. + + The proxy registry URL has the following format: `proxy.replicated.com/proxy/APP_SLUG/EXTERNAL_REGISTRY_IMAGE_URL` + + Where: + * `APP_SLUG` is the slug of your Replicated application. + * `EXTERNAL_REGISTRY_IMAGE_URL` is the path to the private image on your external registry. + + **Example** + ```yaml + # A local values.yaml file + ... + images: + myapp: + apiImageRepository: proxy.replicated.com/proxy/my-app/quay.io/my-org/api + apiImageTag: v1.0.1 + + ``` + + :::note + If you configured a custom domain for the proxy registry, use the custom domain instead of `proxy.replicated.com`. For more information, see [Using Custom Domains](custom-domains-using). + ::: + + 1. Log in to the Replicated registry and install the chart, passing the local `values.yaml` file you created with the `--values` flag. See [Installing with Helm](install-with-helm). 
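+
+      For reference, a login and install sequence might look like the following sketch. The app slug `my-app`, channel slug `unstable`, and chart name `my-chart` are placeholders for your own application's details; the customer's email address and license ID are used to log in to the Replicated registry:
+
+      ```bash
+      # Log in to the Replicated registry with the customer's license credentials
+      helm registry login registry.replicated.com \
+        --username customer@example.com \
+        --password LICENSE_ID
+
+      # Install the chart, passing the local values file with the proxied image URLs
+      helm install my-app oci://registry.replicated.com/my-app/unstable/my-chart \
+        --values values.yaml
+      ```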
+
+================
+File: docs/vendor/helm-install-airgap.mdx
+================
+import Prerequisites from "../partials/helm/_helm-install-prereqs.mdx"
+
+# Installing and Updating with Helm in Air Gap Environments
+
+## Overview
+
+Replicated supports installing and updating Helm charts in air gap environments with no outbound internet access. In air gap Helm installations, customers are guided through the process with instructions provided in the [Replicated Download Portal](/vendor/releases-share-download-portal).
+
+When air gap Helm installations are enabled, an **Existing cluster with Helm** option is displayed in the Download Portal on the left nav. When selected, **Existing cluster with Helm** displays three tabs (**Install**, **Manual Update**, **Automate Updates**), as shown in the screenshot below:
+
+![download helm option](/images/download-helm.png)
+
+[View a larger version of this image](/images/download-helm.png)
+
+Each tab provides instructions for how to install, perform a manual update, or configure automatic updates, respectively.
+
+These installation and update instructions assume that your customer is accessing the Download Portal from a workstation that can access the internet and their internal private registry. Direct access to the target cluster is not required.
+
+Each method assumes that your customer is familiar with `curl`, `docker`, `helm`, `kubernetes`, and a bit of `bash`, particularly for automating updates.
+
+## Prerequisites
+
+Before you install, complete the following prerequisites:
+
+* Reach out to your account rep to enable the Helm air gap installation feature.
+
+<Prerequisites/>
+
+## Install
+
+The installation instructions provided in the Download Portal are designed to walk your customer through the first installation of your chart in an air gap environment.
+
+To install with Helm in an air gap environment:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), go to **Customers > [Customer Name] > Reporting**.
+
+1. In the **Download portal** section, click **Visit download portal** to log in to the Download Portal for the customer.
+
+1. In the Download Portal left nav, click **Existing cluster with Helm**.
+
+   ![download helm option](/images/download-helm.png)
+
+   [View a larger version of this image](/images/download-helm.png)
+
+1. On the **Install** tab, in the **App version** dropdown, select the target application version to install.
+
+1. Run the first command to authenticate into the Replicated proxy registry with the customer's credentials (the `license_id`).
+
+1. Under **Get the list of images**, run the command provided to generate the list of images needed to install.
+
+1. For **(Optional) Specify registry URI**, provide the URI for an internal image registry where you want to push images. If a registry URI is provided, Replicated automatically updates the commands for tagging and pushing images with the URI.
+
+1. For **Pull, tag, and push each image to your private registry**, copy and paste the docker commands provided to pull, tag, and push each image to your internal registry.
+
+   :::note
+   If you did not provide a URI in the previous step, ensure that you manually replace the image names in the `tag` and `push` commands with the target registry URI.
+   :::
+
+1. Run the command to authenticate into the OCI registry that contains your Helm chart.
+
+1. Run the command to install the `preflight` plugin.
This allows you to run preflight checks before installing to ensure that the installation environment meets the requirements for the application.
+
+1. For **Download a copy of the values.yaml file** and **Edit the values.yaml file**, run the `helm show values` command provided to download the values file for the Helm chart. Then, edit the values file as needed to customize the configuration of the given chart.
+
+   If you are installing a release that contains multiple Helm charts, repeat these steps to download and edit each values file.
+
+   :::note
+   For installations with multiple charts where two or more of the top-level charts in the release use the same name, ensure that each values file has a unique name to avoid installation errors. For more information, see [Installation Fails for Release With Multiple Helm Charts](helm-install-troubleshooting#air-gap-values-file-conflict) in _Troubleshooting Helm Installations_.
+   :::
+
+1. For **Determine install method**, select one of the options depending on your ability to access the internet and the cluster from your workstation.
+
+1. Use the commands provided and the values file or files that you edited to run preflight checks and then install the release.
+
+## Perform Updates
+
+This section describes the processes of performing manual and automatic updates with Helm in air gap environments using the instructions provided in the Download Portal.
+
+### Manual Updates
+
+The manual update instructions provided in the Download Portal are similar to the installation instructions.
+
+However, the first step prompts the customer to select their current version and the target version to install. This step takes [required releases](/vendor/releases-about#properties) into consideration, thereby guiding the customer to the versions that are upgradable from their current version.
+
+The additional steps are consistent with the installation process until the `preflight` and `install` commands, where customers provide the existing values from the cluster with the `helm get values` command. Your customer will then need to edit the `values.yaml` to reference the new image tags.
+
+If the new version introduces new images or other values, Replicated recommends that you explain this at the top of your release notes so that customers know they will need to make additional edits to the `values.yaml` before installing.
+
+### Automate Updates
+
+The instructions in the Download Portal for automating updates use API endpoints that your customers can automate against.
+
+The instructions in the Download Portal provide customers with example commands that can be put into a script that they run periodically (nightly, weekly) using GitHub Actions, Jenkins, or other platforms.
+
+This method assumes that the customer has already done a successful manual installation, including the configuration of the appropriate `values`.
+
+After logging into the registry, the customer exports their current version and uses that to query an endpoint that provides the latest installable version number (either the next required release, or the latest release) and exports it as the target version. With the target version, they can now query an API for the list of images.
+
+With the list of images, the provided `bash` script automates the process of pulling updated images from the repository, tagging them with a name for an internal registry, and then pushing the newly tagged images to their internal registry.
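+
+For illustration, a minimal sketch of that loop might look like the following, assuming `images.txt` contains the list of image references (one per line) and `REGISTRY` points at the customer's internal registry; the actual script provided in the Download Portal may differ:
+
+```bash
+REGISTRY=registry.internal.example.com
+
+while read -r image; do
+  docker pull "$image"
+  # Strip the source registry host and retag the image for the internal registry
+  docker tag "$image" "$REGISTRY/${image#*/}"
+  docker push "$REGISTRY/${image#*/}"
+done < images.txt
+```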
+
+Unless the customer has set up the `values` to preserve the updated tag (for example, by using the `latest` tag), they need to edit the `values.yaml` to reference the new image tags. After doing so, they can log in to the OCI registry and perform the commands to install the updated chart.
+
+## Use a Harbor or Artifactory Registry Proxy
+
+You can integrate the Replicated proxy registry with an existing Harbor or JFrog Artifactory instance to proxy and cache images on demand. For more information, see [Using a Registry Proxy for Helm Air Gap Installations](using-third-party-registry-proxy).
+
+================
+File: docs/vendor/helm-install-overview.mdx
+================
+import Helm from "../partials/helm/_helm-definition.mdx"
+
+# About Helm Installations with Replicated
+
+This topic provides an introduction to Helm installations for applications distributed with Replicated.
+
+## Overview
+
+<Helm/>
+
+Replicated strongly recommends that all applications are packaged using Helm because many enterprise users expect to be able to install an application with the Helm CLI.
+
+Existing releases in the Replicated Platform that already support installation with Replicated KOTS and Replicated Embedded Cluster (and that include one or more Helm charts) can also be installed with the Helm CLI; it is not necessary to create and manage separate releases or channels for each installation method.
+
+For information about how to install with Helm, see:
+* [Installing with Helm](/vendor/install-with-helm)
+* [Installing and Updating with Helm in Air Gap Environments (Alpha)](helm-install-airgap)
+
+The following diagram shows how Helm charts distributed with Replicated are installed with Helm in online (internet-connected) customer environments:
+
+<img src="/images/helm-install-diagram.png" alt="diagram of a helm chart in a custom environment" width="700px"/>
+
+[View a larger version of this image](/images/helm-install-diagram.png)
+
+As shown in the diagram above, when a release containing one or more Helm charts is promoted to a channel, the Replicated Vendor Portal automatically extracts any Helm charts included in the release. These charts are pushed as OCI objects to the Replicated registry. The Replicated registry is a private OCI registry hosted by Replicated at `registry.replicated.com`. For information about security for the Replicated registry, see [Replicated Registry Security](packaging-private-registry-security).
+
+For example, if your application in the Vendor Portal is named My App and you promote a release containing a Helm chart with `name: my-chart` to a channel with the slug `beta`, then the Vendor Portal pushes the chart to the following location: `oci://registry.replicated.com/my-app/beta/my-chart`.
+
+Customers can install your Helm chart by first logging in to the Replicated registry with their unique license ID. This step ensures that any customer who installs your chart from the registry has a valid, unexpired license. After the customer logs in to the Replicated registry, they can run `helm install` to install the chart from the registry.
+
+During installation, the Replicated registry injects values into the `global.replicated` key of the parent Helm chart's values file. For more information about the values schema, see [Helm global.replicated Values Schema](helm-install-values-schema).
+
+## Limitations
+
+Helm installations have the following limitations:
+
+* Installing with Helm in air gap environments is a Beta feature.
For more information, see [Installing and Updating with Helm in Air Gap Environments](/vendor/helm-install-airgap).
+* Helm CLI installations do not provide access to any of the features of the Replicated KOTS installer, such as:
+  * The KOTS Admin Console
+  * Strict preflight checks that block installation
+  * Backup and restore with snapshots
+  * Required releases with the **Prevent this release from being skipped during upgrades** option
+
+================
+File: docs/vendor/helm-install-release.md
+================
+import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx"
+import RegistryLogout from "../partials/replicated-sdk/_registry-logout.mdx"
+import HelmPackage from "../partials/helm/_helm-package.mdx"
+
+# Packaging a Helm Chart for a Release
+
+This topic describes how to package a Helm chart and the Replicated SDK into a chart archive that can be added to a release.
+
+## Overview
+
+To add a Helm chart to a release, you first add the Replicated SDK as a dependency of the Helm chart and then package the chart and its dependencies as a `.tgz` chart archive.
+
+The Replicated SDK is a Helm chart that can be installed as a small service alongside your application. The SDK provides access to key Replicated features, such as support for collecting custom metrics on application instances. For more information, see [About the Replicated SDK](replicated-sdk-overview).
+
+## Requirements and Recommendations
+
+This section includes requirements and recommendations for Helm charts.
+
+### Chart Version Requirement
+
+The chart version in your Helm chart must comply with image tag format requirements. A valid tag can contain only lowercase and uppercase letters, digits, underscores, periods, and dashes.
+
+The chart version must also comply with the Semantic Versioning (SemVer) specification. When you run the `helm install` command without the `--version` flag, Helm retrieves the list of all available image tags for the chart from the registry and compares them using the SemVer comparison rules described in the SemVer specification. The version that is installed is the version with the largest tag value. For more information about the SemVer specification, see the [Semantic Versioning](https://semver.org) documentation.
+
+### Chart Naming
+
+For releases that contain more than one Helm chart, Replicated recommends that you use unique names for each top-level Helm chart in the release. This aligns with Helm best practices and also avoids potential conflicts in filenames during installation that could cause the installation to fail. For more information, see [Installation Fails for Release With Multiple Helm Charts](helm-install-troubleshooting#air-gap-values-file-conflict) in _Troubleshooting Helm Installations_.
+
+### Helm Best Practices
+
+Replicated recommends that you review the [Best Practices](https://helm.sh/docs/chart_best_practices/) guide in the Helm documentation to ensure that your Helm chart or charts follow the required and recommended conventions.
+
+## Package a Helm Chart {#release}
+
+This procedure shows how to create a Helm chart archive to add to a release. For more information about the Helm CLI commands in this procedure, see the [Helm Commands](https://helm.sh/docs/helm/helm/) section in the Helm documentation.
+
+To package a Helm chart so that it can be added to a release:
+
+1. In your application Helm chart `Chart.yaml` file, add the YAML below to declare the SDK as a dependency.
If your application is installed as multiple charts, declare the SDK as a dependency of the chart that customers install first. Do not declare the SDK in more than one chart.
+
+   <DependencyYaml/>
+
+   For additional guidelines related to adding the SDK as a dependency, see [Install the SDK as a Subchart](replicated-sdk-installing#install-the-sdk-as-a-subchart) in _Installing the Replicated SDK_.
+
+1. Update dependencies and package the chart as a `.tgz` file:
+
+   <HelmPackage/>
+
+   :::note
+   <RegistryLogout/>
+   :::
+
+1. Add the `.tgz` file to a release. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Managing Releases with the CLI](releases-creating-cli).
+
+   After the release is promoted, your Helm chart is automatically pushed to the Replicated registry. For information about how to install a release with the Helm CLI, see [Installing with Helm](install-with-helm). For information about how to install Helm charts with KOTS, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about).
+
+================
+File: docs/vendor/helm-install-troubleshooting.mdx
+================
+# Troubleshooting Helm Installations with Replicated
+
+This topic provides troubleshooting information for common issues related to performing installations and upgrades with the Helm CLI.
+
+## Installation Fails for Release With Multiple Helm Charts {#air-gap-values-file-conflict}
+
+#### Symptom
+
+When installing a release with multiple Helm charts, the installation fails. You might also see the following error message:
+
+```
+Error: INSTALLATION FAILED: cannot re-use a name that is still in use
+```
+
+#### Cause
+
+In the Download Portal, each chart's values file is named according to the chart's name. For example, the values file for the Helm chart Gitea would be named `gitea-values.yaml`.
+
+If any top-level charts in the release use the same name, the associated values files are also assigned the same name. This causes each new values file downloaded with the `helm show values` command to overwrite any previously downloaded values file of the same name.
+
+#### Solution
+
+Replicated recommends that you use unique names for top-level Helm charts in the same release.
+
+Alternatively, if a release contains charts that must use the same name, convert one or both of the charts into subcharts and use Helm conditions to differentiate them. See [Conditions and Tags](https://helm.sh/docs/chart_best_practices/dependencies/#conditions-and-tags) in the Helm documentation.
+
+================
+File: docs/vendor/helm-install-values-schema.mdx
+================
+import SdkValues from "../partials/replicated-sdk/_sdk-values.mdx"
+
+# Helm global.replicated Values Schema
+
+This topic describes the `global.replicated` values that are injected in the values file of an application's parent Helm chart during Helm installations with Replicated.
+
+## Overview
+
+When a user installs a Helm application with the Helm CLI, the Replicated registry injects a set of customer-specific values into the `global.replicated` key of the parent Helm chart's values file.
+
+The values in the `global.replicated` field include the following:
+
+* The fields in the customer's license, such as the field names, descriptions, signatures, values, and any custom license fields that you define. Vendors can use this license information to check entitlements before the application is installed.
For more information, see [Checking Entitlements in Helm Charts Before Deployment](/vendor/licenses-reference-helm).
+
+* A base64 encoded Docker configuration file. To proxy images from an external private registry with the Replicated proxy registry, you can use the `global.replicated.dockerconfigjson` field to create an image pull secret for the proxy registry. For more information, see [Proxying Images for Helm Installations](/vendor/helm-image-registry).
+
+The following is an example of a Helm values file containing the `global.replicated` values:
+
+```yaml
+# Helm values.yaml
+global:
+  replicated:
+    channelName: Stable
+    customerEmail: username@example.com
+    customerName: Example Customer
+    dockerconfigjson: eyJhdXRocyI6eyJd1dIRk5NbEZFVGsxd2JGUmFhWGxYWm5scloyNVRSV1pPT2pKT2NGaHhUVEpSUkU1...
+    licenseFields:
+      expires_at:
+        description: License Expiration
+        name: expires_at
+        signature:
+          v1: iZBpESXx7fpdtnbMKingYHiJH42rP8fPs0x8izy1mODckGBwVoA...
+        title: Expiration
+        value: "2023-05-30T00:00:00Z"
+        valueType: String
+    licenseID: YiIXRTjiB7R...
+    licenseType: dev
+```
+
+## `global.replicated` Values Schema
+
+The `global.replicated` values schema contains the following fields:
+
+| Field | Type | Description |
+| --- | --- | --- |
+| `channelName` | String | The name of the release channel |
+| `customerEmail` | String | The email address of the customer |
+| `customerName` | String | The name of the customer |
+| `dockerconfigjson` | String | Base64 encoded docker config json for pulling images |
+| `licenseFields` | Object | A list containing each license field in the customer's license. Each element under `licenseFields` has the following properties: `description`, `signature`, `title`, `value`, `valueType`. `expires_at` is the default license field that all licenses include. Other elements under `licenseFields` include the custom license fields added by vendors in the Vendor Portal. For more information, see [Managing Customer License Fields](/vendor/licenses-adding-custom-fields). |
+| `licenseFields.[FIELD_NAME].description` | String | Description of the license field |
+| `licenseFields.[FIELD_NAME].signature.v1` | Object | Signature of the license field |
+| `licenseFields.[FIELD_NAME].title` | String | Title of the license field |
+| `licenseFields.[FIELD_NAME].value` | String | Value of the license field |
+| `licenseFields.[FIELD_NAME].valueType` | String | Type of the license field value |
+| `licenseID` | String | The unique identifier for the license |
+| `licenseType` | String | The type of license, such as "dev" or "prod". For more information, see [Customer Types](/vendor/licenses-about#customer-types) in _About Customers and Licensing_.
| + +## Replicated SDK Helm Values + +<SdkValues/> + +================ +File: docs/vendor/helm-native-about.mdx +================ +import GitOpsLimitation from "../partials/helm/_gitops-limitation.mdx" +import GitOpsNotRecommended from "../partials/gitops/_gitops-not-recommended.mdx" +import TemplateLimitation from "../partials/helm/_helm-template-limitation.mdx" +import VersionLimitation from "../partials/helm/_helm-version-limitation.mdx" +import HooksLimitation from "../partials/helm/_hooks-limitation.mdx" +import HookWeightsLimitation from "../partials/helm/_hook-weights-limitation.mdx" +import Deprecated from "../partials/helm/_replicated-deprecated.mdx" +import KotsHelmCrDescription from "../partials/helm/_kots-helm-cr-description.mdx" +import ReplicatedHelmMigration from "../partials/helm/_replicated-helm-migration.mdx" +import Helm from "../partials/helm/_helm-definition.mdx" + +# About Distributing Helm Charts with KOTS + +This topic provides an overview of how Replicated KOTS deploys Helm charts, including an introduction to the KOTS HelmChart custom resource, limitations of deploying Helm charts with KOTS, and more. + +## Overview + +<Helm/> + +KOTS can install applications that include: +* One or more Helm charts +* More than a single instance of any chart +* A combination of Helm charts and Kubernetes manifests + +Replicated strongly recommends that all applications are packaged as Helm charts because many enterprise users expect to be able to install an application with the Helm CLI. + +Deploying Helm charts with KOTS provides additional functionality not directly available with the Helm CLI, such as: +* The KOTS Admin Console +* Backup and restore with snapshots +* Support for air gap installations +* Support for embedded cluster installations on VMs or bare metal servers + +Additionally, for applications packaged as Helm charts, you can support Helm CLI and KOTS installations from the same release without having to maintain separate sets of Helm charts and application manifests. The following diagram demonstrates how a single release containing one or more Helm charts can be installed using the Helm CLI and KOTS: + +<img src="/images/helm-kots-install-options.png" width="650px" alt="One release being installed into three different customer environments"/> + +[View a larger version of this image](/images/helm-kots-install-options.png) + +For a tutorial that demonstrates how to add a sample Helm chart to a release and then install the release using both KOTS and the Helm CLI, see [Install a Helm Chart with KOTS and the Helm CLI](/vendor/tutorial-kots-helm-setup). + +## How KOTS Deploys Helm Charts + +This section describes how KOTS uses the HelmChart custom resource to deploy Helm charts. + +### About the HelmChart Custom Resource + +<KotsHelmCrDescription/> + +The HelmChart custom resource with `apiVersion: kots.io/v1beta2` (HelmChart v2) is supported with KOTS v1.99.0 and later. For more information, see [About the HelmChart kots.io/v1beta2 Installation Method](#v2-install) below. + +KOTS versions earlier than v1.99.0 can install Helm charts with `apiVersion: kots.io/v1beta1` of the HelmChart custom resource. The `kots.io/v1beta1` HelmChart custom resource is deprecated. For more information, see [Deprecated HelmChart kots.io/v1beta1 Installation Methods](#deprecated-helmchart-kotsiov1beta1-installation-methods) below. 
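+
+For reference, a minimal `kots.io/v1beta2` HelmChart custom resource might look like the following sketch, where the chart name, chart version, and release name are placeholders for your own chart's details:
+
+```yaml
+apiVersion: kots.io/v1beta2
+kind: HelmChart
+metadata:
+  name: samplechart
+spec:
+  # chart identifies the Helm chart in the release by name and version
+  chart:
+    name: samplechart
+    chartVersion: 3.1.7
+  # releaseName sets the Helm release name used at install time
+  releaseName: samplechart-release
+  # values are merged into the chart's values.yaml during installation
+  values: {}
+```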
+
+### About the HelmChart v2 Installation Method {#v2-install}
+
+When you include a HelmChart custom resource with `apiVersion: kots.io/v1beta2` in a release, KOTS v1.99.0 or later does a Helm install or upgrade of the associated Helm chart directly.
+
+The `kots.io/v1beta2` HelmChart custom resource does _not_ modify the chart during installation. This results in Helm chart installations that are consistent, reliable, and easy to troubleshoot. For example, you can reproduce the exact installation outside of KOTS by downloading a copy of the application files from the cluster with `kots download`, then using those files to install with `helm install`. And, you can use `helm get values` to view the values that were used to install.
+
+The `kots.io/v1beta2` HelmChart custom resource requires configuration. For more information, see [Configuring the HelmChart Custom Resource v2](helm-native-v2-using).
+
+For information about the fields and syntax of the HelmChart custom resource, see [HelmChart v2](/reference/custom-resource-helmchart-v2).
+
+### Limitations
+
+The following limitations apply when deploying Helm charts with the `kots.io/v1beta2` HelmChart custom resource:
+
+* Available only for Helm v3.
+
+* Available only for KOTS v1.99.0 and later.
+
+* The rendered manifests shown in the `rendered` directory might not reflect the final manifests that will be deployed to the cluster. This is because the manifests in the `rendered` directory are generated using `helm template`, which is not run with cluster context. So values returned by the `lookup` function and the built-in `Capabilities` object might differ.
+
+* When updating the HelmChart custom resource in a release from `kots.io/v1beta1` to `kots.io/v1beta2`, the diff viewer shows a large diff because the underlying file structure of the rendered manifests is different.
+
+* Editing downstream Kustomization files to make changes to the application before deploying is not supported. This is because KOTS does not use Kustomize when installing Helm charts with the `kots.io/v1beta2` HelmChart custom resource. For more information about patching applications with Kustomize, see [Patching with Kustomize](/enterprise/updating-patching-with-kustomize).
+
+* <GitOpsLimitation/>
+
+  <GitOpsNotRecommended/>
+
+  For more information, see [KOTS Auto-GitOps Workflow](/enterprise/gitops-workflow).
+
+## Support for Helm Hooks {#hooks}
+
+KOTS supports the following hooks for Helm charts:
+* `pre-install`: Executes after resources are rendered but before any resources are installed.
+* `post-install`: Executes after resources are installed.
+* `pre-upgrade`: Executes after resources are rendered but before any resources are upgraded.
+* `post-upgrade`: Executes after resources are upgraded.
+* `pre-delete`: Executes before any resources are deleted.
+* `post-delete`: Executes after resources are deleted.
+
+The following limitations apply to using hooks with Helm charts deployed by KOTS:
+
+* <HooksLimitation/>
+
+* <HookWeightsLimitation/>
+
+For more information about Helm hooks, see [Chart Hooks](https://helm.sh/docs/topics/charts_hooks/) in the Helm documentation.
+
+## Air Gap Installations
+
+KOTS supports installation of Helm charts into air gap environments with configuration of the HelmChart custom resource [`builder`](/reference/custom-resource-helmchart-v2#builder) key. The `builder` key specifies the Helm values to use when building the air gap bundle for the application.
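+
+For example, a `builder` sketch like the following could be used to enable an optional component at bundle-build time so that its images are included in the air gap bundle (the `postgresql.enabled` value is a placeholder for your chart's own values):
+
+```yaml
+# kots.io/v1beta2 HelmChart custom resource
+
+apiVersion: kots.io/v1beta2
+kind: HelmChart
+metadata:
+  name: samplechart
+spec:
+  ...
+  builder:
+    postgresql:
+      enabled: true
+```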
+
+For more information about how to configure the `builder` key to support air gap installations, see [Packaging Air Gap Bundles for Helm Charts](/vendor/helm-packaging-airgap-bundles).
+
+## Resource Deployment Order
+
+When installing an application that includes one or more Helm charts, KOTS always deploys standard Kubernetes manifests to the cluster _before_ deploying any Helm charts. For example, if your release contains a Helm chart, a CRD, and a ConfigMap, then the CRD and ConfigMap resources are deployed before the Helm chart.
+
+For information about how to set the deployment order for Helm charts with KOTS, see [Orchestrating Resource Deployment](/vendor/orchestrating-resource-deployment).
+
+## Deprecated HelmChart kots.io/v1beta1 Installation Methods
+
+This section describes the deprecated Helm chart installation methods that use the HelmChart custom resource `apiVersion: kots.io/v1beta1`.
+
+:::important
+<Deprecated/>
+:::
+
+### useHelmInstall: true {#v1beta1}
+
+:::note
+This method was previously referred to as _Native Helm_.
+:::
+
+When you include version `kots.io/v1beta1` of the HelmChart custom resource with `useHelmInstall: true`, KOTS uses Kustomize to render the chart with configuration values, license field values, and rewritten image names. KOTS then packages the resulting manifests into a new Helm chart to install. For more information about Kustomize, see the [Kustomize documentation](https://kubectl.docs.kubernetes.io/).
+
+The following diagram shows how KOTS processes Helm charts for deployment with the `kots.io/v1beta1` method:
+
+![Flow chart of a v1beta1 Helm chart deployment to a cluster](/images/native-helm-flowchart.png)
+
+[View a larger image](/images/native-helm-flowchart.png)
+
+As shown in the diagram above, when given a Helm chart, KOTS:
+
+- Uses Kustomize to merge instructions from KOTS and the end user to chart resources (see steps 2 - 4 below)
+- Packages the resulting manifest files into a new Helm chart (see step 5 below)
+- Deploys the new Helm chart (see step 5 below)
+
+To deploy Helm charts with version `kots.io/v1beta1` of the HelmChart custom resource, KOTS does the following:
+
+1. **Checks for previous installations of the chart**: If the Helm chart has already been deployed with a HelmChart custom resource that has `useHelmInstall: false`, then KOTS does not attempt to install the chart. The following error message is displayed if this check fails: `Deployment method for chart <chart_name> has changed`. For more information, see [HelmChart kots.io/v1beta1 (useHelmInstall: false)](#v1beta1-false) below.
+
+1. **Writes base files**: KOTS extracts Helm manifests, renders them with Replicated templating, and then adds all files from the original Helm tarball to a `base/charts/` directory.
+
+   Under `base/charts/`, KOTS adds a Kustomization file named `kustomization.yaml` in the directories for each chart and subchart. KOTS uses these Kustomization files later in the deployment process to merge instructions from Kustomize to the chart resources. For more information about Kustomize, see the [Kustomize website](https://kustomize.io).
+
+   The following screenshot from the Replicated Admin Console shows a `base/charts/` directory for a deployed application.
The `base/charts/` directory contains a Helm chart named postgresql with one subchart: + + ![Base directory in the Admin Console](/images/native-helm-base.png) + + In the screenshot above, a Kustomization file that targets the resources from the postgresql Helm chart appears in the `base/charts/postgresql/` directory: + + ```yaml + apiVersion: kustomize.config.k8s.io/v1beta1 + kind: Kustomization + resources: + - secrets.yaml + - statefulset.yaml + - svc-headless.yaml + - svc.yaml + ``` + +1. **Writes midstream files with Kustomize instructions from KOTS**: KOTS then copies the directory structure from `base/charts/` to an `overlays/midstream/charts/` directory. The following screenshot shows an example of the midstream directory for the postgresql Helm chart: + + ![Midstream directory in the Admin Console UI](/images/native-helm-midstream.png) + + As shown in the screenshot above, the midstream directory also contains a Kustomization file with instructions from KOTS for all deployed resources, such as image pull secrets, image rewrites, and backup labels. For example, in the midstream Kustomization file, KOTS rewrites any private images to pull from the Replicated proxy registry. + + The following shows an example of a midstream Kustomization file for the postgresql Helm chart: + + ```yaml + apiVersion: kustomize.config.k8s.io/v1beta1 + bases: + - ../../../../base/charts/postgresql + commonAnnotations: + kots.io/app-slug: helm-test + images: + - name: gcr.io/replicated-qa/postgresql + newName: proxy.replicated.com/proxy/helm-test/gcr.io/replicated-qa/postgresql + kind: Kustomization + patchesStrategicMerge: + - pullsecrets.yaml + resources: + - secret.yaml + transformers: + - backup-label-transformer.yaml + ``` + + As shown in the example above, all midstream Kustomization files have a `bases` entry that references the corresponding Kustomization file from the `base/charts/` directory. + +1. **Writes downstream files for end user Kustomize instructions**: KOTS then creates an `overlays/downstream/this-cluster/charts` directory and again copies the directory structure of `base/charts/` to this downstream directory: + + ![Downstream directory in the Admin Console UI](/images/native-helm-downstream.png) + + As shown in the screenshot above, each chart and subchart directory in the downstream directory also contains a Kustomization file. These downstream Kustomization files contain only a `bases` entry that references the corresponding Kustomization file from the midstream directory. For example: + + ```yaml + apiVersion: kustomize.config.k8s.io/v1beta1 + bases: + - ../../../../midstream/charts/postgresql + kind: Kustomization + ``` + + End users can edit the downstream Kustomization files to make changes before deploying the application. Any instructions that users add to the Kustomization files in the downstream directory take priority over midstream and base Kustomization files. For more information about how users can make changes before deploying, see [Patching with Kustomize](/enterprise/updating-patching-with-kustomize). + +1. **Deploys the Helm chart**: KOTS runs `kustomize build` for any Kustomization files in the `overlays/downstream/charts` directory. KOTS then packages the resulting manifests into a new chart for Helm to consume. + + Finally, KOTS runs `helm upgrade -i <release-name> <chart> --timeout 3600s -n <namespace>`. 
The Helm binary processes hooks and weights, applies manifests to the Kubernetes cluster, and saves a release secret similar to `sh.helm.release.v1.chart-name.v1`. Helm uses this secret to track upgrades and rollbacks of applications.
+
+### useHelmInstall: false {#v1beta1-false}
+
+:::note
+This method was previously referred to as _Replicated Helm_.
+:::
+
+When you use version `kots.io/v1beta1` of the HelmChart custom resource with `useHelmInstall: false`, KOTS renders the Helm templates and deploys them as standard Kubernetes manifests using `kubectl apply`. KOTS also has additional functionality for specific Helm hooks. For example, when KOTS encounters an upstream Helm chart with a `helm.sh/hook-delete-policy` annotation, it automatically adds the same `kots.io/hook-delete-policy` to the Job object.
+
+The resulting deployment is composed of standard Kubernetes manifests. Therefore, cluster operators can view the exact differences between what is currently deployed and what an update will deploy.
+
+### Limitations {#replicated-helm-limitations}
+
+This section lists the limitations for version `kots.io/v1beta1` of the HelmChart custom resource.
+
+#### kots.io/v1beta1 (useHelmInstall: true) Limitations
+
+The following limitations apply when using version `kots.io/v1beta1` of the HelmChart custom resource with `useHelmInstall: true`:
+
+* <Deprecated/>
+
+* Available only for Helm v3.
+
+* <GitOpsLimitation/>
+
+  For more information, see [KOTS Auto-GitOps Workflow](/enterprise/gitops-workflow).
+
+* <HooksLimitation/>
+
+* <HookWeightsLimitation/>
+
+* <TemplateLimitation/>
+
+* <VersionLimitation/>
+
+  For more information, see [helmVersion](/reference/custom-resource-helmchart#helmversion) in _HelmChart_.
+
+#### kots.io/v1beta1 (useHelmInstall: false) Limitations {#v1beta1-false-limitations}
+
+The following limitations apply when using version `kots.io/v1beta1` of the HelmChart custom resource with `useHelmInstall: false`:
+
+* <ReplicatedHelmMigration/>
+
+* <TemplateLimitation/>
+
+* <VersionLimitation/>
+
+  For more information, see [helmVersion](/reference/custom-resource-helmchart#helmversion) in _HelmChart_.
+
+================
+File: docs/vendor/helm-native-v2-using.md
+================
+import KotsHelmCrDescription from "../partials/helm/_kots-helm-cr-description.mdx"
+
+# Configuring the HelmChart Custom Resource v2
+
+This topic describes how to configure the Replicated HelmChart custom resource version `kots.io/v1beta2` to support Helm chart installations with Replicated KOTS.
+
+## Workflow
+
+To support Helm chart installations with the KOTS `kots.io/v1beta2` HelmChart custom resource, do the following:
+1. Rewrite image names to use the Replicated proxy registry. See [Rewrite Image Names](#rewrite-image-names).
+1. Inject a KOTS-generated image pull secret that grants proxy access to private images. See [Inject Image Pull Secrets](#inject-image-pull-secrets).
+1. Add a pull secret for any Docker Hub images that could be rate limited. See [Add Pull Secret for Rate-Limited Docker Hub Images](#docker-secret).
+1. Configure the `builder` key to allow your users to push images to their own local registries. See [Support Local Image Registries](#local-registries).
+1. (KOTS Existing Cluster and kURL Installations Only) Add backup labels to your resources to support backup and restore with the KOTS snapshots feature. See [Add Backup Labels for Snapshots](#add-backup-labels-for-snapshots).
+
+   :::note
+   Snapshots are not supported for installations with Replicated Embedded Cluster. For more information about configuring disaster recovery for Embedded Cluster, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery).
+   :::
+
+## Task 1: Rewrite Image Names {#rewrite-image-names}
+
+Configure the KOTS HelmChart custom resource `values` key so that KOTS rewrites the names for both private and public images in your Helm values during deployment. This allows images to be accessed at one of the following locations, depending on where they were pushed:
+* The [Replicated proxy registry](private-images-about) (`proxy.replicated.com` or your custom domain)
+* A public image registry
+* Your customer's local registry
+* The built-in registry used in Replicated Embedded Cluster or Replicated kURL installations in air-gapped environments
+
+You will use the following KOTS template functions to conditionally rewrite image names depending on where the given image should be accessed:
+* [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry): Returns true if the installation environment is configured to use a local image registry. HasLocalRegistry is always true in air gap installations. HasLocalRegistry is also true in online installations if the user configured a local private registry.
+* [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost): Returns the host of the local registry that the user configured. Alternatively, for air gap installations with Embedded Cluster or kURL, LocalRegistryHost returns the host of the built-in registry.
+* [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace): Returns the namespace of the local registry that the user configured. Alternatively, for air gap installations with Embedded Cluster or kURL, LocalRegistryNamespace returns the namespace of the built-in registry.
+
+  <details>
+  <summary>What is the registry namespace?</summary>
+
+  The registry namespace is the path between the registry and the image name. For example, `images.mycompany.com/namespace/image:tag`.
+  </details>
+
+### Task 1a: Rewrite Private Image Names
+
+For any private images used by your application, configure the HelmChart custom resource so that image names are rewritten to either the Replicated proxy registry (for online installations) or to the local registry in the user's installation environment (for air gap installations or online installations where the user configured a local registry).
+
+To rewrite image names to the location of the image in the proxy registry, use the format `<proxy-domain>/proxy/<app-slug>/<image>`, where:
+* `<proxy-domain>` is `proxy.replicated.com` or your custom domain. For more information about configuring a custom domain for the proxy registry, see [Using Custom Domains](/vendor/custom-domains-using).
+* `<app-slug>` is the unique application slug in the Vendor Portal.
+* `<image>` is the path to the image in your registry.
+
+For example, if the private image is `quay.io/my-org/nginx:v1.0.1` and `images.mycompany.com` is the custom proxy registry domain, then the image name should be rewritten to `images.mycompany.com/proxy/my-app-slug/quay.io/my-org/nginx:v1.0.1`.
+
+For more information, see the example below.
+
+#### Example
+
+The following HelmChart custom resource uses the KOTS [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry), [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost), and [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace) template functions to conditionally rewrite an image registry and repository depending on whether a local registry is used:
+
+```yaml
+# kots.io/v1beta2 HelmChart custom resource
+
+apiVersion: kots.io/v1beta2
+kind: HelmChart
+metadata:
+  name: samplechart
+spec:
+  ...
+  values:
+    image:
+      # If a registry is configured by the user or by Embedded Cluster/kURL, use that registry's hostname
+      # Else use proxy.replicated.com or your custom proxy registry domain
+      registry: '{{repl HasLocalRegistry | ternary LocalRegistryHost "images.mycompany.com" }}'
+      # If a registry is configured by the user or by Embedded Cluster/kURL, use that registry namespace
+      # Else use the image's namespace at the proxy registry domain
+      repository: '{{repl HasLocalRegistry | ternary LocalRegistryNamespace "proxy/my-app/quay.io/my-org" }}/nginx'
+      tag: v1.0.1
+```
+
+The `spec.values.image.registry` and `spec.values.image.repository` fields in the HelmChart custom resource above correspond to `image.registry` and `image.repository` fields in the Helm chart `values.yaml` file, as shown below:
+
+```yaml
+# Helm chart values.yaml file
+
+image:
+  registry: quay.io
+  repository: my-org/nginx
+  tag: v1.0.1
+```
+
+During installation, KOTS renders the template functions and sets the `image.registry` and `image.repository` fields in the Helm chart `values.yaml` file based on the value of the corresponding fields in the HelmChart custom resource.
+
+Any templates in the Helm chart that access the `image.registry` and `image.repository` fields are updated to use the appropriate value, as shown in the example below:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: nginx
+spec:
+  containers:
+    - name: nginx
+      image: {{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}
+```
+
+### Task 1b: Rewrite Public Image Names
+
+For any public images used by your application, configure the HelmChart custom resource so that image names are rewritten to either the location of the image in the public registry (for online installations) or the local registry (for air gap installations or online installations where the user configured a local registry).
+
+For more information, see the example below.
+
+#### Example
+
+The following HelmChart custom resource uses the KOTS [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry), [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost), and [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace) template functions to conditionally rewrite an image registry and repository depending on whether a local registry is used:
+
+```yaml
+# kots.io/v1beta2 HelmChart custom resource
+
+apiVersion: kots.io/v1beta2
+kind: HelmChart
+metadata:
+  name: samplechart
+spec:
+  ...
+  values:
+    image:
+      # If a local registry is used, use that registry's hostname
+      # Else, use the public registry host (ghcr.io)
+      registry: '{{repl HasLocalRegistry | ternary LocalRegistryHost "ghcr.io" }}'
+      # If a local registry is used, use the registry namespace provided
+      # Else, use the path to the image in the public registry
+      repository: '{{repl HasLocalRegistry | ternary LocalRegistryNamespace "cloudnative-pg" }}/cloudnative-pg'
+      tag: catalog-1.24.0
+```
+
+The `spec.values.image.registry` and `spec.values.image.repository` fields in the HelmChart custom resource correspond to `image.registry` and `image.repository` fields in the Helm chart `values.yaml` file, as shown in the example below:
+
+```yaml
+# Helm chart values.yaml file
+
+image:
+  registry: ghcr.io
+  repository: cloudnative-pg/cloudnative-pg
+  tag: catalog-1.24.0
+```
+
+During installation, KOTS renders the template functions and sets the `image.registry` and `image.repository` fields in your Helm chart `values.yaml` file based on the value of the corresponding fields in the HelmChart custom resource. Any templates in the Helm chart that access the `image.registry` and `image.repository` fields are updated to use the appropriate value, as shown in the example below:
+
+```yaml
+apiVersion: v1
+kind: Pod
+spec:
+  containers:
+    - name: cloudnative-pg
+      image: {{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}
+```
+
+## Task 2: Inject Image Pull Secrets {#inject-image-pull-secrets}
+
+Kubernetes requires a Secret of type `kubernetes.io/dockerconfigjson` to authenticate with a registry and pull a private image. When you reference a private image in a Pod definition, you also provide the name of the Secret in an `imagePullSecrets` key in the Pod definition. For more information, see [Specifying imagePullSecrets on a Pod](https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod) in the Kubernetes documentation.
+
+During installation, KOTS creates a `kubernetes.io/dockerconfigjson` type Secret that is based on the customer license. This pull secret grants access to private images through the Replicated proxy registry or the Replicated registry. Additionally, if the user configured a local image registry, then the pull secret contains the credentials for the local registry. You must provide the name of this KOTS-generated pull secret in any Pod definitions that reference the private image.
+
+You can inject the name of this pull secret into a field in the HelmChart custom resource using the Replicated ImagePullSecretName template function. During installation, KOTS sets the value of the corresponding field in your Helm chart `values.yaml` file with the rendered value of the ImagePullSecretName template function.
+ +#### Example + +The following example shows a `spec.values.image.pullSecrets` array in the HelmChart custom resource that uses the ImagePullSecretName template function to inject the name of the KOTS-generated pull secret: + +```yaml +# kots.io/v1beta2 HelmChart custom resource + +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: samplechart +spec: + values: + image: + # Note: Use proxy.replicated.com or your custom domain + registry: '{{repl HasLocalRegistry | ternary LocalRegistryHost "proxy.replicated.com" }}' + repository: '{{repl HasLocalRegistry | ternary LocalRegistryNamespace "proxy/my-app/ecr.us-east-1.amazonaws.com/my-org" }}/api' + pullSecrets: + - name: '{{repl ImagePullSecretName }}' +``` + +The `spec.values.image.pullSecrets` array in the HelmChart custom resource corresponds to a `image.pullSecrets` array in the Helm chart `values.yaml` file, as shown in the example below: + +```yaml +# Helm chart values.yaml file + +image: + registry: ecr.us-east-1.amazonaws.com + repository: my-org/api/nginx + pullSecrets: + - name: my-org-secret +``` + +During installation, KOTS renders the ImagePullSecretName template function and adds the rendered pull secret name to the `image.pullSecrets` array in the Helm chart `values.yaml` file. + +Any templates in the Helm chart that access the `image.pullSecrets` field are updated to use the name of the KOTS-generated pull secret, as shown in the example below: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx +spec: + containers: + - name: nginx + image: {{ .Values.image.registry }}/{{ .Values.image.repository }} + {{- with .Values.image.pullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 2 }} + {{- end }} +``` + +## Task 3: Add Pull Secret for Rate-Limited Docker Hub Images {#docker-secret} + +Docker Hub enforces rate limits for Anonymous and Free users. To avoid errors caused by reaching the rate limit, your users can run the `kots docker ensure-secret` command, which creates an `<app-slug>-kotsadm-dockerhub` secret for pulling Docker Hub images and applies the secret to Kubernetes manifests that have images. For more information, see [Avoiding Docker Hub Rate Limits](/enterprise/image-registry-rate-limits). + +If you are deploying a Helm chart with Docker Hub images that could be rate limited, to support the use of the `kots docker ensure-secret` command, any Pod definitions in your Helm chart templates that reference the rate-limited image must be updated to access the `<app-slug>-kotsadm-dockerhub` pull secret, where `<app-slug>` is your application slug. For more information, see [Get the Application Slug](/vendor/vendor-portal-manage-app#slug). + +You can do this by adding the `<app-slug>-kotsadm-dockerhub` pull secret to a field in the `values` key of the HelmChart custom resource, along with a matching field in your Helm chart `values.yaml` file. During installation, KOTS sets the value of the matching field in the `values.yaml` file with the `<app-slug>-kotsadm-dockerhub` pull secret, and any Helm chart templates that access the value are updated. + +For more information about Docker Hub rate limiting, see [Understanding Docker Hub rate limiting](https://www.docker.com/increase-rate-limits) on the Docker website. 
+
+#### Example
+
+The following Helm chart `values.yaml` file includes `image.registry`, `image.repository`, and `image.pullSecrets` for a rate-limited Docker Hub image:
+
+```yaml
+# Helm chart values.yaml file
+
+image:
+  registry: docker.io
+  repository: my-org/example-docker-hub-image
+  pullSecrets: []
+```
+
+The following HelmChart custom resource includes `spec.values.image.registry`, `spec.values.image.repository`, and `spec.values.image.pullSecrets`, which correspond to those in the Helm chart `values.yaml` file above.
+
+The `spec.values.image.pullSecrets` array lists the `<app-slug>-kotsadm-dockerhub` pull secret, where the slug for the application is `example-app-slug`:
+
+```yaml
+# kots.io/v1beta2 HelmChart custom resource
+
+apiVersion: kots.io/v1beta2
+kind: HelmChart
+metadata:
+  name: samplechart
+spec:
+  values:
+    image:
+      registry: docker.io
+      repository: my-org/example-docker-hub-image
+      pullSecrets:
+        - name: example-app-slug-kotsadm-dockerhub
+```
+
+During installation, KOTS adds the `example-app-slug-kotsadm-dockerhub` secret to the `image.pullSecrets` array in the Helm chart `values.yaml` file. Any templates in the Helm chart that access `image.pullSecrets` are updated to use `example-app-slug-kotsadm-dockerhub`:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: example
+spec:
+  containers:
+    - name: example
+      image: {{ .Values.image.registry }}/{{ .Values.image.repository }}
+      {{- with .Values.image.pullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 2 }}
+      {{- end }}
+```
+
+## Task 4: Support the Use of Local Image Registries {#local-registries}
+
+Local image registries are required for KOTS installations in air-gapped environments with no outbound internet connection. Also, users in online environments can optionally use a local registry. For more information about how users configure a local image registry with KOTS, see [Configuring Local Image Registries](/enterprise/image-registry-settings).
+
+To support the use of local registries, configure the `builder` key. For more information about how to configure the `builder` key, see [`builder`](/reference/custom-resource-helmchart-v2#builder) in _HelmChart v2_.
+
+## Task 5: Add Backup Labels for Snapshots (KOTS Existing Cluster and kURL Installations Only) {#add-backup-labels-for-snapshots}
+
+:::note
+The Replicated [snapshots](snapshots-overview) feature for backup and restore is supported only for existing cluster installations with KOTS. Snapshots are not supported for installations with Embedded Cluster. For more information about disaster recovery for installations with Embedded Cluster, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery).
+:::
+
+The snapshots feature requires the following labels on all resources in your Helm chart that you want to be included in the backup:
+* `kots.io/backup: velero`
+* `kots.io/app-slug: APP_SLUG`, where `APP_SLUG` is the slug of your Replicated application.
+
+For more information about snapshots, see [Understanding Backup and Restore](snapshots-overview).
+
+To support backup and restore with snapshots, add the `kots.io/backup: velero` and `kots.io/app-slug: APP_SLUG` labels to fields under the HelmChart custom resource `optionalValues` key. Add a `when` statement that evaluates to true only when the customer license has the `isSnapshotSupported` entitlement.
+
+The fields that you create under the `optionalValues` key must map to fields in your Helm chart `values.yaml` file.
For more information about working with the `optionalValues` key, see [optionalValues](/reference/custom-resource-helmchart-v2#optionalvalues) in _HelmChart v2_. + +#### Example + +The following example shows how to add backup labels for snapshots in the `optionalValues` key of the HelmChart custom resource: + +```yaml +# kots.io/v1beta2 HelmChart custom resource + +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: samplechart +spec: + ... + optionalValues: + # add backup labels only if the license supports snapshots + - when: "repl{{ LicenseFieldValue `isSnapshotSupported` }}" + recursiveMerge: true + values: + mariadb: + commonLabels: + kots.io/backup: velero + kots.io/app-slug: repl{{ LicenseFieldValue "appSlug" }} + podLabels: + kots.io/backup: velero + kots.io/app-slug: repl{{ LicenseFieldValue "appSlug" }} +``` + +## Additional Information + +### About the HelmChart Custom Resource + + +<KotsHelmCrDescription/> + +For more information about the HelmChart custom resource, including the unique requirements and limitations for the keys described in this topic, see [HelmChart v2](/reference/custom-resource-helmchart-v2). + +### HelmChart v1 and v2 Differences + +To support the use of local registries with version `kots.io/v1beta2` of the HelmChart custom resource, provide the necessary values in the builder field to render the Helm chart with all of the necessary images so that KOTS knows where to pull the images from to push them into the local registry. + +For more information about how to configure the `builder` key, see [Packaging Air Gap Bundles for Helm Charts](/vendor/helm-packaging-airgap-bundles) and [`builder`](/reference/custom-resource-helmchart-v2#builder) in _HelmChart v2_. + +The `kots.io/v1beta2` HelmChart custom resource has the following differences from `kots.io/v1beta1`: + +<table> + <tr> + <th>HelmChart v1beta2</th> + <th>HelmChart v1beta1</th> + <th>Description</th> + </tr> + <tr> + <td><code>apiVersion: kots.io/v1beta2</code></td> + <td><code>apiVersion: kots.io/v1beta1</code></td> + <td><code>apiVersion</code> is updated to <code>kots.io/v1beta2</code></td> + </tr> + <tr> + <td><code>releaseName</code></td> + <td><code>chart.releaseName</code></td> + <td><code>releaseName</code> is a top level field under <code>spec</code></td> + </tr> + <tr> + <td>N/A</td> + <td><code>helmVersion</code></td> + <td><code>helmVersion</code> field is removed</td> + </tr> + <tr> + <td>N/A</td> + <td><code>useHelmInstall</code></td> + <td><code>useHelmInstall</code> field is removed</td> + </tr> +</table> + +### Migrate Existing KOTS Installations to HelmChart v2 + +Existing KOTS installations can be migrated to use the KOTS HelmChart v2 method, without having to reinstall the application. + +There are different steps for migrating to HelmChart v2 depending on the application deployment method used previously. For more information, see [Migrating Existing Installations to HelmChart v2](helm-v2-migrate). + +================ +File: docs/vendor/helm-optional-charts.md +================ +# Example: Including Optional Helm Charts + +This topic describes using optional Helm charts in your application. It also provides an example of how to configure the Replicated HelmChart custom resource to exclude optional Helm charts from your application when a given condition is met. + +## About Optional Helm Charts + +By default, KOTS creates an instance of a Helm chart for every HelmChart custom resource manifest file in the upstream application manifests. 
However, you can configure your application so that KOTS excludes certain Helm charts based on a conditional statement. + +To create this conditional statement, you add a Replicated KOTS template function to an `exclude` field in the HelmChart custom resource file. For example, you can add a template function that evaluates to `true` or `false` depending on the user's selection for a configuration field on the KOTS Admin Console Config page. +KOTS renders the template function in the `exclude` field, and excludes the chart if the template function evaluates to `true`. + +For all optional components, Replicated recommends that you add a configuration option to allow the user to optionally enable or disable the component. +This lets you support enterprises that want everything to run in the cluster and those that want to bring their own services for stateful components. + +For more information about template functions, see [About Template Functions](/reference/template-functions-about). + +## Example + +This example uses an application that has a Postgres database. +The community-supported Postgres Helm chart is available at https://github.com/bitnami/charts/tree/main/bitnami/postgresql. + +In this example, you create a configuration field on the Admin Console Config page that lets the user provide their own Postgres instance or use a Postgres service that is embedded with the application. Then, you configure the HelmChart custom resource in a release for an application in the Replicated Vendor Portal to conditionally exclude the optional Postgres component. + +### Step 1: Create the Configuration Fields + +To start, define the Admin Console Config page that gives the user a choice of "Embedded Postgres" or "External Postgres", where "External Postgres" is user-supplied. + +1. Log in to the [Vendor Portal](https://vendor.replicated.com). Create a new application for this example, or open an existing application. Then, click **Releases > Create release** to create a new release for the application. + +1. 
In the Config custom resource manifest file in the release, add the following YAML to create the "Embedded Postgres" or "External Postgres" configuration options:
+
+   ```yaml
+   apiVersion: kots.io/v1beta1
+   kind: Config
+   metadata:
+     name: example-application
+   spec:
+     groups:
+     - name: database
+       title: Database
+       description: Database Options
+       items:
+       - name: postgres_type
+         type: radio
+         title: Postgres
+         default: embedded_postgres
+         items:
+         - name: embedded_postgres
+           title: Embedded Postgres
+         - name: external_postgres
+           title: External Postgres
+       - name: embedded_postgres_password
+         type: password
+         value: "{{repl RandomString 32}}"
+         hidden: true
+       - name: external_postgres_uri
+         type: text
+         title: External Postgres Connection String
+         help_text: Connection string for a Postgres 10.x server
+         when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+   ```
+
+   The YAML above does the following:
+   * Creates a field with "Embedded Postgres" or "External Postgres" radio buttons
+   * Uses the Replicated RandomString template function to generate a unique default password for the embedded Postgres instance at installation time, in a hidden field
+   * Creates a field for the external Postgres connection string that displays only if the user selects the External Postgres option
+
+   The following shows how this Config custom resource manifest file displays on the Admin Console Config page:
+
+   ![Postgres Config Screen](/images/postgres-config-screen.gif)
+
+### Step 2: Create a Secret for Postgres
+
+The application has a few components that use Postgres, and they all mount the Postgres connection string from a single Secret.
+
+Define a Secret for Postgres that renders differently if the user selects the Embedded Postgres or External Postgres option:
+
+1. In the release, create a Secret file and add the following YAML:
+
+   ```yaml
+   apiVersion: v1
+   kind: Secret
+   metadata:
+     name: postgresql-secret
+   stringData:
+     uri: postgres://username:password@postgresql:5432/database?sslmode=disable
+   ```
+
+1. Edit the `uri` field in the Secret to add a conditional statement that renders either a connection string to the embedded Postgres chart or to the user-supplied instance:
+
+   ```yaml
+   apiVersion: v1
+   kind: Secret
+   metadata:
+     name: postgresql-secret
+   stringData:
+     uri: repl{{ if ConfigOptionEquals "postgres_type" "embedded_postgres" }}postgres://myapplication:repl{{ ConfigOption "embedded_postgres_password" }}@postgres:5432/mydatabase?sslmode=disablerepl{{ else }}repl{{ ConfigOption "external_postgres_uri" }}repl{{ end }}
+   ```
+
+   As shown above, you must use a single line for the conditional statement. Optionally, you can use the Replicated Base64Encode function to pipe a string through. See [Base64Encode](/reference/template-functions-static-context#base64encode) in _Static Context_.
+
+### Step 3: Add the Helm Chart
+
+Next, package the Helm chart and add it to the release in the Vendor Portal:
+
+1. Run the following commands to generate a `.tgz` package of the Helm chart (to pin a specific chart version, see the sketch after these steps):
+
+   ```
+   helm repo add bitnami https://charts.bitnami.com/bitnami
+   helm fetch bitnami/postgresql
+   ```
+
+1. Drag and drop the `.tgz` file into the file tree of the release. The Vendor Portal automatically creates a new HelmChart custom resource named `postgresql.yaml`, which references the `.tgz` file you uploaded.
+
+   For more information about adding Helm charts to a release in the Vendor Portal, see [Managing Releases with the Vendor Portal](releases-creating-releases). 
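+
+Optionally, to keep the packaged chart version aligned with the `chartVersion` that you set in the HelmChart custom resource in the next step, you can pull a specific version of the chart instead. The following is a sketch that pins the chart to version 12.1.7 using `helm pull`, the Helm 3 equivalent of `helm fetch`:
+
+```
+helm pull bitnami/postgresql --version 12.1.7
+```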
+ +### Step 4: Edit the HelmChart Custom Resource + +Finally, edit the HelmChart custom resource: + +1. In the HelmChart custom resource, add a mapping to the `values` key so that it uses the password you created. Also, add an `exclude` field to specify that the Postgres Helm chart must only be included when the user selects the embedded Postgres option on the Config page: + + ```yaml + apiVersion: kots.io/v1beta2 + kind: HelmChart + metadata: + name: postgresql + spec: + exclude: 'repl{{ ConfigOptionEquals `postgres_type` `external_postgres` }}' + chart: + name: postgresql + chartVersion: 12.1.7 + + releaseName: samplechart-release-1 + + # values are used in the customer environment, as a pre-render step + # these values will be supplied to helm template + values: + auth: + username: username + password: "repl{{ ConfigOption `embedded_postgres_password` }}" + database: mydatabase + ``` + +1. Save and promote the release. Then, install the release in a development environment to test the embedded and external Postgres options. For more information, see [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster). + +================ +File: docs/vendor/helm-optional-value-keys.md +================ +import Values from "../partials/helm/_helm-cr-values.mdx" +import OptionalValues from "../partials/helm/_helm-cr-optional-values.mdx" +import OptionalValuesWhen from "../partials/helm/_helm-cr-optional-values-when.mdx" +import OptionalValuesRecursiveMerge from "../partials/helm/_helm-cr-optional-values-recursive-merge.mdx" +import ConfigExample from "../partials/helm/_set-values-config-example.mdx" +import LicenseExample from "../partials/helm/_set-values-license-example.mdx" +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Setting Helm Values with KOTS + +This topic describes how to use the Replicated KOTS HelmChart custom resource to set and delete values in `values.yaml` files for Helm charts deployed with Replicated KOTS. + +For a tutorial that demonstrates how to set Helm values in a sample Helm chart using the KOTS HelmChart custom resource, see [Tutorial: Set Helm Chart Values with KOTS](/vendor/tutorial-config-setup). + +## Overview + +The KOTS HelmChart custom resource [`values`](/reference/custom-resource-helmchart-v2#values) and [`optionalValues`](/reference/custom-resource-helmchart-v2#optionalvalues) keys create a mapping between KOTS and the `values.yaml` file for the corresponding Helm chart. This allows you to set or delete Helm values during installation or upgrade with KOTS, without having to make any changes to the Helm chart itself. + +You can create this mapping by adding a value under `values` or `optionalValues` that uses the exact same key name as a value in the corresponding Helm chart `values.yaml` file. During installation or upgrade, KOTS sets the Helm chart `values.yaml` file with any matching values from the `values` or `optionalValues` keys. + +The `values` and `optionalValues` keys also support the use of Replicated KOTS template functions. When you use KOTS template functions in the `values` and `optionalValues` keys, KOTS renders the template functions and then sets any matching values in the corresponding Helm chart `values.yaml` with the rendered values. For more information, see [About Template Functions](/reference/template-functions-about). 
+ +Common use cases for the HelmChart custom resource `values` and `optionalValues` keys include: +* Setting Helm values based on user-supplied values from the KOTS Admin Console configuration page +* Setting values based on the user's unique license entitlements +* Conditionally setting values when a given condition is met +* Deleting a default value key from the `values.yaml` file that should not be included for KOTS installations + +For more information about the syntax for these fields, see [`values`](/reference/custom-resource-helmchart-v2#values) and [`optionalValues`](/reference/custom-resource-helmchart-v2#optionalvalues) in _HelmChart v2_. + +## Set Values + +This section describes how to use KOTS template functions or static values in the HelmChart custom resource `values` key to set existing Helm values. + +### Using a Static Value + +You can use static values in the HelmChart custom resource `values` key when a given Helm value must be set the same for all KOTS installations. This allows you to set values for KOTS installations only, without affecting values for any installations that use the Helm CLI. + +For example, the following Helm chart `values.yaml` file contains `kotsOnlyValue.enabled`, which is set to `false` by default: + +```yaml +# Helm chart values.yaml +kotsOnlyValue: + enabled: false +``` + +The following HelmChart custom resource contains a mapping to `kotsOnlyValue.enabled` in its `values` key, which is set to `true`: + +```yaml +# KOTS HelmChart custom resource + +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: samplechart +spec: + chart: + name: samplechart + chartVersion: 3.1.7 + + releaseName: samplechart-release-1 + + values: + kotsOnlyValue: + enabled: true +``` + +During installation or upgrade with KOTS, KOTS sets `kotsOnlyValue.enabled` in the Helm chart `values.yaml` file to `true` so that the KOTS-only value is enabled for the installation. For installations that use the Helm CLI instead of KOTS, `kotsOnlyValue.enabled` remains `false`. + +### Using KOTS Template Functions + +You can use KOTS template functions in the HelmChart custom resource `values` key to set Helm values with the rendered template functions. For more information, see [About Template Functions](/reference/template-functions-about). 
+
+<Tabs>
+  <TabItem value="config" label="Config Context Example" default>
+    <ConfigExample/>
+  </TabItem>
+  <TabItem value="license" label="License Context Example">
+    <LicenseExample/>
+  </TabItem>
+</Tabs>
+
+## Conditionally Set Values
+
+<OptionalValues/>
+
+For example, the following HelmChart custom resource uses the `optionalValues` key and the [ConfigOptionEquals](/reference/template-functions-config-context#configoptionequals) template function to set user-supplied values for an external MariaDB database:
+
+```yaml
+# KOTS HelmChart custom resource
+
+apiVersion: kots.io/v1beta2
+kind: HelmChart
+metadata:
+  name: wordpress
+spec:
+  chart:
+    name: wordpress
+    chartVersion: 15.3.2
+
+  releaseName: sample-release-1
+
+  optionalValues:
+    - when: "repl{{ ConfigOptionEquals `mariadb_type` `external`}}"
+      recursiveMerge: false
+      values:
+        externalDatabase:
+          host: "repl{{ ConfigOption `external_db_host`}}"
+          user: "repl{{ ConfigOption `external_db_user`}}"
+          password: "repl{{ ConfigOption `external_db_password`}}"
+          database: "repl{{ ConfigOption `external_db_database`}}"
+          port: "repl{{ ConfigOption `external_db_port`}}"
+```
+
+During installation, KOTS renders the template functions and sets the `externalDatabase` values in the Helm chart `values.yaml` file only when the user selects the `external` option for `mariadb_type` on the Admin Console configuration page.
+
+### About Recursive Merge for optionalValues {#recursive-merge}
+
+<OptionalValuesRecursiveMerge/>
+
+For example, the following HelmChart custom resource has both `values` and `optionalValues`:
+
+```yaml
+values:
+  favorite:
+    drink:
+      hot: tea
+      cold: soda
+    dessert: ice cream
+    day: saturday
+
+optionalValues:
+  - when: '{{repl ConfigOptionEquals "example_config_option" "1" }}'
+    recursiveMerge: false
+    values:
+      example_config_option:
+        enabled: true
+      favorite:
+        drink:
+          cold: lemonade
+```
+
+The `values.yaml` file for the associated Helm chart defines the following key-value pairs:
+
+```yaml
+favorite:
+  drink:
+    hot: coffee
+    cold: soda
+  dessert: pie
+```
+
+The `templates/configmap.yaml` file for the Helm chart maps these values to the following fields:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: test-configmap
+data:
+  favorite_day: {{ .Values.favorite.day }}
+  favorite_dessert: {{ .Values.favorite.dessert }}
+  favorite_drink_cold: {{ .Values.favorite.drink.cold }}
+  favorite_drink_hot: {{ .Values.favorite.drink.hot }}
+```
+
+When `recursiveMerge` is set to `false`, the ConfigMap for the deployed application includes the following key-value pairs:
+
+```yaml
+favorite_day: null
+favorite_dessert: pie
+favorite_drink_cold: lemonade
+favorite_drink_hot: coffee
+```
+
+In this case, the top level keys in `optionalValues` override the top level keys in `values`.
+
+KOTS then uses the values from the Helm chart `values.yaml` to populate the remaining fields in the ConfigMap: `favorite_day`, `favorite_dessert`, and `favorite_drink_hot`.
+
+When `recursiveMerge` is set to `true`, the ConfigMap for the deployed application includes the following key-value pairs:
+
+```yaml
+favorite_day: saturday
+favorite_dessert: ice cream
+favorite_drink_cold: lemonade
+favorite_drink_hot: tea
+```
+
+In this case, all keys from `values` and `optionalValues` are merged. Because both include `favorite.drink.cold`, KOTS uses `lemonade` from `optionalValues`. 
+ +## Delete a Default Key + +If the Helm chart `values.yaml` contains a static value that must be deleted when deploying with KOTS, you can set the value to `"null"` (including the quotation marks) in the `values` key of the HelmChart custom resource. + +A common use case for deleting default value keys is when you include a community Helm chart as a dependency. Because you cannot control how the community chart is built and structured, you might want to change some of the default behavior. + +For example, the following HelmChart custom resource sets an `exampleKey` value to `"null"` when the chart is deployed with KOTS: + +```yaml +# KOTS HelmChart custom resource + +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: samplechart +spec: + chart: + name: samplechart + chartVersion: 3.1.7 + + releaseName: samplechart-release-1 + + values: + exampleKey: "null" +``` + +For more information about using a `null` value to delete a key, see [Deleting a Default Key](https://helm.sh/docs/chart_template_guide/values_files/#deleting-a-default-key) in the Helm documentation. + +================ +File: docs/vendor/helm-packaging-airgap-bundles.mdx +================ +import HelmBuilderRequirements from "../partials/helm/_helm-builder-requirements.mdx" +import BuilderAirgapIntro from "../partials/helm/_helm-cr-builder-airgap-intro.mdx" +import BuilderExample from "../partials/helm/_helm-cr-builder-example.mdx" +import AirGapBundle from "../partials/airgap/_airgap-bundle.mdx" + +# Packaging Air Gap Bundles for Helm Charts + +This topic describes how to package and build air gap bundles for releases that contain one or more Helm charts. This topic applies to applications deployed with Replicated KOTS. + +## Overview + +<AirGapBundle/> + +When building the `.airgap` bundle for a release that contains one or more Helm charts, the Vendor Portal renders the Helm chart templates in the release using values supplied in the KOTS HelmChart custom resource [`builder`](/reference/custom-resource-helmchart-v2#builder) key. + +## Configure the `builder` Key + +You should configure the `builder` key if you need to change any default values in your Helm chart so that the `.airgap` bundle for the release includes all images needed to successfully deploy the chart. For example, you can change the default Helm values so that images for any conditionally-deployed components are always included in the air gap bundle. Additionally, you can use the `builder` key to set any `required` values in your Helm chart that must be set for the chart to render. + +The values in the `builder` key map to values in the given Helm chart's `values.yaml` file. For example, `spec.builder.postgres.enabled` in the example HelmChart custom resource below would map to a `postgres.enabled` field in the `values.yaml` file for the `samplechart` chart: + +```yaml +# KOTS HelmChart custom resource + +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: samplechart +spec: + chart: + name: samplechart + chartVersion: 3.1.7 + builder: + postgres: + enabled: true +``` + +For requirements, recommendations, and examples of common use cases for the `builder` key, see the sections below. 
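+
+To illustrate the mapping described above, the `values.yaml` file for the `samplechart` chart might define a default that the `builder` key overrides when the bundle is built. The following is a hypothetical sketch of that default:
+
+```yaml
+# Helm chart values.yaml file (hypothetical)
+
+postgres:
+  # postgres is disabled by default, so its images would normally be
+  # excluded; the builder key sets enabled: true at bundle-build time
+  # so that the images are included in the .airgap bundle
+  enabled: false
+```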
+ +### Requirements and Recommendations + +<HelmBuilderRequirements/> + +### Example: Set the Image Registry for Air Gap Installations + +For air gap installations, if the [Replicated proxy registry](/vendor/private-images-about) domain `proxy.replicated.com` is used as the default image name for any images, you need to rewrite the image to the upstream image name so that it can be processed and included in the air gap bundle. You can use the `builder` key to do this by hardcoding the upstream location of the image (image registry, repository, and tag), as shown in the example below: + +```yaml +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: samplechart +spec: + chart: + name: samplechart + chartVersion: 3.1.7 + builder: + my-service: + image: + registry: 12345.dkr.ecr.us-west-1.amazonaws.com + repository: my-app + tag: "1.0.2" +``` +When building the `.airgap` bundle for the release, the Vendor Portal uses the registry, repository, and tag values supplied in the `builder` key to template the Helm chart, rather than the default values defined in the Helm `values.yaml` file. This ensures that the image is pulled from the upstream registry using the credentials supplied in the Vendor Portal, without requiring any changes to the Helm chart directly. + +### Example: Include Conditional Images + +Many applications have images that are included or excluded based on a given condition. For example, enterprise users might have the option to deploy an embedded database with the application or bring their own database. To support this use case for air gap installations, the images for any conditionally-deployed components must always be included in the air gap bundle. + +<BuilderExample/> + +## Related Topics + +* [builder](/reference/custom-resource-helmchart-v2#builder) +* [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap) +* [Air Gap Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster-airgapped) + +================ +File: docs/vendor/helm-v2-migrate.md +================ +# Migrating Existing Installations to HelmChart v2 + +This topic describes how to migrate existing Replicated KOTS installations to the KOTS HelmChart `kots.io/v1beta2` (HelmChart v2) installation method, without having to reinstall the application. It also includes information about how to support both HelmChart v1 and HelmChart v2 installations from a single release, and lists frequently-asked questions (FAQs) related to migrating to HelmChart v2. + +## Migrate to HelmChart v2 + +### Requirements + +* The HelmChart v2 custom resource is supported with KOTS v1.99.0 and later. If any of your customers are running a version of KOTS earlier than v1.99.0, see [Support Customers on KOTS Versions Earlier Than v1.99.0](#support-both-v1-v2) below for more information about how to support both HelmChart v1 and HelmChart v2 installations from the same release. + +* The Helm `--take-ownership` flag is supported with KOTS v1.124.0 and later. + +* The `kots.io/keep` annotation is supported with KOTS v1.122.0 and later. + +### Migrate From HelmChart v1 with `useHelmInstall: true` + +To migrate existing installations from HelmChart v1 with `useHelmInstall: true` to HelmChart v2: + +1. In a development environment, install an application release using the KOTS HelmChart v1 with `useHelmInstall: true` method. You will use this installation to test the migration to HelmChart v2. + +1. Create a new release containing your application files. + +1. 
For each Helm chart in the release, find the corresponding HelmChart custom resource and update `apiVersion` to `kots.io/v1beta2`. Then update it to rewrite images, inject image pull secrets, and add backup labels. See [Configuring the HelmChart Custom Resource v2](helm-native-v2-using).
+
+1. Promote the release to an internal-only channel that your team uses for testing.
+
+1. In your development environment, log in to the Admin Console and confirm that you can upgrade to the new HelmChart v2 release.
+
+1. When you are done testing, promote the release to one or more of your customer-facing channels. Customers can follow the standard upgrade process in the Admin Console to update their instance.
+
+### Migrate From HelmChart v1 with `useHelmInstall: false`
+
+This section describes how to migrate existing HelmChart v1 installations with `useHelmInstall: false`.
+
+:::note
+When the `useHelmInstall` field is _not_ set in the HelmChart custom resource, `false` is the default value.
+:::
+
+These migration steps ensure that KOTS does not uninstall any resources that were previously deployed without Helm, and that Helm takes ownership of these existing resources.
+
+To migrate existing installations from HelmChart v1 and `useHelmInstall: false` to HelmChart v2:
+
+1. Create a new release containing your application files:
+
+   1. In the release, for any resources defined in Kubernetes manifests or in your Helm `templates` that were previously installed with HelmChart v1 and `useHelmInstall: false`, add the `kots.io/keep` annotation. The `kots.io/keep` annotation prevents KOTS from uninstalling these resources when upgrading using the HelmChart v2 method.
+
+      **Example:**
+
+      ```yaml
+      apiVersion: apps/v1
+      kind: StatefulSet
+      metadata:
+        name: postgresql
+        # Add the kots.io/keep annotation
+        annotations:
+          kots.io/keep: "true"
+      ```
+
+   1. Save the release.
+
+1. Create another new release:
+
+   1. For each Helm chart in the release, find the corresponding HelmChart custom resource and update `apiVersion` to `kots.io/v1beta2`. Then update it to rewrite images, inject image pull secrets, and add backup labels. See [Configuring the HelmChart Custom Resource v2](helm-native-v2-using).
+
+   1. In the HelmChart custom resource, under the `helmUpgradeFlags` field, add the `--take-ownership` flag:
+
+      ```yaml
+      # HelmChart v2
+      apiVersion: kots.io/v1beta2
+      kind: HelmChart
+      metadata:
+        name: samplechart
+      spec:
+        helmUpgradeFlags:
+          - --take-ownership
+      ```
+
+      When the `--take-ownership` upgrade flag is enabled, Helm automatically takes ownership of resources that were previously deployed without Helm.
+
+   1. Save the release.
+
+1. Test the migration process:
+
+   1. Promote the first release to an internal-only channel that your team uses for testing.
+
+   1. In a development environment, install the first release.
+
+   1. Promote the second release to the same channel.
+
+   1. In your development environment, access the Admin Console to upgrade to the second release.
+
+1. When you are done testing, promote the first release to one or more of your customer-facing channels. Replicated recommends that you mark the release as required by enabling **Prevent this release from being skipped during upgrades**. For more information about required releases, see [Properties](/vendor/releases-about#properties) in _About Channels and Releases_.
+
+1. Promote the second release to the same customer-facing channel(s). 
Replicated recommends that you mark the release as required by enabling **Prevent this release from being skipped during upgrades**.
+
+1. Instruct customers to migrate by first upgrading to the release where the `kots.io/keep` annotation is applied to your resources, then upgrading to the release with HelmChart v2.
+
+1. In subsequent releases, remove the `--take-ownership` flag from the `helmUpgradeFlags` field and remove the `kots.io/keep` annotation from resources in your Helm templates.
+
+### Migrate From Standard Kubernetes Manifests
+
+This section describes how to migrate existing KOTS installations of applications that were previously packaged as standard Kubernetes manifests and are now packaged as one or more Helm charts. This migration path involves performing two upgrades to ensure that KOTS does not uninstall any resources that were adopted into Helm charts, and that Helm can take ownership of resources that were previously deployed without Helm.
+
+To migrate applications that were previously packaged as standard Kubernetes manifests:
+
+1. Create a new release containing the Kubernetes manifests for your application:
+
+   1. For each of the application manifests in the release, add the `kots.io/keep` annotation. The `kots.io/keep` annotation prevents KOTS from uninstalling resources that were previously installed without Helm when upgrading using the HelmChart v2 method.
+
+      **Example:**
+
+      ```yaml
+      apiVersion: apps/v1
+      kind: StatefulSet
+      metadata:
+        name: postgresql
+        annotations:
+          kots.io/keep: "true"
+      ```
+
+   1. Save the release.
+
+1. Create another new release:
+
+   1. In the release, add your application Helm chart(s). Remove the application manifests for resources that were adopted into the Helm chart(s).
+
+   1. For each Helm chart in the release, add a corresponding KOTS HelmChart custom resource with `apiVersion` set to `kots.io/v1beta2`. Configure the resource to rewrite images, inject image pull secrets, and add backup labels. See [Configuring the HelmChart Custom Resource v2](helm-native-v2-using).
+
+   1. In the HelmChart custom resource, under the `helmUpgradeFlags` field, add the `--take-ownership` flag:
+
+      ```yaml
+      # HelmChart v2
+      apiVersion: kots.io/v1beta2
+      kind: HelmChart
+      metadata:
+        name: samplechart
+      spec:
+        helmUpgradeFlags:
+          - --take-ownership
+      ```
+
+      When the `--take-ownership` upgrade flag is enabled, Helm automatically takes ownership of resources that were previously deployed without Helm.
+
+   1. Save the release.
+
+1. Test the migration process:
+
+   1. Promote the first release to an internal-only channel that your team uses for testing.
+
+   1. In a development environment, install the first release.
+
+   1. Promote the second release to the same channel.
+
+   1. In your development environment, access the Admin Console to upgrade to the second release. Upgrading to the second release migrates the installation to HelmChart v2.
+
+1. After you are done testing the migration process, promote the first release containing your application manifests with the `kots.io/keep` annotation to one or more customer-facing channels. Replicated recommends that you mark the release as required by enabling **Prevent this release from being skipped during upgrades**. For more information about required releases, see [Properties](/vendor/releases-about#properties) in _About Channels and Releases_.
+
+1. Promote the second release containing your Helm chart(s) to the same channels. 
Replicated recommends that you mark the release as required by enabling **Prevent this release from being skipped during upgrades**. + +1. Instruct customers to migrate by first upgrading to the release containing the standard manifests, then upgrading to the release packaged with Helm. + +1. In subsequent releases, remove the `--take-ownership` flag from the `helmUpgradeFlags` field and remove the `kots.io/keep` annotation from resources in your Helm templates. + +## Support Customers on KOTS Versions Earlier Than v1.99.0 {#support-both-v1-v2} + +The HelmChart v2 installation method requires KOTS v1.99.0 or later. If you have existing customers that have not yet upgraded to KOTS v1.99.0 or later, Replicated recommends that you support both the HelmChart v2 and v1 installation methods from the same release until all installations are running KOTS v1.99.0 or later. + +To support both installation methods from the same release, include both versions of the HelmChart custom resource for each Helm chart in your application releases (HelmChart `kots.io/v1beta2` and HelmChart `kots.io/v1beta1` with `useHelmInstall: true`). + +When you include both versions of the HelmChart custom resource for a Helm chart, installations with KOTS v1.98.0 or earlier use the v1 method, while installations with KOTS v1.99.0 or later use v2. + +After all customers are using KOTS v1.99.0 or later, you can remove the HelmChart v1 custom resources so that all customers are using the HelmChart v2 method. + +## HelmChart v2 Migration FAQs + +This section includes FAQs related to migrating existing installations to the KOTS HelmChart v2 method. + +### Which migration scenarios require the `kots.io/keep` annotation? + +When applied to a resource in a release, the `kots.io/keep` annotation prevents the given resource from being uninstalled. The `kots.io/keep` annotation can be used to prevent KOTS from deleting resources that were adopted into Helm charts or otherwise previously deployed without Helm. + +To prevent existing resources from being uninstalled during upgrade, the `kots.io/keep` annotation is required for the following types of migrations: + * Applications previously packaged as Kubernetes manifests migrating to HelmChart v2 + * HelmChart v1 with `useHelmInstall: false` migrating to HelmChart v2 + +`kots.io/keep` is _not_ needed when migrating from HelmChart v1 with `useHelmInstall: true` to HelmChart v2. + +### Which migration scenarios require the `--take-ownership` flag? + +When the `--take-ownership` flag is enabled, Helm automatically takes ownership of resources that were previously deployed to the cluster without Helm. + +The `--take-ownership` flag is required for the following types of migrations: + * Applications previously packaged as Kubernetes manifests migrating to HelmChart v2 + * HelmChart v1 with `useHelmInstall: false` migrating to HelmChart v2 + +`--take-ownership` is _not_ needed when migrating from HelmChart v1 with `useHelmInstall: true` to HelmChart v2. + +### What is the difference between HelmChart v1 with `useHelmInstall: false` and `useHelmInstall: true`? + +With HelmChart v1 and `useHelmInstall: false`, KOTS renders the Helm templates and deploys them as standard Kubernetes manifests using `kubectl apply`. This differs from both the HelmChart v1 with `useHelmInstall: true` and HelmChart v2 methods, where KOTS installs the application using Helm. 
+
+Because the HelmChart v1 with `useHelmInstall: false` method does not deploy resources with Helm, it is necessary to use the `kots.io/keep` annotation and the Helm `--take-ownership` flag when migrating to the HelmChart v2 installation method. These ensure that Helm can take ownership of existing resources and that the resources are not uninstalled during upgrade.
+
+For more information about how KOTS deploys Helm charts, including information about the deprecated HelmChart v1 installation methods, see [About Distributing Helm Charts with KOTS](helm-native-about).
+
+================
+File: docs/vendor/identity-service-configuring.md
+================
+:::important
+This topic has been removed from the product documentation because this Beta feature is deprecated.
+:::
+
+# Enabling and Configuring Identity Service (Beta)
+
+This topic describes how to enable the identity service (Beta) feature, and how to regulate access to application resources using role-based access control (RBAC).
+
+## About Identity Service
+
+When you enable the identity service for an application, the Replicated app manager deploys [Dex](https://dexidp.io/) as an intermediary that can be configured to control access to the application. Dex implements an array of protocols for querying other user-management systems, known as connectors. For more information about connectors, see [Connectors](https://dexidp.io/docs/connectors/) in the Dex documentation.
+
+## Limitations and Requirements
+
+Identity service has the following limitations and requirements:
+
+* Requires that the identity service option is enabled in customer licenses.
+* Is available only for embedded cluster installations with the kURL installer.
+* Is available only through the Replicated Admin Console.
+
+## Enable and Configure Identity Service
+
+Use the Identity custom resource to enable and configure the identity service for your application. For an example application that demonstrates how to configure the identity service, see the [`kots-idp-example-app`](https://github.com/replicatedhq/kots-idp-example-app) on GitHub.
+
+To begin, create a new release in the [Vendor Portal](https://vendor.replicated.com). Add an Identity custom resource file and customize the file for your application. For more information about the Identity custom resource, see [Identity (Beta)](/reference/custom-resource-identity) in _Reference_.
+
+**Example:**
+
+```YAML
+apiVersion: kots.io/v1beta1
+kind: Identity
+metadata:
+  name: identity
+spec:
+  requireIdentityProvider: true
+  identityIssuerURL: https://{{repl ConfigOption "ingress_hostname"}}/oidcserver
+  oidcRedirectUris:
+    - https://{{repl ConfigOption "ingress_hostname"}}/callback
+  roles:
+    - id: access
+      name: Access
+      description: Restrict access to IDP Example App
+```
+
+Make the identity service accessible from the browser by configuring the service name and port. The app manager provides the service name and port to the application through the identity template functions so that the application can configure ingress for the identity service. For more information about the identity template functions, see [Identity Context](/reference/template-functions-identity-context) in _Reference_. 
+ +**Example:** + +```YAML +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: idp-app + annotations: + kubernetes.io/ingress.allow-http: 'false' + ingress.kubernetes.io/force-ssl-redirect: 'true' + kots.io/placeholder: repl{{ printf "'true'" }}repl{{ ConfigOption "annotations" | nindent 4 }} + labels: + app: idp-app +spec: + tls: + - hosts: + - repl{{ ConfigOption "ingress_hostname" }} + secretName: idp-ingress-tls + rules: + - host: repl{{ or (ConfigOption "ingress_hostname") "~" }} + http: + paths: + - path: / + backend: + serviceName: idp-app + servicePort: 80 + - path: /oidcserver + backend: + serviceName: repl{{ IdentityServiceName }} + servicePort: repl{{ IdentityServicePort }} +``` +In your Deployment manifest file, add environment variables to configure all of the information that your application needs to communicate and integrate with the identity service. + +**Example:** + +```YAML +apiVersion: apps/v1 +kind: Deployment +metadata: + name: idp-app + labels: + app: idp-app +spec: + replicas: 1 + selector: + matchLabels: + app: idp-app + template: + metadata: + labels: + app: idp-app + spec: + containers: + - name: idp-app + image: replicated/kots-idp-example-app:latest + imagePullPolicy: Always + ports: + - containerPort: 5555 + volumeMounts: + - name: tls-ca-volume + mountPath: /idp-example + readOnly: true + args: ["--issuer-root-ca=/idp-example/tls.ca"] + env: + - name: CERT_SHA + value: repl{{ sha256sum (ConfigOption "tls_cert") }} + - name: LISTEN + value: http://0.0.0.0:5555 + - name: ISSUER + value: https://{{repl ConfigOption "ingress_hostname"}}/oidcserver + - name: CLIENT_ID + value: repl{{ IdentityServiceClientID }} + - name: CLIENT_SECRET + value: repl{{ IdentityServiceClientSecret }} # TODO: secret + - name: REDIRECT_URI + value: https://{{repl ConfigOption "ingress_hostname"}}/callback + - name: EXTRA_SCOPES + value: groups + - name: RESTRICTED_GROUPS + value: | + {{repl IdentityServiceRoles | keys | toJson }} + hostAliases: + - ip: 172.17.0.1 + hostnames: + - myapp.kotsadmdevenv.com + volumes: + - name: tls-ca-volume + secret: + secretName: idp-app-ca +``` + +## Configuring Access with RBAC + +You can also regulate access to your application resources using role based access control (RBAC). + +In the Identity custom resource, provide a list of the available roles within your application in the `roles` section. For more information, see [`roles`](/reference/custom-resource-identity#roles) in _Reference_. + +**Example:** + +```YAML +apiVersion: kots.io/v1beta1 +kind: Identity +metadata: + name: identity +spec: + requireIdentityProvider: true + identityIssuerURL: https://{{repl ConfigOption "ingress_hostname"}}/oidcserver + oidcRedirectUris: + - https://{{repl ConfigOption "ingress_hostname"}}/callback + roles: + - id: access + name: Access + description: Restrict access to IDP Example App +``` + +Then, using the Admin Console, your customer has the ability to create groups and assign specific roles to each group. +This mapping of roles to groups is returned to your application through the `IdentityServiceRoles` template function that you configure in your Deployment manifest file under the environment variable `RESTRICTED_GROUPS`. For more information, see [`IdentityServiceRoles`](/reference/template-functions-identity-context#identityserviceroles) in _Reference_. 
+ +**Example:** + +```YAML +apiVersion: apps/v1 +kind: Deployment +metadata: + name: idp-app + labels: + app: idp-app +spec: + replicas: 1 + selector: + matchLabels: + app: idp-app + template: + metadata: + labels: + app: idp-app + spec: + containers: + - name: idp-app + image: replicated/kots-idp-example-app:latest + imagePullPolicy: Always + ports: + - containerPort: 5555 + volumeMounts: + - name: tls-ca-volume + mountPath: /idp-example + readOnly: true + args: ["--issuer-root-ca=/idp-example/tls.ca"] + env: + - name: CERT_SHA + value: repl{{ sha256sum (ConfigOption "tls_cert") }} + - name: LISTEN + value: http://0.0.0.0:5555 + - name: ISSUER + value: https://{{repl ConfigOption "ingress_hostname"}}/oidcserver + - name: CLIENT_ID + value: repl{{ IdentityServiceClientID }} + - name: CLIENT_SECRET + value: repl{{ IdentityServiceClientSecret }} # TODO: secret + - name: REDIRECT_URI + value: https://{{repl ConfigOption "ingress_hostname"}}/callback + - name: EXTRA_SCOPES + value: groups + - name: RESTRICTED_GROUPS + value: | + {{repl IdentityServiceRoles | keys | toJson }} + hostAliases: + - ip: 172.17.0.1 + hostnames: + - myapp.kotsadmdevenv.com + volumes: + - name: tls-ca-volume + secret: + secretName: idp-app-ca +``` + +================ +File: docs/vendor/insights-app-status.md +================ +import StatusesTable from "../partials/status-informers/_statusesTable.mdx" +import AggregateStatus from "../partials/status-informers/_aggregateStatus.mdx" +import AggregateStatusIntro from "../partials/status-informers/_aggregate-status-intro.mdx" +import SupportedResources from "../partials/instance-insights/_supported-resources-status.mdx" + +# Enabling and Understanding Application Status + +This topic describes how to configure your application so that you can view the status of application instances in the Replicated Vendor Portal. It also describes the meaning of the different application statuses. + +## Overview + +The Vendor Portal displays data on the status of instances of your application that are running in customer environments, including the current state (such as Ready or Degraded), the instance uptime, and the average amount of time it takes your application to reach a Ready state during installation. For more information about viewing instance data, see [Instance Details](instance-insights-details). + +To compute and display these insights, the Vendor Portal interprets and aggregates the state of one or more of the supported Kubernetes resources that are deployed to the cluster as part of your application. + +<SupportedResources/> + +For more information about how instance data is sent to the Vendor Portal, see [About Instance and Event Data](instance-insights-event-data). + +## Enable Application Status Insights + +To display insights on application status, the Vendor Portal requires that your application has one or more _status informers_. Status informers indicate the Kubernetes resources deployed as part of your application that are monitored for changes in state. + +To enable status informers for your application, do one of the following, depending on the installation method: +* [Helm Installations](#helm-installations) +* [KOTS Installations](#kots-installations) + +### Helm Installations + +To get instance status data for applications installed with Helm, the Replicated SDK must be installed alongside the application. For information about how to distribute and install the SDK with your application, see [Installing the Replicated SDK](/vendor/replicated-sdk-installing). 
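+
+For reference, the SDK is added as a dependency in the Helm chart's `Chart.yaml` file. The following is a minimal sketch; the version shown is a placeholder, so check the SDK documentation for the latest version:
+
+```yaml
+# Chart.yaml
+
+dependencies:
+- name: replicated
+  repository: oci://registry.replicated.com/library
+  version: 1.0.0
+```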
+
+After you include the SDK as a dependency, the requirements for enabling status informers vary depending on how your application is installed:
+
+* For applications installed by running `helm install` or `helm upgrade`, the Replicated SDK automatically detects and reports the status of the resources that are part of the Helm release. No additional configuration is required to get instance status data.
+
+* For applications installed by running `helm template` then `kubectl apply`, the SDK cannot automatically detect and report the status of resources. You must configure custom status informers by overriding the `statusInformers` value in the Replicated SDK chart. For example:
+
+  ```yaml
+  # Helm chart values.yaml file
+
+  replicated:
+    statusInformers:
+      - deployment/nginx
+      - statefulset/mysql
+  ```
+
+  :::note
+  Applications installed by running `helm install` or `helm upgrade` can also use custom status informers. When the `replicated.statusInformers` field is set, the SDK detects and reports the status of only the resources included in the `replicated.statusInformers` field.
+  :::
+
+### KOTS Installations
+
+For applications installed with Replicated KOTS, configure one or more status informers in the KOTS Application custom resource. For more information, see [Adding Resource Status Informers](admin-console-display-app-status).
+
+When Helm-based applications that include the Replicated SDK are deployed by KOTS, the SDK inherits the status informers configured in the KOTS Application custom resource. In this case, the SDK does _not_ automatically report the status of the resources that are part of the Helm release. This prevents discrepancies in the instance data in the vendor platform.
+
+## View Resource Status Insights {#resource-status}
+
+For applications that include the Replicated SDK, the Vendor Portal also displays granular resource status insights in addition to the aggregate application status. For example, you can hover over the **App status** field on the **Instance details** page to view the statuses of the individual resources deployed by the application, as shown below:
+
+<img src="/images/resource-status-hover-current-state.png" alt="resource status pop up" width="400px"/>
+
+[View a larger version of this image](/images/resource-status-hover-current-state.png)
+
+Viewing these resource status details is helpful for understanding which resources are contributing to the aggregate application status. For example, when an application has an Unavailable status, that means that one or more resources are Unavailable. By viewing the resource status insights on the **Instance details** page, you can quickly understand which resource or resources are Unavailable for the purpose of troubleshooting.
+
+Granular resource status details are automatically available when the Replicated SDK is installed alongside the application. For information about how to distribute and install the SDK with your application, see [Installing the Replicated SDK](/vendor/replicated-sdk-installing).
+
+## Understanding Application Status
+
+This section provides information about how Replicated interprets and aggregates the status of Kubernetes resources for your application to report an application status.
+
+### About Resource Statuses {#resource-statuses}
+
+Possible resource statuses are Ready, Updating, Degraded, Unavailable, and Missing. 
+ +The following table lists the supported Kubernetes resources and the conditions that contribute to each status: + +<StatusesTable/> + +### Aggregate Application Status + +<AggregateStatusIntro/> + +<AggregateStatus/> + +================ +File: docs/vendor/install-with-helm.mdx +================ +import Prerequisites from "../partials/helm/_helm-install-prereqs.mdx" +import FirewallOpeningsIntro from "../partials/install/_firewall-openings-intro.mdx" + +# Installing with Helm + +This topic describes how to use Helm to install releases that contain one or more Helm charts. For more information about the `helm install` command, including how to override values in a chart during installation, see [Helm Install](https://helm.sh/docs/helm/helm_install/) in the Helm documentation. + +## Prerequisites + +Before you install, complete the following prerequisites: + +<Prerequisites/> + +## Firewall Openings for Online Installations with Helm {#firewall} + +<FirewallOpeningsIntro/> + +<table> + <tr> + <th width="50%">Domain</th> + <th>Description</th> + </tr> + <tr> + <td>`replicated.app` *</td> + <td><p>Upstream application YAML and metadata is pulled from `replicated.app`. The current running version of the application (if any), as well as a license ID and application ID to authenticate, are all sent to `replicated.app`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p>For the range of IP addresses for `replicated.app`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L60-L65) in GitHub.</p></td> + </tr> + <tr> + <td>`registry.replicated.com`</td> + <td><p>Some applications host private images in the Replicated registry at this domain. The on-prem docker client uses a license ID to authenticate to `registry.replicated.com`. This domain is owned by Replicated, Inc which is headquartered in Los Angeles, CA.</p><p>For the range of IP addresses for `registry.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L20-L25) in GitHub.</p></td> + </tr> + <tr> + <td>`proxy.replicated.com`</td> + <td><p>Private Docker images are proxied through `proxy.replicated.com`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p>For the range of IP addresses for `proxy.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L52-L57) in GitHub.</p></td> + </tr> +</table> + +* Required only if the [Replicated SDK](/vendor/replicated-sdk-overview) is included as a dependency of the application Helm chart. + +## Install + +To install a Helm chart: + +1. In the Vendor Portal, go to **Customers** and click on the target customer. + +1. Click **Helm install instructions**. + + <img alt="Helm install button" src="/images/helm-install-button.png" width="700px"/> + + [View a larger image](/images/helm-install-button.png) + +1. In the **Helm install instructions** dialog, run the first command to log in to the Replicated registry: + + ```bash + helm registry login registry.replicated.com --username EMAIL_ADDRESS --password LICENSE_ID + ``` + Where: + * `EMAIL_ADDRESS` is the customer's email address + * `LICENSE_ID` is the ID of the customer's license + + :::note + You can safely ignore the following warning message: `WARNING: Using --password via the CLI is insecure.` This message is displayed because using the `--password` flag stores the password in bash history. This login method is not insecure. 
+ + Alternatively, to avoid the warning message, you can click **(show advanced)** in the **Helm install instructions** dialog to display a login command that excludes the `--password` flag. With the advanced login command, you are prompted for the password after running the command. + ::: + +1. (Optional) Run the second and third commands to install the preflight plugin and run preflight checks. If no preflight checks are defined, these commands are not displayed. For more information about defining and running preflight checks, see [About Preflight Checks and Support Bundles](preflight-support-bundle-about). + +1. Run the fourth command to install using Helm: + + ```bash + helm install RELEASE_NAME oci://registry.replicated.com/APP_SLUG/CHANNEL/CHART_NAME + ``` + Where: + * `RELEASE_NAME` is the name of the Helm release. + * `APP_SLUG` is the slug for the application. For information about how to find the application slug, see [Get the Application Slug](/vendor/vendor-portal-manage-app#slug). + * `CHANNEL` is the lowercased name of the channel where the release was promoted, such as `beta` or `unstable`. Channel is not required for releases promoted to the Stable channel. + * `CHART_NAME` is the name of the Helm chart. + + :::note + To install the SDK with custom RBAC permissions, include the `--set` flag with the `helm install` command to override the value of the `replicated.serviceAccountName` field with a custom service account. For more information, see [Customizing RBAC for the SDK](/vendor/replicated-sdk-customizing#customize-rbac-for-the-sdk). + ::: + +1. (Optional) In the Vendor Portal, click **Customers**. You can see that the customer you used to install is marked as **Active** and the details about the application instance are listed under the customer name. + + **Example**: + + ![example customer in the Vendor Portal with an active instance](/images/sdk-customer-active-example.png) + [View a larger version of this image](/images/sdk-customer-active-example.png) + +================ +File: docs/vendor/installer-history.mdx +================ +import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" + +# Installer History + +<KurlAvailability/> + +This topic describes how to access the installation commands for all active and inactive kURL installers promoted to a channel. + +## About Using Inactive Installers + +Each release channel in the Replicated Vendor Portal saves the history of kURL installers that were promoted to the channel. You can view the list of historical installers on the **kURL Installer History** page for each channel. For more information, see [About the Installer History Page](#about) below. + +It can be useful to access the installation commands for inactive installers to reproduce an issue that a user is experiencing for troubleshooting purposes. For example, if the user's cluster is running the inactive installer version 1.0.0, then you can install with version 1.0.0 in a test environment to troubleshoot. + +You can also send the installation commands for inactive installers to your users as needed. For example, a user might have unique requirements for specific versions of Kubernetes or add-ons. + +## About the Installer History Page {#about} + +The **kURL Installer History** page for each channel includes a list of all the kURL installers that have been promoted to the channel, including the active installer and any inactive installers. 
+
+To access the **kURL Installer History** page, go to **Channels** and click the **Installer history** button on the target channel.
+
+The following image shows an example **kURL Installer History** page with three installers listed:
+
+![Installer History page in the Vendor Portal](/images/installer-history-page.png)
+
+[View a larger version of this image](/images/installer-history-page.png)
+
+The installers are listed in the order in which they were promoted to the channel. The installer at the top of the list is the active installer for the channel.
+
+The **kURL Installer History** page includes the following information for each installer listed:
+
+* Version label, if provided when the installer was promoted
+* Sequence number
+* Installation command
+* Installer YAML content
+
+================
+File: docs/vendor/instance-data-export.md
+================
+import Download from "../partials/customers/_download.mdx"
+
+# Export Customer and Instance Data
+
+This topic describes how to download and export customer and instance data from the Replicated Vendor Portal.
+
+## Overview
+
+While you can always consume customer and instance insight data directly in the Replicated Vendor Portal, the data is also available in a CSV format so that it can be imported into any other system, such as:
+* Customer Relationship Management (CRM) systems like Salesforce or Gainsight
+* Data warehouses like Redshift, Snowflake, or BigQuery
+* Business intelligence (BI) tools like Looker, Tableau, or PowerBI
+
+By collecting and organizing this data wherever it is most visible and valuable, you can enable your team to make better decisions about where to focus efforts across product, sales, engineering, and customer success.
+
+## Bulk Export Instance Event Timeseries Data
+
+You can use the Vendor API v3 `/app/{app_id}/events` endpoint to programmatically access historical timeseries data containing instance-level events, including any custom metrics that you have defined. For more information about the endpoint, see [Get instance events in either JSON or CSV format](https://replicated-vendor-api.readme.io/reference/listappinstanceevents) in the Vendor API v3 documentation.
+
+The `/app/{app_id}/events` endpoint returns data scoped to a given application identifier. It also allows filtering based on time periods, instance identifiers, customer identifiers, and event types. You must provide at least **one** query parameter to scope the query in order to receive a response.
+
+By bulk exporting this instance event data with the `/app/{app_id}/events` endpoint, you can:
+* Identify trends and potential problem areas
+* Demonstrate the impact, adoption, and usage of recent product features
+
+### Filter Bulk Data Exports
+
+You can use the following types of filters to filter timeseries data for bulk export:
+
+- **Filter by date**:
+  - Get instance events recorded _at or before_ the query date. For example:
+    ```bash
+    curl -H "Authorization: $REPLICATED_API_TOKEN" \
+      "https://api.replicated.com/vendor/v3/app/:appID/events?before=2023-10-15"
+    ```
+  - Get instance events recorded _at or after_ the query date. For example:
+    ```bash
+    curl -H "Authorization: $REPLICATED_API_TOKEN" \
+      "https://api.replicated.com/vendor/v3/app/:appID/events?after=2023-10-15"
+    ```
+  - Get instance events recorded within a range of dates [after, before]. 
For example: + ```shell + curl -H "Authorization: $REPLICATED_API_TOKEN" \ + "https://api.replicated.com/vendor/v3/app/:appID/events?after=2023-05-02&before=2023-10-15" + ``` +- **Filter by customer**: Get instance events from one or more customers using a comma-separated list of customer IDs. For example: + ```bash + curl -H "Authorization: $REPLICATED_API_TOKEN" \ + "https://api.replicated.com/vendor/v3/app/:appID/events?customerIDs=1b13241,2Rjk2923481" + ``` +- **Filter by event type**: Get instance events by event type using a comma-separated list of event types. For example: + ```bash + curl -H "Authorization: $REPLICATED_API_TOKEN" \ + "https://api.replicated.com/vendor/v3/app/:appID/events?eventTypes=numUsers,numProjects" + ``` + +:::note +If any filter is passed for an object that does not exist, no warning is given. For example, if a `customerIDs` filter is passed for an ID that does not exist, or for an ID that the user does not have access to, then an empty array is returned. +::: + + +## Download Customer Instance Data CSVs +<Download/> + +### Data Dictionary + +The following table lists the data fields that can be included in the customers and instances CSV downloads, including the label, data type, and description. + +<table> + <tr> + <th>Label</th> + <th>Type</th> + <th>Description</th> + </tr> + <tr> + <td>customer_id</td> + <td>string</td> + <td>Customer identifier</td> + </tr> + <tr> + <td>customer_name</td> + <td>string</td> + <td>The customer name</td> + </tr> + <tr> + <td>customer_created_date</td> + <td>timestamptz</td> + <td>The date the license was created</td> + </tr> + <tr> + <td>customer_license_expiration_date</td> + <td>timestamptz</td> + <td>The expiration date of the license</td> + </tr> + <tr> + <td>customer_channel_id</td> + <td>string</td> + <td>The channel id the customer is assigned</td> + </tr> + <tr> + <td>customer_channel_name</td> + <td>string</td> + <td>The channel name the customer is assigned</td> + </tr> + <tr> + <td>customer_app_id</td> + <td>string</td> + <td>App identifier</td> + </tr> + <tr> + <td>customer_last_active</td> + <td>timestamptz</td> + <td>The date the customer was last active</td> + </tr> + <tr> + <td>customer_type</td> + <td>string</td> + <td>One of prod, trial, dev, or community</td> + </tr> + <tr> + <td>customer_status</td> + <td>string</td> + <td>The current status of the customer</td> + </tr> + <tr> + <td>customer_is_airgap_enabled</td> + <td>boolean</td> + <td>The feature the customer has enabled - Airgap</td> + </tr> + <tr> + <td>customer_is_geoaxis_supported</td> + <td>boolean</td> + <td>The feature the customer has enabled - GeoAxis</td> + </tr> + <tr> + <td>customer_is_gitops_supported</td> + <td>boolean</td> + <td>The feature the customer has enabled - KOTS Auto-GitOps</td> + </tr> + <tr> + <td>customer_is_embedded_cluster_download_enabled</td> + <td>boolean</td> + <td>The feature the customer has enabled - Embedded Cluster</td> + </tr> + <tr> + <td>customer_is_identity_service_supported</td> + <td>boolean</td> + <td>The feature the customer has enabled - Identity</td> + </tr> + <tr> + <td>customer_is_snapshot_supported</td> + <td>boolean</td> + <td>The feature the customer has enabled - Snapshot</td> + </tr> + <tr> + <td>customer_has_entitlements</td> + <td>boolean</td> + <td>Indicates the presence or absence of entitlements and entitlment_* columns</td> + </tr> + <tr> + <td>customer_entitlement__*</td> + <td>string/integer/boolean</td> + <td>The values of any custom license fields configured for the customer. 
For example, customer_entitlement__active-users.</td> + </tr> + <tr> + <td>customer_created_by_id</td> + <td>string</td> + <td>The ID of the actor that created this customer: user ID or a hashed value of a token.</td> + </tr> + <tr> + <td>customer_created_by_type</td> + <td>string</td> + <td>The type of the actor that created this customer: user, service-account, or service-account.</td> + </tr> + <tr> + <td>customer_created_by_description</td> + <td>string</td> + <td>The description of the actor that created this customer. Includes username or token name depending on actor type.</td> + </tr> + <tr> + <td>customer_created_by_link</td> + <td>string</td> + <td>The link to the actor that created this customer.</td> + </tr> + <tr> + <td>customer_created_by_timestamp</td> + <td>timestamptz</td> + <td>The date the customer was created by this actor. When available, matches the value in the customer_created_date column</td> + </tr> + <tr> + <td>customer_updated_by_id</td> + <td>string</td> + <td>The ID of the actor that updated this customer: user ID or a hashed value of a token.</td> + </tr> + <tr> + <td>customer_updated_by_type</td> + <td>string</td> + <td>The type of the actor that updated this customer: user, service-account, or service-account.</td> + </tr> + <tr> + <td>customer_updated_by_description</td> + <td>string</td> + <td>The description of the actor that updated this customer. Includes username or token name depending on actor type.</td> + </tr> + <tr> + <td>customer_updated_by_link</td> + <td>string</td> + <td>The link to the actor that updated this customer.</td> + </tr> + <tr> + <td>customer_updated_by_timestamp</td> + <td>timestamptz</td> + <td>The date the customer was updated by this actor.</td> + </tr> + <tr> + <td>instance_id</td> + <td>string</td> + <td>Instance identifier</td> + </tr> + <tr> + <td>instance_is_active</td> + <td>boolean</td> + <td>The instance has pinged within the last 24 hours</td> + </tr> + <tr> + <td>instance_first_reported_at</td> + <td>timestamptz</td> + <td>The timestamp of the first recorded check-in for the instance.</td> + </tr> + <tr> + <td>instance_last_reported_at</td> + <td>timestamptz</td> + <td>The timestamp of the last recorded check-in for the instance.</td> + </tr> + <tr> + <td>instance_first_ready_at</td> + <td>timestamptz</td> + <td>The timestamp of when the cluster was considered ready</td> + </tr> + <tr> + <td>instance_kots_version</td> + <td>string</td> + <td>The version of KOTS or the Replicated SDK that the instance is running. The version is displayed as a Semantic Versioning compliant string.</td> + </tr> + <tr> + <td>instance_k8s_version</td> + <td>string</td> + <td>The version of Kubernetes running in the cluster.</td> + </tr> + <tr> + <td>instance_is_airgap</td> + <td>boolean</td> + <td>The cluster is airgaped</td> + </tr> + <tr> + <td>instance_is_kurl</td> + <td>boolean</td> + <td>The instance is installed in a Replicated kURL cluster (embedded cluster)</td> + </tr> + <tr> + <td>instance_last_app_status</td> + <td>string</td> + <td>The instance's last reported app status</td> + </tr> + <tr> + <td>instance_client</td> + <td>string</td> + <td>Indicates whether this instance is managed by KOTS or if it's a Helm CLI deployed instance using the SDK.</td> + </tr> + <tr> + <td>instance_kurl_node_count_total</td> + <td>integer</td> + <td>Total number of nodes in the cluster. 
Applies only to kURL clusters.</td> + </tr> + <tr> + <td>instance_kurl_node_count_ready</td> + <td>integer</td> + <td>Number of nodes in the cluster that are in a healthy state and ready to run Pods. Applies only to kURL clusters.</td> + </tr> + <tr> + <td>instance_cloud_provider</td> + <td>string</td> + <td>The cloud provider where the instance is running. Cloud provider is determined by the IP address that makes the request.</td> + </tr> + <tr> + <td>instance_cloud_provider_region</td> + <td>string</td> + <td>The cloud provider region where the instance is running. For example, us-central1-b</td> + </tr> + <tr> + <td>instance_app_version</td> + <td>string</td> + <td>The current application version</td> + </tr> + <tr> + <td>instance_version_age</td> + <td>string</td> + <td>The age (in days) of the currently deployed release. This is relative to the latest available release on the channel.</td> + </tr> + <tr> + <td>instance_is_gitops_enabled</td> + <td>boolean</td> + <td>Reflects whether the end user has enabled KOTS Auto-GitOps for deployments in their environment</td> + </tr> + <tr> + <td>instance_gitops_provider</td> + <td>string</td> + <td>If KOTS Auto-GitOps is enabled, reflects the GitOps provider in use. For example, GitHub Enterprise.</td> + </tr> + <tr> + <td>instance_is_skip_preflights</td> + <td>boolean</td> + <td>Indicates whether an end user elected to skip preflight check warnings or errors</td> + </tr> + <tr> + <td>instance_preflight_status</td> + <td>string</td> + <td>The last reported preflight check status for the instance</td> + </tr> + <tr> + <td>instance_k8s_distribution</td> + <td>string</td> + <td>The Kubernetes distribution of the cluster.</td> + </tr> + <tr> + <td>instance_has_custom_metrics</td> + <td>boolean</td> + <td>Indicates the presence or absence of custom metrics and custom_metric__* columns</td> + </tr> + <tr> + <td>instance_custom_metrics_reported_at</td> + <td>timestamptz</td> + <td>Timestamp of latest custom_metric</td> + </tr> + <tr> + <td>custom_metric__*</td> + <td>string/integer/boolean</td> + <td>The values of any custom metrics that have been sent by the instance. For example, custom_metric__active_users</td> + </tr> + <tr> + <td>instance_has_tags</td> + <td>boolean</td> + <td>Indicates the presence or absence of instance tags and instance_tag__* columns</td> + </tr> + <tr> + <td>instance_tag__*</td> + <td>string/integer/boolean</td> + <td>The values of any instance tag that have been set by the vendor. For example, instance_tag__name</td> + </tr> +</table> + +================ +File: docs/vendor/instance-insights-details.md +================ +# Instance Details + +This topic describes using the Replicated Vendor Portal to quickly understand the recent events and performance of application instances installed in your customers' environments. +## About the Instance Details Page {#about-page} + +The Vendor Portal provides insights about the health, status, and performance of the active application instances associated with each customer license on the **Instance details** page. You can use the insights on the **Instance details** page to more quickly troubleshoot issues with your customers' active instances, helping to reduce support burden. 
+
+For example, you can use the **Instance details** page to track the following events for each instance:
+
+* Recent performance degradation or downtime
+* Length of instance downtime
+* Recent changes to the cluster or infrastructure
+* Changes in the number of nodes, such as nodes lost or added
+* Changes in the cluster's Kubernetes version
+* Changes in the application version that the instance is running
+
+To access the **Instance details** page, go to **Customers** and click the **Customer reporting** button for the customer that you want to view:
+
+![Customer reporting button on the Customers page](/images/customer-reporting-button.png)
+
+From the **Reporting** page for the selected customer, click the **View details** button for the desired application instance.
+
+The following shows an example of the **Instance details** page:
+
+![Instance details full page](/images/instance-details.png)
+
+[View a larger version of this image](/images/instance-details.png)
+
+As shown in the image above, the **Instance details** page includes the following sections:
+
+* **Current State**: Information about the state of the instance, such as the current application version. See [Current State](#current-state) below.
+* **Instance Insights**: Key performance indicators (KPIs) related to health, performance, and adoption. See [Insights](#insights) below.
+* **Instance Information**: Information about the cluster where the instance is installed, such as the version of Kubernetes running on the cluster. See [Instance Information](#instance-information) below.
+* **Custom Metrics**: The values for any custom metrics that are configured for the application, from the most recent check-in. For more information about configuring custom metrics, see [Configuring Custom Metrics](/vendor/custom-metrics).
+* **Instance Uptime**: Details about instance uptime over time. See [Instance Uptime](#instance-uptime) below.
+* **Instance Activity**: Event data stream. See [Instance Activity](#instance-activity) below.
+
+### Current State
+
+The **Current State** section displays the following event data about the status and version of the instance:
+
+* **App status**: The status of the application. Possible statuses are Ready, Updating, Degraded, Unavailable, and Missing. For more information about enabling application status insights and how to interpret the different statuses, see [Enabling and Understanding Application Status](insights-app-status).
+
+  Additionally, for applications that include the [Replicated SDK](/vendor/replicated-sdk-overview), you can hover over the **App status** field to view the statuses of the individual resources deployed by the application, as shown in the example below:
+
+  <img src="/images/resource-status-hover-current-state.png" alt="resource status pop up" width="400px"/>
+
+  [View a larger version of this image](/images/resource-status-hover-current-state.png)
+
+* **App version**: The version label of the currently running release. You define the version label in the release properties when you promote the release. For more information about defining release properties, see [Properties](releases-about#properties) in _About Channels and Releases_.
+
+  If there is no version label for the release, then the Vendor Portal displays the release sequence in the **App version** field. You can find the sequence number associated with a release by running the `replicated release ls` command. See [release ls](/reference/replicated-cli-release-ls) in the _Replicated CLI_ documentation.
+
+* **Version age**: The absolute and relative ages of the instance:
+
+  * **Absolute age**: `now - current_release.promoted_date`
+
+    The number of days since the currently running application version was promoted to the channel. For example, if the instance is currently running version 1.0.0, and version 1.0.0 was promoted to the channel 30 days ago, then the absolute age is 30.
+
+  * **Relative age (Days Behind Latest)**: `channel.latest_release.promoted_date - current_release.promoted_date`
+
+    The number of days between when the currently running application version was promoted to the channel and when the latest available version on the channel was promoted.
+
+    For example, the instance is currently running version 1.0.0, which was promoted to the Stable channel. The latest version available on the Stable channel is 1.5.0. If 1.0.0 was promoted 30 days ago and 1.5.0 was promoted 10 days ago, then the relative age of the application instance is 20 days.
+
+* **Versions behind**: The number of versions between the currently running version and the latest version available on the channel where the instance is assigned.
+
+  For example, the instance is currently running version 1.0.0, which was promoted to the Stable channel. If the later versions 1.1.0, 1.2.0, 1.3.0, 1.4.0, and 1.5.0 were also promoted to the Stable channel, then the instance is five versions behind.
+
+* **Last check-in**: The timestamp when the instance most recently sent data to the Vendor Portal.
+
+### Instance Insights {#insights}
+
+The **Insights** section includes the following metrics computed by the Vendor Portal:
+
+* [Uptime](#uptime)
+* [Time to Install](#time-to-install)
+
+#### Uptime
+
+The Vendor Portal computes the total uptime for the instance as the fraction of time that the instance spends with a Ready, Updating, or Degraded status. The Vendor Portal also provides more granular details about uptime in the **Instance Uptime** graph. See [Instance Uptime](#instance-uptime) below.
+
+High uptime indicates that the application is reliable and able to handle the demands of the customer environment. Low uptime might indicate that the application is prone to errors or failures. By measuring the total uptime, you can better understand the performance of your application.
+
+The following table lists the application statuses that are associated with an Up or Down state in the total uptime calculation:
+
+<table>
+  <tr>
+    <th>Uptime State</th>
+    <th>Application Statuses</th>
+  </tr>
+  <tr>
+    <td>Up</td>
+    <td>Ready, Updating, or Degraded</td>
+  </tr>
+  <tr>
+    <td>Down</td>
+    <td>Missing or Unavailable</td>
+  </tr>
+</table>
+
+:::note
+The Vendor Portal includes time spent in a Degraded status in the total uptime for an instance because an app may still be capable of serving traffic when a subset of the desired replicas is available. Further, it is possible that a Degraded state is expected during an upgrade.
+:::
+
+#### Time to Install
+
+The Vendor Portal computes both _License time to install_ and _Instance time to install_ metrics to represent how quickly the customer was able to deploy the application to a Ready state in their environment.
+
+Replicated recommends that you use Time to Install as an indicator of the quality of the packaging, configuration, and documentation of your application.
+
+If the installation process for your application is challenging, poorly documented, lacks appropriate preflight checks, or relies heavily on manual steps, then it can take days or weeks to deploy the application in customer environments. A longer Time to Install generally represents a significantly increased support burden and a degraded customer installation experience.
+
+The following describes the _License time to install_ and _Instance time to install_ metrics:
+
+* **License time to install**: The time between when you create the customer license in the Vendor Portal, and when the application instance reaches a Ready status in the customer environment.
+
+  License time to install represents the time that it takes for a customer to successfully deploy your application after you intend to distribute the application to the customer. Replicated uses the timestamp of when you create the customer license in the Vendor Portal to represent your intent to distribute the application because creating the license file is generally the final step before you share the installation materials with the customer.
+
+  License time to install includes several activities that are involved in deploying the application, including the customer receiving the necessary materials and documentation, downloading the assets, provisioning the required hardware, networking, and external systems, completing the preflight checks, and finally installing, configuring, and deploying the application.
+
+* **Instance time to install**: The time between when the Vendor Portal records the first event for the application instance in the customer environment, and when the instance reaches a Ready status.
+
+  Instance time to install is the length of time that it takes for the application to reach a Ready state after the customer starts a deployment attempt in their environment. Replicated considers a deployment attempt started when the Vendor Portal first records an event for the instance.
+
+  For more information about how the Vendor Portal generates events, see [About Events](instance-insights-event-data#about-events) in _Event Data_.
+
+  :::note
+  Instance time to install does _not_ include any deployment attempts that a customer might have made that did not generate an event. For example, time spent by the customer discarding the server used in a failed attempt before attempting to deploy the instance again on a new server.
+  :::
+
+### Instance Information
+
+The **Instance Information** section displays the following details about the cluster infrastructure where the application is installed as well as vendor-defined metadata about the instance:
+
+* The Kubernetes distribution for the cluster. For example, GKE or EKS.
+* The version of Kubernetes running in the cluster.
+* The version of KOTS or the Replicated SDK installed in the cluster.
+* For **First Seen**, the timestamp of the first event that the Vendor Portal generated for the instance. For more information about how the Vendor Portal generates events, see [About Events](instance-insights-event-data#about-events) in _Event Data_.
+* If detected, the cloud provider and region where the cluster is running. For example, `GCP: us-central1`.
+* An optional vendor-defined name for the instance.
+* Optional vendor-defined instance tags in the form of key-value pairs. Each instance can have a maximum of 10 tags.
+
+In addition to the details listed above, the **Instance Information** section also displays the following for embedded clusters provisioned by Replicated kURL:
+* Node operating systems
+* Node operating system versions
+* Total number of cluster nodes
+* Number of cluster nodes in a Ready state
+* ID of the kURL installer specification
+
+### Instance Uptime
+
+The **Instance Uptime** graph shows the percentage of a given time period that the instance was in an Up, Degraded, or Down state.
+
+To determine if the instance is Up, Degraded, or Down, the Vendor Portal uses the application status. Possible application statuses are Ready, Updating, Degraded, Unavailable, and Missing. The following table lists the application statuses that are associated with each state in the **Instance Uptime** graph:
+
+<table>
+  <tr>
+    <th>Uptime State</th>
+    <th>Application Statuses</th>
+  </tr>
+  <tr>
+    <td>Up</td>
+    <td>Ready or Updating</td>
+  </tr>
+  <tr>
+    <td>Degraded</td>
+    <td>Degraded</td>
+  </tr>
+  <tr>
+    <td>Down</td>
+    <td>Missing or Unavailable</td>
+  </tr>
+</table>
+
+The following shows an example of an **Instance Uptime** graph:
+
+![Uptime Graph on the Instance details page](/images/instance-uptime-graph.png)
+
+You can hover over the bars in the **Instance Uptime** graph to view more detail about the percent of time that the instance was in each state during the given time period.
+
+![Uptime Graph with event markers on the Instance details page](/images/instance-uptime-graph-event-markers.png)
+
+You can hover over the event markers in the **Instance Uptime** graph to view more detail about the events that occurred during that given interval on the graph. If more than two events occurred in that period, the event marker displays the number of events that occurred during that period. If you click the event marker or the event in the tooltip, the **Instance Activity** section highlights the event or the first event in the group.
+
+### Instance Activity
+
+The **Instance Activity** section displays recent events for the instance. The data stream is updated each time an instance _check-in_ occurs. For more information about what triggers an instance check-in, see [How the Vendor Portal Collects Instance Data](instance-insights-event-data#about-reporting) in _About Instance and Event Data_.
+
+The timestamp of events displayed in the **Instance Activity** stream is the timestamp when the Replicated Vendor API received data from the instance. The timestamp of events does not necessarily reflect the timestamp of when the event occurred.
+
+The following shows an example of the **Instance Activity** data stream:
+
+![Instance Activity section of Instance details page](/images/instance-activity.png)
+
+You can filter the **Instance Activity** stream by the following categories:
+
+* [App install/upgrade](#app-install-upgrade)
+* [App status](#app-status)
+* [Cluster status](#cluster)
+* [Custom metrics](#custom-metrics)
+* [Infrastructure status](#infrastructure)
+* [KOTS version](#kots)
+* [Replicated SDK version](#sdk)
+* [Upstream update](#upstream)
+
+The following tables describe the events that can be displayed in the **Instance Activity** stream for each of the categories above:
+
+#### App install/upgrade {#app-install-upgrade}
+
+<table>
+  <tr>
+    <th>Label</th>
+    <th>Description</th>
+  </tr>
+  <tr>
+    <td>App Channel</td>
+    <td>The ID of the channel that the application instance is assigned to.</td>
+  </tr>
+  <tr>
+    <td>App Version</td>
+    <td>The version label of the release that the instance is currently running. The version label is the version that you assigned to the release when promoting it to a channel.</td>
+  </tr>
+</table>
+
+#### App status {#app-status}
+
+<table>
+  <tr>
+    <th>Label</th>
+    <th>Description</th>
+  </tr>
+  <tr>
+    <td>App Status</td>
+    <td>
+      <p>A string that represents the status of the application. Possible values: Ready, Updating, Degraded, Unavailable, Missing. For applications that include the <a href="/vendor/replicated-sdk-overview">Replicated SDK</a>, hover over the application status to view the statuses of the individual resources deployed by the application.</p>
+      <p>For more information, see <a href="insights-app-status">Enabling and Understanding Application Status</a>.</p>
+    </td>
+  </tr>
+</table>
+
+#### Cluster status {#cluster}
+
+<table>
+  <tr>
+    <th>Label</th>
+    <th>Description</th>
+  </tr>
+  <tr>
+    <td>Cluster Type</td>
+    <td>
+      <p>Indicates if the cluster was provisioned by kURL.</p>
+      <p>Possible values:</p>
+      <ul>
+        <li><code>kURL</code>: The cluster is provisioned by kURL.</li>
+        <li><code>Existing</code>: The cluster is <em>not</em> provisioned by kURL.</li>
+      </ul>
+      <p>For more information about kURL clusters, see <a href="packaging-embedded-kubernetes">Creating a kURL installer</a>.</p>
+    </td>
+  </tr>
+  <tr>
+    <td>Kubernetes Version</td>
+    <td>The version of Kubernetes running in the cluster.</td>
+  </tr>
+  <tr>
+    <td>Kubernetes Distribution</td>
+    <td>
+      <p>The Kubernetes distribution of the cluster.</p>
+      <p>Possible values:</p>
+      <ul>
+        <li>EKS</li>
+        <li>GKE</li>
+        <li>K3S</li>
+        <li>RKE2</li>
+      </ul>
+    </td>
+  </tr>
+  <tr>
+    <td>kURL Nodes Total</td>
+    <td>
+      <p>Total number of nodes in the cluster.</p>
+      <p><strong>Note:</strong> Applies only to kURL clusters.</p>
+    </td>
+  </tr>
+  <tr>
+    <td>kURL Nodes Ready</td>
+    <td>
+      <p>Number of nodes in the cluster that are in a healthy state and ready to run Pods.</p>
+      <p><strong>Note:</strong> Applies only to kURL clusters.</p>
+    </td>
+  </tr>
+  <tr>
+    <td>New kURL Installer</td>
+    <td>
+      <p>The ID of the kURL installer specification that kURL used to provision the cluster. Indicates that a new Installer specification was added. An installer specification is a manifest file that has <code>apiVersion: cluster.kurl.sh/v1beta1</code> and <code>kind: Installer</code>.
</p> + <p>For more information about installer specifications for kURL, see <a href="packaging-embedded-kubernetes">Creating a kURL installer</a>.</p> + <p><strong>Note:</strong> Applies only to kURL clusters.</p> + </td> + </tr> +</table> + +#### Custom metrics {#custom-metrics} + +You can filter the activity feed by any custom metrics that are configured for the application. The labels for the custom metrics vary depending on the custom key value pairs included in the data set that is sent to the Vendor Portal. For example, the key value pair `"num_projects": 5` is displayed as **Num Projects: 5** in the activity feed. + +For more information about configuring custom metrics, see [Configuring Custom Metrics](/vendor/custom-metrics). +#### Infrastructure status {#infrastructure} + +<table> + <tr> + <th>Label</th> + <th>Description</th> + </tr> + <tr> + <td>Cloud Provider</td> + <td> + <p>The cloud provider where the instance is running. Cloud provider is determined by the IP address that makes the request.</p> + <p>Possible values:</p> + <ul> + <li>AWS</li> + <li>GCP</li> + <li>DigitalOcean</li> + </ul> + </td> + </tr> + <tr> + <td>Cloud Region</td> + <td> + <p>The cloud provider region where the instance is running. For example, <code>us-central1-b</code></p> + </td> + </tr> +</table> + +#### KOTS version {#kots} + +<table> + <tr> + <th>Label</th> + <th>Description</th> + </tr> + <tr> + <td>KOTS Version</td> + <td>The version of KOTS that the instance is running. KOTS version is displayed as a Semantic Versioning compliant string.</td> + </tr> +</table> + +#### Replicated SDK version {#sdk} + +<table> + <tr> + <th>Label</th> + <th>Description</th> + </tr> + <tr> + <td>Replicated SDK Version</td> + <td>The version of the Replicated SDK that the instance is running. SDK version is displayed as a Semantic Versioning compliant string.</td> + </tr> +</table> + +#### Upstream update {#upstream} + +<table> + <tr> + <th>Label</th> + <th>Description</th> + </tr> + <tr> + <td>Versions Behind</td> + <td> + <p>The number of versions between the version that the instance is currently running and the latest version available on the channel.</p> + <p>Computed by the Vendor Portal each time it receives instance data.</p> + </td> + </tr> +</table> + +================ +File: docs/vendor/instance-insights-event-data.mdx +================ +import AirGapTelemetry from "../partials/instance-insights/_airgap-telemetry.mdx" + +# About Instance and Event Data + +This topic provides an overview of the customer and instance insights that you can view in the Replicated Vendor Portal. It includes information about how the Vendor Portal accesses data as well as requirements and limitations. + +## How the Vendor Portal Collects Instance Data {#about-reporting} + +This section describes how the Vendor Portal collects instance data from online and air gap environments. + +### Online Instances + +For instances running in online (internet-connected) environments, either Replicated KOTS or the Replicated SDK periodically sends a small amount of data to the Vendor Portal, depending on which is installed in the cluster alongside the application. If both KOTS and the SDK are installed in the cluster (such as when a Helm chart that includes the SDK is installed by KOTS), then both send instance data. + +The data sent to the Vendor Portal includes properties such as the current version and status of the instance. 
For a full overview of what data might be included, see the [Replicated Data Transmission Policy](https://docs.replicated.com/vendor/policies-data-transmission).
+
+The following diagram shows the flow of different types of data from customer environments to the Vendor Portal:
+
+![Telemetry sent from instances to vendor platform](/images/telemetry-diagram.png)
+
+[View a larger version of this image](/images/telemetry-diagram.png)
+
+As shown in the diagram above, application instance data, application status data, and details about the KOTS and the SDK instances running in the cluster are all sent to the Vendor Portal through the Replicated app service:
+* When both KOTS and the SDK are installed in the cluster, they both send application instance data, including information about the cluster where the instance is running.
+* KOTS and the SDK both send information about themselves, including the version of KOTS or the SDK running in the cluster.
+* Any custom metrics configured by the software vendor are sent to the Vendor Portal through the Replicated SDK API. For more information, see [Configuring Custom Metrics](/vendor/custom-metrics).
+* Application status data, such as whether the instance is ready or degraded, is sent by KOTS. If KOTS is not installed in the cluster, then the SDK sends the application status data. For more information, see [Enabling and Understanding Application Status](/vendor/insights-app-status).
+
+### Air Gap Instances
+
+<AirGapTelemetry/>
+
+For more information, see [Collecting Telemetry for Air Gap Instances](/vendor/telemetry-air-gap).
+
+## Frequency of Data Sent to the Vendor Portal
+
+This section describes how frequently data is sent to the Vendor Portal for online and air gap instances.
+
+### From the Replicated SDK (Online Instances Only)
+
+When installed alongside the application in an online environment, the SDK automatically sends instance data to the Vendor Portal when any of the following occur:
+
+* Four hours have passed since the SDK last sent data. The SDK sends data at a regular four-hour interval.
+
+* The instance checks for updates. An update check occurs when the instance makes a request to the `/api/v1/app/updates` SDK API endpoint. See [app](/reference/replicated-sdk-apis#app) in _Replicated SDK API (Alpha)_.
+
+* The instance completes a Helm update to a new application version. After the update completes, the SDK sends data when it restarts.
+
+* The status of an instance changes. For example, an instance can change from a Ready to Degraded status. For more information, see [Enabling and Understanding Application Status](insights-app-status).
+
+### From KOTS (Online Instances Only)
+
+When installed alongside the application in an online environment, KOTS automatically sends instance data to the Vendor Portal when any of the following occur:
+
+* The instance checks for updates. By default, KOTS checks for updates every four hours. Additionally, an update check can occur when a user clicks the **Check for updates** button in the Replicated Admin Console.
+
+  :::note
+  KOTS users can modify or disable automatic update checks from the Admin Console. For more information, see [Configuring Automatic Updates](/enterprise/updating-apps).
+  :::
+
+* The status of an instance changes. For example, an instance can change from a Ready to Degraded status. For more information, see [Enabling and Understanding Application Status](insights-app-status).
+
+* (KOTS v1.92 and later only) The instance deploys a new application version.
+
+### From Air Gap Instances
+
+For air gap instances, the frequency of data sent to the Vendor Portal depends on how frequently support bundles are collected in the customer environment and uploaded to the Vendor Portal.
+
+For more information, see [Collecting Telemetry for Air Gap Instances](/vendor/telemetry-air-gap).
+
+## How the Vendor Portal Generates Events and Insights {#about-events}
+
+When the Vendor Portal receives instance data, it evaluates each data field to determine if there was a change in its value. For each field that changes in value, the Vendor Portal creates an _event_ to record the change. For example, a change from Ready to Degraded in the application status generates an event.
+
+In addition to creating events for changes in data sent by the instance, the Vendor Portal also generates events for changes in values of computed metrics. The Vendor Portal updates the values of computed metrics each time it receives instance data. For example, the Vendor Portal computes a _Versions behind_ metric that tracks the number of versions behind the latest available version for the instance. When the instance checks for updates and a new update is available, the value of this metric changes and the Vendor Portal generates an event.
+
+The Vendor Portal uses events to display insights for each active instance in an **Instance details** dashboard. For more information about using the Vendor Portal **Instance details** page to monitor active instances of your application, see [Instance Details](instance-insights-details).
+
+## Requirements
+
+The following requirements apply to collecting instance telemetry:
+
+* Replicated KOTS or the Replicated SDK must be installed in the cluster where the application instance is running.
+
+* For KOTS installations and for Helm CLI installations that use `helm template` then `kubectl apply`, additional configuration is required to get application status data. For more information, see [Enabling and Understanding Application Status](/vendor/insights-app-status).
+
+* To view resource status details for an instance on the **Instance details** page, the Replicated SDK must be installed in the cluster alongside the application. For more information, see [View Resource Status Insights](insights-app-status#resource-status) in _Enabling and Understanding Application Status_.
+
+* There are additional requirements for collecting telemetry from air gap instances. For more information, see [Collecting Telemetry for Air Gap Instances](/vendor/telemetry-air-gap).
+
+## Limitations
+
+The Vendor Portal has the following limitations for reporting instance data and generating events:
+
+* **Active instances**: Instance data is available for _active_ instances. An instance is considered inactive when its most recent check-in was more than 24 hours ago. An instance can become inactive if it is decommissioned, stops checking for updates, or otherwise stops reporting.
+
+  The Vendor Portal continues to display data for an inactive instance from its most recently seen state. This means that data for an inactive instance might continue to show a Ready status after the instance becomes inactive. Replicated recommends that you use the timestamp in the **Last Check-in** field to understand if an instance might have become inactive, causing its data to be out of date.
+* **Air gap instances**: There are additional limitations for air gap telemetry. For more information, see [Collecting Telemetry for Air Gap Instances](/vendor/telemetry-air-gap).
+* **Instance data freshness**: The rate at which data is updated in the Vendor Portal varies depending on how often the Vendor Portal receives instance data.
+* **Event timestamps**: The timestamp of events displayed on the **Instance details** page is the timestamp when the Replicated Vendor API received the data from the instance. The timestamp of events does not necessarily reflect the timestamp of when the event occurred.
+* **Caching for kURL cluster data**: For clusters created with Replicated kURL (embedded clusters), KOTS stores the counts of total nodes and ready nodes in a cache for five minutes. If KOTS sends instance data to the Vendor Portal within the five-minute window, then the reported data for total nodes and ready nodes reflects the data in the cache. This means that events displayed on the **Instance details** page for the total nodes and ready nodes can show values that differ from the current values of these fields.
+
+================
+File: docs/vendor/instance-notifications-config.mdx
+================
+import NotificationsAbout from "../partials/instance-insights/_notifications-about.mdx"
+
+# Configuring Instance Notifications (Beta)
+
+<NotificationsAbout/>
+
+This topic describes how to configure Slack or email notifications in the Replicated Vendor Portal for instances of your application.
+
+For information about creating and managing instance notifications with the Vendor API v3, see the [notifications](https://replicated-vendor-api.readme.io/reference/subscribeinstanceevents) section in the Vendor API v3 documentation.
+
+## Overview
+
+Teams can receive notifications about customer instances through a Slack channel. Individual users can also receive email notifications.
+
+Instance notifications can be disabled when they are no longer needed. For example, a team member can turn off their email notifications for a customer instance when they are no longer responsible for supporting that customer.
+
+## Prerequisite
+
+For Slack notifications, you must configure a Slack webhook in the Vendor Portal at the Team level before you can turn on instance notifications. For more information, see [Configuring a Slack Webhook (Beta)](team-management-slack-config).
+
+For email notifications, no prior configuration is required. The email address listed in your Vendor Portal account settings is used.
+
+## Configure Notifications
+
+Follow this procedure to configure Slack or email notifications for application instances. You can enable notifications for application status changes, system events such as Kubernetes upgrades, or changes in the values of any custom metrics configured for the application.
+
+To configure notifications:
+
+1. Go to **Applications > Customers**, and click an active customer instance that you want to receive notifications for.
+
+   <img src="/images/customer-instances.png" alt="Customer instances list in the Vendor Portal" width="600"/>
+
+1. On the Instance Details page, click **Notifications**.
+
+   <img width="600px" src="/images/instance-notifications.png" />
+
+1. From the **Configure Instance Notifications** dialog, select the types of notifications to enable.
+
+   ![Configure Instance Notifications dialog](/images/instance-notifications-dialog.png)
+
+   [View a larger version of this image](/images/instance-notifications-dialog.png)
+
+1. Click **Save**.
+
+1. Repeat these steps to configure notifications for other application instances.
+
+## Test Notifications
+
+After you enable notifications for a running development instance, test that your notifications are working as expected.
+
+Do this by forcing your application into a non-ready state. For example, you can delete one or more application Pods and wait for a ReplicationController to recreate them.
+
+Then, look for notifications in the assigned Slack channel. You also receive an email if you enabled email notifications.
+
+:::note
+There is a 30-second buffer between event detection and notifications being sent. This buffer provides better roll-ups and reduces noise.
+:::
+
+================
+File: docs/vendor/kots-faq.mdx
+================
+import SDKOverview from "../partials/replicated-sdk/_overview.mdx"
+import EmbeddedKubernetes from "../partials/kots/_embedded-kubernetes-definition.mdx"
+import Helm from "../partials/helm/_helm-definition.mdx"
+import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"
+
+# Replicated FAQs
+
+This topic lists frequently asked questions (FAQs) for different components of the Replicated Platform.
+
+## Getting Started FAQs
+
+### What are the supported application packaging options?
+
+Replicated strongly recommends that all applications are packaged using Helm.
+
+<Helm/>
+
+Many enterprise customers expect to be able to install an application with Helm in their own cluster. Packaging with Helm allows you to support installation with the Helm CLI and with the Replicated installers (Replicated Embedded Cluster and Replicated KOTS) from a single release in the Replicated Platform.
+
+For vendors that do not want to use Helm, applications distributed with Replicated can be packaged as Kubernetes manifest files.
+
+### How do I get started with Replicated?
+
+Replicated recommends that new users start by completing one or more labs or tutorials to get familiar with the processes of creating, installing, and iterating on releases for an application with the Replicated Platform.
+
+Then, when you are ready to begin onboarding your own application to the Replicated Platform, see [Replicated Onboarding](replicated-onboarding) for a list of Replicated features to begin integrating.
+
+#### Labs
+
+The following labs in Instruqt provide a hands-on introduction to working with Replicated features, without needing your own sample application or development environment:
+
+* [Distributing Your Application with Replicated](https://play.instruqt.com/embed/replicated/tracks/distributing-with-replicated?token=em_VHOEfNnBgU3auAnN): Learn how to quickly get value from the Replicated Platform for your application.
+* [Delivering Your Application as a Kubernetes Appliance](https://play.instruqt.com/embed/replicated/tracks/delivering-as-an-appliance?token=em_lUZdcv0LrF6alIa3): Use Embedded Cluster to distribute Kubernetes and an application together as a single appliance.
+* [Avoiding Installation Pitfalls](https://play.instruqt.com/embed/replicated/tracks/avoiding-installation-pitfalls?token=em_gJjtIzzTTtdd5RFG): Learn how to use preflight checks to avoid common installation issues and assure your customer is installing into a supported environment.
+* [Closing the Support Information Gap](https://play.instruqt.com/embed/replicated/tracks/closing-information-gap?token=em_MO2XXCz3bAgwtEca): Learn how to use support bundles to close the information gap between your customers and your support team.
+* [Protecting Your Assets](https://play.instruqt.com/embed/replicated/tracks/protecting-your-assets?token=em_7QjY34G_UHKoREBd): Assure your customers have the right access to your application artifacts and features using Replicated licensing.
+
+#### Tutorials
+
+The following getting started tutorials demonstrate how to integrate key Replicated features with a sample Helm chart application:
+* [Install a Helm Chart on a VM with Embedded Cluster](/vendor/tutorial-embedded-cluster-setup): Create a release that can be installed on a VM with the Embedded Cluster installer.
+* [Install a Helm Chart with KOTS and the Helm CLI](/vendor/tutorial-kots-helm-setup): Create a release that can be installed with both the KOTS installer and the Helm CLI.
+* [Set Helm Chart Values with KOTS](/vendor/tutorial-config-setup): Configure the Admin Console Config screen to collect user-supplied values.
+* [Add Preflight Checks to a Helm Chart](/vendor/tutorial-preflight-helm-setup): Create preflight checks for your application by adding a spec for preflight checks to a Secret in the Helm templates.
+
+### What are air gap installations?
+
+_Air gap_ refers to a computer or network that does not have outbound internet access. Air-gapped environments are common for enterprises that require high security, such as government agencies or financial institutions.
+
+Traditionally, air-gapped systems are physically isolated from the network. For example, an air-gapped server might be stored in a separate location away from network-connected servers. Physical access to air-gapped servers is often restricted as well.
+
+It is also possible to use _virtual_ or _logical_ air gaps, in which security controls such as firewalls, role-based access control (RBAC), and encryption are used to logically isolate a device from a network. In this way, network access is still restricted, but there is not a physical air gap that disconnects the device from the network.
+
+Replicated supports installations into air-gapped environments. In an air gap installation, users first download the images and other assets required for installation on an internet-connected device. These installation assets are usually provided in an _air gap bundle_ that ISVs can build in the Replicated Vendor Portal. Then, users transfer the installation assets to their air-gapped machine where they can push the images to an internal private registry and install.
+
+For more information, see:
+* [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap)
+* [Installing and Updating with Helm in Air Gap Environments](/vendor/helm-install-airgap)
+
+### What is the Commercial Software Distribution Lifecycle?
+
+Commercial software distribution is the business process that independent software vendors (ISVs) use to enable enterprise customers to self-host a fully private instance of the vendor's application in an environment controlled by the customer.
+
+Replicated has developed the Commercial Software Distribution Lifecycle to represent the stages that are essential for every company that wants to deliver their software securely and reliably to customer-controlled environments.
+
+This lifecycle was inspired by the DevOps lifecycle and the Software Development Lifecycle (SDLC), but it focuses on the unique requirements of successfully distributing commercial software to tens, hundreds, or thousands of enterprise customers.
+
+The phases are:
+* Develop
+* Test
+* Release
+* License
+* Install
+* Report
+* Support
+
+For more information about the Replicated features that enhance each phase of the lifecycle, see [Introduction to Replicated](../intro-replicated).
+
+## Compatibility Matrix FAQs
+
+### What types of clusters can I create with Compatibility Matrix?
+
+You can use Compatibility Matrix to get kubectl access to running clusters within minutes or less. Compatibility Matrix supports a variety of VM and cloud distributions, including Red Hat OpenShift, Replicated Embedded Cluster, and Oracle Container Engine for Kubernetes (OKE). For a complete list, see [Supported Compatibility Matrix Cluster Types](/vendor/testing-supported-clusters).
+
+### How does billing work?
+
+Clusters created with Compatibility Matrix are billed by the minute. Per-minute billing begins when the cluster reaches a running status and ends when the cluster is deleted. For more information, see [Billing and Credits](/vendor/testing-about#billing-and-credits).
+
+### How do I buy credits?
+
+To create clusters with Compatibility Matrix, you must have credits in your Vendor Portal account. If you have a contract, you can purchase credits by logging in to the Vendor Portal and going to **[Compatibility Matrix > Buy additional credits](https://vendor.replicated.com/compatibility-matrix)**. Otherwise, to request credits, log in to the Vendor Portal and go to **[Compatibility Matrix > Request more credits](https://vendor.replicated.com/compatibility-matrix)**.
+
+### How do I add Compatibility Matrix to my CI/CD pipelines?
+
+You can use Replicated CLI commands to integrate Compatibility Matrix into your CI/CD development and production workflows. This allows you to programmatically create multiple different types of clusters where you can deploy and test your application before releasing.
+
+For more information, see [About Integrating with CI/CD](/vendor/ci-overview).
+
+## KOTS and Embedded Cluster FAQs
+
+### What is the Admin Console?
+
+The Admin Console is the user interface deployed by the Replicated KOTS installer. Users log in to the Admin Console to configure and install the application. Users also access the Admin Console after installation to complete application management tasks such as performing updates, syncing their license, and generating support bundles. For installations with Embedded Cluster, the Admin Console also includes a **Cluster Management** tab where users can manage the nodes in the cluster.
+
+The Admin Console is available in installations with Replicated Embedded Cluster and Replicated KOTS.
+
+The following shows an example of the Admin Console dashboard for an Embedded Cluster installation of an application named "Gitea":
+
+<img src="/images/gitea-ec-ready.png" width="800px" alt="admin console dashboard"/>
+
+[View a larger version of this image](/images/gitea-ec-ready.png)
+
+### How do Embedded Cluster installations work?
+
+To install with Embedded Cluster, users first download and extract the Embedded Cluster installation assets for the target application release on their VM or bare metal server. Then, they run an Embedded Cluster installation command to provision the cluster. During installation, Embedded Cluster also installs Replicated KOTS in the cluster, which deploys the Admin Console.
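+
+For illustration, the installation commands typically follow the pattern in the sketch below, where `APP_SLUG`, `CHANNEL`, and `LICENSE_ID` are placeholders for the application slug, the release channel, and the customer's license ID. The exact, customer-specific commands are provided in the Vendor Portal:
+
+```bash
+# Sketch of a typical Embedded Cluster installation
+# (APP_SLUG, CHANNEL, and LICENSE_ID are placeholders)
+
+# Download and extract the installation assets for the release
+curl -f "https://replicated.app/embedded/APP_SLUG/CHANNEL" \
+  -H "Authorization: LICENSE_ID" \
+  -o APP_SLUG-CHANNEL.tgz
+tar -xvzf APP_SLUG-CHANNEL.tgz
+
+# Provision the cluster, install KOTS, and deploy the Admin Console
+sudo ./APP_SLUG install --license license.yaml
+```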
+
+After the installation command finishes, users log in to the Admin Console to provide application configuration values, optionally join more nodes to the cluster, run preflight checks, and deploy the application.
+
+Customer-specific Embedded Cluster installation instructions are provided in the Replicated Vendor Portal. For more information, see [Installing with Embedded Cluster](/enterprise/installing-embedded).
+
+### Does Replicated support installations into air gap environments?
+
+Yes. The Embedded Cluster and KOTS installers support installation in _air gap_ environments with no outbound internet access.
+
+To support air gap installations, vendors can build air gap bundles for their application in the Vendor Portal that contain all the required assets for a specific release of the application. Additionally, Replicated provides bundles that contain the assets for the Replicated installers.
+
+For more information about how to install with Embedded Cluster and KOTS in air gap environments, see [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap) and [Air Gap Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster-airgapped).
+
+### Can I deploy Helm charts with KOTS?
+
+Yes. An application deployed with KOTS can use one or more Helm charts, can include Helm charts as components, and can use more than a single instance of any Helm chart. Each Helm chart requires a unique HelmChart custom resource (`apiVersion: kots.io/v1beta2`) in the release.
+
+For more information, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about).
+
+### What's the difference between Embedded Cluster and kURL?
+
+Replicated Embedded Cluster is a successor to Replicated kURL. Compared to kURL, Embedded Cluster offers significantly faster installation, updates, and node joins, a redesigned Admin Console UI, improved support for multi-node clusters, one-click updates that update the application and the cluster at the same time, and more.
+
+<KurlAvailability/>
+
+For more information, see [Embedded Cluster Overview](/vendor/embedded-overview).
+
+### How do I enable Embedded Cluster and KOTS installations for my application?
+
+Releases that support installation with KOTS include the manifests required by KOTS to define the Admin Console experience and install the application.
+
+In addition to the KOTS manifests, releases that support installation with Embedded Cluster also include the Embedded Cluster Config. The Embedded Cluster Config defines aspects of the cluster that will be provisioned and also sets the version of KOTS that will be installed.
+
+For more information, see [Embedded Cluster Overview](/vendor/embedded-overview).
+
+### Can I use my own branding?
+
+The KOTS Admin Console and the Replicated Download Portal support the use of a custom logo. Additionally, software vendors can use custom domains to alias the endpoints for Replicated services.
+
+For more information, see [Customizing the Admin Console and Download Portal](/vendor/admin-console-customize-app-icon) and [About Custom Domains](custom-domains).
+
+## Replicated SDK FAQs
+
+### What is the SDK?
+
+<SDKOverview/>
+
+### Is the SDK supported in air gap environments?
+
+Yes. The Replicated SDK has an _air gap mode_ that allows it to run in environments with no outbound internet access. When installed in air gap mode, the SDK does not attempt to connect to the internet.
This avoids any failures that would occur when the SDK is unable to make outbound requests in air gap environments. + +For more information, see [Installing the SDK in Air Gap Environments](/vendor/replicated-sdk-airgap). + +### How do I develop against the SDK API? + +You can use the Replicated SDK in _integration mode_ to develop locally against the SDK API without needing to make real changes in the Replicated Vendor Portal or in your environment. + +For more information, see [Developing Against the SDK API](/vendor/replicated-sdk-development). + +### How does the Replicated SDK work with KOTS? + +The Replicated SDK is a Helm chart that can be installed as a small service alongside an application, or as a standalone component. The SDK can be installed using the Helm CLI or KOTS. + +Replicated recommends that all applications include the SDK because it provides access to key functionality not available through KOTS, such as support for sending custom metrics from application instances. When both the SDK and KOTS are installed in a cluster alongside an application, both send instance telemetry to the Vendor Portal. + +For more information about the SDK installation options, see [Installing the Replicated SDK](/vendor/replicated-sdk-installing). + +## Vendor Portal FAQs + +### How do I add and remove team members? + +Admins can add, remove, and manage team members from the Vendor Portal. For more information, see [Managing Team Members](/vendor/team-management). + +### How do I manage RBAC policies for my team members? + +By default, every team has two policies created automatically: Admin and Read Only. If you have an Enterprise plan, you will also have the Sales and Support policies created automatically. These default policies are not configurable. + +You can also configure custom RBAC policies if you are on the Enterprise pricing plan. Creating custom RBAC policies lets you limit which areas of the Vendor Portal are accessible to team members, and control read and read/write privileges to groups based on their role. + +For more information, see [Configuring RBAC Policies](/vendor/team-management-rbac-configuring). + +### Can I alias Replicated endpoints? + +Yes. Replicated supports the use of custom domains to alias the endpoints for Replicated services, such as the Replicated app service and the Replicated proxy registry. + +Replicated domains are external to your domain and can require additional security reviews by your customer. Using custom domains as aliases can bring the domains inside an existing security review and reduce your exposure. + +For more information, see [Using Custom Domains](/vendor/custom-domains-using). + +### How does Replicated collect telemetry from instances of my application? + +For instances running in online (internet-connected) customer environments, either Replicated KOTS or the Replicated SDK periodically sends a small amount of data to the Vendor Portal, depending on which is installed in the cluster alongside the application. If both KOTS and the SDK are installed in the cluster, then both send instance data. + +For air gap instances, Replicated KOTS and the Replicated SDK collect and store instance telemetry in a Kubernetes Secret in the customer environment. The telemetry stored in the Secret is collected when a support bundle is generated in the environment. 
When the support bundle is uploaded to the Vendor Portal, the telemetry is associated with the correct customer and instance ID, and the Vendor Portal updates the instance insights and event data accordingly.

For more information, see [About Instance and Event Data](/vendor/instance-insights-event-data).

================
File: docs/vendor/kurl-about.mdx
================
import KurlDefinition from "../partials/kurl/_kurl-definition.mdx"
import Installers from "../partials/kurl/_installers.mdx"
import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"

# Introduction to kURL

<KurlAvailability/>

This topic provides an introduction to the Replicated kURL installer, including information about kURL specifications and installations.

:::note
The Replicated KOTS entitlement is required to install applications with KOTS and kURL. For more information, see [Pricing](https://www.replicated.com/pricing) on the Replicated website.
:::

## Overview

<KurlDefinition/>

### kURL Installers

<Installers/>

To distribute a kURL installer alongside your application, you can promote the installer to a channel or include the installer as a manifest file within a given release. For more information about creating kURL installers, see [Creating a kURL Installer](/vendor/packaging-embedded-kubernetes).

### kURL Installations

To install with kURL, users run a kURL installation script on their VM or bare metal server to provision a cluster.

When the KOTS add-on is included in the kURL installer spec, the kURL installation script installs the KOTS CLI and KOTS Admin Console in the cluster. After the installation script completes, users can access the Admin Console at the URL provided in the output of the command to configure and deploy the application with KOTS.

The following shows an example of the output of the kURL installation script:

```bash
	Installation
	Complete ✔

Kotsadm: http://10.128.0.35:8800
Login with password (will not be shown again): 3Hy8WYYid

This password has been set for you by default. It is recommended that you change
this password; this can be done with the following command:
kubectl kots reset-password default
```

kURL installations are supported in online (internet-connected) and air gap environments.

For information about how to install applications with kURL, see [Online Installation with kURL](/enterprise/installing-kurl).

## About the Open Source kURL Documentation

The open source documentation for the kURL project is available at [kurl.sh](https://kurl.sh/docs/introduction/).

The open source kURL documentation contains additional information including kURL installation options, kURL add-ons, and procedural content such as how to add and manage nodes in kURL clusters. Software vendors can use the open source kURL documentation to find detailed reference information when creating kURL installer specs or testing installation.

================
File: docs/vendor/kurl-nodeport-services.mdx
================
import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"

# Exposing Services Using NodePorts

<KurlAvailability/>

This topic describes how to expose NodePort services in [Replicated Embedded Cluster](/vendor/embedded-overview) or [Replicated kURL](/vendor/kurl-about) installations on VMs or bare metal servers.

## Overview

For installations into existing clusters, KOTS automatically creates a port forward tunnel to expose the Admin Console.
Unlike installations into existing clusters, KOTS does _not_ automatically open the port forward tunnel for installations in embedded clusters provisioned on virtual machines (VMs) or bare metal servers. This is because it cannot be verified that the ports are secure and authenticated. For more information about the KOTS port forward tunnel, see [Port Forwarding Services with KOTS](/vendor/admin-console-port-forward).

Instead, to expose the Admin Console in installations with [Embedded Cluster](/vendor/embedded-overview) or [kURL](/vendor/kurl-about), KOTS creates the Admin Console as a NodePort service so it can be accessed at the node's IP address on a node port (port 8800 for kURL installations and port 30000 for Embedded Cluster installations). Additionally, for kURL installations, the UIs of Prometheus, Grafana, and Alertmanager are also exposed using NodePorts.

For installations on VMs or bare metal servers where your application must be accessible from the user's local machine rather than from inside the cluster, you can expose application services as NodePorts to provide access to the application after installation.

## Add a NodePort Service

Services with `type: NodePort` can be contacted from outside the cluster by connecting to any node using the appropriate protocol and port. For more information about working with the NodePort service type, see [type: NodePort](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport) in the Kubernetes documentation.

The following shows an example of a NodePort type service:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: sentry
  labels:
    app: sentry
spec:
  type: NodePort
  ports:
  - port: 9000
    targetPort: 9000
    nodePort: 9000
    protocol: TCP
    name: sentry
  selector:
    app: sentry
    role: web
```

After configuring a NodePort service for your application, you can add a link to the service on the Admin Console dashboard where it can be accessed by users after the application is installed. For more information, see [About Accessing NodePort Services](#about-accessing-nodeport-services) below.

### Use KOTS Annotations to Conditionally Deploy NodePort Services

You can use the KOTS [`kots.io/when`](/vendor/packaging-include-resources#kotsiowhen) annotation to conditionally deploy a service. This is useful when you want to deploy a ClusterIP or LoadBalancer service for existing cluster installations, and deploy a NodePort service for Embedded Cluster or kURL installations.

To conditionally deploy a service based on the installation method, you can use the following KOTS template functions in the `kots.io/when` annotation:
* [IsKurl](/reference/template-functions-static-context#iskurl): Detects kURL installations. For example, `repl{{ IsKurl }}` returns true for kURL installations, and `repl{{ not IsKurl }}` returns true for non-kURL installations.
* [Distribution](/reference/template-functions-static-context#distribution): Returns the distribution of the cluster where KOTS is running. For example, `repl{{ eq Distribution "embedded-cluster" }}` returns true for Embedded Cluster installations and `repl{{ ne Distribution "embedded-cluster" }}` returns true for non-Embedded Cluster installations.

For example, the following `sentry` service with `type: NodePort` includes the annotation `kots.io/when: repl{{ eq Distribution "embedded-cluster" }}`.
This creates a NodePort service _only_ when installing with Embedded Cluster:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: sentry
  labels:
    app: sentry
  annotations:
    # This annotation ensures that the NodePort service
    # is only created in Embedded Cluster installations
    kots.io/when: repl{{ eq Distribution "embedded-cluster" }}
spec:
  type: NodePort
  ports:
  - port: 9000
    targetPort: 9000
    nodePort: 9000
    protocol: TCP
    name: sentry
  selector:
    app: sentry
    role: web
```

Similarly, to ensure that a `sentry` service with `type: ClusterIP` is only created in existing cluster installations, add the annotation `kots.io/when: repl{{ ne Distribution "embedded-cluster" }}` under `metadata.annotations` in the ClusterIP specification:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: sentry
  labels:
    app: sentry
  annotations:
    # This annotation ensures that the ClusterIP service
    # is only created in existing cluster installations
    kots.io/when: repl{{ ne Distribution "embedded-cluster" }}
spec:
  type: ClusterIP
  ports:
  - port: 9000
    targetPort: 9000
    protocol: TCP
    name: sentry
  selector:
    app: sentry
    role: web
```

## About Accessing NodePort Services

This section describes providing access to NodePort services after installation.

### VM Firewall Requirements

To access the Admin Console and any NodePort services for your application, the firewall for the VM where the user installs must allow HTTP traffic and allow inbound traffic from the user's workstation to the port where the service is exposed. Users can consult their cloud provider's documentation for more information about updating firewall rules.

### Add a Link on the Admin Console Dashboard {#add-link}

You can provide a link to a NodePort service on the Admin Console dashboard by configuring the `links` array in the Kubernetes SIG Application custom resource. This provides users with an easy way to access the application after installation. For more information, see [Adding Links to the Dashboard](admin-console-adding-buttons-links).

For example:

<img alt="Admin Console dashboard with Open App link" src="/images/gitea-open-app.png" width="700px"/>

[View a larger version of this image](/images/gitea-open-app.png)

================
File: docs/vendor/kurl-reset.mdx
================
import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"

# Resetting a kURL Cluster

<KurlAvailability/>

This topic describes how to use the kURL `reset` command to reset a kURL cluster.

## Overview

If you need to reset a kURL installation, such as when you are testing releases with kURL, you can use the kURL `tasks.sh` `reset` command to remove Kubernetes from the system.

Alternatively, you can discard your current VM (if you are using one) and recreate the VM with a new OS to reinstall with kURL.

For more information about the `reset` command, see [Resetting a Node](https://kurl.sh/docs/install-with-kurl/managing-nodes#reset-a-node) in the kURL documentation.

To reset a kURL installation:

1. Access the machine where you installed with kURL.

1. Run the following command to remove Kubernetes from the system:

   ```
   curl -sSL https://k8s.kurl.sh/latest/tasks.sh | sudo bash -s reset
   ```

1. Follow the instructions in the output of the command to manually remove any files that the `reset` command does not remove.

If the `reset` command is unsuccessful, discard your current VM, and recreate the VM with a new OS to reinstall the Admin Console and the application.

================
File: docs/vendor/licenses-about-types.md
================
# About Community Licenses

This topic describes community licenses. For more information about other types of licenses, see [Customer Types](licenses-about#customer-types) in _About Customers_.

## Overview

Community licenses are intended for use with a free or low-cost version of your application. For example, you could use community licenses for an open source version of your application.

After installing an application with a community license, users can replace their community license with a new license of a different type without having to completely reinstall the application. This means that, if you have several community users who install with the same license, then you can upgrade a single community user without editing the license for all community users.

Community licenses are supported for applications that are installed with Replicated KOTS or with the Helm CLI.

For applications installed with KOTS, community license users can upload a new license file of a different type in the Replicated admin console. For more information, see [Upgrade from a Community License](/enterprise/updating-licenses#upgrade-from-a-community-license) in _Updating Licenses in the Admin Console_.

## Limitations

Community licenses function in the same way as the other types of licenses, with the following
exceptions:

* Updating a community license to another type of license cannot be reverted.
* Community license users are not supported by the Replicated Support team.
* Community licenses cannot support air gap installations.
* Community licenses cannot include an expiration date.

## Community License Admin Console Branding

For applications installed with KOTS, the branding in the admin console for community users differs in the following ways:

* The license tile on the admin console **Dashboard** page is highlighted in yellow and labeled **Community Edition**.

  ![Community License Dashboard](/images/community-license-dashboard.png)

  [View a larger version of this image](/images/community-license-dashboard.png)

* All support bundles and analysis in the admin console are clearly marked as **Community Edition**.

  ![Community License Support Bundle](/images/community-license-bundle.png)

  [View a larger version of this image](/images/community-license-bundle.png)

================
File: docs/vendor/licenses-about.mdx
================
import ChangeChannel from "../partials/customers/_change-channel.mdx"

# About Customers and Licensing

This topic provides an overview of customers and licenses in the Replicated Platform.

## Overview

The licensing features of the Replicated Platform allow vendors to securely grant access to software, making license agreements available to the application in end customer environments at startup and runtime.

The Replicated Vendor Portal also allows vendors to create and manage customer records. Each customer record includes several fields that uniquely identify the customer and the application, specify the customer's assigned release channel, and define the customer's entitlements.

Vendors can use these licensing features to enforce entitlements such as license expiration dates, and to track and report on software usage for the purpose of surfacing insights to both internal teams and customers.

The following diagram provides an overview of licensing with the Replicated Platform:

![App instance communicates with the Replicated licensing server](/images/licensing-overview.png)

[View a larger version of this image](/images/licensing-overview.png)

As shown in the diagram above, the Replicated license and update server manages and distributes customer license information. The license server retrieves this license information from customer records managed by vendors in the Vendor Portal.

During installation or upgrade, the customer's license ID is used to authenticate with the license server. The license ID also provides authentication for the Replicated proxy registry, securely granting proxy access to images in the vendor's external registry.

The license server is identified with a CNAME record where it can be accessed from end customer environments. When running alongside an application in a customer environment, the Replicated SDK retrieves up-to-date customer license information from the license server during runtime. The in-cluster SDK API `/license/` endpoints can be used to get customer license information on-demand, allowing vendors to programmatically enforce and report on license agreements.

Vendors can also integrate internal Customer Relationship Management (CRM) tools such as Salesforce with the Replicated Platform so that any changes to a customer's entitlements are automatically reflected in the Vendor Portal. This ensures that updates to license agreements are reflected in the customer environment in real time.

## About Customers

Each customer that you create in the Replicated Vendor Portal has a unique license ID. Your customers use their license when they install or update your application.

You assign customers to channels in the Vendor Portal to control their access to your application releases. Customers can install or upgrade to releases that are promoted to the channel they are assigned to. For example, assigning a customer to your Beta channel allows that customer to install or upgrade only to releases promoted to the Beta channel.

Each customer license includes several fields that uniquely identify the customer and the application, specify the customer's assigned release channel, and define the customer's entitlements, such as whether the license has an expiration date or what application functionality the customer can access. Replicated securely delivers these entitlements to the application and makes them available at installation or at runtime.

For more information about how to create and manage customers, see [Creating and Managing Customers](releases-creating-customer).

### Customer Channel Assignment {#channel-assignment}

<ChangeChannel/>

For example, if the latest release promoted to the Beta channel is version 1.25.0 and version 1.10.0 is marked as required, when you edit an existing customer to assign them to the Beta channel, then the KOTS Admin Console always fetches 1.25.0, even though 1.10.0 is marked as required. The required release 1.10.0 is ignored and is not available to the customer for upgrade.

For more information about how to mark a release as required, see [Properties](releases-about#properties) in _About Channels and Releases_.
For more information about how to synchronize licenses in the Admin Console, see [Updating Licenses in the Admin Console](/enterprise/updating-licenses).

### Customer Types

Each customer is assigned one of the following types:

* **Development**: The Development type can be used internally by the development
team for testing and integration.
* **Trial**: The Trial type can be used for customers who are on 2-4 week trials
of your software.
* **Paid**: The Paid type identifies the customer as a paying customer for which
additional information can be provided.
* **Community**: The Community type is designed for a free or low-cost version of your application. For more details about this type, see [Community Licenses](licenses-about-types).
* (Beta) **Single Tenant Vendor Managed**: The Single Tenant Vendor Managed type is for customers for whom your team is operating the application in infrastructure you fully control and operate. Single Tenant Vendor Managed licenses are free to use, but come with limited support. The Single Tenant Vendor Managed type is a Beta feature. Reach out to your Replicated account representative to get access.

Except for Community licenses, the license type is used solely for reporting purposes and a customer's access to your application is not affected by the type that you assign.

You can change the type of a license at any time in the Vendor Portal. For example, if a customer upgraded from a trial to a paid account, then you could change their license type from Trial to Paid for reporting purposes.

### About Managing Customers

Each customer record in the Vendor Portal has built-in fields and also supports custom fields:
* The built-in fields include values such as the customer name, customer email, and the license expiration date. You can optionally set initial values for the built-in fields so that each new customer created in the Vendor Portal starts with the same set of values.
* You can also create custom fields to define entitlements for your application. For example, you can create a custom field to set the number of active users permitted.

For more information, see [Managing Customer License Fields](/vendor/licenses-adding-custom-fields).

You can make changes to a customer record in the Vendor Portal at any time. The license ID, which is the unique identifier for the customer, never changes. For more information about managing customers in the Vendor Portal, see [Creating and Managing Customers](releases-creating-customer).

### About the Customers Page

The following shows an example of the **Customers** page:

![Customers page](/images/customers-page.png)

[View a larger version of this image](/images/customers-page.png)

From the **Customers** page, you can do the following:

* Create new customers.

* Download CSVs with customer and instance data.

* Search and filter customers.

* Click the **Manage customer** button to edit details such as the customer name and email, the custom license fields assigned to the customer, and the license expiration policy. For more information, see [Creating and Managing Customers](releases-creating-customer).

* Download the license file for each customer.

* Click the **Customer reporting** button to view data about the active application instances associated with each customer. For more information, see [Customer Reporting](customer-reporting).

* View instance details for each customer, including the version of the application that the instance is running, the Kubernetes distribution of the cluster, the last check-in time, and more:

  <img width="800px" src="/images/customer-reporting-details.png" />

  [View a larger version of this image](/images/customer-reporting-details.png)

* Archive customers. For more information, see [Creating and Managing Customers](releases-creating-customer).

* Click on a customer on the **Customers** page to access the following customer-specific pages:
  * [Reporting](#about-the-customer-reporting-page)
  * [Manage customer](#about-the-manage-customer-page)
  * [Support bundles](#about-the-customer-support-bundles-page)

### About the Customer Reporting Page

The **Reporting** page for a customer displays data about the active application instances associated with that customer. The following shows an example of the **Reporting** page for a customer that has two active application instances:

![Customer reporting page in the Vendor Portal](/images/customer-reporting-page.png)
[View a larger version of this image](/images/customer-reporting-page.png)

For more information about interpreting the data on the **Reporting** page, see [Customer Reporting](customer-reporting).

### About the Manage Customer Page

The **Manage customer** page for a customer displays details about the customer license, including the customer name and email, the license expiration policy, custom license fields, and more.

The following shows an example of the **Manage customer** page:

![Manage customer page in the Vendor Portal](/images/customer-details.png)
[View a larger version of this image](/images/customer-details.png)

From the **Manage customer** page, you can view and edit the customer's license fields or archive the customer. For more information, see [Creating and Managing Customers](releases-creating-customer).

### About the Customer Support Bundles Page

The **Support bundles** page for a customer displays details about the support bundles collected from the customer. Customers with the **Support Bundle Upload Enabled** entitlement can provide support bundles through the KOTS Admin Console, or you can upload support bundles manually in the Vendor Portal by going to **Troubleshoot > Upload a support bundle**. For more information about uploading and analyzing support bundles, see [Inspecting Support Bundles](support-inspecting-support-bundles).

The following shows an example of the **Support bundles** page:

![Support bundles page in the Vendor Portal](/images/customer-support-bundles.png)
[View a larger version of this image](/images/customer-support-bundles.png)

As shown in the screenshot above, the **Support bundles** page lists details about the collected support bundles, such as the date the support bundle was collected and the debugging insights found. You can click on a support bundle to view it in the **Support bundle analysis** page. You can also click **Delete** to delete the support bundle, or click **Customer Reporting** to view the **Reporting** page for the customer.

## About Licensing with Replicated

### About Syncing Licenses

When you edit customer licenses for an application installed with a Replicated installer (Embedded Cluster, KOTS, kURL), your customers can use the KOTS Admin Console to get the latest license details from the Vendor Portal, then deploy a new version that includes the license changes.
Deploying a new version with the license changes ensures that any license fields that you have templated in your release using [KOTS template functions](/reference/template-functions-about) are rendered with the latest license details.

For online instances, KOTS pulls license details from the Vendor Portal when:
* A customer clicks **Sync license** in the Admin Console.
* An automatic or manual update check is performed by KOTS.
* An update is performed with Replicated Embedded Cluster. See [Performing Updates with Embedded Cluster](/enterprise/updating-embedded).
* An application status changes. See [Current State](instance-insights-details#current-state) in _Instance Details_.

For more information, see [Updating Licenses in the Admin Console](/enterprise/updating-licenses).

### About Syncing Licenses in Air-Gapped Environments

To update licenses in air gap installations, customers need to upload the updated license file to the Admin Console.

After you update the license fields in the Vendor Portal, you can notify customers by either sending them a new license file or instructing them to log in to their Download Portal to download the new license.

For more information, see [Updating Licenses in the Admin Console](/enterprise/updating-licenses).

### Retrieving License Details with the SDK API

The [Replicated SDK](replicated-sdk-overview) includes an in-cluster API that can be used to retrieve up-to-date customer license information from the Vendor Portal during runtime through the [`license`](/reference/replicated-sdk-apis#license) endpoints. This means that you can add logic to your application to get the latest license information without the customer needing to perform a license update. The SDK API polls the Vendor Portal for updated data every four hours.

In KOTS installations that include the SDK, users need to update their licenses from the Admin Console as described in [About Syncing Licenses](#about-syncing-licenses) above. However, any logic in your application that uses the SDK API will update the user's license information without the customer needing to deploy a license update in the Admin Console.

For information about how to use the SDK API to query license entitlements at runtime, see [Querying Entitlements with the Replicated SDK API](/vendor/licenses-reference-sdk).

### License Expiration Handling {#expiration}

The built-in `expires_at` license field defines the expiration date for a customer license. When you set an expiration date in the Vendor Portal, the `expires_at` field is encoded in ISO 8601 format (`2026-01-23T00:00:00Z`) and is set to midnight UTC at the beginning of the calendar day (`00:00:00`) on the date selected.

Replicated enforces the following logic when a license expires:
* By default, instances with expired licenses continue to run.
  To change the behavior of your application when a license expires, you can add custom logic in your application that queries the `expires_at` field using the Replicated SDK in-cluster API. For more information, see [Querying Entitlements with the Replicated SDK API](/vendor/licenses-reference-sdk).
* Expired licenses cannot log in to the Replicated registry to pull a Helm chart for installation or upgrade.
* Expired licenses cannot pull application images through the Replicated proxy registry or from the Replicated registry.
* In Replicated KOTS installations, KOTS prevents instances with expired licenses from receiving updates.
+ +### Replacing Licenses for Existing Installations + +Community licenses are the only license type that can be replaced with a new license without needing to reinstall the application. For more information, see [Community Licenses](licenses-about-types). + +Unless the existing customer is using a community license, it is not possible to replace one license with another license without reinstalling the application. When you need to make changes to a customer's entitlements, Replicated recommends that you edit the customer's license details in the Vendor Portal, rather than issuing a new license. + +================ +File: docs/vendor/licenses-adding-custom-fields.md +================ +# Managing Customer License Fields + +This topic describes how to manage customer license fields in the Replicated Vendor Portal, including how to add custom fields and set initial values for the built-in fields. + +## Set Initial Values for Built-In License Fields (Beta) + +You can set initial values to populate the **Create Customer** form in the Vendor Portal when a new customer is created. This ensures that each new customer created from the Vendor Portal UI starts with the same set of built-in license field values. + +:::note +Initial values are not applied to new customers created through the Vendor API v3. For more information, see [Create a customer](https://replicated-vendor-api.readme.io/reference/createcustomer-1) in the Vendor API v3 documentation. +::: + +These _initial_ values differ from _default_ values in that setting initial values does not update the license field values for any existing customers. + +To set initial values for built-in license fields: + +1. In the Vendor Portal, go to **License Fields**. + +1. Under **Built-in license options**, click **Edit** next to each license field where you want to set an initial value. + + ![Edit Initial Value](/images/edit-initial-value.png) + + [View a larger version of this image](/images/edit-initial-value.png) + +## Manage Custom License Fields + +You can create custom license fields in the Vendor Portal. For example, you can create a custom license field to set the number of active users permitted. Or, you can create a field that sets the number of nodes a customer is permitted on their cluster. + +The custom license fields that you create are displayed in the Vendor Portal for all new and existing customers. If the custom field is not hidden, it is also displayed to customers under the **Licenses** tab in the Replicated Admin Console. + +### Limitation + +The maximum size for a license field value is 64KB. + +### Create Custom License Fields + +To create a custom license field: + +1. Log in to the Vendor Portal and select the application. + +1. On the **License Fields** page, click **Create license field**. + + <img width="500" alt="create a new License Field dialog" src="/images/license-add-custom-field.png"/> + + [View a larger version of this image](/images/license-add-custom-field.png) + +1. Complete the following fields: + + | Field | Description | + |-----------------------|------------------------| + | Field | The name used to reference the field. This value cannot be changed. | + | Title| The display name for the field. This is how the field appears in the Vendor Portal and the Admin Console. You can change the title in the Vendor Portal. | + | Type| The field type. Supported formats include integer, string, text (multi-line string), and boolean values. This value cannot be changed. 
| Default | The default value for the field for both existing and new customers. It is a best practice to provide a default value when possible. The maximum size for a license field value is 64KB. |
| Required | If checked, this prevents the creation of customers unless this field is explicitly defined with a value. |
| Hidden | If checked, the field is not visible to your customer in the Replicated Admin Console. The field is still visible to you in the Vendor Portal. **Note**: The Hidden field is displayed only for vendors with access to the Replicated installers (KOTS, kURL, Embedded Cluster). |

### Update Custom License Fields

To update a custom license field:

1. Log in to the Vendor Portal and select the application.
1. On the **License Fields** page, click **Edit Field** on the right side of the target row. Changing the default value for a field updates the value for each existing customer record that has not overridden the default value.

   :::important
   Enabling **Is this field required?** updates the license field to be required on all new and existing customers. If you enable **Is this field required?**, you must either set a default value for the field or manually update each existing customer to provide a value for the field.
   :::

### Set Customer-Specific Values for Custom License Fields

To set a customer-specific value for a custom license field:

1. Log in to the Vendor Portal and select the application.
1. Click **Customers**.
1. For the target customer, click the **Manage customer** button.
1. Under **Custom fields**, enter values for the target custom license fields for the customer.

   :::note
   The maximum size for a license field value is 64KB.
   :::

   <img width="600" alt="Custom license fields section in the manage customer page" src="/images/customer-license-custom-fields.png"/>

   [View a larger version of this image](/images/customer-license-custom-fields.png)

### Delete Custom License Fields

Deleted license fields and their values do not appear in the customer's license in any location, including your view in the Vendor Portal, the downloaded YAML version of the license, and the Admin Console **License** screen.

By default, deleting a custom license field also deletes all of the values associated with the field in each customer record.

Only administrators can delete license fields.

:::important
Replicated recommends that you take care when deleting license fields.

Outages can occur for existing deployments if your application or the Admin Console **Config** page expect a license file to provide a required value.
:::

To delete a custom license field:

1. Log in to the Vendor Portal and select the application.
1. On the **License Fields** page, click **Edit Field** on the right side of the target row.
1. Click **Delete** on the bottom left of the dialog.
1. (Optional) Enable **Preserve License Values** to save values for the license field that were not set by the default in each customer record. Preserved license values are not visible to you or the customer.

   :::note
   If you enable **Preserve License Values**, you can create a new field with the same name and `type` as the deleted field to reinstate the preserved values.
   :::

1. Follow the instructions in the dialog and click **Delete**.
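
For context on what a deletion removes, the following is a minimal, hypothetical sketch of where a custom field (here, a `node_count` field) appears under `spec.entitlements` in the downloaded YAML version of a customer license. The application slug, field name, and values are illustrative only; after the field is deleted, the corresponding entry no longer appears in the license:

```yaml
# Illustrative excerpt of a downloaded customer license.
# The node_count entitlement shown here is hypothetical.
apiVersion: kots.io/v1beta1
kind: License
metadata:
  name: example-customer
spec:
  appSlug: my-app
  entitlements:
    node_count:
      title: Node Count
      description: The maximum number of nodes permitted
      value: 5
      valueType: Integer
```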
+ +================ +File: docs/vendor/licenses-download.md +================ +import AirGapLicenseDownload from "../partials/install/_airgap-license-download.mdx" + +# Downloading Customer Licenses + +This topic describes how to download a license file from the Replicated Vendor Portal. + +For information about how to download customer licenses with the Vendor API v3, see [Download a customer license file as YAML](https://replicated-vendor-api.readme.io/reference/downloadlicense) in the Vendor API v3 documentation. + +## Download Licenses + +You can download license files for your customers from the **Customer** page in the Vendor Portal. + +To download a license: + +1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Customers** page. +1. In the row for the target customer, click the **Download License** button. + + ![Download license button](/images/download-license-button.png) + + [View a larger version of this image](/images/download-license-button.png) + +## Enable and Download Air Gap Licenses {#air-gap-license} + +The **Airgap Download Enabled** license option allows KOTS to install an application without outbound internet access using the `.airgap` bundle. + +To enable the air gap entitlement and download the license: + +<AirGapLicenseDownload/> + +================ +File: docs/vendor/licenses-install-types.mdx +================ +import InstallerOnlyAnnotation from "../partials/helm/_installer-only-annotation.mdx" + +# Managing Install Types for a License + +This topic describes how to manage which installation types and options are enabled for a license. + +## Overview + +You can control which installation methods are available to each of your customers by enabling or disabling **Install types** fields in the customer's license. + +The following shows an example of the **Install types** field in a license: + +![Install types license fields](/images/license-install-types.png) + +[View a larger version of this image](/images/license-install-types.png) + +The installation types that are enabled or disabled for a license determine the following: +* The Replicated installers ([Replicated KOTS](../intro-kots), [Replicated Embedded Cluster](/vendor/embedded-overview), [Replicated kURL](/vendor/kurl-about)) that the customer's license entitles them to use +* The installation assets and/or instructions provided in the Replicated Download Portal for the customer +* The customer's KOTS Admin Console experience + +Setting the supported installation types on a per-customer basis gives you greater control over the installation method used by each customer. It also allows you to provide a more curated Download Portal experience, in that customers will only see the installation assets and instructions that are relevant to them. + +## Understanding Install Types {#install-types} + +In the customer license, under **Install types**, the **Available install types** field allows you to enable and disable different installation methods for the customer. + +You can enable one or more installation types for a license. + +The following describes each installation type available, as well as the requirements for enabling each type: + +<table> + <tr> + <th width="30%">Install Type</th> + <th width="35%">Description</th> + <th>Requirements</th> + </tr> + <tr> + <th>Existing Cluster (Helm CLI)</th> + <td><p>Allows the customer to install with Helm in an existing cluster. 
The customer does not have access to the Replicated installers (Embedded Cluster, KOTS, and kURL).</p><p>When the <strong>Helm CLI Air Gap Instructions (Helm CLI only)</strong> install option is also enabled, the Download Portal displays instructions on how to pull Helm installable images into a local repository. See <a href="#install-options">Understanding Additional Install Options</a> below.</p></td> + <td> + <p>The latest release promoted to the channel where the customer is assigned must contain one or more Helm charts. It can also include Replicated custom resources, such as the Embedded Cluster Config custom resource, the KOTS HelmChart, Config, and Application custom resources, or the Troubleshoot Preflight and SupportBundle custom resources.</p> + <InstallerOnlyAnnotation/> + </td> + </tr> + <tr> + <th>Existing Cluster (KOTS install)</th> + <td>Allows the customer to install with Replicated KOTS in an existing cluster.</td> + <td> + <ul> + <li>Your Vendor Portal team must have the KOTS entitlement</li> + <li>The latest release promoted to the channel where the customer is assigned must contain KOTS custom resources, such as the KOTS HelmChart, Config, and Application custom resources. For more information, see [About Custom Resources](/reference/custom-resource-about).</li> + </ul> + </td> + </tr> + <tr> + <th>kURL Embedded Cluster (first generation product)</th> + <td> + <p>Allows the customer to install with Replicated kURL on a VM or bare metal server.</p> + <p><strong>Note:</strong> For new installations, enable Replicated Embedded Cluster (current generation product) instead of Replicated kURL (first generation product).</p> + </td> + <td> + <ul> + <li>Your Vendor Portal team must have the kURL entitlement</li> + <li>A kURL installer spec must be promoted to the channel where the customer is assigned. For more information, see <a href="/vendor/packaging-embedded-kubernetes">Creating a kURL Installer</a>.</li> + </ul> + </td> + </tr> + <tr> + <th>Embedded Cluster (current generation product)</th> + <td>Allows the customer to install with Replicated Embedded Cluster on a VM or bare metal server.</td> + <td> + <ul> + <li>Your Vendor Portal team must have the Embedded Cluster entitlement</li> + <li>The latest release promoted to the channel where the customer is assigned must contain an Embedded Cluster Config custom resource. For more information, see <a href="/reference/embedded-config">Embedded Cluster Config</a>.</li> + </ul> + </td> + </tr> +</table> + +## Understanding Additional Install Options {#install-options} + +After enabling installation types in the **Available install types** field, you can also enable the following options in the **Additional install options** field: + +<table> + <tr> + <th width="30%">Install Type</th> + <th>Description</th> + <th>Requirements</th> + </tr> + <tr> + <th>Helm CLI Air Gap Instructions (Helm CLI only)</th> + <td><p>When enabled, a customer will see instructions on the Download Portal on how to pull Helm installable images into their local repository.</p><p><strong>Helm CLI Air Gap Instructions</strong> is enabled by default when you select the <strong>Existing Cluster (Helm CLI)</strong> install type. 
For more information, see [Installing with Helm in Air Gap Environments](/vendor/helm-install-airgap).</p></td>
    <td>The <strong>Existing Cluster (Helm CLI)</strong> install type must be enabled</td>
  </tr>
  <tr>
    <th>Air Gap Installation Option (Replicated Installers only)</th>
    <td><p>When enabled, new installations with this license have an option in their Download Portal to install from an air gap package or do a traditional online installation.</p></td>
    <td>
      <p>At least one of the following Replicated install types must be enabled:</p>
      <ul>
        <li>Existing Cluster (KOTS install)</li>
        <li>kURL Embedded Cluster (first generation product)</li>
        <li>Embedded Cluster (current generation product)</li>
      </ul>
    </td>
  </tr>
</table>

## About Migrating Existing Licenses to Use Install Types

By default, when an existing customer license is migrated to include the Beta **Install types** field, the Vendor Portal automatically enables certain install types so that the customer does not experience any interruptions or errors in their deployment.

The Vendor Portal uses the following logic to enable install types for migrated licenses:

If the existing license has the **KOTS Install Enabled** field enabled, then the Vendor Portal enables the following install types in the migrated license by default:
* Existing Cluster (Helm CLI)
* Existing Cluster (KOTS install)
* kURL Embedded Cluster (first generation product)
* Embedded Cluster (current generation product)

Additionally, if the existing **KOTS Install Enabled** license also has the **Airgap Download Enabled** option enabled, then the Vendor Portal enables both of the air gap install options in the migrated license (**Helm CLI Air Gap Instructions (Helm CLI only)** and **Air Gap Installation Option (Replicated Installers only)**).

Otherwise, if the **KOTS Install Enabled** field is disabled for the existing license, then the Vendor Portal enables only the **Existing Cluster (Helm CLI)** install type by default. All other install types will be disabled by default.

================
File: docs/vendor/licenses-reference-helm.md
================
# Checking Entitlements in Helm Charts Before Deployment

This topic describes how to check license entitlements before a Helm chart is installed or upgraded. The information in this topic applies to Helm charts installed with Replicated KOTS or Helm.

The Replicated SDK API can be used to check entitlements at runtime. For more information, see [Querying Entitlements with the Replicated SDK API](licenses-reference-sdk).

## Overview

The Replicated registry automatically injects customer entitlement information into the `global.replicated.licenseFields` field of your Helm chart values. For example:

```yaml
# Helm chart values.yaml
global:
  replicated:
    licenseFields:
      expires_at:
        description: License Expiration
        name: expires_at
        signature:
          v1: iZBpESXx7fpdtnbMKingYHiJH42rP8fPs0x8izy1mODckGBwVoA...
        title: Expiration
        value: "2023-05-30T00:00:00Z"
        valueType: String
```

You can access the values in the `global.replicated.licenseFields` field from your Helm templates to check customer entitlements before installation.
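
For example, the following is a minimal sketch of a Helm template that uses these injected values to stop an installation or upgrade when the license has expired. The file name is hypothetical; the sketch assumes the chart is installed through the Replicated registry so that `global.replicated.licenseFields` is populated, and it relies on the ISO 8601 UTC timestamp format shown above, which makes a lexicographic string comparison safe:

```yaml
{{- /* templates/license-gate.yaml (hypothetical file; renders no resources) */}}
{{- /* Read the expiration date, tolerating a missing or empty field. */}}
{{- $expires := dig "replicated" "licenseFields" "expires_at" "value" "" (.Values.global | default dict) }}
{{- /* ISO 8601 UTC timestamps sort lexicographically, so format "now" the same way. */}}
{{- $today := dateInZone "2006-01-02T15:04:05Z" (now) "UTC" }}
{{- if and $expires (gt $today $expires) }}
{{- fail (printf "License expired on %s. Contact your vendor to renew." $expires) }}
{{- end }}
```

Because `fail` aborts template rendering, `helm install` and `helm upgrade` exit with an error before any resources are applied.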

## Prerequisite

Add the Replicated SDK to your application:
* For Helm-based applications, see [Install the SDK as a Subchart](/vendor/replicated-sdk-installing#install-the-sdk-as-a-subchart) in _Installing the Replicated SDK_
* For applications that use standard Kubernetes manifests, see [Install the SDK Alongside a Kubernetes Manifest-Based Application](/vendor/replicated-sdk-installing#manifest-app) in _Installing the Replicated SDK_

## Check Entitlements Before Installation or Upgrade

To check entitlements before installation:

1. Create or edit a customer to use for testing:

   1. In the Vendor Portal, click **Customers**. Select a customer and click the **Manage customer** tab. Alternatively, click **+ Create customer** to create a new customer. For more information, see [Creating and Managing Customers](/vendor/releases-creating-customer).

   1. Edit the built-in license fields or add custom fields for the customer. For example, you can set a license expiration date in the **Expiration policy** field. Or, you can create a custom field that limits the number of nodes a user is permitted in their cluster. For more information, see [Managing Customer License Fields](/vendor/licenses-adding-custom-fields).

1. In your Helm chart, update the Helm templates with one or more directives to access the license field. For example, you can access the built-in `expires_at` field with `{{ .Values.global.replicated.licenseFields.expires_at }}`. Add the desired logic to control application behavior based on the values of license fields.

   For more information about accessing values files from Helm templates, see [Values Files](https://helm.sh/docs/chart_template_guide/values_files/) in the _Chart Template Guide_ section of the Helm documentation.

1. Test your changes by promoting a new release and installing in a development environment:

   1. Package your Helm chart and its dependencies into a `.tgz` chart archive. See [Packaging a Helm Chart for a Release](helm-install-release).

   1. Add the `.tgz` archive to a release and promote to a development channel, such as Unstable. See [Managing Releases with the Vendor Portal](/vendor/releases-creating-releases).

   1. Install in a development environment using the license ID for the test customer that you created. See [Installing with Helm](install-with-helm).

1. Repeat these steps to add and test new license fields.

================
File: docs/vendor/licenses-reference-kots-runtime.mdx
================
# Querying Entitlements with the KOTS License API

This topic describes how to use the Replicated KOTS License API to query license fields during runtime. The information in this topic applies to applications installed with KOTS.

:::important
Using the KOTS License API to check entitlements during runtime is _not_ recommended for new applications distributed with Replicated. Instead, Replicated recommends that you include the Replicated SDK with your application and query entitlements during runtime using the SDK in-cluster API. See [Checking Entitlements with the Replicated SDK](licenses-reference-sdk).
:::

## Overview

KOTS includes default logic to control access to features in the KOTS Admin Console and KOTS CLI based on the values for the built-in fields in the customer's license. For example, by default, KOTS uses the built-in `expires_at` field to prevent an instance from receiving updates when the customer license expires.
You can add custom logic to your application to control its behavior based on the built-in fields or any of the custom fields that you create.

For information about creating custom license fields, see [Managing Customer License Fields](licenses-adding-custom-fields). For the list of built-in fields in customer licenses, see [Built-In License Fields](/vendor/licenses-using-builtin-fields).

The KOTS Admin Console runs on the customer's cluster and provides entitlement information during application runtime. You can query the admin console `/license/v1/license` endpoint to enforce entitlements at runtime.

## Query Fields

To reference license fields at runtime, send an HTTP request to the admin console `/license/v1/license` endpoint at the following location:

```
http://kotsadm:3000/license/v1/license
```

The query returns a response in JSON format. For example:

```json
{"license_id":"WicPRaoCv1pJ57ZMf-iYRxTj25eZalw3",
"installation_id":"a4r1s31mj48qw03b5vwbxvm5x0fqtdl6",
"assignee":"FirstCustomer",
"release_channel":"Unstable",
"license_type":"trial",
"expiration_time":"2026-01-23T00:00:00Z",
"fields":[
  {"field":"Customer ID","title":"Customer ID (Internal)","type":"Integer","value":121,"hide_from_customer":true},
  {"field":"Modules","title":"Enabled Modules","type":"String","value":"Analytics, Integration"}]}
```

## Parse the API Response

To return a license field value, parse the response using the name of the license
field.

For example, the following JavaScript parses the response for the value of a
`seat_count` custom field:

```javascript
import * as rp from "request-promise";

rp({
  uri: "http://kotsadm:3000/license/v1/license",
  json: true
}).then(license => {
  const seatCount = license.fields.find((field) => {
    return field.field === "seat_count";
  });
  console.log(seatCount.value);
}).catch(err => {
  // Handle error response from `kotsadm`
});
```

================
File: docs/vendor/licenses-reference-sdk.mdx
================
# Querying Entitlements with the Replicated SDK API

This topic describes how to query license entitlements at runtime using the Replicated SDK in-cluster API. The information in this topic applies to applications installed with Replicated KOTS or Helm.

## Overview

The Replicated SDK retrieves up-to-date customer license information from the Vendor Portal during runtime. This means that any changes to customer licenses are reflected in real time in the customer environment. For example, you can revoke access to your application when a license expires, expose additional product functionality dynamically based on entitlements, and more. For more information about distributing the SDK with your application, see [About the Replicated SDK](replicated-sdk-overview).

After the Replicated SDK is initialized and running in a customer environment, you can use the following SDK API endpoints to get information about the license:
* `/api/v1/license/info`: List license details, including the license ID, the channel the customer is assigned to, and the license type.
* `/api/v1/license/fields`: List all the fields in the license.
* `/api/v1/license/fields/{field_name}`: List details about a specific license field, including the field name, description, type, and the value.

For more information about these endpoints, see [license](/reference/replicated-sdk-apis#license) in _Replicated SDK API_.
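
Applications typically reach these endpoints through the in-cluster `replicated` service on port 3000, which is the same address used by the `curl` examples later in this topic. As a minimal sketch, a Deployment might pass that base URL to the application container as an environment variable (the variable name and image are hypothetical):

```yaml
# Illustrative Deployment excerpt; assumes the Replicated SDK's
# `replicated` service is reachable in the same namespace on port 3000.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-app
spec:
  replicas: 1
  selector:
    matchLabels:
      app: my-app
  template:
    metadata:
      labels:
        app: my-app
    spec:
      containers:
        - name: my-app
          image: example.registry.com/my-app:1.0.0
          env:
            # Hypothetical variable name; the application reads this
            # base URL and calls the license endpoints listed above.
            - name: REPLICATED_SDK_API
              value: http://replicated:3000/api/v1
```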
+ +## Prerequisite + +Add the Replicated SDK to your application: +* For Helm-based applications, see [Install the SDK as a Subchart](/vendor/replicated-sdk-installing#install-the-sdk-as-a-subchart) in _Installing the Replicated SDK_ +* For applications that use standard Kubernetes manifests, see [Install the SDK Alongside a Standard Manifest-Based Application](/vendor/replicated-sdk-installing#manifest-app) in _Installing the Replicated SDK_ + +## Query License Entitlements at Runtime {#runtime} + +To use the SDK API to query entitlements at runtime: + +1. Create or edit a customer to use for testing: + + 1. In the Vendor Portal, click **Customers**. Select a customer and click the **Manage customer** tab. Alternatively, click **+ Create customer** to create a new customer. For more information, see [Creating and Managing Customers](/vendor/releases-creating-customer). + + 1. Edit the built-in fields and add custom fields for the customer. For example, you can set a license expiration date in the **Expiration policy** field. Or, you can create a custom field that limits the number of nodes a user is permitted in their cluster. For more information, see [Managing Customer License Fields](/vendor/licenses-adding-custom-fields). + +1. (Recommended) Develop against the SDK API `license` endpoints locally: + + 1. Install the Replicated SDK as a standalone component in your cluster. This is called _integration mode_. Installing in integration mode allows you to develop locally against the SDK API without needing to create releases for your application in the Vendor Portal. See [Developing Against the SDK API](/vendor/replicated-sdk-development). + + 1. In your application, add logic to control application behavior based on the customer license information returned by the SDK API service running in your cluster. See [license](/reference/replicated-sdk-apis#license) in _Replicated SDK API (Beta)_. + + **Example:** + + ```bash + curl replicated:3000/api/v1/license/fields/expires_at + ``` + + ```json + { + "name": "expires_at", + "title": "Expiration", + "description": "License Expiration", + "value": "2023-05-30T00:00:00Z", + "valueType": "String", + "signature": { + "v1": "c6rsImpilJhW0eK+Kk37jeRQvBpvWgJeXK2M..." + } + } + ``` + +1. When you are ready to test your changes outside of integration mode, do the following: + + 1. Package your Helm chart and its dependencies (including the Replicated SDK) into a `.tgz` chart archive. See [Packaging a Helm Chart for a Release](helm-install-release). + + 1. Add the `.tgz` archive to a release and promote to a development channel, such as Unstable. See [Managing Releases with the Vendor Portal](/vendor/releases-creating-releases). + + 1. Install in a development environment using the license ID for the test customer that you created. See [Installing with Helm](install-with-helm). + + 1. (Optional) As needed, verify the license information returned by the SDK API in your development environment using port forwarding to access the SDK service locally: + + 1. Use port forwarding to access the `replicated` service from the local development environment on port 3000: + + ```bash + kubectl port-forward service/replicated 3000 + ``` + + The output looks similar to the following: + + ```bash + Forwarding from 127.0.0.1:3000 -> 3000 + ``` + + For more information about `kubectl port-forward`, see [port-forward](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#port-forward) in the kubectl reference documentation. + + 1. 
With the port forward running, in another terminal, use the SDK API to return information about the license.

      **Example:**

      ```
      curl localhost:3000/api/v1/license/fields/expires_at
      ```

1. Repeat these steps to add and test new license fields.

1. (Recommended) Use signature verification in your application to ensure the integrity of the license field. See [Verifying License Field Signatures with the Replicated SDK API](/vendor/licenses-verify-fields-sdk-api).

================
File: docs/vendor/licenses-referencing-fields.md
================
# Checking Entitlements in Preflights with KOTS Template Functions

This topic describes how to check custom entitlements before installation or upgrade using preflight checks and KOTS template functions in the License context. The information in this topic applies to applications installed with KOTS.

## Overview

KOTS includes default logic to control access to features in the Replicated Admin Console and KOTS CLI based on the values for the built-in fields in the customer's license. For example, by default, KOTS uses the built-in `expires_at` field to prevent an instance from receiving updates when the customer license expires. You can add custom logic to your application to control the behavior of your application based on the built-in fields or any of the custom fields that you create.

For more information, see [Managing Customer License Fields](licenses-adding-custom-fields). For the list of built-in fields in customer licenses, see [Built-In License Fields](/vendor/licenses-using-builtin-fields).

## Add Preflights to Check Entitlements Before Installation or Upgrade {#install}

To enforce entitlements when your customer installs or updates your application,
you can use the Replicated LicenseFieldValue template function in your application to read the value of license fields. The LicenseFieldValue template function accepts the built-in license fields and any custom fields that you configure. For more information, see [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue) in _License Context_.

For example, a license might limit how many nodes are permitted in a customer's
cluster. You could define this limit by creating a `node_count` custom license field:

| Name | Key | Type | Description |
|------|-----|------|-------------|
| Node Count | node_count | Integer | The maximum number of nodes permitted |

To enforce the node count when a customer installs or updates your application,
you can use LicenseFieldValue to create a preflight check that references the custom `node_count` field:

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: Preflight
metadata:
  name: example-preflight-checks
spec:
  analyzers:
    - nodeResources:
        checkName: Node Count Check
        outcomes:
          - fail:
              when: 'count() > {{repl LicenseFieldValue "node_count"}}'
              message: The cluster has more nodes than the {{repl LicenseFieldValue "node_count"}} you are licensed for.
          - pass:
              message: The number of nodes matches your license ({{repl LicenseFieldValue "node_count"}})
```

In the example above, the preflight check uses the `nodeResources` analyzer and the value of the custom `node_count` field to determine if the customer has exceeded the maximum number of nodes permitted by their license. If the preflight check fails, a failure message is displayed to the user and KOTS prevents the installation or upgrade from continuing.
+
+For more information about this example, see [How Can I Use License Custom Fields Value in a Pre-Flight Check?](https://help.replicated.com/community/t/how-can-i-use-license-custom-fields-value-in-a-pre-flight-check/624) in Replicated Community.
+
+For more information about defining preflight checks, see [Defining Preflight Checks](preflight-defining).
+
+================
+File: docs/vendor/licenses-using-builtin-fields.mdx
+================
+import GitOpsNotRecommended from "../partials/gitops/_gitops-not-recommended.mdx"
+
+# Built-In License Fields
+
+This topic describes the built-in license fields that appear in customer licenses for applications distributed with Replicated.
+
+## Overview
+
+The license associated with each customer record in the Replicated Vendor Portal includes several built-in fields. These built-in fields include customer properties (such as the customer name, customer email, and the Vendor Portal channel where the customer is assigned), the license expiration date, and the Replicated features that are enabled for the customer (such as the supported install types or Admin Console features).
+
+When a customer installs an application distributed with Replicated, the values for each built-in and custom field in their license can be accessed using the [Replicated SDK](/vendor/replicated-sdk-overview) in-cluster API [license](/reference/replicated-sdk-apis#license) endpoints. Applications installed with a Replicated installer (KOTS, kURL, Embedded Cluster) can also access license fields using the Replicated KOTS [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue) template function.
+
+The following shows an example of a customer license:
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: License
+metadata:
+  name: customertest
+spec:
+  appSlug: gitea
+  channelID: 2iy68JBTkvUqamgD...
+  channelName: Beta
+  channels:
+  - channelID: 2iy68JBTkvUqamgD...
+    channelName: Beta
+    channelSlug: beta
+    endpoint: https://replicated.app
+    isDefault: true
+    isSemverRequired: true
+    replicatedProxyDomain: proxy.replicated.com
+  customerEmail: example@replicated.com
+  customerName: Customer Test
+  endpoint: https://replicated.app
+  entitlements:
+    expires_at:
+      description: License Expiration
+      signature: {}
+      title: Expiration
+      value: ""
+      valueType: String
+  isAirgapSupported: true
+  isEmbeddedClusterDownloadEnabled: true
+  isKotsInstallEnabled: true
+  isSemverRequired: true
+  isSupportBundleUploadSupported: true
+  licenseID: 2sY6ZC2J9sO2...
+  licenseSequence: 4
+  licenseType: prod
+  replicatedProxyDomain: proxy.replicated.com
+  signature: eyJsaWNlbnNlRGF...
+```
+
+## License Field Names
+
+This section lists the built-in fields that are included in customer licenses for applications distributed with Replicated.
+
+:::note
+The built-in license fields are reserved field names.
+:::
+
+### General License Fields
+
+<table>
+  <tr>
+    <td>Field Name</td>
+    <td>Description</td>
+  </tr>
+  <tr>
+    <td>`appSlug`</td>
+    <td>The unique application slug that the customer is associated with. This value never changes.</td>
+  </tr>
+  <tr>
+    <td>`channelID`</td>
+    <td>The ID of the channel where the customer is assigned. When the customer's assigned channel changes, the latest release from that channel will be downloaded on the next update check.</td>
+  </tr>
+  <tr>
+    <td>`channelName`</td>
+    <td>The name of the channel where the customer is assigned. 
When the customer's assigned channel changes, the latest release from that channel will be downloaded on the next update check.</td> + </tr> + <tr> + <td>`licenseID`, `licenseId`</td> + <td>Unique ID for the installed license. This value never changes.</td> + </tr> + <tr> + <td>`customerEmail`</td> + <td>The customer email address.</td> + </tr> + <tr> + <td>`endpoint`</td> + <td>For applications installed with a Replicated installer (KOTS, kURL, Embedded Cluster), this is the endpoint that the KOTS Admin Console uses to synchronize the licenses and download updates. This is typically `https://replicated.app`.</td> + </tr> + <tr> + <td>`entitlementValues`</td> + <td>Includes both the built-in `expires_at` field and any custom license fields. For more information about adding custom license fields, see [Managing Customer License Fields](licenses-adding-custom-fields).</td> + </tr> + <tr> + <td>`expires_at`</td> + <td><p>Defines the expiration date for the license. The date is encoded in ISO 8601 format (`2026-01-23T00:00:00Z`) and is set to midnight UTC at the beginning of the calendar day (`00:00:00`) on the date selected. If a license does not expire, this field is missing.</p><p>For information about the default behavior when a license expires, see [License Expiration Handling](licenses-about#expiration) in _About Customers_.</p></td> + </tr> + <tr> + <td>`licenseSequence`</td> + <td>Every time a license is updated, its sequence number is incremented. This value represents the license sequence that the client currently has.</td> + </tr> + <tr> + <td>`customerName`</td> + <td>The name of the customer.</td> + </tr> + <tr> + <td>`signature`</td> + <td>The base64-encoded license signature. This value will change when the license is updated.</td> + </tr> + <tr> + <td>`licenseType`</td> + <td>A string value that describes the type of the license, which is one of the following: `paid`, `trial`, `dev`, `single-tenant-vendor-managed` or `community`. For more information about license types, see [Managing License Type](licenses-about-types).</td> + </tr> +</table> + +### Install Types + +The table below describes the built-in license fields related to the supported install type(s). For more information, see [Managing Install Types for a License](/vendor/licenses-install-types). + +<table> + <tr> + <td>Field Name</td> + <td>Description</td> + </tr> + <tr> + <td>`isEmbeddedClusterDownloadEnabled`</td> + <td><p>If a license supports installation with Replicated Embedded Cluster, this field is set to `true` or missing. If Embedded Cluster installations are not supported, this field is `false`.</p><p>This field requires that the vendor has the Embedded Cluster entitlement and that the release at the head of the channel includes an [Embedded Cluster Config](/reference/embedded-config) custom resource. This field also requires that the "Install Types" feature is enabled for your Vendor Portal team. Reach out to your Replicated account representative to get access.</p></td> + </tr> + <tr> + <td>`isHelmInstallEnabled`</td> + <td><p>If a license supports Helm installations, this field is set to `true` or missing. If Helm installations are not supported, this field is set to `false`. This field requires that the vendor packages the application as Helm charts and, optionally, Replicated custom resources.</p><p> This field requires that the "Install Types" feature is enabled for your Vendor Portal team. 
Reach out to your Replicated account representative to get access.</p></td>
+  </tr>
+  <tr>
+    <td>`isKotsInstallEnabled`</td>
+    <td><p>If a license supports installation with Replicated KOTS, this field is set to `true`. If KOTS installations are not supported, this field is either `false` or missing.</p><p>This field requires that the vendor has the KOTS entitlement.</p></td>
+  </tr>
+  <tr>
+    <td>`isKurlInstallEnabled`</td>
+    <td><p>If a license supports installation with Replicated kURL, this field is set to `true` or missing. If kURL installations are not supported, this field is `false`. </p><p>This field requires that the vendor has the kURL entitlement and a promoted kURL installer spec. This field also requires that the "Install Types" feature is enabled for your Vendor Portal team. Reach out to your Replicated account representative to get access.</p></td>
+  </tr>
+</table>
+
+### Install Options
+
+The table below describes the built-in license fields related to install options.
+
+<table>
+  <tr>
+    <td>Field Name</td>
+    <td>Description</td>
+  </tr>
+  <tr>
+    <td>`isAirgapSupported`</td>
+    <td><p>If a license supports air gap installations with the Replicated installers (KOTS, kURL, Embedded Cluster), then this field is set to `true`. If Replicated installer air gap installations are not supported, this field is missing.</p><p>When you enable this field for a license, the `license.yaml` file will have license metadata embedded in it and must be re-downloaded.</p></td>
+  </tr>
+  <tr>
+    <td>`isHelmAirgapEnabled`</td>
+    <td><p>If a license supports Helm air gap installations, then this field is set to `true` or missing. If Helm air gap installations are not supported, this field is `false`.</p><p>When you enable this feature, the `license.yaml` file will have license metadata embedded in it and must be re-downloaded.</p><p>This field requires that the "Install Types" feature is enabled for your Vendor Portal team. Reach out to your Replicated account representative to get access.</p></td>
+  </tr>
+</table>
+
+### Admin Console Feature Options
+
+The table below describes the built-in license fields related to the Admin Console feature options. The Admin Console feature options apply only to licenses that support installation with the Replicated installers (KOTS, kURL, Embedded Cluster).
+
+<table>
+  <tr>
+    <td>Field Name</td>
+    <td>Description</td>
+  </tr>
+  <tr>
+    <td>`isDisasterRecoverySupported`</td>
+    <td>If a license supports the Embedded Cluster disaster recovery feature, this field is set to `true`. If a license does not support disaster recovery for Embedded Cluster, this field is either missing or `false`. **Note**: Embedded Cluster Disaster Recovery is in Alpha. To get access to this feature, reach out to Alex Parker at [alexp@replicated.com](mailto:alexp@replicated.com). For more information, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery).</td>
+  </tr>
+  <tr>
+    <td>`isGeoaxisSupported`</td>
+    <td>(kURL Only) If a license supports integration with GeoAxis, this field is set to `true`. If GeoAxis is not supported, this field is either `false` or missing. **Note**: This field requires that the vendor has the GeoAxis entitlement. It also requires that the vendor has access to the Identity Service entitlement.</td>
+  </tr>
+  <tr>
+    <td>`isGitOpsSupported`</td>
+    <td><GitOpsNotRecommended/> If a license supports the KOTS AutoGitOps workflow in the Admin Console, this field is set to `true`. 
If Auto-GitOps is not supported, this field is either `false` or missing. See [KOTS Auto-GitOps Workflow](/enterprise/gitops-workflow).</td> + </tr> + <tr> + <td>`isIdentityServiceSupported`</td> + <td>If a license supports identity-service enablement for the Admin Console, this field is set to `true`. If identity service is not supported, this field is either `false` or missing. **Note**: This field requires that the vendor have access to the Identity Service entitlement.</td> + </tr> + <tr> + <td>`isSemverRequired`</td> + <td>If set to `true`, this field requires that the Admin Console orders releases according to Semantic Versioning. This field is controlled at the channel level. For more information about enabling Semantic Versioning on a channel, see [Semantic Versioning](releases-about#semantic-versioning) in _About Releases_.</td> + </tr> + <tr> + <td>`isSnapshotSupported`</td> + <td>If a license supports the snapshots backup and restore feature, this field is set to `true`. If a license does not support snapshots, this field is either missing or `false`. **Note**: This field requires that the vendor have access to the Snapshots entitlement.</td> + </tr> + <tr> + <td>`isSupportBundleUploadSupported`</td> + <td>If a license supports uploading a support bundle in the Admin Console, this field is set to `true`. If a license does not support uploading a support bundle, this field is either missing or `false`.</td> + </tr> +</table> + +================ +File: docs/vendor/licenses-verify-fields-sdk-api.md +================ +# Verifying License Field Signatures with the Replicated SDK API + +This topic describes how to verify the signatures of license fields when checking customer license entitlements with the Replicated SDK. + +## Overview + +To prevent man-in-the-middle attacks or spoofing by your customers, license fields are cryptographically signed with a probabilistic signature scheme (PSS) signature to ensure their integrity. The PSS signature for a license field is included in the response from the Replicated SDK API `/license/fields` and `/license/fields/{field-name}` endpoints as a Base64 encoded string. + +The following shows an example of a Base64 encoded PSS signature for an `expires_at` field returned by the SDK API: + +```json +{ + "name": "expires_at", + "title": "Expiration", + "description": "License Expiration", + "value": "2023-05-30T00:00:00Z", + "valueType": "String", + "signature": { + "v1": "c6rsImpilJhW0eK+Kk37jeRQvBpvWgJeXK2MD0YBlIAZEs1zXpmvwLdfcoTsZMOj0lZbxkPN5dPhEPIVcQgrzfzwU5HIwQbwc2jwDrLBQS4hGOKdxOWXnBUNbztsHXMqlAYQsmAhspRLDhBiEoYpFV/8oaaAuNBrmRu/IVAW6ahB4KtP/ytruVdBup3gn1U/uPAl5lhzuBifaW+NDFfJxAX..." + } +} +``` + +Replicated recommends that you use signature verification to ensure the integrity of each license field you use in your application. For more information about how to check entitlements in your application for Helm CLI installations, see [Checking Entitlements in Helm Charts Before Deployment](licenses-reference-helm). + +## Requirement + +Include the Replicated SDK as a dependency of your application Helm chart. For more information, see [Install the SDK as a Subchart](replicated-sdk-installing#install-the-sdk-as-a-subchart) in _Installing the Replicated SDK_. + +## Use Your Public Key to Verify License Field Signatures + +In your application, you can use your public key (available in the Vendor Portal) and the MD5 hash of a license field value to verify the PSS signature of the license field. 
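+
+The following is a minimal sketch of this verification in Python, assuming a PEM-encoded RSA public key and RSA-PSS over the MD5 digest of the raw field value with a maximum-length salt. The `verify_license_field` helper and the use of the `cryptography` library are illustrative assumptions, not the Vendor Portal code samples; confirm the exact parameters against the samples provided in the Vendor Portal, as described in the procedure below.
+
+```python
+import base64
+import hashlib
+
+from cryptography.exceptions import InvalidSignature
+from cryptography.hazmat.primitives import hashes, serialization
+from cryptography.hazmat.primitives.asymmetric import padding, utils
+
+def verify_license_field(public_key_pem: str, field_value: str, signature_b64: str) -> bool:
+    """Return True if the license field's PSS signature verifies against the public key."""
+    public_key = serialization.load_pem_public_key(public_key_pem.encode())
+    # Signatures returned by /license/fields are Base64 encoded and must be decoded first.
+    signature = base64.b64decode(signature_b64)
+    # The signature covers the MD5 hash of the raw license field value.
+    digest = hashlib.md5(field_value.encode()).digest()
+    try:
+        public_key.verify(
+            signature,
+            digest,
+            # Assumed PSS parameters; confirm against the Vendor Portal code samples.
+            padding.PSS(mgf=padding.MGF1(hashes.MD5()), salt_length=padding.PSS.MAX_LENGTH),
+            utils.Prehashed(hashes.MD5()),
+        )
+        return True
+    except InvalidSignature:
+        return False
+```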
+
+To use your public key to verify license field signatures:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Settings** page.
+
+1. Click the **Replicated SDK Signature Verification** tab.
+
+   ![signature verification page](/images/signature-verification.png)
+   [View a larger version of this image](/images/signature-verification.png)
+
+1. Under **Your public key**, copy the key and save it in a secure location.
+
+1. (Optional) Under **Verification**, select the tab for the necessary programming language, and copy the code sample provided.
+
+1. In your application, add logic that uses the public key to verify the integrity of license field signatures. If you copied one of the code samples from the Vendor Portal in the previous step, paste it into your application and make any additional edits as required.
+
+   If you are not using one of the code samples provided, consider the following requirements for verifying license field values:
+   * License field signatures included in the response from the SDK API `/license/fields` and `/license/fields/{field-name}` endpoints are Base64 encoded and must be decoded before they are verified.
+   * The MD5 hash of the license field value is required to verify the signature of the license field. The raw license field value is included in the response from the SDK API `/license/fields` and `/license/fields/{field-name}` endpoints. The MD5 hash of the value must be calculated and used for signature verification.
+
+================
+File: docs/vendor/namespaces.md
+================
+# Application Namespaces
+
+Replicated strongly recommends that applications be architected to deploy into a single namespace when possible.
+
+If you are distributing your application with Replicated KOTS, you can implement an architecture in which a single application is deployed into a single namespace.
+
+To do this, omit the namespace from the `metadata.namespace` field in your application manifests. Do not use the Config custom resource object to make the namespace user-configurable.
+
+When you do not specify a namespace in application manifests, KOTS deploys to whatever namespace it is already running in. This gives the most flexibility when deploying to end user environments, as users already select the namespace where KOTS runs. Scoping to a single namespace also allows the app to run with minimal Kubernetes permissions, which can reduce friction when an application runs as a tenant in a large cluster. Overall, letting the end user manage namespaces is the easiest way to reduce friction.
+
+The following examples demonstrate the recommended approach of excluding the namespace from the application manifests, as well as the incorrect approaches of hardcoding the namespace or injecting the namespace as a user-supplied value:
+
+**Recommended**
+
+```yaml
+# good, namespace absent
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: spline-reticulator
+spec:
+```
+
+**Not Recommended**
+
+```yaml
+# bad, hardcoded
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: spline-reticulator
+  namespace: graphviz-pro
+spec:
+```
+
+```yaml
+# bad, configurable
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: spline-reticulator
+  namespace: repl{{ ConfigOption "gv_namespace" }}
+spec:
+```
+
+================
+File: docs/vendor/offsite-backup.md
+================
+# Offsite Data Backup
+
+Replicated stores customer data in multiple databases across Amazon Web
+Services (AWS) S3 buckets. 
Clustering and network redundancies help to avoid a +single point of failure. + +The offsite data backup add-on provides additional redundancy by copying data to +an offsite Google Cloud Provider (GCP) storage location. This helps to mitigate +any potential data loss caused by an outage to AWS. + +:::note +The offsite data backup add-on is available only to [Replicated Enterprise](https://www.replicated.com/pricing/) customers at an additional cost. Please [open a product request](https://vendor.replicated.com/support?requestType=feature&productArea=vendor) if you are interested in this feature. +::: + +## Overview + +When the offsite data backup add-on is enabled, data is migrated from Replicated's existing AWS S3 buckets to a dedicated second set of AWS S3 buckets. These buckets are only used for vendors with this add-on enabled, and all vendor data remains logically isolated by vendor Team. After data is migrated from existing S3 buckets to the secondary buckets, +all data is deleted from the original S3 buckets. + +To ensure customer data in the offsite GCP storage remains up-to-date, the GCP +account uses the Google Storage Transfer service to synchronize at least daily with the +secondary dedicated S3 buckets. + +The offsite GCP data backup functions only as secondary data storage and does not serve customer +data. Customer data continues to be served from the AWS S3 buckets. In the case of an AWS outage, Replicated can use a manual +process to restore customer data from the GCP backups into a production-grade database. + +For more information, see [Architecture](#architecture) below. + +## Architecture + +The following diagram shows the flow of air gap build data and registry image data +when the offsite data backup add-on is enabled. The flow of data that is backed +up offsite in GCP is depicted with green arrows. + +![architecture of offsite data storage with the offsite data backup add-on](../../static/images/offsite-backup.png) + +[View a larger version of this image](../../static/images/offsite-backup.png) + +As shown in the diagram above, when the offsite data backup add-on is enabled, +registry and air gap data are stored in dedicated S3 buckets. Both of +these dedicated S3 buckets back up data to offsite storage in GCP. + +The diagram also shows how customer installations continue to pull data from the +vendor registry and the customer portal when offsite data backup is enabled. + +================ +File: docs/vendor/operator-defining-additional-images.mdx +================ +import AirGapBundle from "../partials/airgap/_airgap-bundle.mdx" + +# Defining Additional Images + +This topic describes how to define additional images to be included in the `.airgap` bundle for a release. + +## Overview + +<AirGapBundle/> + +When building the `.airgap` bundle for a release, the Replicated Vendor Portal finds and includes all images defined in the Pod specs for the release. During installation or upgrade, KOTS retags images from the `.airgap` bundle and pushes them to the registry configured in KOTS. + +Any required images that are _not_ defined in your application manifests must be listed in the `additionalImages` attribute of the KOTS Application custom resource. This ensures that the images are included in the `.airgap` bundle for the release. + +## Define Additional Images for Air Gap Bundles + +KOTS supports including the following types of images in the `additionalImages` field: + +* Public images referenced by the docker pullable image name. 
+
+* Images pushed to a private registry that was configured in the Vendor Portal, referenced by the docker-pullable, upstream image name. For more information about configuring private registries, see [Connecting to an External Registry](/vendor/packaging-private-images).
+  :::note
+  If you use the [Replicated proxy registry](/vendor/private-images-about) for online (internet-connected) installations, be sure to use the _upstream_ image name in the `additionalImages` field, rather than referencing the location of the image at `proxy.replicated.com`.
+  :::
+* Images pushed to the Replicated registry referenced by the `registry.replicated.com` name.
+
+The following example demonstrates adding multiple images to `additionalImages`:
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Application
+metadata:
+  name: my-app
+spec:
+  additionalImages:
+    - elasticsearch:7.6.0
+    - quay.io/orgname/private-image:v1.2.3
+    - registry.replicated.com/my-operator/my-private-image:abd123f
+```
+
+================
+File: docs/vendor/operator-defining-additional-namespaces.md
+================
+# Defining Additional Namespaces
+
+Operators often need to be able to manage resources in multiple namespaces in the cluster.
+When deploying an application to an existing cluster, Replicated KOTS creates a Kubernetes Role and RoleBinding that are limited to only accessing the namespace that the application is being installed into.
+
+In addition to RBAC policies, clusters running in air gap environments or clusters that are configured to use a local registry also need to ensure that image pull secrets exist in all namespaces that the operator will manage resources in.
+
+## Creating additional namespaces
+
+An application can identify additional namespaces to create during installation.
+You can define these additional namespaces in the Application custom resource by adding an `additionalNamespaces` attribute to the Application custom resource manifest file. For more information, see [Application](../reference/custom-resource-application) in the _Custom Resources_ section.
+
+When these are defined, `kots install` will create the namespaces and ensure that the KOTS Admin Console has full access to manage resources in these namespaces.
+This is accomplished by creating a Role and RoleBinding per namespace, and setting the Subject to the Admin Console service account.
+If the current user account does not have access to create these additional namespaces, the installer will show an error and fail.
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Application
+metadata:
+  name: my-operator
+spec:
+  additionalNamespaces:
+    - namespace1
+    - namespace2
+```
+
+In addition to creating these namespaces, the Admin Console will ensure that the application pull secret exists in them, and that this secret has access to pull the application images. This includes both the images that the application uses directly and any additional images defined in the Application custom resource manifest. For more information, see [Defining Additional Images](operator-defining-additional-images).
+
+The pull secret name can be obtained using the [ImagePullSecretName](../reference/template-functions-config-context/#imagepullsecretname) template function.
+An operator can reliably depend on this secret existing in all installs (online and air gapped), and can use this secret name in any created `podspec` to pull private images.
+
+## Dynamic namespaces
+
+Some applications need access to dynamically created namespaces or even all namespaces. 
+
+In this case, an application spec can list `"*"` as one of its `additionalNamespaces` in the Application manifest file.
+When KOTS encounters the wildcard, it will not create any namespaces, but it will ensure that the application image pull secret is copied to all namespaces.
+The Admin Console will run an informer internally to watch namespaces in the cluster, and when a new namespace is created, the secret will automatically be copied to it.
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Application
+metadata:
+  name: my-operator
+spec:
+  additionalNamespaces:
+    - "*"
+```
+
+When the wildcard (`"*"`) is listed in `additionalNamespaces`, KOTS will use a ClusterRole and ClusterRoleBinding for the Admin Console.
+This will ensure that the Admin Console will continue to have permissions to all newly created namespaces, even after the install has finished.
+
+================
+File: docs/vendor/operator-packaging-about.md
+================
+# About Packaging a Kubernetes Operator Application
+
+Kubernetes Operators can be packaged and delivered as an application using the same methods as other Kubernetes applications.
+
+Operators are good for [specific use cases](https://blog.replicated.com/operators-in-kots/). In general, we recommend thinking deeply about the problem space an application solves before going down the Operator path because, although powerful, Operators take a lot of time to build and maintain.
+
+Operators are generally defined using one or more `CustomResourceDefinition` manifests, and the controller is often a `StatefulSet`, along with other additional objects.
+These Kubernetes manifests can be included in an application by adding them to a release and promoting the release to a channel.
+
+Kubernetes Operators differ from traditional applications because they interact with the Kubernetes API to create and manage other objects at runtime.
+When a `CustomResource` is deployed to a cluster where the Operator is running, the Operator may need to create new Kubernetes objects to fulfill the request.
+When an Operator creates an object that includes a `PodSpec`, the Operator should use locally-available images in order to remain compatible with air gapped environments and with customers who push all images to a locally configured registry.
+Even environments that aren't air gapped may need access to private images that are included as part of the application at runtime.
+
+To support this, an application can list the additional images that it requires, and KOTS exposes the local registry details (endpoint, namespace, and secrets) to the application so that they can be referenced when creating a `PodSpec` at runtime.
+
+================
+File: docs/vendor/operator-referencing-images.md
+================
+# Referencing Images
+
+This topic explains how to support the use of private image registries for applications that are packaged with Kubernetes Operators.
+
+## Overview
+
+To support the use of private images in all environments, the Kubernetes Operator code must use KOTS functionality to determine the image name and image pull secrets for all pods when they are created.
+
+There are several template functions available to assist with this.
+Using them might require adding two new environment variables to the manager so that it can read these values.
+
+The steps to ensure that an Operator is using the correct image names and has the correct image pull secrets in dynamically created pods are:
+
+1. 
Add a new environment variable to the Manager Pod so that the Manager knows the location of the private registry, if one is set.
+2. Add a new environment variable to the Manager Pod so that the Manager also knows the `imagePullSecret` that's needed to pull the local image.
+
+## Step 1: Add a reference to the local registry
+
+The manager of an Operator is often a `StatefulSet`, but could be a `Deployment` or another kind.
+Regardless of where the spec is defined, the location of the private images can be read using the Replicated KOTS template functions. For more information about using template functions, see [About Template Functions](/reference/template-functions-about).
+
+#### Option 1: Define each image
+If an Operator only requires one additional image, the easiest way to determine this location is to use the `LocalImageName` function.
+This will always return the image name to use, whether the customer's environment is configured to use a local registry or not.
+
+**Example:**
+
+```yaml
+env:
+  - name: IMAGE_NAME_ONE
+    value: 'repl{{ LocalImageName "elasticsearch:7.6.0" }}'
+```
+
+For online installations (no local registry), this will be written with no changes -- the variable will contain `elasticsearch:7.6.0`.
+For installations that are air gapped or have a locally-configured registry, this will be rewritten as the locally referenceable image name. For example, `registry.somebigbank.com/my-app/elasticsearch:7.6.0`.
+
+**Example:**
+
+```yaml
+env:
+  - name: IMAGE_NAME_TWO
+    value: 'repl{{ LocalImageName "quay.io/orgname/private-image:v1.2.3" }}'
+```
+
+In the above example, this is a private image, and it will always be rewritten. For online installations, this will return `proxy.replicated.com/proxy/app-name/quay.io/orgname/private-image:v1.2.3`, and for installations with a locally-configured registry it will return `registry.somebigbank.com/org/my-app-private-image:v1.2.3`.
+
+#### Option 2: Build image names manually
+
+For applications that have multiple images or dynamically construct the image name at runtime, the KOTS template functions can also return the elements that make up the local registry endpoint and secrets, and let the application developer construct the locally referenceable image name.
+
+**Example:**
+
+```yaml
+env:
+  - name: REGISTRY_HOST
+    value: 'repl{{ LocalRegistryHost }}'
+  - name: REGISTRY_NAMESPACE
+    value: 'repl{{ LocalRegistryNamespace }}'
+```
+
+## Step 2: Determine the imagePullSecret
+
+Private, local images will need to reference an image pull secret to be pulled.
+The value of the secret's `.dockerconfigjson` is provided by a template function, and the application can write this pull secret as a new secret to the namespace.
+If the application is deploying the pod to the same namespace as the Operator, the pull secret will already exist in the namespace, and the secret name can be obtained using the [ImagePullSecretName](../reference/template-functions-config-context/#imagepullsecretname) template function.
+KOTS will create this secret automatically, but only in the namespace that the Operator is running in.
+It's the responsibility of the application developer (the Operator code) to ensure that this secret is present in any namespace that new pods will be deployed to.
+
+This template function returns the base64-encoded docker auth that can be written directly to a secret and referenced in the `imagePullSecrets` attribute of the PodSpec. 
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: myregistrykey
+  namespace: awesomeapps
+data:
+  .dockerconfigjson: '{{repl LocalRegistryImagePullSecret }}'
+type: kubernetes.io/dockerconfigjson
+```
+
+This will return an image pull secret for the locally configured registry.
+
+If your application has both public and private images, the same template function can be used for all of them. This ensures that installs without a local registry can still differentiate between private, proxied, and public images.
+
+**Example:**
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: my-pull-secret
+  namespace: awesomeapps
+data:
+  .dockerconfigjson: '{{repl LocalRegistryImagePullSecret }}'
+type: kubernetes.io/dockerconfigjson
+```
+
+In the above example, the `LocalRegistryImagePullSecret` function will return an empty auth array if the installation is not air gapped, does not have a local registry configured, and the referenced image (for example, `elasticsearch:7.6.0`) is public.
+If the image is private, the function will return the license-key derived pull secret.
+And if the installation is using a local registry, the image pull secret will contain the credentials needed to pull from the local registry.
+In all cases, the function returns a value that can be written to a secret: for installations without a local registry, the license-derived pull secret (or an empty auth array for public images), and for installations with a local registry, the local registry credentials.
+
+## Using the local registry at runtime
+
+The developer of the Operator should use these environment variables to set the image name in any deployed PodSpec to ensure that it will work in air gapped environments.
+
+================
+File: docs/vendor/orchestrating-resource-deployment.md
+================
+import WeightLimitation from "../partials/helm/_helm-cr-weight-limitation.mdx"
+import HooksLimitation from "../partials/helm/_hooks-limitation.mdx"
+import HookWeightsLimitation from "../partials/helm/_hook-weights-limitation.mdx"
+
+# Orchestrating Resource Deployment
+
+This topic describes how to orchestrate the deployment order of resources deployed as part of your application. The information in this topic applies to Helm chart- and standard manifest-based applications deployed with Replicated KOTS.
+
+## Overview
+
+Many applications require that certain resources are deployed and in a ready state before other resources can be deployed.
+
+When installing an application that includes one or more Helm charts, KOTS always deploys standard Kubernetes manifests to the cluster _before_ deploying any Helm charts. For example, if your release contains a Helm chart, a CRD, and a ConfigMap, then the CRD and ConfigMap resources are deployed before the Helm chart.
+
+For applications deployed with KOTS, you can manage the order in which resources are deployed using the following methods:
+
+* For Helm charts, set the `weight` property in the corresponding HelmChart custom resource. See [HelmChart `weight`](#weight).
+
+* For standard manifests, add KOTS annotations to the resources. See [Standard Manifest Deployment Order with KOTS Annotations](#manifests). 
+ +## Helm Chart Deployment Order with `weight` {#weight} + +You can configure the [`weight`](/reference/custom-resource-helmchart-v2#weight) property of the Replicated HelmChart custom resource to define the order in which the Helm charts in your release are installed. + +KOTS directs Helm to install the Helm charts based on the value of `weight` in ascending order, deploying the chart with the lowest weight first. Any dependencies are installed along with the parent chart. For example, a chart with a `weight` of `-1` deploys before a chart with a `weight` of `0`. + +The value for the `weight` property can be any negative or positive integer or `0`. By default, when you do not provide a `weight` for a Helm chart, the `weight` is `0`. + +For example: + +```yaml +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: samplechart +spec: + chart: + name: samplechart + chartVersion: 3.1.7 + releaseName: samplechart-release-1 + # weight determines the order that charts are applied, with lower weights first. + weight: 4 +``` + +#### Limitations + +The `weight` field in the HelmChart custom resource has the following limitations: + +* <WeightLimitation/> + +* When installing a Helm chart-based application, KOTS always deploys standard Kubernetes manifests to the cluster _before_ deploying Helm charts. For example, if your release contains a Helm chart, a CRD, and a ConfigMap, then the CRD and ConfigMap resources are deployed before the Helm chart. The `weight` property does not allow Helm charts to be deployed before standard manifests. + +## Standard Manifest Deployment Order with KOTS Annotations {#manifests} + +You can use the KOTS annotations described in this section to control the order in which standard manifests are deployed. + +### Requirement + +You must quote the boolean or integer values in annotations because Kubernetes annotations must be strings. For more information about working with annotations in Kubernetes resources, see [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) in the Kubernetes documentation. + +### `kots.io/creation-phase` + +When the `kots.io/creation-phase: '<integer>'` annotation is present on a resource, KOTS groups the resource into the specified creation phase. KOTS deploys each phase in order from lowest to highest. Phases can be any positive or negative integer ranging from `'-9999'` to `'9999'`. + +Resources in the same phase are deployed in the same order that Helm installs resources. To view the order in which KOTS deploys resources of the same phase, see [Helm installs resources in the following order](https://helm.sh/docs/intro/using_helm/#:~:text=Helm%20installs%20resources%20in%20the,order) in the Helm documentation. + +The following example deploys the `CustomResourceDefinition` before the default creation phase: + +```yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: myresources.example.com + annotations: + kots.io/creation-phase: "-1" +... +``` + +### `kots.io/deletion-phase` + +When the `kots.io/deletion-phase: '<integer>'` annotation is present on a resource, KOTS groups the resource into the specified deletion phase. KOTS deletes each phase in order from lowest to highest. Resources within the same phase are deleted in the reverse order from which they were created. Phases can be any positive or negative integer ranging from `'-9999'` to `'9999'`. 
+
+The following example deploys the `CustomResourceDefinition` before the default creation phase and deletes the resource after the default deletion phase:
+
+```yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: myresources.example.com
+  annotations:
+    kots.io/creation-phase: "-1"
+    kots.io/deletion-phase: "1"
+...
+```
+### `kots.io/wait-for-ready`
+
+When the `kots.io/wait-for-ready: '<bool>'` annotation is present on a resource and evaluates to `'true'`, KOTS waits for the resource to be in a ready state before deploying any other resources. For most resource types, KOTS has existing logic to determine if a resource is ready. If there is no existing logic for the given resource type, then KOTS waits until the resource exists and is queryable from the Kubernetes API server.
+
+In the following example, KOTS waits for the Postgres `StatefulSet` to be ready before continuing to deploy other resources:
+
+```yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: postgresql
+  annotations:
+    kots.io/wait-for-ready: 'true'
+  labels:
+    app: postgresql
+spec:
+  selector:
+    matchLabels:
+      app: postgresql
+  template:
+    metadata:
+      labels:
+        app: postgresql
+    spec:
+      containers:
+      - name: postgresql
+        image: "postgres:9.6"
+        imagePullPolicy: ""
+...
+```
+
+### `kots.io/wait-for-properties`
+
+When the `kots.io/wait-for-properties: '<jsonpath>=<value>,<jsonpath>=<value>'` annotation is present on a resource, KOTS waits for one or more specified resource properties to match the desired values before deploying other resources. This annotation is useful when the `kots.io/wait-for-ready` annotation, which waits for a resource to exist, is not sufficient.
+
+The value for this annotation is a comma-separated list of key-value pairs, where the key is a JSONPath specifying the path to the property and the value is the desired value for the property. In the following example, KOTS waits for a resource to reach a desired state before deploying other resources. In this case, KOTS waits until each of the three status properties have the target values:
+
+```yaml
+kind: MyResource
+metadata:
+  name: my-resource
+  annotations:
+    kots.io/wait-for-properties: '.status.tasks.extract=true,.status.tasks.transform=true,.status.tasks.load=true'
+...
+status:
+  tasks:
+    extract: false
+    transform: false
+    load: false
+```
+
+================
+File: docs/vendor/packaging-air-gap-excluding-minio.md
+================
+# Excluding MinIO from Air Gap Bundles (Beta)
+
+The Replicated KOTS Admin Console requires an S3-compatible object store to store application archives and support bundles. By default, KOTS deploys MinIO to satisfy the object storage requirement. For more information about the options for installing without MinIO in existing clusters, see [Installing KOTS in Existing Clusters Without Object Storage](/enterprise/installing-stateful-component-requirements).
+
+As a software vendor, you can exclude MinIO images from all Admin Console air gap distributions (`kotsadm.tar.gz`) in the download portal. Excluding MinIO from the `kotsadm.tar.gz` air gap bundle is useful if you want to prevent MinIO images from appearing in the air gap distribution that your end users download. It also reduces the file size of `kotsadm.tar.gz`.
+
+:::note
+You can still retrieve a bundle with MinIO images from the KOTS release page in GitHub when this feature is enabled. See [replicatedhq/kots](https://github.com/replicatedhq/kots/releases/) in GitHub. 
+::: + +To exclude MinIO from the `kotsadm.tar.gz` Admin Console air gap bundle: + +1. Log in to your Vendor Portal account. Select **Support** > **Request a feature**, and submit a feature request for "Exclude MinIO image from air gap bundle". After this feature is enabled, all `kotsadm.tar.gz` files in the download portal will not include MinIO. + +1. Instruct your end users to set the flag `--with-minio=false` with the `kots install` command during an air gap installation. For more information about setting this runtime flag, see [Installing KOTS in Existing Clusters Without Object Storage](/enterprise/installing-stateful-component-requirements). + + :::important + If you have this feature enabled in your Team account and the end user does not include `--with-minio=false` with the `kots install` command, then the installation fails. + ::: + +================ +File: docs/vendor/packaging-cleaning-up-jobs.md +================ +# Cleaning Up Kubernetes Jobs + +This topic describes how to use the Replicated KOTS `kots.io/hook-delete-policy` annotation to remove Kubernetes job objects from the cluster after they complete. + +## About Kubernetes Jobs + +Kubernetes Jobs are designed to run and then terminate. But, they remain in the namespace after completion. Because Job objects are immutable, this can cause conflicts and errors when attempting to update the Job later. + +A common workaround is to use a content SHA from the Job object in the name. However, a user can update their application instance through various events (upstream update, license sync, config update, CLI upload). If the Job is already completed, it is an error to reapply the same job to the cluster again. + +The built-in Replicated KOTS operator/controller can help by deleting Jobs upon completion. +This allows the same Job to be deployed again without polluting the namespace with completed Jobs. + +For more information about Job objects, see [Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) in the Kubernetes documentation. + +## KOTS `hook-delete-policy` Annotation + +To enable the built-in KOTS operator/controller to automatically delete Jobs when they complete, specify a delete hook policy as an annotation on the Job object. + +The KOTS annotation key is `kots.io/hook-delete-policy` and there are two possible values (you can use both simultaneously): `hook-succeeded` and `hook-failed`. + +When this annotation is present and includes `hook-succeeded`, the job is deleted when it completes successfully. +If this annotation is present and includes `hook-failed`, the job is deleted on failure. + +For Helm charts deployed with KOTS, KOTS automatically adds this `kots.io/hook-delete-policy` annotation to any Job objects in the Helm chart that include a `helm.sh/hook-delete-policy` annotation. This means that there is nothing extra to configure when deploying a Helm chart with Helm delete hooks. 
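+
+For example, a Helm chart might already contain a Job similar to the following (a minimal sketch; the `db-migrate` Job and its hook configuration are hypothetical). Because the Job includes the `helm.sh/hook-delete-policy` annotation, KOTS adds the corresponding `kots.io/hook-delete-policy` annotation automatically:
+
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: db-migrate
+  annotations:
+    # Standard Helm hook annotations; KOTS maps the delete policy to
+    # kots.io/hook-delete-policy with no extra configuration required.
+    "helm.sh/hook": post-install,post-upgrade
+    "helm.sh/hook-delete-policy": hook-succeeded
+spec:
+  template:
+    spec:
+      containers:
+        - name: db-migrate
+          image: example/db-migrate:1.0.0
+          command: ["./migrate"]
+      restartPolicy: Never
+  backoffLimit: 2
+```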
+ +The following example shows a Job object with the `kots.io/hook-delete-policy` annotation: + +```yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: pi + annotations: + "kots.io/hook-delete-policy": "hook-succeeded, hook-failed" +spec: + template: + spec: + containers: + - name: pi + image: perl + command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"] + restartPolicy: Never + backoffLimit: 4 +``` + +================ +File: docs/vendor/packaging-embedded-kubernetes.mdx +================ +import Installers from "../partials/kurl/_installers.mdx" +import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" + +# Creating a kURL Installer + +<KurlAvailability/> + +This topic describes how to create a kURL installer spec in the Replicated Vendor Portal to support installations with Replicated kURL. + +For information about creating kURL installers with the Replicated CLI, see [installer create](/reference/replicated-cli-installer-create). + +## Overview + +<Installers/> + +For more information about kURL, see [Introduction to kURL](kurl-about). + +## Create an Installer + +To distribute a kURL installer alongside your application, you can promote the installer to a channel or include the installer as a manifest file within a given release: + +<table> + <tr> + <th width="30%">Method</th> + <th width="70%">Description</th> + </tr> + <tr> + <td><a href="packaging-embedded-kubernetes#channel">Promote the installer to a channel</a></td> + <td><p>The installer is promoted to one or more channels. All releases on the channel use the kURL installer that is currently promoted to that channel. There can be only one active kURL installer on each channel at a time.</p><p>The benefit of promoting an installer to one or more channels is that you can create a single installer without needing to add a separate installer for each release. However, because all the releases on the channel will use the same installer, problems can occur if all releases are not tested with the given installer.</p></td> + </tr> + <tr> + <td><a href="packaging-embedded-kubernetes#release">Include the installer in a release (Beta)</a></td> + <td><p>The installer is included as a manifest file in a release. This makes it easier to test the installer and release together. It also makes it easier to know which installer spec customers are using based on the application version that they have installed.</p></td> + </tr> +</table> + +### Promote the Installer to a Channel {#channel} + +To promote a kURL installer to a channel: + +1. In the [Vendor Portal](https://vendor.replicated.com), click **kURL Installers**. + +1. On the **kURL Installers** page, click **Create kURL installer**. + + <img alt="vendor portal kurl installers page" src="/images/kurl-installers-page.png" width="650px"/> + + [View a larger version of this image](/images/kurl-installers-page.png) + +1. Edit the file to customize the installer. For guidance on which add-ons to choose, see [Requirements and Recommendations](#requirements-and-recommendations) below. + + You can also go to the landing page at [kurl.sh](https://kurl.sh/) to build an installer then copy the provided YAML: + + <img alt="kurl.sh landing page" src="/images/kurl-build-an-installer.png" width="650px"/> + + [View a larger version of this image](/images/kurl-build-an-installer.png) + +1. Click **Save installer**. You can continue to edit your file until it is promoted. + +1. Click **Promote**. 
In the **Promote Installer** dialog that opens, edit the fields:
+
+   <img alt="promote installer dialog" src="/images/promote-installer.png" width="450px"/>
+
+   [View a larger version of this image](/images/promote-installer.png)
+
+   <table>
+     <tr>
+       <th width="30%">Field</th>
+       <th width="70%">Description</th>
+     </tr>
+     <tr>
+       <td>Channel</td>
+       <td>Select the channel or channels where you want to promote the installer.</td>
+     </tr>
+     <tr>
+       <td>Version label</td>
+       <td>Enter a version label for the installer.</td>
+     </tr>
+   </table>
+
+1. Click **Promote** again. The installer appears on the **kURL Installers** page.
+
+   To make changes after promoting, create and promote a new installer.
+
+### Include an Installer in a Release (Beta) {#release}
+
+To include the kURL installer in a release:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), click **Releases**. Then, either click **Create Release** to create a new release, or click **Edit YAML** to edit an existing release.
+
+   The YAML editor opens.
+
+1. Create a new file in the release with `apiVersion: cluster.kurl.sh/v1beta1` and `kind: Installer`:
+
+   ```yaml
+   apiVersion: cluster.kurl.sh/v1beta1
+   kind: Installer
+   metadata:
+     name: "latest"
+   spec:
+
+   ```
+
+1. Edit the file to customize the installer. For guidance on which add-ons to choose, see [kURL Add-on Requirements and Recommendations](#requirements-and-recommendations) below.
+
+   You can also go to the landing page at [kurl.sh](https://kurl.sh/) to build an installer then copy the provided YAML:
+
+   <img alt="kurl.sh landing page" src="/images/kurl-build-an-installer.png" width="650px"/>
+
+   [View a larger version of this image](/images/kurl-build-an-installer.png)
+
+1. Click **Save**. This saves a draft that you can continue to edit until you promote it.
+
+1. Click **Promote**.
+
+   To make changes after promoting, create a new release.
+
+## kURL Add-on Requirements and Recommendations {#requirements-and-recommendations}
+
+kURL includes several add-ons for networking, storage, ingress, and more. The add-ons that you choose depend on the requirements for KOTS and the unique requirements for your application. For more information about each add-on, see the open source [kURL documentation](https://kurl.sh/docs/introduction/).
+
+When creating a kURL installer, consider the following requirements and guidelines for kURL add-ons:
+
+- You must include the KOTS add-on to support installation with KOTS and provision the KOTS Admin Console. See [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) in the kURL documentation.
+
+- To support the use of KOTS snapshots, Velero must be installed in the cluster. Replicated recommends that you include the Velero add-on in your kURL installer so that your customers do not have to manually install Velero.
+
+  :::note
+  During installation, the Velero add-on automatically deploys internal storage for backups. The Velero add-on requires the MinIO or Rook add-on to deploy this internal storage. If you include the Velero add-on without either the MinIO add-on or the Rook add-on, installation fails with the following error message: `Only Rook and Longhorn are supported for Velero Internal backup storage`.
+  :::
+
+- You must select storage add-ons based on the KOTS requirements and the unique requirements for your application. For more information, see [About Selecting Storage Add-ons](packaging-installer-storage). 
+ +- kURL installers that are included in releases must pin specific add-on versions and cannot pin `latest` versions or x-ranges (such as 1.2.x). Pinning specific versions ensures the most testable and reproducible installations. For example, pin `Kubernetes 1.23.0` in your manifest to ensure that version 1.23.0 of Kubernetes is installed. For more information about pinning Kubernetes versions, see [Versions](https://kurl.sh/docs/create-installer/#versions) and [Versioned Releases](https://kurl.sh/docs/install-with-kurl/#versioned-releases) in the kURL open source documentation. + + :::note + For kURL installers that are _not_ included in a release, pinning specific versions of Kubernetes and Kubernetes add-ons in the kURL installer manifest is not required, though is highly recommended. + ::: + +- After you configure a kURL installer, Replicated recommends that you customize host preflight checks to support the installation experience with kURL. Host preflight checks help ensure successful installation and the ongoing health of the cluster. For more information about customizing host preflight checks, see [Customizing Host Preflight Checks for Kubernetes Installers](preflight-host-preflights). + +- For installers included in a release, Replicated recommends that you define a preflight check in the release to ensure that the target kURL installer is deployed before the release is installed. For more information about how to define preflight checks, see [Defining Preflight Checks](preflight-defining). + + For example, the following preflight check uses the `yamlCompare` analyzer with the `kots.io/installer: "true"` annotation to compare the target kURL installer that is included in the release against the kURL installer that is currently deployed in the customer's environment. For more information about the `yamlCompare` analyzer, see [`yamlCompare`](https://troubleshoot.sh/docs/analyze/yaml-compare/) in the open source Troubleshoot documentation. + + ```yaml + apiVersion: troubleshoot.sh/v1beta2 + kind: Preflight + metadata: + name: installer-preflight-example + spec: + analyzers: + - yamlCompare: + annotations: + kots.io/installer: "true" + checkName: Kubernetes Installer + outcomes: + - fail: + message: The kURL installer for this version differs from what you have installed. It is recommended that you run the updated kURL installer before deploying this version. + uri: https://kurl.sh/my-application + - pass: + message: The kURL installer for this version matches what is currently installed. + ``` + +================ +File: docs/vendor/packaging-include-resources.md +================ +# Conditionally Including or Excluding Resources + +This topic describes how to include or exclude optional application resources based on one or more conditional statements. The information in this topic applies to Helm chart- and standard manifest-based applications. + +## Overview + +Software vendors often need a way to conditionally deploy resources for an application depending on users' configuration choices. For example, a common use case is giving the user the choice to use an external database or an embedded database. In this scenario, when a user chooses to use their own external database, it is not desirable to deploy the embedded database resources. + +There are different options for creating conditional statements to include or exclude resources based on the application type (Helm chart- or standard manifest-based) and the installation method (Replicated KOTS or Helm CLI). 
+
+### About Replicated Template Functions
+
+For applications deployed with KOTS, Replicated template functions are available for creating the conditional statements that control which optional resources are deployed for a given user. Replicated template functions can be used in standard manifest files such as Replicated custom resources or Kubernetes resources like StatefulSets, Secrets, and Services.
+
+For example, the Replicated ConfigOptionEquals template function returns true if the specified configuration option value is equal to a supplied value. This is useful for creating conditional statements that include or exclude a resource based on a user's application configuration choices.
+
+For more information about the available Replicated template functions, see [About Template Functions](/reference/template-functions-about).
+
+## Include or Exclude Helm Charts
+
+This section describes methods for including or excluding Helm charts from your application deployment.
+
+### Helm Optional Dependencies
+
+Helm supports adding a `condition` field to dependencies in the Helm chart `Chart.yaml` file to include subcharts based on one or more boolean values evaluating to true.
+
+For more information about working with dependencies and defining optional dependencies for Helm charts, see [Dependencies](https://helm.sh/docs/chart_best_practices/dependencies/) in the Helm documentation.
+
+### HelmChart `exclude` Field
+
+For Helm chart-based applications installed with KOTS, you can configure KOTS to exclude certain Helm charts from deployment using the HelmChart custom resource [`exclude`](/reference/custom-resource-helmchart#exclude) field. When the `exclude` field is set to a conditional statement, KOTS excludes the chart if the condition evaluates to `true`.
+
+The following example uses the `exclude` field and the ConfigOptionEquals template function to exclude a postgresql Helm chart when the `external_postgres` option is selected on the Replicated Admin Console **Config** page:
+
+```yaml
+apiVersion: kots.io/v1beta2
+kind: HelmChart
+metadata:
+  name: postgresql
+spec:
+  exclude: 'repl{{ ConfigOptionEquals `postgres_type` `external_postgres` }}'
+  chart:
+    name: postgresql
+    chartVersion: 12.1.7
+    releaseName: samplechart-release-1
+```
+
+## Include or Exclude Standard Manifests
+
+For standard manifest-based applications installed with KOTS, you can use the `kots.io/exclude` or `kots.io/when` annotations to include or exclude resources based on a conditional statement.
+
+By default, if neither `kots.io/exclude` nor `kots.io/when` is present on a resource, the resource is included.
+
+### Requirements
+
+The `kots.io/exclude` and `kots.io/when` annotations have the following requirements:
+
+* Only one of the `kots.io/exclude` or `kots.io/when` annotations can be present on a single resource. If both are present, the `kots.io/exclude` annotation is applied, and the `kots.io/when` annotation is ignored.
+
+* The values of the `kots.io/exclude` and `kots.io/when` annotations must be wrapped in quotes. This is because Kubernetes annotations must be strings. For more information about working with Kubernetes annotations, see [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) in the Kubernetes documentation.
+
+### `kots.io/exclude`
+
+When the `kots.io/exclude: '<bool>'` annotation is present on a resource and evaluates to true, the resource is excluded from the deployment. 
+
+The following example uses the `kots.io/exclude` annotation and the ConfigOptionEquals template function to exclude the postgresql `StatefulSet` when an `install_postgres` checkbox on the Admin Console **Config** page is disabled:
+
+```yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: postgresql
+  annotations:
+    kots.io/exclude: '{{repl ConfigOptionEquals "install_postgres" "0" }}'
+  labels:
+    app: postgresql
+spec:
+  selector:
+    matchLabels:
+      app: postgresql
+  serviceName: postgresql
+  template:
+    metadata:
+      labels:
+        app: postgresql
+    spec:
+      containers:
+      - name: postgresql
+        image: "postgres:9.6"
+        imagePullPolicy: ""
+...
+```
+
+### `kots.io/when`
+
+When the `kots.io/when: '<bool>'` annotation is present on a resource and evaluates to true, the resource is included in the deployment.
+
+The following example uses the `kots.io/when` annotation and the ConfigOptionEquals template function to include the postgresql `StatefulSet` resource when the `install_postgres` checkbox on the Admin Console **Config** page is enabled:
+
+```yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: postgresql
+  annotations:
+    kots.io/when: '{{repl ConfigOptionEquals "install_postgres" "1" }}'
+  labels:
+    app: postgresql
+spec:
+  selector:
+    matchLabels:
+      app: postgresql
+  serviceName: postgresql
+  template:
+    metadata:
+      labels:
+        app: postgresql
+    spec:
+      containers:
+      - name: postgresql
+        image: "postgres:9.6"
+        imagePullPolicy: ""
+...
+```
+
+================
+File: docs/vendor/packaging-ingress.md
+================
+# Adding Cluster Ingress Options
+
+When delivering a configurable application, ingress can be challenging as it is very cluster-specific.
+Below is an example of a flexible `ingress.yaml` file designed to work in most Kubernetes clusters, including embedded clusters created with Replicated kURL.
+
+## Example
+
+The following example includes an Ingress resource with a single host based routing rule.
+The resource works in both existing clusters and kURL clusters.
+
+### Config
+
+A config option `enable_ingress` has been provided to allow the end-user to choose whether or not to enable the Ingress resource.
+A custom Ingress resource is desirable in some clusters but not in others; for example, when an ingress controller is not available, other means of exposing services may be preferred.
+
+An `annotations` text area has been made available for the end-user to add additional annotations to the ingress.
+Here, cluster specific annotations can be added to support a variety of ingress controllers.
+For example, when using the [ALB ingress controller](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html) in AWS, it is necessary to include the `kubernetes.io/ingress.class: alb` annotation on your Ingress resource.
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Config
+metadata:
+  name: example-application
+spec:
+  groups:
+    - name: ingress
+      title: Ingress
+      items:
+      - name: enable_ingress
+        type: bool
+        title: Enable Kubernetes Ingress
+        help_text: |
+          When checked, deploy the provided Kubernetes Ingress resource.
+        default: "1"
+      - name: hostname
+        type: text
+        title: Hostname
+        help_text: |
+          Use this field to provide a hostname for your Example Application installation.
+        required: true
+        when: repl{{ ConfigOptionEquals "enable_ingress" "1" }}
+      - name: allow_http
+        type: bool
+        title: Allow Unsecured Access through HTTP
+        help_text: |
+          Uncheck this box to disable HTTP traffic between the client and the load balancer.
+
+        default: "1"
+        when: repl{{ ConfigOptionEquals "enable_ingress" "1" }}
+      - name: annotations
+        type: textarea
+        title: Annotations
+        help_text: |
+          Use this textarea to provide annotations specific to your ingress controller.
+          For example, `kubernetes.io/ingress.class: alb` when using the ALB ingress controller.
+        when: repl{{ ConfigOptionEquals "enable_ingress" "1" }}
+```
+
+### Ingress
+
+For ingress, you must create two separate resources.
+The first is deployed to existing cluster installations, while the second is deployed only to embedded clusters created with kURL.
+Both of these resources are selectively excluded with the [`exclude` annotation](packaging-include-resources).
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: example-application-ingress
+  annotations:
+    kots.io/exclude: '{{repl or (ConfigOptionEquals "enable_ingress" "1" | not) IsKurl }}'
+    kubernetes.io/ingress.allow-http: '{{repl ConfigOptionEquals "allow_http" "1" }}'
+    nginx.ingress.kubernetes.io/force-ssl-redirect: '{{repl ConfigOptionEquals "allow_http" "1" | not }}'
+    kots.io/placeholder: repl{{ printf "'true'" }}repl{{ ConfigOption "annotations" | nindent 4 }}
+spec:
+  rules:
+  - host: repl{{ or (ConfigOption "hostname") "~" }}
+    http:
+      paths:
+      - path: /
+        pathType: ImplementationSpecific
+        backend:
+          service:
+            name: nginx
+            port:
+              number: 80
+```
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: example-application-ingress-embedded
+  annotations:
+    kots.io/exclude: '{{repl or (ConfigOptionEquals "enable_ingress" "1" | not) (not IsKurl) }}'
+    kubernetes.io/ingress.allow-http: '{{repl ConfigOptionEquals "allow_http" "1" }}'
+    nginx.ingress.kubernetes.io/force-ssl-redirect: '{{repl ConfigOptionEquals "allow_http" "1" | not }}'
+    kots.io/placeholder: repl{{ printf "'true'" }}repl{{ ConfigOption "annotations" | nindent 4 }}
+spec:
+  tls:
+  - hosts:
+    - repl{{ ConfigOption "hostname" }}
+    secretName: kotsadm-tls
+  rules:
+  - host: repl{{ ConfigOption "hostname" }}
+    http:
+      paths:
+      - path: /
+        pathType: ImplementationSpecific
+        backend:
+          service:
+            name: nginx
+            port:
+              number: 80
+```
+
+================
+File: docs/vendor/packaging-installer-storage.mdx
+================
+import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"
+
+# About Selecting Storage Add-ons
+
+<KurlAvailability/>
+
+This topic provides guidance for selecting Replicated kURL add-ons to provide highly available data storage in kURL clusters. For additional guidance, see [Choosing a PV Provisioner](https://kurl.sh/docs/create-installer/choosing-a-pv-provisioner) in the open source kURL documentation.
+
+## Overview
+
+kURL includes add-ons for object storage and for dynamic provisioning of PersistentVolumes (PVs) in clusters. You configure these add-ons in your kURL installer to define how data for your application and data for Replicated KOTS is managed in the cluster.
+
+The following lists the kURL add-ons for data storage:
+* **MinIO**: MinIO is an open source, S3-compatible object store. See [MinIO Add-on](https://kurl.sh/docs/add-ons/minio) in the kURL documentation.
+* **Rook**: Rook provides dynamic PV provisioning of distributed Ceph storage. Ceph is a distributed storage system that provides S3-compatible object storage. See [Rook Add-on](https://kurl.sh/docs/add-ons/rook) in the kURL documentation.
+* **OpenEBS**: OpenEBS Local PV creates a StorageClass to dynamically provision local PersistentVolumes (PVs) in a cluster.
See [OpenEBS Add-on](https://kurl.sh/docs/add-ons/openebs) in the kURL documentation.
+* **Longhorn**: Longhorn is an open source distributed block storage system for Kubernetes. See [Longhorn Add-on](https://kurl.sh/docs/add-ons/longhorn) in the kURL documentation.
+
+  :::important
+  The Longhorn add-on is deprecated and not supported in production clusters. If you are currently using Longhorn, you must migrate data from Longhorn to either OpenEBS or Rook. For more information about migrating from Longhorn, see [Migrating to Change CSI Add-On](https://kurl.sh/docs/install-with-kurl/migrating-csi) in the kURL documentation.
+  :::
+
+## About Persistent Storage for KOTS
+
+This section describes the default storage requirements for KOTS. Each of the [Supported Storage Configurations](#supported-storage-configurations) described below satisfies these storage requirements for KOTS.
+
+### rqlite StatefulSet
+
+KOTS deploys an rqlite StatefulSet to store the version history, application metadata, and other small amounts of data needed to manage the application(s). No configuration is required to deploy rqlite.
+
+Rqlite is a distributed relational database that uses SQLite as its storage engine. For more information, see the [rqlite](https://rqlite.io/) website.
+
+### Object Storage or Local PV
+
+By default, KOTS requires an S3-compatible object store to store the following:
+* Support bundles
+* Application archives
+* Backups taken with Replicated snapshots that are configured to use NFS or host path storage destinations
+
+Both the Rook add-on and the MinIO add-on satisfy this object store requirement.
+
+Alternatively, you can configure KOTS to be deployed without object storage. This installs KOTS as a StatefulSet using a persistent volume (PV) for storage. When there is no object storage available, KOTS stores support bundles, application archives, and snapshots that have an NFS or host path storage destination in the local PV. In this case, the OpenEBS add-on can be included to provide the local PV storage. For more information, see [Installing Without Object Storage](/enterprise/installing-stateful-component-requirements).
+
+### Distributed Storage in KOTS v1.88 and Earlier
+
+KOTS v1.88 and earlier requires distributed storage. To support multi-node clusters, kURL installers that use KOTS v1.88 or earlier in the KOTS add-on must use the Rook add-on for distributed storage. For more information, see [Rook Ceph](#rook-ceph) below.
+
+## Factors to Consider When Choosing a Storage Configuration
+
+The object store and/or PV provisioner add-ons that you choose to include in your kURL installer depend on the following factors:
+* **KOTS storage requirements**: The storage requirements for the version of the KOTS add-on that you include in the spec. For example, KOTS v1.88 and earlier requires distributed storage.
+* **Other add-on storage requirements**: The storage requirements for the other add-ons that you include in the spec. For example, the Velero add-on requires object storage to deploy the default internal storage for snapshots during installation.
+* **Application storage requirements**: The storage requirements for your application. For example, you might include different add-ons depending on whether your application requires a single-node or multi-node cluster, or whether your application requires distributed storage.
+
+## Supported Storage Configurations
+
+This section describes the supported storage configurations for embedded clusters provisioned by kURL.
+
+### OpenEBS Without Object Storage (Single Node) {#single-node}
+
+If your application can be deployed to a single node cluster and does not require object storage, then you can choose to exclude object storage and instead use the OpenEBS add-on only to provide local storage on the single node in the cluster.
+
+When configured to use local PV storage instead of object storage, KOTS stores support bundles, application archives, and snapshots that have an NFS or host path storage destination in a PV on the single node in the cluster.
+
+#### Requirements
+
+To use the OpenEBS add-on without object storage, your kURL installer must meet the following requirements:
+
+* When neither the MinIO nor the Rook add-on is included in the kURL installer, you must set the `disableS3` field to `true` in the KOTS add-on. Setting `disableS3: true` in the KOTS add-on allows KOTS to use the local PV storage provided by OpenEBS instead of using object storage. For more information, see [Effects of the disableS3 Flag](https://kurl.sh/docs/add-ons/kotsadm#effects-of-the-disables3-flag) in _KOTS Add-on_ in the kURL documentation.
+
+* When neither the MinIO nor the Rook add-on is included in the kURL installer, the Velero add-on cannot be included. This is because, during installation, the Velero add-on automatically deploys internal storage for backups taken with the Replicated snapshots feature. The Velero add-on requires object storage to deploy this internal storage. If you include the Velero add-on without either the MinIO add-on or the Rook add-on, installation fails with the following error message: `Only Rook and Longhorn are supported for Velero Internal backup storage`.
+
+  When the Velero add-on is not included, your users must install and configure Velero on the cluster after installation in order to use Replicated snapshots for backup and restore. See [About Backup and Restore with Snapshots](/vendor/snapshots-overview).
+
+  For a storage configuration for single node clusters that supports the use of the Velero add-on, see [OpenEBS with MinIO (Single or Multi-Node)](#openebs-minio) below.
+
+#### Example
+
+The following is an example installer that uses OpenEBS v3.3.x with Local PV for local storage and disables object storage for KOTS:
+
+```yaml
+apiVersion: "cluster.kurl.sh/v1beta1"
+kind: "Installer"
+metadata:
+  name: "local"
+spec:
+  ...
+  openebs:
+    version: "3.3.x"
+    isLocalPVEnabled: true
+    localPVStorageClassName: "default"
+  kotsadm:
+    disableS3: true
+```
+
+For more information about properties for the OpenEBS add-on, see [OpenEBS](https://kurl.sh/docs/add-ons/openebs) in the kURL documentation.
+
+### OpenEBS with MinIO (Single or Multi-Node) {#openebs-minio}
+
+Using the OpenEBS add-on with the MinIO add-on provides a highly available data storage solution for multi-node clusters that is lighter weight than Rook Ceph. Replicated recommends that you use OpenEBS Local PV with MinIO for multi-node clusters if your application does _not_ require distributed storage. If your application requires distributed storage, see [Rook Ceph](#rook-ceph) below.
+
+When both the MinIO and OpenEBS add-ons are included, KOTS stores support bundles, application archives, and snapshots that have an NFS or host path storage destination in MinIO object storage. Additionally, KOTS uses OpenEBS Local PV to provision the PVs on each node that MinIO uses for local storage.
+
+#### Requirement
+
+To use both the OpenEBS add-on and the MinIO add-on, the KOTS add-on must use KOTS v1.89 or later.
+
+KOTS v1.88 and earlier requires distributed storage, which is not provided by OpenEBS Local PV. To support multi-node clusters, kURL installers that use KOTS v1.88 or earlier in the KOTS add-on must use the Rook add-on for distributed storage. See [Rook Ceph](#rook-ceph) below.
+
+#### Example
+
+The following is an example installer that uses both the OpenEBS add-on version 3.3.x and MinIO add-on version `2022-09-07T22-25-02Z`:
+
+```yaml
+apiVersion: "cluster.kurl.sh/v1beta1"
+kind: "Installer"
+metadata:
+  name: "openebs-with-minio"
+spec:
+  ...
+  openebs:
+    version: "3.3.x"
+    isLocalPVEnabled: true
+    localPVStorageClassName: "default"
+  minio:
+    version: "2022-09-07T22-25-02Z"
+```
+
+For more information about properties for the OpenEBS and MinIO add-ons, see [OpenEBS](https://kurl.sh/docs/add-ons/openebs) and [MinIO](https://kurl.sh/docs/add-ons/minio) in the kURL documentation.
+
+### Rook Ceph (Multi-Node) {#rook-ceph}
+
+If your application requires multiple nodes and distributed storage, Replicated recommends that you use the Rook add-on for storage. The Rook add-on creates an S3-compatible, distributed object store with Ceph and also creates a StorageClass for dynamically provisioning PVs.
+
+#### Requirement
+
+Rook versions 1.4.3 and later require a dedicated block device attached to each node in the cluster. The block device must be unformatted and dedicated for use by Rook only. The device cannot be used for other purposes, such as being part of a RAID configuration. If the device is used for purposes other than Rook, then the installer fails, indicating that it cannot find an available block device for Rook.
+
+For Rook Ceph versions earlier than 1.4.3, a dedicated block device is recommended in production clusters. Running distributed storage such as Rook on block devices is recommended for improved data stability and performance.
+
+#### Example
+
+The following is an example installer that uses the Rook add-on version 1.7.x:
+
+```yaml
+apiVersion: "cluster.kurl.sh/v1beta1"
+kind: "Installer"
+metadata:
+  name: "distributed"
+spec:
+  ...
+  rook:
+    version: "1.7.x"
+    storageClassName: "distributed"
+    isSharedFilesystemDisabled: true
+```
+
+For more information about properties for the Rook add-on, see [Rook](https://kurl.sh/docs/add-ons/rook) in the kURL documentation.
+
+================
+File: docs/vendor/packaging-kots-versions.md
+================
+# Setting Minimum and Target Versions for KOTS
+
+This topic describes how to set minimum and target versions for Replicated KOTS in the KOTS [Application](/reference/custom-resource-application) custom resource.
+
+## Limitation
+
+Setting minimum and target versions for KOTS is not supported for installations with [Replicated Embedded Cluster](/vendor/embedded-overview).
+
+This is because each version of Embedded Cluster includes a particular version of KOTS. Setting `targetKotsVersion` or `minKotsVersion` to a version of KOTS that does not coincide with the version that is included in the specified version of Embedded Cluster will cause Embedded Cluster installations to fail with an error message like: `Error: This version of App Name requires a different version of KOTS from what you currently have installed.`.
+
+To avoid installation failures, do not use `targetKotsVersion` or `minKotsVersion` in releases that support installation with Embedded Cluster.
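+
+For releases that do not support Embedded Cluster, both fields are set in the KOTS Application custom resource. The following minimal sketch shows where the fields belong; the application name and version values are illustrative placeholders only:
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Application
+metadata:
+  name: my-application
+spec:
+  # Hypothetical value: block installs and updates on KOTS versions earlier than this (Beta)
+  minKotsVersion: "1.71.0"
+  # Hypothetical value: block new installations with KOTS versions later than this
+  targetKotsVersion: "1.95.0"
+```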
+ +## Using Minimum KOTS Versions (Beta) + +The `minKotsVersion` attribute in the Application custom resource defines the minimum version of Replicated KOTS that is required by the application release. This can be useful when you want to get users who are lagging behind to update to a more recent KOTS version, or if your application requires functionality that was introduced in a particular KOTS version. + +Including this attribute enforces compatibility checks for both new installations and application updates. An installation or update is blocked if the currently deployed KOTS version is earlier than the specified minimum KOTS version. Users must upgrade to at least the specified minimum version of KOTS before they can install or update the application. + +### How the Admin Console Handles minKotsVersion + +When you promote a new release specifying a minimum KOTS version that is later than what a user currently has deployed, and that user checks for updates, that application version appears in the version history of the Admin Console. However, it is not downloaded. + +The Admin Console temporarily displays an error message that informs the user that they must update KOTS before downloading the application version. This error also displays when the user checks for updates with the [`kots upstream upgrade`](/reference/kots-cli-upstream-upgrade) command. + +KOTS cannot update itself automatically, and users cannot update KOTS from the Admin Console. For more information on how to update KOTS in existing clusters or in kURL clusters, see [Performing Updates in Existing Clusters](/enterprise/updating-app-manager) and [Performing Updates in kURL Clusters](/enterprise/updating-kurl). + +After updating KOTS to the minimum version or later, users can use the Admin Console or the [`kots upstream download`](/reference/kots-cli-upstream-download) command to download the release and subsequently deploy it. + + +## Using Target KOTS Versions + +Including `targetKotsVersion` in the Application custom resource enforces compatibility checks for new installations. It blocks the installation if a user tries to install a version of KOTS that is later than the target version. For example, this can prevent users from installing a version of KOTS that you have not tested yet. + +If the latest release in a channel includes `targetKotsVersion`, the install command for existing clusters is modified to install that specific version of KOTS. The install command for existing clusters is on the channel card in the [Vendor Portal](https://vendor.replicated.com). + +### How the Admin Console Handles targetKotsVersion + +Specifying a `targetKotsVersion` does not prevent an end user from upgrading to a later version of KOTS after the initial installation. + +If a new version of the application specifies a later target KOTS version than what is currently installed, users are not prevented from deploying that version of the application. + +If a user's Admin Console is running a version of KOTS that is earlier than the target version specified in a new version of the application, the Admin Console displays a notification in the footer, indicating that a newer supported version of KOTS is available. + +### Using Target Versions with kURL + +For installations in a cluster created by Replicated kURL, the version of the KOTS add-on must not be later than the target KOTS version specified in the Application custom resource. 
If the KOTS add-on version is later than the version specified for `targetKotsVersion`, the initial installation fails.
+
+For more information about the KOTS add-on, see [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) in the open source kURL documentation.
+
+================
+File: docs/vendor/packaging-private-images.md
+================
+# Connecting to an External Registry
+
+This topic describes how to add credentials for an external private registry using the Replicated Vendor Portal or Replicated CLI. Adding an external registry allows you to grant proxy access to private images using the Replicated proxy registry. For more information, see [About the Replicated Proxy Registry](private-images-about).
+
+For information about adding a registry with the Vendor API v3, see [Create an external registry with the specified parameters](https://replicated-vendor-api.readme.io/reference/createexternalregistry) in the Vendor API v3 documentation.
+
+## Supported Registries
+
+Replicated recommends that application vendors use one of the following external private registries:
+
+* Amazon Elastic Container Registry (ECR)
+* DockerHub
+* GitHub Container Registry
+* Google Artifact Registry
+* Google Container Registry (Deprecated)
+* Sonatype Nexus
+* Quay.io
+
+These registries have been tested for compatibility with KOTS.
+
+You can also configure access to most other external registries if the registry conforms to the Open Container Initiative (OCI) standard.
+
+## Add Credentials for an External Registry
+
+All applications in your team have access to the external registry that you add. This means that you can use the images in the external registry across multiple apps in the same team.
+
+### Using the Vendor Portal
+
+To add an external registry using the Vendor Portal:
+
+1. Log in to the [Vendor Portal](https://vendor.replicated.com) and go to the **Images** page.
+1. Click **Add External Registry**.
+
+   <img src="/images/add-external-registry.png" alt="/images/add-external-registry.png" width="400px"></img>
+
+   [View a larger version of this image](/images/add-external-registry.png)
+
+1. In the **Provider** drop-down, select your registry provider.
+
+1. Complete the fields in the dialog, depending on the provider that you chose:
+
+   :::note
+   Replicated stores your credentials encrypted and securely. Your credentials and the encryption key do not leave Replicated servers.
+   :::
+
+   * **Amazon ECR**
+      <table>
+      <tr>
+        <th width="30%">Field</th>
+        <th width="70%">Instructions</th>
+      </tr>
+      <tr>
+        <td>Hostname</td>
+        <td>Enter the host name for the registry, such as 123456689.dkr.ecr.us-east-1.amazonaws.com</td>
+      </tr>
+      <tr>
+        <td>Access Key ID</td>
+        <td>Enter the Access Key ID for a Service Account User that has pull access to the registry.
See <a href="tutorial-ecr-private-images#setting-up-the-service-account-user">Setting up the Service Account User</a>.</td>
+      </tr>
+      <tr>
+        <td>Secret Access Key</td>
+        <td>Enter the Secret Access Key for the Service Account User.</td>
+      </tr>
+      </table>
+
+   * **DockerHub**
+
+      <table>
+      <tr>
+        <th width="30%">Field</th>
+        <th width="70%">Instructions</th>
+      </tr>
+      <tr>
+        <td>Hostname</td>
+        <td>Enter the host name for the registry, such as index.docker.io.</td>
+      </tr>
+      <tr>
+        <td>Auth Type</td>
+        <td>Select the authentication type for a DockerHub account that has pull access to the registry.</td>
+      </tr>
+      <tr>
+        <td>Username</td>
+        <td>Enter the username for the account.</td>
+      </tr>
+      <tr>
+        <td>Password or Token</td>
+        <td>Enter the password or token for the account, depending on the authentication type you selected.</td>
+      </tr>
+      </table>
+
+   * **GitHub Container Registry**
+
+      <table>
+      <tr>
+        <th width="30%">Field</th>
+        <th width="70%">Instructions</th>
+      </tr>
+      <tr>
+        <td>Hostname</td>
+        <td>Enter the host name for the registry.</td>
+      </tr>
+      <tr>
+        <td>Username</td>
+        <td>Enter the username for an account that has pull access to the registry.</td>
+      </tr>
+      <tr>
+        <td>GitHub Token</td>
+        <td>Enter the token for the account.</td>
+      </tr>
+      </table>
+
+   * **Google Artifact Registry**
+      <table>
+      <tr>
+        <th width="30%">Field</th>
+        <th width="70%">Instructions</th>
+      </tr>
+      <tr>
+        <td>Hostname</td>
+        <td>Enter the host name for the registry, such as <br/>us-east1-docker.pkg.dev</td>
+      </tr>
+      <tr>
+        <td>Auth Type</td>
+        <td>Select the authentication type for a Google Cloud Platform account that has pull access to the registry.</td>
+      </tr>
+      <tr>
+        <td>Service Account JSON Key or Token</td>
+        <td>
+          <p>Enter the JSON Key from Google Cloud Platform assigned with the Artifact Registry Reader role, or token for the account, depending on the authentication type you selected.</p>
+          <p>For more information about creating a Service Account, see <a href="https://cloud.google.com/container-registry/docs/access-control">Access Control with IAM</a> in the Google Cloud documentation.</p>
+        </td>
+      </tr>
+      </table>
+   * **Google Container Registry**
+   :::important
+   Google Container Registry is deprecated. For more information, see <a href="https://cloud.google.com/container-registry/docs/deprecations/container-registry-deprecation">Container Registry deprecation</a> in the Google documentation.
+
+   :::
+      <table>
+      <tr>
+        <th width="30%">Field</th>
+        <th width="70%">Instructions</th>
+      </tr>
+      <tr>
+        <td>Hostname</td>
+        <td>Enter the host name for the registry, such as gcr.io.</td>
+      </tr>
+      <tr>
+        <td>Service Account JSON Key</td>
+        <td><p>Enter the JSON Key for a Service Account in Google Cloud Platform that is assigned the Storage Object Viewer role.</p><p>For more information about creating a Service Account, see <a href="https://cloud.google.com/container-registry/docs/access-control">Access Control with IAM</a> in the Google Cloud documentation.</p></td>
+      </tr>
+      </table>
+
+   * **Quay.io**
+
+      <table>
+      <tr>
+        <th width="30%">Field</th>
+        <th width="70%">Instructions</th>
+      </tr>
+      <tr>
+        <td>Hostname</td>
+        <td>Enter the host name for the registry, such as quay.io.</td>
+      </tr>
+      <tr>
+        <td>Username and Password</td>
+        <td>Enter the username and password for an account that has pull access to the registry.</td>
+      </tr>
+      </table>
+
+   * **Sonatype Nexus**
+
+      <table>
+      <tr>
+        <th width="30%">Field</th>
+        <th width="70%">Instructions</th>
+      </tr>
+      <tr>
+        <td>Hostname</td>
+        <td>Enter the host name for the registry, such as nexus.example.net.</td>
+      </tr>
+      <tr>
+        <td>Username and Password</td>
+        <td>Enter the username and password for an account that has pull access to the registry.</td>
+      </tr>
+      </table>
+
+   * **Other**
+
+      <table>
+      <tr>
+        <th width="30%">Field</th>
+        <th width="70%">Instructions</th>
+      </tr>
+      <tr>
+        <td>Hostname</td>
+        <td>Enter the host name for the registry, such as example.registry.com.</td>
+      </tr>
+      <tr>
+        <td>Username and Password</td>
+        <td>Enter the username and password for an account that has pull access to the registry.</td>
+      </tr>
+      </table>
+
+1. For **Image name & tag**, enter the image name and image tag and click **Test** to confirm that the Vendor Portal can access the image. For example, `api:v1.0.1` or `my-app/api:v1.01`.
+
+1. Click **Link registry**.
+
+### Using the CLI
+
+To configure access to private images in an external registry using the Replicated CLI:
+
+1. Install and configure the Replicated CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing).
+
+1. Run the `registry add` command for your external private registry. For more information about the `registry add` command, see [registry add](/reference/replicated-cli-registry-add) in _Replicated CLI_.
+
+   For example, to add a DockerHub registry:
+
+   ```bash
+   replicated registry add dockerhub --username USERNAME \
+     --password PASSWORD
+   ```
+
+   Where:
+   * `USERNAME` is the username for DockerHub credentials with access to the registry.
+   * `PASSWORD` is the password for DockerHub credentials with access to the registry.
+
+   :::note
+   To prevent the password from being saved in your shell history, Replicated recommends that you use the `--password-stdin` flag and enter the password when prompted.
+   :::
+
+## Test External Registry Credentials
+
+Replicated recommends that you test external registry credentials to ensure that the saved credentials on Replicated servers can pull the specified image.
+
+To validate that the configured registry can pull specific images:
+
+```bash
+replicated registry test HOSTNAME \
+  --image IMAGE_NAME
+```
+
+Where:
+* `HOSTNAME` is the name of the host, such as `index.docker.io`.
+* `IMAGE_NAME` is the name of the target image in the registry.
+
+For example:
+
+```bash
+replicated registry test index.docker.io --image my-company/my-image:v1.2.3
+```
+
+## Related Topic
+
+[Tutorial: Using ECR for Private Images](tutorial-ecr-private-images)
+
+================
+File: docs/vendor/packaging-private-registry-security.md
+================
+# Replicated Registry Security
+
+This document lists the security measures and processes in place to ensure that images pushed to the Replicated registry remain private. For more information about pushing images to the Replicated registry, see [Using the Replicated Registry for KOTS Installations](private-images-replicated).
+
+
+## Single Tenant Isolation
+
+The registry is deployed and managed as a multi-tenant application, but each tenant is completely isolated from data that is created and pulled by other tenants. Docker images have shared base layers, but the private registry does not share these between tenants. For example, if a tenant creates an image `FROM postgres:10.3` and pushes the image to Replicated, all of the layers are uploaded, even if other tenants have this base layer uploaded.
+
+A tenant in the private registry is a team on the Replicated [Vendor Portal](https://vendor.replicated.com). Licenses and customers created by the team are also granted some permissions to the registry data, as specified in the following sections. Cross-tenant access is never allowed in the private registry.
+
+
+## Authentication and Authorization
+
+The private registry supports several methods of authentication. Public access is never allowed because the registry only accepts authenticated requests.
+
+
+### Vendor Authentication
+
+All accounts with read/write access on the Vendor Portal have full access to all images pushed by the tenant to the registry. These users can push and pull images to and from the registry.
+
+
+### End Customer Authentication
+
+A valid (unexpired) license file has an embedded `registry_token` value. Replicated components shipped to customers use this value to authenticate to the registry. Only pull access is enabled when authenticating using a `registry_token`. A `registry_token` has pull access to all images in the tenant's account. All requests to pull images are denied when a license expires or the expiration date is changed to a past date.
+
+
+## Networking and Infrastructure
+
+A dedicated cluster is used to run the private registry and is not used for any other services.
+
+The registry metadata is stored in a shared database instance. This database contains information about each layer in an image, but not the image data itself.
+
+The registry image data is securely stored in an encrypted S3 bucket. Each layer is encrypted at rest, using a shared key stored in [Amazon Key Management Service](https://aws.amazon.com/kms/). Each tenant has a unique directory in the shared bucket and access is limited to the team or license making the request.
+
+The registry cluster runs on a hardened operating system image (CentOS-based), and all instances are in a virtual private cloud (VPC). Public IP addresses are not assigned to the instances running the cluster and the registry images. Instead, only port 443 traffic is allowed from a layer 7 load balancer to these servers.
+
+There are no SSH public keys on these servers, and password-based SSH login is disallowed. The servers are not configured to have any remote access. All deployments to these servers are automated using tools such as Terraform and a custom-built CI/CD process.
Only verified images are pulled and run.
+
+
+## Runtime Monitoring
+
+Replicated uses a Web Application Firewall (WAF) on the cluster that monitors and blocks any unusual activity. When unusual activity is detected, access from that endpoint is automatically blocked for a period of time, and a Replicated site reliability engineer (SRE) is alerted.
+
+
+## Penetration Testing
+
+Replicated completed a formal penetration test that included the private registry in the scope of the test. Replicated also runs a bug bounty program and encourages responsible disclosure of any vulnerabilities that are found.
+
+================
+File: docs/vendor/packaging-public-images.mdx
+================
+# Connecting to a Public Registry through the Proxy Registry
+
+This topic describes how to pull images from public registries using the Replicated proxy registry.
+
+For more information about the Replicated proxy registry, see [About the Replicated Proxy Registry](private-images-about).
+
+## Pull Public Images Through the Replicated Proxy Registry
+
+You can use the Replicated proxy registry to pull both public and private images. Using the Replicated proxy registry for public images can simplify network access requirements for your customers, as they only need to whitelist a single domain (either `proxy.replicated.com` or your custom domain) instead of multiple registry domains.
+
+Before you can pull public images through the proxy registry, you must first authenticate to the proxy registry with valid credentials.
+
+To pull public images through the Replicated proxy registry, use the following `docker` command:
+
+```bash
+docker pull REPLICATED_PROXY_DOMAIN/proxy/APPSLUG/UPSTREAM_REGISTRY_HOSTNAME/IMAGE:TAG
+```
+Where:
+* `APPSLUG` is your Replicated app slug found on the [app settings page](https://vendor.replicated.com/settings).
+* `REPLICATED_PROXY_DOMAIN` is `proxy.replicated.com` or your custom domain. For information about how to set a custom domain for the proxy registry, see [Using Custom Domains](/vendor/custom-domains-using).
+* `UPSTREAM_REGISTRY_HOSTNAME` is the hostname for the public registry where the image is located. If the image is located in a namespace within the registry, include the namespace after the hostname. For example, `quay.io/namespace`.
+* `IMAGE` is the image name.
+* `TAG` is the image tag.
+
+## Examples
+
+This section includes examples of pulling public images through the Replicated proxy registry.
+
+### Pull Images from DockerHub
+
+The following examples show how to pull public images from DockerHub:
+
+```bash
+# DockerHub is the default when no hostname is specified
+docker pull proxy.replicated.com/proxy/APPSLUG/busybox
+docker pull proxy.replicated.com/proxy/APPSLUG/nginx:1.16.0
+```
+```bash
+# You can also optionally specify docker.io
+docker pull proxy.replicated.com/proxy/APPSLUG/docker.io/replicated/replicated-sdk:1.0.0
+```
+
+### Pull Images from Other Registries
+
+The following example shows how to pull images from the Amazon ECR Public Gallery:
+
+```bash
+docker pull proxy.replicated.com/proxy/APPSLUG/public.ecr.aws/nginx/nginx:latest
+```
+
+### Pull Images Using a Custom Domain for the Proxy Registry
+
+The following example shows how to pull a public image when a custom domain is configured for the proxy registry:
+
+```bash
+docker pull my.customdomain.io/proxy/APPSLUG/public.ecr.aws/nginx/nginx:latest
+```
+For information about how to set a custom domain for the proxy registry, see [Using Custom Domains](/vendor/custom-domains-using).
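+
+Once an image can be pulled through the proxy registry, it can be referenced like any other image in application manifests. The following is a minimal sketch in which the app slug `my-app` and the choice of the nginx image are hypothetical placeholders:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: nginx
+spec:
+  containers:
+  - name: nginx
+    # Hypothetical app slug "my-app"; the image is pulled from DockerHub
+    # through the Replicated proxy registry
+    image: proxy.replicated.com/proxy/my-app/nginx:1.16.0
+```
+
+Depending on how authentication is handled in the target cluster, an image pull secret for the proxy registry may also need to be referenced by the workload.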
+ +## Related Topic + +[Connecting to an External Registry](packaging-private-images) + +================ +File: docs/vendor/packaging-rbac.md +================ +# Configuring KOTS RBAC + +This topic describes role-based access control (RBAC) for Replicated KOTS in existing cluster installations. It includes information about how to change the default cluster-scoped RBAC permissions granted to KOTS. + +## Cluster-scoped RBAC + +When a user installs your application with KOTS in an existing cluster, Kubernetes RBAC resources are created to allow KOTS to install and manage the application. + +By default, the following ClusterRole and ClusterRoleBinding resources are created that grant KOTS access to all resources across all namespaces in the cluster: + +```yaml +apiVersion: "rbac.authorization.k8s.io/v1" +kind: "ClusterRole" +metadata: + name: "kotsadm-role" +rules: + - apiGroups: ["*"] + resources: ["*"] + verbs: ["*"] +``` + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kotsadm-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kotsadm-role +subjects: +- kind: ServiceAccount + name: kotsadm + namespace: appnamespace +``` + +Alternatively, if your application does not require access to resources across all namespaces in the cluster, then you can enable namespace-scoped RBAC for KOTS. For information, see [About Namespace-scoped RBAC](#min-rbac) below. + +## Namespace-scoped RBAC {#min-rbac} + +Rather than use the default cluster-scoped RBAC, you can configure your application so that the RBAC permissions granted to KOTS are limited to a target namespace or namespaces. By default, for namespace-scoped installations, the following Role and RoleBinding resources are created that grant KOTS permissions to all resources in a target namespace: + +```yaml +apiVersion: "rbac.authorization.k8s.io/v1" +kind: "Role" +metadata: + name: "kotsadm-role" +rules: + - apiGroups: ["*"] + resources: ["*"] + verbs: ["*"] +``` + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: kotsadm-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kotsadm-role +subjects: +- kind: ServiceAccount + name: kotsadm + namespace: appnamespace +``` + +Namespace-scoped RBAC is supported for applications that use Kubernetes Operators or multiple namespaces. During application installation, if there are `additionalNamespaces` specified in the Application custom resource manifest file, then Roles and RoleBindings are created to grant KOTS access to resources in all specified namespaces. + +### Enable Namespace-scoped RBAC {#enable} + +To enable namespace-scoped RBAC permissions for KOTS, specify one of the following options in the Application custom resource manifest file: + +* `supportMinimalRBACPrivileges`: Set to `true` to make namespace-scoped RBAC optional for existing cluster installations. When `supportMinimalRBACPrivileges` is `true`, cluster-scoped RBAC is used by default and users must pass the `--use-minimal-rbac` flag with the installation or upgrade command to use namespace-scoped RBAC. + +* `requireMinimalRBACPrivileges`: Set to `true` to require that all installations to existing clusters use namespace-scoped access. When `requireMinimalRBACPrivileges` is `true`, all installations use namespace-scoped RBAC automatically and users do not pass the `--use-minimal-rbac` flag. 
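+
+For example, the following minimal sketch of an Application custom resource requires namespace-scoped RBAC for all existing cluster installations (the application name is a hypothetical placeholder):
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Application
+metadata:
+  name: my-application
+spec:
+  # Require namespace-scoped RBAC; users do not pass --use-minimal-rbac
+  requireMinimalRBACPrivileges: true
+```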
+ +For more information about these options, see [requireMinimalRBACPrivileges](/reference/custom-resource-application#requireminimalrbacprivileges) and [supportMinimalRBACPrivileges](/reference/custom-resource-application#supportminimalrbacprivileges) in _Application_. + +### About Installing with Minimal RBAC + +In some cases, it is not possible to grant the user `* * *` permissions in the target namespace. For example, an organization might have security policies that prevent this level of permissions. + +If the user installing or upgrading KOTS cannot be granted `* * *` permissions in the namespace, then they can instead request the following: +* The minimum RBAC permissions required by KOTS +* RBAC permissions for any CustomResourceDefinitions (CRDs) that your application includes + +Installing with the minimum KOTS RBAC permissions also requires that the user manually creates a ServiceAccount, Role, and RoleBinding for KOTS, rather than allowing KOTS to automatically create a Role with `* * *` permissions. + +For more information about how users can install KOTS with minimal RBAC when namespace-scoped RBAC is enabled, see [Namespace-scoped RBAC Requirements](/enterprise/installing-general-requirements#namespace-scoped) in _Installation Requirements_. + +### Limitations + +The following limitations apply when using the `requireMinimalRBACPrivileges` or `supportMinimalRBACPrivileges` options to enable namespace-scoped RBAC for KOTS: + +* **Existing clusters only**: The `requireMinimalRBACPrivileges` and `supportMinimalRBACPrivileges` options apply only to installations in existing clusters. + +* **Preflight checks**: When namespace-scoped access is enabled, preflight checks cannot read resources outside the namespace where KOTS is installed. The preflight checks continue to function, but return less data. For more information, see [Defining Preflight Checks](/vendor/preflight-defining). + +* **Velero namespace access for KOTS snapshots**: Velero is required for enabling backup and restore with the KOTS snapshots feature. Namespace-scoped RBAC does not grant access to the namespace where Velero is installed in the cluster. + + To set up snapshots when KOTS has namespace-scoped access, users can run the `kubectl kots velero ensure-permissions` command. This command creates additional Roles and RoleBindings to allow the necessary cross-namespace access. For more information, see [`velero ensure-permissions`](/reference/kots-cli-velero-ensure-permissions/) in the KOTS CLI documentation. + + For more information about snapshots, see [About Backup and Restore with Snapshots](/vendor/snapshots-overview). + +* **Air Gap Installations**: For air gap installations, the `requireMinimalRBACPrivileges` and `supportMinimalRBACPrivileges` flags are supported only in automated, or _headless_, installations. In headless installations, the user passes all the required information to install both KOTS and the application with the `kots install` command. In non-headless installations, the user provides information to install the application through the Admin Console UI after KOTS is installed. + + In non-headless installations in air gap environments, KOTS does not have access to the application's `.airgap` package during installation. This means that KOTS does not have the information required to determine whether namespace-scoped access is needed, so it defaults to the more permissive, default cluster-scoped RBAC policy. 
+
+   For more information about how to do headless installations in air gap environments, see [Air Gap Installation](/enterprise/installing-existing-cluster-automation#air-gap) in _Installing with the KOTS CLI_.
+
+* **Changing RBAC permissions for installed instances**: The RBAC permissions for KOTS are set during its initial installation. KOTS runs using the assumed identity and cannot change its own authorization. When you update your application to add or remove the `requireMinimalRBACPrivileges` and `supportMinimalRBACPrivileges` flags in the Application custom resource, the RBAC permissions for KOTS are affected only for new installations. Existing KOTS installations continue to run with their current RBAC permissions.
+
+  To expand the scope of RBAC for KOTS from namespace-scoped to cluster-scoped in new installations, Replicated recommends that you include a preflight check to ensure the permission is available in the cluster.
+
+================
+File: docs/vendor/packaging-using-tls-certs.mdx
+================
+import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"
+
+# Using TLS Certificates
+
+<KurlAvailability/>
+
+Replicated KOTS provides default self-signed certificates that renew automatically. For embedded clusters created with Replicated kURL, the self-signed certificate renews 30 days before expiration when you enable the kURL EKCO add-on version 0.7.0 and later.
+
+Custom TLS options are supported:
+
+- **Existing clusters:** The expectation is for the end customer to bring their own Ingress Controller such as Contour or Istio and upload their own `kubernetes.io/tls` secret. For an example, see [Ingress with TLS](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) in the Kubernetes documentation.
+
+- **Embedded kURL clusters:** End customers can upload a custom TLS certificate. Replicated kURL creates a TLS secret that can be reused by other Kubernetes resources, such as Deployment or Ingress, which can be easier than providing and maintaining multiple certificates. As a vendor, you can enable the use of custom TLS certificates with these additional resources.
+
+For example, if your application does TLS termination, your deployment would need the TLS secret. Or if the application is connecting to another deployment that is also secured using the same SSL certificate (which may not be a trusted certificate), the custom TLS certificate can be used to do validation without relying on the trust chain.
+
+### Get the TLS Secret
+
+kURL sets up a Kubernetes secret called `kotsadm-tls`. The secret stores the TLS certificate, key, and hostname. Initially, the secret has an annotation set called `acceptAnonymousUploads`. This indicates that a new TLS certificate can be uploaded by the end customer during the installation process. For more information about installing with kURL, see [Online Installation with kURL](/enterprise/installing-kurl).
+
+Before you can reference the TLS certificate in other resources, you must get the `kotsadm-tls` secret output.
+
+To get the `kotsadm-tls` secret, run:
+
+```shell
+kubectl get secret kotsadm-tls -o yaml
+```
+
+In the output, the `tls.crt` and `tls.key` fields hold the certificate and key that can be referenced in other Kubernetes resources.
+
+**Example Output:**
+
+```yaml
+apiVersion: v1
+kind: Secret
+type: kubernetes.io/tls
+metadata:
+  name: kotsadm-tls
+data:
+  tls.crt: <base64_encoded>
+  tls.key: <base64_encoded>
+```
+
+### Use TLS in a Deployment Resource
+
+This procedure shows how to reference the `kotsadm-tls` secret using an example nginx Deployment resource (`kind: Deployment`).
+
+To use the `kotsadm-tls` secret in a Deployment resource:
+
+1. In the Deployment YAML file, configure SSL for volumeMounts and volumes, and add the `kotsadm-tls` secret to volumes:
+
+   **Example:**
+
+   ```yaml
+   apiVersion: apps/v1
+   kind: Deployment
+   metadata:
+     name: nginx
+   spec:
+     template:
+       spec:
+         containers:
+         - name: nginx
+           image: nginx
+           volumeMounts:
+           - mountPath: "/etc/nginx/ssl"
+             name: nginx-ssl
+             readOnly: true
+         volumes:
+         - name: nginx-ssl
+           secret:
+             secretName: kotsadm-tls
+   ```
+
+1. Deploy the release, and then verify the pod deployment using the `kubectl exec` command:
+
+   **Example:**
+
+   ```shell
+   export POD_NAME=nginx-<hash>
+   kubectl exec -it ${POD_NAME} -- bash
+   ```
+
+1. Run the `ls` and `cat` commands to verify that the certificate and key were deployed to the specified volumeMount:
+
+   **Example:**
+
+   ```shell
+   $ ls /etc/nginx/ssl
+   tls.crt tls.key
+
+   $ cat /etc/nginx/ssl/tls.crt
+   -----BEGIN CERTIFICATE-----
+   MIID8zCCAtugAwIBAgIUZF+NWHnpJCt2R1rDUhYjwgVv72UwDQYJKoZIhvcNAQEL
+
+   $ cat /etc/nginx/ssl/tls.key
+   -----BEGIN PRIVATE KEY-----
+   MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCyiGNuHw2LY3Rv
+   ```
+
+### Use TLS in an Ingress Resource
+
+You can add the `kotsadm-tls` secret to the Ingress resource to terminate TLS at the Contour layer. The following example shows how to configure `secretName: kotsadm-tls` under the TLS `hosts` field in an Ingress resource (`kind: Ingress`):
+
+**Example:**
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: nginx
+spec:
+  tls:
+  - hosts:
+    - 'tls.foo.com'
+    secretName: kotsadm-tls
+  rules:
+  - host: tls.foo.com
+    http:
+      paths:
+      - path: /
+        pathType: ImplementationSpecific
+        backend:
+          service:
+            name: nginx
+            port:
+              number: 80
+```
+:::note
+`tls.foo.com` must resolve to a valid IP, and also must match the Common Name (CN) or Subject Alternative Name (SAN) of the TLS certificate.
+:::
+
+================
+File: docs/vendor/planning-questionnaire.md
+================
+# Customer Application Deployment Questionnaire
+
+Before you package and distribute an application, Replicated recommends that you
+understand several key characteristics about the environments where your customers
+will deploy your application.
+
+To gather this information about your customers' environments:
+1. Copy and customize the [$APP Deployment Questionnaire](#app-deployment-questionnaire) below.
+1. Replace $APP with the name of your application.
+1. Send the questionnaire to your users.
+
+## $APP Deployment Questionnaire
+
+### Infrastructure
+
+This section includes questions about your infrastructure and how you deploy software.
+This includes both internally-written and Commercial Off The Shelf (COTS) applications.
+
+If it’s more convenient, limit answers to the scope of the target infrastructure for deploying $APP.
+
+- Do you use any IaaS like AWS, GCP, or Azure?
+
+- If you deploy to a physical datacenter, do you use a hypervisor like vSphere?
+
+- Do you ever install on bare metal?
+
+- Do you have any restrictions on what operating systems are used?
+
+- Does the target infrastructure have a direct outbound internet connection? Can it connect out via a proxy?
+ +- If the environment has no outbound network, do machines in a DMZ have direct network access to the air gapped infrastructure, or do release artifacts need to be copied to physical media for installation? + +- If there is an issue causing downtime in the on-prem application, would you be willing to give the $APP team direct SSH access to the instance(s)? + +### Development and Deployment Processes + +- Do you require applications be deployed by a configuration management framework like Chef, Ansible, or Puppet? + +- Do you run any container-based workloads today? + +- If you run container workloads, do you run any kind of orchestration like Kubernetes, Mesos, or Docker Swarm? + +- If you run container workloads, what tools do you use to host and serve container images? + +- If you run container workloads, what tools do you use to scan and secure container images? + +- If you are deploying $APP to your existing Kubernetes cluster, can your cluster nodes pull images from the public internet, or do you require images to be stored in an internal registry? + +### Change Management + +- How do you test new releases of COTS software? Do you have a UAT or Staging environment? Are there other change management requirements? + +- How often do you like to receive planned (non-critical) software updates? Quarterly? Monthly? As often as possible? + +- For critical updates, what is your target deployment time for new patches? Do you have a requirement for how quickly patches are made available after a vulnerability is announced? + +- Do you drive production deploys automatically from version control (“gitops”)? + + +### Application Usage and Policy Requirements + +- For applications that expose a web UI, how will you be connecting to the instance? As much as possible, include details about your workstation, any tunneling/VPN/proxy infrastructure, and what browsers you intend to use. + +- Do you require a disaster recovery strategy for deployed applications? If so, where are backups stored today? (SFTP? NAS? S3-compliant object store? Something else?) + +- Do you require deployed COTS applications to support logins with an internal identity provider like OpenLDAP, Windows AD or SAML? + +- Do you require an audit log of all user activity performed in $APP? What are your needs around exporting / aggregating audit log data? + +- Do you anticipate the need to scale the capacity of $APP up and down during its lifetime? + +- What are your requirements around log aggregation? What downstream systems do you need system logs to be piped to? + +================ +File: docs/vendor/policies-data-transmission.md +================ +# Data Transmission Policy + +A Replicated installation connects to a Replicated-hosted endpoint periodically to perform various tasks including checking for updates and synchronizing the installed license properties. During this time, some data is transmitted from an installed instance to the Replicated API. This data is limited to: + +- The IP address of the primary Replicated instance. +- The ID of the installation. +- [Resource statuses](/enterprise/status-viewing-details#resource-statuses) +- Information about the installation including data needed for [instance details](/vendor/instance-insights-details). +- [Custom metrics](/vendor/custom-metrics) which the vendor may configure as part of the installation. +- Date and timestamps of the data transmission. + +This data is required to provide the expected update and license services. 
The data is also used to provide telemetry and other reporting features.
+
+By default, no additional data is collected and transmitted from the instance to external servers.
+
+All data is encrypted in transit according to industry best practices. For more information about Replicated's security practices, see [Security at Replicated](https://www.replicated.com/security/) on the Replicated website.
+
+For more information about application instance data fields that the Replicated Vendor Portal uses to generate events for instances, see [About Instance and Event Data](/vendor/instance-insights-event-data).
+
+Last modified December 31, 2023
+
+================
+File: docs/vendor/policies-infrastructure-and-subprocessors.md
+================
+# Infrastructure and Subprocessor Providers
+
+This list describes the infrastructure environment, subprocessors, and other entities material to the Replicated products and services.
+
+Prior to engaging any third party, Replicated performs due diligence to evaluate their privacy, security, and confidentiality practices. Whenever possible, Replicated uses encryption for data at rest and in motion so that information is not available to these third parties.
+
+Replicated does not engage in the business of selling or trading personal information. Any personally identifiable information Replicated might possibly hold is data that a customer has provided to us.
+
+The fields that Replicated may possess as identifiable to a physical person may include:
+- Name
+- Email
+- Phone Number
+- Job Title
+- Business Address
+- GitHub Username
+
+Note: This does not imply that all these fields are collected for each person. It also does not mean all these data points are used with each declared provider.
+
+
+## Replicated Infrastructure Providers
+
+Replicated might use the following entities to provide infrastructure that helps with delivery of our products:
+
+
+| Entity Name | Purpose | Country where Infrastructure Resides | Notes |
+|---------------------|----------------------------|-------|----|
+| Amazon Web Services | Various IaaS | United States | Vendor Portal, registry, API, and supporting infrastructure services. |
+| Cloudflare | Network security, DDoS mitigation, DNS | United States | |
+| Datadog | Performance monitoring | United States | |
+| DBT Labs | Data transformation or migration | United States | |
+| FiveTran | Data transformation or migration | United States | |
+| GitHub | Customer support | United States | Replicated's customers may engage with our customer support team using GitHub issues in a private repo. |
+| Google Looker | Product usage metrics | United States | |
+| Hex | Data transformation or migration | United States | |
+| Knock Labs, Inc. | Event notifications | United States | |
+| Postmark / Active Campaign | Transactional emails from Vendor Portal. Marketing related communications. | United States | Active Campaign and Postmark businesses merged. |
+| Salesforce | Customer and sales relationship management | United States | |
+| Snowflake | Usage data analysis and transformation | United States | |
+| Timescale | Time-series data of instance metrics | United States | See our [Data Transmission Policy](/vendor/policies-data-transmission) |
+
+Last modified January 4, 2024
+
+================
+File: docs/vendor/policies-support-lifecycle.md
+================
+# Support Lifecycle Policy
+
+Replicated will provide support for products per our terms and services until that product is noted as End of Life (EOL).
+ +<table> + <tr> + <th width="30%">Product Phase</th> + <th width="70%">Definition</th> + </tr> + <tr> + <td>Alpha</td> + <td>A product or feature that is exploratory or experimental. Typically, access to alpha features and their documentation is limited to customers providing early feedback. While most alpha features progress to beta and general availability (GA), some are deprecated based on assessment learnings.</td> + </tr> + <tr> + <td>Beta</td> + <td><p>A product or feature that is typically production-ready, but has not met Replicated’s definition of GA for one or more of the following reasons:</p><ul><li>Remaining gaps in intended functionality</li><li>Outstanding needs around testing</li><li>Gaps in documentation or sales enablement</li><li>In-progress customer value validation efforts</li></ul><p>Documentation for beta products and features is published on the Replicated Documentation site with a "(Beta)" label. Beta products or features follow the same build and test processes required for GA.</p><p>Please contact your Replicated account representative if you have questions about why a product or feature is beta.</p></td> + </tr> + <tr> + <td>“GA” - General Availability</td> + <td>A product or feature that has been validated as both production-ready and value-additive by a percentage of Replicated customers. Products in the GA phase are typically those that are available for purchase from Replicated.</td> + </tr> + <tr> + <td>“LA” - Limited Availability</td> + <td>A product has reached the Limited Availability phase when it is no longer available for new purchases from Replicated. Updates will be primarily limited to security patches, critical bugs and features that enable migration to GA products.</td> + </tr> + <tr> + <td>“EOA” - End of Availability</td> + <td><p>A product has reached the End of Availability phase when it is no longer available for renewal purchase by existing customers. This date may coincide with the Limited Availability phase.</p><p>This product is considered deprecated, and will move to End of Life after a determined support window. Product maintenance is limited to critical security issues only.</p></td> + </tr> + <tr> + <td>“EOL” - End of Life</td> + <td><p>A product has reached its End of Life, and will no longer be supported, patched, or fixed by Replicated. 
Associated product documentation may no longer be available.</p><p>The Replicated team will continue to engage to migrate end customers to GA product-based deployments of your application.</p></td>
+  </tr>
+</table>
+
+<table>
+  <tr>
+    <th width="25%">Replicated Product</th>
+    <th width="15%">Product Phase</th>
+    <th width="25%">End of Availability</th>
+    <th width="25%">End of Life</th>
+  </tr>
+  <tr>
+    <td><a href="/vendor/testing-about">Compatibility Matrix</a></td>
+    <td>GA</td>
+    <td>N/A</td>
+    <td>N/A</td>
+  </tr>
+  <tr>
+    <td><a href="/vendor/replicated-sdk-overview">Replicated SDK</a></td>
+    <td>Beta</td>
+    <td>N/A</td>
+    <td>N/A</td>
+  </tr>
+  <tr>
+    <td><a href="/intro-kots">Replicated KOTS Installer</a></td>
+    <td>GA</td>
+    <td>N/A</td>
+    <td>N/A</td>
+  </tr>
+  <tr>
+    <td><a href="/vendor/kurl-about">Replicated kURL Installer</a></td>
+    <td>GA</td>
+    <td>N/A</td>
+    <td>N/A</td>
+  </tr>
+  <tr>
+    <td><a href="/vendor/embedded-overview">Replicated Embedded Cluster Installer</a></td>
+    <td>GA</td>
+    <td>N/A</td>
+    <td>N/A</td>
+  </tr>
+  <tr>
+    <td><a href="https://help.replicated.com/docs/native/getting-started/overview/">Replicated Classic Native Installer</a></td>
+    <td>EOL</td>
+    <td>2023-12-31*</td>
+    <td>2024-12-31*</td>
+  </tr>
+</table>
+
+*Except for customers who have specifically contracted different dates for the End of Availability and End of Life timelines.
+
+## Supported Replicated Installer Versions
+
+The following table lists the versions of Replicated Embedded Cluster, Replicated KOTS, and Replicated kURL that are supported on each Kubernetes version.
+
+The End of Replicated Support date is the End of Life (EOL) date for the Kubernetes version. The EOL date for each Kubernetes version is published on the [Releases](https://kubernetes.io/releases/) page in the Kubernetes documentation.
+
+<table>
+  <tr>
+    <th>Kubernetes Version</th>
+    <th>Embedded Cluster Versions</th>
+    <th>KOTS Versions</th>
+    <th>kURL Versions</th>
+    <th>End of Replicated Support</th>
+  </tr>
+  <tr>
+    <td>1.32</td>
+    <td>N/A</td>
+    <td>N/A</td>
+    <td>N/A</td>
+    <td>2026-02-28</td>
+  </tr>
+  <tr>
+    <td>1.31</td>
+    <td>N/A</td>
+    <td>1.117.0 and later</td>
+    <td>v2024.08.26-0 and later</td>
+    <td>2025-10-28</td>
+  </tr>
+  <tr>
+    <td>1.30</td>
+    <td>1.16.0 and later</td>
+    <td>1.109.1 and later</td>
+    <td>v2024.05.03-0 and later</td>
+    <td>2025-06-28</td>
+  </tr>
+  <tr>
+    <td>1.29</td>
+    <td>1.0.0 and later</td>
+    <td>1.105.2 and later</td>
+    <td>v2024.01.02-0 and later</td>
+    <td>2025-02-28</td>
+  </tr>
+</table>
+
+Replicated support for end-customer installations is limited to those installs using a Replicated-provided installer product, such as KOTS, kURL, or Embedded Cluster, available with the [Business or Enterprise plans](https://www.replicated.com/pricing). Replicated support for direct Helm CLI installs or other vendor-provided installers is limited to the successful distribution of the software to the end customer, as well as any issues with the Replicated SDK if included with the installation.
+
+The information contained herein is believed to be accurate as of the date of publication, but updates and revisions may be posted periodically and without notice.
+
+Last modified January 2, 2025.
+
+================
+File: docs/vendor/policies-vulnerability-patch.md
+================
+# Vulnerability Patch Policy
+
+While it’s our goal to distribute vulnerability-free versions of all components, this isn’t always possible.
+Kubernetes and KOTS are made from many components, each authored by different vendors.
+
+The best way to stay ahead of vulnerabilities is to run the latest version and have a strategy to quickly update when a patch is available.
+
+## How We Scan
+
+Our build pipeline uses [Trivy](https://www.aquasec.com/products/trivy/) to scan for and detect known, published vulnerabilities in our images.
+It’s possible that other security scanners will detect a different set of results.
+We commit to patching vulnerabilities according to the timeline below based on the results of our internal scans.
+
+If you or your customer detects a different vulnerability using a different scanner, we encourage you to report it to us in a GitHub issue, a Slack message, or a support issue opened from the Replicated Vendor Portal.
+Our team will evaluate the vulnerability and determine the best course of action.
+
+## Base Images
+
+KOTS images are built on top of Chainguard's open source [Wolfi](https://edu.chainguard.dev/open-source/wolfi/overview/) base image. Wolfi is a Linux undistro that is focused on supply chain security.
+
+KOTS has automation that uses the Chainguard [melange](https://edu.chainguard.dev/open-source/melange/overview/) and [apko](https://edu.chainguard.dev/open-source/apko/overview/) projects to build packages and assemble images on Wolfi. Building and assembling images in this way helps to ensure that any CVEs can be resolved quickly and efficiently.
+
+## Upstream CVE Disclosure
+
+Replicated KOTS, kURL, and Embedded Cluster deliver many upstream Kubernetes and ecosystem components.
+We do not build these packages; we rely on the upstream software vendors to distribute patches.
+Our intent is to make any patches available as soon as possible, but we guarantee the following timelines for making upstream patches available after we learn about a vulnerability and a patch is available to us:
+
+| CVE Level | Time to release |
+|-----------|-----------------|
+| Critical | Within 2 weeks |
+| High | Within 60 days |
+| Medium | Within 90 days |
+| Low | Best effort unless risk accepted |
+
+## Notable Upstream CVEs
+
+This section lists CVEs that have yet to be resolved by the upstream maintainers and therefore are not patched in Replicated. This is not an exhaustive list of unpatched upstream CVEs; instead, these are noteworthy CVEs that we have evaluated and on which we offer our opinion to help with your own security reviews. When available, we will apply upstream patches in accordance with our policy described in [Upstream CVE Disclosure](#upstream-cve-disclosure) above. We will update this list after applying any upstream patches.
+
+| CVE ID | Explanation |
+|--------|------------|
+| None | N/A |
+
+## Vulnerability Management Exception Policy
+
+There might be instances where policy exceptions are required to continue using third-party software with known vulnerabilities in our on-premises products. Some reasons for an exception include:
+
+- Feature breakage or bugs in patched versions
+- Performance issues in patched versions
+- Patched version contains higher severity vulnerabilities
+
+Regardless of the reason, an exception is vetted from a business impact and security standpoint. The business review assesses the overall impact on the product of the patched, but otherwise problematic, piece of software. The security portion determines whether the CVE is applicable to this specific context and whether that CVE's impact on the product’s overall security posture is acceptable.
+
+In the event of a vulnerability management exception, a notice is posted containing:
+
+- The impacted product(s)
+- The rationale for the exception
+- The relevant CVE(s)
+- A risk assessment in the product context for each CVE
+
+As subsequent versions of the vulnerable software are released, Replicated continues to evaluate them to find a solution that satisfies the business and security requirements of the original exception.
+
+## Known Disclosed Vulnerabilities in Our On-Premises Products
+
+| CVE | CVE Summary | Rationale | Additional Reading |
+|-----|-------------|-----------|--------------------|
+| None | N/A | N/A | N/A |
+
+Last modified January 29, 2025.
+
+================
+File: docs/vendor/preflight-defining.mdx
+================
+# Defining Preflight Checks
+
+This topic describes how to define preflight checks in Helm and Kubernetes manifest-based applications. For more information about preflight checks, see [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about).
+
+The information in this topic applies to applications that are installed with Helm or with Replicated KOTS.
+
+## Step 1: Create the Manifest File
+
+You can define preflight checks in a Kubernetes Secret or in a Preflight custom resource. The type of manifest file that you use depends on your application type (Helm or Kubernetes manifest-based) and the installation methods that your application supports (Helm, KOTS v1.101.0 or later, or KOTS v1.100.3 or earlier).
+
+* **Helm Applications**: For Helm applications, see the following guidance:
+
+   * **(Recommended) Helm or KOTS v1.101.0 or Later**: For Helm applications installed with Helm or KOTS v1.101.0 or later, define the preflight checks in a Kubernetes Secret in your Helm chart `templates`. See [Kubernetes Secret](#secret).
+
+   * **KOTS v1.100.3 or Earlier**: For Helm applications installed with KOTS v1.100.3 or earlier, define the preflight checks in a Preflight custom resource. See [Preflight Custom Resource](#preflight-cr).
+
+* **Kubernetes Manifest-Based Applications**: For Kubernetes manifest-based applications, define the preflight checks in a Preflight custom resource. See [Preflight Custom Resource](#preflight-cr).
+
+### Kubernetes Secret {#secret}
+
+For Helm applications installed with Helm or KOTS v1.101.0 or later, define preflight checks in a Kubernetes Secret in your Helm chart `templates`. This allows you to define the preflight spec only once to support running preflight checks in both Helm and KOTS installations.
+
+For a tutorial that demonstrates how to define preflight checks in a Secret in chart `templates` and then run the preflight checks in both Helm and KOTS installations, see [Tutorial: Add Preflight Checks to a Helm Chart](/vendor/tutorial-preflight-helm-setup).
+
+Add the following YAML to a Kubernetes Secret in your Helm chart `templates` directory:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  labels:
+    troubleshoot.sh/kind: preflight
+  name: "{{ .Release.Name }}-preflight-config"
+stringData:
+  preflight.yaml: |
+    apiVersion: troubleshoot.sh/v1beta2
+    kind: Preflight
+    metadata:
+      name: preflight-sample
+    spec:
+      collectors: []
+      analyzers: []
+```
+
+As shown above, the Secret must include the following:
+
+* The label `troubleshoot.sh/kind: preflight`
+* A `stringData` field with a key named `preflight.yaml` so that the preflight binary can use this Secret when it runs from the CLI
+
+### Preflight Custom Resource {#preflight-cr}
+
+Define preflight checks in a Preflight custom resource for the following installation types:
+* Kubernetes manifest-based applications installed with any version of KOTS
+* Helm applications installed with KOTS v1.100.3 and earlier
+  :::note
+  For Helm charts installed with KOTS v1.101.0 and later, Replicated recommends that you define preflight checks in a Secret in the Helm chart `templates` instead of using the Preflight custom resource. See [Kubernetes Secret](#secret) above.
+
+  In KOTS v1.101.0 and later, preflights defined in the Helm chart override the Preflight custom resource used by KOTS. During installation, if KOTS v1.101.0 and later cannot find preflights specified in the Helm chart archive, then KOTS searches for `kind: Preflight` in the root of the release.
+  :::
+
+Add the following YAML to a new file in a release:
+
+```yaml
+apiVersion: troubleshoot.sh/v1beta2
+kind: Preflight
+metadata:
+  name: preflights
+spec:
+  collectors: []
+  analyzers: []
+```
+
+For more information about the Preflight custom resource, see [Preflight and Support Bundle](/reference/custom-resource-preflight).
+
+## Step 2: Define Collectors and Analyzers
+
+This section describes how to define collectors and analyzers for preflight checks based on your application needs. You add the collectors and analyzers that you want to use to the `spec.collectors` and `spec.analyzers` keys in the manifest file that you created.
+
+### Collectors
+
+Collectors gather information from the cluster, the environment, the application, or other sources. Collectors generate output that is then used by the analyzers that you define to generate results for the preflight checks.
+
+The following default collectors are included automatically to gather information about the cluster and cluster resources:
+* [clusterInfo](https://troubleshoot.sh/docs/collect/cluster-info/)
+* [clusterResources](https://troubleshoot.sh/docs/collect/cluster-resources/)
+
+You do not need to manually include the `clusterInfo` or `clusterResources` collectors in the specification. To use only the `clusterInfo` and `clusterResources` collectors, delete the `spec.collectors` key from the preflight specification.
+
+The Troubleshoot open source project includes several additional collectors that you can include in the specification to gather more information from the installation environment. To view all the available collectors, see [All Collectors](https://troubleshoot.sh/docs/collect/all/) in the Troubleshoot documentation.
+
+### Analyzers
+
+Analyzers use the output from the collectors to generate results for the preflight checks, including the criteria for pass, fail, and warn outcomes and custom messages for each outcome.
+
+For example, in a preflight check that checks the version of Kubernetes running in the target cluster, the analyzer can define a fail outcome for when the cluster is running a Kubernetes version earlier than 1.25, along with the following custom message to the user: `The application requires Kubernetes 1.25.0 or later, and recommends 1.27.0`.
+
+The Troubleshoot open source project includes several analyzers that you can include in your preflight check specification. The following are some of the analyzers in the Troubleshoot project that use the default `clusterInfo` or `clusterResources` collectors:
+* [clusterPodStatuses](https://troubleshoot.sh/docs/analyze/cluster-pod-statuses/)
+* [clusterVersion](https://troubleshoot.sh/docs/analyze/cluster-version/)
+* [deploymentStatus](https://troubleshoot.sh/docs/analyze/deployment-status/)
+* [distribution](https://troubleshoot.sh/docs/analyze/distribution/)
+* [nodeResources](https://troubleshoot.sh/docs/analyze/node-resources/)
+* [statefulsetStatus](https://troubleshoot.sh/docs/analyze/stateful-set-status/)
+* [storageClass](https://troubleshoot.sh/docs/analyze/storage-class/)
+
+To view all the available analyzers, see the [Analyze](https://troubleshoot.sh/docs/analyze/) section of the Troubleshoot documentation.
+
+### Block Installation with Required (Strict) Preflights {#strict}
+
+For applications installed with KOTS, you can set any preflight analyzer to `strict: true`. When `strict: true` is set, any `fail` outcomes for the analyzer block the deployment of the release. For a minimal example, see [Examples](#examples) below.
+
+:::note
+Strict preflight analyzers are ignored if the `exclude` property is also included and evaluates to `true`. See [exclude](https://troubleshoot.sh/docs/analyze/#exclude) in the Troubleshoot documentation.
+:::
+
+### Examples
+
+For common examples of collectors and analyzers used in preflight checks, see [Examples of Preflight Specs](/vendor/preflight-examples).
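+
+For instance, the following is a minimal sketch of a Preflight custom resource with a `clusterVersion` analyzer marked `strict: true`, so that a `fail` outcome blocks deployment in KOTS installations. The version values shown are placeholders for your application's actual requirements:
+
+```yaml
+apiVersion: troubleshoot.sh/v1beta2
+kind: Preflight
+metadata:
+  name: preflights
+spec:
+  analyzers:
+    - clusterVersion:
+        # strict: true causes any fail outcome to block deployment in KOTS
+        strict: true
+        outcomes:
+          - fail:
+              when: "< 1.25.0"
+              message: The application requires Kubernetes 1.25.0 or later.
+          - pass:
+              message: Your cluster meets the required version of Kubernetes.
+```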
+ +================ +File: docs/vendor/preflight-examples.mdx +================ +import HttpSecret from "../partials/preflights/_http-requests-secret.mdx" +import HttpCr from "../partials/preflights/_http-requests-cr.mdx" +import MySqlSecret from "../partials/preflights/_mysql-secret.mdx" +import MySqlCr from "../partials/preflights/_mysql-cr.mdx" +import K8sVersionSecret from "../partials/preflights/_k8s-version-secret.mdx" +import K8sVersionCr from "../partials/preflights/_k8s-version-cr.mdx" +import K8sDistroSecret from "../partials/preflights/_k8s-distro-secret.mdx" +import K8sDistroCr from "../partials/preflights/_k8s-distro-cr.mdx" +import NodeReqSecret from "../partials/preflights/_node-req-secret.mdx" +import NodeReqCr from "../partials/preflights/_node-req-cr.mdx" +import NodeCountSecret from "../partials/preflights/_node-count-secret.mdx" +import NodeCountCr from "../partials/preflights/_node-count-cr.mdx" +import NodeMemSecret from "../partials/preflights/_node-mem-secret.mdx" +import NodeMemCr from "../partials/preflights/_node-mem-cr.mdx" +import NodeStorageClassSecret from "../partials/preflights/_node-storage-secret.mdx" +import NodeStorageClassCr from "../partials/preflights/_node-storage-cr.mdx" +import NodeEphemStorageSecret from "../partials/preflights/_node-ephem-storage-secret.mdx" +import NodeEphemStorageCr from "../partials/preflights/_node-ephem-storage-cr.mdx" +import NodeCpuSecret from "../partials/preflights/_node-cpu-secret.mdx" +import NodeCpuCr from "../partials/preflights/_node-cpu-cr.mdx" +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Example Preflight Specs + +This section includes common examples of preflight check specifications. For more examples, see the [Troubleshoot example repository](https://github.com/replicatedhq/troubleshoot/tree/main/examples/preflight) in GitHub. + +## Check HTTP or HTTPS Requests from the Cluster + +The examples below use the `http` collector and the `textAnalyze` analyzer to check that an HTTP request to the Slack API at `https://api.slack.com/methods/api.test` made from the cluster returns a successful response of `"status": 200,`. + +For more information, see [HTTP](https://troubleshoot.sh/docs/collect/http/) and [Regular Expression](https://troubleshoot.sh/docs/analyze/regex/) in the Troubleshoot documentation. + +<Tabs> + <TabItem value="secret" label="Kubernetes Secret" default> + <HttpSecret/> + </TabItem> + <TabItem value="custom-resource" label="Preflight Custom Resource"> + <HttpCr/> + <p>The following shows how the <code>pass</code> outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:</p> + <img alt="Preflight checks in Admin Console showing pass message" src="/images/preflight-http-pass.png"/> + <a href="/images/preflight-http-pass.png">View a larger version of this image</a> + </TabItem> +</Tabs> + +## Check Kubernetes Version + +The examples below use the `clusterVersion` analyzer to check the version of Kubernetes running in the cluster. The `clusterVersion` analyzer uses data from the default `clusterInfo` collector. The `clusterInfo` collector is automatically included. + +For more information, see [Cluster Version](https://troubleshoot.sh/docs/analyze/cluster-version/) and [Cluster Info](https://troubleshoot.sh/docs/collect/cluster-info/) in the Troubleshoot documentation. 
+
+<Tabs>
+  <TabItem value="secret" label="Kubernetes Secret" default>
+    <K8sVersionSecret/>
+  </TabItem>
+  <TabItem value="custom-resource" label="Preflight Custom Resource">
+    <K8sVersionCr/>
+    <p>The following shows how the <code>warn</code> outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:</p>
+    <img alt="Preflight checks in Admin Console showing warning message" src="/images/preflight-k8s-version-warn.png"/>
+    <a href="/images/preflight-k8s-version-warn.png">View a larger version of this image</a>
+  </TabItem>
+</Tabs>
+
+## Check Kubernetes Distribution
+
+The examples below use the `distribution` analyzer to check the Kubernetes distribution of the cluster. The `distribution` analyzer uses data from the default `clusterInfo` collector. The `clusterInfo` collector is automatically included.
+
+For more information, see [Cluster Info](https://troubleshoot.sh/docs/collect/cluster-info/) and [Distribution](https://troubleshoot.sh/docs/analyze/distribution/) in the Troubleshoot documentation.
+
+<Tabs>
+  <TabItem value="secret" label="Kubernetes Secret" default>
+    <K8sDistroSecret/>
+  </TabItem>
+  <TabItem value="custom-resource" label="Preflight Custom Resource">
+    <K8sDistroCr/>
+    <p>The following shows how the <code>pass</code> outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:</p>
+    <img alt="Preflight checks in Admin Console showing pass message" src="/images/preflight-k8s-distro.png"/>
+    <a href="/images/preflight-k8s-distro.png">View a larger version of this image</a>
+  </TabItem>
+</Tabs>
+
+## Check MySQL Version Using Template Functions
+
+The examples below use the `mysql` collector and the `mysql` analyzer to check the version of MySQL running in the cluster.
+
+For more information, see [Collect > MySQL](https://troubleshoot.sh/docs/collect/mysql/) and [Analyze > MySQL](https://troubleshoot.sh/docs/analyze/mysql/) in the Troubleshoot documentation.
+
+<Tabs>
+  <TabItem value="secret" label="Kubernetes Secret" default>
+    <p>This example uses Helm template functions to render the credentials and connection details for the MySQL server that were supplied by the user. Additionally, it uses Helm template functions to create a conditional statement so that the MySQL collector and analyzer are included in the preflight checks only when MySQL is deployed, as indicated by a <code>.Values.global.mysql.enabled</code> field evaluating to true.</p>
+    <p>For more information about using Helm template functions to access values from the values file, see <a href="https://helm.sh/docs/chart_template_guide/values_files/">Values Files</a>.</p>
+    <MySqlSecret/>
+  </TabItem>
+  <TabItem value="custom-resource" label="Preflight Custom Resource">
+    <p>This example uses KOTS template functions in the Config context to render the credentials and connection details for the MySQL server that were supplied by the user in the Replicated Admin Console <strong>Config</strong> page. Replicated recommends using a template function for the URI, as shown in the example below, to avoid exposing sensitive information.
For more information about template functions, see <a href="/reference/template-functions-about">About Template Functions</a>.</p>
+    <p>This example also uses an analyzer with <code>strict: true</code>, which prevents installation from continuing if the preflight check fails.</p>
+    <MySqlCr/>
+    <p>The following shows how a <code>fail</code> outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade when <code>strict: true</code> is set for the analyzer:</p>
+    <img alt="Strict preflight checks in Admin Console showing fail message" src="/images/preflight-mysql-fail-strict.png"/>
+    <a href="/images/preflight-mysql-fail-strict.png">View a larger version of this image</a>
+  </TabItem>
+</Tabs>
+
+## Check Node Memory
+
+The examples below use the `nodeResources` analyzer to check the amount of memory available on the nodes in the cluster. The `nodeResources` analyzer uses data from the default `clusterResources` collector. The `clusterResources` collector is automatically included.
+
+For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) in the Troubleshoot documentation.
+
+<Tabs>
+  <TabItem value="secret" label="Kubernetes Secret" default>
+    <NodeMemSecret/>
+  </TabItem>
+  <TabItem value="custom-resource" label="Preflight Custom Resource">
+    <NodeMemCr/>
+    <p>The following shows how a <code>warn</code> outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:</p>
+    <img alt="Preflight checks in Admin Console showing warn message" src="/images/preflight-node-memory-warn.png"/>
+    <a href="/images/preflight-node-memory-warn.png">View a larger version of this image</a>
+  </TabItem>
+</Tabs>
+
+## Check Node Storage Class Availability
+
+The examples below use the `storageClass` analyzer to check that a required storage class is available in the cluster. The `storageClass` analyzer uses data from the default `clusterResources` collector. The `clusterResources` collector is automatically included.
+
+For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Storage Class](https://troubleshoot.sh/docs/analyze/storage-class/) in the Troubleshoot documentation.
+
+<Tabs>
+  <TabItem value="secret" label="Kubernetes Secret" default>
+    <NodeStorageClassSecret/>
+  </TabItem>
+  <TabItem value="custom-resource" label="Preflight Custom Resource">
+    <NodeStorageClassCr/>
+    <p>The following shows how a <code>fail</code> outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:</p>
+    <img alt="Preflight checks in Admin Console showing fail message" src="/images/preflight-storageclass-fail.png"/>
+    <a href="/images/preflight-storageclass-fail.png">View a larger version of this image</a>
+  </TabItem>
+</Tabs>
+
+## Check Node Ephemeral Storage
+
+The examples below use the `nodeResources` analyzer to check the ephemeral storage available in the nodes in the cluster. The `nodeResources` analyzer uses data from the default `clusterResources` collector. The `clusterResources` collector is automatically included.
+
+For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) in the Troubleshoot documentation.
+
+<Tabs>
+  <TabItem value="secret" label="Kubernetes Secret" default>
+    <NodeEphemStorageSecret/>
+  </TabItem>
+  <TabItem value="custom-resource" label="Preflight Custom Resource">
+    <NodeEphemStorageCr/>
+    <p>The following shows how a <code>pass</code> outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:</p>
+    <img alt="Preflight checks in Admin Console showing pass message" src="/images/preflight-ephemeral-storage-pass.png"/>
+    <a href="/images/preflight-ephemeral-storage-pass.png">View a larger version of this image</a>
+  </TabItem>
+</Tabs>
+
+## Check Requirements Are Met By At Least One Node
+
+The examples below use the `nodeResources` analyzer with filters to check that the requirements for memory, CPU cores, and architecture are met by at least one node in the cluster. The `nodeResources` analyzer uses data from the default `clusterResources` collector. The `clusterResources` collector is automatically included.
+
+For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) in the Troubleshoot documentation.
+
+<Tabs>
+  <TabItem value="secret" label="Kubernetes Secret" default>
+    <NodeReqSecret/>
+  </TabItem>
+  <TabItem value="custom-resource" label="Preflight Custom Resource">
+    <NodeReqCr/>
+    <p>The following shows how the <code>fail</code> outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:</p>
+    <img alt="Preflight checks in Admin Console showing fail message" src="/images/preflight-node-filters-faill.png"/>
+    <a href="/images/preflight-node-filters-faill.png">View a larger version of this image</a>
+  </TabItem>
+</Tabs>
+
+## Check Total CPU Cores Across Nodes
+
+The examples below use the `nodeResources` analyzer to check the total number of CPU cores across the nodes in the cluster. The `nodeResources` analyzer uses data from the default `clusterResources` collector. The `clusterResources` collector is automatically included.
+
+For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) in the Troubleshoot documentation.
+
+<Tabs>
+  <TabItem value="secret" label="Kubernetes Secret" default>
+    <NodeCpuSecret/>
+  </TabItem>
+  <TabItem value="custom-resource" label="Preflight Custom Resource">
+    <NodeCpuCr/>
+    <p>The following shows how the <code>pass</code> outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:</p>
+    <img alt="Preflight checks in Admin Console showing pass message" src="/images/preflight-cpu-pass.png"/>
+    <a href="/images/preflight-cpu-pass.png">View a larger version of this image</a>
+  </TabItem>
+</Tabs>
+
+================
+File: docs/vendor/preflight-host-preflights.md
+================
+# Customizing Host Preflight Checks for kURL
+
+This topic provides information about how to customize host preflight checks for installations with Replicated kURL. For information about the default host preflight checks that run for installations with Replicated Embedded Cluster, see [About Host Preflight Checks](/vendor/embedded-using#about-host-preflight-checks) in _Using Embedded Cluster_.
+
+## About Host Preflight Checks
+
+You can include host preflight checks with kURL to verify that infrastructure requirements are met for:
+
+- Kubernetes
+- kURL add-ons
+- Your application
+
+This helps to ensure successful installation and the ongoing health of the cluster.
+
+While host preflights are intended to ensure requirements are met for running the cluster, you can also use them to codify some of your application requirements so that users get feedback even earlier in the installation process, rather than waiting to run preflights after the cluster is already installed. For more information about application checks, collectors, and analyzers, see [About Preflight Checks and Support Bundles](preflight-support-bundle-about).
+
+Default host preflight checks verify conditions such as operating system and disk usage. Default host preflight failures block the installation from continuing and exit with a non-zero return code. Users can then update their environment and run the kURL installation script again to re-run the host preflight checks.
+
+Host preflight checks run automatically. The default host preflight checks that run can vary, depending on whether the installation is new, an upgrade, joining a node, or an air gap installation. Additionally, some checks only run when certain add-ons are enabled in the installer. For a complete list of default host preflight checks, see [Default Host Preflights](https://kurl.sh/docs/install-with-kurl/host-preflights#default-host-preflights) in the kURL documentation.
+
+There are general kURL host preflight checks that run with all installers. There are also host preflight checks included with certain add-ons. Customizations include the ability to:
+
+ - Bypass failures
+ - Block an installation for warnings
+ - Exclude certain preflights under specific conditions, such as when a particular license entitlement is enabled
+ - Skip the default host preflight checks and run only custom checks
+ - Add custom checks to the default host preflight checks
+
+For more information about customizing host preflights, see [Customize Host Preflight Checks](#customize-host-preflight-checks).
+
+## Customize Host Preflight Checks
+
+The default host preflights run automatically as part of your kURL installation. You can customize the host preflight checks by disabling them entirely, making the default checks more restrictive, or replacing them with fully custom checks. You can also customize the outcomes to enforce warnings or ignore failures.
+
+### Add Custom Preflight Checks to the Defaults
+
+To run customized host preflight checks in addition to the default host preflight checks, add a `hostPreflights` field to the `kurl` field in your Installer manifest. Under the `hostPreflights` field, add a host preflight specification (`kind: HostPreflight`) with your customizations. You only need to specify your customizations because the default host preflights run automatically.
+
+Customized host preflight checks run in addition to the default host preflight checks, if the default host preflight checks are enabled.
+
+If you only want to make the default host preflight checks more restrictive, add your more restrictive host preflight checks to `kurl.hostPreflights`, and do not set `excludeBuiltinHostPreflights`.
For example, if your application requires 6 CPUs but the default host preflight check requires 4 CPUs, you can simply add a custom host preflight check for 6 CPUs, since the default host preflight must pass if the more restrictive custom check passes. + +The following example shows customized `kurl` host preflight checks for: + + - An application that requires more CPUs than the default + - Accessing a website that is critical to the application + +```yaml +apiVersion: "cluster.kurl.sh/v1beta1" +kind: "Installer" +metadata: + name: "latest" +spec: + kurl: + hostPreflights: + apiVersion: troubleshoot.sh/v1beta2 + kind: HostPreflight + spec: + collectors: + - cpu: {} + - http: + collectorName: Can Access A Website + get: + url: https://myFavoriteWebsite.com + analyzers: + - cpu: + checkName: Number of CPU check + outcomes: + - fail: + when: "count < 4" + message: This server has less than 4 CPU cores + - warn: + when: "count < 6" + message: This server has less than 6 CPU cores + - pass: + message: This server has at least 6 CPU cores + - http: + checkName: Can Access A Website + collectorName: Can Access A Website + outcomes: + - warn: + when: "error" + message: Error connecting to https://myFavoriteWebsite.com + - pass: + when: "statusCode == 200" + message: Connected to https://myFavoriteWebsite.com +``` + +### Customize the Default Preflight Checks + +To customize the default host preflights: + +1. Disable the default host preflight checks using `excludeBuiltinHostPreflights: true`. +1. Copy the default `host-preflights.yaml` specification for kURL from [host-preflights.yaml](https://github.com/replicatedhq/kURL/blob/main/pkg/preflight/assets/host-preflights.yaml) in the kURL repository. +1. Copy the default `host-preflight.yaml` specification for any and all add-ons that are included in your specification and have default host preflights. For links to the add-on YAML files, see [Finding the Add-on Host Preflight Checks](https://kurl.sh/docs/create-installer/host-preflights/#finding-the-add-on-host-preflight-checks) in the kURL documentation. +1. Merge the copied host preflight specifications into one host preflight specification, and paste it to the `kurl.hostPreflights` field in the Installer YAML in the Vendor Portal. +1. Edit the defaults as needed. + +### Ignore or Enforce Warnings and Failures + +Set either of the following flags to customize the outcome of your host preflight checks: + +<table> +<tr> + <th width="30%">Flag: Value</th> + <th width="70%">Description</th> +</tr> +<tr> + <td><code>hostPreflightIgnore: true</code></td> + <td>Ignores host preflight failures and warnings. The installation proceeds regardless of host preflight outcomes.</td> +</tr> +<tr> + <td><code>hostPreflightEnforceWarnings: true</code></td> + <td>Blocks an installation if the results include a warning.</td> +</tr> +</table> + +### Disable Host Preflight Checks + +To disable the default host preflight checks for Kubernetes and all included add-ons, add the `kurl` field to your Installer manifest and add `kurl.excludeBuiltinHostPreflights: true`. In this case, no host preflight checks are run. + +`excludeBuiltinHostPreflights` is an aggregate flag, so setting it to `true` disables the default host preflights for Kubernetes and all included add-ons. 
+
+**Example:**
+
+  ```yaml
+  apiVersion: "cluster.kurl.sh/v1beta1"
+  kind: "Installer"
+  metadata:
+    name: "latest"
+  spec:
+    kurl:
+      excludeBuiltinHostPreflights: true
+  ```
+
+## Example of Customized Host Preflight Checks
+
+The following example shows:
+
+- Default host preflight checks are disabled
+- Customized host preflight checks run
+- The installation is blocked if there is a warning
+
+```yaml
+apiVersion: "cluster.kurl.sh/v1beta1"
+kind: "Installer"
+metadata:
+  name: "latest"
+spec:
+  kurl:
+    excludeBuiltinHostPreflights: true
+    hostPreflightEnforceWarnings: true
+    hostPreflights:
+      apiVersion: troubleshoot.sh/v1beta2
+      kind: HostPreflight
+      spec:
+        collectors:
+          - cpu: {}
+          - http:
+              collectorName: Can Access A Website
+              get:
+                url: https://myFavoriteWebsite.com
+        analyzers:
+          - cpu:
+              checkName: Number of CPU check
+              outcomes:
+                - fail:
+                    when: "count < 4"
+                    message: This server has less than 4 CPU cores
+                - warn:
+                    when: "count < 6"
+                    message: This server has less than 6 CPU cores
+                - pass:
+                    message: This server has at least 6 CPU cores
+          - http:
+              checkName: Can Access A Website
+              collectorName: Can Access A Website
+              outcomes:
+                - warn:
+                    when: "error"
+                    message: Error connecting to https://myFavoriteWebsite.com
+                - pass:
+                    when: "statusCode == 200"
+                    message: Connected to https://myFavoriteWebsite.com
+```
+
+================
+File: docs/vendor/preflight-running.md
+================
+# Running Preflight Checks for Helm Installations
+
+This topic describes how to use the preflight kubectl plugin to run preflight checks for applications installed with the Helm CLI.
+
+## Overview
+
+For applications installed with the Helm CLI, your users can optionally run preflight checks using the open source preflight kubectl plugin before they run `helm install`.
+
+The preflight plugin requires a preflight check specification as input. For Helm chart-based applications, the specification is defined in a Secret in the Helm chart `templates` directory. For information about how to configure preflight checks for your application, see [Defining Preflight Checks](preflight-defining).
+
+To run preflight checks that are defined in your application Helm chart templates, your users run `helm template` to render the Helm chart templates and then pipe the result to the preflight plugin as stdin. The preflight plugin automatically filters the stdout stream from the `helm template` command to find and run any preflight specifications.
+
+## Prerequisite
+
+The preflight kubectl plugin is required to run preflight checks for Helm CLI installations. The preflight plugin is a client-side utility that adds a single binary to the path.
+
+To install the preflight plugin using krew, run the following command:
+
+```
+curl https://krew.sh/preflight | bash
+```
+
+For information about the preflight plugin, including additional installation options, see [Getting Started](https://troubleshoot.sh/docs/) in the open source Troubleshoot documentation.
+
+## Command
+
+```
+helm template HELM_CHART | kubectl preflight -
+```
+
+Where `HELM_CHART` is the Helm chart that contains the preflight specification.
+
+For all available options with this command, see [Run Preflight Checks using the CLI](https://troubleshoot.sh/docs/preflight/cli-usage/#options) in the open source Troubleshoot documentation.
+
+**Examples:**
+
+```
+helm template gitea-1.0.6.tgz | kubectl preflight -
+```
+```
+helm template gitea | kubectl preflight -
+```
+```
+helm template oci://myregistry.io/org/examplechart | kubectl preflight -
+```
+
+## Run Preflight Checks from a Release
+
+When you promote a release that contains one or more Helm charts, the Helm charts are automatically pushed to the Replicated registry. To run preflight checks before installing a release, your users must first log in to the Replicated registry where they can access your application Helm chart containing the preflight specification.
+
+To run preflight checks from a release before installation:
+
+1. In the [Vendor Portal](https://vendor.replicated.com/apps/gitea-boxer/customers), go to the **Customers** page. Click on the name of the target customer.
+
+1. On the landing page for the customer, click **Helm install instructions**.
+
+   The **Helm install instructions** dialog opens:
+
+   <img alt="Helm install instructions dialog with preflight checks" src="/images/helm-install-preflights.png" width="550px"/>
+
+   [View a larger version of this image](/images/helm-install-preflights.png)
+
+1. Run the commands provided in the dialog:
+
+   1. Run the first command to log in to the Replicated registry:
+
+      ```
+      helm registry login registry.replicated.com --username USERNAME --password PASSWORD
+      ```
+
+      Where:
+      - `USERNAME` is the customer's email address.
+      - `PASSWORD` is the customer's license ID.
+
+      **Example:**
+      ```
+      helm registry login registry.replicated.com --username example@companyname.com --password 1234abcd
+      ```
+
+   1. Run the second command to install the kubectl plugin with krew:
+
+      ```
+      curl https://krew.sh/preflight | bash
+      ```
+
+   1. Run the third command to run preflight checks:
+
+      ```
+      helm template oci://registry.replicated.com/APP_SLUG/CHANNEL/CHART | kubectl preflight -
+      ```
+
+      Where:
+      - `APP_SLUG` is the name of the application.
+      - `CHANNEL` is the lowercased name of the release channel.
+      - `CHART` is the name of the Helm chart.
+
+      **Examples:**
+
+      ```
+      helm template oci://registry.replicated.com/gitea-app/unstable/gitea | kubectl preflight -
+      ```
+      ```
+      helm template oci://registry.replicated.com/gitea-app/unstable/gitea --values values.yaml | kubectl preflight -
+      ```
+
+      For all available options with this command, see [Run Preflight Checks using the CLI](https://troubleshoot.sh/docs/preflight/cli-usage/#options) in the open source Troubleshoot documentation.
+
+   1. (Optional) Run the fourth command to install the application. For more information, see [Installing with Helm](install-with-helm).
+
+## (Optional) Save Preflight Check Results
+
+The output of the preflight plugin shows the success, warning, or fail message for each preflight check, depending on how it was configured. You can ask your users to send you the results of the preflight checks if needed.
+
+To save the results of preflight checks to a `.txt` file, users can press `s` when viewing the results from the CLI, as shown in the example below:
+
+![Save output dialog](/images/helm-preflight-save-output.png)
+
+[View a larger version of this image](/images/helm-preflight-save-output.png)
+
+================
+File: docs/vendor/preflight-sb-helm-templates-about.md
+================
+# Using Helm Templates in Specifications
+
+You can use Helm templates to configure collectors and analyzers for preflight checks and support bundles for Helm installations.
+
+Helm templates can be useful when you need to:
+
+- Run preflight checks based on certain conditions being true or false, such as whether the customer wants to use an external database.
+- Pull in user-specific information from the `values.yaml` file, such as the version a customer is using for an external database.
+
+You can also use Helm templating with the Troubleshoot template functions for the `clusterPodStatuses` analyzer. For more information, see [Helm and Troubleshoot Template Example](#troubleshoot).
+
+## Helm Template Example
+
+In the following example, the `mysql` collector is included in a preflight check if the customer does not want to use the default MariaDB. This is indicated by the template `{{ if eq .Values.global.mariadb.enabled false }}`.
+
+This specification also takes the MySQL connection string information from the `values.yaml` file, indicated by the template `'{{ .Values.global.externalDatabase.user }}:{{ .Values.global.externalDatabase.password }}@tcp({{ .Values.global.externalDatabase.host }}:{{ .Values.global.externalDatabase.port }})/{{ .Values.global.externalDatabase.database }}?tls=false'` in the `uri` field.
+
+Additionally, the specification verifies that the maximum number of nodes specified in the `values.yaml` file is not exceeded by including the template `'count() > {{ .Values.global.maxNodeCount }}'` for the `nodeResources` analyzer.
+
+```yaml
+{{- define "preflight.spec" }}
+apiVersion: troubleshoot.sh/v1beta2
+kind: Preflight
+metadata:
+  name: preflight-sample
+spec:
+  {{ if eq .Values.global.mariadb.enabled false }}
+  collectors:
+    - mysql:
+        collectorName: mysql
+        uri: '{{ .Values.global.externalDatabase.user }}:{{ .Values.global.externalDatabase.password }}@tcp({{ .Values.global.externalDatabase.host }}:{{ .Values.global.externalDatabase.port }})/{{ .Values.global.externalDatabase.database }}?tls=false'
+  {{ end }}
+  analyzers:
+    - nodeResources:
+        checkName: Node Count Check
+        outcomes:
+          - fail:
+              when: 'count() > {{ .Values.global.maxNodeCount }}'
+              message: "The cluster has more than {{ .Values.global.maxNodeCount }} nodes."
+          - pass:
+              message: You have the correct number of nodes.
+    - clusterVersion:
+        outcomes:
+          - fail:
+              when: "< 1.22.0"
+              message: The application requires at least Kubernetes 1.22.0, and recommends 1.23.0.
+              uri: https://kubernetes.io
+          - warn:
+              when: "< 1.23.0"
+              message: Your cluster meets the minimum version of Kubernetes, but we recommend you update to 1.23.0 or later.
+              uri: https://kubernetes.io
+          - pass:
+              message: Your cluster meets the recommended and required versions of Kubernetes.
+    {{ if eq .Values.global.mariadb.enabled false }}
+    - mysql:
+        checkName: Must be MySQL 8.x or later
+        collectorName: mysql
+        outcomes:
+          - fail:
+              when: connected == false
+              message: Cannot connect to MySQL server
+          - fail:
+              when: version < 8.x
+              message: The MySQL server must be at least version 8
+          - pass:
+              message: The MySQL server is ready
+    {{ end }}
+{{- end }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  labels:
+    app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
+    app.kubernetes.io/instance: {{ .Release.Name | quote }}
+    app.kubernetes.io/version: {{ .Chart.AppVersion }}
+    helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
+    troubleshoot.sh/kind: preflight
+  name: "{{ .Release.Name }}-preflight-config"
+stringData:
+  preflight.yaml: |
+{{- include "preflight.spec" .
| indent 4 }}
+```
+
+## Helm and Troubleshoot Template Example {#troubleshoot}
+
+You can also use Helm templates with the Troubleshoot template functions to automatically add the Pod name and namespace to a message when a `clusterPodStatuses` analyzer fails. For more information about the Troubleshoot template functions, see [Cluster Pod Statuses](https://troubleshoot.sh/docs/analyze/cluster-pod-statuses/) in the Troubleshoot documentation.
+
+When you add the `clusterPodStatuses` analyzer template function values (such as `{{ .Name }}`) to your Helm template, you must encapsulate the Helm template using \{\{ ` ` \}\} so that Helm does not expand it.
+
+The following example shows an analyzer that uses Troubleshoot templates and the override for Helm:
+
+```yaml
+# This is the support bundle config secret that will be used to generate the support bundle
+apiVersion: v1
+kind: Secret
+metadata:
+  labels:
+    troubleshoot.sh/kind: support-bundle
+  name: {{ .Release.Name }}-support-bundle
+  namespace: {{ .Release.Namespace }}
+type: Opaque
+stringData:
+  # This is the support bundle spec that will be used to generate the support bundle
+  # Notes: we use {{ .Release.Namespace }} to ensure that the support bundle is scoped to the release namespace
+  # We can use any of Helm's templating features here, including {{ .Values.someValue }}
+  support-bundle-spec: |
+    apiVersion: troubleshoot.sh/v1beta2
+    kind: SupportBundle
+    metadata:
+      name: support-bundle
+    spec:
+      collectors:
+        - clusterInfo: {}
+        - clusterResources: {}
+        - logs:
+            selector:
+              - app=someapp
+            namespace: {{ .Release.Namespace }}
+      analyzers:
+        - clusterPodStatuses:
+            name: unhealthy
+            namespaces:
+              - default
+              - myapp-namespace
+            outcomes:
+              - fail:
+                  when: "== CrashLoopBackOff"
+                  message: {{ `Pod {{ .Namespace }}/{{ .Name }} is in a CrashLoopBackOff state.` }}
+              - fail:
+                  when: "== ImagePullBackOff"
+                  message: {{ `Pod {{ .Namespace }}/{{ .Name }} is in an ImagePullBackOff state.` }}
+              - fail:
+                  when: "== Pending"
+                  message: {{ `Pod {{ .Namespace }}/{{ .Name }} is in a Pending state.` }}
+              - fail:
+                  when: "== Evicted"
+                  message: {{ `Pod {{ .Namespace }}/{{ .Name }} is in an Evicted state.` }}
+              - fail:
+                  when: "== Terminating"
+                  message: {{ `Pod {{ .Namespace }}/{{ .Name }} is in a Terminating state.` }}
+              - fail:
+                  when: "== Init:Error"
+                  message: {{ `Pod {{ .Namespace }}/{{ .Name }} is in an Init:Error state.` }}
+              - fail:
+                  when: "== Init:CrashLoopBackOff"
+                  message: {{ `Pod {{ .Namespace }}/{{ .Name }} is in an Init:CrashLoopBackOff state.` }}
+              - fail:
+                  when: "!= Healthy" # Catch all unhealthy pods. A pod is considered healthy if it has a status of Completed, or Running and all of its containers are ready.
+                  message: {{ `Pod {{ .Namespace }}/{{ .Name }} is unhealthy with a status of {{ .Status.Reason }}.` }}
+```
+
+================
+File: docs/vendor/preflight-support-bundle-about.mdx
+================
+import Overview from "../partials/preflights/_preflights-sb-about.mdx"
+
+# About Preflight Checks and Support Bundles
+
+This topic provides an introduction to preflight checks and support bundles, which are provided by the [Troubleshoot](https://troubleshoot.sh/) open source project.
+
+## Overview
+
+<Overview/>
+
+Preflight checks and support bundles consist of _collectors_, _redactors_, and _analyzers_ that are defined in a YAML specification.
When preflight checks or support bundles are executed, data is collected, redacted, and then analyzed to provide insights to users, as illustrated in the following diagram:
+
+![Troubleshoot Workflow Diagram](/images/troubleshoot-workflow-diagram.png)
+
+[View a larger version of this image](/images/troubleshoot-workflow-diagram.png)
+
+For more information about each step in this workflow, see the sections below.
+
+### Collect
+
+During the collection phase, _collectors_ gather information from the cluster, the environment, the application, and other sources.
+
+The data collected depends on the types of collectors that are included in the preflight or support bundle specification. For example, the Troubleshoot project provides collectors that can gather information about the Kubernetes version that is running in the cluster, information about database servers, logs from pods, and more.
+
+For more information, see the [Collect](https://troubleshoot.sh/docs/collect/) section in the Troubleshoot documentation.
+
+### Redact
+
+During the redact phase, _redactors_ censor sensitive customer information from the data before analysis. By default, the following information is automatically redacted:
+
+- Passwords
+- API token environment variables in JSON
+- AWS credentials
+- Database connection strings
+- URLs that include usernames and passwords
+
+For Replicated KOTS installations, it is also possible to add custom redactors to redact additional data. For more information, see the [Redact](https://troubleshoot.sh/docs/redact/) section in the Troubleshoot documentation.
+
+### Analyze
+
+During the analyze phase, _analyzers_ use the redacted data to provide insights to users.
+
+For preflight checks, analyzers define the pass, fail, and warning outcomes, and can also display custom messages to the user. For example, you can define a preflight check that fails if the cluster's Kubernetes version does not meet the minimum version that your application supports.
+
+For support bundles, analyzers can be used to identify potential problems and share relevant troubleshooting guidance with users. Additionally, when a support bundle is uploaded to the Vendor Portal, it is extracted and automatically analyzed. The goal of analyzers in support bundles is to surface known issues or hint at potential problems to make troubleshooting easier.
+
+For more information, see the [Analyze](https://troubleshoot.sh/docs/analyze/) section in the Troubleshoot documentation.
+
+## Preflight Checks
+
+This section provides an overview of preflight checks, including how preflights are defined and run.
+
+### Overview
+
+Preflight checks let you define requirements for the cluster where your application is installed. When run, preflight checks provide clear feedback to your customer about any missing requirements or incompatibilities in the cluster before they install or upgrade your application. For KOTS installations, preflight checks can also be used to block the deployment of the application if one or more requirements are not met.
+
+Thorough preflight checks provide increased confidence that an installation or upgrade will succeed and help prevent support escalations.
+
+### About Host Preflights {#host-preflights}
+
+_Host preflight checks_ automatically run during [Replicated Embedded Cluster](/vendor/embedded-overview) and [Replicated kURL](/vendor/kurl-about) installations on a VM or bare metal server.
The purpose of host preflight checks is to verify that the user's installation environment meets the requirements of the Embedded Cluster or kURL installer, such as the number of CPU cores in the system, the available disk space, and memory usage. If any of the host preflight checks fail, installation is blocked and a message describing the failure is displayed.
+
+Host preflight checks are separate from any application-specific preflight checks that are defined in the release, which run in the Admin Console before the application is deployed with KOTS. Both Embedded Cluster and kURL have default host preflight checks that are specific to the requirements of the given installer. For kURL installations, it is possible to customize the default host preflight checks.
+
+For more information about the default Embedded Cluster host preflight checks, see [Host Preflight Checks](/vendor/embedded-using#about-host-preflight-checks) in _Using Embedded Cluster_.
+
+For more information about kURL host preflight checks, including information about how to customize the defaults, see [Customizing Host Preflight Checks for kURL](/vendor/preflight-host-preflights).
+
+### Defining Preflights
+
+To add preflight checks for your application, create a Preflight YAML specification that defines the collectors and analyzers that you want to include.
+
+For information about how to add preflight checks to your application, including examples, see [Defining Preflight Checks](preflight-defining).
+
+### Blocking Installation with Required (Strict) Preflights
+
+For applications installed with KOTS, it is possible to block the deployment of a release if a preflight check fails. This is helpful when it is necessary to prevent an installation or upgrade from continuing unless a given requirement is met.
+
+You can add required preflight checks for an application by including `strict: true` for the target analyzer in the preflight specification. For more information, see [Block Installation with Required Preflights](preflight-defining#strict) in _Defining Preflight Checks_.
+
+### Running Preflights
+
+This section describes how users can run preflight checks for KOTS and Helm installations.
+
+#### Replicated Installations
+
+For Replicated installations with Embedded Cluster, KOTS, or kURL, preflight checks run automatically as part of the installation process. The results of the preflight checks are displayed either in the KOTS Admin Console or in the KOTS CLI, depending on the installation method.
+
+Additionally, users can access preflight checks from the Admin Console after installation to view their results and optionally re-run the checks.
+
+The following shows an example of the results of preflight checks displayed in the Admin Console during installation:
+
+![Preflight results in Admin Console](/images/preflight-warning.png)
+
+[View a larger version of this image](/images/preflight-warning.png)
+
+#### Helm Installations
+
+For installations with Helm, the preflight kubectl plugin is required to run preflight checks. The preflight plugin is a client-side utility that adds a single binary to the path. For more information, see [Getting Started](https://troubleshoot.sh/docs/) in the Troubleshoot documentation.
+
+Users can optionally run preflight checks before they run `helm install`.
The results of the preflight checks are then displayed through the CLI, as shown in the example below:
+
+![Save output dialog](/images/helm-preflight-save-output.png)
+
+[View a larger version of this image](/images/helm-preflight-save-output.png)
+
+For more information, see [Running Preflight Checks for Helm Installations](preflight-running).
+
+## Support Bundles
+
+This section provides an overview of support bundles, including how support bundles are customized and generated.
+
+### Overview
+
+Support bundles collect and analyze troubleshooting data from customer environments, helping both users and support teams diagnose problems with application deployments.
+
+Support bundles can collect a variety of important cluster-level data from customer environments, such as:
+* Pod logs
+* Node resources and status
+* The status of replicas in a Deployment
+* Cluster information
+* Resources deployed to the cluster
+* The history of Helm releases installed in the cluster
+
+Support bundles can also be used for more advanced use cases, such as checking that a command successfully executes in a pod in the cluster, or that an HTTP request returns a successful response.
+
+Support bundles then use the collected data to provide insights to users on potential problems or suggested troubleshooting steps. The troubleshooting data collected and analyzed by support bundles not only helps users self-resolve issues with their application deployment, but also helps reduce the amount of time required by support teams to resolve requests by ensuring they have access to all the information they need up front.
+
+### About Host Support Bundles
+
+For installations on VMs or bare metal servers with [Replicated Embedded Cluster](/vendor/embedded-overview) or [Replicated kURL](/vendor/kurl-about), it is possible to generate a support bundle that includes host-level information to help troubleshoot failures related to host configuration like DNS, networking, or storage problems.
+
+For Embedded Cluster installations, a default spec can be used to generate support bundles that include cluster- and host-level information. See [Generating Host Bundles for Embedded Cluster](/vendor/support-bundle-embedded).
+
+For kURL installations, vendors can customize a host support bundle spec for their application. See [Generating Host Bundles for kURL](/vendor/support-host-support-bundles).
+
+### Customizing Support Bundles
+
+To enable support bundles for your application, add a support bundle YAML specification to a release. An empty support bundle specification automatically includes several default collectors and analyzers. You can also optionally customize the support bundle specification by adding, removing, or editing collectors and analyzers.
+
+For more information, see [Adding and Customizing Support Bundles](support-bundle-customizing).
+
+### Generating Support Bundles
+
+Users generate support bundles as `tar.gz` files from the command line, using the support-bundle kubectl plugin. Your customers can share their support bundles with your team by sending you the resulting `tar.gz` file.
+
+KOTS users can also generate and share support bundles from the KOTS Admin Console.
+
+For more information, see [Generating Support Bundles](support-bundle-generating).
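+
+For reference, the following is a minimal sketch of a support bundle specification. An empty `spec` picks up the default collectors and analyzers described above, and the metadata name is illustrative:
+
+```yaml
+apiVersion: troubleshoot.sh/v1beta2
+kind: SupportBundle
+metadata:
+  name: example
+spec: {}
+```
+
+Assuming the spec is saved locally as `support-bundle.yaml`, a user could then generate a bundle with the support-bundle kubectl plugin:
+
+```bash
+kubectl support-bundle ./support-bundle.yaml
+```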
+
+================
+File: docs/vendor/private-images-about.md
+================
+# About the Replicated Proxy Registry
+
+This topic describes how the Replicated proxy registry can be used to grant proxy access to your application's private images or allow pull-through access of public images.
+
+## Overview
+
+If your application images are available in a private image registry exposed to the internet, such as Docker Hub or Amazon Elastic Container Registry (ECR), then the Replicated proxy registry can grant proxy, or _pull-through_, access to the images without exposing registry credentials to your customers. When you use the proxy registry, you do not have to modify the process that you already use to build and push images to deploy your application.
+
+To grant proxy access, the proxy registry uses the customer licenses that you create in the Replicated Vendor Portal. This allows you to revoke a customer’s ability to pull private images by editing their license, rather than having to manage image access through separate identity or authentication systems. For example, when a trial license expires, the customer's ability to pull private images is automatically revoked.
+
+The following diagram demonstrates how the proxy registry pulls images from your external registry, and how deployed instances of your application pull images from the proxy registry:
+
+![Proxy registry workflow diagram](/images/private-registry-diagram.png)
+
+[View a larger version of this image](/images/private-registry-diagram-large.png)
+
+## About Enabling the Proxy Registry
+
+The proxy registry requires read-only credentials to your private registry to access your application images. See [Connecting to an External Registry](/vendor/packaging-private-images).
+
+After connecting your registry, the steps to enable the proxy registry vary depending on your application deployment method. For more information, see:
+* [Using the Proxy Registry with KOTS Installations](/vendor/private-images-kots)
+* [Using the Proxy Registry with Helm Installations](/vendor/helm-image-registry)
+
+## About Allowing Pull-Through Access of Public Images
+
+Using the Replicated proxy registry to grant pull-through access to public images can simplify network access requirements for your customers, as they only need to whitelist a single domain (either `proxy.replicated.com` or your custom domain) instead of multiple registry domains.
+
+For more information about how to pull public images through the proxy registry, see [Connecting to a Public Registry through the Proxy Registry](/vendor/packaging-public-images).
+
+================
+File: docs/vendor/private-images-kots.mdx
+================
+import Deprecated from "../partials/helm/_replicated-deprecated.mdx"
+import StepCreds from "../partials/proxy-service/_step-creds.mdx"
+import StepCustomDomain from "../partials/proxy-service/_step-custom-domain.mdx"
+
+# Using the Proxy Registry with KOTS Installations
+
+This topic describes how to use the Replicated proxy registry with applications deployed with Replicated KOTS.
+
+## Overview
+
+Replicated KOTS automatically creates the required image pull secret for accessing the Replicated proxy registry during application deployment. When possible, KOTS also automatically rewrites image names in the application manifests to the location of the image at `proxy.replicated.com` or your custom domain.
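+
+For example, an image that your application references as `quay.io/my-org/api:v1.0.1` is pulled through the proxy registry using a rewritten reference like the following, where `my-kots-app` is an illustrative application slug:
+
+```
+proxy.replicated.com/proxy/my-kots-app/quay.io/my-org/api:v1.0.1
+```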
+
+### Image Pull Secret
+
+During application deployment, KOTS automatically creates an `imagePullSecret` with `type: kubernetes.io/dockerconfigjson` that is based on the customer license. This secret is used to authenticate with the proxy registry and grant proxy access to private images.
+
+For information about how Kubernetes uses the `kubernetes.io/dockerconfigjson` Secret type to authenticate to a private image registry, see [Pull an Image from a Private Registry](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) in the Kubernetes documentation.
+
+### Image Location Patching (Standard Manifests and HelmChart v1)
+
+For applications packaged with standard Kubernetes manifests (or Helm charts deployed with the [HelmChart v1](/reference/custom-resource-helmchart) custom resource), KOTS automatically patches image names to the location of the image at `proxy.replicated.com` or your custom domain during deployment. If KOTS receives a 401 response when attempting to load image manifests using the image reference from the PodSpec, it assumes that this is a private image that must be proxied through the proxy registry.
+
+KOTS uses Kustomize to patch the `midstream/kustomization.yaml` file to change the image name during deployment to reference the proxy registry. For example, a PodSpec for a Deployment references a private image hosted at `quay.io/my-org/api:v1.0.1`:
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: example
+spec:
+  template:
+    spec:
+      containers:
+      - name: api
+        image: quay.io/my-org/api:v1.0.1
+```
+
+When this application is deployed, KOTS detects that it cannot access the image at quay.io. So, it creates a patch in the `midstream/kustomization.yaml` file that changes the image name in all manifest files for the application. This causes the container runtime in the cluster to use the proxy registry to pull the images, using the license information provided to KOTS for authentication.
+
+```yaml
+apiVersion: kustomize.config.k8s.io/v1beta1
+bases:
+- ../../base
+images:
+- name: quay.io/my-org/api:v1.0.1
+  newName: proxy.replicated.com/proxy/my-kots-app/quay.io/my-org/api
+```
+
+## Enable the Proxy Registry
+
+This section describes how to enable the proxy registry for applications deployed with KOTS, including how to ensure that image names are rewritten and that the required image pull secret is provided.
+
+To enable the proxy registry:
+
+1. <StepCreds/>
+
+1. <StepCustomDomain/>
+
+1. Rewrite image names to the location of the image at `proxy.replicated.com` or your custom domain. Also, ensure that the correct image pull secret is provided for all private images. The steps required to configure image names and add the image pull secret vary depending on your application type:
+
+   * **HelmChart v2**: For Helm charts deployed with the [HelmChart v2](/reference/custom-resource-helmchart-v2) custom resource, configure the HelmChart v2 custom resource to dynamically update image names in your Helm chart and to inject the image pull secret that is automatically created by KOTS. For instructions, see [Configuring the HelmChart Custom Resource v2](/vendor/helm-native-v2-using).
+
+   * **Standard Manifests or HelmChart v1**: For standard manifest-based applications or Helm charts deployed with the [HelmChart v1](/reference/custom-resource-helmchart) custom resource, no additional configuration is required.
KOTS automatically rewrites image names and injects image pull secrets during deployment for these application types. + + :::note + <Deprecated/> + ::: + + * **Kubernetes Operators**: For applications packaged with Kubernetes Operators, KOTS cannot modify pods that are created at runtime by the Operator. To support the use of private images in all environments, the Operator code should use KOTS functionality to determine the image name and image pull secrets for all pods when they are created. For instructions, see [Referencing Images](/vendor/operator-referencing-images) in the _Packaging Kubernetes Operators_ section. + +1. If you are deploying Pods to namespaces other than the application namespace, add the namespace to the `additionalNamespaces` attribute of the KOTS Application custom resource. This ensures that KOTS can provision the `imagePullSecret` in the namespace to allow the Pod to pull the image. For instructions, see [Defining Additional Namespaces](operator-defining-additional-namespaces). + +================ +File: docs/vendor/private-images-replicated.mdx +================ +import TeamTokenNote from "../partials/vendor-api/_team-token-note.mdx" + +# Using the Replicated Registry for KOTS Installations + +This topic describes how to push images to the Replicated private registry. + +## Overview + +For applications installed with KOTS, you can host private images on the Replicated registry. Hosting your images on the Replicated registry is useful if you do not already have your images in an existing private registry. It is also useful for testing purposes. + +Images pushed to the Replicated registry are displayed on the **Images** page in the Vendor Portal: + +![Replicated Private Registry section of the vendor portal Images page](/images/images-replicated-registry.png) + +[View a larger version of this image](/images/images-replicated-registry.png) + +For information about security for the Replicated registry, see [Replicated Registry Security](packaging-private-registry-security). + +## Limitations + +The Replicated registry has the following limitations: + +* You cannot delete images from the Replicated registry. As a workaround, you can push a new, empty image to the registry using the same tags as the target image. Replicated does not recommend removing tags from the registry because it could break older releases of your application. + +* When using Docker Build to build and push images to the Replicated registry, provenance attestations are not supported. To avoid a 400 error, include the `--provenance=false` flag to disable all provenance attestations. For more information, see [docker buildx build](https://docs.docker.com/engine/reference/commandline/buildx_build/#provenance) and [Provenance Attestations](https://docs.docker.com/build/attestations/slsa-provenance/) in the Docker documentation. + +* You might encounter a timeout error when pushing images with layers close to or exceeding 2GB in size, such as: "received unexpected HTTP status: 524." To work around this, reduce the size of the image layers and push the image again. If the 524 error persists, continue decreasing the layer sizes until the push is successful. + +## Push Images to the Replicated Registry + +This procedure describes how to tag and push images to the Replicated registry. For more information about building, tagging, and pushing Docker images, see the +[Docker CLI documentation](https://docs.docker.com/engine/reference/commandline/cli/). + +To push images to the Replicated registry: + +1. 
Do one of the following to connect with the `registry.replicated.com` container registry:
+   * **(Recommended) Log in with a user token**: Use `docker login registry.replicated.com` with your Vendor Portal email as the username and a Vendor Portal user token as the password. For more information, see [User API Tokens](replicated-api-tokens#user-api-tokens) in _Generating API Tokens_.
+   * **Log in with a service account token**: Use `docker login registry.replicated.com` with a Replicated Vendor Portal service account token as the password. If you have an existing team token, you can use that instead. You can use any string as the username. For more information, see [Service Accounts](replicated-api-tokens#service-accounts) in _Generating API Tokens_.
+
+     <TeamTokenNote/>
+
+   * **Log in with your credentials**: Use `docker login registry.replicated.com` with your Vendor Portal email and password as the credentials.
+
+1. Tag your private image with the Replicated registry hostname in the standard Docker format:
+
+   ```
+   docker tag IMAGE_NAME registry.replicated.com/APPLICATION_SLUG/TARGET_IMAGE_NAME:TAG
+   ```
+
+   Where:
+   * `IMAGE_NAME` is the name of the existing private image for your application.
+   * `APPLICATION_SLUG` is the unique slug for the application. You can find the application slug on the **Application Settings** page in the Vendor Portal. For more information, see [Get the Application Slug](/vendor/vendor-portal-manage-app#slug) in _Managing Applications_.
+   * `TARGET_IMAGE_NAME` is a name for the image. Replicated recommends that the `TARGET_IMAGE_NAME` is the same as the `IMAGE_NAME`.
+   * `TAG` is a tag for the image.
+
+   For example:
+
+   ```bash
+   docker tag worker registry.replicated.com/myapp/worker:1.0.1
+   ```
+
+1. Push your private image to the Replicated registry using the following format:
+
+   ```
+   docker push registry.replicated.com/APPLICATION_SLUG/TARGET_IMAGE_NAME:TAG
+   ```
+   Where:
+   * `APPLICATION_SLUG` is the unique slug for the application.
+   * `TARGET_IMAGE_NAME` is a name for the image. Use the same name that you used when tagging the image in the previous step.
+   * `TAG` is a tag for the image. Use the same tag that you used when tagging the image in the previous step.
+
+   For example:
+
+   ```bash
+   docker push registry.replicated.com/myapp/worker:1.0.1
+   ```
+
+1. In the [Vendor Portal](https://vendor.replicated.com/), go to **Images** and scroll down to the **Replicated Private Registry** section to confirm that the image was pushed.
+
+================
+File: docs/vendor/private-images-tags-digests.md
+================
+# Using Image Tags and Digests
+
+This topic describes using image tags and digests with your application images. It includes information about when image tags and digests are supported, and how to enable support for image digests in air gap bundles.
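+
+For reference, the following shows the difference between referencing an image by tag and by digest. The repository and digest values are illustrative:
+
+```
+# By tag
+registry.example.com/my-org/api:v1.0.1
+
+# By digest
+registry.example.com/my-org/api@sha256:25935296e595e35783a2e9dd4d329ff0af3f1b2e1afc898486a9908015b75cfb
+```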
+
+## Support for Image Tags and Digests
+
+The following table describes the use cases in which image tags and digests are supported:
+
+<table>
+  <tr>
+    <th width="10%">Installation</th>
+    <th width="30%">Support for Image Tags</th>
+    <th width="30%">Support for Image Digests</th>
+  </tr>
+  <tr>
+    <td>Online</td>
+    <td>Supported by default</td>
+    <td>Supported by default</td>
+  </tr>
+  <tr>
+    <td>Air Gap</td>
+    <td>Supported by default for Replicated KOTS installations</td>
+    <td>
+      <p>Supported for applications on KOTS v1.82.0 and later when the <b>Enable new air gap bundle format</b> toggle is enabled on the channel.</p>
+      <p>For more information, see <a href="#digests-air-gap">Using Image Digests in Air Gap Installations</a> below.</p>
+    </td>
+  </tr>
+</table>
+
+:::note
+You can use image tags and image digests together in any case where both are supported.
+:::
+
+## Using Image Digests in Air Gap Installations {#digests-air-gap}
+
+For applications installed with KOTS v1.82.0 or later, you can enable a format for air gap bundles that supports the use of image digests. This air gap bundle format also ensures that identical image layers are not duplicated, resulting in a smaller air gap bundle size.
+
+You can enable or disable this air gap bundle format using the **Enable new air gap bundle format** toggle in the settings for any channel in the Vendor Portal. The **Enable new air gap bundle format** toggle is enabled by default.
+
+When you enable **Enable new air gap bundle format** on a channel, all air gap bundles that you build or rebuild on that channel use the updated air gap bundle format.
+
+If users on a version of KOTS earlier than v1.82.0 attempt to install or upgrade an application with an air gap bundle that uses the new format, then the Admin Console displays an error message when they attempt to upload the bundle.
+
+To enable the new air gap bundle format on a channel:
+
+1. In the Replicated [Vendor Portal](https://vendor.replicated.com/channels), go to the **Channels** page and click the edit icon in the top right of the channel where you want to use the new air gap bundle format.
+1. Enable the **Enable new air gap bundle format** toggle.
+1. (Recommended) To prevent users on a version of KOTS earlier than v1.82.0 from attempting to upgrade with an air gap bundle that uses the new air gap bundle format, set `minKotsVersion` to "1.82.0" in the Application custom resource manifest file.
+
+   `minKotsVersion` defines the minimum version of KOTS required by the application release. Including `minKotsVersion` displays a warning in the Admin Console when users attempt to install or upgrade the application if they are not on the specified minimum version or later. For more information, see [Setting Minimum and Target Versions for KOTS](packaging-kots-versions).
+
+   **Example**:
+
+   ```yaml
+   apiVersion: kots.io/v1beta1
+   kind: Application
+   metadata:
+     name: my-application
+   spec:
+     ...
+     minKotsVersion: "1.82.0"
+     ...
+   ```
+
+1. Test your changes:
+   1. Save and promote the release to a development environment.
+   1. On the channel where you enabled **Enable new air gap bundle format**, click **Release history**. On the Release History page, click **Build** next to the latest release to create an air gap bundle with the new format.
+
+      ![Vendor portal release history page](../../static/images/airgap-download-bundle.png)
+
+   1. Click **Download Airgap Bundle**.
+   1.
Install or upgrade the application with version 1.82.0 or later of the Admin Console or the KOTS CLI. Upload the new air gap bundle to confirm that the installation or upgrade completes successfully. + +================ +File: docs/vendor/quick-start.mdx +================ +import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" +import HelmPackage from "../partials/helm/_helm-package.mdx" +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import HelmChartCr from "../partials/getting-started/_gitea-helmchart-cr-ec.mdx" +import KotsCr from "../partials/getting-started/_gitea-kots-app-cr-ec.mdx" +import K8sCr from "../partials/getting-started/_gitea-k8s-app-cr.mdx" +import EcCr from "../partials/embedded-cluster/_ec-config.mdx" +import Requirements from "../partials/embedded-cluster/_requirements.mdx" + +# Replicated Quick Start + +Welcome! This topic provides a quick start workflow to help new users learn about the Replicated Platform. Complete this quick start before you onboard your application to the platform. + +## Introduction + +This quick start shows how to create, install, and update releases for a sample Helm chart in the Replicated Platform. You will repeat these same basic steps to create and test releases throughout the onboarding process to integrate Replicated features with your own application. + +The goals of this quick start are to introduce new Replicated users to the following common tasks for the purpose of preparing to onboard to the Replicated Platform: + +* Working with _applications_, _channels_, _releases_, and _customers_ in the Replicated Vendor Portal + +* Working with the Replicated CLI + +* Installing and updating applications on a VM with Replicated Embedded Cluster + +* Managing an installation with the Replicated KOTS Admin Console + +## Set Up the Environment + +Before you begin, ensure that you have access to a VM that meets the requirements for Embedded Cluster: + +<Requirements/> + +## Quick Start + +1. Create an account in the Vendor Portal. You can either create a new team or join an existing team. For more information, see [Creating a Vendor Account](vendor-portal-creating-account). + +1. Create an application using the Replicated CLI: + + 1. On your local machine, install the Replicated CLI: + + ```bash + brew install replicatedhq/replicated/cli + ``` + For more installation options, see [Installing the Replicated CLI](/reference/replicated-cli-installing). + + 1. Authorize the Replicated CLI: + + ```bash + replicated login + ``` + In the browser window that opens, complete the prompts to log in to your Vendor Portal account and authorize the CLI. + + 1. Create an application named `Gitea`: + + ```bash + replicated app create Gitea + ``` + + 1. Set the `REPLICATED_APP` environment variable to the application that you created: + + ```bash + export REPLICATED_APP=APP_SLUG + ``` + Where `APP_SLUG` is the unique application slug provided in the output of the `app create` command. For example, `export REPLICATED_APP=gitea-kite`. + + This allows you to interact with the application using the Replicated CLI without needing to use the `--app` flag with every command. + +1. Get the sample Bitnami Gitea Helm chart and add the Replicated SDK as a dependency: + + 1. 
Run the following command to pull and untar version 1.0.6 of the Bitnami Gitea Helm chart: + + ``` + helm pull --untar oci://registry-1.docker.io/bitnamicharts/gitea --version 1.0.6 + ``` + For more information about this chart, see the [bitnami/gitea](https://github.com/bitnami/charts/tree/main/bitnami/gitea) repository in GitHub. + + 1. Change to the new `gitea` directory that was created: + + ```bash + cd gitea + ``` + + 1. In the Helm chart `Chart.yaml`, add the Replicated SDK as a dependency: + + <DependencyYaml/> + + The Replicated SDK is a Helm chart that provides access to Replicated features and can be installed as a small service alongside your application. For more information, see [About the Replicated SDK](/vendor/replicated-sdk-overview). + + 1. Update dependencies and package the Helm chart to a `.tgz` chart archive: + + ```bash + helm package -u . + ``` + Where `-u` or `--dependency-update` is an option for the helm package command that updates chart dependencies before packaging. For more information, see [Helm Package](https://helm.sh/docs/helm/helm_package/) in the Helm documentation. + +1. Add the chart archive to a release: + + 1. In the `gitea` directory, create a subdirectory named `manifests`: + + ``` + mkdir manifests + ``` + + You will add the files required to support installation with Replicated KOTS and Replicated Embedded Cluster to this subdirectory. + + 1. Move the Helm chart archive that you created to `manifests`: + + ``` + mv gitea-1.0.6.tgz manifests + ``` + + 1. In `manifests`, create the following YAML files: + ``` + cd manifests + ``` + ``` + touch gitea.yaml kots-app.yaml k8s-app.yaml embedded-cluster.yaml + ``` + + 1. In each of the files that you created, paste the corresponding YAML provided in the tabs below: + + <Tabs> + <TabItem value="helmchart" label="gitea.yaml" default> + <h5>Description</h5> + <p>The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart. The <code>name</code> and <code>chartVersion</code> listed in the HelmChart custom resource must match the name and version of a Helm chart archive in the release. The <a href="/vendor/helm-optional-value-keys#conditionally-set-values"><code>optionalValues</code></a> field sets the specified Helm values when a given conditional statement evaluates to true. In this case, if the application is installed with Embedded Cluster, then the Gitea service type is set to `NodePort` and the node port is set to `"32000"`. 
This will allow Gitea to be accessed from the local machine after deployment for the purpose of this quick start.</p>
+      <h5>YAML</h5>
+      <HelmChartCr/>
+    </TabItem>
+    <TabItem value="kots-app" label="kots-app.yaml">
+      <h5>Description</h5>
+      <p>The KOTS Application custom resource enables features in the Replicated Admin Console such as branding, release notes, application status indicators, and custom graphs.</p><p>The YAML below provides a name for the application to display in the Admin Console, adds a custom <em>status informer</em> that displays the status of the <code>gitea</code> Deployment resource in the Admin Console dashboard, adds a custom application icon, and adds the port where the Gitea service can be accessed so that the user can open the application after installation.</p>
+      <h5>YAML</h5>
+      <KotsCr/>
+    </TabItem>
+    <TabItem value="k8s-app" label="k8s-app.yaml">
+      <h5>Description</h5>
+      <p>The Kubernetes SIG Application custom resource supports functionality such as including buttons and links on the Replicated Admin Console dashboard. The YAML below adds an <strong>Open App</strong> button to the Admin Console dashboard that opens the application using the service port defined in the KOTS Application custom resource.</p>
+      <h5>YAML</h5>
+      <K8sCr/>
+    </TabItem>
+    <TabItem value="ec" label="embedded-cluster.yaml">
+      <h5>Description</h5>
+      <p>To install your application with Embedded Cluster, an Embedded Cluster Config must be present in the release. At minimum, the Embedded Cluster Config sets the version of Embedded Cluster that will be installed. You can also define several characteristics about the cluster.</p>
+      <h5>YAML</h5>
+      <EcCr/>
+    </TabItem>
+   </Tabs>
+
+   1. Lint the YAML files:
+
+      ```bash
+      replicated release lint --yaml-dir .
+      ```
+      **Example output:**
+      ```bash
+      RULE                                   TYPE    FILENAME        LINE    MESSAGE
+      config-spec                            warn                            Missing config spec
+      preflight-spec                         warn                            Missing preflight spec
+      troubleshoot-spec                      warn                            Missing troubleshoot spec
+      nonexistent-status-informer-object     warn    kots-app.yaml   8       Status informer points to a nonexistent kubernetes object. If this is a Helm resource, this warning can be ignored.
+      ```
+      :::note
+      You can ignore any warning messages for the purpose of this quick start.
+      :::
+
+   1. Create the release and promote it to the Unstable channel:
+
+      ```bash
+      replicated release create --yaml-dir . --promote Unstable
+      ```
+      **Example output**:
+      ```bash
+      • Reading manifests from . ✓
+      • Creating Release ✓
+        • SEQUENCE: 1
+      • Promoting ✓
+        • Channel 2kvjwEj4uBaCMoTigW5xty1iiw6 successfully set to release 1
+      ```
+
+1. Create a customer so that you can install the release on your VM with Embedded Cluster:
+
+   1. In the [Vendor Portal](https://vendor.replicated.com), under the application drop-down, select the Gitea application that you created.
+
+      <img alt="App drop down" src="/images/quick-start-select-gitea-app.png" width="250px"/>
+
+      [View a larger version of this image](/images/quick-start-select-gitea-app.png)
+
+   1. Click **Customers > Create customer**.
+
+      The **Create a new customer** page opens:
+
+      ![Create a new customer page in the Vendor Portal](/images/create-customer.png)
+
+      [View a larger version of this image](/images/create-customer.png)
+
+   1. For **Customer name**, enter a name for the customer. For example, `Example Customer`.
+
+   1. For **Channel**, select **Unstable**. This allows the customer to install releases promoted to the Unstable channel.
+
+   1. For **License type**, select **Development**.
+
+   1.
For **License options**, enable the following entitlements:
+      * **KOTS Install Enabled**
+      * **Embedded Cluster Enabled**
+
+   1. Click **Save Changes**.
+
+1. Install the application with Embedded Cluster:
+
+   1. On the page for the customer that you created, click **Install instructions > Embedded Cluster**.
+
+      ![Customer install instructions dropdown](/images/customer-install-instructions-dropdown.png)
+
+      [View a larger image](/images/customer-install-instructions-dropdown.png)
+
+   1. On the command line, SSH onto your VM and run the commands in the **Embedded cluster install instructions** dialog to download the latest release, extract the installation assets, and install.
+
+      <img width="500px" src="/images/embedded-cluster-install-dialog-latest.png" alt="embedded cluster install instructions dialog"/>
+
+      [View a larger version of this image](/images/embedded-cluster-install-dialog-latest.png)
+
+   1. When prompted, enter a password for accessing the Admin Console.
+
+      The installation command takes a few minutes to complete.
+
+      **Example output:**
+
+      ```bash
+      ? Enter an Admin Console password: ********
+      ? Confirm password: ********
+      ✔ Host files materialized!
+      ✔ Running host preflights
+      ✔ Node installation finished!
+      ✔ Storage is ready!
+      ✔ Embedded Cluster Operator is ready!
+      ✔ Admin Console is ready!
+      ✔ Additional components are ready!
+      Visit the Admin Console to configure and install gitea-kite: http://104.155.145.60:30000
+      ```
+
+      At this point, the cluster is provisioned and the Admin Console is deployed, but the application is not yet installed.
+
+   1. Go to the URL provided in the output to access the Admin Console.
+
+   1. On the Admin Console landing page, click **Start**.
+
+   1. On the **Secure the Admin Console** screen, review the instructions and click **Continue**. In your browser, follow the instructions that were provided on the **Secure the Admin Console** screen to bypass the warning.
+
+   1. On the **Certificate type** screen, either select **Self-signed** to continue using the self-signed Admin Console certificate or click **Upload your own** to upload your own private key and certificate.
+
+      By default, a self-signed TLS certificate is used to secure communication between your browser and the Admin Console. You will see a warning in your browser every time you access the Admin Console unless you upload your own certificate.
+
+   1. On the login page, enter the Admin Console password that you created during installation and click **Log in**.
+
+   1. On the **Configure the cluster** screen, you can view details about the VM where you installed the application, including its node role, status, CPU, and memory. Users can also optionally add additional nodes on this page before deploying the application. Click **Continue**.
+
+      The Admin Console dashboard opens.
+
+   1. On the Admin Console dashboard, next to the version, click **Deploy** and then **Yes, Deploy**.
+
+      The application status changes from Missing to Unavailable while the `gitea` Deployment is being created.
+
+   1. After a few minutes, when the application status is Ready, click **Open App** to view the Gitea application in a browser.
+
+      For example:
+
+      ![Admin console dashboard showing ready status](/images/gitea-ec-ready.png)
+
+      [View a larger version of this image](/images/gitea-ec-ready.png)
+
+      <img alt="Gitea app landing page" src="/images/gitea-app.png" width="600px"/>
+
+      [View a larger version of this image](/images/gitea-app.png)
+
+1. Return to the Vendor Portal and go to **Customers**.
Under the name of the customer, confirm that you can see an active instance. + + This instance telemetry is automatically collected and sent back to the Vendor Portal by both KOTS and the Replicated SDK. For more information, see [About Instance and Event Data](/vendor/instance-insights-event-data). + +1. Under **Instance ID**, click on the ID to view additional insights including the versions of Kubernetes and the Replicated SDK running in the cluster where you installed the application. For more information, see [Instance Details](/vendor/instance-insights-details). + +1. Create a new release that adds preflight checks to the application: + + 1. In your local filesystem, go to the `gitea` directory. + + 1. Create a `gitea-preflights.yaml` file in the `templates` directory: + + ``` + touch templates/gitea-preflights.yaml + ``` + + 1. In the `gitea-preflights.yaml` file, add the following YAML to create a Kubernetes Secret with a simple preflight spec: + + ```yaml + apiVersion: v1 + kind: Secret + metadata: + labels: + troubleshoot.sh/kind: preflight + name: "{{ .Release.Name }}-preflight-config" + stringData: + preflight.yaml: | + apiVersion: troubleshoot.sh/v1beta2 + kind: Preflight + metadata: + name: preflight-sample + spec: + collectors: + - http: + collectorName: slack + get: + url: https://api.slack.com/methods/api.test + analyzers: + - textAnalyze: + checkName: Slack Accessible + fileName: slack.json + regex: '"status": 200,' + outcomes: + - pass: + when: "true" + message: "Can access the Slack API" + - fail: + when: "false" + message: "Cannot access the Slack API. Check that the server can reach the internet and check [status.slack.com](https://status.slack.com)." + ``` + The YAML above defines a preflight check that confirms that an HTTP request to the Slack API at `https://api.slack.com/methods/api.test` made from the cluster returns a successful response of `"status": 200,`. + + 1. In the `Chart.yaml` file, increment the version to 1.0.7: + + ```yaml + # Chart.yaml + version: 1.0.7 + ``` + + 1. Update dependencies and package the chart to a `.tgz` chart archive: + + ```bash + helm package -u . + ``` + + 1. Move the chart archive to the `manifests` directory: + + ```bash + mv gitea-1.0.7.tgz manifests + ``` + + 1. In the `manifests` directory, open the KOTS HelmChart custom resource (`gitea.yaml`) and update the `chartVersion`: + + ```yaml + # gitea.yaml KOTS HelmChart + chartVersion: 1.0.7 + ``` + + 1. Remove the chart archive for version 1.0.6 of the Gitea chart from the `manifests` directory: + + ``` + rm gitea-1.0.6.tgz + ``` + + 1. From the `manifests` directory, create and promote a new release, setting the version label of the release to `0.0.2`: + + ```bash + replicated release create --yaml-dir . --promote Unstable --version 0.0.2 + ``` + **Example output**: + ```bash + • Reading manifests from . ✓ + • Creating Release ✓ + • SEQUENCE: 2 + • Promoting ✓ + • Channel 2kvjwEj4uBaCMoTigW5xty1iiw6 successfully set to release 2 + ``` + +1. On your VM, update the application instance to the new version that you just promoted: + + 1. In the Admin Console, go to the **Version history** tab. + + The new version is displayed automatically. + + 1. Click **Deploy** next to the new version. + + The Embedded Cluster upgrade wizard opens. + + 1. In the Embedded Cluster upgrade wizard, on the **Preflight checks** screen, note that the "Slack Accessible" preflight check that you added was successful. Click **Next: Confirm and deploy**. 
+ + ![preflight page of the embedded cluster upgrade wizard](/images/quick-start-ec-upgrade-wizard-preflight.png) + + [View a larger version of this image](/images/quick-start-ec-upgrade-wizard-preflight.png) + + :::note + The **Config** screen in the upgrade wizard is bypassed because this release does not contain a KOTS Config custom resource. The KOTS Config custom resource is used to set up the Config screen in the KOTS Admin Console. + ::: + + 1. On the **Confirm and Deploy** page, click **Deploy**. + +1. Reset and reboot the VM to remove the installation: + + ```bash + sudo ./APP_SLUG reset + ``` + Where `APP_SLUG` is the unique slug for the application. + + :::note + You can find the application slug by running `replicated app ls` on your local machine. + ::: + +## Next Steps + +Congratulations! As part of this quick start, you: +* Added the Replicated SDK to a Helm chart +* Created a release with the Helm chart +* Installed the release on a VM with Embedded Cluster +* Viewed telemetry for the installed instance in the Vendor Portal +* Created a new release to add preflight checks to the application +* Updated the application from the Admin Console + +Now that you are familiar with the workflow of creating, installing, and updating releases, you can begin onboarding your own application to the Replicated Platform. + +To get started, see [Replicated Onboarding](replicated-onboarding). + +## Related Topics + +For more information about the Replicated Platform features mentioned in this quick start, see: + +* [About Distributing Helm Charts with KOTS](/vendor/helm-native-about) +* [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about) +* [About the Replicated SDK](/vendor/replicated-sdk-overview) +* [Introduction to KOTS](/intro-kots) +* [Managing Releases with the CLI](/vendor/releases-creating-cli) +* [Packaging a Helm Chart for a Release](/vendor/helm-install-release) +* [Using Embedded Cluster](/vendor/embedded-overview) + +## Related Tutorials + +For additional tutorials related to this quick start, see: + +* [Deploying a Helm Chart on a VM with Embedded Cluster](/vendor/tutorial-embedded-cluster-setup) +* [Adding Preflight Checks to a Helm Chart](/vendor/tutorial-preflight-helm-setup) +* [Deploying a Helm Chart with KOTS and the Helm CLI](/vendor/tutorial-kots-helm-setup) + +================ +File: docs/vendor/releases-about.mdx +================ +import ChangeChannel from "../partials/customers/_change-channel.mdx" +import RequiredReleasesLimitations from "../partials/releases/_required-releases-limitations.mdx" +import RequiredReleasesDescription from "../partials/releases/_required-releases-description.mdx" +import VersionLabelReqsHelm from "../partials/releases/_version-label-reqs-helm.mdx" + +# About Channels and Releases + +This topic describes channels and releases, including information about the **Releases** and **Channels** pages in the Replicated Vendor Portal. + +## Overview + +A _release_ represents a single version of your application. Each release is promoted to one or more _channels_. Channels provide a way to progress releases through the software development lifecycle: from internal testing, to sharing with early-adopters, and finally to making the release generally available. + +Channels also control which customers are able to install a release. You assign each customer to a channel to define the releases that the customer can access. 
For example, a customer assigned to the Stable channel can only install releases that are promoted to the Stable channel, and cannot see any releases promoted to other channels. For more information about assigning customers to channels, see [Channel Assignment](licenses-about#channel-assignment) in _About Customers_. + +Using channels and releases helps you distribute versions of your application to the right customer segments, without needing to manage different release workflows. + +You can manage channels and releases with the Vendor Portal, the Replicated CLI, or the Vendor API v3. For more information about creating and managing releases or channels, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Creating and Editing Channels](releases-creating-channels). + +## About Channels + +This section provides additional information about channels, including details about the default channels in the Vendor Portal and channel settings. + +### Unstable, Beta, and Stable Channels + +Replicated includes the following channels by default: + +* **Unstable**: The Unstable channel is designed for internal testing and development. You can create and assign an internal test customer to the Unstable channel to install in a development environment. Replicated recommends that you do not license any of your external users against the Unstable channel. +* **Beta**: The Beta channel is designed for release candidates and early-adopting customers. Replicated recommends that you promote a release to the Beta channel after it has passed automated testing in the Unstable channel. You can also choose to license early-adopting customers against this channel. +* **Stable**: The Stable channel is designed for releases that are generally available. Replicated recommends that you assign most of your customers to the Stable channel. Customers licensed against the Stable channel only receive application updates when you promote a new release to the Stable channel. + +You can archive or edit any of the default channels, and create new channels. For more information, see [Creating and Editing Channels](releases-creating-channels). + +### Settings + +Each channel has settings. You can customize the settings for a channel to control some of the behavior of releases promoted to the channel. + +The following shows the **Channel Settings** dialog, accessed by clicking the settings icon on a channel: + +<img src="/images/channel-settings.png" alt="Channel Settings dialog in the Vendor Portal" width="500"/> + +[View a larger version of this image](/images/channel-settings.png) + +The following describes each of the channel settings: + +* **Channel name**: The name of the channel. You can change the channel name at any time. Each channel also has a unique ID listed below the channel name. +* **Description**: Optionally, add a description of the channel. +* **Set this channel to default**: When enabled, sets the channel as the default channel. The default channel cannot be archived. +* **Custom domains**: Select the customer-facing domains that releases promoted to this channel use for the Replicated registry, Replicated proxy registry, Replicated app service, or Replicated Download Portal endpoints. If a default custom domain exists for any of these endpoints, choosing a different domain in the channel settings overrides the default. If no custom domains are configured for an endpoint, the drop-down for the endpoint is disabled. 
+ + For more information about configuring custom domains and assigning default domains, see [Using Custom Domains](custom-domains-using). +* The following channel settings apply only to applications that support KOTS: + * **Automatically create airgap builds for newly promoted releases in this channel**: When enabled, the Vendor Portal automatically builds an air gap bundle when a new release is promoted to the channel. When disabled, you can generate an air gap bundle manually for a release on the **Release History** page for the channel. + * **Enable semantic versioning**: When enabled, the Vendor Portal verifies that the version label for any releases promoted to the channel uses a valid semantic version. For more information, see [Semantic Versioning](releases-about#semantic-versioning) in _About Releases_. + * **Enable new airgap bundle format**: When enabled, air gap bundles built for releases promoted to the channel use a format that supports image digests. This air gap bundle format also ensures that identical image layers are not duplicated, resulting in a smaller air gap bundle size. For more information, see [Using Image Digests in Air Gap Installations](private-images-tags-digests#digests-air-gap) in _Using Image Tags and Digests_. + + :::note + The new air gap bundle format is supported for applications installed with KOTS v1.82.0 or later. + ::: + +## About Releases + +This section provides additional information about releases, including details about release promotion, properties, sequencing, and versioning. + +### Release Files + +A release contains your application files as well as the manifests required to install the application with the Replicated installers ([Replicated Embedded Cluster](/vendor/embedded-overview) and [Replicated KOTS](../intro-kots)). + +The application files in releases can be Helm charts and/or Kubernetes manifests. Replicated strongly recommends that all applications are packaged as Helm charts because many enterprise customers will expect to be able to install with Helm. + +### Promotion + +Each release is promoted to one or more channels. While you are developing and testing releases, Replicated recommends promoting to a channel that does not have any real customers assigned, such as the default Unstable channel. When the release is ready to be shared externally with customers, you can then promote to a channel that has the target customers assigned, such as the Beta or Stable channel. + +A release cannot be edited after it is promoted to a channel. This means that you can test a release on an internal development channel, and know with confidence that the same release will be available to your customers when you promote it to a channel where real customers are assigned. + +### Properties + +Each release has properties. You define release properties when you promote a release to a channel. You can edit release properties at any time from the channel **Release History** page in the Vendor Portal. For more information, see [Edit Release Properties](releases-creating-releases#edit-release-properties) in _Managing Releases with the Vendor Portal_. 
+
+The following shows an example of the release properties dialog:
+
+<img src="/images/release-properties.png" width="500px" alt="release properties dialog for a release with version label 0.1.22"/>
+
+[View a larger version of this image](/images/release-properties.png)
+
+As shown in the screenshot above, the release has the following properties:
+
+* **Version label**: The version label for the release. Version labels have the following requirements:
+
+  * If semantic versioning is enabled for the channel, you must use a valid semantic version. For more information, see [Semantic Versioning](#semantic-versioning).
+
+    <VersionLabelReqsHelm/>
+
+* **Requirements**: Select **Prevent this release from being skipped during upgrades** to mark the release as required.
+
+  <RequiredReleasesDescription/>
+
+  <RequiredReleasesLimitations/>
+
+* **Release notes (supports markdown)**: Detailed release notes for the release. The release notes support markdown and are shown to your customer.
+
+### Sequencing
+
+By default, Replicated uses release sequence numbers to organize and order releases, and uses instance sequence numbers in an instance's internal version history.
+
+#### Release Sequences
+
+In the Vendor Portal, each release is automatically assigned a unique, monotonically increasing sequence number. You can use this number as a fallback to identify a promoted or draft release, if you do not set the `Version label` field during promotion. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases).
+
+The following graphic shows release sequence numbers in the Vendor Portal:
+
+<img alt="Release sequence numbers" src="/images/release-sequences.png" width="750px"/>
+
+[View a larger version of this image](/images/release-sequences.png)
+
+#### Instance Sequences
+
+When a new version is available for upgrade, including when KOTS checks for upstream updates as well as when the user syncs their license or makes a config change, the KOTS Admin Console assigns a unique instance sequence number to that version. The instance sequence in the Admin Console starts at 0 and increments for each identifier that is returned when a new version is available.
+
+This instance sequence is unrelated to the release sequence displayed in the Vendor Portal, and it is likely that the instance sequence will differ from the release sequence. Instance sequences are only tracked by KOTS instances, and the Vendor Portal has no knowledge of these numbers.
+
+The following graphic shows instance sequence numbers on the Admin Console dashboard:
+
+<img alt="Instance sequence numbers" src="/images/instance-sequences.png" width="550px"/>
+
+[View a larger version of this image](/images/instance-sequences.png)
+
+#### Channel Sequences
+
+When a release is promoted to a channel, a channel sequence number is assigned. This unique sequence number increments by one and tracks the order in which releases were promoted to a channel. You can view the channel sequence on the **Release History** page in the Vendor Portal, as shown in the image below:
+
+<img alt="Channel sequence on Release History page" src="/images/release-history-channel-sequence.png" width="750px"/>
+
+[View a larger version of this image](/images/release-history-channel-sequence.png)
+
+The channel sequence is also used in certain URLs. For example, a release with a *release sequence* of `170` can have a *channel sequence* of `125`.
The air gap download URL for that release can contain `125` in the URL, even though the release sequence is `170`.
+
+Ordering is more complex if some or all of the releases in a channel have a semantic version label and semantic versioning is enabled for the channel. For more information, see [Semantic Versioning Sequence](#semantic-versioning-sequence).
+
+#### Semantic Versioning Sequence
+
+For channels with semantic versioning enabled, the Admin Console sequences instance releases by their semantic versions instead of their promotion dates.
+
+If releases without a valid semantic version are already promoted to a channel, the Admin Console sorts the releases that do have semantic versions starting with the earliest version and proceeding to the latest. The releases with non-semantic versioning stay in the order of their promotion dates. For example, assume that you promote these releases in the following order to a channel:
+
+- 1.0.0
+- abc
+- 0.1.0
+- xyz
+- 2.0.0
+
+Then, you enable semantic versioning on that channel. The Admin Console sequences the version history for the channel as follows:
+
+- 0.1.0
+- 1.0.0
+- abc
+- xyz
+- 2.0.0
+
+### Semantic Versioning
+
+Semantic versioning is available with Replicated KOTS v1.58.0 and later. Note the following:
+
+- For applications created in the Vendor Portal on or after February 23, 2022, semantic versioning is enabled by default on the Stable and Beta channels. Semantic versioning is disabled on the Unstable channel by default.
+
+- For existing applications created before February 23, 2022, semantic versioning is disabled by default on all channels.
+
+Semantic versioning is recommended because it makes versioning more predictable for users and lets you enforce versioning so that no one uses an incorrect version.
+
+To use semantic versioning:
+
+1. Enable semantic versioning on a channel, if it is not enabled by default. Click the **Edit channel settings** icon, and turn on the **Enable semantic versioning** toggle.
+1. Assign a semantic version number when you promote a release.
+
+Releases promoted to a channel with semantic versioning enabled are verified to ensure that the release version label is a valid semantic version. For more information about valid semantic versions, see [Semantic Versioning 2.0.0](https://semver.org).
+
+If you enable semantic versioning for a channel and then promote releases to it, Replicated recommends that you do not later disable semantic versioning for that channel.
+
+You can enable semantic versioning on a channel that already has releases promoted to it without semantic versioning. Any subsequently promoted releases must use semantic versioning. In this case, the channel will have releases with and without semantic version numbers. For information about how Replicated organizes these release sequences, see [Semantic Versioning Sequence](#semantic-versioning-sequence).
+
+### Demotion
+
+A channel release can be demoted from a channel. When a channel release is demoted, the release is no longer available for download, but is not withdrawn from environments where it was already downloaded or installed.
+
+The demoted release's channel sequence and version are not reused. For customers, the release will appear to have been skipped. Un-demoting a release will restore its place in the channel sequence, making it again available for download and installation.
+
+For information about how to demote a release, see [Demote a Release](/vendor/releases-creating-releases#demote-a-release) in _Managing Releases with the Vendor Portal_.
+
+## Vendor Portal Pages
+
+This section provides information about the channels and releases pages in the Vendor Portal.
+
+### Channels Page
+
+The **Channels** page in the Vendor Portal includes information about each channel. From the **Channels** page, you can edit and archive your channels. You can also edit the properties of the releases promoted to each channel, and view and edit the customers assigned to each channel.
+
+The following shows an example of a channel in the Vendor Portal **Channels** page:
+
+<img src="/images/channel-card.png" alt="Channel card in the Vendor Portal" width="400"/>
+
+[View a larger version of this image](/images/channel-card.png)
+
+As shown in the image above, you can do the following from the **Channels** page:
+
+* Edit the channel settings by clicking on the settings icon, or archive the channel by clicking on the trash can icon. For information about channel settings, see [Settings](#settings).
+
+* In the **Adoption rate** section, view data on the adoption rate of releases promoted to the channel among customers assigned to the channel.
+
+* In the **Customers** section, view the number of active and inactive customers assigned to the channel. Click **Details** to go to the **Customers** page, where you can view details about the customers assigned to the channel.
+
+* In the **Latest release** section, view the properties of the latest release, and get information about any warnings or errors in the YAML files for the latest release.
+
+  Click **Release history** to access the history of all releases promoted to the channel. From the **Release History** page, you can view the version labels and files in each release that has been promoted to the selected channel.
+
+  From the **Release History** page, you can also build and download air gap bundles to be used in air gap installations with Replicated installers (Embedded Cluster, KOTS, kURL), edit the release properties for each release promoted to the channel, and demote a release from the channel.
+
+  The following shows an example of the **Release History** page:
+
+  <img src="/images/channels-release-history.png" alt="Release history page in the Vendor Portal" width="750"/>
+
+  [View a larger version of this image](/images/channels-release-history.png)
+
+* For applications that support KOTS, you can also do the following from the **Channel** page:
+
+  * In the **kURL installer** section, view the current kURL installer promoted to the channel. Click **Installer history** to view the history of kURL installers promoted to the channel. For more information about creating kURL installers, see [Creating a kURL Installer](packaging-embedded-kubernetes).
+
+  * In the **Install** section, view and copy the installation commands for the latest release on the channel.
+
+### Draft Release Page
+
+For applications that support installation with KOTS, the **Draft** page provides a YAML editor to add, edit, and delete your application files and Replicated custom resources. To open the **Draft** page, click **Releases > Create Release** in the Vendor Portal.
+
+The following shows an example of the **Draft** page in the Vendor Portal:
+
+  <img alt="Draft release page" src="/images/guides/kots/default-yaml.png" width="700px"/>
+
+  [View a larger version of this image](/images/guides/kots/default-yaml.png)
+
+You can do the following tasks on the **Draft** page:
+
+- In the file directory, manage the directory structure of your release files. Replicated custom resource files are grouped together above the white line of the file directory. Application files are grouped together underneath the white line in the file directory.
+
+  Delete files using the trash icon that displays when you hover over a file. Create a new file or folder using the corresponding icons at the bottom of the file directory pane. You can also drag and drop files in and out of the folders.
+
+  ![Manage File Directory](/images/new-file-and-trash.png)
+
+- Edit the YAML files by selecting a file in the directory and making changes in the YAML editor.
+
+- In the **Help** or **Config help** pane, view the linter for any errors. If there are no errors, you get an **Everything looks good!** message. If an error displays, you can click the **Learn how to configure** link. For more information, see [Linter Rules](/reference/linter).
+
+- Select the Config custom resource to preview how your application's Config page will look to your customers. The **Config preview** pane only appears when you select that file. For more information, see [About the Configuration Screen](config-screen-about).
+
+- Select the Application custom resource to preview how your application icon will look in the Admin Console. The **Application icon preview** only appears when you select that file. For more information, see [Customizing the Application Icon](admin-console-customize-app-icon).
+
+================
+File: docs/vendor/releases-creating-channels.md
+================
+# Creating and Editing Channels
+
+This topic describes how to create and edit channels using the Replicated Vendor Portal. For more information about channels, see [About Channels and Releases](releases-about).
+
+For information about creating channels with the Replicated CLI, see [channel create](/reference/replicated-cli-channel-create).
+
+For information about creating and managing channels with the Vendor API v3, see the [channels](https://replicated-vendor-api.readme.io/reference/createchannel) section in the Vendor API v3 documentation.
+
+## Create a Channel
+
+To create a channel:
+
+1. From the Replicated [Vendor Portal](https://vendor.replicated.com), select **Channels** from the left menu.
+1. Click **Create Channel**.
+
+   The **Create a new channel** dialog opens. For example:
+
+   <img src="/images/channels-create.png" alt="Create channel dialog" width="400px"/>
+
+1. Enter a name and description for the channel.
+1. (Recommended) Enable semantic versioning on the channel, if it is not enabled by default, by turning on **Enable semantic versioning**. For more information about semantic versioning and defaults, see [Semantic Versioning](releases-about#semantic-versioning).
+
+1. (Recommended) Enable an air gap bundle format that supports image digests and deduplication of image layers, by turning on **Enable new air gap bundle format**. For more information, see [Using Image Tags and Digests](private-images-tags-digests).
+
+1. Click **Create Channel**.
+
+## Edit a Channel
+
+To edit the settings of an existing channel:
+
+1. In the Vendor Portal, select **Channels** from the left menu.
+1.
+
+   The Channel settings dialog opens. For example:
+
+   <img src="/images/channel-settings.png" alt="Channel Settings dialog in the Vendor Portal" width="500"/>
+
+1. Edit the fields and click **Save**.
+
+   For more information about channel settings, see [Settings](releases-about#settings) in _About Channels and Releases_.
+
+## Archive a Channel
+
+You can archive an existing channel to prevent any new releases from being promoted to the channel.
+
+:::note
+You cannot archive a channel if:
+* There are customers assigned to the channel.
+* The channel is set as the default channel.
+
+Assign customers to a different channel and set a different channel as the default before archiving.
+:::
+
+To archive a channel with the Vendor Portal or the Replicated CLI:
+
+* **Vendor Portal**: In the Vendor Portal, go to the **Channels** page and click the trash can icon in the top right corner of the card for the channel that you want to archive.
+* **Replicated CLI**:
+   1. Run the following command to find the ID for the channel that you want to archive:
+      ```
+      replicated channel ls
+      ```
+      The output of this command includes the ID and name for each channel, as well as information about the latest release version on each channel.
+
+   1. Run the following command to archive the channel:
+      ```
+      replicated channel rm CHANNEL_ID
+      ```
+      Replace `CHANNEL_ID` with the channel ID that you retrieved in the previous step.
+
+      For more information, see [channel rm](/reference/replicated-cli-channel-rm) in the Replicated CLI documentation.
+
+================
+File: docs/vendor/releases-creating-cli.mdx
+================
+# Managing Releases with the CLI
+
+This topic describes how to use the Replicated CLI to create and promote releases.
+
+For information about creating and managing releases with the Vendor Portal, see [Managing Releases with the Vendor Portal](/vendor/releases-creating-releases).
+
+For information about creating and managing releases with the Vendor API v3, see the [releases](https://replicated-vendor-api.readme.io/reference/createrelease) section in the Vendor API v3 documentation.
+
+## Prerequisites
+
+Before you create a release using the Replicated CLI, complete the following prerequisites:
+
+* Install the Replicated CLI and then log in to authorize the CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing).
+
+* Create a new application using the `replicated app create APP_NAME` command. You only need to do this one time for each application that you want to deploy. See [`app create`](/reference/replicated-cli-app-create) in _Reference_.
+
+* Set the `REPLICATED_APP` environment variable to the slug of the target application. See [Set Environment Variables](/reference/replicated-cli-installing#env-var) in _Installing the Replicated CLI_.
+
+   **Example**:
+
+   ```bash
+   export REPLICATED_APP=my-app-slug
+   ```
+
+## Create a Release From a Local Directory {#dir}
+
+You can use the Replicated CLI to create a release from a local directory that contains the release files.
+
+To create and promote a release:
+
+1. (Helm Charts Only) If your release contains any Helm charts:
+
+   1. Package each Helm chart as a `.tgz` file. See [Packaging a Helm Chart for a Release](/vendor/helm-install-release).
+
+   1. Move the `.tgz` file or files to the local directory that contains the release files:
+
+      ```bash
+      mv CHART_TGZ PATH_TO_RELEASE_DIR
+      ```
+      Where:
+      * `CHART_TGZ` is the `.tgz` Helm chart archive.
+      * `PATH_TO_RELEASE_DIR` is the path to the directory that contains the release files.
+
+      **Example**:
+
+      ```bash
+      mv wordpress-1.3.5.tgz manifests
+      ```
+
+   1. In the same directory that contains the release files, add a HelmChart custom resource for each Helm chart in the release. See [Configuring the HelmChart Custom Resource](helm-native-v2-using).
+
+1. Lint the application manifest files and ensure that there are no errors in the YAML:
+
+   ```bash
+   replicated release lint --yaml-dir=PATH_TO_RELEASE_DIR
+   ```
+
+   Where `PATH_TO_RELEASE_DIR` is the path to the directory with the release files.
+
+   For more information, see [release lint](/reference/replicated-cli-release-lint) and [Linter Rules](/reference/linter).
+
+1. Do one of the following:
+
+   * **Create and promote the release with one command**:
+
+     ```bash
+     replicated release create --yaml-dir PATH_TO_RELEASE_DIR --lint --promote CHANNEL
+     ```
+     Where:
+     * `PATH_TO_RELEASE_DIR` is the path to the directory with the release files.
+     * `CHANNEL` is the channel ID or the case-sensitive name of the channel.
+
+   * **Create and edit the release before promoting**:
+
+     1. Create the release:
+
+        ```bash
+        replicated release create --yaml-dir PATH_TO_RELEASE_DIR
+        ```
+        Where `PATH_TO_RELEASE_DIR` is the path to the directory with the release files.
+
+        For more information, see [release create](/reference/replicated-cli-release-create).
+
+     1. Edit and update the release as desired:
+
+        ```bash
+        replicated release update SEQUENCE --yaml-dir PATH_TO_RELEASE_DIR
+        ```
+        Where:
+
+        - `SEQUENCE` is the release sequence number. This identifies the existing release to be updated.
+        - `PATH_TO_RELEASE_DIR` is the path to the directory with the release files.
+
+        For more information, see [release update](/reference/replicated-cli-release-update).
+
+     1. Promote the release when you are ready to test it. Releases cannot be edited after they are promoted. To make changes after promotion, create a new release.
+
+        ```bash
+        replicated release promote SEQUENCE CHANNEL
+        ```
+
+        Where:
+
+        - `SEQUENCE` is the release sequence number.
+        - `CHANNEL` is the channel ID or the case-sensitive name of the channel.
+
+        For more information, see [release promote](/reference/replicated-cli-release-promote).
+
+1. Verify that the release was promoted to the target channel:
+
+   ```bash
+   replicated release ls
+   ```
+
+================
+File: docs/vendor/releases-creating-customer.mdx
+================
+import ChangeChannel from "../partials/customers/_change-channel.mdx"
+import Download from "../partials/customers/_download.mdx"
+import GitOpsNotRecommended from "../partials/gitops/_gitops-not-recommended.mdx"
+
+# Creating and Managing Customers
+
+This topic describes how to create and manage customers in the Replicated Vendor Portal. For more information about customer licenses, see [About Customers](licenses-about).
+
+## Create a Customer
+
+This procedure describes how to create a new customer in the Vendor Portal. You can edit customer details at any time.
+
+For information about creating a customer with the Replicated CLI, see [customer create](/reference/replicated-cli-customer-create).
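+
+For example, the following sketch creates a customer and assigns them to a channel (the name, channel, and expiration values shown are placeholders, and `--expires-in` is optional; run `replicated customer create --help` to confirm the available flags):
+
+```bash
+replicated customer create \
+  --name "Example Customer" \
+  --channel Stable \
+  --expires-in 8760h
+```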
+
+For information about creating and managing customers with the Vendor API v3, see the [customers](https://replicated-vendor-api.readme.io/reference/getcustomerentitlements) section in the Vendor API v3 documentation.
+
+To create a customer:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers > Create customer**.
+
+   The **Create a new customer** page opens:
+
+   ![Create a new customer page in the Vendor Portal](/images/create-customer.png)
+
+   [View a larger version of this image](/images/create-customer.png)
+
+1. For **Customer name**, enter a name for the customer.
+
+1. For **Customer email**, enter the email address for the customer.
+
+   :::note
+   A customer email address is required for Helm installations. This email address is never used to send emails to customers.
+   :::
+
+1. For **Assigned channel**, assign the customer to one of your channels. You can select any channel that has at least one release. The channel a customer is assigned to determines the application releases that they can install. For more information, see [Channel Assignment](licenses-about#channel-assignment) in _About Customers_.
+
+   :::note
+   <ChangeChannel/>
+   :::
+
+1. For **Custom ID**, you can enter a custom ID for the customer. Setting a custom ID allows you to easily associate this Replicated customer record with your own internal customer data systems during data exports. Replicated recommends using an alphanumeric value such as your Salesforce ID or Hubspot ID.
+
+   :::note
+   Replicated does _not_ require that the custom ID is unique. The custom ID is for vendor data reconciliation purposes, and is not used by Replicated for any functionality purposes.
+   :::
+
+1. For **Expiration policy**, by default, **Customer's license does not expire** is enabled. To set an expiration date for the license, enable **Customer's license has an expiration date** and specify an expiration date in the **When does this customer expire?** calendar.
+
+1. For **Customer type**, set the customer type. Customer type is used only for reporting purposes. Customer access to your application is not affected by the type you assign to them. By default, **Trial** is selected. For more information, see [About Customer License Types](licenses-about-types).
+
+1. Enable any of the available options for the customer. For more information about the license options, see [Built-in License Fields](/vendor/licenses-using-builtin-fields). For more information about enabling install types, see [Managing Install Types for a License (Beta)](/vendor/licenses-install-types).
+
+1. For **Custom fields**, configure any custom fields that you have added for your application. For more information about how to create custom fields for your application, see [Managing Customer License Fields](licenses-adding-custom-fields).
+
+1. Click **Save Changes**.
+
+## Edit a Customer
+
+You can edit the built-in and custom license fields for a customer at any time by going to the **Manage customer** page for that customer. For more information, see [Manage Customer Page](licenses-about#about-the-manage-customer-page) in _About Customers_.
+
+Replicated recommends that you test any license changes in a development environment. If needed, install the application using a developer license that matches the current customer's entitlements before editing the developer license. Then validate the updated license.
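+
+For example, if your application includes the Replicated SDK, one way to validate an entitlement change from inside the cluster is to query the SDK's in-cluster license API (a sketch; it assumes the SDK's default `replicated` service on port 3000 in the application namespace, and `expires_at` is one of the built-in license fields):
+
+```bash
+kubectl run -it --rm license-check --image=curlimages/curl --restart=Never -- \
+  curl -s http://replicated:3000/api/v1/license/fields/expires_at
+```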
+
+:::important
+For online environments, changing license entitlements can trigger changes to the customer's installed application instance during runtime. Replicated recommends that you verify the logic your application uses to query and enforce the target entitlement before making any changes.
+:::
+
+To edit license fields:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers**.
+
+1. Select the target customer and click the **Manage customer** tab.
+
+1. On the **Manage customer** page, edit the desired fields and click **Save**.
+
+   ![Full manage customer page for a customer named Prestige Financial](/images/customer-details.png)
+
+1. Test the changes by installing or updating in a development environment. Do one of the following, depending on the installation method for your application:
+   * For applications installed with Helm that use the Replicated SDK, you can add logic to your application to enforce entitlements before installation or during runtime using the Replicated SDK API license endpoints. See [Checking Entitlements in Helm Charts Before Deployment](licenses-reference-helm).
+   * For applications installed with Replicated KOTS, update the license in the Admin Console. See [Update Online Licenses](/enterprise/updating-licenses#update-online-licenses) and [Update Air Gap Licenses](/enterprise/updating-licenses#update-air-gap-licenses) in _Updating Licenses in the Admin Console_.
+
+## Archive a Customer
+
+When you archive a customer in the Vendor Portal, the customer is hidden from search by default and becomes read-only. Archiving does not affect license files that were downloaded before the customer was archived.
+
+To expire a license, set an expiration date and policy in the **Expiration policy** field before you archive the customer.
+
+To archive a customer:
+
+1. In the Vendor Portal, click **Customers**. Select the target customer, then click the **Manage customer** tab.
+
+1. Click **Archive Customer**. In the confirmation dialog, click **Archive Customer** again.
+
+You can unarchive a customer by clicking **Unarchive Customer** on the customer's **Manage customer** page.
+
+## Export Customer and Instance Data {#export}
+
+<Download/>
+
+For more information about the data fields in the CSV downloads, see [Data Dictionary](/vendor/instance-data-export#data-dictionary) in _Export Customers and Instance Data_.
+
+## Filter and Search Customers
+
+The **Customers** page provides a search box and filters that help you find customers:
+
+<img alt="search box and filters on the customers page" src="/images/customers-filter.png" width="400px"/>
+
+[View a larger version of this image](/images/customers-filter.png)
+
+You can filter customers based on whether they are active, by license type, and by channel name. You can filter using multiple criteria, such as Active, Paid, and Stable. However, you can select only one license type and one channel at a time.
+
+If there is adoption rate data available for the channel that you are filtering by, you can also filter by current version, previous version, and older versions.
+
+You can also filter customers by custom ID or email address. To filter customers by custom ID or email, use the search box and prepend your search term with "customId:" (for example, `customId:1234`) or "email:" (for example, `email:bob@replicated.com`).
+
+If you want to filter information using multiple license types or channels, you can download a CSV file instead. For more information, see [Export Customer and Instance Data](#export) above.
+
+================
+File: docs/vendor/releases-creating-releases.mdx
+================
+import RequiredReleasesLimitations from "../partials/releases/_required-releases-limitations.mdx"
+import RequiredReleasesDescription from "../partials/releases/_required-releases-description.mdx"
+
+# Managing Releases with the Vendor Portal
+
+This topic describes how to use the Replicated Vendor Portal to create and promote releases, edit releases, edit release properties, and archive releases.
+
+For information about creating and managing releases with the CLI, see [Managing Releases with the CLI](/vendor/releases-creating-cli).
+
+For information about creating and managing releases with the Vendor API v3, see the [releases](https://replicated-vendor-api.readme.io/reference/createrelease) and [channelReleases](https://replicated-vendor-api.readme.io/reference/channelreleaseairgapbundleurl) sections in the Vendor API v3 documentation.
+
+## Create a Release
+
+To create and promote a release in the Vendor Portal:
+
+1. From the **Applications** dropdown list, select **Create an app** or select an existing application to update.
+
+1. Click **Releases > Create release**.
+
+   ![Create Release](/images/release-create-new.png)
+
+   [View a larger version of this image](/images/release-create-new.png)
+
+1. Add your files to the release. You can do this by dragging and dropping files to the file directory in the YAML editor or clicking the plus icon to add a new, untitled YAML file.
+
+1. For any Helm charts that you add to the release, in the **Select Installation Method** dialog, select the version of the HelmChart custom resource that KOTS will use to install the chart. `kots.io/v1beta2` is recommended. For more information about the HelmChart custom resource, see [Configuring the HelmChart Custom Resource](helm-native-v2-using).
+
+   <img src="/images/helm-select-install-method.png" alt="select installation method dialog" width="550px"/>
+
+   [View a larger version of this image](/images/helm-select-install-method.png)
+
+1. Click **Save release**. This saves a draft that you can continue to edit until you promote it.
+
+1. Click **Promote**. In the **Promote Release** dialog, edit the fields:
+
+   For more information about the requirements and limitations of each field, see <a href="releases-about#properties">Properties</a> in _About Channels and Releases_.
+
+   <table>
+     <tr>
+       <th width="30%">Field</th>
+       <th width="70%">Description</th>
+     </tr>
+     <tr>
+       <td>Channel</td>
+       <td>
+         <p>Select the channel where you want to promote the release. If you are not sure which channel to use, use the default Unstable channel.</p>
+       </td>
+     </tr>
+     <tr>
+       <td>Version label</td>
+       <td>
+         <p>Enter a version label.</p>
+         <p>If you have one or more Helm charts in your release, the Vendor Portal automatically populates this field. You can change the version label to any <code>version</code> specified in any of the <code>Chart.yaml</code> files included in the release. To check a chart's <code>version</code> locally, see the example after this procedure.</p>
+       </td>
+     </tr>
+     <tr>
+       <td>Requirements</td>
+       <td>
+         Select <strong>Prevent this release from being skipped during upgrades</strong> to mark the release as required for KOTS installations. This option does not apply to installations with Helm.
+       </td>
+     </tr>
+     <tr>
+       <td>Release notes</td>
+       <td>Add release notes. The release notes support Markdown and are shown to your customer.</td>
+     </tr>
+   </table>
+
+1. Click **Promote**.
+
+   The release appears in an **Active** state on the Releases page.
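+
+If you want to confirm which version labels will be available before promoting, you can inspect the `version` field in each packaged chart locally (a sketch; the archive name is a placeholder):
+
+```bash
+helm show chart ./wordpress-1.3.5.tgz | grep '^version:'
+```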
+
+## Edit a Draft Release
+
+To edit a draft release:
+
+1. From the **Applications** dropdown list, select an existing application to update.
+1. On the **Releases** page, find the draft release you want to edit and click **Edit YAML**.
+
+   <img src="/images/releases-edit-draft.png" alt="Edit YAML button for a draft release in the Vendor Portal" width="400"/>
+
+   [View a larger image](/images/releases-edit-draft.png)
+
+1. Click **Save** to save your updated draft.
+1. (Optional) Click **Promote**.
+
+## Edit Release Properties
+
+You can edit the properties of a release at any time. For more information about release properties, see [Properties](releases-about#properties) in _About Channels and Releases_.
+
+To edit release properties:
+
+1. Go to **Channels**.
+1. In the channel where the release was promoted, click **Release history**.
+1. For the release sequence that you want to edit, open the dot menu and click **Edit release**.
+1. Edit the properties as needed.
+
+   <img src="/images/release-properties.png" alt="Release Properties dialog in the Vendor Portal" width="300"/>
+
+   [View a larger image](/images/release-properties.png)
+1. Click **Update Release**.
+
+## Archive a Release
+
+You can archive releases to remove them from view on the **Releases** page. Archiving a release that has been promoted does _not_ remove the release from the channel's **Release History** page or prevent KOTS from downloading the archived release.
+
+To archive one or more releases:
+
+1. From the **Releases** page, click the trash can icon in the upper right corner.
+1. Select one or more releases.
+1. Click **Archive Releases**.
+1. Confirm the archive action when prompted.
+
+## Demote a Release
+
+A release can be demoted from a channel. When a release is demoted, it is no longer available for download, but it is not withdrawn from environments where it was already downloaded or installed. For more information, see [Demotion](/vendor/releases-about#demotion) in _About Channels and Releases_.
+
+For information about demoting and un-demoting releases with the Replicated CLI, see [channel demote](/reference/replicated-cli-channel-demote) and [channel un-demote](/reference/replicated-cli-channel-un-demote).
+
+To demote a release in the Vendor Portal:
+
+1. Go to **Channels**.
+1. In the channel where the release was promoted, click **Release history**.
+1. For the release sequence that you want to demote, open the dot menu and select **Demote Release**.
+
+   ![Release history page](/images/channels-release-history.png)
+
+   [View a larger version of this image](/images/channels-release-history.png)
+
+   After the release is demoted, the given release sequence is greyed out and a **Demoted** label is displayed next to the release on the **Release History** page.
+
+================
+File: docs/vendor/releases-share-download-portal.md
+================
+import DownloadPortal from "../partials/kots/_download-portal-about.mdx"
+
+# Downloading Assets from the Download Portal
+
+This topic describes how to download customer license files, air gap bundles, and other assets from the Replicated Download Portal.
+
+For information about downloading air gap bundles and licenses with the Vendor API v3, see the following pages in the Vendor API v3 documentation:
+* [Download a customer license file as YAML](https://replicated-vendor-api.readme.io/reference/downloadlicense)
+* [Trigger airgap build for a channel's release](https://replicated-vendor-api.readme.io/reference/channelreleaseairgapbuild)
+* [Get airgap bundle download URL for the active release on the channel](https://replicated-vendor-api.readme.io/reference/channelreleaseairgapbundleurl)
+
+## Overview
+
+<DownloadPortal/>
+
+The most common use case for the Download Portal is customers installing into air gap environments who need to download both their license file and multiple air gap bundles.
+
+The following is an example of the Download Portal for an air gap customer installing in their own existing cluster:
+
+![Download Portal for existing cluster air gap installs](/images/download-portal-existing-cluster.png)
+
+[View a larger version of this image](/images/download-portal-existing-cluster.png)
+
+## Limitations
+
+* Installation assets for [Replicated Embedded Cluster](/vendor/embedded-overview) are not available for download in the Download Portal.
+
+* Sessions in the Download Portal are valid for 72 hours. After the session expires, your customer must log in again. The Download Portal session length is not configurable.
+
+## Download Assets from the Download Portal
+
+To log in to the Download Portal and download assets:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), on the **Customers** page, click on the name of the customer.
+
+1. (Optional) On the **Manage customer** tab, enable the **Airgap Download Enabled** option. This makes air gap bundles available in the Download Portal.
+
+   ![airgap download enabled license option](/images/airgap-download-enabled.png)
+
+   [View a larger version of this image](/images/airgap-download-enabled.png)
+
+1. On the **Reporting** tab, in the **Download portal** section, click **Manage customer password**.
+
+   ![download portal section](/images/download-portal-link.png)
+
+   [View a larger version of this image](/images/download-portal-link.png)
+
+1. In the pop-up window, enter a password or click **Generate**.
+
+   <img alt="download portal password pop-up" src="/images/download-portal-password-popup.png" width="450px"/>
+
+   [View a larger version of this image](/images/download-portal-password-popup.png)
+
+1. Click **Copy** to copy the password to your clipboard.
+
+   After the password is saved, it cannot be retrieved again. If you lose the password, you can generate a new one.
+
+1. Click **Save** to set the password.
+
+1. Click **Visit download portal** to log in to the Download Portal and preview your customer's experience.
+
+   :::note
+   By default, the Download Portal uses the domain `get.replicated.com`. You can optionally use a custom domain for the Download Portal. For more information, see [Using Custom Domains](/vendor/custom-domains-using).
+   :::
+
+1. In the Download Portal, on the left side of the screen, select one of the following:
+   * **Bring my own Kubernetes**: View the downloadable assets for existing cluster installations with KOTS.
+   * **Embedded Kubernetes**: View the downloadable assets for Replicated kURL installations.
+
+   :::note
+   Installation assets for [Replicated Embedded Cluster](/vendor/embedded-overview) are not available for download in the Download Portal.
+ ::: + + The following is an example of the Download Portal for an air gap customer: + + ![download portal for existing cluster air gap installs](/images/download-portal-existing-cluster.png) + + [View a larger version of this image](/images/download-portal-existing-cluster.png) + +1. Under **Select application version**, use the dropdown to select the target application release version. The Download Portal automatically makes the correct air gap bundles available for download based on the selected application version. + +1. Click the download button to download each asset. + +1. To share installation files with a customer, send the customer their unique link and password for the Download Portal. + +================ +File: docs/vendor/releases-sharing-license-install-script.mdx +================ +import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; + +# Finding Installation Commands for a Release + +This topic describes where to find the installation commands and instructions for releases in the Replicated Vendor Portal. + +For information about getting installation commands with the Replicated CLI, see [channel inspect](/reference/replicated-cli-channel-inspect). For information about getting installation commands with the Vendor API v3, see [Get install commands for a specific channel release](https://replicated-vendor-api.readme.io/reference/getchannelreleaseinstallcommands) in the Vendor API v3 documentation. + +## Get Commands for the Latest Release + +Every channel in the Vendor Portal has an **Install** section where you can find installation commands for the latest release on the channel. + +To get the installation commands for the latest release: + +1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Channels** page. + +1. On the target channel card, under **Install**, click the tab for the type of installation command that you want to view: + + <Tabs> + <TabItem value="kots" label="KOTS" default> + <p>View the command for installing with Replicated KOTS in existing clusters.</p> + + <img alt="Install section of the channel card" src="/images/channel-card-install-kots.png" width="400px"/> + [View a larger version of this image](/images/channel-card-install-kots.png) + </TabItem> + <TabItem value="embedded" label="Embedded K8s" default> + <p>View the commands for installing with Replicated Embedded Cluster or Replicated kURL on VMs or bare metal servers.</p> + + <p>In the dropdown, choose **kURL** or **Embedded Cluster** to view the command for the target installer:</p> + + <img alt="Install section of the channel card" src="/images/channel-card-install-kurl.png" width="400px"/> + [View a larger version of this image](/images/channel-card-install-kurl.png) + + <img alt="Install section of the channel card" src="/images/channel-card-install-ec.png" width="400px"/> + [View a larger version of this image](/images/channel-card-install-ec.png) + + :::note + The Embedded Cluster installation instructions are customer-specific. Click **View customer list** to navigate to the page for the target customer. For more information, see [Get Customer-Specific Installation Instructions for Helm or Embedded Cluster](#customer-specific) below. 
+ ::: + </TabItem> + <TabItem value="helm" label="Helm" default> + <p>View the command for installing with the Helm CLI in an existing cluster.</p> + + <img alt="Install section of the channel card" src="/images/channel-card-install-helm.png" width="400px"/> + [View a larger version of this image](/images/channel-card-install-helm.png) + + :::note + The Helm installation instructions are customer-specific. Click **View customer list** to navigate to the page for the target customer. For more information, see [Get Customer-Specific Installation Instructions for Helm or Embedded Cluster](#customer-specific) below. + ::: + </TabItem> + </Tabs> + +## Get Commands for a Specific Release + +Every channel in the Vendor Portal has a **Release history** page where you can find the installation commands for specific release versions. + +To get the command for a specific release version: + +1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Channels** page. + +1. On the channel card, click **Release history**. + + <img alt="Release history link on channel card" src="/images/release-history-link.png" width="500px"/> + + [View a larger version of this image](/images/release-history-link.png) + +1. For the target release version, open the dot menu and click **Install Commands**. + + ![Release history page](/images/channels-release-history.png) + + [View a larger version of this image](/images/channels-release-history.png) + +1. In the **Install Commands** dialog, click the tab for the type of installation command that you want to view: + + <Tabs> + <TabItem value="kots" label="KOTS" default> + <p>View the command for installing with Replicated KOTS in existing clusters.</p> + + <img alt="Install section of the channel card" src="/images/release-history-install-kots.png" width="500px"/> + [View a larger version of this image](/images/release-history-install-kots.png) + </TabItem> + <TabItem value="embedded" label="Embedded K8s" default> + <p>View the commands for installing with Replicated Embedded Cluster or Replicated kURL on VMs or bare metal servers.</p> + + <p>In the dropdown, choose **kURL** or **Embedded Cluster** to view the command for the target installer:</p> + + <img alt="Install section of the channel card" src="/images/release-history-install-kurl.png" width="500px"/> + [View a larger version of this image](/images/release-history-install-kurl.png) + + <img alt="Install section of the channel card" src="/images/release-history-install-embedded-cluster.png" width="500px"/> + [View a larger version of this image](/images/release-history-install-embedded-cluster.png) + + :::note + The Embedded Cluster installation instructions are customer-specific. Click **View customer list** to navigate to the page for the target customer. For more information, see [Get Customer-Specific Installation Instructions for Helm or Embedded Cluster](#customer-specific) below. + ::: + </TabItem> + <TabItem value="helm" label="Helm" default> + <p>View the command for installing with the Helm CLI in an existing cluster.</p> + + <img alt="Install section of the channel card" src="/images/release-history-install-helm.png" width="500px"/> + [View a larger version of this image](/images/release-history-install-helm.png) + + :::note + The Helm installation instructions are customer-specific. Click **View customer list** to navigate to the page for the target customer. For more information, see [Get Customer-Specific Installation Instructions for Helm or Embedded Cluster](#customer-specific) below. 
+   :::
+   </TabItem>
+  </Tabs>
+
+## Get Customer-Specific Installation Instructions for Helm or Embedded Cluster {#customer-specific}
+
+Installation instructions for the Helm CLI and Replicated Embedded Cluster are customer-specific. You can find installation instructions on the page for the target customer.
+
+To get customer-specific Helm or Embedded Cluster installation instructions:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Customers** page and click on the target customer.
+
+1. At the top of the page, click the **Install instructions** dropdown, then click **Helm** or **Embedded cluster**.
+
+   ![Install instructions button](/images/customer-install-instructions-dropdown.png)
+
+   [View a larger version of this image](/images/customer-install-instructions-dropdown.png)
+
+1. In the dialog that opens, follow the instructions to install.
+
+   <Tabs>
+    <TabItem value="helm" label="Helm" default>
+    <p>View the customer-specific Helm CLI installation instructions. For more information about installing with the Helm CLI, see [Installing with Helm](/vendor/install-with-helm).</p>
+    <img alt="Helm install button" src="/images/helm-install-instructions-dialog.png" width="500px"/>
+    [View a larger version of this image](/images/helm-install-instructions-dialog.png)
+    </TabItem>
+    <TabItem value="ec" label="Embedded Cluster" default>
+    <p>View the customer-specific Embedded Cluster installation instructions. For more information about installing with Embedded Cluster, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded).</p>
+    <img alt="Embedded cluster install instructions" src="/images/embedded-cluster-install-dialog-latest.png" width="500px"/>
+    [View a larger version of this image](/images/embedded-cluster-install-dialog-latest.png)
+    </TabItem>
+   </Tabs>
+
+================
+File: docs/vendor/replicated-api-tokens.md
+================
+import TeamTokenNote from "../partials/vendor-api/_team-token-note.mdx"
+
+# Generating API Tokens
+
+This topic describes the available types of API tokens and how to generate them for use with the Replicated CLI and Replicated Vendor API v3.
+
+## About API Tokens
+
+The Vendor API v3 is the API that manages applications in the Replicated Vendor Portal. The Replicated CLI is an implementation of the Vendor API v3.
+
+Using the Replicated CLI and Vendor API v3 requires an API token for authorization. Tokens are primarily used for automated customer, channel, and release management. You create tokens in the Vendor Portal.
+
+The following types of tokens are available:
+
+- [Service Accounts](#service-accounts)
+- [User API Tokens](#user-api-tokens)
+
+<TeamTokenNote/>
+
+### Service Accounts
+
+Service accounts are assigned a token and associated with an RBAC policy. Users with the proper permissions can create, retrieve, or revoke service account tokens. Admin users can assign any RBAC policy to a service account. Non-admin users can only assign their own RBAC policy when they create a service account.
+
+Service accounts are useful for operations that are not tied to a particular user, such as CI/CD or integrations.
+
+Updates to a service account's RBAC policy are automatically applied to its associated token. When a service account is removed, its tokens are also invalidated.
+
+### User API Tokens
+
+User API tokens are private to the user creating the token. User tokens assume the user's account when used, including any RBAC permissions.
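+
+Whichever token type you use, the token is passed in the `Authorization` header when calling the Vendor API v3 directly. For example, the following sketch lists the applications that the token can access (`$REPLICATED_API_TOKEN` is a placeholder for a valid token):
+
+```bash
+curl --header "Authorization: $REPLICATED_API_TOKEN" \
+  https://api.replicated.com/vendor/v3/apps
+```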
+
+Updates to a user's RBAC role are applied to all of the tokens belonging to that user.
+
+Revoking a user token immediately invalidates that token. When a user account is deleted, its user tokens are also deleted.
+
+## Generate Tokens
+
+To use the Replicated CLI or the Vendor API v3, you need a User API token or a Service Account token. Existing team API tokens also continue to work.
+
+### Generate a Service Account
+
+To generate a service account:
+
+1. Log in to the Vendor Portal and select [**Team > Service Accounts**](https://vendor.replicated.com/team/serviceaccounts).
+1. Select **New Service Account**.
+
+1. Edit the fields in the **New Service Account** dialog:
+
+   <img alt="New Service Accounts Dialog" src="/images/service-accounts.png" width="400px"/>
+
+   [View a larger version of this image](/images/service-accounts.png)
+
+   1. For **Nickname**, enter a name for the token. Names for service accounts must be unique within a given team.
+
+   1. For **RBAC**, select the RBAC policy from the dropdown list. The token must have `Admin` access to create new releases.
+
+      This list includes the Vendor Portal default policies `Admin` and `Read Only`. Any custom policies also display in this list. For more information, see [Configuring RBAC Policies](team-management-rbac-configuring).
+
+      Users with a non-admin RBAC role cannot select any other RBAC role when creating a token. They are restricted to creating a token with their same level of access to avoid permission elevation.
+
+   1. (Optional) For custom RBAC policies, select the **Limit to read-only version of above policy** checkbox if you want to use a policy that has Read/Write permissions but limit this service account to read-only access. This option lets you maintain one version of a custom RBAC policy and use it two ways: as read/write and as read-only.
+
+1. Select **Create Service Account**.
+
+1. Copy the service account token and save it in a secure location. The token will not be available to view again.
+
+   :::note
+   To remove a service account, select **Remove** for the service account that you want to delete.
+   :::
+
+### Generate a User API Token
+
+To generate a user API token:
+
+1. Log in to the Vendor Portal and go to the [Account Settings](https://vendor.replicated.com/account-settings) page.
+1. Under **User API Tokens**, select **Create a user API token**. If one or more tokens already exist, you can add another by selecting **New user API token**.
+
+   <img alt="User API Token Page" src="/images/user-token-list.png" width="600px"/>
+
+   [View a larger version of this image](/images/user-token-list.png)
+
+1. In the **New user API token** dialog, enter a name for the token in the **Nickname** field. Names for user API tokens must be unique per user.
+
+   <img alt="Create New User Token Dialog" src="/images/user-token-create.png" width="400px"/>
+
+   [View a larger version of this image](/images/user-token-create.png)
+
+1. Select the required permissions or use the default **Read and Write** permissions. Then select **Create token**.
+
+   :::note
+   The token must have `Read and Write` access to create new releases.
+   :::
+
+1. Copy the user API token that displays and save it in a secure location. The token will not be available to view again.
+
+   :::note
+   To revoke a token, select **Revoke token** for the token that you want to delete.
+   :::
+
+================
+File: docs/vendor/replicated-onboarding.mdx
+================
+import CreateRelease from "../partials/getting-started/_create-promote-release.mdx"
+import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx"
+import EcCr from "../partials/embedded-cluster/_ec-config.mdx"
+import HelmPackage from "../partials/helm/_helm-package.mdx"
+import Requirements from "../partials/embedded-cluster/_requirements.mdx"
+import SDKOverview from "../partials/replicated-sdk/_overview.mdx"
+import TestYourChanges from "../partials/getting-started/_test-your-changes.mdx"
+import UnauthorizedError from "../partials/replicated-sdk/_401-unauthorized.mdx"
+
+# Replicated Onboarding
+
+This topic describes how to onboard applications to the Replicated Platform.
+
+## Before You Begin
+
+This section includes guidance and prerequisites to review before you begin onboarding your application.
+
+### Best Practices and Recommendations
+
+The following are some best practices and recommendations for successfully onboarding with Replicated:
+
+* When integrating new Replicated features with an application, make changes in small iterations and test frequently by installing or upgrading the application in a development environment. This will help you more easily identify and troubleshoot issues. This onboarding workflow will guide you through the process of integrating features in small iterations.
+
+* Use the Replicated CLI to create and manage your application and releases. Getting familiar with the Replicated CLI will also help later on when integrating Replicated workflows into your CI/CD pipelines. For more information, see [Installing the Replicated CLI](/reference/replicated-cli-installing).
+
+* These onboarding tasks assume that you will test the installation of each release on a VM with the Replicated Embedded Cluster installer _and_ in a cluster with the Replicated KOTS installer. If you do not intend to offer existing cluster installations with KOTS (for example, if you intend to support only Embedded Cluster and Helm installations for your users), then you can choose to test with Embedded Cluster only.
+
+* Ask for help from the Replicated community. For more information, see [Getting Help from the Community](#community) below.
+
+### Getting Help from the Community {#community}
+
+The [Replicated community site](https://community.replicated.com/) is a forum where Replicated team members and users can post questions and answers related to working with the Replicated Platform. It is designed to help Replicated users troubleshoot and learn more about common tasks involved with distributing, installing, observing, and supporting their application.
+
+Before posting in the community site, use the search to find existing knowledge base articles related to your question. If you are not able to find an existing article that addresses your question, create a new topic or add a reply to an existing topic so that a member of the Replicated community or team can respond.
+
+To search and participate in the Replicated community, see https://community.replicated.com/.
+
+### Prerequisites
+
+* Create an account in the Vendor Portal. You can either create a new team or join an existing team. For more information, see [Creating a Vendor Account](vendor-portal-creating-account).
+
+* Install the Replicated CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing).
+
+* Complete a basic quick start workflow to create an application with a sample Helm chart and then promote and install releases in a development environment. This helps you get familiar with the process of creating, installing, and updating releases in the Replicated Platform. See [Replicated Quick Start](/vendor/quick-start).
+
+* Ensure that you have access to a VM that meets the requirements for the Replicated Embedded Cluster installer. You will use this VM to test installation with Embedded Cluster.
+
+  Embedded Cluster has the following requirements:
+
+  <Requirements/>
+
+* (Optional) Ensure that you have kubectl access to a Kubernetes cluster. You will use this cluster to test installation with KOTS. If you do not intend to offer existing cluster installations with KOTS (for example, if you intend to support only Embedded Cluster and Helm installations for your users), then you do not need access to a cluster for the main onboarding tasks.
+
+  You can use any cloud provider or tool that you prefer to create a cluster, such as [Replicated Compatibility Matrix](/vendor/testing-how-to), Google Kubernetes Engine (GKE), or minikube.
+
+## Onboard
+
+Complete the tasks in this section to onboard your application. When you are done, you can continue to [Next Steps](#next-steps) to integrate other Replicated features with your application.
+
+### Task 1: Create an Application
+
+To get started with onboarding, first create a new application. This will be the official Vendor Portal application used by your team to create and promote both internal and customer-facing releases.
+
+To create an application:
+
+1. Create a new application using the Replicated CLI or the Vendor Portal. Use an official name for your application. See [Create an Application](/vendor/vendor-portal-manage-app#create-an-application).
+
+   <details>
+     <summary>Can I change the application name in the future?</summary>
+
+     You can change the application name, but you cannot change the application _slug_.
+
+     The Vendor Portal automatically generates and assigns a unique slug for each application based on the application's name. For example, the slug for "Example App" would be `example-app`.
+
+     Application slugs are unique across all of Replicated. This means that, if necessary, the Vendor Portal will append a random word to the end of the slug to ensure uniqueness. For example, `example-app-flowers`.
+   </details>
+
+1. Set the `REPLICATED_APP` environment variable to the unique slug of the application that you created. This will allow you to interact with the application from the Replicated CLI throughout onboarding. See [Set Environment Variables](/reference/replicated-cli-installing#replicated_app) in _Installing the Replicated CLI_.
+
+   For example:
+
+   ```bash
+   export REPLICATED_APP=my-app
+   ```
+
+### Task 2: Connect Your Image Registry
+
+Add credentials for your image registry to the Vendor Portal. This will allow you to use the Replicated proxy registry in a later step so that you can grant proxy access to application images without exposing registry credentials to your customers.
+
+For more information, see [Connecting to an External Registry](/vendor/packaging-private-images).
+
+### Task 3: Add the Replicated SDK and Package your Chart
+
+Next, add the Replicated SDK as a dependency of your Helm chart and package the chart as a `.tgz` archive.
+
+The Replicated SDK is a Helm chart that can be installed as a small service alongside your application. 
The SDK provides access to key Replicated functionality, including an in-cluster API and automatic access to insights and operational telemetry for instances running in customer environments. For more information, see [About the Replicated SDK](/vendor/replicated-sdk-overview). + +To package your Helm chart with the Replicated SDK: + +1. Go to the local directory where your Helm chart is. + +1. In your application Helm chart `Chart.yaml` file, add the YAML below to declare the SDK as a dependency. + + If your application is installed as multiple charts, declare the SDK as a dependency of the chart that customers install first. Do not declare the SDK in more than one chart. For more information, see [Packaging a Helm Chart for a Release](helm-install-release). + + <DependencyYaml/> + +1. Update dependencies and package the chart as a `.tgz` file: + + <HelmPackage/> + + <UnauthorizedError/> + +1. If your application is deployed as multiple Helm charts, package each chart as a separate `.tgz` archive using the `helm package -u PATH_TO_CHART` command. Do not declare the SDK in more than one chart. + +### Task 4: Create the Initial Release with KOTS HelmChart and Embedded Cluster Config {#first-release} + +After packaging your Helm chart, you can create a release. The initial release for your application will include the minimum files required to install a Helm chart with the Embedded Cluster installer: +* The Helm chart `.tgz` archive +* [KOTS HelmChart custom resource](/reference/custom-resource-helmchart-v2) +* [Embedded Cluster Config](/reference/embedded-config) + +If you have multiple charts, you will add each chart archive to the release, plus a corresponding KOTS HelmChart custom resource for each archive. + +:::note +Configuring the KOTS HelmChart custom resource includes several tasks, and involves the use of KOTS template functions. Depending on how many Helm charts your application uses, Replicated recommends that you allow about two to three hours for configuring the HelmChart custom resource and creating and testing your initial release. +::: + +To create the first release for your application: + +1. In the local directory for your Helm chart, create a subdirectory named `manifests` where you will add the files for the release. + +1. In the `manifests` directory: + + 1. Move the `.tgz` chart archive that you packaged. If your application is deployed as multiple Helm charts, move each `.tgz` archive to `manifests`. + + 1. Create an `embedded-cluster.yaml` file with the following default Embedded Cluster Config: + + <EcCr/> + + <details> + <summary>What is the Embedded Cluster Config?</summary> + + The Embedded Cluster Config is required to install with Embedded Cluster. + </details> + + For more information, see [Using Embedded Cluster](/vendor/embedded-overview). + + 1. Create a new YAML file. In this file, configure the KOTS HelmChart custom resource by completing the workflow in [Configuring the HelmChart Custom Resource](helm-native-v2-using). + + <details> + <summary>What is the KOTS HelmChart custom resource?</summary> + + The KOTS HelmChart custom resource is required to install Helm charts with KOTS and Embedded Cluster. As part of configuring the KOTS HelmChart custom resource, you will rewrite image names and add image pull secrets to allow your application images to be accessed through the Replicated proxy registry. + </details> + + 1. 
If your application is deployed as multiple Helm charts, repeat the step above to add a separate HelmChart custom resource for each Helm chart archive in the release. + + 1. If there are values in any of your Helm charts that need to be set for the installation to succeed, you can set those values using the `values` key in the corresponding HelmChart custom resource. See [Setting Helm Values with KOTS](/vendor/helm-optional-value-keys). + + This is a temporary measure to ensure the values get passed to the Helm chart during installation until you configure the Admin Console Config screen in a later onboarding task. If your default Helm values are sufficient for installation, you can skip this step. + + 1. If your application requires that certain components are deployed before the application and as part of the Embedded Cluster itself, then update the Embedded Cluster Config to add [extensions](/reference/embedded-config#extensions). Extensions allow you to provide Helm charts that are deployed before your application. For example, one situation where this is useful is if you want to ship an ingress controller because Embedded Cluster does not include one. + + For more information, see [extensions](/reference/embedded-config#extensions) in _Embedded Cluster Config_. + +1. From the `manifests` directory, create a release and promote it to the Unstable channel. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Managing Releases with the CLI](releases-creating-cli). + + ```bash + replicated release create --yaml-dir . --promote Unstable + ``` + +1. Install the release in your development environment to test: + + 1. Install with Embedded Cluster on a VM. See [Online Installation with Embedded Cluster](/enterprise/installing-embedded). + + 1. (Optional) Install in an existing cluster with KOTS. See [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster). + +After successfully installing the initial release on a VM with Embedded Cluster (and optionally in an existing cluster with KOTS), go to the next task. You will continue to iterate throughout the rest of the onboarding process by creating and promoting new releases, then upgrading to the new version in your development environment. + +### Task 5: Customize the KOTS Admin Console {#admin-console} + +Configure the KOTS Application custom resource to add an application name, icon, and status informers. The name and icon will be displayed in the Admin Console and the Replicated Download Portal. The status informers will be used to display the application status on the Admin Console dashboard. + +To configure the KOTS Application custom resource: + +1. In your `manifests` directory, create a new `kots-app.yaml` file. + +1. In the `kots-app.yaml` file, add the [KOTS Application](/reference/custom-resource-application) custom resource YAML and set the `title`, `icon`, and `statusInformers` fields. 
+
+   **Example:**
+
+   ```yaml
+   apiVersion: kots.io/v1beta1
+   kind: Application
+   metadata:
+     name: gitea
+   spec:
+     title: Gitea
+     # Base64 encoded image string
+     icon: fyJINrigNkt5VsRiub9nXICdsYyVd2NcVvA3ScE5t2rb5JuEeyZnAhmLt9NK63vX1O
+     statusInformers:
+       - deployment/gitea
+   ```
+   For more information, see:
+   * [Customizing the Application Icon](/vendor/admin-console-customize-app-icon)
+   * [Enabling and Understanding Application Status](/vendor/insights-app-status)
+   * [Application](/reference/custom-resource-application)
+   <br/>
+   <details>
+     <summary>Can I preview the icon before installing the release?</summary>
+
+     Yes. The Vendor Portal includes an **Application icon preview** in the **Help** pane on the **Edit release** page.
+
+     ![Icon preview](/images/icon-preview.png)
+
+     [View a larger version of this image](/images/icon-preview.png)
+
+   </details>
+
+1. <CreateRelease/>
+
+1. <TestYourChanges/>
+
+### Task 6: Set Up the Admin Console Config Screen and Map to Helm Values
+
+The KOTS Admin Console Config screen is used to collect required and optional application configuration values from your users. User-supplied values provided on the Config screen can be mapped to your Helm values.
+
+Before you begin this task, you can complete the [Set Helm Values with KOTS](/vendor/tutorial-config-setup) tutorial to learn how to map user-supplied values from the Admin Console Config screen to a Helm chart.
+
+:::note
+Setting up the Admin Console Config screen can include the use of various types of input fields, conditional statements, and KOTS template functions. Depending on your application's configuration options, Replicated recommends that you allow about two to three hours for configuring the Config custom resource and testing the Admin Console Config screen.
+:::
+
+To set up the Admin Console Config screen for your application:
+
+1. In your `manifests` directory, create a new file named `kots-config.yaml`.
+
+1. In `kots-config.yaml`, add the KOTS Config custom resource. Configure the KOTS Config custom resource based on the values that you need to collect from users.
+
+   **Example:**
+
+   ```yaml
+   apiVersion: kots.io/v1beta1
+   kind: Config
+   metadata:
+     name: my-application
+   spec:
+     groups:
+       - name: example_group
+         title: Example Group
+         items:
+         - name: example_item
+           title: Example Item
+           type: text
+           default: "Hello World"
+   ```
+
+   For more information, see:
+   * [Creating and Editing Configuration Fields](/vendor/admin-console-customize-config-screen)
+   * [Using Conditional Statements in Configuration Fields](/vendor/config-screen-conditional)
+   * [Config](/reference/custom-resource-config)
+
+   <br/>
+
+   <details>
+     <summary>Can I preview the Admin Console Config screen before installing the release?</summary>
+
+     Yes. The Vendor Portal includes a **Config preview** in the **Help** pane on the **Edit release** page.
+
+     For example:
+
+     ![Config preview](/images/config-preview.png)
+
+     [View a larger version of this image](/images/config-preview.png)
+   </details>
+
+1. <CreateRelease/>
+
+1. <TestYourChanges/>
+
+1. In `manifests`, open the KOTS HelmChart custom resource that you configured in a previous step. Configure the `values` key of the HelmChart custom resource to map the fields in the KOTS Config custom resource to your Helm values.
+ + For more information, see: + * [Mapping User-Supplied Values](/vendor/config-screen-map-inputs) + * [Tutorial: Set Helm Chart Values with KOTS](/vendor/tutorial-config-setup) + * [Setting Helm Values with KOTS](/vendor/helm-optional-value-keys) + * [`values`](/reference/custom-resource-helmchart-v2#values) in _HelmChart v2_ + +1. <CreateRelease/> + +1. <TestYourChanges/> + +1. Continue to create and test new releases with new config fields until you are ready to move on to the next task. + +### Task 7: Define Preflight Checks + +In the next two tasks, you will add specs for _preflight checks_ and _support bundles_. + +Preflight checks and support bundles are provided by the Troubleshoot open source project, which is maintained by Replicated. Troubleshoot is a kubectl plugin that provides diagnostic tools for Kubernetes applications. For more information, see the open source [Troubleshoot](https://troubleshoot.sh/docs/) documentation. + +Preflight checks and support bundles analyze data from customer environments to provide insights that help users to avoid or troubleshoot common issues with an application: +* **Preflight checks** run before an application is installed to check that the customer environment meets the application requirements. +* **Support bundles** collect troubleshooting data from customer environments to help users diagnose problems with application deployments. + +:::note +Before you begin this task, you can complete the [Add Preflight Checks to a Helm Chart](/vendor/tutorial-preflight-helm-setup) tutorial to learn how to add a preflight spec to a Helm chart in a Kubernetes secret and run the preflight checks before installation. +::: + +To define preflight checks for your application: + +1. In your Helm chart `templates` directory, add a Kubernetes Secret that includes a preflight spec. For more information, see [Defining Preflight Checks](/vendor/preflight-defining). For examples, see [Example Preflight Specs](/vendor/preflight-examples). + :::note + If your application is deployed as multiple Helm charts, add the Secret to the `templates` directory for the chart that is installed first. + ::: + +1. Update dependencies and package the chart as a `.tgz` file: + + <HelmPackage/> + +1. Move the `.tgz` file to the `manifests` directory. + +1. <CreateRelease/> + +1. <TestYourChanges/> + + Preflight checks run automatically during installation. + +1. Continue to create and test new releases with additional preflight checks until you are ready to move on to the next task. + +### Task 8: Add a Support Bundle Spec + +To add the default support bundle spec to your application: + +1. In your Helm chart `templates` directory, add the following YAML to a Kubernetes Secret to enable the default support bundle spec for your application: + + ```yaml + apiVersion: v1 + kind: Secret + metadata: + labels: + troubleshoot.sh/kind: support-bundle + name: example + stringData: + support-bundle-spec: | + apiVersion: troubleshoot.sh/v1beta2 + kind: SupportBundle + metadata: + name: support-bundle + spec: + collectors: [] + analyzers: [] + ``` + :::note + If your application is installed as multiple Helm charts, you can optionally create separate support bundle specs in each chart. The specs are automatically merged when a support bundle is generated. Alternatively, continue with a single support bundle spec and then optionally revisit how you organize your support bundle specs after you finish onboarding. + ::: + +1. 
(Recommended) Replicated recommends that, at a minimum, all support bundle specs include the `logs` collector. This collects logs from running Pods in the cluster.

   **Example:**

   ```yaml
   apiVersion: v1
   kind: Secret
   metadata:
     name: example
     labels:
       troubleshoot.sh/kind: support-bundle
   stringData:
     support-bundle-spec: |-
       apiVersion: troubleshoot.sh/v1beta2
       kind: SupportBundle
       metadata:
         name: example
       spec:
         collectors:
           - logs:
               selector:
                 - app.kubernetes.io/name=myapp
               namespace: {{ .Release.Namespace }}
               limits:
                 maxAge: 720h
                 maxLines: 10000
   ```

   For more information, see:
   * [Adding and Customizing Support Bundles](/vendor/support-bundle-customizing)
   * [Example Support Bundle Specs](/vendor/support-bundle-examples)
   * [Pod Logs](https://troubleshoot.sh/docs/collect/logs/) in the Troubleshoot documentation.

1. (Recommended) Ensure that any preflight checks that you added are also included in your support bundle spec. This ensures that support bundles collect at least the same information that is collected when running preflight checks.

1. Update dependencies and package the chart as a `.tgz` file:

   <HelmPackage/>

1. Move the `.tgz` file to the `manifests` directory.

1. <CreateRelease/>

1. <TestYourChanges/>

   For information about how to generate support bundles, see [Generating Support Bundles](/vendor/support-bundle-generating).

1. (Optional) Customize the support bundle spec by adding additional collectors and analyzers.

### Task 9: Alias Replicated Endpoints with Your Own Domains

Your customers are exposed to several Replicated domains by default. Replicated recommends that you use custom domains to unify the customer's experience with your brand and simplify security reviews.

For more information, see [Using Custom Domains](/vendor/custom-domains-using).

## Next Steps

After completing the main onboarding tasks, Replicated recommends that you also complete the following additional tasks to integrate other Replicated features with your application. You can complete these next recommended tasks in any order and at your own pace.

### Add Support for Helm Installations

Existing KOTS releases that include one or more Helm charts can be installed with the Helm CLI; it is not necessary to create and manage separate releases or channels for each installation method.

To enable Helm installations for Helm charts distributed with Replicated, the only extra step is to add a Secret to your chart to authenticate with the Replicated proxy registry.

This is the same Secret that is passed to KOTS in the HelmChart custom resource using `'{{repl ImagePullSecretName }}'`, which you configured as part of [Task 4: Create and Install the Initial Release](#first-release). So, whereas this Secret is created automatically for KOTS and Embedded Cluster installations, you need to create it and add it to your Helm chart for Helm installations.

:::note
Before you test Helm installations for your application, you can complete the [Deploy a Helm Chart with KOTS and the Helm CLI](tutorial-kots-helm-setup) tutorial to learn how to install a single release with both KOTS and Helm.
:::

To support and test Helm installations:

1. Follow the steps in [Using the Proxy Registry with Helm Installations](/vendor/helm-image-registry) to authenticate with the Replicated proxy registry by creating a Secret with `type: kubernetes.io/dockerconfigjson` in your Helm chart.

1. 
Update dependencies and package the chart as a `.tgz` file:

   <HelmPackage/>

1. Add the `.tgz` file to a release. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Managing Releases with the CLI](releases-creating-cli).

1. Install the release in a cluster with the Helm CLI to test your changes. For more information, see [Installing with Helm](/vendor/install-with-helm).

### Add Support for Air Gap Installations

Replicated Embedded Cluster and KOTS support installations in _air gap_ environments with no outbound internet access. Users can install with Embedded Cluster and KOTS in air gap environments by providing air gap bundles that contain the required images for the installers and for your application.

:::note
Replicated also offers Alpha support for air gap installations with Helm. If you are interested in trying Helm air gap installations and providing feedback, please reach out to your account rep to enable this feature.
:::

To add support for air gap installations:

1. If there are any images for your application that are not listed in your Helm chart, list these images in the `additionalImages` attribute of the KOTS Application custom resource. This ensures that the images are included in the air gap bundle for the release. One common use case for this is applications that use Kubernetes Operators. See [Define Additional Images](/vendor/operator-defining-additional-images).

1. In the KOTS HelmChart custom resource `builder` key, pass any values that are required in order for `helm template` to yield all the images needed to successfully install your application. See [Packaging Air Gap Bundles for Helm Charts](/vendor/helm-packaging-airgap-bundles).

   :::note
   If the default values in your Helm chart already yield all the images needed to successfully deploy, then you do not need to configure the `builder` key.
   :::

   <details>
   <summary>How do I know if I need to configure the `builder` key?</summary>

   When building an air gap bundle, the Vendor Portal templates the Helm charts in a release with `helm template` in order to detect the images that need to be included in the bundle. Images yielded by `helm template` are included in the bundle for the release.

   For many applications, running `helm template` with the default values would not yield all the images required to install. In these cases, vendors can pass the additional values in the `builder` key to ensure that the air gap bundle includes all the necessary images.
   </details>

1. If you have not done so already as part of [Task 4: Create and Install the Initial Release](#first-release), ensure that the `values` key in the KOTS HelmChart custom resource correctly rewrites image names for air gap installations. This is done using the KOTS HasLocalRegistry, LocalRegistryHost, and LocalRegistryNamespace template functions to render the location of the given image in the user's own local registry.

   For more information, see [Rewrite Image Names](/vendor/helm-native-v2-using#rewrite-image-names) in _Configuring the HelmChart Custom Resource v2_.

1. Create and promote a new release with your changes. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Managing Releases with the CLI](releases-creating-cli).

1. In the [Vendor Portal](https://vendor.replicated.com), go to the channel where the release was promoted to build the air gap bundle. 
Do one of the following:
   * If the **Automatically create airgap builds for newly promoted releases in this channel** setting is enabled on the channel, watch for the build status to complete.
   * If automatic air gap builds are not enabled, go to the **Release history** page for the channel and build the air gap bundle manually.

1. Create a customer with the **Airgap Download Enabled** entitlement enabled so that you can test air gap installations. See [Creating and Managing Customers](/vendor/releases-creating-customer).

1. Download the Embedded Cluster air gap installation assets, then install with Embedded Cluster on an air gap VM to test. See [Installing in Air Gap Environments with Embedded Cluster](/enterprise/installing-embedded-air-gap).

1. (Optional) Download the `.airgap` bundle for the release and the air gap bundle for the KOTS Admin Console. You can also download both bundles from the Download Portal for the target customer. Then, install in an existing air gap cluster to test. See [Air Gap Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster-airgapped).

1. (Optional) Follow the steps in [Installing and Updating with Helm in Air Gap Environments (Alpha)](/vendor/helm-install-airgap) to test air gap installation with Helm.

   :::note
   Air gap Helm installations are an Alpha feature. If you are interested in trying Helm air gap installations and providing feedback, please reach out to your account rep to enable this feature.
   :::

### Add Roles for Multi-Node Clusters in Embedded Cluster Installations

The Embedded Cluster Config supports roles for multi-node clusters. One or more roles can be selected and assigned to a node when it is joined to the cluster. Node roles can be used to determine which nodes run the Kubernetes control plane, and to assign application workloads to particular nodes.

For more information, see [roles](/reference/embedded-config#roles) in _Embedded Cluster Config_.

### Add and Map License Entitlements

You can add custom license entitlements for your application in the Vendor Portal. Custom license fields are useful when there is entitlement information that applies to a subset of customers. For example, you can use entitlements to:
* Limit the number of active users permitted
* Limit the number of nodes permitted in a customer's cluster
* Identify a customer on a "Premium" plan that has access to additional features or functionality not available with your base plan

For more information about how to create and assign custom entitlements in the Vendor Portal, see [Managing Customer License Fields](/vendor/licenses-adding-custom-fields) and [Creating and Managing Customers](/vendor/releases-creating-customer).

#### Map Entitlements to Helm Values

You can map license entitlements to your Helm values using KOTS template functions. This can be useful when you need to set certain values based on the user's license information. For more information, see [Using KOTS Template Functions](/vendor/helm-optional-value-keys#using-kots-template-functions) in _Setting Helm Values with KOTS_.

#### Query Entitlements Before Installation and at Runtime

You can add logic to your application to query license entitlements both before deployment and at runtime. For example, you might want to add preflight checks that verify a user's entitlements before installing. Or, you can expose additional product functionality dynamically at runtime based on a customer's entitlements. 
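
For example, if your application includes the Replicated SDK, a workload can query a custom license field at runtime through the SDK's in-cluster API. The following is a minimal sketch; the `num_seats` field name is illustrative:

```bash
# Query the value of a custom license field from the Replicated SDK API,
# which is exposed in-cluster at replicated:3000
curl replicated:3000/api/v1/license/fields/num_seats
```
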

For more information, see:
* [Querying Entitlements with the Replicated SDK API](/vendor/licenses-reference-sdk)
* [Checking Entitlements in Preflights with KOTS Template Functions](/vendor/licenses-referencing-fields)

### Add Application Links to the Admin Console Dashboard

You can add the Kubernetes SIG Application custom resource to your release to add a link to your application from the Admin Console dashboard. This makes it easier for users to access your application after installation.

You can also configure the Kubernetes SIG Application resource to add links to other resources, such as documentation or dashboards.

For more information, see [Adding Application Links to the Dashboard](/vendor/admin-console-adding-buttons-links).

### Update the Preflight and Support Bundle Specs

After adding basic specs for preflights and support bundles, you can continue to add more collectors and analyzers as needed.

Consider the following recommendations and best practices:

* Revisit your preflight and support bundle specs when new support issues arise that are not covered by your existing specs.

* Your support bundles should include all of the same collectors and analyzers that are in your preflight checks. This ensures that support bundles include all the necessary troubleshooting information, including any failures in preflight checks.

* Your support bundles will most likely need to include other collectors and analyzers that are not in your preflight checks. This is because some of the information used for troubleshooting (such as logs) is not necessary when running preflight checks before installation.

* If your application is installed as multiple Helm charts, you can optionally add separate support bundle specs in each chart. This can make it easier to keep the specs up-to-date and to avoid merge conflicts that can be caused when multiple team members contribute to a single, large support bundle spec. When an application has multiple support bundle specs, the specs are automatically merged when generating a support bundle so that only a single support bundle is provided to the user.

The documentation for the open-source Troubleshoot project includes the full list of available collectors and analyzers that you can use. See [All Collectors](https://troubleshoot.sh/docs/collect/all/) and the [Analyze](https://troubleshoot.sh/docs/analyze/) section in the Troubleshoot documentation.

You can also view common examples of collectors and analyzers used in preflight checks and support bundles in [Preflight Spec Examples](preflight-examples) and [Support Bundle Spec Examples](support-bundle-examples).

### Configure Backup and Restore

Enable backup and restore with Velero for your application so that users can back up and restore their KOTS Admin Console and application data.

There are different steps to configure backup and restore for Embedded Cluster and for existing cluster installations with KOTS:
* To configure the disaster recovery feature for Embedded Cluster, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery).
* To configure the snapshots feature for existing cluster KOTS installations, see [Configuring Snapshots](snapshots-configuring-backups).

### Add Custom Metrics

In addition to the built-in insights displayed in the Vendor Portal by default (such as uptime and time to install), you can also configure custom metrics to measure instances of your application running in customer environments. 
Custom metrics can be collected for application instances running in online or air gap environments using the Replicated SDK.

For more information, see [Configuring Custom Metrics](/vendor/custom-metrics).

### Integrate with CI/CD

Replicated recommends that teams integrate the Replicated Platform into their existing development and production CI/CD workflows. This can be useful for automating the processes of creating new releases, promoting releases, and testing releases with the Replicated Compatibility Matrix.

For more information, see:
* [About Integrating with CI/CD](/vendor/ci-overview)
* [About Compatibility Matrix](/vendor/testing-about)
* [Recommended CI/CD Workflows](/vendor/ci-workflows)

### Customize Release Channels

By default, the Vendor Portal includes Unstable, Beta, and Stable channels. You can customize the channels in the Vendor Portal based on your application needs.

Consider the following recommendations:
* Use the Stable channel for your primary release cadence. Releases should be promoted to the Stable channel only as frequently as your average customer can consume new releases. Typically, this is no more than monthly. However, this cadence varies depending on the customer base.
* If you have a SaaS product, you might want to create an "Edge" channel where you promote the latest SaaS releases.
* You can consider a "Long Term Support" channel where you promote new releases less frequently and support those releases for longer.
* It can be useful to create channels for each feature branch so that internal teams reviewing a PR can easily get the installation artifacts as well as review the code. You can automate channel creation as part of a pipeline or Makefile.

For more information, see:
* [About Channels and Releases](/vendor/releases-about)
* [Creating and Editing Channels](/vendor/releases-creating-channels)

### Write Your Documentation

Before distributing your application to customers, ensure that your documentation is up-to-date. In particular, be sure to update the installation documentation to include the procedures and requirements for installing with Embedded Cluster, Helm, and any other installation methods that you support.

For guidance on how to get started with documentation for applications distributed with Replicated, including key considerations, examples, and templates, see [Writing Great Documentation for On-Prem Software Distributed with Replicated](https://www.replicated.com/blog/writing-great-documentation-for-on-prem-software-distributed-with-replicated) in the Replicated blog.

================
File: docs/vendor/replicated-sdk-airgap.mdx
================
# Installing the SDK in Air Gap Environments

This topic explains how to install the Replicated SDK in air gap environments by enabling air gap mode.

## Overview

The Replicated SDK has an _air gap mode_ that allows it to run in environments with no outbound internet access. When installed in air gap mode, the SDK does not attempt to connect to the internet. This avoids any failures that would occur when the SDK is unable to make outbound requests in air gap environments.

Air gap mode is enabled when `isAirgap: true` is set in the values for the SDK Helm chart. For more information, see [Install the SDK in Air Gap Mode](#install) below. 
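
For example, the following is a minimal sketch of the values file form, equivalent to passing `--set replicated.isAirgap=true` at install time:

```yaml
# values passed to the SDK Helm chart
replicated:
  isAirgap: true
```
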
Allowing air gap mode to be controlled with the `isAirgap` value means that vendors and enterprise customers do not need to rely on air gap environments being automatically detected, which is unreliable and error-prone. The `isAirgap` value also allows the SDK to be installed in air gap mode even if the instance can access the internet.

## Differences in Air Gap Mode

Air gap mode differs from non-air gap installations of the SDK in the following ways:
* The SDK stores instance telemetry and custom metrics in a Kubernetes Secret in the customer environment, rather than attempting to send telemetry and custom metrics back to the Replicated Vendor Portal. The telemetry and custom metrics stored in the Secret are collected whenever a support bundle is generated in the environment, and are reported when the support bundle is uploaded to the Vendor Portal. For more information about telemetry for air gap instances, see [Collecting Telemetry for Air Gap Instances](/vendor/telemetry-air-gap).
* The SDK returns an empty array (`[]`) for any requests to check for updates using the [`/api/v1/app/updates`](/reference/replicated-sdk-apis#get-appupdates) SDK API endpoint. This is because the SDK is not able to receive updates from the Vendor Portal when running in air gap environments.
* Instance tags cannot be updated with the [`/app/instance-tags`](/reference/replicated-sdk-apis#post-appinstance-tags) SDK API endpoint.

In air gap mode, the SDK can still make requests to SDK API endpoints that do not require outbound internet access, such as the [`license`](/reference/replicated-sdk-apis#license) endpoints and the [`/app/info`](/reference/replicated-sdk-apis#get-appinfo) endpoint. However, these endpoints will return whatever values were injected into the SDK when the chart was most recently pulled. These values might not match the latest information available in the Vendor Portal because the SDK cannot receive updates when running in air gap environments.

## Install the SDK in Air Gap Mode {#install}

This section describes how to install the Replicated SDK in air gap mode with the Helm CLI and with Replicated KOTS.

### Helm CLI

When the SDK is installed with the Helm CLI, air gap mode can be enabled by passing `--set replicated.isAirgap=true` with the Helm CLI installation command.

For example:

```
helm install gitea oci://registry.replicated.com/my-app/gitea --set replicated.isAirgap=true
```

For more information about Helm CLI installations with Replicated, see [Installing with Helm](/vendor/install-with-helm). For more information about setting Helm values with the `helm install` command, see [Helm Install](https://helm.sh/docs/helm/helm_install/) in the Helm documentation.

:::note
Replicated does not provide air gap bundles for applications installed with the Helm CLI. Air gap bundles are a feature of KOTS.
:::

### KOTS

When the SDK is installed by KOTS in an air gap environment, KOTS automatically sets `isAirgap: true` in the SDK Helm chart values to enable air gap mode. No additional configuration is required.

================
File: docs/vendor/replicated-sdk-customizing.md
================
# Customizing the Replicated SDK

This topic describes various ways to customize the Replicated SDK, including customizing RBAC, setting environment variables, adding tolerations, and more. 

## Customize RBAC for the SDK

This section describes role-based access control (RBAC) for the Replicated SDK, including the default RBAC, minimum RBAC requirements, and how to install the SDK with custom RBAC.

### Default RBAC

The SDK creates default Role, RoleBinding, and ServiceAccount objects during installation. The default Role allows the SDK to get, list, and watch all resources in the namespace, to create Secrets, and to update the `replicated` and `replicated-instance-report` Secrets:

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    {{- include "replicated.labels" . | nindent 4 }}
  name: replicated-role
rules:
- apiGroups:
  - '*'
  resources:
  - '*'
  verbs:
  - 'get'
  - 'list'
  - 'watch'
- apiGroups:
  - ''
  resources:
  - 'secrets'
  verbs:
  - 'create'
- apiGroups:
  - ''
  resources:
  - 'secrets'
  verbs:
  - 'update'
  resourceNames:
  - replicated
  - replicated-instance-report
  - replicated-custom-app-metrics-report
```

### Minimum RBAC Requirements

The SDK requires the following minimum RBAC permissions:
* Create Secrets.
* Get and update Secrets named `replicated`, `replicated-instance-report`, and `replicated-custom-app-metrics-report`.
* For status informers, the SDK requires the following permissions:
  * If you defined custom status informers, then the SDK must have permissions to get, list, and watch all the resources listed in the `replicated.statusInformers` array in your Helm chart `values.yaml` file.
  * If you did _not_ define custom status informers, then the SDK must have permissions to get, list, and watch the following resources:
    * Deployments
    * Daemonsets
    * Ingresses
    * PersistentVolumeClaims
    * Statefulsets
    * Services
  * For any Ingress resources used as status informers, the SDK requires `get` permissions for the Service resources listed in the `backend.Service.Name` field of the Ingress resource.
  * For any Daemonset and Statefulset resources used as status informers, the SDK requires `list` permissions for pods in the namespace.
  * For any Service resources used as status informers, the SDK requires `get` permissions for Endpoint resources with the same name as the service.

  The Replicated Vendor Portal uses status informers to provide application status data. For more information, see [Helm Installations](/vendor/insights-app-status#helm-installations) in _Enabling and Understanding Application Status_.

### Install the SDK with Custom RBAC

#### Custom ServiceAccount

To use the SDK with custom RBAC permissions, provide the name of a custom ServiceAccount object during installation. When a service account is provided, the SDK uses the RBAC permissions granted to the service account and does not create the default Role, RoleBinding, or ServiceAccount objects.

To install the SDK with custom RBAC:

1. Create custom Role, RoleBinding, and ServiceAccount objects. The Role must meet the minimum requirements described in [Minimum RBAC Requirements](#minimum-rbac-requirements) above. For a sketch of what these objects can look like, see the example after these steps.
1. During installation, provide the name of the service account that you created by including `--set replicated.serviceAccountName=CUSTOM_SERVICEACCOUNT_NAME`.

   **Example**:

   ```
   helm install wordpress oci://registry.replicated.com/my-app/beta/wordpress --set replicated.serviceAccountName=mycustomserviceaccount
   ```

   For more information about installing with Helm, see [Installing with Helm](/vendor/install-with-helm). 
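
The following is a minimal sketch of custom RBAC objects that satisfy the minimum requirements described above. The object names (`mycustomserviceaccount`, `mycustomrole`, `mycustomrolebinding`) are illustrative:

```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: mycustomserviceaccount
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: mycustomrole
rules:
# Status informers: get, list, and watch the resources that report application status
- apiGroups: ['', 'apps', 'networking.k8s.io']
  resources:
  - deployments
  - daemonsets
  - statefulsets
  - services
  - endpoints
  - pods
  - ingresses
  - persistentvolumeclaims
  verbs: ['get', 'list', 'watch']
# The SDK must be able to create Secrets
- apiGroups: ['']
  resources: ['secrets']
  verbs: ['create']
# The SDK must be able to get and update its report Secrets
- apiGroups: ['']
  resources: ['secrets']
  verbs: ['get', 'update']
  resourceNames:
  - replicated
  - replicated-instance-report
  - replicated-custom-app-metrics-report
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: mycustomrolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: mycustomrole
subjects:
- kind: ServiceAccount
  name: mycustomserviceaccount
```
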

#### Custom ClusterRole

To use the SDK with an existing ClusterRole, provide the name of a custom ClusterRole object during installation. When a cluster role is provided, the SDK uses the RBAC permissions granted to the cluster role and does not create the default RoleBinding. Instead, the SDK creates a ClusterRoleBinding as well as a ServiceAccount object.

To install the SDK with a custom ClusterRole:

1. Create a custom ClusterRole object. The ClusterRole must meet at least the minimum requirements described in [Minimum RBAC Requirements](#minimum-rbac-requirements) above. However, it can also provide additional permissions that can be used by the SDK, such as listing cluster Nodes.
1. During installation, provide the name of the cluster role that you created by including `--set replicated.clusterRole=CUSTOM_CLUSTERROLE_NAME`.

   **Example**:

   ```
   helm install wordpress oci://registry.replicated.com/my-app/beta/wordpress --set replicated.clusterRole=mycustomclusterrole
   ```

   For more information about installing with Helm, see [Installing with Helm](/vendor/install-with-helm).

## Set Environment Variables {#env-var}

The Replicated SDK provides a `replicated.extraEnv` value that allows users to set additional environment variables for the deployment that are not exposed as Helm values.

This ensures that users can set the environment variables that they require without the SDK Helm chart needing to be modified to expose the values. For example, if the SDK is running behind an HTTP proxy server, then the user could set `HTTP_PROXY` or `HTTPS_PROXY` environment variables to provide the hostname or IP address of their proxy server.

To add environment variables to the Replicated SDK deployment, include the `replicated.extraEnv` array in your Helm chart `values.yaml` file. The `replicated.extraEnv` array accepts a list of environment variables in the following format:

```yaml
# Helm chart values.yaml

replicated:
  extraEnv:
  - name: ENV_VAR_NAME
    value: ENV_VAR_VALUE
```

:::note
If the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` variables are configured with the [kots install](/reference/kots-cli-install) command, these variables will also be set automatically in the Replicated SDK.
:::

**Example**:

```yaml
# Helm chart values.yaml

replicated:
  extraEnv:
  - name: MY_ENV_VAR
    value: my-value
  - name: MY_ENV_VAR_2
    value: my-value-2
```

## Custom Certificate Authority

When installing the Replicated SDK behind a proxy server that terminates TLS and injects a custom certificate, you must provide the CA to the SDK. This can be done by storing the CA in a ConfigMap or a Secret prior to installation and providing appropriate values during installation.

### Using a ConfigMap

To use a CA stored in a ConfigMap:

1. Create a ConfigMap with the CA as the data value. Note that the name of the ConfigMap and the data key can be anything.
   ```bash
   kubectl -n <NAMESPACE> create configmap private-ca --from-file=ca.crt=./ca.crt
   ```
1. Add the name of the ConfigMap to the values file:
   ```yaml
   replicated:
     privateCAConfigmap: private-ca
   ```

:::note
If the `--private-ca-configmap` flag is used with the [kots install](/reference/kots-cli-install) command, this value will be populated in the Replicated SDK automatically.
:::

### Using a Secret

To use a CA stored in a Secret:

1. Create a Secret with the CA as a data value. Note that the name of the Secret and the key can be anything. 
   ```bash
   kubectl -n <NAMESPACE> create secret generic private-ca --from-file=ca.crt=./ca.crt
   ```
1. Add the name of the secret and the key to the values file:
   ```yaml
   replicated:
     privateCASecret:
       name: private-ca
       key: ca.crt
   ```

## Add Tolerations

The Replicated SDK provides a `replicated.tolerations` value that allows users to add custom tolerations to the deployment. For more information about tolerations, see [Taints and Tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) in the Kubernetes documentation.

To add tolerations to the Replicated SDK deployment, include the `replicated.tolerations` array in your Helm chart `values.yaml` file. The `replicated.tolerations` array accepts a list of tolerations in the following format:

```yaml
# Helm chart values.yaml

replicated:
  tolerations:
  - key: "key"
    operator: "Equal"
    value: "value"
    effect: "NoSchedule"
```

## Add Affinity

The Replicated SDK provides a `replicated.affinity` value that allows users to add custom affinity to the deployment. For more information about affinity, see [Affinity and anti-affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) in the Kubernetes documentation.

To add affinity to the Replicated SDK deployment, include the `replicated.affinity` map in your Helm chart `values.yaml` file. The `replicated.affinity` map accepts a standard Kubernetes affinity object in the following format:

```yaml
# Helm chart values.yaml

replicated:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: production/node-pool
            operator: In
            values:
            - private-node-pool
```

## Add Custom Labels

With the Replicated SDK version 1.1.0 and later, you can pass custom labels to the Replicated SDK Helm chart by setting the `replicated.commonLabels` and `replicated.podLabels` Helm values in your Helm chart.

### Requirement

The `replicated.commonLabels` and `replicated.podLabels` values are available with the Replicated SDK version 1.1.0 and later.

### commonLabels

The `replicated.commonLabels` value allows you to add one or more labels to all resources created by the SDK chart.

For example:

```yaml
# Helm chart values.yaml

replicated:
  commonLabels:
    environment: production
    team: platform
```

### podLabels

The `replicated.podLabels` value allows you to add pod-specific labels to the pod template.

For example:

```yaml
# Helm chart values.yaml

replicated:
  podLabels:
    monitoring: enabled
    custom.company.io/pod-label: value
```

================
File: docs/vendor/replicated-sdk-development.mdx
================
import IntegrationMode from "../partials/replicated-sdk/_integration-mode-install.mdx"

# Developing Against the SDK API

This topic describes how to develop against the SDK API to test changes locally. It includes information about installing the SDK in integration mode and port forwarding the SDK API service to your local machine. For more information about the SDK API, see [Replicated SDK API](/reference/replicated-sdk-apis).

## Install the SDK in Integration Mode

<IntegrationMode/>

## Port Forwarding the SDK API Service {#port-forward}

After the Replicated SDK is installed and initialized in a cluster, the Replicated SDK API is exposed at `replicated:3000`. You can access the SDK API for testing by forwarding port 3000 to your local machine. 
+ +To port forward the SDK API service to your local machine: + +1. Run the following command to port forward to the SDK API service: + + ```bash + kubectl port-forward service/replicated 3000 + ``` + ``` + Forwarding from 127.0.0.1:3000 -> 3000 + Forwarding from [::1]:3000 -> 3000 + ``` + +1. With the port forward running, test the SDK API endpoints as desired. For example: + + ```bash + curl localhost:3000/api/v1/license/fields/expires_at + curl localhost:3000/api/v1/license/fields/{field} + ``` + + For more information, see [Replicated SDK API](/reference/replicated-sdk-apis). + + :::note + When the SDK is installed in integration mode, requests to the `license` endpoints use your actual development license data, while requests to the `app` endpoints use the default mock data. + ::: + +================ +File: docs/vendor/replicated-sdk-installing.mdx +================ +import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" +import KotsVerReq from "../partials/replicated-sdk/_kots-version-req.mdx" +import RegistryLogout from "../partials/replicated-sdk/_registry-logout.mdx" +import IntegrationMode from "../partials/replicated-sdk/_integration-mode-install.mdx" + +# Installing the Replicated SDK + +This topic describes the methods for distributing and installing the Replicated SDK. + +It includes information about how to install the SDK alongside Helm charts or Kubernetes manifest-based applications using the Helm CLI or a Replicated installer (Replicated KOTS, kURL, Embedded Cluster). It also includes information about installing the SDK as a standalone component in integration mode. + +For information about installing the SDK in air gap mode, see [Installing the SDK in Air Gap Environments](replicated-sdk-airgap). + +## Requirement + +<KotsVerReq/> + +## Install the SDK as a Subchart + +When included as a dependency of your application Helm chart, the SDK is installed as a subchart alongside the application. + +To install the SDK as a subchart: + +1. In your application Helm chart `Chart.yaml` file, add the YAML below to declare the SDK as a dependency. If your application is installed as multiple charts, declare the SDK as a dependency of the chart that customers install first. Do not declare the SDK in more than one chart. + + <DependencyYaml/> + +1. Update the `charts/` directory: + + ``` + helm dependency update + ``` + :::note + <RegistryLogout/> + ::: + +1. Package the Helm chart into a `.tgz` archive: + + ``` + helm package . + ``` + +1. Add the chart archive to a new release. For more information, see [Managing Releases with the CLI](/vendor/releases-creating-cli) or [Managing Releases with the Vendor Portal](/vendor/releases-creating-releases). + +1. (Optional) Add a KOTS HelmChart custom resource to the release to support installation with Embedded Cluster, KOTS, or kURL. For more information, see [Configuring the HelmChart Custom Resource v2](/vendor/helm-native-v2-using). + +1. Save and promote the release to an internal-only channel used for testing, such as the default Unstable channel. + +1. Install the release using Helm or a Replicated installer. For more information, see: + * [Online Installation with Embedded Cluster](/enterprise/installing-embedded) + * [Installing with Helm](/vendor/install-with-helm) + * [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster) + * [Online Installation with kURL](/enterprise/installing-kurl) + +1. 
Confirm that the SDK was installed by checking that the `replicated` Deployment was created:

   ```
   kubectl get deploy --namespace NAMESPACE
   ```
   Where `NAMESPACE` is the namespace in the cluster where the application and the SDK are installed.

   **Example output**:

   ```
   NAME         READY   UP-TO-DATE   AVAILABLE   AGE
   my-app       1/1     1            1           35s
   replicated   1/1     1            1           35s
   ```

## Install the SDK Alongside a Kubernetes Manifest-Based Application {#manifest-app}

For applications that use Kubernetes manifest files instead of Helm charts, the SDK Helm chart can be added to a release and then installed by KOTS alongside the application.

<KotsVerReq/>

To add the SDK Helm chart to a release for a Kubernetes manifest-based application:

1. Install the Helm CLI using Homebrew:

   ```
   brew install helm
   ```
   For more information, including alternative installation options, see [Install Helm](https://helm.sh/docs/intro/install/) in the Helm documentation.

1. Download the `.tgz` chart archive for the SDK Helm chart:

   ```
   helm pull oci://registry.replicated.com/library/replicated --version SDK_VERSION
   ```
   Where `SDK_VERSION` is the version of the SDK to install. For a list of available SDK versions, see the [replicated-sdk repository](https://github.com/replicatedhq/replicated-sdk/tags) in GitHub.

   The output of this command is a `.tgz` file with the naming convention `CHART_NAME-CHART_VERSION.tgz`. For example, `replicated-1.1.1.tgz`.

   For more information and additional options, see [Helm Pull](https://helm.sh/docs/helm/helm_pull/) in the Helm documentation.

1. Add the SDK `.tgz` chart archive to a new release. For more information, see [Managing Releases with the CLI](/vendor/releases-creating-cli) or [Managing Releases with the Vendor Portal](/vendor/releases-creating-releases).

   The following shows an example of the SDK Helm chart added to a draft release for a standard manifest-based application:

   ![SDK Helm chart in a draft release](/images/sdk-kots-release.png)

   [View a larger version of this image](/images/sdk-kots-release.png)

1. If one was not created automatically, add a KOTS HelmChart custom resource to the release. HelmChart custom resources have `apiVersion: kots.io/v1beta2` and `kind: HelmChart`.

   **Example:**

   ```yaml
   apiVersion: kots.io/v1beta2
   kind: HelmChart
   metadata:
     name: replicated
   spec:
     # chart identifies a matching chart from a .tgz
     chart:
       # for name, enter replicated
       name: replicated
       # for chartversion, enter the version of the
       # SDK Helm chart in the release
       chartVersion: 1.1.1
   ```

   As shown in the example above, the HelmChart custom resource requires the name and version of the SDK Helm chart that you added to the release:
   * **`chart.name`**: The name of the SDK Helm chart is `replicated`. You can find the chart name in the `name` field of the SDK Helm chart `Chart.yaml` file.
   * **`chart.chartVersion`**: The chart version varies depending on the version of the SDK that you pulled and added to the release. You can find the chart version in the `version` field of the SDK Helm chart `Chart.yaml` file.

   For more information about configuring the HelmChart custom resource to support KOTS installations, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about) and [HelmChart v2](/reference/custom-resource-helmchart-v2).

1. Save and promote the release to an internal-only channel used for testing, such as the default Unstable channel.

1. 
Install the release using a Replicated installer. For more information, see:
   * [Online Installation with Embedded Cluster](/enterprise/installing-embedded)
   * [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster)
   * [Online Installation with kURL](/enterprise/installing-kurl)

1. Confirm that the SDK was installed by checking that the `replicated` Deployment was created:

   ```
   kubectl get deploy --namespace NAMESPACE
   ```
   Where `NAMESPACE` is the namespace in the cluster where the application, the Admin Console, and the SDK are installed.

   **Example output**:

   ```
   NAME         READY   UP-TO-DATE   AVAILABLE   AGE
   kotsadm      1/1     1            1           112s
   my-app       1/1     1            1           28s
   replicated   1/1     1            1           27s
   ```

## Install the SDK in Integration Mode

<IntegrationMode/>

## Troubleshoot

### 401 Unauthorized Error When Updating Helm Dependencies {#401}

#### Symptom

You see an error message similar to the following after adding the Replicated SDK as a dependency in your Helm chart and then running `helm dependency update`:

```
Error: could not download oci://registry.replicated.com/library/replicated-sdk: failed to authorize: failed to fetch oauth token: unexpected status from GET request to https://registry.replicated.com/v2/token?scope=repository%3Alibrary%2Freplicated-sdk%3Apull&service=registry.replicated.com: 401 Unauthorized
```

#### Cause

When you run `helm dependency update`, Helm attempts to pull the Replicated SDK chart from the Replicated registry. An error can occur if you are already logged in to the Replicated registry with a license that has expired, such as when testing application releases.

#### Solution

To solve this issue:

1. Run the following command to remove login credentials for the Replicated registry:

   ```
   helm registry logout registry.replicated.com
   ```

1. Re-run `helm dependency update` for your Helm chart.

================
File: docs/vendor/replicated-sdk-overview.mdx
================
import SDKOverview from "../partials/replicated-sdk/_overview.mdx"
import SdkValues from "../partials/replicated-sdk/_sdk-values.mdx"

# About the Replicated SDK

This topic provides an introduction to using the Replicated SDK with your application.

## Overview

<SDKOverview/>

For more information about the Replicated SDK API, see [Replicated SDK API](/reference/replicated-sdk-apis). For information about developing against the SDK API locally, see [Developing Against the SDK API](replicated-sdk-development).

## Limitations

The Replicated SDK has the following limitations:

* Some popular enterprise continuous delivery tools, such as ArgoCD and Pulumi, deploy Helm charts by running `helm template` then `kubectl apply` on the generated manifests, rather than running `helm install` or `helm upgrade`. The following limitations apply to applications installed by running `helm template` then `kubectl apply`:

  * The `/api/v1/app/history` SDK API endpoint always returns an empty array because there is no Helm history in the cluster. See [GET /app/history](/reference/replicated-sdk-apis#get-apphistory) in _Replicated SDK API_.

  * The SDK does not automatically generate status informers to report status data for installed instances of the application. To get instance status data, you must enable custom status informers by overriding the `replicated.statusInformers` Helm value. 
See [Enable Application Status Insights](/vendor/insights-app-status#enable-application-status-insights) in _Enabling and Understanding Application Status_.

## SDK Resiliency

At startup and when serving requests, the SDK retrieves and caches the latest information from the upstream Replicated APIs, including customer license information.

If the upstream APIs are not available at startup, the SDK does not accept connections or serve requests until it is able to communicate with the upstream APIs. If communication fails, the SDK retries every 10 seconds and the SDK pod remains at `0/1` ready.

When serving requests, if the upstream APIs become unavailable, the SDK serves from the memory cache and sets the `X-Replicated-Served-From-Cache` header to `true`. Additionally, rapid successive requests to the same SDK endpoint with the same request properties are rate-limited, returning the last cached payload and status code without reaching out to the upstream APIs. In this case, an `X-Replicated-Rate-Limited` header is set to `true`.

## Replicated SDK Helm Values

<SdkValues/>

================
File: docs/vendor/replicated-sdk-slsa-validating.md
================
# SLSA Provenance Validation Process for the Replicated SDK

This topic describes the process to perform provenance validation on the Replicated SDK.

## About Supply Chain Levels for Software Artifacts (SLSA)

[Supply Chain Levels for Software Artifacts (SLSA)](https://slsa.dev/), pronounced “salsa,” is a security framework that comprises standards and controls designed to prevent tampering, enhance integrity, and secure software packages and infrastructure.


## Purpose of Attestations
Attestations enable the inspection of an image to determine its origin, the identity of its creator, the creation process, and its contents. When building software using the Replicated SDK, the image’s Software Bill of Materials (SBOM) and SLSA-based provenance attestations empower your customers to make informed decisions regarding the impact of an image on the supply chain security of your application. This process ultimately enhances the security and assurances provided to both vendors and end customers.

## Prerequisite
Before you perform these tasks, you must install [slsa-verifier](https://github.com/slsa-framework/slsa-verifier) and [crane](https://github.com/google/go-containerregistry/blob/main/cmd/crane/doc/crane.md).

## Validate the SDK SLSA Attestations

The Replicated SDK build process utilizes Wolfi-based images to minimize the number of CVEs. The build process automatically generates SBOMs and attestations, and then publishes the image along with these metadata components. For instance, you can find all the artifacts readily available on [DockerHub](https://hub.docker.com/r/replicated/replicated-sdk/tags). The following shell script is a tool to easily validate the SLSA attestations for a given Replicated SDK image.

```
#!/bin/bash

# This script verifies the SLSA metadata of a container image
#
# Requires
# - slsa-verifier (https://github.com/slsa-framework/slsa-verifier)
# - crane (https://github.com/google/go-containerregistry/blob/main/cmd/crane/doc/crane.md)
#


# Define the image and version to verify
VERSION=v1.0.0-beta.20
IMAGE=replicated/replicated-sdk:${VERSION}

# expected source repository that should have produced the artifact, e.g. 
github.com/some/repo
SOURCE_REPO=github.com/replicatedhq/replicated-sdk


# Use `crane` to retrieve the digest of the image without pulling the image
IMAGE_WITH_DIGEST="${IMAGE}@"$(crane digest "${IMAGE}")

echo "Verifying artifact"
echo "Image: ${IMAGE_WITH_DIGEST}"
echo "Source Repo: ${SOURCE_REPO}"

slsa-verifier verify-image "${IMAGE_WITH_DIGEST}" \
  --source-uri ${SOURCE_REPO} \
  --source-tag ${VERSION}

```

================
File: docs/vendor/resources-annotations-templating.md
================
# Templating Annotations

This topic describes how to use Replicated KOTS template functions to template annotations for resources and objects based on user-supplied values.

## Overview

It is common for users to need to set custom annotations for a resource or object deployed by your application. For example, you might need to allow your users to provide annotations to apply to a Service or Ingress object in public cloud environments.

For applications installed with Replicated KOTS, you can apply user-supplied annotations to resources or objects by first adding a field to the Replicated Admin Console **Config** page where users can enter one or more annotations. For information about how to add fields on the **Config** page, see [Creating and Editing Configuration Fields](/vendor/admin-console-customize-config-screen).

You can then map these user-supplied values from the **Config** page to resources and objects in your release using KOTS template functions. KOTS template functions are a set of custom template functions based on the Go text/template library that can be used to generate values specific to customer environments. The template functions in the Config context return user-supplied values on the **Config** page.

For more information about KOTS template functions in the Config context, see [Config Context](/reference/template-functions-config-context). For more information about the Go library, see [text/template](https://pkg.go.dev/text/template) in the Go documentation.

## About `kots.io/placeholder`

For applications installed with KOTS that use standard Kubernetes manifests, the `kots.io/placeholder` annotation allows you to template annotations in resources and objects without breaking the base YAML or needing to include the annotation key.

The `kots.io/placeholder` annotation uses the format `kots.io/placeholder 'bool' 'string'`. For example:

```yaml
# Example manifest file

annotations:
  kots.io/placeholder: |-
    repl{{ ConfigOption "additional_annotations" | nindent 4 }}
```

:::note
For Helm chart-based applications installed with KOTS, Replicated recommends that you map user-supplied annotations to the Helm chart `values.yaml` file using the Replicated HelmChart custom resource, rather than using `kots.io/placeholder`. This allows you to access user-supplied values in your Helm chart without needing to include KOTS template functions directly in the Helm chart templates.

For an example, see [Map User-Supplied Annotations to Helm Chart Values](#map-user-supplied-annotations-to-helm-chart-values) below.
:::

## Annotation Templating Examples

This section includes common examples of templating annotations in resources and objects to map user-supplied values.

For additional examples of how to map values to Helm chart-based applications, see [Applications](https://github.com/replicatedhq/platform-examples/tree/main/applications) in the platform-examples repository in GitHub. 

### Map Multiple Annotations from a Single Configuration Field

You can map one or more annotations from a single `textarea` field on the **Config** page. The `textarea` type defines multi-line text input and supports properties such as `rows` and `cols`. For more information, see [textarea](/reference/custom-resource-config#textarea) in _Config_.

For example, the following Config custom resource adds an `ingress_annotations` field of type `textarea`:

```yaml
# Config custom resource

apiVersion: kots.io/v1beta1
kind: Config
metadata:
  name: config
spec:
  groups:
  - name: ingress_settings
    title: Ingress Settings
    description: Configure Ingress
    items:
    - name: ingress_annotations
      type: textarea
      title: Ingress Annotations
      help_text: See your cloud provider’s documentation for the required annotations.
```

On the **Config** page, users can enter one or more key-value pairs in the `ingress_annotations` field, as shown in the example below:

![Config page with custom annotations in an Ingress Annotations field](/images/config-map-annotations.png)

[View a larger version of this image](/images/config-map-annotations.png)

The following Ingress object uses the `kots.io/placeholder` annotation with the ConfigOption template function to render the user-supplied values from the `ingress_annotations` field:

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example-annotation
  annotations:
    kots.io/placeholder: |-
      repl{{ ConfigOption "ingress_annotations" | nindent 4 }}
```

During installation, KOTS renders the YAML with the multi-line input from the configuration field as shown below:

```yaml
# Rendered Ingress object
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example-annotation
  annotations:
    kots.io/placeholder: |-

      key1: value1
      key2: value2
      key3: value3
```

### Map Annotations from Multiple Configuration Fields

You can specify multiple annotations using the same `kots.io/placeholder` annotation.

For example, the following Ingress object includes ConfigOption template functions that render the user-supplied values for the `ingress_annotation` and `ingress_hostname` fields:

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example-annotation
  annotations:
    kots.io/placeholder: |-
      repl{{ ConfigOption "ingress_annotation" | nindent 4 }}
      repl{{ printf "my.custom/annotation.ingress.hostname: %s" (ConfigOption "ingress_hostname") | nindent 4 }}
```

During installation, KOTS renders the YAML as shown below:

```yaml
# Rendered Ingress object

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example-annotation
  annotations:
    kots.io/placeholder: |-

      key1: value1
      my.custom/annotation.ingress.hostname: example.hostname.com
```

### Map User-Supplied Value to a Key

You can map a user-supplied value from the **Config** page to a pre-defined annotation key.

For example, in the following Ingress object, `my.custom/annotation.ingress.hostname` is the key for the templated annotation. 
The annotation also uses the ConfigOption template function to map the user-supplied value from an `ingress_hostname` configuration field:

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example-annotation
  annotations:
    kots.io/placeholder: |-
      repl{{ printf "my.custom/annotation.ingress.hostname: %s" (ConfigOption "ingress_hostname") | nindent 4 }}
```

During installation, KOTS renders the YAML as shown below:

```yaml
# Rendered Ingress object

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example-annotation
  annotations:
    kots.io/placeholder: |-

      my.custom/annotation.ingress.hostname: example.hostname.com
```

### Include Conditional Statements in Templated Annotations

You can include or exclude templated annotations based on a conditional statement.

For example, the following Ingress object includes a conditional statement for `kots.io/placeholder` that renders `my.custom/annotation.class: somevalue` if the user enables a `custom_annotation` field on the **Config** page:

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: myapp
  labels:
    app: myapp
  annotations:
    kots.io/placeholder: |-
      repl{{if ConfigOptionEquals "custom_annotation" "1" }}repl{{ printf "my.custom/annotation.class: somevalue" | nindent 4 }}repl{{end}}
spec:
...
```

During installation, if the user enables the `custom_annotation` configuration field, KOTS renders the YAML as shown below:

```yaml
# Rendered Ingress object

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: myapp
  labels:
    app: myapp
  annotations:
    kots.io/placeholder: |-
      my.custom/annotation.class: somevalue
spec:
...
```

Alternatively, if the condition evaluates to false, the annotation does not appear in the rendered YAML:

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: myapp
  labels:
    app: myapp
  annotations:
    kots.io/placeholder: |-
spec:
...
```

### Map User-Supplied Annotations to Helm Chart Values

For Helm chart-based applications installed with KOTS, Replicated recommends that you map user-supplied annotations to the Helm chart `values.yaml` file, rather than using `kots.io/placeholder`. This allows you to access user-supplied values in your Helm chart without needing to include KOTS template functions directly in the Helm chart templates.

To map user-supplied annotations from the **Config** page to the Helm chart `values.yaml` file, you use the `values` field of the Replicated HelmChart custom resource. For more information, see [values](/reference/custom-resource-helmchart-v2#values) in _HelmChart v2_. 

For example, the following HelmChart custom resource uses a ConfigOption template function in `values.services.myservice.annotations` to map the value of a configuration field named `additional_annotations`:

```yaml
# HelmChart custom resource

apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: myapp
spec:
  values:
    services:
      myservice:
        annotations: repl{{ ConfigOption "additional_annotations" | nindent 10 }}
```

The `values.services.myservice.annotations` field in the HelmChart custom resource corresponds to a `services.myservice.annotations` field in the `values.yaml` file of the application Helm chart, as shown in the example below:

```yaml
# Helm chart values.yaml

services:
  myservice:
    annotations: {}
```

During installation, the ConfigOption template function in the HelmChart custom resource renders the user-supplied values from the `additional_annotations` configuration field.

Then, KOTS replaces the value in the corresponding field in the `values.yaml` in the chart archive, as shown in the example below.

```yaml
# Rendered Helm chart values.yaml

services:
  myservice:
    annotations:
      key1: value1
```

In your Helm chart templates, you can access these values from the `values.yaml` file to apply the user-supplied annotations to the target resources or objects. For information about how to access values from a `values.yaml` file, see [Values Files](https://helm.sh/docs/chart_template_guide/values_files/) in the Helm documentation.

================
File: docs/vendor/snapshots-configuring-backups.md
================
# Configuring Snapshots

This topic provides information about how to configure the Velero Backup resource to enable Replicated KOTS snapshots for an application.

For more information about snapshots, see [About Backup and Restore with snapshots](/vendor/snapshots-overview).

## Configure Snapshots

Add a Velero Backup custom resource (`kind: Backup`, `apiVersion: velero.io/v1`) to your release and configure it as needed. After configuring the Backup resource, add annotations for each volume that you want to be included in backups.

To configure snapshots for your application:

1. In a new release containing your application files, add a Velero Backup resource (`kind: Backup` and `apiVersion: velero.io/v1`):

   ```yaml
   apiVersion: velero.io/v1
   kind: Backup
   metadata:
     name: backup
   spec: {}
   ```

1. Configure the Backup resource to specify the resources that will be included in backups.

   For more information about the Velero Backup resource, including limitations, the list of supported fields for snapshots, and an example, see [Velero Backup Resource for Snapshots](/reference/custom-resource-backup).

1. (Optional) Configure backup and restore hooks. For more information, see [Configuring Backup and Restore Hooks for Snapshots](snapshots-hooks).

1. For each volume that requires a backup, add the `backup.velero.io/backup-volumes` annotation. The annotation value is a comma-separated list of the volumes to include in the backup.

   <details>
   <summary>Why do I need to use the backup annotation?</summary>
   <p>By default, no volumes are included in the backup. 
If any pods mount a volume that should be backed up, you must configure the backup with an annotation listing the specific volumes to include in the backup.</p>
+   </details>
+
+   **Example:**
+
+   In the following Deployment manifest file, `pvc-volume` is the only volume that is backed up. The `scratch` volume is not included in the backup because it is not listed in the annotation on the pod specification.
+
+   ```yaml
+   apiVersion: apps/v1
+   kind: Deployment
+   metadata:
+     name: sample
+     labels:
+       app: foo
+   spec:
+     replicas: 1
+     selector:
+       matchLabels:
+         app: foo
+     template:
+       metadata:
+         labels:
+           app: foo
+         annotations:
+           backup.velero.io/backup-volumes: pvc-volume
+       spec:
+         containers:
+         - image: k8s.gcr.io/test-webserver
+           name: test-webserver
+           volumeMounts:
+           - name: pvc-volume
+             mountPath: /volume-1
+           - name: scratch
+             mountPath: /volume-2
+         volumes:
+         - name: pvc-volume
+           persistentVolumeClaim:
+             claimName: test-volume-claim
+         - name: scratch
+           emptyDir: {}
+   ```
+
+1. (Optional) Configure manifest exclusions. By default, Velero also backs up all of the Kubernetes objects in the namespace.
+
+   To exclude any manifest file, add a [`velero.io/exclude-from-backup=true`](https://velero.io/docs/v1.5/resource-filtering/#veleroioexclude-from-backuptrue) label to the manifest to be excluded. The following example shows the Secret manifest file with the `velero.io/exclude-from-backup` label:
+
+   ```yaml
+   apiVersion: v1
+   kind: Secret
+   metadata:
+     name: sample
+     labels:
+       velero.io/exclude-from-backup: "true"
+   stringData:
+     uri: Secret To Not Include
+   ```
+
+1. If you distribute multiple applications with Replicated, repeat these steps for each application. Each application must have its own Backup resource to be included in a full backup with snapshots.
+
+1. (kURL Only) If your application supports installation with Replicated kURL, Replicated recommends that you include the kURL Velero add-on so that customers do not have to manually install Velero in the kURL cluster. For more information, see [Creating a kURL Installer](packaging-embedded-kubernetes).
+
+================
+File: docs/vendor/snapshots-hooks.md
+================
+# Configuring Backup and Restore Hooks for Snapshots
+
+This topic describes the use of custom backup and restore hooks and demonstrates a common example.
+
+## About Backup and Restore Hooks
+
+Velero supports the use of backup hooks and restore hooks.
+
+Your application workload might require additional processing or scripts to be run before or after creating a backup to prepare the system for a backup. Many application workloads also require additional processing or scripts to run during or after the restore process.
+
+Some common examples of how to use backup and restore hooks are:
+- Run `pg_dump` to export a Postgres database prior to backup
+- Lock a file before running a backup, and unlock it immediately after
+- Delete temporary files that should not be backed up
+- Restore a database file only if that file exists
+- Perform required setup tasks in a restored Pod before the application containers can start
+
+Additionally, for embedded clusters created by Replicated kURL, you must write custom backup and restore hooks to enable backups for any object-stored data that is not KOTS-specific and does not use persistentVolumeClaims (PVCs). For more information about object-stored data, see [Other Object Stored Data](snapshots-overview#other-object-stored-data) in _Backup and Restore_.
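+
+To make the hook format concrete before the detailed example later in this topic, the following is a minimal sketch of how a pre-backup hook could be expressed as Velero annotations directly on a Pod. The container name, database credentials, and paths are placeholder values:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: postgres
+  annotations:
+    # Back up only the scratch volume that holds the pg_dump output
+    backup.velero.io/backup-volumes: backup
+    # Export the database to the scratch volume before the backup runs
+    pre.hook.backup.velero.io/command: '["/bin/bash", "-c", "pg_dump -U username dbname > /scratch/backup.sql"]'
+    pre.hook.backup.velero.io/timeout: 3m
+spec:
+  containers:
+    - name: postgres
+      image: postgres
+      volumeMounts:
+        - name: backup
+          mountPath: /scratch
+  volumes:
+    - name: backup
+      emptyDir: {}
+```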
+
+For more information about backup and restore hooks, see [Backup Hooks](https://velero.io/docs/v1.10/backup-hooks/) and [Restore Hooks](https://velero.io/docs/v1.10/restore-hooks) in the Velero documentation.
+
+## Example
+
+The following example demonstrates how to include Velero backup and restore hooks for a Postgres database in a Replicated HelmChart custom resource manifest file.
+
+The use case for this example is an application packaged with a Helm chart that includes a Postgres database. A description of key fields from the YAML follows the example.
+
+```yaml
+apiVersion: kots.io/v1beta2
+kind: HelmChart
+metadata:
+  name: postgresql
+spec:
+  exclude: 'repl{{ ConfigOptionEquals `postgres_type` `external_postgres` }}'
+
+  chart:
+    name: postgresql
+    chartVersion: 8.7.4
+
+  values:
+
+    master:
+      podAnnotations:
+        backup.velero.io/backup-volumes: backup
+        pre.hook.backup.velero.io/command: '["/bin/bash", "-c", "PGPASSWORD=$POSTGRES_PASSWORD pg_dump -U username -d dbname -h 127.0.0.1 > /scratch/backup.sql"]'
+        pre.hook.backup.velero.io/timeout: 3m
+        post.hook.restore.velero.io/command: '["/bin/bash", "-c", "[ -f \"/scratch/backup.sql\" ] && PGPASSWORD=$POSTGRES_PASSWORD psql -U username -h 127.0.0.1 -d dbname -f /scratch/backup.sql && rm -f /scratch/backup.sql;"]'
+
+      extraVolumes:
+        - name: backup
+          emptyDir:
+            sizeLimit: 1Gi
+      extraVolumeMounts:
+        - name: backup
+          mountPath: /scratch
+
+    global:
+      postgresql:
+        postgresqlUsername: username
+        postgresqlPassword: "repl{{ ConfigOption `embedded_postgres_password` }}"
+        postgresqlDatabase: dbname
+```
+
+The following describes key fields from the example above:
+
+* `spec.exclude`: A common and recommended pattern for applications. The customer can choose to bring an external Postgres instance instead of running it in-cluster. The Replicated KOTS template function in `spec.exclude` evaluates to true when the user specifies the external database option in the Admin Console **Config** page. This means that the internal Postgres database is not included in the deployment.
+
+* `spec.values.master.podAnnotations`: Adds podAnnotations to the Postgres master PodSpec. Velero backup and restore hooks are included in the podAnnotations. The following table describes the podAnnotations:
+
+  :::note
+  Run backup hooks inside the container that contains the data to back up.
+  :::
+
+  <table>
+    <tr>
+      <th>podAnnotation</th>
+      <th>Description</th>
+    </tr>
+    <tr>
+      <td><code>backup.velero.io/backup-volumes</code></td>
+      <td>A comma-separated list of volumes from the Pod to include in the backup. The primary data volume is not included in this field because data is exported using the backup hook.</td>
+    </tr>
+    <tr>
+      <td><code>pre.hook.backup.velero.io/command</code></td>
+      <td>A stringified JSON array containing the command for the backup hook.
+      This command is a <code>pg_dump</code> from the running database to the backup volume.</td>
+    </tr>
+    <tr>
+      <td><code>pre.hook.backup.velero.io/timeout</code></td>
+      <td>A duration for the maximum time to let this script run.</td>
+    </tr>
+    <tr>
+      <td><code>post.hook.restore.velero.io/command</code></td>
+      <td>A Velero exec restore hook that runs a script to check if the database file exists, and restores only if it exists. Then, the script deletes the file after the operation is complete.</td>
+    </tr>
+  </table>
+
+* `spec.values.master.extraVolumes`: A new volume that is injected into the Postgres Pod. The new volume is an empty volume that uses ephemeral storage.
The ephemeral storage must have enough space to accommodate the size of the exported data.
+The `extraVolumeMounts` field mounts the volume into the `/scratch` directory of the master Pod. The volume is used as a destination when the backup hook command described above runs `pg_dump`. This is the only volume that is backed up.
+
+================
+File: docs/vendor/snapshots-overview.mdx
+================
+import RestoreTable from "../partials/snapshots/_restoreTable.mdx"
+import NoEcSupport from "../partials/snapshots/_limitation-no-ec-support.mdx"
+import RestoreTypes from "../partials/snapshots/_restore-types.mdx"
+import Dr from "../partials/snapshots/_limitation-dr.mdx"
+import Os from "../partials/snapshots/_limitation-os.mdx"
+import InstallMethod from "../partials/snapshots/_limitation-install-method.mdx"
+import CliRestores from "../partials/snapshots/_limitation-cli-restores.mdx"
+
+# About Backup and Restore with Snapshots
+
+This topic provides an introduction to the Replicated KOTS snapshots feature for backup and restore. It describes how vendors enable snapshots, the type of data that is backed up, and how to troubleshoot issues for enterprise users.
+
+:::note
+<NoEcSupport/>
+:::
+
+## Overview
+
+An important part of the lifecycle of an application is backup and restore. You can enable Replicated KOTS snapshots to support backup and restore for existing cluster installations with KOTS and for Replicated kURL installations.
+
+When the snapshots feature is enabled for your application, your customers can manage and perform backup and restore from the Admin Console or KOTS CLI.
+
+Snapshots uses the Velero open source project as the backend to back up Kubernetes manifests and persistent volumes. Velero is a mature, fully-featured application. For more information, see the [Velero documentation](https://velero.io/docs/).
+
+In addition to the default functionality that Velero provides, KOTS exposes hooks that let you inject scripts that can execute both before and after a backup, and before and after a restore. For more information, see [Configuring Backup and Restore Hooks for Snapshots](/vendor/snapshots-hooks).
+
+### Limitations and Considerations
+
+- <NoEcSupport/>
+
+- The snapshots feature is available only for licenses with the **Allow Snapshots** option enabled. For more information, see [Creating and Managing Customers](/vendor/releases-creating-customer).
+
+- Snapshots are useful for rollback and disaster recovery scenarios. They are not intended to be used for application migration.
+
+- <Dr/>
+
+- <Os/>
+
+- <InstallMethod/>
+
+- <CliRestores/>
+
+- Removing data from the snapshot storage itself results in data corruption and the loss of snapshots. Instead, use the **Snapshots** tab in the Admin Console to clean up and remove snapshots.
+
+- Snapshots does not support Amazon Simple Storage Service (Amazon S3) buckets that have a bucket policy requiring the server-side encryption header. If you want to require server-side encryption for objects, you can enable default encryption on the bucket instead. For more information about Amazon S3, see the [Amazon S3](https://docs.aws.amazon.com/s3/?icmpid=docs_homepage_featuredsvcs) documentation.
+
+### Velero Version Compatibility
+
+The following table lists which versions of Velero are compatible with each version of KOTS. For more information, see the [Velero documentation](https://velero.io/docs/).
+
+| KOTS version | Velero version |
+|------|-------------|
+| 1.15 to 1.20.2 | 1.2.0 |
+| 1.20.3 to 1.94.0 | 1.5.1 through 1.9.x |
+| 1.94.1 and later | 1.6.x through 1.12.x |
+
+## About Backups
+
+This section describes the types of backups that are supported with snapshots. For information about how to configure backup storage destinations for snapshots, see the [Configuring Backup Storage](/enterprise/snapshots-velero-cli-installing) section.
+
+### Application and Admin Console (Full) Backups
+
+Full backups (also referred to as _instance_ backups) include the KOTS Admin Console and all application data, including application volumes and manifest files.
+
+For clusters created with Replicated kURL, full backups also back up the Docker registry, which is required for air gapped installations.
+
+If you manage multiple applications with the Admin Console, data from all applications that support backups is included in a full backup. To be included in full backups, each application must include a manifest file with `kind: Backup` and `apiVersion: velero.io/v1`, which you can check for in the Admin Console.
+
+Full backups are recommended because they support all types of restores. For example, you can restore both the Admin Console and application from a full backup to a new cluster in disaster recovery scenarios. Or, you can use a full backup to restore only application data for the purpose of rolling back after deploying a new version of an application.
+
+### Application-Only (Partial) Backups
+
+Partial backups back up the application volumes and manifest files only. Partial backups do not back up the KOTS Admin Console.
+
+Partial backups can be useful if you need to roll back after deploying a new application version. Application-only partial backups _cannot_ be restored to a new cluster, and are therefore not usable for disaster recovery scenarios.
+
+### Backup Storage Destinations
+
+For disaster recovery, backups should be configured to use a storage destination that exists outside of the cluster. This is especially true for installations in clusters created with Replicated kURL, because the default storage location on these clusters is internal.
+
+You can use a storage provider that is compatible with Velero as the storage destination for backups created with the Replicated snapshots feature. For a list of the compatible storage providers, see [Providers](https://velero.io/docs/v1.9/supported-providers/) in the Velero documentation.
+
+You initially configure backups on a supported storage provider backend using the KOTS CLI. If you want to change the storage destination after the initial configuration, you can use the **Snapshots** page in the Admin Console, which has built-in support for the following storage destinations:
+
+- Amazon Web Services (AWS)
+- Google Cloud Provider (GCP)
+- Microsoft Azure
+- S3-Compatible
+- Network File System (NFS)
+- Host Path
+
+kURL installers that include the Velero add-on also include a locally-provisioned object store. By default, kURL clusters are preconfigured in the Admin Console to store backups in the locally-provisioned object store. This object store is sufficient for only rollbacks and downgrades and is not a suitable configuration for disaster recovery. Replicated recommends that you configure a snapshots storage destination that is external to the cluster in the Admin Console for kURL clusters.
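+
+For illustration, the following is a minimal sketch of how an external S3-compatible storage destination might be configured initially with the KOTS CLI. The namespace, endpoint, bucket, and credential values shown here are placeholders:
+
+```bash
+kubectl kots velero configure-other-s3 \
+  --namespace default \
+  --endpoint http://minio.example.com \
+  --region minio \
+  --bucket kots-snapshots \
+  --access-key-id <access-key> \
+  --secret-access-key <secret-key>
+```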
+
+For information about how to configure backup storage destinations for snapshots, see the [Configuring Backup Storage](/enterprise/snapshots-velero-cli-installing) section.
+
+### What Data is Backed Up?
+
+Full backups include the Admin Console and all application data, including KOTS-specific object-stored data. For Replicated kURL installations, this also backs up the Docker registry, which is required for air gapped installations.
+
+#### Other Object-Stored Data
+
+For kURL clusters, you might be using object-stored data that is not specific to the kURL KOTS add-on.
+
+For object-stored data that is not KOTS-specific and does not use persistentVolumeClaims (PVCs), you must write custom backup and restore hooks to enable backups for that object-stored data. For example, Rook and Ceph do not use PVCs and so require custom backup and restore hooks. For more information about writing custom hooks, see [Configuring Backup and Restore Hooks for Snapshots](snapshots-hooks).
+
+#### Pod Volume Data
+
+Replicated supports only the restic backup program for pod volume data.
+
+By default, Velero requires that you opt in to have pod volumes backed up. In the Backup resource that you configure to enable snapshots, you must annotate each specific volume that you want to back up. For more information about including and excluding pod volumes, see [Configuring Snapshots](/vendor/snapshots-configuring-backups).
+
+## About Restores {#restores}
+
+<RestoreTypes/>
+
+When you restore an application with snapshots, KOTS first deletes the selected application. All existing application manifests are removed from the cluster, and all `PersistentVolumeClaims` are deleted. This action is not reversible.
+
+Then, the restore process redeploys all of the application manifests. All Pods are given an extra `initContainer` and an extra directory named `.velero`, which are used for restore hooks. For more information about the restore process, see [Restore Reference](https://velero.io/docs/v1.9/restore-reference/) in the Velero documentation.
+
+When you restore the Admin Console only, no changes are made to the application.
+
+For information about how to restore using the Admin Console or the KOTS CLI, see [Restoring from Backups](/enterprise/snapshots-restoring-full).
+
+## Using Snapshots
+
+This section provides an overview of how vendors and enterprise users can configure and use the snapshots feature.
+
+### How to Enable Snapshots for Your Application
+
+To enable the snapshots backup and restore feature for your users, you must:
+
+- Have the snapshots entitlement enabled in your Replicated vendor account. For account entitlements, contact the Replicated TAM team.
+- Define a manifest for creating backups. See [Configuring Snapshots](snapshots-configuring-backups).
+- When needed, configure backup and restore hooks. See [Configuring Backup and Restore Hooks for Snapshots](snapshots-hooks).
+- Enable the **Allow Snapshots** option in customer licenses. See [Creating and Managing Customers](releases-creating-customer).
+
+### Understanding Backup and Restore for Users {#how-users}
+
+After vendors enable backup and restore, enterprise users install Velero and configure a storage destination in the Admin Console. Then users can create backups manually or schedule automatic backups.
+
+Replicated recommends advising your users to make full backups for disaster recovery purposes.
Additionally, full backups give users the flexibility to do a full restore, a partial restore (application only), or restore just the Admin Console.
+
+From a full backup, users restore using the KOTS CLI or the Admin Console as indicated in the following table:
+
+<RestoreTable/>
+
+Partial backups are not recommended as they are a legacy feature and only back up the application volumes and manifests. Partial backups can be restored only from the Admin Console.
+
+### Troubleshooting Snapshots
+
+To support end users with backup and restore, use the following resources:
+
+- To help troubleshoot error messages, see [Troubleshooting Snapshots](/enterprise/snapshots-troubleshooting-backup-restore).
+
+- Review the Limitations and Considerations section to make sure an end user's system is compliant.
+
+- Check that the installed Velero version and KOTS version are compatible.
+
+================
+File: docs/vendor/support-bundle-customizing.mdx
+================
+# Adding and Customizing Support Bundles
+
+This topic describes how to add a default support bundle spec to a release for your application. It also describes how to customize the default support bundle spec based on your application's needs. For more information about support bundles, see [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about).
+
+The information in this topic applies to Helm applications and Kubernetes manifest-based applications installed with Helm or with Replicated KOTS.
+
+## Step 1: Add the Default Spec to a Manifest File
+
+You can add the support bundle spec to a Kubernetes Secret or a SupportBundle custom resource. The type of manifest file that you use depends on your application type (Helm or manifest-based) and installation method (Helm or KOTS).
+
+Use the following guidance to determine which type of manifest file to use for creating a support bundle spec:
+
+* **Helm Applications**: For Helm applications, see the following guidance:
+
+   * **(Recommended) Helm or KOTS v1.94.2 and Later**: For Helm applications installed with Helm or KOTS v1.94.2 or later, create the support bundle spec in a Kubernetes Secret in your Helm chart `templates`. See [Kubernetes Secret](#secret).
+
+   * **KOTS v1.94.1 and Earlier**: For Helm applications installed with KOTS v1.94.1 or earlier, create the support bundle spec in a SupportBundle custom resource. See [SupportBundle Custom Resource](#sb-cr).
+
+* **Kubernetes Manifest-Based Applications**: For Kubernetes manifest-based applications, create the support bundle spec in a SupportBundle custom resource. See [SupportBundle Custom Resource](#sb-cr).
+
+### Kubernetes Secret {#secret}
+
+You can define support bundle specs in a Kubernetes Secret for the following installation types:
+* Installations with Helm
+* Helm applications installed with KOTS v1.94.2 and later
+
+In your Helm chart `templates` directory, add the following YAML to a Kubernetes Secret:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  labels:
+    troubleshoot.sh/kind: support-bundle
+  name: example
+stringData:
+  support-bundle-spec: |
+    apiVersion: troubleshoot.sh/v1beta2
+    kind: SupportBundle
+    metadata:
+      name: support-bundle
+    spec:
+      collectors: []
+      analyzers: []
+```
+
+As shown above, the Secret must include the following:
+
+* The label `troubleshoot.sh/kind: support-bundle`
+* A `stringData` field with a key named `support-bundle-spec`
+
+This empty support bundle spec includes the following collectors by default:
+* [clusterInfo](https://troubleshoot.sh/docs/collect/cluster-info/)
+* [clusterResources](https://troubleshoot.sh/docs/collect/cluster-resources/)
+
+You do not need to manually include the `clusterInfo` or `clusterResources` collectors in the spec.
+
+:::note
+If your application is deployed as multiple Helm charts, Replicated recommends that you create separate support bundle specs for each subchart. This allows you to make specs that are specific to different components of your application. When a support bundle is generated, all the specs are combined to provide a single bundle.
+:::
+
+After you create this empty support bundle spec, you can test the support bundle by following the instructions in [Generating a Support Bundle](/vendor/support-bundle-generating). You can customize the support bundle spec by adding collectors and analyzers or editing the default collectors. For more information, see [Step 2: Customize the Spec](/vendor/support-bundle-customizing#customize-the-spec) below.
+
+### SupportBundle Custom Resource {#sb-cr}
+
+You can define support bundle specs in a SupportBundle custom resource for the following installation types:
+* Kubernetes manifest-based applications installed with KOTS
+* Helm applications installed with KOTS v1.94.1 and earlier
+
+In a release for your application, add the following YAML to a new `support-bundle.yaml` manifest file:
+
+```yaml
+apiVersion: troubleshoot.sh/v1beta2
+kind: SupportBundle
+metadata:
+  name: example
+spec:
+  collectors: []
+  analyzers: []
+```
+For more information about the SupportBundle custom resource, see [Preflight and Support Bundle](/reference/custom-resource-preflight).
+
+This empty support bundle spec includes the following collectors by default:
+* [clusterInfo](https://troubleshoot.sh/docs/collect/cluster-info/)
+* [clusterResources](https://troubleshoot.sh/docs/collect/cluster-resources/)
+
+You do not need to manually include the `clusterInfo` or `clusterResources` collectors in the spec.
+
+After you create this empty support bundle spec, you can test the support bundle by following the instructions in [Generating a Support Bundle](/vendor/support-bundle-generating). You can customize the support bundle spec by adding collectors and analyzers or editing the default collectors. For more information, see [Step 2: Customize the Spec](/vendor/support-bundle-customizing#customize-the-spec) below.
+
+## Step 2: Customize the Spec {#customize-the-spec}
+
+You can customize the support bundles for your application by:
+* Adding collectors and analyzers
+* Editing or excluding the default `clusterInfo` and `clusterResources` collectors
+
+### Add Collectors
+
+Collectors gather information from the cluster, the environment, the application, or other sources. Collectors generate output that is then used by the analyzers that you define.
+
+In addition to the default `clusterInfo` and `clusterResources` collectors, the Troubleshoot open source project includes several collectors that you can include in the spec to gather more information from the installation environment. To view all the available collectors, see [All Collectors](https://troubleshoot.sh/docs/collect/all/) in the Troubleshoot documentation.
+
+The following are some recommended collectors:
+
+- [logs](https://troubleshoot.sh/docs/collect/logs/)
+- [secret](https://troubleshoot.sh/docs/collect/secret/) and [configMap](https://troubleshoot.sh/docs/collect/configmap/)
+- [postgresql](https://troubleshoot.sh/docs/collect/postgresql/), [mysql](https://troubleshoot.sh/docs/collect/mysql/), and [redis](https://troubleshoot.sh/docs/collect/redis/)
+- [runPod](https://troubleshoot.sh/docs/collect/run-pod/)
+- [copy](https://troubleshoot.sh/docs/collect/copy/) and [copyFromHost](https://troubleshoot.sh/docs/collect/copy-from-host/)
+- [http](https://troubleshoot.sh/docs/collect/http/)
+
+### Add Analyzers
+
+Analyzers use the data from the collectors to generate output for the support bundle. Good analyzers clearly identify failure modes and provide troubleshooting guidance for the user. For example, if you can identify a log message from your database component that indicates a problem, you should write an analyzer that checks for that log and provides a description of the error to the user.
+
+The Troubleshoot open source project includes several analyzers that you can include in the spec. To view all the available analyzers, see the [Analyze](https://troubleshoot.sh/docs/analyze/) section of the Troubleshoot documentation.
+
+The following are some recommended analyzers:
+
+- [textAnalyze](https://troubleshoot.sh/docs/analyze/regex/)
+- [deploymentStatus](https://troubleshoot.sh/docs/analyze/deployment-status/)
+- [clusterPodStatuses](https://troubleshoot.sh/docs/analyze/cluster-pod-statuses/)
+- [replicasetStatus](https://troubleshoot.sh/docs/analyze/replicaset-status/)
+- [statefulsetStatus](https://troubleshoot.sh/docs/analyze/statefulset-status/)
+- [postgresql](https://troubleshoot.sh/docs/analyze/postgresql/), [mysql](https://troubleshoot.sh/docs/analyze/mysql/), and [redis](https://troubleshoot.sh/docs/analyze/redis/)
+
+### Customize the Default `clusterResources` Collector
+
+You can edit the default `clusterResources` collector using the following properties:
+
+* `namespaces`: The list of namespaces where the resources and information are collected. If the `namespaces` key is not specified, then the `clusterResources` collector defaults to collecting information from all namespaces. The `default` namespace cannot be removed, but you can specify additional namespaces.
+
+* `ignoreRBAC`: When true, the `clusterResources` collector does not check for RBAC authorization before collecting resource information from each namespace. This is useful when your cluster uses authorization webhooks that do not support SelfSubjectRuleReviews. Defaults to false.
+
+For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) in the Troubleshoot documentation.
+
+The following example shows how to specify the namespaces where the `clusterResources` collector collects information:
+
+```yaml
+spec:
+  collectors:
+    - clusterResources:
+        namespaces:
+          - default
+          - my-app-namespace
+        ignoreRBAC: true
+```
+
+The following example shows how to use Helm template functions to set the namespace:
+
+```yaml
+spec:
+  collectors:
+    - clusterResources:
+        namespaces: {{ .Release.Namespace }}
+        ignoreRBAC: true
+```
+
+The following example shows how to use the Replicated Namespace template function to set the namespace:
+
+```yaml
+spec:
+  collectors:
+    - clusterResources:
+        namespaces: '{{repl Namespace }}'
+        ignoreRBAC: true
+```
+For more information, see [Namespace](/reference/template-functions-static-context#namespace) in _Static Context_.
+
+### Exclude the Default Collectors
+
+Although Replicated recommends including the default `clusterInfo` and `clusterResources` collectors because they collect a large amount of data to help with installation and debugging, you can optionally exclude them.
+
+The following example shows how to exclude both the `clusterInfo` and `clusterResources` collectors from your support bundle spec:
+
+```yaml
+spec:
+  collectors:
+    - clusterInfo:
+        exclude: true
+    - clusterResources:
+        exclude: true
+```
+
+### Examples
+
+For common examples of collectors and analyzers used in support bundle specs, see [Examples of Support Bundle Specs](/vendor/support-bundle-examples).
+
+================
+File: docs/vendor/support-bundle-embedded.mdx
+================
+import EmbeddedClusterSupportBundle from "../partials/support-bundles/_generate-bundle-ec.mdx"
+import SupportBundleIntro from "../partials/support-bundles/_ec-support-bundle-intro.mdx"
+
+# Generating Support Bundles for Embedded Cluster
+
+This topic describes how to generate a support bundle that includes cluster- and host-level information for [Replicated Embedded Cluster](/vendor/embedded-overview) installations.
+
+For information about generating host support bundles for Replicated kURL installations, see [Generating Host Bundles for kURL](/vendor/support-host-support-bundles).
+
+## Overview
+
+<SupportBundleIntro/>
+
+## Generate a Support Bundle
+
+<EmbeddedClusterSupportBundle/>
+
+================
+File: docs/vendor/support-bundle-examples.mdx
+================
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import HttpSecret from "../partials/support-bundles/_http-requests-secret.mdx"
+import HttpCr from "../partials/support-bundles/_http-requests-cr.mdx"
+import NodeStatusSecret from "../partials/support-bundles/_node-status-secret.mdx"
+import NodeStatusCr from "../partials/support-bundles/_node-status-cr.mdx"
+import K8sVersionSecret from "../partials/support-bundles/_k8s-version-secret.mdx"
+import K8sVersionCr from "../partials/support-bundles/_k8s-version-cr.mdx"
+import DeployStatusSecret from "../partials/support-bundles/_deploy-status-secret.mdx"
+import DeployStatusCr from "../partials/support-bundles/_deploy-status-cr.mdx"
+import NodeResourcesSecret from "../partials/support-bundles/_node-resources-secret.mdx"
+import NodeResourcesCr from "../partials/support-bundles/_node-resources-cr.mdx"
+import LogsSelectorsSecret from "../partials/support-bundles/_logs-selectors-secret.mdx"
+import LogsSelectorsCr from "../partials/support-bundles/_logs-selectors-cr.mdx"
+import LogsLimitsSecret from "../partials/support-bundles/_logs-limits-secret.mdx"
+import LogsLimitsCr from "../partials/support-bundles/_logs-limits-cr.mdx"
+import RedisMysqlSecret from "../partials/support-bundles/_redis-mysql-secret.mdx"
+import RedisMysqlCr from "../partials/support-bundles/_redis-mysql-cr.mdx"
+import RunPodsSecret from "../partials/support-bundles/_run-pods-secret.mdx"
+import RunPodsCr from "../partials/support-bundles/_run-pods-cr.mdx"
+
+# Example Support Bundle Specs
+
+This topic includes common examples of support bundle specifications. For more examples, see the [Troubleshoot example repository](https://github.com/replicatedhq/troubleshoot/tree/main/examples/support-bundle) in GitHub.
+
+## Check API Deployment Status
+
+The examples below use the `deploymentStatus` analyzer to check the status of an API Deployment running in the cluster. The `deploymentStatus` analyzer uses data from the default `clusterResources` collector.
+
+For more information, see [Deployment Status](https://troubleshoot.sh/docs/analyze/deployment-status/) and [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) in the Troubleshoot documentation.
+
+<Tabs>
+  <TabItem value="secret" label="Kubernetes Secret" default>
+    <DeployStatusSecret/>
+  </TabItem>
+  <TabItem value="custom-resource" label="SupportBundle Custom Resource">
+    <DeployStatusCr/>
+  </TabItem>
+</Tabs>
+
+## Check HTTP Requests
+
+If your application has its own API that serves status, metrics, performance data, and so on, this information can be collected and analyzed.
+
+The examples below use the `http` collector and the `textAnalyze` analyzer to check that an HTTP request to the Slack API at `https://api.slack.com/methods/api.test` made from the cluster returns a successful response of `"status": 200,`.
+
+For more information, see [HTTP](https://troubleshoot.sh/docs/collect/http/) and [Regular Expression](https://troubleshoot.sh/docs/analyze/regex/) in the Troubleshoot documentation.
+
+<Tabs>
+  <TabItem value="secret" label="Kubernetes Secret" default>
+    <HttpSecret/>
+  </TabItem>
+  <TabItem value="custom-resource" label="SupportBundle Custom Resource">
+    <HttpCr/>
+  </TabItem>
+</Tabs>
+
+## Check Kubernetes Version
+
+The examples below use the `clusterVersion` analyzer to check the version of Kubernetes running in the cluster. The `clusterVersion` analyzer uses data from the default `clusterInfo` collector.
+
+For more information, see [Cluster Version](https://troubleshoot.sh/docs/analyze/cluster-version/) and [Cluster Info](https://troubleshoot.sh/docs/collect/cluster-info/) in the Troubleshoot documentation.
+
+<Tabs>
+  <TabItem value="secret" label="Kubernetes Secret" default>
+    <K8sVersionSecret/>
+  </TabItem>
+  <TabItem value="custom-resource" label="SupportBundle Custom Resource">
+    <K8sVersionCr/>
+  </TabItem>
+</Tabs>
+
+## Check Node Resources
+
+The examples below use the `nodeResources` analyzer to check that the minimum requirements are met for memory, CPU cores, number of nodes, and ephemeral storage. The `nodeResources` analyzer uses data from the default `clusterResources` collector.
+
+For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) in the Troubleshoot documentation.
+
+<Tabs>
+  <TabItem value="secret" label="Kubernetes Secret" default>
+    <NodeResourcesSecret/>
+  </TabItem>
+  <TabItem value="custom-resource" label="SupportBundle Custom Resource">
+    <NodeResourcesCr/>
+  </TabItem>
+</Tabs>
+
+## Check Node Status
+
+The following examples use the `nodeResources` analyzer to check the status of the nodes in the cluster. The `nodeResources` analyzer uses data from the default `clusterResources` collector.
+
+For more information, see [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) and [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) in the Troubleshoot documentation.
+
+<Tabs>
+  <TabItem value="secret" label="Kubernetes Secret" default>
+    <NodeStatusSecret/>
+  </TabItem>
+  <TabItem value="custom-resource" label="SupportBundle Custom Resource">
+    <NodeStatusCr/>
+  </TabItem>
+</Tabs>
+
+## Collect Logs Using Multiple Selectors
+
+The examples below use the `logs` collector to collect logs from various Pods where application workloads are running. They also use the `textAnalyze` analyzer to analyze the logs for a known error.
+
+For more information, see [Pod Logs](https://troubleshoot.sh/docs/collect/logs/) and [Regular Expression](https://troubleshoot.sh/docs/analyze/regex/) in the Troubleshoot documentation.
+
+You can use the `selector` attribute of the `logs` collector to find Pods that have the specified labels. Depending on the complexity of an application's labeling schema, you might need a few different declarations of the logs collector, as shown in the examples below. You can include the `logs` collector as many times as needed.
+
+<Tabs>
+  <TabItem value="secret" label="Kubernetes Secret" default>
+    <LogsSelectorsSecret/>
+  </TabItem>
+  <TabItem value="custom-resource" label="SupportBundle Custom Resource">
+    <LogsSelectorsCr/>
+  </TabItem>
+</Tabs>
+
+## Collect Logs Using `limits`
+
+The examples below use the `logs` collector to collect Pod logs from the Pod where the application is running. These specifications use the `limits` field to set a `maxAge` and `maxLines` to limit the output provided.
+
+For more information, see [Pod Logs](https://troubleshoot.sh/docs/collect/logs/) in the Troubleshoot documentation.
+
+<Tabs>
+  <TabItem value="secret" label="Kubernetes Secret" default>
+    <LogsLimitsSecret/>
+  </TabItem>
+  <TabItem value="custom-resource" label="SupportBundle Custom Resource">
+    <LogsLimitsCr/>
+  </TabItem>
+</Tabs>
+
+## Collect Redis and MySQL Server Information
+
+The following examples use the `redis` and `mysql` collectors to collect information about Redis and MySQL servers running in the cluster.
+
+For more information, see [Redis](https://troubleshoot.sh/docs/collect/redis/) and [MySQL](https://troubleshoot.sh/docs/collect/mysql/) in the Troubleshoot documentation.
+
+<Tabs>
+  <TabItem value="secret" label="Kubernetes Secret" default>
+    <RedisMysqlSecret/>
+  </TabItem>
+  <TabItem value="custom-resource" label="SupportBundle Custom Resource">
+    <RedisMysqlCr/>
+  </TabItem>
+</Tabs>
+
+## Run and Analyze a Pod
+
+The examples below use the `textAnalyze` analyzer to check that a command successfully executes in a Pod running in the cluster. The Pod specification is defined in the `runPod` collector.
+
+For more information, see [Run Pods](https://troubleshoot.sh/docs/collect/run-pod/) and [Regular Expression](https://troubleshoot.sh/docs/analyze/regex/) in the Troubleshoot documentation.
+
+<Tabs>
+  <TabItem value="secret" label="Kubernetes Secret" default>
+    <RunPodsSecret/>
+  </TabItem>
+  <TabItem value="custom-resource" label="SupportBundle Custom Resource">
+    <RunPodsCr/>
+  </TabItem>
+</Tabs>
+
+================
+File: docs/vendor/support-bundle-generating.mdx
+================
+import InstallPlugin from "../partials/support-bundles/_install-plugin.mdx"
+import GenerateBundle from "../partials/support-bundles/_generate-bundle.mdx"
+
+# Generating Support Bundles
+
+This topic describes how to generate support bundles from the command line using the kubectl support-bundle plugin. For more information about support bundles, see [About Preflights and Support Bundles](/vendor/preflight-support-bundle-about).
+
+The information in this topic applies to generating support bundles in clusters where you have kubectl access. For information about generating support bundles that include cluster- and host-level information for Replicated Embedded Cluster installations, see [Generating Support Bundles for Embedded Cluster](support-bundle-embedded).
+
+## Prerequisite: Install the support-bundle Plugin
+
+<InstallPlugin/>
+
+## Generate a Bundle
+
+<GenerateBundle/>
+
+## Generate a Bundle when a Helm Installation Fails
+
+If a Helm installation fails and you want to collect a support bundle to assist with diagnostics, you can use a Replicated default specification to generate the support bundle.
+
+Run the following command:
+
+```bash
+kubectl support-bundle https://raw.githubusercontent.com/replicatedhq/troubleshoot-specs/main/in-cluster/default.yaml
+```
+
+================
+File: docs/vendor/support-enabling-direct-bundle-uploads.md
+================
+# Enabling Support Bundle Uploads (Beta)
+
+:::note
+Direct bundle uploads are in beta. The functionality, requirements, and limitations of direct bundle uploads are subject to change.
+:::
+
+When this feature is enabled, customers using online KOTS installations can upload support bundles directly through the Admin Console UI, eliminating the need to share the generated bundle with you manually.
+
+When enabled, your customers can use the **Send bundle to vendor** button in the Admin Console to upload a generated support bundle.
+
+<img alt="Send bundle to vendor screen" src="/images/send-bundle-to-vendor.png" width="600px"/>
+
+After clicking this button, the bundle is immediately available under the **Troubleshoot** tab in the Vendor Portal team account associated with this customer.
+
+For more information about how your customers can use this feature, see [Generating Support Bundles from the Admin Console](/enterprise/troubleshooting-an-app).
+
+### How to Enable Direct Bundle Uploads
+
+Direct bundle uploads are disabled by default. To enable this feature for your customer:
+
+1. Log in to the Vendor Portal and navigate to your customer's **Manage Customer** page.
+1. Under the **License options** section, make sure your customer has **KOTS Install Enabled** checked, and then check the **Support Bundle Upload Enabled (Beta)** option.
+   <img alt="Customer license options: configure direct support bundle upload" src="/images/configure-direct-support-bundle-upload.png" width="400px"/>
+
+   [View a larger version of this image](/images/configure-direct-support-bundle-upload.png)
+1. Click **Save**.
+
+### Limitations
+
+- You will not receive a notification when a customer sends a support bundle to the Vendor Portal. To avoid overlooking these uploads, activate this feature only if there is a reliable escalation process already in place for the customer license.
+- This feature supports only online KOTS installations. If the feature is enabled but the application is installed in air gap mode, the upload button does not appear.
+- There is a 500 MB limit for support bundles uploaded directly through the Admin Console.
+
+================
+File: docs/vendor/support-host-support-bundles.md
+================
+import GenerateBundleHost from "../partials/support-bundles/_generate-bundle-host.mdx"
+
+# Generating Host Bundles for kURL
+
+This topic describes how to configure a host support bundle spec for Replicated kURL installations. For information about generating host support bundles for Replicated Embedded Cluster installations, see [Generating Host Bundles for Embedded Cluster](/vendor/support-bundle-embedded).
+
+## Overview
+
+Host support bundles can be used to collect information directly from the host where a kURL cluster is running, such as CPU, memory, available block devices, and the operating system. Host support bundles can also be used for testing network connectivity and gathering the output of provided commands.
+
+Host bundles for kURL are useful when:
+- The kURL cluster is offline
+- The kURL installer failed before the control plane was initialized
+- The Admin Console is not working
+- You want to debug host-specific performance and configuration problems even when the cluster is running
+
+You can create a YAML spec to allow users to generate host support bundles for kURL installations. For information, see [Create a Host Support Bundle Spec](#create-a-host-support-bundle-spec) below.
+
+Replicated also provides a default support bundle spec to collect host-level information for installations with the Embedded Cluster installer. For more information, see [Generating Host Bundles for Embedded Cluster](/vendor/support-bundle-embedded).
+
+## Create a Host Support Bundle Spec
+
+To allow users to generate host support bundles for kURL installations, create a host support bundle spec in a YAML manifest that is separate from your application release and then share the file with customers to run on their hosts. This spec is separate from your application release because host collectors and analyzers are intended to run directly on the host and not with Replicated KOTS. If KOTS runs host collectors, the collectors are unlikely to produce the desired results because they run in the context of the kotsadm Pod.
+
+To configure a host support bundle spec for kURL:
+
+1. Create a SupportBundle custom resource manifest file (`kind: SupportBundle`).
+
+1. Configure all of your host collectors and analyzers in one manifest file. You can use the following resources to help create your specification:
+
+   - Access sample specifications in the Replicated troubleshoot-specs repository, which provides specifications for supporting your customers. See [troubleshoot-specs/host](https://github.com/replicatedhq/troubleshoot-specs/tree/main/host) in GitHub.
+
+   - View a list and details of the available host collectors and analyzers. See [All Host Collectors and Analyzers](https://troubleshoot.sh/docs/host-collect-analyze/all/) in the Troubleshoot documentation.
+
+   **Example:**
+
+   The following example shows host collectors and analyzers for the number of CPUs and the amount of memory.
+
+   ```yaml
+   apiVersion: troubleshoot.sh/v1beta2
+   kind: SupportBundle
+   metadata:
+     name: host-collectors
+   spec:
+     hostCollectors:
+       - cpu: {}
+       - memory: {}
+     hostAnalyzers:
+       - cpu:
+           checkName: "Number of CPUs"
+           outcomes:
+             - fail:
+                 when: "count < 2"
+                 message: At least 2 CPU cores are required, and 4 CPU cores are recommended.
+             - pass:
+                 message: This server has at least 2 CPU cores.
+       - memory:
+           checkName: "Amount of Memory"
+           outcomes:
+             - fail:
+                 when: "< 4G"
+                 message: At least 4G of memory is required, and 8G is recommended.
+             - pass:
+                 message: The system has at least 4G of memory.
+   ```
+
+1. Share the file with your customers to run on their hosts.
+
+:::important
+Do not store support bundles on public shares, as they may still contain information that could be used to infer private data about the installation, even if some values are redacted.
+:::
+
+## Generate a Host Bundle for kURL
+
+<GenerateBundleHost/>
+
+================
+File: docs/vendor/support-inspecting-support-bundles.md
+================
+# Inspecting Support Bundles
+
+You can use the Vendor Portal to get a visual analysis of customer support bundles and use the file inspector to drill down into the details and log files. Use this information to get insights and help troubleshoot your customer issues.
+
+To inspect a support bundle:
+
+1. In the Vendor Portal, go to the [**Troubleshoot**](https://vendor.replicated.com/troubleshoot) page and click **Add support bundle > Upload a support bundle**.
+
+1. In the **Upload a support bundle** dialog, drag and drop or use the file selector to upload a support bundle file to the Vendor Portal.
+
+   <img alt="Upload a support bundle dialog" src="/images/support-bundle-analyze.png" width="500px"/>
+
+   [View a larger version of this image](/images/support-bundle-analyze.png)
+
+1. (Optional) If the support bundle relates to an open support issue, select the support issue from the dropdown to share the bundle with Replicated.
+
+1. Click **Upload support bundle**.
+
+   The **Support bundle analysis** page opens.
The **Support bundle analysis** page includes information about the bundle, any available instance reporting data from the point in time when the bundle was collected, an analysis overview that can be filtered to show errors and warnings, and a file inspector.
+
+   ![Support bundle analysis overview](/images/support-bundle-analysis-overview.png)
+
+   [View a larger version of this image](/images/support-bundle-analysis-overview.png)
+
+1. On the **File inspector** tab, select any files from the directory tree to inspect the details of any files included in the support bundle, such as log files.
+
+1. (Optional) Click **Download bundle** to download the bundle. This can be helpful if you want to access the bundle from another system or if other team members want to access the bundle and use other tools to examine the files.
+
+1. (Optional) Navigate back to the [**Troubleshoot**](https://vendor.replicated.com/troubleshoot) page and click **Create cluster** to provision a cluster with Replicated Compatibility Matrix. This can be helpful for creating customer-representative environments for troubleshooting. For more information about creating clusters with Compatibility Matrix, see [Using Compatibility Matrix](testing-how-to).
+
+   <img alt="Cluster configuration dialog" src="/images/cmx-cluster-configuration.png" width="400px"/>
+
+   [View a larger version of this image](/images/cmx-cluster-configuration.png)
+
+1. If you cannot resolve your customer's issue and need to submit a support request, go to the [**Support**](https://vendor.replicated.com/) page and click **Open a support request**. For more information, see [Submitting a Support Request](support-submit-request).
+
+   :::note
+   The **Share with Replicated** button on the support bundle analysis page does _not_ open a support request. You might be directed to use the **Share with Replicated** option when you are already interacting with a Replicated team member.
+   :::
+
+   ![Submit a Support Request](/images/support.png)
+
+   [View larger version of this image](/images/support.png)
+
+================
+File: docs/vendor/support-modular-support-bundle-specs.md
+================
+# About Creating Modular Support Bundle Specs
+
+This topic describes how to use a modular approach to creating support bundle specs.
+
+## Overview
+
+Support bundle specifications can be designed using a modular approach. This refers to creating multiple different specs that are scoped to individual components or microservices, rather than creating a single, large spec. For example, for applications that are deployed as multiple Helm charts, vendors can create a separate support bundle spec in the `templates` directory in the parent chart as well as in each subchart.
+
+This modular approach helps teams develop specs that are easier to maintain and avoid the merge conflicts that are more likely to occur when making changes to a large spec. When generating support bundles for an application that includes multiple modular specs, the specs are merged so that only one support bundle archive is generated.
+
+## Example: Support Bundle Specifications by Component {#component}
+
+Using a modular approach for an application that ships MySQL, NGINX, and Redis, your team can add collectors and analyzers using a separate support bundle specification for each component.
+
+`manifests/nginx/troubleshoot.yaml`
+
+This collector and analyzer checks compliance for the minimum number of replicas for the NGINX component:
+
+```yaml
+apiVersion: troubleshoot.sh/v1beta2
+kind: SupportBundle
+metadata:
+  name: nginx
+spec:
+  collectors:
+    - logs:
+        selector:
+          - app=nginx
+  analyzers:
+    - deploymentStatus:
+        name: nginx
+        outcomes:
+          - fail:
+              when: replicas < 2
+```
+
+`manifests/mysql/troubleshoot.yaml`
+
+This collector and analyzer checks compliance for the minimum version of the MySQL component:
+
+```yaml
+apiVersion: troubleshoot.sh/v1beta2
+kind: SupportBundle
+metadata:
+  name: mysql
+spec:
+  collectors:
+    - mysql:
+        uri: 'dbuser:**REDACTED**@tcp(db-host)/db'
+  analyzers:
+    - mysql:
+        checkName: Must be version 8.x or later
+        outcomes:
+          - fail:
+              when: version < 8.x
+```
+
+`manifests/redis/troubleshoot.yaml`
+
+This collector checks that the Redis server is responding:
+
+```yaml
+apiVersion: troubleshoot.sh/v1beta2
+kind: SupportBundle
+metadata:
+  name: redis
+spec:
+  collectors:
+    - redis:
+        collectorName: redis
+        uri: rediss://default:password@hostname:6379
+```
+
+A single support bundle archive can be generated from a combination of these manifests using the `kubectl support-bundle --load-cluster-specs` command.
+For more information and additional options, see [Generating Support Bundles](support-bundle-generating).
+
+================
+File: docs/vendor/support-online-support-bundle-specs.md
+================
+# Making Support Bundle Specs Available Online
+
+This topic describes how to make your application's support bundle specs available online as well as how to link to online specs.
+
+## Overview
+
+You can make the definition of one or more support bundle specs available online in a source repository and link to it from the specs in the cluster. This approach lets you update collectors and analyzers outside of the application release and notify customers of potential problems and fixes in between application updates.
+
+The schema supports a `uri:` field that, when set, causes the support bundle generation to use the online specification. If the URI is unreachable or unparseable, any collectors or analyzers in the specification are used as a fallback.
+
+You update collectors and analyzers in the online specification to manage bug fixes. When a customer generates a support bundle, the online specification can detect those potential problems in the cluster and tell the customer how to fix them. Without the URI link option, you must wait for the next time your customers update their applications or Kubernetes versions to get notified of potential problems. The URI link option is particularly useful for customers that do not update their application routinely.
+
+If you are using a modular approach to designing support bundles, you can use multiple online specs. Each specification supports one URI link. For more information about modular specs, see [About Creating Modular Support Bundle Specs](support-modular-support-bundle-specs).
+
+## Example: URI Linking to a Source Repository
+
+This example shows how Replicated could set up a URI link for one of its own components. You can follow a similar process to link to your own online repository for your support bundles.
+
+Replicated kURL includes an EKCO add-on for maintenance on embedded clusters, such as automating certificate rotation or data migration tasks.
Replicated can ship this component with a support bundle manifest that warns users if they do not have this add-on installed or if it is not running in the cluster.
+
+**Example: Release v1.0.0**
+
+```yaml
+apiVersion: troubleshoot.sh/v1beta2
+kind: SupportBundle
+metadata:
+  name: ekco
+spec:
+  collectors:
+  analyzers:
+    - deploymentStatus:
+        checkName: Check EKCO is operational
+        name: ekc-operator
+        namespace: kurl
+        outcomes:
+          - fail:
+              when: absent
+              message: EKCO is not installed - please add the EKCO component to your kURL spec and re-run the installer script
+          - fail:
+              when: "< 1"
+              message: EKCO does not have any ready replicas
+          - pass:
+              message: EKCO has at least 1 replica
+```
+
+If a bug is discovered at any time after the release of the specification above, Replicated can write an analyzer for it in an online specification. By adding a URI link to the online specification, the support bundle uses the assets hosted in the online repository, which is kept current.
+
+The `uri` field is added to the specification as a raw file link. Replicated hosts the online specification on [GitHub](https://github.com/replicatedhq/troubleshoot-specs/blob/main/in-cluster/default.yaml).
+
+**Example: Release v1.1.0**
+
+```yaml
+apiVersion: troubleshoot.sh/v1beta2
+kind: SupportBundle
+metadata:
+  name: ekco
+spec:
+  uri: https://raw.githubusercontent.com/replicatedhq/troubleshoot-specs/main/in-cluster/default.yaml
+  collectors: [...]
+  analyzers: [...]
+```
+
+Using the `uri:` property, the support bundle gets the latest online specification if it can, or falls back to the collectors and analyzers listed in the specification that is in the cluster.
+
+Note that because the release version 1.0.0 did not contain the URI, Replicated would have to wait until existing users upgrade a cluster before getting the benefit of the new analyzer. Then, going forward, those users get any future online analyzers without having to upgrade. New users who install the version containing the URI as their initial installation automatically get any online analyzers when they generate a support bundle.
+
+For more information about the URI, see [Troubleshoot schema supports a `uri://` field](https://troubleshoot.sh/docs/support-bundle/supportbundle/#uri) in the Troubleshoot documentation. For a complete example, see [Debugging Kubernetes: Enhancements to Troubleshoot](https://www.replicated.com/blog/debugging-kubernetes-enhancements-to-troubleshoot/#Using-online-specs-for-support-bundles) in The Replicated Blog.
+
+================
+File: docs/vendor/support-submit-request.md
+================
+# Submitting a Support Request
+
+You can submit a support request and a support bundle using the Replicated Vendor Portal. Uploading a support bundle is secure and helps the Replicated support team troubleshoot your application faster. Severity 1 issues are resolved three times faster when you submit a support bundle with your support request.
+
+### Prerequisites
+
+The following prerequisites must be met to submit support requests:
+
+* Your Vendor Portal account must be configured for access to support before you can submit support requests. Contact your administrator to ensure that you are added to the correct team.
+
+* Your team must have a replicated-collab repository configured. If you are a team administrator and need information about getting a collab repository set up and adding users, see [Adding Users to the Collab Repository](team-management-github-username#add).
+
+### Submit a Support Request
+
+To submit a support request:
+
+1. From the [Vendor Portal](https://vendor.replicated.com), click **Support > Submit a Support Request** or go directly to the [Support page](https://vendor.replicated.com/support).
+
+1. In section 1 of the Support Request form, complete the fields with information about your issue.
+
+1. In section 2, do _one_ of the following actions:
+   - Use your pre-selected support bundle or select a different bundle in the pick list
+   - Select **Upload and attach a new support bundle** and attach a bundle from your file browser
+
+1. Click **Submit Support Request**. You receive a link to your support issue, where you can interact with the support team.
+
+   :::note
+   Click **Back** to exit without submitting a support request.
+   :::
+
+================
+File: docs/vendor/team-management-github-username.mdx
+================
+import CollabRepoAbout from "../partials/collab-repo/_collab-repo-about.mdx"
+import CollabRbacResourcesImportant from "../partials/collab-repo/_collab-rbac-resources-important.mdx"
+import CollabExistingUser from "../partials/collab-repo/_collab-existing-user.mdx"
+
+
+# Managing Collab Repository Access
+
+This topic describes how to add users to the Replicated collab GitHub repository automatically through the Replicated Vendor Portal. It also includes information about managing user roles in this repository using Vendor Portal role-based access control (RBAC) policies.
+
+## Overview {#overview}
+
+<CollabRepoAbout/>
+
+To get access to the collab repository, members of a Vendor Portal team can add their GitHub username to the [Account Settings](https://vendor.replicated.com/account-settings) page in the Vendor Portal. The Vendor Portal then automatically provisions the team member as a user in the collab repository in GitHub. The RBAC policy that the member is assigned in the Vendor Portal determines the GitHub role that they have in the collab repository.
+
+Replicated recommends that Vendor Portal admins manage user access to the collab repository through the Vendor Portal, rather than manually managing users through GitHub. Managing access through the Vendor Portal has the following benefits:
+* Users are automatically added to the collab repository when they add their GitHub username in the Vendor Portal.
+* Users are automatically removed from the collab repository when they are removed from the Vendor Portal team.
+* Vendor Portal and collab repository RBAC policies are managed from a single location.
+
+## Add Users to the Collab Repository {#add}
+
+This procedure describes how to use the Vendor Portal to access the collab repository for the first time as an Admin, then automatically add new and existing users to the repository. This allows you to use the Vendor Portal to manage the GitHub roles for users in the collab repository, rather than manually adding, managing, and removing users from the repository through GitHub.
+
+### Prerequisite
+
+Your team must have a replicated-collab repository configured to add users to
+the repository and to manage repository access through the Vendor Portal. To get
+a collab support repository configured in GitHub for your team, complete the onboarding
+instructions in the email you received from Replicated. You can also access the [Replicated community help forum](https://community.replicated.com/) for assistance.
+
+### Procedure
+
+To add new and existing users to the collab repository through the Vendor Portal:
+
+1.
As a Vendor Portal admin, log in to your Vendor Portal account. On the [Account Settings](https://vendor.replicated.com/account-settings) page, add your GitHub username and click **Save Changes**.
+
+   <img src="/images/account-info.png" alt="Account info in the Vendor Portal" width="600"/>
+
+   The Vendor Portal automatically adds your GitHub username to the collab repository and assigns it the Admin role. You receive an email with details about the collab repository when you are added.
+
+1. Follow the collab repository link from the email that you receive to log in to your GitHub account and access the repository.
+
+1. (Recommended) Manually remove any users in the collab repository that were previously added through GitHub.
+
+   :::note
+   <CollabExistingUser/>
+   :::
+
+1. (Optional) In the Vendor Portal, go to the [Team](https://vendor.replicated.com/team/members) page. For each team member, click **Edit permissions** as necessary to specify their GitHub role in the collab repository.
+
+   For information about which policies to select, see [About GitHub Roles](#about-github-roles).
+
+1. Instruct each Vendor Portal team member to add their GitHub username to the [Account Settings](https://vendor.replicated.com/account-settings) page in the Vendor Portal.
+
+   The Vendor Portal adds the username to the collab repository and assigns a GitHub role to the user based on their Vendor Portal policy.
+
+   Users receive an email when they are added to the collab repository.
+
+## About GitHub Roles
+
+When team members add a GitHub username to their Vendor Portal account, the Vendor Portal determines how to assign the user a default GitHub role in the collab repository based on the following criteria:
+* Whether the GitHub username already exists in the collab repository
+* The RBAC policy assigned to the member in the Vendor Portal
+
+You can also update any custom RBAC policies in the Vendor Portal to change the default GitHub roles for those policies.
+
+### Default Roles for Existing Users {#existing-username}
+
+<CollabExistingUser/>
+
+### Default Role Mapping {#role-mapping}
+
+When team members add a GitHub username to their Vendor Portal account, the Vendor Portal assigns them a GitHub role in the collab repository that corresponds to their Vendor Portal policy. For example, users with the default Read Only policy in the Vendor Portal are assigned the Read GitHub role in the collab repository.
+
+For team members assigned custom RBAC policies in the Vendor Portal, you can edit the custom policy to change their GitHub role in the collab repository. For more information, see [About Changing the Default GitHub Role](#custom) below.
+
+The table below describes how each default and custom Vendor Portal policy corresponds to a role in the collab repository in GitHub. For more information about each of the GitHub roles described in this table, see [Permissions for each role](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/repository-roles-for-an-organization#permissions-for-each-role) in the GitHub documentation.
+
+<table>
+  <tr>
+    <th width="25%">Vendor Portal Role</th>
+    <th width="25%">GitHub collab Role</th>
+    <th width="50%">Description</th>
+  </tr>
+  <tr>
+    <td>Admin</td>
+    <td>Admin</td>
+    <td><p>Members assigned the default Admin role in the Vendor Portal are assigned the GitHub Admin role in the collab repository.</p></td>
+  </tr>
+  <tr>
+    <td>Support Engineer</td>
+    <td>Triage</td>
+    <td><p>Members assigned the custom Support Engineer role in the Vendor Portal are assigned the GitHub Triage role in the collab repository.</p><p>For information about creating a custom Support Engineer policy in the Vendor Portal, see <a href="team-management-rbac-configuring#support-engineer">Support Engineer</a> in <em>Configuring RBAC Policies</em>.</p><p>For information about editing custom RBAC policies to change this default GitHub role, see <a href="#custom">About Changing the Default GitHub Role</a> below.</p></td>
+  </tr>
+  <tr>
+    <td>Read Only</td>
+    <td>Read</td>
+    <td>Members assigned the default Read Only role in the Vendor Portal are assigned the GitHub Read role in the collab repository.</td>
+  </tr>
+  <tr>
+    <td>Sales</td>
+    <td>N/A</td>
+    <td><p>Users assigned the custom Sales role in the Vendor Portal do not have access to the collab repository.</p><p>For information about creating a custom Sales policy in the Vendor Portal, see <a href="team-management-rbac-configuring#sales">Sales</a> in <em>Configuring RBAC Policies</em>.</p><p>For information about editing custom RBAC policies to change this default GitHub role, see <a href="#custom">About Changing the Default GitHub Role</a> below.</p></td>
+  </tr>
+  <tr>
+    <td>Custom policies with <code>**/admin</code> under <code>allowed:</code></td>
+    <td>Admin</td>
+    <td>
+      <p>By default, members assigned to a custom RBAC policy that specifies <code>**/admin</code> under <code>allowed:</code> are assigned the GitHub Admin role in the collab repository.</p>
+      <p>For information about editing custom RBAC policies to change this default GitHub role, see <a href="#custom">About Changing the Default GitHub Role</a> below.</p>
+    </td>
+  </tr>
+  <tr>
+    <td>Custom policies <em>without</em> <code>**/admin</code> under <code>allowed:</code></td>
+    <td>Read</td>
+    <td>
+      <p>By default, members assigned to any custom RBAC policies that do not specify <code>**/admin</code> under <code>allowed:</code> are assigned the Read GitHub role in the collab repository.</p>
+      <p>For information about editing custom RBAC policies to change this default GitHub role, see <a href="#custom">About Changing the Default GitHub Role</a> below.</p>
+    </td>
+  </tr>
+</table>
+
+### About Changing the Default GitHub Role {#custom}
+
+You can update any custom RBAC policies that you create in the Vendor Portal to change the default GitHub roles for those policies. For example, by default, any team members assigned a custom policy with `**/admin` under `allowed:` are assigned the Admin role in the collab repository in GitHub. You can update the custom policy to specify a more restrictive GitHub role.
+
+To edit a custom policy to change the default GitHub role assigned to users with that policy, add one of the following RBAC resources to the `allowed:` or `denied:` list in the custom policy:
+
+* `team/support-issues/read`
+* `team/support-issues/write`
+* `team/support-issues/triage`
+* `team/support-issues/admin`
+
+For more information about each of these RBAC resources, see [Team](team-management-rbac-resource-names#team) in _RBAC Resource Names_.
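+
+For example, a custom policy like the following sketch (the policy name and the read/list rules here are illustrative, not a built-in policy) gives its members read-only access in the Vendor Portal while assigning them the Triage role in the collab repository:
+
+```json
+{
+  "v1": {
+    "name": "Support Triage",
+    "resources": {
+      "allowed": [
+        "**/read",
+        "**/list",
+        "team/support-issues/triage"
+      ],
+      "denied": [
+        "**/*"
+      ]
+    }
+  }
+}
+```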
+
+For more information about how to edit the `allowed:` or `denied:` lists for custom policies in the Vendor Portal, see [Configuring RBAC Policies](team-management-rbac-configuring).
+
+<CollabRbacResourcesImportant/>
+
+================
+File: docs/vendor/team-management-google-auth.md
+================
+# Managing Google Authentication
+
+This topic describes the Google authentication options that you can configure to control access to the Replicated Vendor Portal.
+
+## Manage Google Authentication Options
+
+As a team administrator, you can enable, disable, or require Google authentication for all accounts in the team.
+
+A core benefit of using Google authentication is that when a user's Google account is suspended or deleted, Replicated logs that user out of all Google-authenticated Vendor Portal sessions within 10 minutes. The user remains in the team list, but they cannot log in to the Vendor Portal unless username and password authentication is also allowed. Requiring Google authentication is an effective way of centrally removing access to the Vendor Portal.
+
+To manage Google authentication settings:
+
+1. Click **Team Settings > [Google Authentication](https://vendor.replicated.com/team/google-authentication)**.
+
+   ![Google Auth Settings](/images/team-mgmt-google-auth.png)
+
+1. Enable or disable the settings:
+
+   | Field | Instructions |
+   |-----------------------|------------------------|
+   | Allow Google authentication for team members | Enables team members to log in using a Google account. |
+   | Restrict login to only allow Google authentication | Requires new users to accept an invitation and sign up with a Google account that exactly matches the email address that was invited to the team. The email address can be a gmail.com address or an address from another domain, but it must match the email address from the invitation exactly. Disabling this setting requires users to accept the invitation by creating a username and password (or using the SAML workflow). |
+
+
+## Migrating Existing Accounts
+Excluding some teams that restrict end users to use only Security Assertion Markup Language (SAML) or require two-factor authentication (2FA), existing end users can seamlessly sign in to an account that exactly matches their Google Workspace (formerly G Suite) email address. However, Google authentication only matches existing user accounts. If you signed up using a task-based email address (such as name+news@domain.com), you can continue to use email/password to sign in, invite your normal email address to your team, or contact support to change your email address. For more information about task-based email addresses, see [Create task-specific email addresses](https://support.google.com/a/users/answer/9308648?hl=en) in the Google Support site.
+
+Migrated accounts maintain the same role-based access control (RBAC) permissions that were previously assigned. After signing in with Google, users can choose to disable username/password-based authentication on their account or maintain both authentication methods using the Vendor Portal [account settings page](https://vendor.replicated.com/account-settings).
+
+## Limitations
+
+Using distribution lists to send invitations to join a team is not supported. The invitations are sent, but are invalid and cannot be used to join a team using Google authentication.
+
+## Compatibility with Two-Factor Authentication
+Google authentication is not fully compatible with the Replicated two-factor authentication (2FA) implementation because Google authentication bypasses account-based 2FA, relying on your Google account's authentication instead. However, the Vendor Portal continues to enforce 2FA on all email/password-based authentication, even for the same user, if both options are enabled.
+
+## Related Topic
+
+[Managing Team Members](team-management)
+
+================
+File: docs/vendor/team-management-rbac-configuring.md
+================
+import CollabRbacResourcesImportant from "../partials/collab-repo/_collab-rbac-resources-important.mdx"
+
+# Configuring RBAC Policies
+
+This topic describes how to use role-based access control (RBAC) policies to grant or deny team members permissions to use Replicated services in the Replicated Vendor Portal.
+
+## About RBAC Policies
+
+By default, every team has two policies created automatically: **Admin** and **Read Only**. If you have an Enterprise plan, the **Sales** and **Support Engineer** policies are also created automatically. These default policies are not configurable. For more information, see [Default RBAC Policies](#default-rbac) below.
+
+You can configure custom RBAC policies if you are on the Enterprise pricing plan. Creating custom RBAC policies lets you limit which areas of the Vendor Portal are accessible to team members, and control read and read/write privileges for groups based on their role. For example, you can limit access for the sales team to one application and to specific channels. Or, you can grant only certain users permission to promote releases to your production channels.
+
+You can also create custom RBAC policies in the Vendor Portal to manage user access and permissions in the Replicated collab repository in GitHub. For more information, see [Managing Collab Repository Access](team-management-github-username).
+
+## Default RBAC Policies {#default-rbac}
+
+This section describes the default RBAC policies that are included for Vendor Portal teams, depending on the team's Replicated pricing plan.
+
+### Admin
+
+The Admin policy grants read/write permissions to all resources on the team.
+
+:::note
+This policy is automatically created for all plans.
+:::
+
+```json
+{
+  "v1": {
+    "name": "Admin",
+    "resources": {
+      "allowed": [
+        "**/*"
+      ],
+      "denied": []
+    }
+  }
+}
+```
+
+### Read Only
+
+The Read Only policy grants read permission to all resources on the team except for API tokens.
+
+:::note
+This policy is automatically created for all plans.
+:::
+
+```json
+{
+  "v1": {
+    "name": "Read Only",
+    "resources": {
+      "allowed": [
+        "**/list",
+        "**/read"
+      ],
+      "denied": [
+        "**/*"
+      ]
+    }
+  }
+}
+```
+
+### Support Engineer
+
+The Support Engineer policy grants read access to releases, channels, and application data, and read-write access to customer and license details. It also grants permission to open Replicated support issues and upload support bundles.
+
+:::note
+This policy is automatically created for teams with the Enterprise plan only.
+:::
+
+```json
+{
+  "v1": {
+    "name": "Support Engineer",
+    "resources": {
+      "allowed": [
+        "**/read",
+        "**/list",
+        "kots/app/*/license/**",
+        "team/support-issues/read",
+        "team/support-issues/write"
+      ],
+      "denied": [
+        "**/*"
+      ]
+    }
+  }
+}
+```
+
+### Sales
+
+The Sales policy grants read-write access to customers and license details and read-only access to resources necessary to manage licenses (applications, channels, and license fields). No additional access is granted.
+
+:::note
+This policy is automatically created for teams with the Enterprise plan only.
+:::
+
+```json
+{
+  "v1": {
+    "name": "Sales",
+    "resources": {
+      "allowed": [
+        "kots/app/*/read",
+        "kots/app/*/channel/*/read",
+        "kots/app/*/licensefields/read",
+        "kots/app/*/license/**"
+      ],
+      "denied": [
+        "**/*"
+      ]
+    }
+  }
+}
+```
+
+## Configure a Custom RBAC Policy
+
+To configure a custom RBAC policy:
+
+1. From the Vendor Portal [Team page](https://vendor.replicated.com/team), select **RBAC** from the left menu.
+
+1. Do _one_ of the following:
+
+   - Click **Create Policy** from the RBAC page to create a new policy.
+   - Click **View policy** to edit an existing custom policy in the list.
+
+   <CollabRbacResourcesImportant/>
+
+1. Edit the fields in the policy dialog. In the **Definition** pane, specify the `allowed` and `denied` arrays in the `resources` key to create limits for the role.
+
+   The default policy allows everything and the **Config help** pane displays any errors.
+
+   ![Create RBAC Policy](/images/policy-create.png)
+
+   - For more information, see [Policy Definition](#policy-definition).
+   - For more information about and examples of rule order, see [Rule Order](#rule-order).
+   - For a list of resource names, see [RBAC Resource Names](team-management-rbac-resource-names).
+
+1. Click **Create Policy** to create a new policy, or click **Update Policy** to update an existing policy.
+
+   :::note
+   Click **Cancel** to exit without saving changes.
+   :::
+
+1. To apply RBAC policies to Vendor Portal team members, you can:
+
+   - Assign policies to existing team members
+   - Specify a policy when inviting new team members
+   - Set a default policy for auto-joining a team
+
+   See [Managing Team Members](team-management).
+
+## Policy Definition
+
+A policy is defined in a single JSON document:
+
+```json
+{
+  "v1": {
+    "name": "Read Only",
+    "resources": {
+      "allowed": [
+        "**/read",
+        "**/list"
+      ],
+      "denied": [
+        "**/*"
+      ]
+    }
+  }
+}
+```
+
+The primary content of a policy document is the `resources` key. The `resources` key should contain two arrays, identified as `allowed` and `denied`. Resources specified in the `allowed` list are allowed for users assigned to the policy, and resources specified in the `denied` list are denied.
+
+Resource names are hierarchical, and support wildcards and globs. For a complete list of resource names that can be defined in a policy document, see [RBAC Resource Names](team-management-rbac-resource-names).
+
+When a policy document has conflicting rules, the behavior is predictable. For more information about conflicting rules, see [Rule Order](#rule-order).
+
+### Example: View Specific Application and Channel
+
+The following policy definition example limits any user with this role to viewing a specific application and a specific channel for that application:
+
+```json
+{
+  "v1": {
+    "name": "Policy Name",
+    "resources": {
+      "allowed": [
+        "kots/app/appID/list",
+        "kots/app/appID/read",
+        "kots/app/appID/channel/channelID/list",
+        "kots/app/appID/channel/channelID/read"
+      ],
+      "denied": []
+    }
+  }
+}
+```
+
+The example above uses an application ID and a channel ID to scope the permissions of the RBAC policy. To find your application and channel IDs, do the following:
+
+- To get the application ID, click **Settings > Show Application ID (Advanced)** in the Vendor Portal.
+
+- To get the channel ID, click **Channels** in the Vendor Portal. Then click the Release History link for the channel that you want to limit access to. The channel ID displays in your browser URL.
+
+## Rule Order
+
+When a resource name is specified in both the `allowed` and the `denied` lists of a policy, defined rules determine which rule is applied.
+
+If `denied` is left empty, it is implied as a `**/*` rule, unless a `**/*` rule is specified in the `allowed` resources. If a rule exactly conflicts with another rule, the `denied` rule takes precedence.
+
+### Defining Precedence Using Rule Specificity
+The most specific rule definition is always applied when compared with less specific rules. Specificity of a rule is calculated by the number of asterisks (`**` and `*`) in the definition. A `**` in the rule definition is the least specific, followed by rules with `*`, and finally rules with no wildcards as the most specific.
+
+### Example: No Access To Stable Channel
+
+In the following example, a policy grants access to promote releases to any channel except the Stable channel. It uses the rule pattern `kots/app/[:appId]/channel/[:channelId]/promote`. Note that you specify the channel ID, rather than the channel name. To find the channel ID, go to the Vendor Portal **Channels** page and click the **Settings** icon for the target channel.
+
+```json
+{
+  "v1": {
+    "name": "No Access To Stable Channel",
+    "resources": {
+      "allowed": [
+        "**/*"
+      ],
+      "denied": [
+        "kots/app/*/channel/1eg7CyEofYSmVAnK0pEKUlv36Y3/promote"
+      ]
+    }
+  }
+}
+```
+
+### Example: View Customers Only
+
+In the following example, a policy grants access to viewing all customers, but not to creating releases, promoting releases, or creating new customers.
+
+```json
+{
+  "v1": {
+    "name": "View Customers Only",
+    "resources": {
+      "allowed": [
+        "kots/app/*/license/*/read",
+        "kots/app/*/license/*/list",
+        "kots/app/*/read",
+        "kots/app/*/list"
+      ],
+      "denied": [
+        "**/*"
+      ]
+    }
+  }
+}
+```
+
+================
+File: docs/vendor/team-management-rbac-resource-names.md
+================
+import CollabRbacResourcesImportant from "../partials/collab-repo/_collab-rbac-resources-important.mdx"
+
+# RBAC Resource Names
+
+This is a list of all available resource names for Replicated vendor role-based access control (RBAC) policies:
+
+## Integration Catalog
+
+### integration/catalog/list
+
+Grants the holder permission to view the catalog events and triggers available for integrations.
+
+## kots
+
+### kots/app/create
+
+Grants the holder permission to create new applications.
+
+### kots/app/[:appId]/read
+
+Grants the holder permission to view the application. If the holder does not have permissions to view an application, it will not appear in lists.
+
+### kots/externalregistry/list
+Grants the holder the ability to list external Docker registries for application(s).
+
+### kots/externalregistry/create
+
+Grants the holder the ability to link a new external Docker registry to application(s).
+
+### kots/externalregistry/[:registryName]/delete
+
+Grants the holder the ability to delete the specified linked external Docker registry in application(s).
+
+### kots/app/[:appId]/channel/create
+
+Grants the holder the ability to create a new channel in the specified application(s).
+
+### kots/app/[:appId]/channel/[:channelId]/archive
+
+Grants the holder permission to archive the specified channel(s) of the specified application(s).
+
+### kots/app/[:appId]/channel/[:channelId]/promote
+
+Grants the holder the ability to promote a new release to the specified channel(s) of the specified application(s).
+
+### kots/app/[:appId]/channel/[:channelId]/update
+
+Grants the holder permission to update the specified channel of the specified application(s).
+
+### kots/app/[:appId]/channel/[:channelId]/read
+
+Grants the holder permission to view information about the specified channel of the specified application(s).
+
+### kots/app/[:appId]/enterprisechannel/[:channelId]/read
+
+Grants the holder permission to view information about the specified enterprise channel of the specified application(s).
+
+### kots/app/[:appId]/channel/[:channelId]/releases/airgap
+
+Grants the holder permission to trigger airgap builds for the specified channel.
+
+### kots/app/[:appId]/channel/[:channelId]/releases/airgap/download-url
+
+Grants the holder permission to get an airgap bundle download URL for any release on the specified channel.
+
+### kots/app/[:appId]/installer/create
+
+Grants the holder permission to create kURL installers. For more information, see [Creating a kURL installer](packaging-embedded-kubernetes).
+
+### kots/app/[:appId]/installer/update
+
+Grants the holder permission to update kURL installers. For more information, see [Creating a kURL installer](packaging-embedded-kubernetes).
+
+### kots/app/[:appId]/installer/read
+
+Grants the holder permission to view kURL installers. For more information, see [Creating a kURL installer](packaging-embedded-kubernetes).
+
+### kots/app/[:appId]/installer/promote
+
+Grants the holder permission to promote kURL installers to a channel. For more information, see [Creating a kURL installer](packaging-embedded-kubernetes).
+
+:::note
+The `kots/app/[:appId]/installer/promote` policy does not grant the holder permission to view and create installers. Users must be assigned both the `kots/app/[:appId]/installers` and `kots/app/[:appId]/installer/promote` policies to have permissions to view, create, and promote installers.
+:::
+
+### kots/app/[:appId]/license/create
+
+Grants the holder permission to create a new license in the specified application(s).
+
+### kots/app/[:appId]/license/[:customerId]/read
+
+Grants the holder permission to view the license specified by ID. If this is denied, the licenses will not show up in search, CSV export, or on the Vendor Portal, and the holder will not be able to subscribe to this license's instance notifications.
+
+### kots/app/[:appId]/license/[:customerId]/update
+
+Grants the holder permission to edit the license specified by ID for the specified application(s).
+
+### kots/app/[:appId]/license/[:customerId]/slack-notifications/read
+
+Grants the holder permission to view the team's Slack notification subscriptions for instances associated with the specified license.
+
+### kots/app/[:appId]/license/[:customerId]/slack-notifications/update
+
+Grants the holder permission to edit the team's Slack notification subscriptions for instances associated with the specified license.
+
+### kots/app/[:appId]/builtin-licensefields/update
+
+Grants the holder permission to edit the built-in license field override values for the specified application(s).
+
+### kots/app/[:appId]/builtin-licensefields/delete
+
+Grants the holder permission to delete the built-in license field override values for the specified application(s).
+
+### kots/license/[:customerId]/airgap/password
+
+Grants the holder permission to generate a new download portal password for the license specified (by ID) for the specified application(s).
+
+### kots/license/[:customerId]/archive
+
+Grants the holder permission to archive the specified license (by ID).
+
+### kots/license/[:customerId]/unarchive
+
+Grants the holder permission to unarchive the specified license (by ID).
+
+### kots/app/[:appId]/licensefields/create
+
+Grants the holder permission to create new license fields in the specified application(s).
+
+### kots/app/[:appId]/licensefields/read
+
+Grants the holder permission to view the license fields in the specified application(s).
+
+### kots/app/[:appId]/licensefields/update
+
+Grants the holder permission to edit the license fields for the specified application(s).
+
+### kots/app/[:appId]/licensefields/delete
+
+Grants the holder permission to delete the license fields for the specified application(s).
+
+### kots/app/[:appId]/release/create
+
+Grants the holder permission to create a new release in the specified application(s).
+
+### kots/app/[:appId]/release/[:sequence]/update
+
+Grants the holder permission to update the files saved in release sequence `[:sequence]` in the specified application(s). After a release is promoted to a channel, it is no longer editable by anyone.
+
+### kots/app/[:appId]/release/[:sequence]/read
+
+Grants the holder permission to read the files at release sequence `[:sequence]` in the specified application(s).
+
+### kots/app/[:appId]/customhostname/list
+
+Grants the holder permission to view custom hostnames for the team.
+
+### kots/app/[:appId]/customhostname/create
+
+Grants the holder permission to create custom hostnames for the team.
+
+### kots/app/[:appId]/customhostname/delete
+
+Grants the holder permission to delete custom hostnames for the team.
+
+### kots/app/[:appId]/customhostname/default/set
+
+Grants the holder permission to set default custom hostnames.
+
+### kots/app/[:appId]/customhostname/default/unset
+
+Grants the holder permission to unset the default custom hostnames.
+
+### kots/app/[:appId]/supportbundle/read
+
+Grants the holder permission to view and download support bundles.
+
+## Registry
+
+### registry/namespace/:namespace/pull
+
+Grants the holder permission to pull images from the Replicated registry.
+
+### registry/namespace/:namespace/push
+
+Grants the holder permission to push images into the Replicated registry.
+
+## Compatibility Matrix
+
+### kots/cluster/create
+
+Grants the holder permission to create new clusters.
+
+### kots/cluster/list
+
+Grants the holder permission to list running and terminated clusters.
+
+### kots/cluster/[:clusterId]
+
+Grants the holder permission to get cluster details.
+
+### kots/cluster/[:clusterId]/upgrade
+
+Grants the holder permission to upgrade a cluster.
+
+### kots/cluster/tag/update
+
+Grants the holder permission to update cluster tags.
+
+### kots/cluster/ttl/update
+
+Grants the holder permission to update the cluster TTL.
+
+### kots/cluster/[:clusterId]/nodegroup
+
+Grants the holder permission to update nodegroup details.
+
+### kots/cluster/[:clusterId]/kubeconfig
+
+Grants the holder permission to get the kubeconfig for a cluster.
+
+### kots/cluster/[:clusterId]/delete
+
+Grants the holder permission to delete a cluster.
+
+### kots/cluster/[:clusterId]/addon/list
+
+Grants the holder permission to list addons for a cluster.
+
+### kots/cluster/[:clusterId]/addon/[:addonId]/read
+
+Grants the holder permission to read the addon for a cluster.
+
+### kots/cluster/[:clusterId]/addon/[:addonId]/delete
+
+Grants the holder permission to delete the addon for a cluster.
+
+### kots/cluster/[:clusterId]/addon/create/objectStore
+
+Grants the holder permission to create an object store for a cluster.
+
+### kots/cluster/[:clusterId]/port/expose
+
+Grants the holder permission to expose a port for a cluster.
+
+### kots/cluster/[:clusterId]/port/delete
+
+Grants the holder permission to delete a port for a cluster.
+
+### kots/cluster/[:clusterId]/port/list
+
+Grants the holder permission to list exposed ports for a cluster.
+
+### kots/cluster/list-quotas
+
+Grants the holder permission to list the quotas.
+
+### kots/cluster/increase-quota
+
+Grants the holder permission to request an increase in the quota.
+
+### kots/vm/tag/update
+
+Grants the holder permission to update VM tags.
+
+### kots/vm/ttl/update
+
+Grants the holder permission to update the VM TTL.
+
+### kots/vm/[:vmId]/port/expose
+
+Grants the holder permission to expose a port for a VM.
+
+### kots/vm/[:vmId]/port/list
+
+Grants the holder permission to list exposed ports for a VM.
+
+### kots/vm/[:vmId]/addon/[:addonId]/delete
+
+Grants the holder permission to delete the addon for a VM.
+
+## Team
+
+### team/auditlog/read
+
+Grants the holder permission to view the audit log for the team.
+
+### team/authentication/update
+
+Grants the holder permission to manage the following team authentication settings: Google authentication, Auto-join, and SAML authentication.
+
+### team/authentication/read
+
+Grants the holder permission to read the following authentication settings: Google authentication, Auto-join, and SAML authentication.
+
+### team/integration/list
+
+Grants the holder permission to view the team's integrations.
+
+### team/integration/create
+
+Grants the holder permission to create an integration.
+
+### team/integration/[:integrationId]/delete
+
+Grants the holder permission to delete the specified integration(s).
+
+### team/integration/[:integrationId]/update
+
+Grants the holder permission to update the specified integration(s).
+
+### team/members/list
+
+Grants the holder permission to list team members and invitations.
+
+### team/member/invite
+
+Grants the holder permission to invite additional people to the team.
+
+### team/members/delete
+
+Grants the holder permission to delete other team members.
+
+### team/notifications/slack-webhook/read
+
+Grants the holder permission to view the team's Slack webhook for instance notifications.
+
+### team/notifications/slack-webhook/update
+
+Grants the holder permission to edit the team's Slack webhook for instance notifications.
+
+### team/policy/read
+
+Grants the holder permission to view RBAC policies for the team.
+
+### team/policy/update
+
+Grants the holder permission to update RBAC policies for the team.
+
+### team/policy/delete
+
+Grants the holder permission to delete RBAC policies for the team.
+
+### team/policy/create
+
+Grants the holder permission to create RBAC policies for the team.
+
+### team/security/update
+
+Grants the holder permission to manage team password requirements, including two-factor authentication and password complexity.
+
+### team/serviceaccount/list
+
+Grants the holder permission to list service accounts.
+
+### team/serviceaccount/create
+
+Grants the holder permission to create new service accounts.
+
+### team/serviceaccount/[:name]/delete
+
+Grants the holder permission to delete the service account identified by the specified name.
+
+### team/support-issues/read
+
+Grants the holder Read permissions in the Replicated collab repository in GitHub for the Vendor Portal team. Applies after the user adds their GitHub username to the Vendor Portal [Account Settings](https://vendor.replicated.com/account-settings) page.
+
+To prevent access to the collab repository for an RBAC policy, add `team/support-issues/read` to the `denied:` list in the policy. For example:
+
+```json
+{
+  "v1": {
+    "name": "Policy Name",
+    "resources": {
+      "allowed": [],
+      "denied": [
+        "team/support-issues/read"
+      ]
+    }
+  }
+}
+```
+
+For more information about the Read role in GitHub, see [Permissions for each role](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/repository-roles-for-an-organization#permissions-for-each-role) in the GitHub documentation.
+
+<CollabRbacResourcesImportant/>
+
+### team/support-issues/write
+
+Grants the holder Write permissions in the Replicated collab repository in GitHub for the Vendor Portal team. Applies after the user adds their GitHub username to the Vendor Portal [Account Settings](https://vendor.replicated.com/account-settings) page.
+
+For more information about the Write role in GitHub, see [Permissions for each role](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/repository-roles-for-an-organization#permissions-for-each-role) in the GitHub documentation.
+
+<CollabRbacResourcesImportant/>
+
+### team/support-issues/triage
+
+Grants the holder Triage permissions in the Replicated collab repository in GitHub for the Vendor Portal team. Applies after the user adds their GitHub username to the Vendor Portal [Account Settings](https://vendor.replicated.com/account-settings) page.
+
+For more information about the Triage role in GitHub, see [Permissions for each role](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/repository-roles-for-an-organization#permissions-for-each-role) in the GitHub documentation.
+
+<CollabRbacResourcesImportant/>
+
+### team/support-issues/admin
+
+Grants the holder Admin permissions in the Replicated collab repository in GitHub for the Vendor Portal team. Applies after the user adds their GitHub username to the Vendor Portal [Account Settings](https://vendor.replicated.com/account-settings) page.
+
+For more information about the Admin role in GitHub, see [Permissions for each role](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/repository-roles-for-an-organization#permissions-for-each-role) in the GitHub documentation.
+
+<CollabRbacResourcesImportant/>
+
+## User
+
+### user/token/list
+
+Grants the holder permission to list user tokens.
+
+### user/token/create
+
+Grants the holder permission to create new user tokens.
+
+### user/token/delete
+
+Grants the holder permission to delete user tokens.
+
+================
+File: docs/vendor/team-management-saml-auth.md
+================
+# Managing SAML Authentication
+
+This topic describes how to enable or disable SAML authentication for the Replicated Vendor Portal.
+
+## About Using SAML with the Vendor Portal
+
+After starting out with Replicated, most teams grow, adding more developers, support engineers, and sales engineers. Eventually, managing access to the Vendor Portal can become difficult. Replicated supports logging in using SAML, which lets you manage access (provisioning and unprovisioning accounts) through your SAML identity provider.
+
+Using SAML, everyone on your team logs in with their existing usernames and passwords through your identity provider's dashboard. Users do not need to sign up through the Vendor Portal or log in with a separate Vendor Portal account, simplifying their experience.
+
+### Enabling SAML in Your Vendor Account
+
+To enable SAML in your Vendor Portal account, you must have an Enterprise plan. For access to SAML, you can contact Replicated through [Support](https://vendor.replicated.com/support). For information about the Enterprise plan, see [pricing](https://www.replicated.com/pricing/).
+
+### SCIM
+
+Replicated does not implement System for Cross-domain Identity Management (SCIM). Instead, we use SAML to authenticate and create just-in-time user identities in our system. We resolve the username (email address) as the actor and use this to ensure that audit log events follow these dynamically provisioned users. If a user's email address is already associated with a Replicated account, by using your SAML integration to access the Vendor Portal, they automatically leave their current team and join the team associated with the SAML login.
+
+### Compatibility with Two-Factor Authentication
+
+If SAML authentication is configured for your team, Replicated two-factor authentication (2FA) is bypassed. You can leave 2FA enabled, but you are not prompted to enter a code when logging in.
+
+### Role-Based Access Control
+
+Replicated supports role-based access control (RBAC) in the Vendor Portal. To use RBAC with SAML, you must configure policies and add users to the policies by their username. Usernames are the identity of the user in your identity provider (IdP). Typically, this username is the full email address. For more information about configuring RBAC, see [Configuring RBAC Policies](team-management-rbac-configuring).
+
+## Downloading Certificates from Supported SAML Providers
+
+You must retrieve the metadata and x.509 public certificate files from your SAML provider before configuring SAML in the Vendor Portal. The certificate file must be in PEM format.
+
+Replicated tests several SAML providers, but the service should be compatible with any SAML 2.0 compliant service provider. We provide full support for the following SAML providers:
+
+* Okta. For more information about integrating Okta with Replicated, see [Configure Okta](#configure-okta).
+
+* OneLogin
+
+
+## Configure Okta
+
+The first part of the Vendor Portal and Okta integration is configured in the Okta dashboard. 
This configuration lets you download the XML Metadata file and x.509 public certificate in PEM format required for SAML authentication.
+
+This procedure outlines the basic configuration steps, recommended settings, and the specific fields to configure in Okta. For more information about using Okta, see the [Okta](https://help.okta.com/en/prod/Content/index.htm) documentation.
+
+To configure Okta and download the required files:
+
+1. Log in to your Okta Admin dashboard, and click **Applications**.
+
+1. Select **Create new app integration**, and create a new application as a SAML 2.0 application.
+
+1. Provide a name and icon for the application, such as Replicated Vendor Portal. You can download a high-quality Replicated icon [here](https://help.replicated.com/images/guides/vendor-portal-saml/replicated-application-icon.png).
+
+1. Click **Next**.
+
+   The Configuring SAML page opens.
+
+1. Click **Download Okta Certificate**. This downloads your x.509 certificate to provide to Replicated. Save this file to a safe location.
+
+1. On this same page, edit the following fields:
+
+   | Field Name | Description |
+   | :---------------------- | ----------------------------------------------------------------------------------------------- |
+   | Single Sign On URL | Set this to `https://id.replicated.com/v1/saml`. |
+   | Audience URI (SP Entity ID) | Displays on the Vendor Portal [SAML authentication](https://vendor.replicated.com/team/saml-authentication) tab, and is unique to your team. |
+   | Name ID Format | Change this to `EmailAddress`. |
+
+1. Click **Next**.
+
+1. Select **I’m an Okta customer adding an internal app** on the final screen, and click **Finish**.
+
+1. Click **Identity provider metadata** to download the Metadata.xml file. If the metadata opens in your browser instead of downloading, right-click the link and select **Save Link As…** to save the file.
+
+### Next Step
+
+Configure and enable SAML in the Vendor Portal. For more information, see [Configure SAML](#configure-saml).
+
+## Configure SAML
+
+When you initially configure SAML, we do not recommend that you disable username/password access at the same time. It is possible, and recommended during testing, to support both SAML and non-SAML authentication on your account simultaneously.
+
+**Prerequisite**
+
+- Download your XML Metadata file and x.509 public certificate PEM file from your SAML provider. For more information on supported SAML providers and how to find these files, see [Downloading Certificates from Supported SAML Providers](#downloading-certificates-from-supported-saml-providers).
+
+To configure SAML:
+
+1. Log in to the Vendor Portal [Team Members page](https://vendor.replicated.com/team/members) as a user with Admin access.
+1. Click [SAML Authentication](https://vendor.replicated.com/team/saml-authentication) from the left menu. If you do not see these options, contact [Support](https://vendor.replicated.com/support).
+
+   The SAML Authentication page opens.
+
+   ![SAML Authentication](/images/team-mgmt-saml-authentication.png)
+
+   [View a larger version of this image](/images/team-mgmt-saml-authentication.png)
+
+1. Browse for, or drag and drop, your XML Metadata file and x.509 PEM file from your SAML provider.
+
+1. Click **Upload Metadata & Cert**.
+
+### Next Step
+
+At this point, SAML is configured, but not enabled. The next step is to enable SAML enforcement options. For more information, see [Enable SAML Enforcement](#enable-saml-enforcement).
+
+## Enable SAML Enforcement
+
+After you have uploaded the metadata and x.509 public certificate PEM file, you must enable SAML enforcement options. Replicated provides options that can be enabled or disabled at any time. You can also change the IdP metadata if needed.
+
+To enable SAML enforcement:
+
+1. From the Vendor Portal, select **Team > [SAML Authentication](https://vendor.replicated.com/team/saml-authentication)**.
+
+1. Select either or both login method options in the Manage your SAML authentication pane. Allowing both login methods is a good way to test SAML without risking any interruption for the rest of your team.
+
+   **Enable SAML for team logins** - Allows members of your team to log in to the Vendor Portal through your identity provider. This option does not remove, change, or restrict any other authentication methods that you have configured in the Vendor Portal. If you enable SAML and your team is already logging in with accounts provisioned in the Vendor Portal, they will be able to continue logging in with those accounts.
+
+   **Only allow SAML logins** - Requires members of your team to log in to the Vendor Portal through your identity provider. Prevents any non-SAML accounts from logging in. Replicated does not delete the existing accounts. If you turn on this option and then later disable it, accounts that never logged in using SAML will be able to log in again. If an account exists outside of SAML and then is authenticated with SAML, the account is converted and cannot authenticate using a password again.
+
+   ![SAML Authentication](/images/team-mgmt-saml-manage-auth.png)
+
+   [View a larger version of this image](/images/team-mgmt-saml-manage-auth.png)
+
+1. (Optional) Set a default policy for new accounts from the drop-down list.
+1. (Optional) Click **Change IdP Metadata** and follow the prompts to upload any changes to your metadata.
+
+SAML is now enabled on your account. For your team to use the SAML login option, you must enable access through your SAML identity provider’s dashboard. For example, if you use Okta, assign the application to users or groups. When a user clicks through to use the application, they are granted access as described in [SCIM](#scim).
+
+## Disable SAML Enforcement
+
+You can disable SAML authentication options at any time and re-enable them later if needed.
+
+To disable SAML enforcement:
+
+1. From the Vendor Portal, select **Team > SAML Authentication**.
+
+1. Click **Deprovision SAML** in the Manage your SAML authentication pane.
+
+   ![SAML Authentication](/images/team-mgmt-saml-manage-auth.png)
+
+   [View a larger version of this image](/images/team-mgmt-saml-manage-auth.png)
+
+================
+File: docs/vendor/team-management-slack-config.mdx
+================
+import NotificationsAbout from "../partials/instance-insights/_notifications-about.mdx"
+
+
+# Configuring a Slack Webhook (Beta)
+
+As a vendor, anyone on your team can set up Slack notifications, which are sent to a shared Slack channel. Notifications give your team visibility into customer instance statuses and changes.
+
+<NotificationsAbout/>
+
+While email notifications are specific to each user, Slack notification settings are shared, viewable, and editable by the entire team. Any changes made by a team member impact the team.
+
+## Limitations
+
+As a Beta feature, the following limitations apply:
+
+- Only one Slack channel per team is supported.
+
+- RBAC policies are not supported for configuring granular permissions.
+
+## Prerequisite
+
+Create a Slack webhook URL. For more information, see [Sending Messages using Incoming Webhooks](https://api.slack.com/messaging/webhooks) in the Slack API documentation.
+
+Make sure to keep the URL secure because it contains a secret that allows write access to one or more channels in your Slack workspace.
+
+## Configure the Webhook in the Vendor Portal
+
+When you enable Slack notifications for a team, you must first configure the Slack webhook in the Vendor Portal. Typically, you only do this one time. Then you can configure notifications for individual customer instances.
+
+To configure the Slack webhook:
+
+1. From the **[Team](https://vendor.replicated.com/team/members)** page in the Vendor Portal, click **Slack Notifications**.
+
+1. On the **Slack Notifications Setup** page, paste the Slack webhook URL. Click **Save**.
+
+## Next Step
+
+[Configure Slack notifications for customer instances](instance-notifications-config).
+
+================
+File: docs/vendor/team-management-two-factor-auth.md
+================
+# Managing Two-Factor Authentication
+
+This topic describes how to enable and disable Replicated two-factor authentication for individual and team accounts in the Replicated Vendor Portal.
+
+Alternatively, you can use Google Authentication or SAML Authentication to access the Vendor Portal. For more information about those options, see [Managing Google Authentication](team-management-google-auth) and [Managing SAML Authentication](team-management-saml-auth).
+
+## About Two-Factor Authentication
+
+Two-factor authentication (2FA) provides additional security by requiring two methods of authentication to access resources and data. When you enable the 2FA option in the Vendor Portal, you are asked to provide an authentication code and your password during authentication. Replicated uses the open Time-based One-Time Password (TOTP) algorithm, which is specified by the Internet Engineering Task Force (IETF) in RFC 6238.
+
+## Limitation
+
+If SAML Authentication or Google Authentication is configured and 2FA is also enabled, then 2FA is bypassed. You can leave 2FA enabled, but you are not prompted to enter a code when logging in.
+
+## Enable 2FA on Individual Accounts
+
+If you are an administrator or if 2FA is enabled for your team, you can enable 2FA on your individual account.
+
+To enable two-factor authentication on your individual account:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), click **Account Settings** from the dropdown list in the upper right corner of the screen.
+
+   <img src="/images/vendor-portal-account-settings.png" alt="Vendor portal account settings" width="200"/>
+
+   [View a larger version of this image](/images/vendor-portal-account-settings.png)
+
+1. In the **Two-Factor Authentication** pane, click **Turn on two-factor authentication**.
+
+   <img src="/images/vendor-portal-password-2fa.png" alt="Turn on 2FA in the Vendor Portal" width="600"/>
+
+   [View a larger version of this image](/images/vendor-portal-password-2fa.png)
+
+1. In the **Confirm password** dialog, enter your Vendor Portal account password. Click **Confirm password**.
+
+1. Scan the QR code that displays using a supported two-factor authentication application on your mobile device, such as Google Authenticator. Alternatively, click **Use this text code** in the Vendor Portal to generate an alphanumeric code that you enter in the mobile application.
+
+   <img src="/images/vendor-portal-scan-qr.png" alt="Turn on 2FA in the Vendor Portal" width="400"/>
+
+   [View a larger version of this image](/images/vendor-portal-scan-qr.png)
+
+   Your mobile application displays an authentication code.
+
+1. Enter the authentication code in the Vendor Portal.
+
+   Two-factor authentication is enabled and a list of recovery codes is displayed at the bottom of the **Two-Factor Authentication** pane.
+
+1. Save the recovery codes in a secure location. These codes can be used at any time (one time per code) if you lose your mobile device.
+
+1. Log out of your account, then log back in to test that 2FA is enabled. You are prompted to enter a one-time code generated by the application on your mobile device.
+
+
+## Disable 2FA on Individual Accounts
+
+To disable two-factor authentication on your individual account:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), click **Account Settings** from the dropdown list in the upper right corner of the screen.
+
+   <img src="/images/vendor-portal-account-settings.png" alt="Vendor portal account settings" width="200"/>
+
+   [View a larger version of this image](/images/vendor-portal-account-settings.png)
+
+1. In the **Two-Factor Authentication** pane, click **Turn off two-factor authentication**.
+
+1. In the **Confirm password** dialog, enter your Vendor Portal account password. Click **Confirm password**.
+
+## Enable or Disable 2FA for a Team
+
+As an administrator, you can enable and disable 2FA for teams. You must first enable 2FA on your individual account before you can enable 2FA for teams. After you enable 2FA for your team, team members can enable 2FA on their individual accounts.
+
+To enable or disable 2FA for a team:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), select the **Team** tab, then select **Multifactor Auth**.
+
+   <img src="/images/team-2fa-auth.png" alt="Multifactor authentication for teams in the Vendor Portal" width="600"/>
+
+   [View a larger image](/images/team-2fa-auth.png)
+
+1. On the **Multifactor Authentication** page, do one of the following with the **Require Two-Factor Authentication for all Username/Password authenticating users** toggle:
+
+   - Turn on the toggle to enable 2FA
+   - Turn off the toggle to disable 2FA
+
+1. Click **Save changes**.
+
+<img src="/images/teams-view.png" alt="View team members list in the Vendor Portal" width="700"/>
+
+[View a larger image](/images/teams-view.png)
+
+All users, including read-only, can see the name of the RBAC role assigned to each team member. However, when SAML authentication is enabled, users with the built-in read-only policy cannot see the RBAC role assigned to team members.
+
+## Invite Members
+By default, team administrators can invite more team members to collaborate. Invited users receive an email to activate their account. The activation link in the email is unique to the invited user. Following the activation link in the email also ensures that the invited user joins the team from which the invitation originated.
+
+:::note
+Teams that have enforced SAML-only authentication do not use the email invitation flow described in this procedure. These teams and their users must log in through their SAML provider.
+:::
+
+To invite a new team member:
+
+1. From the [Team Members](https://vendor.replicated.com/team/members) page, click **Invite team member**.
+
+   The Invite team member dialog opens.
+
+   <img src="/images/teams-invite-member.png" alt="Invite team member dialog in the Vendor Portal" width="500"/>
+
+   [View a larger version of this image](/images/teams-invite-member.png)
+
+1. Enter the email address of the member.
+
+1. In the **Permissions** field, assign an RBAC policy from the dropdown list.
+
+   <CollabRbacImportant/>
+
+1. Click **Invite member**.
+
+   People invited to join your team receive an email notification to accept the invitation. They must follow the link in the email to accept the invitation and join the team. If they do not have a Replicated account already, they can create one that complies with your password policies, 2FA, and Google authentication requirements. If an invited user's email address is already associated with a Replicated account, by accepting your invitation, they automatically leave their current team and join the team that you have invited them to.
+
+## Managing Invitations
+
+Invitations expire after 7 days. If a prospective member has not accepted their invitation in this time frame, you can re-invite them without having to reenter their details. You can also remove the prospective member from the list.
+
+You must be an administrator to perform this action.
+
+To re-invite or remove a prospective member, do one of the following on the **Team Members** page:
+
+* Click **Reinvite** from the row with the user's email address, and then click **Reinvite** in the confirmation dialog.
+
+* Click **Remove** from the row with the user's email address, and then click **Delete Invitation** in the confirmation dialog.
+
+## Edit Policy Permissions
+
+You can edit the RBAC policy that is assigned to a member at any time.
+
+<CollabRbacImportant/>
+
+To edit policy permissions for individual team members:
+
+1. From the Team Members list, click **Edit permissions** next to a member's name.
+
+   :::note
+   The two-factor authentication (2FA) status displays on the **Team members** page, but it is not configured on this page. For more information about configuring 2FA, see [Managing Two-Factor Authentication](team-management-two-factor-auth).
+   :::
+
+1. Select an RBAC policy from the **Permissions** dropdown list, and click **Save**. For information about configuring the RBAC policies that display in this list, see [Configuring RBAC Policies](team-management-rbac-configuring).
+
+   <img src="/images/teams-edit-permissions.png" alt="Edit team member permissions in the Vendor Portal" width="400"/>
+
+## Enable Users to Auto-join Your Team
+By default, users must be invited to your team. Team administrators can use the auto-join feature to allow users from the same email domain to join their team automatically. This applies to users registering with an email address, or with Google authentication if it is enabled for the team. The auto-join feature does not apply to SAML authentication because SAML users log in using their SAML provider's application portal instead of the Vendor Portal.
+
+To add, edit, or delete custom RBAC policies, see [Configuring RBAC Policies](team-management-rbac-configuring).
+
+To enable users to auto-join your team:
+
+1. From the Team Members page, click **Auto-join** from the left navigation.
+1. Enable the **Allow all users from my domain to be added to my team** toggle.
+
+   <img src="/images/teams-auto-join.png" alt="Auto join dialog in the Vendor Portal" width="600"/>
+
+   [View a larger image](/images/teams-auto-join.png)
+
+1. For **Default RBAC policy level for new accounts**, you can use the default Read Only policy or select another policy from the list. This RBAC policy is applied to all users who join the team with the auto-join feature.
+
+   <CollabRbacImportant/>
+
+
+## Remove Members and End Sessions
+As a Vendor Portal team admin, you can remove team members, except for the account you are currently logged in with.
+
+If the team member that you remove added their GitHub username to their Account Settings page in the Vendor Portal to access the Replicated collab repository, then the Vendor Portal also automatically removes their username from the collab repository. For more information, see [Managing Collab Repository Access](team-management-github-username).
+
+SAML-created users must be removed using this method to expire their existing sessions because Replicated does not support System for Cross-domain Identity Management (SCIM).
+
+To remove a member:
+
+1. From the Team Members page, click **Remove** on the right side of a user's row.
+
+1. Click **Remove** in the confirmation dialog.
+
+   The member is removed. All of their current user sessions are deleted and their next attempt at communicating with the server logs them out of their browser's session.
+
+   If the member added their GitHub username to the Vendor Portal to access the collab repository, then the Vendor Portal also removes their GitHub username from the collab repository.
+
+   For Google-authenticated users, if the user's Google account is suspended or deleted, Replicated logs that user out of all Google-authenticated Vendor Portal sessions within 10 minutes. The user remains in the team list, but they cannot log in to the Vendor Portal unless username and password authentication is also allowed.
+
+## Update Email Addresses
+
+:::important
+Changing team member email addresses has security implications. Replicated advises that you avoid changing team member email addresses if possible.
+:::
+
+Updating the email address for a team member requires creating a new account with the updated email address, and then deactivating the previous account.
+
+To update the email address for a team member:
+
+1. From the Team Members page, click **Invite team member**.
+
+1. Assign the required RBAC policies to the new user.
+
+1. Deactivate the previous team member account.
+
+================
+File: docs/vendor/telemetry-air-gap.mdx
+================
+import AirGapTelemetry from "../partials/instance-insights/_airgap-telemetry.mdx"
+
+# Collecting Telemetry for Air Gap Instances
+
+This topic describes how to collect telemetry for instances in air gap environments.
+
+## Overview
+
+Air gap instances run in environments without outbound internet access. This limitation prevents these instances from periodically sending telemetry to the Replicated Vendor Portal through the Replicated SDK or Replicated KOTS. For more information about how the Vendor Portal collects telemetry from online (internet-connected) instances, see [About Instance and Event Data](/vendor/instance-insights-event-data#about-reporting).
+
+<AirGapTelemetry/>
+
+The following diagram demonstrates how air gap telemetry is collected and stored by the Replicated SDK in a customer environment, and then shared with the Vendor Portal in a support bundle:
+
+<img alt="Air gap telemetry collected by the SDK in a support bundle" src="/images/airgap-telemetry.png" width="800px"/>
+
+[View a larger version of this image](/images/airgap-telemetry.png)
+
+All support bundles uploaded to the Vendor Portal from air gap customers contribute to a comprehensive dataset, providing parity in the telemetry for air gap and online instances. Replicated recommends that you collect support bundles from air gap customers regularly (monthly or quarterly) to improve the completeness of the dataset. The Vendor Portal handles any overlapping event archives idempotently, ensuring data integrity.
+
+## Requirements
+
+Air gap telemetry has the following requirements:
+
+* To collect telemetry from air gap instances, one of the following must be installed in the cluster where the instance is running:
+
+   * The Replicated SDK installed in air gap mode. See [Installing the SDK in Air Gap Environments](/vendor/replicated-sdk-airgap).
+
+   * KOTS v1.92.1 or later
+
+   :::note
+   When both the Replicated SDK and KOTS v1.92.1 or later are installed in the cluster (such as when a Helm chart that includes the SDK is installed by KOTS), both collect and store instance telemetry in their own dedicated secret, subject to the size limitation noted below. In the case of any overlapping data points, the Vendor Portal will report these data points chronologically based on their timestamp.
+   :::
+
+* To collect custom metrics from air gap instances, the Replicated SDK must be installed in the cluster in air gap mode. See [Installing the SDK in Air Gap Environments](/vendor/replicated-sdk-airgap).
+
+   For more information about custom metrics, see [Configuring Custom Metrics](https://docs.replicated.com/vendor/custom-metrics).
+
+Replicated strongly recommends that all applications include the Replicated SDK because it enables access to both standard instance telemetry and custom metrics for air gap instances.
+
+## Limitation
+
+Telemetry data is capped at 4,000 events or 1MB per secret, whichever limit is reached first.
+
+When a limit is reached, the oldest events are purged until the payload is within the limit. For optimal use, consider collecting support bundles regularly (monthly or quarterly) from air gap customers.
+
+## Collect and View Air Gap Telemetry
+
+To collect telemetry from air gap instances:
+
+1. Ask your customer to collect a support bundle. See [Generating Support Bundles](/vendor/support-bundle-generating).
+
+1. 
After receiving the support bundle from your customer, go to the Vendor Portal **Customers**, **Customer Reporting**, or **Instance Details** page and upload the support bundle:
+
+   ![upload new bundle button on instance details page](/images/airgap-upload-telemetry.png)
+
+   The telemetry collected from the support bundle appears in the instance data shortly. Allow a few minutes for all data to be processed.
+
+================
+File: docs/vendor/testing-about.md
+================
+import Overview from "../partials/cmx/_overview.mdx"
+import SupportedClusters from "../partials/cmx/_supported-clusters-overview.mdx"
+
+# About Compatibility Matrix
+
+This topic describes Replicated Compatibility Matrix, including use cases, billing, limitations, and more.
+
+## Overview
+
+<Overview/>
+
+You can use Compatibility Matrix with the Replicated CLI or the Replicated Vendor Portal. For more information about how to use Compatibility Matrix, see [Using Compatibility Matrix](testing-how-to).
+
+### Supported Clusters
+
+<SupportedClusters/>
+
+### Billing and Credits
+
+Clusters created with Compatibility Matrix are billed by the minute. Per-minute billing begins when the cluster reaches a `running` status and ends when the cluster is deleted. Compatibility Matrix marks a cluster as `running` when a working kubeconfig for the cluster is accessible.
+
+You are billed only for the time that the cluster is in a `running` status. You are _not_ billed for the time that it takes Compatibility Matrix to create and tear down clusters, including when the cluster is in an `assigned` status.
+
+For more information about pricing, see [Compatibility Matrix Pricing](testing-pricing).
+
+To create clusters with Compatibility Matrix, you must have credits in your Vendor Portal account.
+If you have a contract, you can purchase credits by logging in to the Vendor Portal and going to [**Compatibility Matrix > Buy additional credits**](https://vendor.replicated.com/compatibility-matrix).
+Otherwise, to request credits, log in to the Vendor Portal and go to [**Compatibility Matrix > Request more credits**](https://vendor.replicated.com/compatibility-matrix).
+
+### Quotas and Capacity
+
+By default, Compatibility Matrix sets quotas for the capacity that can be used concurrently by each Vendor Portal team. These quotas are designed to ensure that Replicated maintains a minimum amount of capacity for provisioning both VM and cloud-based clusters.
+
+By default, the quota for cloud-based cluster distributions (AKS, GKE, EKS) is three clusters running concurrently.
+
+VM-based cluster distributions (such as kind, OpenShift, and Replicated Embedded Cluster) have the following default quotas:
+* 32 vCPUs
+* 128 GiB memory
+* 800 GiB disk size
+
+You can request increased quotas at any time with no additional cost. To view your team's current quota and capacity usage, or to request a quota increase, go to [**Compatibility Matrix > Settings**](https://vendor.replicated.com/compatibility-matrix/settings) in the Vendor Portal:
+
+![Compatibility matrix settings page](/images/compatibility-matrix-settings.png)
+
+[View a larger version of this image](/images/compatibility-matrix-settings.png)
+
+### Cluster Status
+
+Clusters created with Compatibility Matrix can have the following statuses:
+
+* `assigned`: The cluster resources were requested and Compatibility Matrix is provisioning the cluster. You are not billed for the time that a cluster spends in the `assigned` status.
+
+* `running`: A working kubeconfig for the cluster is accessible. Billing begins when the cluster reaches a `running` status.
+
+   Additionally, clusters are verified prior to transitioning to a `running` status. Verification includes checking that the cluster is healthy and running with the correct number of nodes, as well as passing [sonobuoy](https://sonobuoy.io/) tests in `--quick` mode.
+
+* `terminated`: The cluster is deleted. Billing ends when the cluster status is changed from `running` to `terminated`.
+
+* `error`: An error occurred when attempting to provision the cluster.
+
+You can view the status of clusters using the `replicated cluster ls` command. For more information, see [cluster ls](/reference/replicated-cli-cluster-ls). For an example, see the sketch at the end of this topic.
+
+### Cluster Add-ons
+
+Replicated Compatibility Matrix enables you to extend your cluster with add-ons that your application can use, such as an AWS S3 object store.
+This makes it easier to provision dependencies required by your application.
+
+For more information about how to use the add-ons, see [Compatibility Matrix Cluster Add-ons](testing-cluster-addons).
+
+## Limitations
+
+Compatibility Matrix has the following limitations:
+
+- Clusters cannot be resized. Create another cluster if you want to make changes, such as adding another node.
+- Clusters cannot be rebooted. Create another cluster if you need to reset or reboot the cluster.
+- On cloud clusters, node groups are not available for every distribution. For distribution-specific details, see [Supported Compatibility Matrix Cluster Types](/vendor/testing-supported-clusters).
+- Multi-node support is not available for every distribution. For distribution-specific details, see [Supported Compatibility Matrix Cluster Types](/vendor/testing-supported-clusters).
+- ARM instance types are only supported on Cloud Clusters. For distribution-specific details, see [Supported Compatibility Matrix Cluster Types](/vendor/testing-supported-clusters).
+- GPU instance types are only supported on Cloud Clusters. For distribution-specific details, see [Supported Compatibility Matrix Cluster Types](/vendor/testing-supported-clusters).
+- There is no support for IPv6 as a single stack. Dual stack support is available on kind clusters.
+- There is no support for air gap testing.
+- The `cluster upgrade` feature is available only for kURL distributions. See [cluster upgrade](/reference/replicated-cli-cluster-upgrade).
+- Cloud clusters do not allow for the configuration of CNI, CSI, CRI, Ingress, or other plugins, add-ons, services, and interfaces.
+- The node operating systems for clusters created with Compatibility Matrix cannot be configured nor replaced with different operating systems.
+- The Kubernetes scheduler for clusters created with Compatibility Matrix cannot be replaced with a different scheduler.
+- Each team has a quota limit on the amount of resources that can be used simultaneously. This limit can be raised by messaging your account representative.
+- Team actions with Compatibility Matrix (for example, creating and deleting clusters and requesting quota increases) are not logged and displayed in the [Vendor Team Audit Log](https://vendor.replicated.com/team/audit-log).
+
+For additional distribution-specific limitations, see [Supported Compatibility Matrix Cluster Types](testing-supported-clusters).
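+
+As an example of the statuses described above, the following sketch lists clusters and checks the `STATUS` column with `replicated cluster ls`. The cluster ID, name, and timestamps shown are placeholders:
+
+```bash
+replicated cluster ls
+# ID      NAME              DISTRIBUTION   VERSION   STATUS    CREATED                         EXPIRES
+# 1234abc My Test Cluster   eks            1.27      running   2023-10-09 17:08:01 +0000 UTC   -
+```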
+
+================
+File: docs/vendor/testing-cluster-addons.md
+================
+# Compatibility Matrix Cluster Add-ons (Alpha)
+
+This topic describes the supported cluster add-ons for Replicated Compatibility Matrix.
+
+## Overview
+
+Replicated Compatibility Matrix enables you to extend your cluster with add-ons that your application can use, such as an AWS S3 object store.
+This makes it easier to provision dependencies required by your application.
+
+## CLI
+
+The Replicated CLI can be used to [create](/reference/replicated-cli-cluster-addon-create), [manage](/reference/replicated-cli-cluster-addon-ls), and [remove](/reference/replicated-cli-cluster-addon-rm) cluster add-ons.
+
+## Supported Add-ons
+
+This section lists the supported cluster add-ons for clusters created with Compatibility Matrix.
+
+### object-store (Alpha)
+
+The Replicated cluster object store add-on can be used to create S3-compatible object store buckets for clusters (currently only AWS S3 is supported for EKS clusters).
+
+Assuming you already have a cluster, run the following command with the cluster ID to create an object store bucket:
+
+```bash
+$ replicated cluster addon create object-store 4d2f7e70 --bucket-prefix mybucket
+05929b24 Object Store pending {"bucket_prefix":"mybucket"}
+$ replicated cluster addon ls 4d2f7e70
+ID TYPE STATUS DATA
+05929b24 Object Store ready {"bucket_prefix":"mybucket","bucket_name":"mybucket-05929b24-cmx","service_account_namespace":"cmx","service_account_name":"mybucket-05929b24-cmx","service_account_name_read_only":"mybucket-05929b24-cmx-ro"}
+```
+
+This creates two service accounts in a namespace: one with read-write access and the other with read-only access to the object store bucket.
+
+Additional service accounts can be created in any namespace with access to the object store by annotating the new service account with the same `eks.amazonaws.com/role-arn` annotation found in the predefined ones (`service_account_name` and `service_account_name_read_only`).
+
+<table>
+  <tr>
+    <th width="35%">Type</th>
+    <th width="65%">Description</th>
+  </tr>
+  <tr>
+    <th>Supported Kubernetes Distributions</th>
+    <td>EKS (AWS S3)</td>
+  </tr>
+  <tr>
+    <th>Cost</th>
+    <td>Flat fee of $0.50 per bucket.</td>
+  </tr>
+  <tr>
+    <th>Options</th>
+    <td>
+      <ul>
+        <li><strong>bucket_prefix (string):</strong> A prefix for the bucket name to be created (required)</li>
+      </ul>
+    </td>
+  </tr>
+  <tr>
+    <th>Data</th>
+    <td>
+      <ul>
+        <li><strong>bucket_prefix:</strong> The prefix specified by the user for the bucket name</li>
+        <li><strong>bucket_name:</strong> The actual bucket name</li>
+        <li><strong>service_account_namespace:</strong> The namespace in which the service accounts (`service_account_name` and `service_account_name_read_only`) have been created.</li>
+        <li><strong>service_account_name:</strong> The service account name for read-write access to the bucket.</li>
+        <li><strong>service_account_name_read_only:</strong> The service account name for read-only access to the bucket.</li>
+      </ul>
+    </td>
+  </tr>
+</table>
+
+================
+File: docs/vendor/testing-how-to.md
+================
+import TestRecs from "../partials/ci-cd/_test-recs.mdx"
+import Prerequisites from "../partials/cmx/_prerequisites.mdx"
+
+# Using Compatibility Matrix
+
+This topic describes how to use Replicated Compatibility Matrix to create ephemeral clusters.
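+
+At a glance, a typical Compatibility Matrix session with the Replicated CLI looks like the following sketch. Each subcommand is covered in detail in the sections below, and `CLUSTER_ID` is a placeholder for the ID of the cluster you create:
+
+```bash
+# Create a single-node kind cluster
+replicated cluster create --distribution kind --version 1.27.0
+
+# Wait for the STATUS column to show "running"
+replicated cluster ls
+
+# Open a shell with the kubeconfig for the cluster, run your tests, then exit
+replicated cluster shell CLUSTER_ID
+
+# Delete the cluster when finished
+replicated cluster rm CLUSTER_ID
+```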
+
+## Prerequisites
+
+Before you can use Compatibility Matrix, you must complete the following prerequisites:
+
+<Prerequisites/>
+
+* Existing accounts must accept the TOS for the trial on the [**Compatibility Matrix**](https://vendor.replicated.com/compatibility-matrix) page in the Replicated Vendor Portal.
+
+## Create and Manage Clusters
+
+This section explains how to use Compatibility Matrix to create and manage clusters with the Replicated CLI or the Vendor Portal.
+
+For information about creating and managing clusters with the Vendor API v3, see the [clusters](https://replicated-vendor-api.readme.io/reference/listclusterusage) section in the Vendor API v3 documentation.
+
+### Create Clusters
+
+You can create clusters with Compatibility Matrix using the Replicated CLI or the Vendor Portal.
+
+#### Replicated CLI
+
+To create a cluster using the Replicated CLI:
+
+1. (Optional) View the available cluster distributions, including the supported Kubernetes versions, instance types, and maximum nodes for each distribution:
+
+   ```bash
+   replicated cluster versions
+   ```
+   For command usage, see [cluster versions](/reference/replicated-cli-cluster-versions).
+
+1. Run the following command to create a cluster:
+
+   ```
+   replicated cluster create --name NAME --distribution K8S_DISTRO --version K8S_VERSION --disk DISK_SIZE --instance-type INSTANCE_TYPE [--license-id LICENSE_ID]
+   ```
+   Where:
+   * `NAME` is any name for the cluster. If `--name` is excluded, a name is automatically generated for the cluster.
+   * `K8S_DISTRO` is the Kubernetes distribution for the cluster.
+   * `K8S_VERSION` is the Kubernetes version for the cluster if creating a standard Cloud or VM-based cluster. If creating an Embedded Cluster or kURL cluster type, `--version` is optional:
+      * For Embedded Cluster types, `--version` defaults to the latest release available on the channel. To specify a different release, set `--version` to the `Channel release sequence` value for the release.
+      * For kURL cluster types, `--version` defaults to the `"latest"` kURL Installer ID. To specify a different kURL Installer, set `--version` to the kURL Installer ID.
+   * `DISK_SIZE` is the disk size (GiB) to request per node.
+   * `INSTANCE_TYPE` is the instance type to use for each node.
+   * (Embedded Cluster Only) `LICENSE_ID` is a valid customer license. Required to create an Embedded Cluster.
+
+   For command usage and additional optional flags, see [cluster create](/reference/replicated-cli-cluster-create).
+
+   **Example:**
+
+   The following example creates a kind cluster with Kubernetes version 1.27.0, a disk size of 100 GiB, and an instance type of `r1.small`.
+
+   ```bash
+   replicated cluster create --name kind-example --distribution kind --version 1.27.0 --disk 100 --instance-type r1.small
+   ```
+
+1. Verify that the cluster was created:
+
+   ```bash
+   replicated cluster ls CLUSTER_NAME
+   ```
+   Where `CLUSTER_NAME` is the name of the cluster that you created.
+
+   In the output of the command, you can see that the `STATUS` of the cluster is `assigned`. When the kubeconfig for the cluster is accessible, the cluster's status is changed to `running`. For more information about cluster statuses, see [Cluster Status](testing-about#cluster-status) in _About Compatibility Matrix._
+
+#### Vendor Portal
+
+To create a cluster using the Vendor Portal:
+
+1. Go to [**Compatibility Matrix > Create cluster**](https://vendor.replicated.com/compatibility-matrix/create-cluster).
+
+   <img alt="Create a cluster page" src="/images/create-a-cluster.png" width="650px"/>
+
+   [View a larger version of this image](/images/create-a-cluster.png)
+
+1. On the **Create a cluster** page, complete the following fields:
+
+   <table>
+     <tr>
+       <th>Field</th>
+       <th>Description</th>
+     </tr>
+     <tr>
+       <td>Kubernetes distribution</td>
+       <td>Select the Kubernetes distribution for the cluster.</td>
+     </tr>
+     <tr>
+       <td>Version</td>
+       <td>Select the Kubernetes version for the cluster. The options available are specific to the distribution selected.</td>
+     </tr>
+     <tr>
+       <td>Name (optional)</td>
+       <td>Enter an optional name for the cluster.</td>
+     </tr>
+     <tr>
+       <td>Tags</td>
+       <td>Add one or more tags to the cluster as key-value pairs.</td>
+     </tr>
+     <tr>
+       <td>Set TTL</td>
+       <td>Select the Time to Live (TTL) for the cluster. When the TTL expires, the cluster is automatically deleted. TTL can be adjusted after cluster creation with [cluster update ttl](/reference/replicated-cli-cluster-update-ttl).</td>
+     </tr>
+   </table>
+
+1. For **Nodes & Node Groups**, complete the following fields to configure nodes and node groups for the cluster:
+
+   <table>
+     <tr>
+       <th>Field</th>
+       <th>Description</th>
+     </tr>
+     <tr>
+       <td>Instance type</td>
+       <td>Select the instance type to use for the nodes in the node group. The options available are specific to the distribution selected.</td>
+     </tr>
+     <tr>
+       <td>Disk size</td>
+       <td>Select the disk size in GiB to use per node.</td>
+     </tr>
+     <tr>
+       <td>Nodes</td>
+       <td>Select the number of nodes to provision in the node group. The options available are specific to the distribution selected.</td>
+     </tr>
+   </table>
+
+1. (Optional) Click **Add node group** to add additional node groups.
+
+1. Click **Create cluster**.
+
+   The cluster is displayed in the list of clusters on the **Compatibility Matrix** page with a status of Assigned. When the kubeconfig for the cluster is accessible, the cluster's status is changed to Running.
+
+   :::note
+   If the cluster is not automatically displayed, refresh your browser window.
+   :::
+
+   <img alt="Cluster configuration dialog" src="/images/cmx-assigned-cluster.png" width="700px"/>
+
+   [View a larger version of this image](/images/cmx-assigned-cluster.png)
+
+### Prepare Clusters
+
+For applications distributed with the Replicated Vendor Portal, the [`cluster prepare`](/reference/replicated-cli-cluster-prepare) command reduces the number of steps required to provision a cluster and then deploy a release to the cluster for testing. This is useful in continuous integration (CI) workflows that run multiple times a day. For an example workflow that uses the `cluster prepare` command, see [Recommended CI/CD Workflows](/vendor/ci-workflows).
+
+The `cluster prepare` command does the following:
+* Creates a cluster
+* Creates a release for your application based on either a Helm chart archive or a directory containing the application YAML files
+* Creates a temporary customer of type `test`
+   :::note
+   Test customers created by the `cluster prepare` command are not saved in your Vendor Portal team.
+   :::
+* Installs the release in the cluster using either the Helm CLI or Replicated KOTS
+
+The `cluster prepare` command requires either a Helm chart archive or a directory containing the application YAML files to be installed:
+
+* **Install a Helm chart with the Helm CLI**:
+
+   ```bash
+   replicated cluster prepare \
+     --distribution K8S_DISTRO \
+     --version K8S_VERSION \
+     --chart HELM_CHART_TGZ
+   ```
+   The following example creates a kind cluster and installs a Helm chart in the cluster using the `nginx-chart-0.0.14.tgz` chart archive:
+   ```bash
+   replicated cluster prepare \
+     --distribution kind \
+     --version 1.27.0 \
+     --chart nginx-chart-0.0.14.tgz \
+     --set key1=val1,key2=val2 \
+     --set-string s1=val1,s2=val2 \
+     --set-json j1='{"key1":"val1","key2":"val2"}' \
+     --set-literal l1=val1,l2=val2 \
+     --values values.yaml
+   ```
+
+* **Install with KOTS from a YAML directory**:
+
+   ```bash
+   replicated cluster prepare \
+     --distribution K8S_DISTRO \
+     --version K8S_VERSION \
+     --yaml-dir PATH_TO_YAML_DIR
+   ```
+   The following example creates a k3s cluster and installs an application in the cluster using the manifest files in a local directory named `config-validation`:
+   ```bash
+   replicated cluster prepare \
+     --distribution k3s \
+     --version 1.26 \
+     --namespace config-validation \
+     --shared-password password \
+     --app-ready-timeout 10m \
+     --yaml-dir config-validation \
+     --config-values-file config-values.yaml \
+     --entitlements "num_of_queues=5"
+   ```
+
+For command usage, including additional options, see [cluster prepare](/reference/replicated-cli-cluster-prepare).
+
+### Access Clusters
+
+Compatibility Matrix provides the kubeconfig for clusters so that you can access clusters with the kubectl command line tool. For more information, see [Command line tool (kubectl)](https://kubernetes.io/docs/reference/kubectl/) in the Kubernetes documentation.
+
+To access a cluster from the command line:
+
+1. Verify that the cluster is in a Running state:
+
+   ```bash
+   replicated cluster ls
+   ```
+   In the output of the command, verify that the `STATUS` for the target cluster is `running`. For command usage, see [cluster ls](/reference/replicated-cli-cluster-ls).
+
+1. Run the following command to open a new shell session with the kubeconfig configured for the cluster:
+
+   ```bash
+   replicated cluster shell CLUSTER_ID
+   ```
+   Where `CLUSTER_ID` is the unique ID for the running cluster that you want to access.
+
+   For command usage, see [cluster shell](/reference/replicated-cli-cluster-shell).
+
+1. Verify that you can interact with the cluster through kubectl by running a command. For example:
+
+   ```bash
+   kubectl get ns
+   ```
+
+1. Press Ctrl-D or type `exit` when done to end the shell and the connection to the server.
+
+### Upgrade Clusters (kURL Only)
+
+For kURL clusters provisioned with Compatibility Matrix, you can use the `cluster upgrade` command to upgrade the version of the kURL installer specification used to provision the cluster. A recommended use case for the `cluster upgrade` command is testing your application's compatibility with Kubernetes API resource version migrations after upgrade.
+
+The following example upgrades a kURL cluster from its previous version to version `9d5a44c`:
+
+```bash
+replicated cluster upgrade cabb74d5 --version 9d5a44c
+```
+
+For command usage, see [cluster upgrade](/reference/replicated-cli-cluster-upgrade).
+
+### Delete Clusters
+
+You can delete clusters using the Replicated CLI or the Vendor Portal.
+
+#### Replicated CLI
+
+To delete a cluster using the Replicated CLI:
+
+1. Get the ID of the target cluster:
+
+   ```
+   replicated cluster ls
+   ```
+   In the output of the command, copy the ID for the cluster.
+
+   **Example:**
+
+   ```
+   ID      NAME              DISTRIBUTION   VERSION   STATUS    CREATED                         EXPIRES
+   1234abc My Test Cluster   eks            1.27      running   2023-10-09 17:08:01 +0000 UTC   -
+   ```
+
+   For command usage, see [cluster ls](/reference/replicated-cli-cluster-ls).
+
+1. Run the following command:
+
+   ```
+   replicated cluster rm CLUSTER_ID
+   ```
+   Where `CLUSTER_ID` is the ID of the target cluster.
+
+   For command usage, see [cluster rm](/reference/replicated-cli-cluster-rm).
+
+1. Confirm that the cluster was deleted:
+
+   ```
+   replicated cluster ls CLUSTER_ID --show-terminated
+   ```
+   Where `CLUSTER_ID` is the ID of the target cluster.
+
+   In the output of the command, you can see that the `STATUS` of the cluster is `terminated`. For command usage, see [cluster ls](/reference/replicated-cli-cluster-ls).
+
+#### Vendor Portal
+
+To delete a cluster using the Vendor Portal:
+
+1. Go to **Compatibility Matrix**.
+
+1. Under **Clusters**, in the vertical dots menu for the target cluster, click **Delete cluster**.
+
+   <img alt="Delete cluster button" src="/images/cmx-delete-cluster.png" width="700px"/>
+
+   [View a larger version of this image](/images/cmx-delete-cluster.png)
+
+## About Using Compatibility Matrix with CI/CD
+
+Replicated recommends that you integrate Compatibility Matrix into your existing CI/CD workflow to automate the process of creating clusters to install your application and run tests. For more information, including additional best practices and recommendations for CI/CD, see [About Integrating with CI/CD](/vendor/ci-overview).
+
+### Replicated GitHub Actions
+
+Replicated maintains a set of custom GitHub actions that are designed to replace repetitive tasks related to using Compatibility Matrix and distributing applications with Replicated.
+
+If you use GitHub Actions as your CI/CD platform, you can include these custom actions in your workflows rather than using Replicated CLI commands. Integrating the Replicated GitHub actions into your CI/CD pipeline helps you quickly build workflows with the required inputs and outputs, without needing to manually create the required CLI commands for each step.
+
+To view all the available GitHub actions that Replicated maintains, see the [replicatedhq/replicated-actions](https://github.com/replicatedhq/replicated-actions/) repository in GitHub.
+
+For more information, see [Integrating Replicated GitHub Actions](/vendor/ci-workflows-github-actions).
+
+### Recommended Workflows
+
+Replicated recommends that you maintain unique CI/CD workflows for development (continuous integration) and for releasing your software (continuous delivery). For example development and release workflows that integrate Compatibility Matrix for testing, see [Recommended CI/CD Workflows](/vendor/ci-workflows).
+
+### Test Script Recommendations
+
+Incorporating code tests into your CI/CD workflows is important for ensuring that developers receive quick feedback and can make updates in small iterations. Replicated recommends that you create and run all of the following test types as part of your CI/CD workflows:
+
+<TestRecs/>
+
+================
+File: docs/vendor/testing-ingress.md
+================
+# Accessing Your Application
+
+This topic describes the networking options for accessing applications deployed on clusters created with Replicated Compatibility Matrix.
It also describes how to use and manage Compatibility Matrix tunnels.
+
+## Networking Options
+
+After deploying your application into Compatibility Matrix clusters, you will want to execute your tests using your own test runner.
+To do this, you need to access your application.
+Compatibility Matrix offers several methods for accessing your application.
+
+Some standard Kubernetes networking options are available, but they vary based on the distribution.
+For VM-based distributions, there is no default network route into the cluster, making inbound connections challenging to create.
+
+### Port Forwarding
+Port forwarding is a low-cost and portable mechanism to access your application.
+Port forwarding works on all clusters supported by Compatibility Matrix because the connection is initiated from the client, over the Kubernetes API server port.
+If you have a single service or pod and are not worried about complex routing, this is a good mechanism.
+The basic steps are to connect the port-forward, execute your tests against localhost, and then shut down the port-forward.
+
+### LoadBalancer
+If your application is only running on cloud services (EKS, GKE, AKS), you can create a service of type `LoadBalancer`.
+This provisions the cloud-provider-specific load balancer.
+The `LoadBalancer` service is fulfilled by the in-tree Kubernetes functionality that is integrated with the underlying cloud provider.
+You can then query the service definition using `kubectl` and connect to and execute your tests over the `LoadBalancer` IP address.
+
+### Ingress
+Ingress is a good way to recreate customer-representative environments, but the problem remains of how to get inbound access to the IP address that the ingress controller allocates.
+Ingress is also not perfectly portable; each ingress controller might require different annotations in the ingress resource to work properly.
+Supported ingress controllers vary based on the distribution.
+Compatibility Matrix supports ingress controllers that are running as a `NodePort` service.
+
+### Compatibility Matrix Tunnels
+All VM-based Compatibility Matrix clusters support tunneling traffic into a `NodePort` service.
+When this option is used, Replicated is responsible for creating the DNS record and TLS certs.
+Replicated will route traffic from `:443` and/or `:80` into the `NodePort` service you defined. For more information about using tunnels, see [Managing Compatibility Matrix Tunnels](#manage-nodes) below.
+
+The following diagram shows how the traffic is routed into the service using Compatibility Matrix tunnels:
+
+<img src="/images/compatibility-matrix-ingress.png" alt="Compatibility Matrix ingress"></img>
+
+[View a larger version of this image](/images/compatibility-matrix-ingress.png)
+
+## Managing Compatibility Matrix Tunnels {#manage-nodes}
+
+Tunnels are viewed, created, and removed using the Compatibility Matrix UI within the Vendor Portal, the Replicated CLI, GitHub Actions, or directly with the Vendor API v3. There is no limit to the number of tunnels you can create for a cluster, and multiple tunnels can connect to a single service, if desired.
+
+### Limitations
+
+Compatibility Matrix tunnels have the following limitations:
+* One tunnel can only connect to one service. If you need fanout routing into different services, consider installing the nginx ingress controller as a `NodePort` service and exposing it, as shown in the sketch after this list.
+* Tunnels are not supported for cloud distributions (EKS, GKE, AKS).
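+
+For example, the following sketch uses Helm to install the open source ingress-nginx controller as a `NodePort` service so that a single tunnel can fan out to the services behind it. The chart values shown are standard ingress-nginx chart options; the node port numbers are arbitrary choices:
+
+```bash
+# Install ingress-nginx with its controller exposed as a NodePort service
+helm install ingress-nginx ingress-nginx \
+  --repo https://kubernetes.github.io/ingress-nginx \
+  --namespace ingress-nginx --create-namespace \
+  --set controller.service.type=NodePort \
+  --set controller.service.nodePorts.http=32456 \
+  --set controller.service.nodePorts.https=32443
+```
+
+The resulting node port (32456 in this sketch) can then be exposed with `replicated cluster port expose`, as described below.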
+
+### Supported Protocols
+
+A tunnel can support one or more protocols.
+The supported protocols are HTTP, HTTPS, WS, and WSS.
+GRPC and other protocols are not routed into the cluster.
+
+### Exposing Ports
+Once you have a node port available on the cluster, you can use the Replicated CLI to expose the node port to the public internet.
+This can be used multiple times on a single cluster.
+
+Optionally, you can specify the `--wildcard` flag to expose this port with a wildcard DNS entry and TLS certificate.
+This feature adds extra time to provision the port, so it should only be used if necessary.
+
+```bash
+replicated cluster port expose \
+    [cluster id] \
+    --port [node port] \
+    --protocol [protocol] \
+    --wildcard
+```
+
+For example, if you have the nginx ingress controller installed and the node port is 32456:
+
+```bash
+% replicated cluster ls
+ID NAME DISTRIBUTION VERSION STATUS
+1e616c55 tender_ishizaka k3s 1.29.2 running
+
+% replicated cluster port expose \
+    1e616c55 \
+    --port 32456 \
+    --protocol http \
+    --protocol https \
+    --wildcard
+```
+
+:::note
+You can expose a node port that does not yet exist in the cluster.
+This is useful if you have a deterministic node port, but need the DNS name as a value in your Helm chart.
+:::
+
+### Viewing Ports
+To view all exposed ports, use the Replicated CLI `port ls` subcommand with the cluster ID:
+
+```bash
+% replicated cluster port ls 1e616c55
+ID CLUSTER PORT PROTOCOL EXPOSED PORT WILDCARD STATUS
+d079b2fc 32456 http http://happy-germain.ingress.replicatedcluster.com true ready
+d079b2fc 32456 https https://happy-germain.ingress.replicatedcluster.com true ready
+```
+
+### Removing Ports
+Exposed ports are automatically deleted when a cluster terminates.
+If you want to remove a port (and the associated DNS records and TLS certs) prior to cluster termination, run the `port rm` subcommand with the cluster ID:
+
+```bash
+% replicated cluster port rm 1e616c55 --id d079b2fc
+```
+
+You can remove just one protocol, or all.
+Removing all protocols also removes the DNS record and TLS cert.
+
+================
+File: docs/vendor/testing-pricing.mdx
+================
+# Compatibility Matrix Pricing
+
+This topic describes the pricing for Replicated Compatibility Matrix.
+
+## Pricing Overview
+
+Compatibility Matrix usage-based pricing includes a $0.50 per-cluster startup cost, plus per-minute pricing based on instance size and count, starting when the cluster state changes to "running" and ending when the cluster is either expired (TTL) or removed. Minutes are rounded up, so there is a minimum charge of $0.50 plus 1 minute for all running clusters. Each cluster's cost is rounded up to the nearest cent and subtracted from the available credits in the team account. The remaining credit balance is viewable on the Replicated Vendor Portal [Cluster History](https://vendor.replicated.com/compatibility-matrix/history) page or with the Vendor API v3 [/vendor/v3/cluster/stats](https://replicated-vendor-api.readme.io/reference/getclusterstats) endpoint. Cluster [add-ons](/vendor/testing-cluster-addons) may incur additional charges.
+
+If the team's available credits are insufficient to run the cluster for the full duration of the TTL, the cluster creation will be rejected.
+
+## Cluster Quotas
+
+Each team is limited in the number of clusters that it can run concurrently. To increase the quota, reach out to your account manager.
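+
+As a worked example of the billing described above, consider an r1.small cluster (billed at $0.096 per hour, per the VM pricing table below) that runs for 29.5 minutes. The minutes are rounded up to 30:
+
+```latex
+\text{cost} = \$0.50 + 30 \times \frac{\$0.096}{60} = \$0.50 + \$0.048 = \$0.548
+```
+
+The total is then rounded up to the nearest cent, so $0.55 is deducted from the team's credits.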
+
+## VM Cluster Pricing (OpenShift, RKE2, k3s, kind, Embedded Cluster, kURL)
+
+VM-based clusters approximately match the AWS m6i instance type pricing.
+
+<table>
+  <tr>
+    <th width="25%">Instance Type</th>
+    <th width="25%">VCPUs</th>
+    <th width="25%">Memory (GiB)</th>
+    <th width="25%">USD/Credit per hour</th>
+  </tr>
+  <tr>
+    <td>r1.small</td>
+    <td>2</td>
+    <td>8</td>
+    <td>$0.096</td>
+  </tr>
+  <tr>
+    <td>r1.medium</td>
+    <td>4</td>
+    <td>16</td>
+    <td>$0.192</td>
+  </tr>
+  <tr>
+    <td>r1.large</td>
+    <td>8</td>
+    <td>32</td>
+    <td>$0.384</td>
+  </tr>
+  <tr>
+    <td>r1.xlarge</td>
+    <td>16</td>
+    <td>64</td>
+    <td>$0.768</td>
+  </tr>
+  <tr>
+    <td>r1.2xlarge</td>
+    <td>32</td>
+    <td>128</td>
+    <td>$1.536</td>
+  </tr>
+</table>
+
+## Cloud Cluster Pricing
+
+### AWS EKS Cluster Pricing
+
+AWS clusters will be charged AWS pricing plus a markup of 20%. Note that the markup will be calculated at the rounded price per hour in order to make hourly prices fixed. Pricing for Extended Support EKS versions (those Kubernetes versions considered deprecated by upstream Kubernetes) will have additional charges applied.
+
+<table>
+  <tr>
+    <th width="25%">Instance Type</th>
+    <th width="25%">VCPUs</th>
+    <th width="25%">Memory (GiB)</th>
+    <th width="25%">USD/Credit per hour</th>
+  </tr>
+  <tr>
+    <td>m6i.large</td>
+    <td>2</td>
+    <td>8</td>
+    <td>$0.115</td>
+  </tr>
+  <tr>
+    <td>m6i.xlarge</td>
+    <td>4</td>
+    <td>16</td>
+    <td>$0.230</td>
+  </tr>
+  <tr>
+    <td>m6i.2xlarge</td>
+    <td>8</td>
+    <td>32</td>
+    <td>$0.461</td>
+  </tr>
+  <tr>
+    <td>m6i.4xlarge</td>
+    <td>16</td>
+    <td>64</td>
+    <td>$0.922</td>
+  </tr>
+  <tr>
+    <td>m6i.8xlarge</td>
+    <td>32</td>
+    <td>128</td>
+    <td>$1.843</td>
+  </tr>
+  <tr>
+    <td>m7i.large</td>
+    <td>2</td>
+    <td>8</td>
+    <td>$0.121</td>
+  </tr>
+  <tr>
+    <td>m7i.xlarge</td>
+    <td>4</td>
+    <td>16</td>
+    <td>$0.242</td>
+  </tr>
+  <tr>
+    <td>m7i.2xlarge</td>
+    <td>8</td>
+    <td>32</td>
+    <td>$0.484</td>
+  </tr>
+  <tr>
+    <td>m7i.4xlarge</td>
+    <td>16</td>
+    <td>64</td>
+    <td>$0.968</td>
+  </tr>
+  <tr>
+    <td>m7i.8xlarge</td>
+    <td>32</td>
+    <td>128</td>
+    <td>$1.935</td>
+  </tr>
+  <tr>
+    <td>m5.large</td>
+    <td>2</td>
+    <td>8</td>
+    <td>$0.115</td>
+  </tr>
+  <tr>
+    <td>m5.xlarge</td>
+    <td>4</td>
+    <td>16</td>
+    <td>$0.230</td>
+  </tr>
+  <tr>
+    <td>m5.2xlarge</td>
+    <td>8</td>
+    <td>32</td>
+    <td>$0.461</td>
+  </tr>
+  <tr>
+    <td>m5.4xlarge</td>
+    <td>16</td>
+    <td>64</td>
+    <td>$0.922</td>
+  </tr>
+  <tr>
+    <td>m5.8xlarge</td>
+    <td>32</td>
+    <td>128</td>
+    <td>$1.843</td>
+  </tr>
+  <tr>
+    <td>m7g.large</td>
+    <td>2</td>
+    <td>8</td>
+    <td>$0.098</td>
+  </tr>
+  <tr>
+    <td>m7g.xlarge</td>
+    <td>4</td>
+    <td>16</td>
+    <td>$0.195</td>
+  </tr>
+  <tr>
+    <td>m7g.2xlarge</td>
+    <td>8</td>
+    <td>32</td>
+    <td>$0.392</td>
+  </tr>
+  <tr>
+    <td>m7g.4xlarge</td>
+    <td>16</td>
+    <td>64</td>
+    <td>$0.784</td>
+  </tr>
+  <tr>
+    <td>m7g.8xlarge</td>
+    <td>32</td>
+    <td>128</td>
+    <td>$1.567</td>
+  </tr>
+  <tr>
+    <td>c5.large</td>
+    <td>2</td>
+    <td>4</td>
+    <td>$0.102</td>
+  </tr>
+  <tr>
+    <td>c5.xlarge</td>
+    <td>4</td>
+    <td>8</td>
+    <td>$0.204</td>
+  </tr>
+  <tr>
+    <td>c5.2xlarge</td>
+    <td>8</td>
+    <td>16</td>
+    <td>$0.408</td>
+  </tr>
+  <tr>
+    <td>c5.4xlarge</td>
+    <td>16</td>
+    <td>32</td>
+    <td>$0.816</td>
+  </tr>
+  <tr>
+    <td>c5.9xlarge</td>
+    <td>36</td>
+    <td>72</td>
+    <td>$1.836</td>
+  </tr>
+  <tr>
+    <td>g4dn.xlarge</td>
+    <td>4</td>
+    <td>16</td>
+    <td>$0.631</td>
+  </tr>
+  <tr>
+    <td>g4dn.2xlarge</td>
+    <td>8</td>
+    <td>32</td>
+    <td>$0.902</td>
+  </tr>
+  <tr>
+    <td>g4dn.4xlarge</td>
+ <td>16</td> + <td>64</td> + <td>$1.445</td> + </tr> + <tr> + <td>g4dn.8xlarge</td> + <td>32</td> + <td>128</td> + <td>$2.611</td> + </tr> + <tr> + <td>g4dn.12xlarge</td> + <td>48</td> + <td>192</td> + <td>$4.964</td> + </tr> + <tr> + <td>g4dn.16xlarge</td> + <td>64</td> + <td>256</td> + <td>$5.222</td> + </tr> +</table> + +### GCP GKE Cluster Pricing + +GCP clusters will be charged GCP list pricing plus a markup of 20%. Note that the markup will be calculated at the rounded price per hour in order to make hourly prices fixed. + +<table> + <tr> + <th width="25%">Instance Type</th> + <th width="25%">VCPUs</th> + <th width="25%">Memory (GiB)</th> + <th width="25%">USD/Credit per hour</th> + </tr> + <tr> + <td>n2-standard-2</td> + <td>2</td> + <td>8</td> + <td>$0.117</td> + </tr> + <tr> + <td>n2-standard-4</td> + <td>4</td> + <td>16</td> + <td>$0.233</td> + </tr> + <tr> + <td>n2-standard-8</td> + <td>8</td> + <td>32</td> + <td>$0.466</td> + </tr> + <tr> + <td>n2-standard-16</td> + <td>16</td> + <td>64</td> + <td>$0.932</td> + </tr> + <tr> + <td>n2-standard-32</td> + <td>32</td> + <td>128</td> + <td>$1.865</td> + </tr> + <tr> + <td>t2a-standard-2</td> + <td>2</td> + <td>8</td> + <td>$0.092</td> + </tr> + <tr> + <td>t2a-standard-4</td> + <td>4</td> + <td>16</td> + <td>$0.185</td> + </tr> + <tr> + <td>t2a-standard-8</td> + <td>8</td> + <td>32</td> + <td>$0.370</td> + </tr> + <tr> + <td>t2a-standard-16</td> + <td>16</td> + <td>64</td> + <td>$0.739</td> + </tr> + <tr> + <td>t2a-standard-32</td> + <td>32</td> + <td>128</td> + <td>$1.478</td> + </tr> + <tr> + <td>t2a-standard-48</td> + <td>48</td> + <td>192</td> + <td>$2.218</td> + </tr> + <tr> + <td>e2-standard-2</td> + <td>2</td> + <td>8</td> + <td>$0.081</td> + </tr> + <tr> + <td>e2-standard-4</td> + <td>4</td> + <td>16</td> + <td>$0.161</td> + </tr> + <tr> + <td>e2-standard-8</td> + <td>8</td> + <td>32</td> + <td>$0.322</td> + </tr> + <tr> + <td>e2-standard-16</td> + <td>16</td> + <td>64</td> + <td>$0.643</td> + </tr> + <tr> + <td>e2-standard-32</td> + <td>32</td> + <td>128</td> + <td>$1.287</td> + </tr> + <tr> + <td>n1-standard-1+nvidia-tesla-t4+1</td> + <td>1</td> + <td>3.75</td> + <td>$0.321</td> + </tr> + <tr> + <td>n1-standard-1+nvidia-tesla-t4+2</td> + <td>1</td> + <td>3.75</td> + <td>$0.585</td> + </tr> + <tr> + <td>n1-standard-1+nvidia-tesla-t4+4</td> + <td>1</td> + <td>3.75</td> + <td>$1.113</td> + </tr> + <tr> + <td>n1-standard-2+nvidia-tesla-t4+1</td> + <td>2</td> + <td>7.50</td> + <td>$0.378</td> + </tr> + <tr> + <td>n1-standard-2+nvidia-tesla-t4+2</td> + <td>2</td> + <td>7.50</td> + <td>$0.642</td> + </tr> + <tr> + <td>n1-standard-2+nvidia-tesla-t4+4</td> + <td>2</td> + <td>7.50</td> + <td>$1.170</td> + </tr> + <tr> + <td>n1-standard-4+nvidia-tesla-t4+1</td> + <td>4</td> + <td>15</td> + <td>$0.492</td> + </tr> + <tr> + <td>n1-standard-4+nvidia-tesla-t4+2</td> + <td>4</td> + <td>15</td> + <td>$0.756</td> + </tr> + <tr> + <td>n1-standard-4+nvidia-tesla-t4+4</td> + <td>4</td> + <td>15</td> + <td>$1.284</td> + </tr> + <tr> + <td>n1-standard-8+nvidia-tesla-t4+1</td> + <td>8</td> + <td>30</td> + <td>$0.720</td> + </tr> + <tr> + <td>n1-standard-8+nvidia-tesla-t4+2</td> + <td>8</td> + <td>30</td> + <td>$0.984</td> + </tr> + <tr> + <td>n1-standard-8+nvidia-tesla-t4+4</td> + <td>8</td> + <td>30</td> + <td>$1.512</td> + </tr> + <tr> + <td>n1-standard-16+nvidia-tesla-t4+1</td> + <td>16</td> + <td>60</td> + <td>$1.176</td> + </tr> + <tr> + <td>n1-standard-16+nvidia-tesla-t4+2</td> + <td>16</td> + <td>60</td> + <td>$1.440</td> + </tr> + <tr> + 
<td>n1-standard-16+nvidia-tesla-t4+4</td> + <td>16</td> + <td>60</td> + <td>$1.968</td> + </tr> + <tr> + <td>n1-standard-32+nvidia-tesla-t4+1</td> + <td>32</td> + <td>120</td> + <td>$2.088</td> + </tr> + <tr> + <td>n1-standard-32+nvidia-tesla-t4+2</td> + <td>32</td> + <td>120</td> + <td>$2.352</td> + </tr> + <tr> + <td>n1-standard-32+nvidia-tesla-t4+4</td> + <td>32</td> + <td>120</td> + <td>$2.880</td> + </tr> + <tr> + <td>n1-standard-64+nvidia-tesla-t4+1</td> + <td>64</td> + <td>240</td> + <td>$3.912</td> + </tr> + <tr> + <td>n1-standard-64+nvidia-tesla-t4+2</td> + <td>64</td> + <td>240</td> + <td>$4.176</td> + </tr> + <tr> + <td>n1-standard-64+nvidia-tesla-t4+4</td> + <td>64</td> + <td>240</td> + <td>$4.704</td> + </tr> + <tr> + <td>n1-standard-96+nvidia-tesla-t4+1</td> + <td>96</td> + <td>360</td> + <td>$5.736</td> + </tr> + <tr> + <td>n1-standard-96+nvidia-tesla-t4+2</td> + <td>96</td> + <td>360</td> + <td>$6.000</td> + </tr> + <tr> + <td>n1-standard-96+nvidia-tesla-t4+4</td> + <td>96</td> + <td>360</td> + <td>$6.528</td> + </tr> +</table> + +### Azure AKS Cluster Pricing + +Azure clusters will be charged Azure list pricing plus a markup of 20%. Note that the markup will be calculated at the rounded price per hour in order to make hourly prices fixed. + +<table> + <tr> + <th width="25%">Instance Type</th> + <th width="15%">VCPUs</th> + <th width="15%">Memory (GiB)</th> + <th width="15%">Rate</th> + <th width="15%">List Price</th> + <th width="15%">USD/Credit per hour</th> + </tr> + <tr> + <td>Standard_B2ms</td> + <td>2</td> + <td>8</td> + <td>8320</td> + <td>$0.083</td> + <td>$0.100</td> + </tr> + <tr> + <td>Standard_B4ms</td> + <td>4</td> + <td>16</td> + <td>16600</td> + <td>$0.166</td> + <td>$0.199</td> + </tr> + <tr> + <td>Standard_B8ms</td> + <td>8</td> + <td>32</td> + <td>33300</td> + <td>$0.333</td> + <td>$0.400</td> + </tr> + <tr> + <td>Standard_B16ms</td> + <td>16</td> + <td>64</td> + <td>66600</td> + <td>$0.666</td> + <td>$0.799</td> + </tr> + <tr> + <td>Standard_DS2_v2</td> + <td>2</td> + <td>7</td> + <td>14600</td> + <td>$0.146</td> + <td>$0.175</td> + </tr> + <tr> + <td>Standard_DS3_v2</td> + <td>4</td> + <td>14</td> + <td>29300</td> + <td>$0.293</td> + <td>$0.352</td> + </tr> + <tr> + <td>Standard_DS4_v2</td> + <td>8</td> + <td>28</td> + <td>58500</td> + <td>$0.585</td> + <td>$0.702</td> + </tr> + <tr> + <td>Standard_DS5_v2</td> + <td>16</td> + <td>56</td> + <td>117000</td> + <td>$1.170</td> + <td>$1.404</td> + </tr> + <tr> + <td>Standard_D2ps_v5</td> + <td>2</td> + <td>8</td> + <td>14600</td> + <td>$0.077</td> + <td>$0.092</td> + </tr> + <tr> + <td>Standard_D4ps_v5</td> + <td>4</td> + <td>16</td> + <td>7700</td> + <td>$0.154</td> + <td>$0.185</td> + </tr> + <tr> + <td>Standard_D8ps_v5</td> + <td>8</td> + <td>32</td> + <td>15400</td> + <td>$0.308</td> + <td>$0.370</td> + </tr> + <tr> + <td>Standard_D16ps_v5</td> + <td>16</td> + <td>64</td> + <td>30800</td> + <td>$0.616</td> + <td>$0.739</td> + </tr> + <tr> + <td>Standard_D32ps_v5</td> + <td>32</td> + <td>128</td> + <td>61600</td> + <td>$1.232</td> + <td>$1.478</td> + </tr> + <tr> + <td>Standard_D48ps_v5</td> + <td>48</td> + <td>192</td> + <td>23200</td> + <td>$1.848</td> + <td>$2.218</td> + </tr> + <tr> + <td>Standard_NC4as_T4_v3</td> + <td>4</td> + <td>28</td> + <td>52600</td> + <td>$0.526</td> + <td>$0.631</td> + </tr> + <tr> + <td>Standard_NC8as_T4_v3</td> + <td>8</td> + <td>56</td> + <td>75200</td> + <td>$0.752</td> + <td>$0.902</td> + </tr> + <tr> + <td>Standard_NC16as_T4_v3</td> + <td>16</td> + <td>110</td> + 
<td>120400</td>
+    <td>$1.204</td>
+    <td>$1.445</td>
+  </tr>
+  <tr>
+    <td>Standard_NC64as_T4_v3</td>
+    <td>64</td>
+    <td>440</td>
+    <td>435200</td>
+    <td>$4.352</td>
+    <td>$5.222</td>
+  </tr>
+  <tr>
+    <td>Standard_D2S_v5</td>
+    <td>2</td>
+    <td>8</td>
+    <td>9600</td>
+    <td>$0.096</td>
+    <td>$0.115</td>
+  </tr>
+  <tr>
+    <td>Standard_D4S_v5</td>
+    <td>4</td>
+    <td>16</td>
+    <td>19200</td>
+    <td>$0.192</td>
+    <td>$0.230</td>
+  </tr>
+  <tr>
+    <td>Standard_D8S_v5</td>
+    <td>8</td>
+    <td>32</td>
+    <td>38400</td>
+    <td>$0.384</td>
+    <td>$0.461</td>
+  </tr>
+  <tr>
+    <td>Standard_D16S_v5</td>
+    <td>16</td>
+    <td>64</td>
+    <td>76800</td>
+    <td>$0.768</td>
+    <td>$0.922</td>
+  </tr>
+  <tr>
+    <td>Standard_D32S_v5</td>
+    <td>32</td>
+    <td>128</td>
+    <td>153600</td>
+    <td>$1.536</td>
+    <td>$1.843</td>
+  </tr>
+  <tr>
+    <td>Standard_D64S_v5</td>
+    <td>64</td>
+    <td>192</td>
+    <td>230400</td>
+    <td>$2.304</td>
+    <td>$2.765</td>
+  </tr>
+</table>
+
+### Oracle OKE Cluster Pricing
+
+Oracle-based clusters will be charged Oracle list pricing plus a markup of 20%. Note that the markup will be calculated at the rounded price per hour in order to make hourly prices fixed.
+
+<table>
+  <tr>
+    <th width="25%">Instance Type</th>
+    <th width="25%">VCPUs</th>
+    <th width="25%">Memory (GiB)</th>
+    <th width="25%">USD/Credit per hour</th>
+  </tr>
+  <tr>
+    <td>VM.Standard2.1</td>
+    <td>1</td>
+    <td>15</td>
+    <td>$0.076</td>
+  </tr>
+  <tr>
+    <td>VM.Standard2.2</td>
+    <td>2</td>
+    <td>30</td>
+    <td>$0.153</td>
+  </tr>
+  <tr>
+    <td>VM.Standard2.4</td>
+    <td>4</td>
+    <td>60</td>
+    <td>$0.306</td>
+  </tr>
+  <tr>
+    <td>VM.Standard2.8</td>
+    <td>8</td>
+    <td>120</td>
+    <td>$0.612</td>
+  </tr>
+  <tr>
+    <td>VM.Standard2.16</td>
+    <td>16</td>
+    <td>240</td>
+    <td>$1.225</td>
+  </tr>
+  <tr>
+    <td>VM.Standard3Flex.1</td>
+    <td>1</td>
+    <td>4</td>
+    <td>$0.055</td>
+  </tr>
+  <tr>
+    <td>VM.Standard3Flex.2</td>
+    <td>2</td>
+    <td>8</td>
+    <td>$0.110</td>
+  </tr>
+  <tr>
+    <td>VM.Standard3Flex.4</td>
+    <td>4</td>
+    <td>16</td>
+    <td>$0.221</td>
+  </tr>
+  <tr>
+    <td>VM.Standard3Flex.8</td>
+    <td>8</td>
+    <td>32</td>
+    <td>$0.442</td>
+  </tr>
+  <tr>
+    <td>VM.Standard3Flex.16</td>
+    <td>16</td>
+    <td>64</td>
+    <td>$0.883</td>
+  </tr>
+  <tr>
+    <td>VM.Standard.A1.Flex.1</td>
+    <td>1</td>
+    <td>4</td>
+    <td>$0.019</td>
+  </tr>
+  <tr>
+    <td>VM.Standard.A1.Flex.2</td>
+    <td>2</td>
+    <td>8</td>
+    <td>$0.038</td>
+  </tr>
+  <tr>
+    <td>VM.Standard.A1.Flex.4</td>
+    <td>4</td>
+    <td>16</td>
+    <td>$0.077</td>
+  </tr>
+  <tr>
+    <td>VM.Standard.A1.Flex.8</td>
+    <td>8</td>
+    <td>32</td>
+    <td>$0.154</td>
+  </tr>
+  <tr>
+    <td>VM.Standard.A1.Flex.16</td>
+    <td>16</td>
+    <td>64</td>
+    <td>$0.309</td>
+  </tr>
+</table>
+
+Last modified January 06, 2025
+
+================
+File: docs/vendor/testing-supported-clusters.md
+================
+import Pool from "../partials/cmx/_openshift-pool.mdx"
+
+# Supported Compatibility Matrix Cluster Types
+
+This topic describes the supported Kubernetes distributions, Kubernetes versions, instance types, nodes, limitations, and common use cases for clusters created with Replicated Compatibility Matrix.
+
+Compatibility Matrix provisions cloud-based or virtual machine (VM) clusters.
+
+## VM Clusters
+
+This section lists the supported VM cluster distributions for clusters created with Compatibility Matrix.
+
+### kind
+
+Compatibility Matrix supports creating [kind](https://kind.sigs.k8s.io/) clusters.
+ +<table> + <tr> + <th width="35%">Type</th> + <th width="65%">Description</th> + </tr> + <tr> + <th>Supported Kubernetes Versions</th> + <td>{/* START_kind_VERSIONS */}1.26.15, 1.27.16, 1.28.15, 1.29.14, 1.30.10, 1.31.6, 1.32.2{/* END_kind_VERSIONS */}</td> + </tr> + <tr> + <th>Supported Instance Types</th> + <td>See <a href="#types">Replicated Instance Types</a></td> + </tr> + <tr> + <th>Node Groups</th> + <td>No</td> + </tr> + <tr> + <th>Node Auto Scaling</th> + <td>No</td> + </tr> + <tr> + <th>Nodes</th> + <td>Supports a single node.</td> + </tr> + <tr> + <th>IP Family</th> + <td>Supports `ipv4` or `dual`.</td> + </tr> + <tr> + <th>Limitations</th> + <td>See <a href="testing-about#limitations">Limitations</a></td> + </tr> + <tr> + <th>Common Use Cases</th> + <td>Smoke tests</td> + </tr> +</table> + +### k3s + +Compatibility Matrix supports creating [k3s](https://k3s.io) clusters. + +<table> + <tr> + <th width="35%">Type</th> + <th width="65%">Description</th> + </tr> + <tr> + <th>Supported k3s Versions</th> + <td>The upstream k8s version that matches the Kubernetes version requested.</td> + </tr> + <tr> + <th>Supported Kubernetes Versions</th> + <td>{/* START_k3s_VERSIONS */}1.24.1, 1.24.2, 1.24.3, 1.24.4, 1.24.6, 1.24.7, 1.24.8, 1.24.9, 1.24.10, 1.24.11, 1.24.12, 1.24.13, 1.24.14, 1.24.15, 1.24.16, 1.24.17, 1.25.0, 1.25.2, 1.25.3, 1.25.4, 1.25.5, 1.25.6, 1.25.7, 1.25.8, 1.25.9, 1.25.10, 1.25.11, 1.25.12, 1.25.13, 1.25.14, 1.25.15, 1.25.16, 1.26.0, 1.26.1, 1.26.2, 1.26.3, 1.26.4, 1.26.5, 1.26.6, 1.26.7, 1.26.8, 1.26.9, 1.26.10, 1.26.11, 1.26.12, 1.26.13, 1.26.14, 1.26.15, 1.27.1, 1.27.2, 1.27.3, 1.27.4, 1.27.5, 1.27.6, 1.27.7, 1.27.8, 1.27.9, 1.27.10, 1.27.11, 1.27.12, 1.27.13, 1.27.14, 1.27.15, 1.27.16, 1.28.1, 1.28.2, 1.28.3, 1.28.4, 1.28.5, 1.28.6, 1.28.7, 1.28.8, 1.28.9, 1.28.10, 1.28.11, 1.28.12, 1.28.13, 1.28.14, 1.28.15, 1.29.0, 1.29.1, 1.29.2, 1.29.3, 1.29.4, 1.29.5, 1.29.6, 1.29.7, 1.29.8, 1.29.9, 1.29.10, 1.29.11, 1.29.12, 1.29.13, 1.29.14, 1.30.0, 1.30.1, 1.30.2, 1.30.3, 1.30.4, 1.30.5, 1.30.6, 1.30.7, 1.30.8, 1.30.9, 1.30.10, 1.31.0, 1.31.1, 1.31.2, 1.31.3, 1.31.4, 1.31.5, 1.31.6, 1.32.0, 1.32.1, 1.32.2{/* END_k3s_VERSIONS */}</td> + </tr> + <tr> + <th>Supported Instance Types</th> + <td>See <a href="#types">Replicated Instance Types</a></td> + </tr> + <tr> + <th>Node Groups</th> + <td>Yes</td> + </tr> + <tr> + <th>Node Auto Scaling</th> + <td>No</td> + </tr> + <tr> + <th>Nodes</th> + <td>Supports multiple nodes.</td> + </tr> + <tr> + <th>IP Family</th> + <td>Supports `ipv4`.</td> + </tr> + <tr> + <th>Limitations</th> + <td>For additional limitations that apply to all distributions, see <a href="testing-about#limitations">Limitations</a>.</td> + </tr> + <tr> + <th>Common Use Cases</th> + <td><ul><li>Smoke tests</li><li>Customer release tests</li></ul></td> + </tr> +</table> + +### RKE2 (Beta) + +Compatibility Matrix supports creating [RKE2](https://docs.rke2.io/) clusters. 
+
+<table>
+  <tr>
+    <th width="35%">Type</th>
+    <th width="65%">Description</th>
+  </tr>
+  <tr>
+    <th>Supported RKE2 Versions</th>
+    <td>The upstream k8s version that matches the Kubernetes version requested.</td>
+  </tr>
+  <tr>
+    <th>Supported Kubernetes Versions</th>
+    <td>{/* START_rke2_VERSIONS */}1.24.1, 1.24.2, 1.24.3, 1.24.4, 1.24.6, 1.24.7, 1.24.8, 1.24.9, 1.24.10, 1.24.11, 1.24.12, 1.24.13, 1.24.14, 1.24.15, 1.24.16, 1.24.17, 1.25.0, 1.25.2, 1.25.3, 1.25.4, 1.25.5, 1.25.6, 1.25.7, 1.25.8, 1.25.9, 1.25.10, 1.25.11, 1.25.12, 1.25.13, 1.25.14, 1.25.15, 1.25.16, 1.26.0, 1.26.1, 1.26.2, 1.26.3, 1.26.4, 1.26.5, 1.26.6, 1.26.7, 1.26.8, 1.26.9, 1.26.10, 1.26.11, 1.26.12, 1.26.13, 1.26.14, 1.26.15, 1.27.1, 1.27.2, 1.27.3, 1.27.4, 1.27.5, 1.27.6, 1.27.7, 1.27.8, 1.27.9, 1.27.10, 1.27.11, 1.27.12, 1.27.13, 1.27.14, 1.27.15, 1.27.16, 1.28.2, 1.28.3, 1.28.4, 1.28.5, 1.28.6, 1.28.7, 1.28.8, 1.28.9, 1.28.10, 1.28.11, 1.28.12, 1.28.13, 1.28.14, 1.28.15, 1.29.0, 1.29.1, 1.29.2, 1.29.3, 1.29.4, 1.29.5, 1.29.6, 1.29.7, 1.29.8, 1.29.9, 1.29.10, 1.29.11, 1.29.12, 1.29.13, 1.29.14, 1.30.0, 1.30.1, 1.30.2, 1.30.3, 1.30.4, 1.30.5, 1.30.6, 1.30.7, 1.30.8, 1.30.9, 1.30.10, 1.31.0, 1.31.1, 1.31.2, 1.31.3, 1.31.4, 1.31.5, 1.31.6, 1.32.0, 1.32.1, 1.32.2{/* END_rke2_VERSIONS */}</td>
+  </tr>
+  <tr>
+    <th>Supported Instance Types</th>
+    <td>See <a href="#types">Replicated Instance Types</a></td>
+  </tr>
+  <tr>
+    <th>Node Groups</th>
+    <td>Yes</td>
+  </tr>
+  <tr>
+    <th>Node Auto Scaling</th>
+    <td>No</td>
+  </tr>
+  <tr>
+    <th>Nodes</th>
+    <td>Supports multiple nodes.</td>
+  </tr>
+  <tr>
+    <th>IP Family</th>
+    <td>Supports `ipv4`.</td>
+  </tr>
+  <tr>
+    <th>Limitations</th>
+    <td>For additional limitations that apply to all distributions, see <a href="testing-about#limitations">Limitations</a>.</td>
+  </tr>
+  <tr>
+    <th>Common Use Cases</th>
+    <td><ul><li>Smoke tests</li><li>Customer release tests</li></ul></td>
+  </tr>
+</table>
+
+### OpenShift OKD
+
+Compatibility Matrix supports creating [Red Hat OpenShift OKD](https://www.okd.io/) clusters, the community distribution of OpenShift, using CodeReady Containers (CRC).
+
+OpenShift clusters are provisioned with two users:
+
+- (Default) A `kubeadmin` user with `cluster-admin` privileges. Use the `kubeadmin` user only for administrative tasks such as creating new users or setting roles.
+- A `developer` user with namespace-scoped privileges. The `developer` user can be used to better simulate access in end-customer environments.
+
+By default, the kubeconfig context is set to the `kubeadmin` user. To switch to the `developer` user, run the command `oc login --username developer`.
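+
+For example, after connecting to an OpenShift cluster, you can switch users with the standard `oc` CLI (a minimal sketch):
+
+```bash
+# Switch the kubeconfig context from the default kubeadmin user to developer
+oc login --username developer
+
+# Confirm the user you are logged in as
+oc whoami
+```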
+ +<table> + <tr> + <th width="35%">Type</th> + <th width="65%">Description</th> + </tr> + <tr> + <th>Supported OpenShift Versions</th> + <td>{/* START_openshift_VERSIONS */}4.10.0-okd, 4.11.0-okd, 4.12.0-okd, 4.13.0-okd, 4.14.0-okd, 4.15.0-okd, 4.16.0-okd, 4.17.0-okd{/* END_openshift_VERSIONS */}</td> + </tr> + <tr> + <th>Supported Instance Types</th> + <td>See <a href="#types">Replicated Instance Types</a></td> + </tr> + <tr> + <th>Node Groups</th> + <td>Yes</td> + </tr> + <tr> + <th>Node Auto Scaling</th> + <td>No</td> + </tr> + <tr> + <th>Nodes</th> + <td>Supports multiple nodes for versions 4.13.0-okd and later.</td> + </tr> + <tr> + <th>IP Family</th> + <td>Supports `ipv4`.</td> + </tr> + <tr> + <th>Limitations</th> + <td> + <ul> + <li>OpenShift does not support r1.small instance types.</li> + <li>OpenShift versions earlier than 4.13-okd do not have a registry mirror and so may be subject to rate limiting from Docker Hub. For information about Docker Hub rate limiting, see <a href="https://docs.docker.com/docker-hub/download-rate-limit/">Docker Hub rate limit</a>. To increase limits, Replicated recommends that you configure an image pull secret to pull public Docker Hub images as an authenticated user. For more information about how to configure image pull secrets, see <a href="https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/">Pull an Image from a Private Registry</a> in the Kubernetes documentation.</li> + <li> + <p>OpenShift builds take approximately 17 minutes.</p> + <p><Pool/></p> + </li> + </ul> + <p>For additional limitations that apply to all distributions, see <a href="testing-about#limitations">Limitations</a>.</p> + </td> + </tr> + <tr> + <th>Common Use Cases</th> + <td>Customer release tests</td> + </tr> +</table> + +### Embedded Cluster + +Compatibility Matrix supports creating clusters with Replicated Embedded Cluster. For more information, see [Embedded Cluster Overview](/vendor/embedded-overview). + +<table> + <tr> + <th width="35%">Type</th> + <th width="65%">Description</th> + </tr> + <tr> + <th>Supported Embedded Cluster Versions</th> + <td> + Any valid release sequence that has previously been promoted to the channel where the customer license is assigned. + Version is optional and defaults to the latest available release on the channel. + </td> + </tr> + <tr> + <th>Supported Instance Types</th> + <td>See <a href="#types">Replicated Instance Types</a></td> + </tr> + <tr> + <th>Node Groups</th> + <td>Yes</td> + </tr> + <tr> + <th>Nodes</th> + <td>Supports multiple nodes (alpha).</td> + </tr> + <tr> + <th>IP Family</th> + <td>Supports `ipv4`.</td> + </tr> + <tr> + <th>Limitations</th> + <td> + <ul> + <li>The Admin Console UI is not exposed publicly and must be exposed via `kubectl -n kotsadm port-forward svc/kurl-proxy-kotsadm 38800:8800`. The password for the Admin Console is `password`.</li> + <li><strong>A valid customer license is required to create an Embedded Cluster.</strong></li> + <li>The [cluster prepare](/vendor/testing-how-to#prepare-clusters) command is not supported.</li> + </ul> + <p>For additional limitations that apply to all distributions, see <a href="testing-about#limitations">Limitations</a>.</p> + </td> + </tr> + <tr> + <th>Common Use Cases</th> + <td>Customer release tests</td> + </tr> +</table> + +### kURL + +Compatibility Matrix supports creating [kURL](https://kurl.sh) clusters. 
+ +<table> + <tr> + <th width="35%">Type</th> + <th width="65%">Description</th> + </tr> + <tr> + <th>Supported kURL Versions</th> + <td>Any promoted kURL installer. Version is optional. For an installer version other than "latest", you can find the specific Installer ID for a previously promoted installer under the relevant **Install Command** (ID after kurl.sh/) on the **Channels > kURL Installer History** page in the Vendor Portal. For more information about viewing the history of kURL installers promoted to a channel, see [Installer History](/vendor/installer-history).</td> + </tr> + <tr> + <th>Supported Instance Types</th> + <td>See <a href="#types">Replicated Instance Types</a></td> + </tr> + <tr> + <th>Node Groups</th> + <td>Yes</td> + </tr> + <tr> + <th>Node Auto Scaling</th> + <td>No</td> + </tr> + <tr> + <th>Nodes</th> + <td>Supports multiple nodes.</td> + </tr> + <tr> + <th>IP Family</th> + <td>Supports `ipv4`.</td> + </tr> + <tr> + <th>Limitations</th> + <td><p>Does not work with the <a href="https://kurl.sh/docs/add-ons/longhorn">Longhorn add-on</a>.</p><p>For additional limitations that apply to all distributions, see <a href="testing-about#limitations">Limitations</a>.</p></td> + </tr> + <tr> + <th>Common Use Cases</th> + <td>Customer release tests</td> + </tr> +</table> + +## Cloud Clusters + +This section lists the supported cloud clusters for compatibility testing. + +### EKS + +Compatibility Matrix supports creating [AWS EKS](https://aws.amazon.com/eks/?nc2=type_a) clusters. + +<table> + <tr> + <th width="35%">Type</th> + <th width="65%">Description</th> + </tr> + <tr> + <th>Supported Kubernetes Versions</th> + <td><p>{/* START_eks_VERSIONS */}1.25, 1.26, 1.27, 1.28, 1.29, 1.30, 1.31, 1.32{/* END_eks_VERSIONS */}</p><p>Extended Support Versions: 1.25, 1.26, 1.27, 1.28</p></td> + </tr> + <tr> + <th>Supported Instance Types</th> + <td><p>m6i.large, m6i.xlarge, m6i.2xlarge, m6i.4xlarge, m6i.8xlarge, m7i.large, m7i.xlarge, m7i.2xlarge, m7i.4xlarge, m7i.8xlarge, m5.large, m5.xlarge, m5.2xlarge, + m5.4xlarge, m5.8xlarge, m7g.large (arm), m7g.xlarge (arm), m7g.2xlarge (arm), m7g.4xlarge (arm), m7g.8xlarge (arm), c5.large, c5.xlarge, c5.2xlarge, c5.4xlarge, + c5.9xlarge, g4dn.xlarge (gpu), g4dn.2xlarge (gpu), g4dn.4xlarge (gpu), g4dn.8xlarge (gpu), g4dn.12xlarge (gpu), g4dn.16xlarge (gpu)</p><p>g4dn instance types depend on available capacity. After a g4dn cluster is running, you also need to install your version of the NVIDIA device plugin for Kubernetes. See [Amazon EKS optimized accelerated Amazon Linux AMIs](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html#gpu-ami) in the AWS documentation.</p></td> + </tr> + <tr> + <th>Node Groups</th> + <td>Yes</td> + </tr> + <tr> + <th>Node Auto Scaling</th> + <td>Yes. Cost will be based on the max number of nodes.</td> + </tr> + <tr> + <th>Nodes</th> + <td>Supports multiple nodes.</td> + </tr> + <tr> + <th>IP Family</th> + <td>Supports `ipv4`.</td> + </tr> + <tr> + <th>Limitations</th> + <td><p>You can only choose a minor version, not a patch version. The EKS installer chooses the latest patch for that minor version.</p><p>For additional limitations that apply to all distributions, see <a href="testing-about#limitations">Limitations</a>.</p></td> + </tr> + <tr> + <th>Common Use Cases</th> + <td>Customer release tests</td> + </tr> +</table> + +### GKE + +Compatibility Matrix supports creating [Google GKE](https://cloud.google.com/kubernetes-engine) clusters. 
+ +<table> + <tr> + <th width="35%">Type</th> + <th width="65%">Description</th> + </tr> + <tr> + <th>Supported Kubernetes Versions</th> + <td>{/* START_gke_VERSIONS */}1.29, 1.30, 1.31, 1.32{/* END_gke_VERSIONS */}</td> + </tr> + <tr> + <th>Supported Instance Types</th> + <td><p>n2-standard-2, n2-standard-4, n2-standard-8, n2-standard-16, n2-standard-32, t2a-standard-2 (arm), t2a-standard-4 (arm), t2a-standard-8 (arm), t2a-standard-16 (arm), t2a-standard-32 (arm), t2a-standard-48 (arm), e2-standard-2, e2-standard-4, e2-standard-8, e2-standard-16, e2-standard-32, n1-standard-1+nvidia-tesla-t4+1 (gpu), n1-standard-1+nvidia-tesla-t4+2 (gpu), n1-standard-1+nvidia-tesla-t4+4 (gpu), n1-standard-2+nvidia-tesla-t4+1 (gpu), n1-standard-2+nvidia-tesla-t4+2 (gpu), n1-standard-2+nvidia-tesla-t4+4 (gpu), n1-standard-4+nvidia-tesla-t4+1 (gpu), n1-standard-4+nvidia-tesla-t4+2 (gpu), n1-standard-4+nvidia-tesla-t4+4 (gpu), n1-standard-8+nvidia-tesla-t4+1 (gpu), n1-standard-8+nvidia-tesla-t4+2 (gpu), n1-standard-8+nvidia-tesla-t4+4 (gpu), n1-standard-16+nvidia-tesla-t4+1 (gpu), n1-standard-16+nvidia-tesla-t4+2 (gpu), n1-standard-16+nvidia-tesla-t4+4 (gpu), n1-standard-32+nvidia-tesla-t4+1 (gpu), n1-standard-32+nvidia-tesla-t4+2 (gpu), n1-standard-32+nvidia-tesla-t4+4 (gpu), n1-standard-64+nvidia-tesla-t4+1 (gpu), n1-standard-64+nvidia-tesla-t4+2 (gpu), n1-standard-64+nvidia-tesla-t4+4 (gpu), n1-standard-96+nvidia-tesla-t4+1 (gpu), n1-standard-96+nvidia-tesla-t4+2 (gpu), n1-standard-96+nvidia-tesla-t4+4 (gpu)</p><p>You can specify more than one node.</p></td> + </tr> + <tr> + <th>Node Groups</th> + <td>Yes</td> + </tr> + <tr> + <th>Node Auto Scaling</th> + <td>Yes. Cost will be based on the max number of nodes.</td> + </tr> + <tr> + <th>Nodes</th> + <td>Supports multiple nodes.</td> + </tr> + <tr> + <th>IP Family</th> + <td>Supports `ipv4`.</td> + </tr> + <tr> + <th>Limitations</th> + <td><p>You can choose only a minor version, not a patch version. The GKE installer chooses the latest patch for that minor version.</p><p>For additional limitations that apply to all distributions, see <a href="testing-about#limitations">Limitations</a>.</p></td> + </tr> + <tr> + <th>Common Use Cases</th> + <td>Customer release tests</td> + </tr> +</table> + +### AKS + +Compatibility Matrix supports creating [Azure AKS](https://azure.microsoft.com/en-us/products/kubernetes-service) clusters. + +<table> + <tr> + <th width="35%">Type</th> + <th width="65%">Description</th> + </tr> + <tr> + <th>Supported Kubernetes Versions</th> + <td>{/* START_aks_VERSIONS */}1.29, 1.30, 1.31{/* END_aks_VERSIONS */}</td> + </tr> + <tr> + <th>Supported Instance Types</th> + <td><p>Standard_B2ms, Standard_B4ms, Standard_B8ms, Standard_B16ms, Standard_DS2_v2, Standard_DS3_v2, Standard_DS4_v2, Standard_DS5_v2, Standard_DS2_v5, Standard_DS3_v5, Standard_DS4_v5, Standard_DS5_v5, Standard_D2ps_v5 (arm), Standard_D4ps_v5 (arm), Standard_D8ps_v5 (arm), Standard_D16ps_v5 (arm), Standard_D32ps_v5 (arm), Standard_D48ps_v5 (arm), Standard_NC4as_T4_v3 (gpu), Standard_NC8as_T4_v3 (gpu), Standard_NC16as_T4_v3 (gpu), Standard_NC64as_T4_v3 (gpu)</p><p>GPU instance types depend on available capacity. After a GPU cluster is running, you also need to install your version of the NVIDIA device plugin for Kubernetes. 
See [NVIDIA GPU Operator with Azure Kubernetes Service](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/microsoft-aks.html) in the NVIDIA documentation.</p></td>
+  </tr>
+  <tr>
+    <th>Node Groups</th>
+    <td>Yes</td>
+  </tr>
+  <tr>
+    <th>Node Auto Scaling</th>
+    <td>Yes. Cost will be based on the max number of nodes.</td>
+  </tr>
+  <tr>
+    <th>Nodes</th>
+    <td>Supports multiple nodes.</td>
+  </tr>
+  <tr>
+    <th>IP Family</th>
+    <td>Supports `ipv4`.</td>
+  </tr>
+  <tr>
+    <th>Limitations</th>
+    <td><p>You can choose only a minor version, not a patch version. The AKS installer chooses the latest patch for that minor version.</p><p>For additional limitations that apply to all distributions, see <a href="testing-about#limitations">Limitations</a>.</p></td>
+  </tr>
+  <tr>
+    <th>Common Use Cases</th>
+    <td>Customer release tests</td>
+  </tr>
+</table>
+
+### OKE (Beta)
+
+Compatibility Matrix supports creating [Oracle Container Engine for Kubernetes (OKE)](https://docs.oracle.com/en-us/iaas/Content/ContEng/Concepts/contengoverview.htm) clusters.
+
+<table>
+  <tr>
+    <th width="35%">Type</th>
+    <th width="65%">Description</th>
+  </tr>
+  <tr>
+    <th>Supported Kubernetes Versions</th>
+    <td>{/* START_oke_VERSIONS */}1.29.1, 1.30.1, 1.31.1{/* END_oke_VERSIONS */}</td>
+  </tr>
+  <tr>
+    <th>Supported Instance Types</th>
+    <td><p>VM.Standard2.1, VM.Standard2.2, VM.Standard2.4, VM.Standard2.8, VM.Standard2.16, VM.Standard3.Flex.1, VM.Standard3.Flex.2, VM.Standard3.Flex.4, VM.Standard3.Flex.8, VM.Standard3.Flex.16, VM.Standard.A1.Flex.1 (arm), VM.Standard.A1.Flex.2 (arm), VM.Standard.A1.Flex.4 (arm), VM.Standard.A1.Flex.8 (arm), VM.Standard.A1.Flex.16 (arm)</p></td>
+  </tr>
+  <tr>
+    <th>Node Groups</th>
+    <td>Yes</td>
+  </tr>
+  <tr>
+    <th>Node Auto Scaling</th>
+    <td>No</td>
+  </tr>
+  <tr>
+    <th>Nodes</th>
+    <td>Supports multiple nodes.</td>
+  </tr>
+  <tr>
+    <th>IP Family</th>
+    <td>Supports `ipv4`.</td>
+  </tr>
+  <tr>
+    <th>Limitations</th>
+    <td><p>Provisioning an OKE cluster takes between 8 and 10 minutes. If needed, adjust any timeouts in your CI pipelines accordingly.</p><p>For additional limitations that apply to all distributions, see <a href="testing-about#limitations">Limitations</a>.</p></td>
+  </tr>
+  <tr>
+    <th>Common Use Cases</th>
+    <td>Customer release tests</td>
+  </tr>
+</table>
+
+## Replicated Instance Types {#types}
+
+When creating a VM-based cluster with Compatibility Matrix, you must specify a Replicated instance type.
+
+<table>
+  <tr>
+    <th width="30%">Type</th>
+    <th width="35%">Memory (GiB)</th>
+    <th width="35%">VCPU Count</th>
+  </tr>
+  <tr>
+    <th>r1.small</th>
+    <td>8 GiB</td>
+    <td>2 VCPUs</td>
+  </tr>
+  <tr>
+    <th>r1.medium</th>
+    <td>16 GiB</td>
+    <td>4 VCPUs</td>
+  </tr>
+  <tr>
+    <th>r1.large</th>
+    <td>32 GiB</td>
+    <td>8 VCPUs</td>
+  </tr>
+  <tr>
+    <th>r1.xlarge</th>
+    <td>64 GiB</td>
+    <td>16 VCPUs</td>
+  </tr>
+  <tr>
+    <th>r1.2xlarge</th>
+    <td>128 GiB</td>
+    <td>32 VCPUs</td>
+  </tr>
+</table>
+
+## Kubernetes Version Support Policy
+
+We do not maintain forks or patches of the supported distributions. When a Kubernetes version in Compatibility Matrix reaches end of life (EOL) upstream, Replicated will attempt to continue supporting that version for six months for compatibility testing, to support customers who are running out-of-date versions of Kubernetes. If a critical security issue or bug is found and remains unresolved, we might discontinue support for EOL versions of Kubernetes sooner than six months after EOL.
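+
+For reference, here is one way to request a VM-based distribution, version, and Replicated instance type from the tables above using the Replicated CLI's `cluster create` command. This is a minimal sketch: the flag names follow the Compatibility Matrix CLI documentation, and the specific values are just examples:
+
+```bash
+# request a single-node RKE2 cluster on a medium VM that expires after one hour
+replicated cluster create \
+  --distribution rke2 \
+  --version 1.32.2 \
+  --instance-type r1.medium \
+  --nodes 1 \
+  --ttl 1h
+```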
+
+================
+File: docs/vendor/tutorial-adding-db-config.md
+================
+# Example: Adding Database Configuration Options
+
+In this tutorial, we'll explore ways to give your end user the option to either embed a database instance with the application or connect your application to an external database instance that they will manage.
+We'll use a PostgreSQL database as an example, configuring an example app to connect.
+
+This tutorial explores advanced topics like workload coordination, credential management, and refactoring your application's user-facing configuration in the Replicated Admin Console. We'll also review best practices for integrating persistent stores like databases, queues, and caches.
+
+It is split into four sections:
+
+- [The Example Application](#the-example-application)
+- [User-Facing Configuration](#user-facing-configuration)
+- [Embedding a Database](#embedding-a-database)
+- [Connecting to an External Database](#connecting-to-an-external-database)
+
+### Prerequisites
+
+This guide assumes you have:
+
+* A running instance of the Replicated Admin Console (`kotsadm`) to iterate against in either an existing cluster or an embedded cluster created with Replicated kURL. If you do not have a running instance of the Admin Console in an existing or kURL cluster, complete the [Install with KOTS in an Existing Cluster](tutorial-cli-setup) tutorial to package and install a sample application.
+* A local Git checkout of your application manifests.
+
+### Accompanying Code Examples
+
+A full example of the code for this guide can be found in the [kotsapps repository](https://github.com/replicatedhq/kotsapps/tree/master/postgres-snapshots).
+
+* * *
+
+## The Example Application
+
+For demonstration purposes, we'll use a simple app that connects to a Postgres database via the `psql` CLI.
+Once you've finished this guide, you should feel confident replacing it with any Kubernetes workload(s) that need to connect to a database.
+The deployment we'll use is shown below:
+
+```yaml
+# pg-consumer.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: pg-consumer
+spec:
+  selector:
+    matchLabels:
+      app: pg-consumer
+  template:
+    metadata:
+      labels:
+        app: pg-consumer
+    spec:
+      containers:
+      - name: pg-consumer
+        image: postgres:10
+        # connect to the database every 20 seconds
+        command:
+        - /bin/sh
+        - -ec
+        - |
+          while :; do
+            sleep 20
+            PGPASSWORD=${DB_PASSWORD} \
+              psql --host ${DB_HOST} \
+                   --port ${DB_PORT} \
+                   --user ${DB_USER} \
+                   --dbname ${DB_NAME} \
+                   --command 'SELECT NOW()'
+          done
+        # hard coded for now, we'll wire these up later
+        env:
+        - name: DB_HOST
+          value: postgres
+        - name: DB_PORT
+          value: "5432"
+        - name: DB_USER
+          value: postgres
+        - name: DB_PASSWORD
+          value: postgres
+        - name: DB_NAME
+          value: postgres
+```
+
+This app simply connects to the database every 20 seconds and writes the server timestamp to stdout.
+Even though `psql` supports [default environment variables](https://www.postgresql.org/docs/current/libpq-envars.html) for host, username, and so on that can be read transparently, we're intentionally using these generic `DB_` variables for clarity.
+Later, you can change these environment variable names to whatever format your application consumes.
+
+For now, we'll hard code the DB variable values; in the next sections, we'll wire them up to the user-provided configuration.
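+
+Before packaging this into a release, it can be worth sanity-checking the manifest locally. A quick sketch, assuming `kubectl` is configured against any cluster; a client-side dry run validates the YAML without creating anything:
+
+```bash
+# parse and validate the manifest client-side; nothing is applied to the cluster
+kubectl apply --dry-run=client -f pg-consumer.yaml
+```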
+
+
+### Deploying the example application
+
+Once you've added this deployment to your application's `manifests` directory, create a release by running `replicated release create --auto` locally.
+Then head to the Admin Console instance and click **Check for Updates** on the Version History tab to pull the new release:
+
+![View Update](/images/guides/kots/view-update.png)
+
+Click **Deploy**. You should be able to review the logs and see `deployment.apps/pg-consumer created` in `applyStdout`:
+
+
+![Deployed PG Consumer](/images/guides/kots/pg-consumer-deployed.png)
+
+
+After it is deployed, you can run `kubectl get pods` to inspect the cluster.
+We should expect the Pod to be crashlooping at this point, since there's no database to connect to just yet:
+
+```text
+$ kubectl get pod
+NAME                               READY   STATUS             RESTARTS   AGE
+kotsadm-5bbf54df86-p7kqg           1/1     Running            0          12m
+kotsadm-api-cbccb97ff-b6qxp        1/1     Running            2          12m
+kotsadm-minio-0                    1/1     Running            0          12m
+kotsadm-operator-84477b5c4-tplcp   1/1     Running            0          12m
+kotsadm-postgres-0                 1/1     Running            0          12m
+pg-consumer-75f49bfb69-mljr6       0/1     CrashLoopBackOff   1          10s
+```
+
+Checking the logs, we should see a connect error:
+
+```text
+$ kubectl logs -l app=pg-consumer
+psql: could not translate host name "postgres" to address: Name or service not known
+```
+
+If the `kubectl logs` command hangs, you can try using the `--previous` flag to fetch the logs of the most recent crash:
+
+
+```text
+$ kubectl logs -l app=pg-consumer --previous
+psql: could not translate host name "postgres" to address: Name or service not known
+```
+
+Now that our test app is deployed, we'll walk through presenting options to the end user for connecting a Postgres instance to this app.
+
+* * *
+
+## User-Facing Configuration
+
+The core of this guide is giving your end users the option to do one of the following:
+
+* Bring their own PostgreSQL instance for your app to connect to
+* Use an "embedded" database bundled in with the application
+
+The first step is to present that choice to the user; then we'll walk through implementing each scenario.
+The `kots.io/v1beta1` `Config` resource controls what configuration options are presented to the end user.
+If you followed one of the "Getting Started" guides, you probably have a `config.yaml` in your manifests that looks something like the following YAML file:
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Config
+metadata:
+  name: config-sample
+spec:
+  groups:
+    - name: example_settings
+      title: My Example Config
+      description: Configuration to serve as an example for creating your own. See [https://kots.io/reference/v1beta1/config/](https://kots.io/reference/v1beta1/config/) for configuration docs. In this case, we provide example fields for configuring an Ingress object.
+      items:
+      - name: use_ingress
+        title: Use Ingress?
+        help_text: An example field to toggle inclusion of an Ingress Object
+        type: bool
+        default: "0"
+      - name: ingress_hostname
+        title: Ingress Hostname
+        help_text: If desired, enter the hostname for ingress to this application. You can enter the IP of this instance, or a DNS hostname.
+        type: text
+        when: repl{{ ConfigOptionEquals "use_ingress" "1" }}
+```
+
+To add a database section, we'll modify it to include some database settings.
+In this case, we'll remove the Ingress toggle that is included as an example, although you might also choose to leave this in. None of these database settings will have any effect yet, but we'll still be able to preview what the end user will see.
+
+Modify your YAML to include this database section:
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Config
+metadata:
+  name: config-sample
+spec:
+  groups:
+    - name: database
+      title: Database
+      items:
+      - name: postgres_type
+        help_text: Would you like to use an embedded postgres instance, or connect to an external instance that you manage?
+        type: radio
+        title: Postgres
+        default: embedded_postgres
+        items:
+        - name: embedded_postgres
+          title: Embedded Postgres
+        - name: external_postgres
+          title: External Postgres
+      - name: embedded_postgres_password
+        hidden: true
+        type: password
+        value: "{{repl RandomString 32}}"
+```
+
+This creates a toggle to allow the user to choose between an embedded or external Postgres instance, and a `hidden` field to generate a unique password for the embedded instance.
+
+As mentioned in the introduction, a full example of the code for this guide can be found in the [kotsapps repository](https://github.com/replicatedhq/kotsapps/tree/master/postgres-snapshots).
+
+
+### Validating Config Changes
+
+Even though the options aren't wired up yet, let's create a new release to validate that the configuration screen was modified.
+Create a release by running `replicated release create --auto`.
+Then head to the Admin Console instance and click **Check for Updates** on the Version History tab to pull the new release:
+
+![View Update](/images/guides/kots/view-update.png)
+
+After the update is deployed, click the Config tab and review our new toggle.
+You might also notice that we've removed the Ingress settings to simplify things for this guide:
+
+![Database Config](/images/guides/kots/database-config.png)
+
+Now that we have the configuration screen started, we can proceed to implement the "Embedded Postgres" option.
+
+* * *
+
+## Embedding a Database
+
+To implement the embedded database option, we'll add a Kubernetes [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/), and use the [annotations for optional resources](packaging-include-resources/) to control when it will be included in the application.
+
+### Adding the Secret and StatefulSet
+
+First, we'll create a Secret to store the root password for our embedded postgres instance:
+
+```yaml
+# postgres-secret.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: postgres
+data:
+  DB_PASSWORD: '{{repl ConfigOption "embedded_postgres_password" | Base64Encode }}'
+```
+
+Next, create a new YAML file in your `manifests` directory with the following contents.
+
+Note the use of `kots.io/when` to conditionally include this resource based on end-user inputs:
+
+```yaml
+# postgres-statefulset.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: postgres
+  labels:
+    app: pg-provider
+  annotations:
+    kots.io/when: '{{repl ConfigOptionEquals "postgres_type" "embedded_postgres" }}'
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: pg-provider
+  serviceName: postgres
+  template:
+    metadata:
+      labels:
+        app: pg-provider
+    spec:
+      containers:
+      - env:
+        - name: PGDATA
+          value: /var/lib/postgresql/data/pgdata
+        # create a db called "postgres"
+        - name: POSTGRES_DB
+          value: postgres
+        # create admin user with name "postgres"
+        - name: POSTGRES_USER
+          value: postgres
+        # use admin password from secret
+        - name: POSTGRES_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              key: DB_PASSWORD
+              name: postgres
+        image: postgres:10
+        name: postgres
+        volumeMounts:
+        - mountPath: /var/lib/postgresql/data
+          name: pgdata
+      volumes:
+      - name: pgdata
+        persistentVolumeClaim:
+          claimName: pgdata
+  volumeClaimTemplates:
+  - metadata:
+      name: pgdata
+    spec:
+      accessModes:
+      - ReadWriteOnce
+      resources:
+        requests:
+          storage: 1Gi
+```
+
+Finally, let's add a Service object so we can route traffic to our postgres instance, again using `kots.io/when` to conditionally include this resource:
+
+
+```yaml
+# postgres-service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: postgres
+  labels:
+    app: pg-provider
+  annotations:
+    kots.io/when: '{{repl ConfigOptionEquals "postgres_type" "embedded_postgres" }}'
+spec:
+  ports:
+  - port: 5432
+  selector:
+    app: pg-provider
+  type: ClusterIP
+```
+
+### Validating the Embedded Database
+
+After you've added these resources, you can push a new release and update the application in the Admin Console.
+You should see the following in the deployment logs:
+
+![Embedded PG Deployed](/images/guides/kots/embedded-pg-deployed.png)
+
+We should now see an instance of Postgres running in our namespace as well.
+The consumer may still be crashlooping, but we can see the error is different now:
+
+```text
+$ kubectl logs -l app=pg-consumer
+psql: FATAL: password authentication failed for user "postgres"
+```
+
+This is because we still need to deliver the generated password to our workload pod.
+In `pg-consumer.yaml`, we'll remove this section:
+
+```yaml
+        - name: DB_PASSWORD
+          value: postgres
+```
+
+and replace it with:
+
+```yaml
+        - name: DB_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: postgres
+              key: DB_PASSWORD
+```
+
+The full Deployment should now look like the following YAML file:
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: pg-consumer
+spec:
+  selector:
+    matchLabels:
+      app: pg-consumer
+  template:
+    metadata:
+      labels:
+        app: pg-consumer
+    spec:
+      containers:
+      - name: pg-consumer
+        image: 'postgres:10'
+        # connect to the database every 20 seconds
+        command:
+        - /bin/sh
+        - -ec
+        - |
+          while :; do
+            sleep 20
+            PGPASSWORD=${DB_PASSWORD} \
+              psql --host ${DB_HOST} \
+                   --port ${DB_PORT} \
+                   --user ${DB_USER} \
+                   --dbname ${DB_NAME} \
+                   --command 'SELECT NOW()'
+          done
+        # all but DB_PASSWORD are still hard coded; we'll wire the rest up later
+        env:
+        - name: DB_HOST
+          value: postgres
+        - name: DB_PORT
+          value: "5432"
+        - name: DB_USER
+          value: postgres
+        - name: DB_NAME
+          value: postgres
+        - name: DB_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: postgres
+              key: DB_PASSWORD
+```
+
+From here, make another release and deploy it.
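+
+Once it's deployed, you can optionally spot-check that the generated password actually landed in the Secret before looking at the consumer. A quick sketch, assuming the default namespace and the `postgres` Secret name used above:
+
+```bash
+# decode the generated embedded-postgres password from the Secret
+kubectl get secret postgres -o jsonpath='{.data.DB_PASSWORD}' | base64 --decode
+```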
+
+You should see the consumer pod is now able to connect to the database:
+
+
+```text
+$ kubectl get pod
+NAME                               READY   STATUS    RESTARTS   AGE
+kotsadm-5bbf54df86-p7kqg           1/1     Running   0          144m
+kotsadm-api-cbccb97ff-b6qxp        1/1     Running   2          144m
+kotsadm-minio-0                    1/1     Running   0          144m
+kotsadm-operator-84477b5c4-tplcp   1/1     Running   0          144m
+kotsadm-postgres-0                 1/1     Running   0          144m
+pg-consumer-77b868d7d8-xdn9v       1/1     Running   0          20s
+postgres-0                         1/1     Running   0          6m22s
+```
+
+Checking the logs, we can connect now:
+
+```text
+$ kubectl logs -l app=pg-consumer
+ now
+-------------------------------
+ 2020-04-12 17:11:45.019293+00
+(1 row)
+
+ now
+-------------------------------
+ 2020-04-12 17:11:55.072041+00
+(1 row)
+```
+
+Now that we've configured our application to read from an embedded postgres instance, we'll switch to allowing the end user to provide their own database connection parameters.
+
+* * *
+
+## Connecting to an External Database
+
+In this section, we'll expand our configuration screen to allow end users to bring their own Postgres instance.
+
+### Modifying the Config Screen
+
+Let's update our config screen to allow an end user to input some details about their database.
+We'll add the following YAML, noting the use of the `when` field to conditionally hide or show fields in the user-facing config screen:
+
+```yaml
+      - name: external_postgres_host
+        title: Postgres Host
+        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+        type: text
+        default: postgres
+      - name: external_postgres_port
+        title: Postgres Port
+        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+        type: text
+        default: "5432"
+      - name: external_postgres_user
+        title: Postgres Username
+        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+        type: text
+        required: true
+      - name: external_postgres_password
+        title: Postgres Password
+        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+        type: password
+        required: true
+      - name: external_postgres_db
+        title: Postgres Database
+        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+        type: text
+        default: postgres
+```
+
+Your full configuration screen should now look something like the following YAML file:
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Config
+metadata:
+  name: config-sample
+spec:
+  groups:
+    - name: database
+      title: Database
+      items:
+      - name: postgres_type
+        help_text: Would you like to use an embedded postgres instance, or connect to an external instance that you manage? 
+        type: radio
+        title: Postgres
+        default: embedded_postgres
+        items:
+        - name: embedded_postgres
+          title: Embedded Postgres
+        - name: external_postgres
+          title: External Postgres
+      - name: embedded_postgres_password
+        hidden: true
+        type: password
+        value: "{{repl RandomString 32}}"
+      - name: external_postgres_host
+        title: Postgres Host
+        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+        type: text
+        default: postgres
+      - name: external_postgres_port
+        title: Postgres Port
+        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+        type: text
+        default: "5432"
+      - name: external_postgres_user
+        title: Postgres Username
+        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+        type: text
+        required: true
+      - name: external_postgres_password
+        title: Postgres Password
+        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+        type: password
+        required: true
+      - name: external_postgres_db
+        title: Postgres Database
+        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+        type: text
+        default: postgres
+```
+
+Let's save this and create a new release. After deploying the release in the Admin Console, click **Config** and set the toggle to "External Postgres" to see the new fields:
+
+To demonstrate that these are working, let's add some values that we know won't work, and confirm that selecting "External Postgres" removes our embedded postgres instance:
+
+
+![External PG Config Fake](/images/guides/kots/external-pg-config-fake.png)
+
+Save these settings, and then you'll be directed back to the Version History page to apply the change:
+
+![Deploy Config Change](/images/guides/kots/deploy-config-change.png)
+
+After this is deployed, we should see that the postgres StatefulSet has been removed, and that our sample application is back to failing:
+
+
+```text
+$ kubectl get pod
+NAME                               READY   STATUS    RESTARTS   AGE
+kotsadm-5bbf54df86-8ws98           1/1     Running   0          12m
+kotsadm-api-cbccb97ff-r7mz6        1/1     Running   2          12m
+kotsadm-minio-0                    1/1     Running   0          12m
+kotsadm-operator-84477b5c4-4gmbm   1/1     Running   0          12m
+kotsadm-postgres-0                 1/1     Running   0          12m
+pg-consumer-6bd78594d-n7nmw        0/1     Error     2          29s
+```
+
+You'll note that it is failing, but it is still using our hardcoded environment variables, not the user-entered config:
+
+```text
+$ kubectl logs -l app=pg-consumer
+psql: could not translate host name "postgres" to address: Name or service not known
+```
+
+In the next step, we'll wire the end-user configuration values into our service.
+
+### Mapping User Inputs
+
+To map the user-supplied configuration, we'll start by expanding the Secret we created before, adding fields for the additional variables and using `{{repl if ... }}` blocks to switch between the embedded and external contexts.
+
+To start, you can add a field for the hostname, using Base64Encode. The template must be written on a single line, as shown in the following example.
+
+
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: postgres
+data:
+  DB_PASSWORD: '{{repl ConfigOption "embedded_postgres_password" | Base64Encode }}'
+  DB_HOST:
+    {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" }}{{repl Base64Encode "postgres" }}{{repl else}}{{repl ConfigOption "external_postgres_host" | Base64Encode }}{{repl end}}
+```
+
+Now that we have the value in our Secret, we can modify our deployment to consume it.
+Replace this text: + +```yaml + - name: DB_HOST + value: postgres +``` + +with this text: + +```yaml + - name: DB_HOST + valueFrom: + secretKeyRef: + name: postgres + key: DB_HOST +``` + +Your full deployment should look something like the following YAML file: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pg-consumer +spec: + selector: + matchLabels: + app: pg-consumer + template: + metadata: + labels: + app: pg-consumer + spec: + containers: + - name: pg-consumer + image: 'postgres:10' + # connect to the database every 20 seconds + command: + - /bin/sh + - -ec + - | + while :; do + sleep 20 + PGPASSWORD=${DB_PASSWORD} \ + psql --host ${DB_HOST} \ + --port ${DB_PORT} \ + --user ${DB_USER} \ + --dbname ${DB_NAME} \ + --command 'SELECT NOW()' + done + env: + - name: DB_HOST + valueFrom: + secretKeyRef: + name: postgres + key: DB_HOST + - name: DB_PORT + value: "5432" + - name: DB_USER + value: postgres + - name: DB_NAME + value: postgres + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: postgres + key: DB_PASSWORD +``` + +From here, let's create and deploy a release, and verify that the secret has the customer-provided value, base64 decoding the secret contents: + +```text +$ kubectl get secret postgres -o yaml | head -n 4 +apiVersion: v1 +data: + DB_HOST: ZmFrZQ== + DB_PASSWORD: ajNVWDd1RnRfc0NkVTJqOFU3Q25xUkxRQk5fUlh3RjA= +``` + +You can verify we pulled in our user-provided config by base64-decoding the `DB_HOST` field: + +```text +$ echo ZmFrZQ== | base64 --decode +fake +``` + +Checking on our service itself, we can verify that it's now trying to connect to the `fake` hostname instead of `postgres`: + +```text +$ kubectl logs -l app=pg-consumer +psql: could not translate host name "fake" to address: Name or service not known +``` + +We'll optionally wire this to a real external Postgres database later, but for now we'll proceed to add the rest of the fields. + +### Extending this to All Fields + +Now that we've wired the DB_HOST field all the way through, we'll do the same for the other fields. 
+In the end, your Secret and Deployment should look like the following YAML files: + +```yaml +# postgres-secret.yaml +apiVersion: v1 +kind: Secret +metadata: + name: postgres +data: + DB_HOST: >- + {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}} + {{repl Base64Encode "postgres" }} + {{repl else -}} + {{repl ConfigOption "external_postgres_host" | Base64Encode }} + {{repl end}} + DB_PORT: >- + {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}} + {{repl Base64Encode "5432" }} + {{repl else -}} + {{repl ConfigOption "external_postgres_port" | Base64Encode }} + {{repl end}} + DB_USER: >- + {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}} + {{repl Base64Encode "postgres" }} + {{repl else -}} + {{repl ConfigOption "external_postgres_user" | Base64Encode }} + {{repl end}} + DB_PASSWORD: >- + {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}} + {{repl ConfigOption "embedded_postgres_password" | Base64Encode }} + {{repl else -}} + {{repl ConfigOption "external_postgres_password" | Base64Encode }} + {{repl end}} + DB_NAME: >- + {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}} + {{repl Base64Encode "postgres" }} + {{repl else -}} + {{repl ConfigOption "external_postgres_db" | Base64Encode }} + {{repl end}} +``` + +```yaml +# pg-consumer.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pg-consumer +spec: + selector: + matchLabels: + app: pg-consumer + template: + metadata: + labels: + app: pg-consumer + spec: + containers: + - name: pg-consumer + image: 'postgres:10' + # connect to the database every 20 seconds + command: + - /bin/sh + - -ec + - | + while :; do + sleep 20 + PGPASSWORD=${DB_PASSWORD} \ + psql --host ${DB_HOST} \ + --port ${DB_PORT} \ + --user ${DB_USER} \ + --dbname ${DB_NAME} \ + --command 'SELECT NOW()' + done + env: + - name: DB_HOST + valueFrom: + secretKeyRef: + name: postgres + key: DB_HOST + - name: DB_PORT + valueFrom: + secretKeyRef: + name: postgres + key: DB_PORT + - name: DB_USER + valueFrom: + secretKeyRef: + name: postgres + key: DB_USER + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: postgres + key: DB_PASSWORD + - name: DB_NAME + valueFrom: + secretKeyRef: + name: postgres + key: DB_NAME +``` + +Optionally, you can be extra concise and collapse each individual `env` `valueFrom` into a single `envFrom` `secretRef` entry: + +```yaml +# pg-consumer.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pg-consumer +spec: + selector: + matchLabels: + app: pg-consumer + template: + metadata: + labels: + app: pg-consumer + spec: + containers: + - name: pg-consumer + image: 'postgres:10' + # connect to the database every 20 seconds + command: + - /bin/sh + - -ec + - | + while :; do + sleep 20 + PGPASSWORD=${DB_PASSWORD} \ + psql --host ${DB_HOST} \ + --port ${DB_PORT} \ + --user ${DB_USER} \ + --dbname ${DB_NAME} \ + --command 'SELECT NOW()' + done + envFrom: + - secretRef: + name: postgres +``` + + +After deploying this, you should see all of the fields in the secret: + +```text +$ kubectl get secret postgres -o yaml +apiVersion: v1 +data: + DB_HOST: ZmFrZQ== + DB_NAME: ZmFrZQ== + DB_PASSWORD: ZXh0cmEgZmFrZQ== + DB_PORT: NTQzMjE= + DB_USER: ZmFrZQ== +kind: Secret +# ...snip... 
+
+```
+
+We can also print the environment in our sample app to verify that all of the values are piped properly:
+
+```text
+$ kubectl exec $(kubectl get pod -l app=pg-consumer -o jsonpath='{.items[0].metadata.name}' ) -- /bin/sh -c 'printenv | grep DB_'
+DB_PORT=54321
+DB_NAME=fake
+DB_PASSWORD=extra fake
+DB_HOST=fake
+DB_USER=fake
+```
+
+### Testing Config Changes
+
+Now let's make some changes to the database credentials. In this case, we'll use a Postgres database provisioned in Amazon RDS, but you can use any external database.
+To start, head to the "Config" screen and input your values:
+
+![Real Postgres Values](/images/guides/kots/real-postgres-values.png)
+
+Let's save and apply this config and check in our pod again:
+
+```text
+$ kubectl exec $(kubectl get pod -l app=pg-consumer -o jsonpath='{.items[0].metadata.name}' ) -- /bin/sh -c 'printenv | grep DB_'
+DB_PORT=54321
+DB_NAME=fake
+DB_PASSWORD=extra fake
+DB_HOST=fake
+DB_USER=fake
+```
+
+Uh oh, it appears that our values did not get updated! If you've worked with Secrets before, you may know that there's a [long-standing issue in Kubernetes](https://github.com/kubernetes/kubernetes/issues/22368) where pods that load config from Secrets or ConfigMaps won't automatically restart when the underlying config is changed.
+There are some tricks to make this work, and in the next step we'll implement one of them, but for now we can delete the pod to verify that the configuration is being piped through to our sample application:
+
+```text
+$ kubectl delete pod -l app=pg-consumer
+pod "pg-consumer-6df9d5d7fd-bd5z6" deleted
+```
+
+If the pod is crashlooping, you might need to add `--force --grace-period 0` to force delete it.
+In either case, once a new pod starts, we should now see it loading the correct config:
+
+```text
+$ kubectl exec $(kubectl get pod -l app=pg-consumer -o jsonpath='{.items[0].metadata.name}' ) -- /bin/sh -c 'printenv | grep DB_'
+DB_PORT=5432
+DB_NAME=postgres
+DB_PASSWORD=<redacted>
+DB_HOST=10.128.0.12
+DB_USER=postgres
+```
+
+### Triggering Restarts on Changes
+
+To automate this restart on changes, we'll use a hash of all the database parameters to trigger a rolling update whenever any of them changes.
+We'll use a `hidden`, `readonly` field to store this in our config screen:
+
+```yaml
+    - name: external_postgres_confighash
+      hidden: true
+      readonly: true
+      type: text
+      value: '{{repl (sha256sum (print (ConfigOption "external_postgres_host") (ConfigOption "external_postgres_port") (ConfigOption "external_postgres_user") (ConfigOption "external_postgres_password") (ConfigOption "external_postgres_db") ))}}'
+```
+
+The `hidden` flag will hide it from the UI, and the `readonly` flag in this case will cause the value to be re-computed any time an upstream `ConfigOption` value changes.
+
+Next, let's add this as an annotation to our deployment's pod template at `spec.template.metadata.annotations`:
+
+```yaml
+annotations:
+  kots.io/config-hash: '{{repl ConfigOption "external_postgres_confighash"}}'
+```
+
+**Note**: There's nothing special about the `kots.io/config-hash` annotation; we could just as easily have called it `my-app-something-fake` instead.
+What matters is that when the value of an annotation in the Deployment's pod template changes, Kubernetes rolls out a new version of the pod, stopping the old one and thus picking up our config changes.
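+
+To see the mechanism in action after saving a config change, you can watch the Deployment roll and confirm the new hash landed on the pod. A quick sketch, assuming the `pg-consumer` Deployment and the annotation name from this guide:
+
+```bash
+# watch the rolling update triggered by the changed pod-template annotation
+kubectl rollout status deployment/pg-consumer
+
+# print the config-hash annotation on the current pod
+kubectl get pod -l app=pg-consumer \
+  -o jsonpath="{.items[0].metadata.annotations['kots\.io/config-hash']}"
+```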
+
+
+Your full deployment should now look like the following YAML file:
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: pg-consumer
+spec:
+  selector:
+    matchLabels:
+      app: pg-consumer
+  template:
+    metadata:
+      labels:
+        app: pg-consumer
+      annotations:
+        kots.io/config-hash: '{{repl ConfigOption "external_postgres_confighash"}}'
+    spec:
+      containers:
+      - name: pg-consumer
+        image: 'postgres:10'
+        # connect to the database every 20 seconds
+        command:
+        - /bin/sh
+        - -ec
+        - |
+          while :; do
+            sleep 20
+            PGPASSWORD=${DB_PASSWORD} \
+              psql --host ${DB_HOST} \
+                   --port ${DB_PORT} \
+                   --user ${DB_USER} \
+                   --dbname ${DB_NAME} \
+                   --command 'SELECT NOW()'
+          done
+        envFrom:
+        - secretRef:
+            name: postgres
+```
+
+
+### Integrating a Real Database
+
+If you'd like, at this point you can integrate a real database in your environment: just fill out your configuration fields. You'll know you did it right if your pg-consumer pod can connect.
+
+================
+File: docs/vendor/tutorial-cli-create-app.mdx
+================
+# Step 2: Create an Application
+
+After you install the Replicated CLI and create an API token, you can use the CLI to create a new application.
+
+To create an application:
+
+1. Run the following command to create an application named `cli-tutorial`:
+
+   ```
+   replicated app create cli-tutorial
+   ```
+
+   **Example output**:
+
+   ```
+   ID        NAME           SLUG           SCHEDULER
+   2GmY...   cli-tutorial   cli-tutorial   kots
+   ```
+
+1. Export the application slug in the output of the `app create` command as an environment variable:
+
+   ```
+   export REPLICATED_APP=YOUR_SLUG
+   ```
+   Replace `YOUR_SLUG` with the slug for the application you created in the previous step.
+
+1. Verify that both the `REPLICATED_API_TOKEN` environment variable that you created as part of [Step 1: Install the Replicated CLI](tutorial-cli-install-cli) and the `REPLICATED_APP` environment variable are set correctly:
+
+   ```
+   replicated release ls
+   ```
+
+   In the output of this command, you now see an empty list of releases for the application:
+
+   ```
+   SEQUENCE    CREATED    EDITED    ACTIVE_CHANNELS
+   ```
+
+## Next Step
+
+Continue to [Step 3: Get the Sample Manifests](tutorial-cli-manifests) to download the manifest files for a sample Kubernetes application. You will use these manifest files to create the first release for the `cli-tutorial` application.
+
+================
+File: docs/vendor/tutorial-cli-create-customer.mdx
+================
+# Step 5: Create a Customer
+
+After promoting the first release for the `cli-tutorial` application, create a customer so that you can install the application.
+
+A _customer_ is an object in the Vendor Portal that represents a single licensed user of your application. When you create a customer, you define entitlement information for the user, and the Vendor Portal generates a YAML license file for the customer that you can download.
+
+When you install the application later in this tutorial, you will upload the license file that you create in this step to allow KOTS to create the application containers.
+
+To create a customer and download the license file:
+
+1. From the `replicated-cli-tutorial` directory, create a license for a customer named `Some-Big-Bank` that is assigned to the Unstable channel and expires in 10 days:
+
+   ```
+   replicated customer create \
+     --name "Some-Big-Bank" \
+     --expires-in "240h" \
+     --channel "Unstable"
+   ```
+   The Unstable channel is the channel where you promoted the release in [Step 4: Create a Release](tutorial-cli-create-release). 
Assigning the customer to a channel allows them to install the releases that are promoted to that channel.
+
+   **Example output:**
+
+   ```
+   ID                             NAME            CHANNELS    EXPIRES                          TYPE
+   2GuB3VYLjU5t9vNDK6byjgiTKUs    Some-Big-Bank   Unstable    2022-11-10 14:59:49 +0000 UTC    dev
+   ```
+
+1. Verify the customer creation details:
+
+   ```
+   replicated customer ls
+   ```
+
+   **Example output:**
+
+   ```
+   ID                             NAME            CHANNELS    EXPIRES                          TYPE
+   2GuB3VYLjU5t9vNDK6byjgiTKUs    Some-Big-Bank   Unstable    2022-11-10 14:59:49 +0000 UTC    dev
+   ```
+
+1. Download the license file for the customer that you just created:
+
+   ```
+   replicated customer download-license \
+     --customer "Some-Big-Bank"
+   ```
+
+   The license downloads to `stdout`.
+
+   **Example output**:
+
+   ```
+   apiVersion: kots.io/v1beta1
+   kind: License
+   metadata:
+     name: some-big-bank
+   spec:
+     appSlug: cli-tutorial
+     channelID: 2GmYFUFzj8JOSLYw0jAKKJKFua8
+     channelName: Unstable
+     customerName: Some-Big-Bank
+     endpoint: https://replicated.app
+     entitlements:
+       expires_at:
+         description: License Expiration
+         title: Expiration
+         value: "2022-11-10T14:59:49Z"
+         valueType: String
+     isNewKotsUiEnabled: true
+     licenseID: 2GuB3ZLQsU38F5SX3n03x8qBzeL
+     licenseSequence: 1
+     licenseType: dev
+     signature: eyJsaW...
+   ```
+
+1. Rename the license file and save it to your Desktop folder:
+
+   ```
+   export LICENSE_FILE=~/Desktop/Some-Big-Bank-${REPLICATED_APP}-license.yaml
+   replicated customer download-license --customer "Some-Big-Bank" > "${LICENSE_FILE}"
+   ```
+
+1. Verify that the license was written properly using either `cat` or `head`:
+
+   ```
+   head ${LICENSE_FILE}
+   ```
+
+   **Example output**:
+
+   ```
+   apiVersion: kots.io/v1beta1
+   kind: License
+   metadata:
+     name: some-big-bank
+   spec:
+     appSlug: cli-tutorial
+     channelID: 2GmYFUFzj8JOSLYw0jAKKJKFua8
+     channelName: Unstable
+     customerName: Some-Big-Bank
+     endpoint: https://replicated.app
+   ```
+
+## Next Step
+
+Continue to [Step 6: Install KOTS and the Application](tutorial-cli-install-app-manager) to get the installation commands from the Unstable channel, then install the KOTS components and the sample application in your cluster.
+
+================
+File: docs/vendor/tutorial-cli-create-new-version.mdx
+================
+# Step 8: Create a New Version
+
+In this step, you make an edit to the Config custom resource manifest file in the `replicated-cli-tutorial/manifests` directory for the `cli-tutorial` application to create a new field on the **Config** page in the Admin Console. You will then create and promote a new release to the Unstable channel with your changes.
+
+To create and promote a new version of the application:
+
+1. In your local directory, go to the `replicated-cli-tutorial/manifests` folder and open the `kots-config.yaml` file in a text editor.
+
+1. Copy and paste the following YAML into the file under the `example_default_value` field to create a new text field on the **Config** page:
+
+   ```yaml
+     - name: more_text
+       title: Another Text Example
+       type: text
+       value: ""
+       when: repl{{ ConfigOptionEquals "show_text_inputs" "1" }}
+   ```
+   The following shows the full YAML for the `kots-config.yaml` file after you add the new field:
+
+   ```yaml
+   ---
+   apiVersion: kots.io/v1beta1
+   kind: Config
+   metadata:
+     name: config-sample
+   spec:
+     groups:
+       - name: example_settings
+         title: My Example Config
+         description: Configuration to serve as an example for creating your own. See [https://kots.io/reference/v1beta1/config/](https://kots.io/reference/v1beta1/config/) for configuration docs. 
In this case, we provide example fields for configuring an Nginx welcome page.
+         items:
+         - name: show_text_inputs
+           title: Customize Text Inputs
+           help_text: "Show custom user text inputs"
+           type: bool
+           default: "0"
+           recommended: true
+         - name: example_default_value
+           title: Text Example (with default value)
+           type: text
+           value: ""
+           default: please change this value
+           when: repl{{ ConfigOptionEquals "show_text_inputs" "1" }}
+         # Add the new more_text field here
+         - name: more_text
+           title: Another Text Example
+           type: text
+           value: ""
+           when: repl{{ ConfigOptionEquals "show_text_inputs" "1" }}
+         - name: api_token
+           title: API token
+           type: password
+           props:
+             rows: 5
+           when: repl{{ ConfigOptionEquals "show_text_inputs" "1" }}
+         - name: readonly_text_left
+           title: Readonly Text
+           type: text
+           value: "{{repl RandomString 10}}"
+           readonly: true
+           when: repl{{ ConfigOptionEquals "show_text_inputs" "1" }}
+         - name: hidden_text
+           title: Secret Key
+           type: password
+           hidden: true
+           value: "{{repl RandomString 40}}"
+
+   ```
+
+1. Open the `example-configmap.yaml` file.
+
+1. In the `example-configmap.yaml` file, copy and paste the following HTML to replace the `<body>` section:
+
+   ```
+     <body>
+       This is an example KOTS application.
+       <p>This is text from a user config value: '{{repl ConfigOption "example_default_value"}}' </p>
+       <p>This is more text from a user config value: '{{repl ConfigOption "more_text"}}' </p>
+       <p>This is a hidden value: '{{repl ConfigOption "hidden_text"}}'</p>
+     </body>
+   ```
+   This creates a reference to the `more_text` field using a Replicated KOTS template function. The ConfigOption template function renders the user input from the configuration item that you specify. For more information, see [Config Context](/reference/template-functions-config-context) in _Reference_.
+
+1. Save the changes to both YAML files.
+
+1. Change to the root `replicated-cli-tutorial` directory, then run the following command to verify that there are no errors in the YAML:
+
+   ```
+   replicated release lint --yaml-dir=manifests
+   ```
+
+1. Create a new release and promote it to the Unstable channel:
+
+   ```
+   replicated release create --auto
+   ```
+
+   **Example output**:
+
+   ```
+   • Reading manifests from ./manifests ✓
+   • Creating Release ✓
+     • SEQUENCE: 2
+   • Promoting ✓
+     • Channel 2GmYFUFzj8JOSLYw0jAKKJKFua8 successfully set to release 2
+   ```
+
+1. Type `y` and press **Enter** to continue with the defaults.
+
+   **Example output**:
+
+   ```
+   RULE    TYPE    FILENAME    LINE    MESSAGE
+
+   • Reading manifests from ./manifests ✓
+   • Creating Release ✓
+     • SEQUENCE: 2
+   • Promoting ✓
+     • Channel 2GmYFUFzj8JOSLYw0jAKKJKFua8 successfully set to release 2
+   ```
+
+   The release is created and promoted to the Unstable channel with `SEQUENCE: 2`.
+
+1. Verify that the release was promoted to the Unstable channel:
+
+   ```
+   replicated release ls
+   ```
+   **Example output**:
+
+   ```
+   SEQUENCE    CREATED                 EDITED                  ACTIVE_CHANNELS
+   2           2022-11-03T19:16:24Z    0001-01-01T00:00:00Z    Unstable
+   1           2022-11-03T18:49:13Z    0001-01-01T00:00:00Z
+   ```
+
+## Next Step
+
+Continue to [Step 9: Update the Application](tutorial-cli-update-app) to return to the Admin Console and update the application to the new version that you promoted.
+ +================ +File: docs/vendor/tutorial-cli-create-release.mdx +================ +# Step 4: Create a Release + +Now that you have the manifest files for the sample Kubernetes application, you can create a release for the `cli-tutorial` application and promote the release to the Unstable channel. + +By default, the Vendor Portal includes Unstable, Beta, and Stable release channels. The Unstable channel is intended for software vendors to use for internal testing, before promoting a release to the Beta or Stable channels for distribution to customers. For more information about channels, see [About Channels and Releases](releases-about). + +To create and promote a release to the Unstable channel: + +1. From the `replicated-cli-tutorial` directory, lint the application manifest files and ensure that there are no errors in the YAML: + + ``` + replicated release lint --yaml-dir=manifests + ``` + + If there are no errors, an empty list is displayed with a zero exit code: + + ```text + RULE TYPE FILENAME LINE MESSAGE + ``` + + For a complete list of the possible error, warning, and informational messages that can appear in the output of the `release lint` command, see [Linter Rules](/reference/linter). + +1. Initialize the project as a Git repository: + + ``` + git init + git add . + git commit -m "Initial Commit: CLI Tutorial" + ``` + + Initializing the project as a Git repository allows you to track your history. The Replicated CLI also reads Git metadata to help with the generation of release metadata, such as version labels. + +1. From the `replicated-cli-tutorial` directory, create a release with the default settings: + + ``` + replicated release create --auto + ``` + + The `--auto` flag generates release notes and metadata based on the Git status. + + **Example output:** + + ``` + • Reading Environment ✓ + + Prepared to create release with defaults: + + yaml-dir "./manifests" + promote "Unstable" + version "Unstable-ba710e5" + release-notes "CLI release of master triggered by exampleusername [SHA: d4173a4] [31 Oct 22 08:51 MDT]" + ensure-channel true + lint-release true + + Create with these properties? [Y/n] + ``` + +1. Type `y` and press **Enter** to confirm the prompt. + + **Example output:** + + ```text + • Reading manifests from ./manifests ✓ + • Creating Release ✓ + • SEQUENCE: 1 + • Promoting ✓ + • Channel VEr0nhJBBUdaWpPvOIK-SOryKZEwa3Mg successfully set to release 1 + ``` + The release is created and promoted to the Unstable channel. + +1. Verify that the release was promoted to the Unstable channel: + + ``` + replicated release ls + ``` + **Example output:** + + ```text + SEQUENCE CREATED EDITED ACTIVE_CHANNELS + 1 2022-10-31T14:55:35Z 0001-01-01T00:00:00Z Unstable + ``` + +## Next Step + +Continue to [Step 5: Create a Customer](tutorial-cli-create-customer) to create a customer license file that you will upload when installing the application. + +================ +File: docs/vendor/tutorial-cli-deploy-app.mdx +================ +# Step 7: Configure the Application + +After you install KOTS, you can log in to the KOTS Admin Console. This procedure shows you how to make a configuration change for the application from the Admin Console, which is a typical task performed by end users. + +To configure the application: + +1. Access the Admin Console using `https://localhost:8800` if the installation script is still running. 
Otherwise, run the following command to access the Admin Console: + + ```bash + kubectl kots admin-console --namespace NAMESPACE + ``` + + Replace `NAMESPACE` with the namespace where KOTS is installed. + +1. Enter the password that you created in [Step 6: Install KOTS and the Application](tutorial-cli-install-app-manager) to log in to the Admin Console. + + The Admin Console dashboard opens. On the Admin Console **Dashboard** tab, users can take various actions, including viewing the application status, opening the application, checking for application updates, syncing their license, and setting up application monitoring on the cluster with Prometheus. + + ![Admin Console app dashboard](/images/tutorials/tutorial-admin-console-dashboard.png) + +1. On the **Config** tab, select the **Customize Text Inputs** checkbox. In the **Text Example** field, enter any text. For example, `Hello`. + + ![Admin Console configuration tab](/images/tutorials/tutorial-install-config-tab.png) + + This page displays configuration settings that are specific to the application. Software vendors define the fields that are displayed on this page in the KOTS Config custom resource. For more information, see [Config](/reference/custom-resource-config) in _Reference_. + +1. Click **Save config**. In the dialog that opens, click **Go to updated version**. + + The **Version history** tab opens. + +1. Click **Deploy** for the new version. Then click **Yes, deploy** in the confirmation dialog. + + ![Admin Console configuration tab](/images/tutorials/tutorial-install-version-history.png) + +1. Click **Open App** to view the application in your browser. + + ![web page that displays text](/images/tutorials/tutorial-open-app.png) + + Notice the text that you entered previously on the configuration page is displayed on the screen. + + :::note + If you do not see the new text, refresh your browser. + ::: + +## Next Step + +Continue to [Step 8: Create a New Version](tutorial-cli-create-new-version) to make a change to one of the manifest files for the `cli-tutorial` application, then use the Replicated CLI to create and promote a new release. + +================ +File: docs/vendor/tutorial-cli-install-app-manager.mdx +================ +# Step 6: Install KOTS and the Application + +The next step is to test the installation process for the application release that you promoted. Using the KOTS CLI, you will install KOTS and the sample application in your cluster. + +KOTS is the Replicated component that allows your users to install, manage, and upgrade your application. Users can interact with KOTS through the Admin Console or through the KOTS CLI. + +To install KOTS and the application: + +1. From the `replicated-cli-tutorial` directory, run the following command to get the installation commands for the Unstable channel, where you promoted the release for the `cli-tutorial` application: + + ``` + replicated channel inspect Unstable + ``` + + **Example output:** + + ``` + ID: 2GmYFUFzj8JOSLYw0jAKKJKFua8 + NAME: Unstable + DESCRIPTION: + RELEASE: 1 + VERSION: Unstable-d4173a4 + EXISTING: + + curl -fsSL https://kots.io/install | bash + kubectl kots install cli-tutorial/unstable + + EMBEDDED: + + curl -fsSL https://k8s.kurl.sh/cli-tutorial-unstable | sudo bash + + AIRGAP: + + curl -fSL -o cli-tutorial-unstable.tar.gz https://k8s.kurl.sh/bundle/cli-tutorial-unstable.tar.gz + # ... 
scp or sneakernet cli-tutorial-unstable.tar.gz to airgapped machine, then
+    tar xvf cli-tutorial-unstable.tar.gz
+    sudo bash ./install.sh airgap
+   ```
+   This command prints information about the channel, including the commands for installing in:
+   * An existing cluster
+   * An _embedded cluster_ created by Replicated kURL
+   * An air gap cluster that is not connected to the internet
+
+1. If you have not already, configure kubectl access to the cluster you provisioned as part of [Set Up the Environment](tutorial-cli-setup#set-up-the-environment). For more information about setting the context for kubectl, see [Command line tool (kubectl)](https://kubernetes.io/docs/reference/kubectl/) in the Kubernetes documentation.
+
+1. Run the `EXISTING` installation script with the following flags to automatically upload the license file and run the preflight checks at the same time you run the installation.
+
+   **Example:**
+
+   ```
+   curl -fsSL https://kots.io/install | bash
+   kubectl kots install cli-tutorial/unstable \
+     --license-file ./LICENSE_YAML \
+     --shared-password PASSWORD \
+     --namespace NAMESPACE
+   ```
+
+   Replace:
+
+   - `LICENSE_YAML` with the local path to your license file.
+   - `PASSWORD` with a password to access the Admin Console.
+   - `NAMESPACE` with the namespace where KOTS and the application will be installed.
+
+   When the Admin Console is ready, the script prints the `https://localhost:8800` URL where you can access the Admin Console and the `http://localhost:8888` URL where you can access the application.
+
+   **Example output**:
+
+   ```
+   • Deploying Admin Console
+     • Creating namespace ✓
+     • Waiting for datastore to be ready ✓
+   • Waiting for Admin Console to be ready ✓
+   • Waiting for installation to complete ✓
+   • Waiting for preflight checks to complete ✓
+
+   • Press Ctrl+C to exit
+   • Go to http://localhost:8800 to access the Admin Console
+
+   • Go to http://localhost:8888 to access the application
+   ```
+
+1. Verify that the Pods are running for the example NGINX service and for kotsadm:
+
+   ```bash
+   kubectl get pods --namespace NAMESPACE
+   ```
+
+   Replace `NAMESPACE` with the namespace where KOTS and the application were installed.
+
+   **Example output:**
+
+   ```
+   NAME                       READY   STATUS    RESTARTS   AGE
+   kotsadm-7ccc8586b8-n7vf6   1/1     Running   0          12m
+   kotsadm-minio-0            1/1     Running   0          17m
+   kotsadm-rqlite-0           1/1     Running   0          17m
+   nginx-688f4b5d44-8s5v7     1/1     Running   0          11m
+   ```
+
+## Next Step
+
+Continue to [Step 7: Configure the Application](tutorial-cli-deploy-app) to log in to the Admin Console and make configuration changes.
+
+================
+File: docs/vendor/tutorial-cli-install-cli.mdx
+================
+# Step 1: Install the Replicated CLI
+
+In this tutorial, you use the Replicated CLI to create and promote releases for a sample application with Replicated. The Replicated CLI is the CLI for the Replicated Vendor Portal.
+
+This procedure describes how to create a Vendor Portal account, install the Replicated CLI on your local machine, and set up a `REPLICATED_API_TOKEN` environment variable for authentication.
+
+To install the Replicated CLI:
+
+1. Do one of the following to create an account in the Replicated Vendor Portal:
+   * **Join an existing team**: If you have an existing Vendor Portal team, you can ask your team administrator to send you an invitation to join.
+   * **Start a trial**: Alternatively, go to [vendor.replicated.com](https://vendor.replicated.com/) and click **Sign up** to create a 21-day trial account for completing this tutorial.
+
+1. 
Run the following command to use [Homebrew](https://brew.sh) to install the CLI:
+
+   ```
+   brew install replicatedhq/replicated/cli
+   ```
+
+   For the latest Linux or macOS versions of the Replicated CLI, see the [replicatedhq/replicated](https://github.com/replicatedhq/replicated/releases) releases in GitHub.
+
+1. Verify the installation:
+
+   ```
+   replicated version
+   ```
+   **Example output**:
+
+   ```json
+   {
+     "version": "0.37.2",
+     "git": "8664ac3",
+     "buildTime": "2021-08-24T17:05:26Z",
+     "go": {
+         "version": "go1.14.15",
+         "compiler": "gc",
+         "os": "darwin",
+         "arch": "amd64"
+     }
+   }
+   ```
+   At this point, if you run a Replicated CLI command, such as `replicated release ls`, you see the following error message about a missing API token:
+
+   ```
+   Error: set up APIs: Please provide your API token
+   ```
+
+1. Create an API token for the Replicated CLI:
+
+   1. Log in to the Vendor Portal, and go to the [Account settings](https://vendor.replicated.com/account-settings) page.
+
+   1. Under **User API Tokens**, click **Create user API token**. For Nickname, provide a name for the token. For Permissions, select **Read and Write**.
+
+      For more information about User API tokens, see [User API Tokens](replicated-api-tokens#user-api-tokens) in _Generating API Tokens_.
+
+   1. Click **Create Token**.
+
+   1. Copy the string that appears in the dialog.
+
+1. Export the string that you copied in the previous step to an environment variable named `REPLICATED_API_TOKEN`:
+
+   ```bash
+   export REPLICATED_API_TOKEN=YOUR_TOKEN
+   ```
+   Replace `YOUR_TOKEN` with the token string that you copied from the Vendor Portal in the previous step.
+
+1. Verify the User API token:
+
+   ```
+   replicated release ls
+   ```
+
+   You see the following error message:
+
+   ```
+   Error: App not found:
+   ```
+   This error is expected: it confirms that the CLI can authenticate with your token. The `App not found` message appears because you have not yet created an application.
+
+## Next Step
+
+Continue to [Step 2: Create an Application](tutorial-cli-create-app) to use the Replicated CLI to create an application.
+
+================
+File: docs/vendor/tutorial-cli-manifests.mdx
+================
+# Step 3: Get the Sample Manifests
+
+To create a release for the `cli-tutorial` application, first create the Kubernetes manifest files for the application. This tutorial provides a set of sample manifest files for a simple Kubernetes application that deploys an NGINX service.
+
+To get the sample manifest files:
+
+1. Run the following command to create and change to a `replicated-cli-tutorial` directory:
+
+   ```
+   mkdir replicated-cli-tutorial
+   cd replicated-cli-tutorial
+   ```
+
+1. Create a `/manifests` directory and download the sample manifest files from the [kots-default-yaml](https://github.com/replicatedhq/kots-default-yaml) repository in GitHub:
+
+   ```
+   mkdir ./manifests
+   curl -fSsL https://github.com/replicatedhq/kots-default-yaml/archive/refs/heads/main.tar.gz | \
+     tar xzv --strip-components=1 -C ./manifests \
+     --exclude README.md --exclude LICENSE --exclude .gitignore
+   ```
+
+1. Verify that you can see the YAML files in the `replicated-cli-tutorial/manifests` folder:
+
+   ```
+   ls manifests/
+   ```
+   ```
+   example-configmap.yaml    example-service.yaml    kots-app.yaml       kots-lint-config.yaml    kots-support-bundle.yaml
+   example-deployment.yaml   k8s-app.yaml            kots-config.yaml    kots-preflight.yaml
+   ```
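+
+The piped `curl` command in step 2 streams the repository archive directly into `tar`, so nothing is saved locally. If you prefer to keep a local copy of the archive for inspection, a two-step equivalent (assuming the same `main.tar.gz` archive and GNU tar flags) is:
+
+```
+curl -fSsL -o kots-default-yaml.tar.gz https://github.com/replicatedhq/kots-default-yaml/archive/refs/heads/main.tar.gz
+tar -xzvf kots-default-yaml.tar.gz --strip-components=1 -C ./manifests \
+  --exclude README.md --exclude LICENSE --exclude .gitignore
+```
+
+## Next Step
+
+Continue to [Step 4: Create a Release](tutorial-cli-create-release) to create and promote the first release for the `cli-tutorial` application using these manifest files.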
+ +================ +File: docs/vendor/tutorial-cli-setup.mdx +================ +import KubernetesTraining from "../partials/getting-started/_kubernetes-training.mdx" +import LabsIntro from "../partials/getting-started/_labs-intro.mdx" +import TutorialIntro from "../partials/getting-started/_tutorial-intro.mdx" +import RelatedTopics from "../partials/getting-started/_related-topics.mdx" +import VMRequirements from "../partials/getting-started/_vm-requirements.mdx" + +# Introduction and Setup + +<TutorialIntro/> + +The steps in this KOTS CLI-based tutorial show you how to use the Replicated CLI to perform these tasks. The Replicated CLI is the CLI for the Replicated Vendor Portal. You can use the Replicated CLI as a software vendor to programmatically create, configure, and manage your application artifacts, including application releases, release channels, customer entitlements, private image registries, and more. + +<KubernetesTraining/> + +## Set Up the Environment + +As part of this tutorial, you will install a sample application into a Kubernetes cluster. Before you begin, do the following to set up your environment: + +* Create a Kubernetes cluster that meets the minimum system requirements described in [KOTS Installation Requirements](/enterprise/installing-general-requirements). You can use any cloud provider or tool that you prefer to create a cluster, such as Google Kubernetes Engine (GKE), Amazon Web Services (AWS), or minikube. + + **Example:** + + For example, to create a cluster in GKE, run the following command in the gcloud CLI: + + ``` + gcloud container clusters create NAME --preemptible --no-enable-ip-alias + ``` + Where `NAME` is any name for the cluster. + +* Install kubectl, the Kubernetes command line tool. See [Install Tools](https://kubernetes.io/docs/tasks/tools/) in the Kubernetes documentation. +* Configure kubectl command line access to the cluster that you created. See [Command line tool (kubectl)](https://kubernetes.io/docs/reference/kubectl/) in the Kubernetes documentation. + +## Related Topics + +<RelatedTopics/> + +================ +File: docs/vendor/tutorial-cli-update-app.mdx +================ +# Step 9: Update the Application + +To test the new release that you promoted, return to the Admin Console in a browser to update the application. + +To update the application: + +1. Access the KOTS Admin Console using `https://localhost:8800` if the installation script is still running. Otherwise, run the following command to access the Admin Console: + + ```bash + kubectl kots admin-console --namespace NAMESPACE + ``` + + Replace `NAMESPACE` with the namespace where the Admin Console is installed. + +1. Go to the Version history page, and click **Check for update**. + + ![Admin Console version history page](/images/tutorials/tutorial-check-for-update.png) + + The Admin Console loads the new release that you promoted. + +1. Click **Deploy**. In the dialog, click **Yes, deploy** to deploy the new version. + + ![Admin Console version history page with new version](/images/tutorials/tutorial-deploy-app.png) + +1. After the Admin Console deploys the new version, go to the **Config** page where the **Another Text Example** field that you added is displayed. + + ![Admin Console configuration page with Another Text Example field](/images/tutorials/tutorial-new-config-item.png) + +1. In the new **Another Text Example** field, enter any text. Click **Save config**. + + The Admin Console notifies you that the configuration settings for the application have changed. 
+
+   ![dialog over Admin Console configuration screen](/images/tutorials/tutorial-go-to-updated-version.png)
+
+1. In the dialog, click **Go to updated version**.
+
+   The Admin Console loads the updated version on the Version history page.
+
+1. On the Version history page, click **Deploy** next to the latest version to deploy the configuration change.
+
+   ![Admin Console version history page with new version](/images/tutorials/tutorial-deploy-config-change.png)
+
+1. Go to the **Dashboard** page and click **Open App**. The application displays the text that you added to the field.
+
+   ![web page with text from the new configuration field](/images/tutorials/tutorial-updated-app.png)
+
+   :::note
+   If you do not see the new text, refresh your browser.
+   :::
+
+## Summary
+
+Congratulations! As part of this tutorial, you:
+* Created and promoted a release for a Kubernetes application using the Replicated CLI
+* Installed the application in a Kubernetes cluster
+* Edited the manifest files for the application, adding a new configuration field and using template functions to reference the field
+* Promoted a new release with your changes
+* Used the Admin Console to update the application to the latest version
+
+================
+File: docs/vendor/tutorial-config-create-app.md
+================
+# Step 2: Create an Application
+
+Next, install the Replicated CLI and then create an application.
+
+To create an application:
+
+1. Install the Replicated CLI:
+
+   ```
+   brew install replicatedhq/replicated/cli
+   ```
+   For more installation options, see [Installing the Replicated CLI](/reference/replicated-cli-installing).
+
+1. Authorize the Replicated CLI:
+
+   ```
+   replicated login
+   ```
+   In the browser window that opens, complete the prompts to log in to your vendor account and authorize the CLI.
+
+1. Create an application named `Grafana`:
+
+   ```
+   replicated app create Grafana
+   ```
+
+1. Set the `REPLICATED_APP` environment variable to the application that you created. This allows you to interact with the application using the Replicated CLI without needing to use the `--app` flag with every command:
+
+   1. Get the slug for the application that you created:
+
+      ```
+      replicated app ls
+      ```
+      **Example output**:
+      ```
+      ID                             NAME       SLUG              SCHEDULER
+      2WthxUIfGT13RlrsUx9HR7So8bR    Grafana    grafana-python    kots
+      ```
+      In the example above, the application slug is `grafana-python`.
+
+      :::info
+      The application _slug_ is a unique string that is generated based on the application name. You can use the application slug to interact with the application through the Replicated CLI and the Vendor API v3. The application name and slug are often different from one another because it is possible to create more than one application with the same name.
+      :::
+
+   1. Set the `REPLICATED_APP` environment variable to the application slug.
+
+      **macOS Example:**
+
+      ```
+      export REPLICATED_APP=grafana-python
+      ```
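+
+      To confirm that the variable is set, run a command that requires an application context without the `--app` flag. A quick check, assuming you ran the export above:
+
+      ```
+      echo $REPLICATED_APP
+      replicated release ls
+      ```
+      Because `REPLICATED_APP` is set, `replicated release ls` now returns an empty list of releases instead of an `App not found` error.
+
+## Next Step
+
+Add the Replicated SDK to the Helm chart and package the chart to an archive. See [Step 3: Package the Helm Chart](tutorial-config-package-chart).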
+
+## Related Topics
+
+* [Create an Application](/vendor/vendor-portal-manage-app#create-an-application)
+* [Installing the Replicated CLI](/reference/replicated-cli-installing)
+* [replicated app create](/reference/replicated-cli-app-create)
+
+================
+File: docs/vendor/tutorial-config-create-customer.md
+================
+# Step 5: Create a KOTS-Enabled Customer
+
+After promoting the release, create a customer with the KOTS entitlement so that you can install the release with KOTS.
+
+To create a customer:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers > Create customer**.
+
+   The **Create a new customer** page opens:
+
+   ![Create a new customer page in the Vendor Portal](/images/create-customer.png)
+
+   [View a larger version of this image](/images/create-customer.png)
+
+1. For **Customer name**, enter a name for the customer. For example, `KOTS Customer`.
+
+1. For **Channel**, select **Unstable**. This allows the customer to install releases promoted to the Unstable channel.
+
+1. For **License type**, select **Development**.
+
+1. For **License options**, verify that **KOTS Install Enabled** is enabled. This is the entitlement that allows the customer to install with KOTS.
+
+1. Click **Save Changes**.
+
+1. On the **Manage customer** page for the customer, click **Download license**. You will use the license file to install with KOTS.
+
+   ![Download license button on the customer page](/images/customer-download-license.png)
+
+   [View a larger version of this image](/images/customer-download-license.png)
+
+## Next Step
+
+Get the KOTS installation command and install. See [Step 6: Install the Release with KOTS](tutorial-config-install-kots).
+
+## Related Topics
+
+* [About Customers](/vendor/licenses-about)
+* [Creating and Managing Customers](/vendor/releases-creating-customer)
+
+================
+File: docs/vendor/tutorial-config-create-release.md
+================
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import HelmChart from "../partials/getting-started/_grafana-helmchart.mdx"
+import KotsApp from "../partials/getting-started/_grafana-kots-app.mdx"
+import K8sApp from "../partials/getting-started/_grafana-k8s-app.mdx"
+import Config from "../partials/getting-started/_grafana-config.mdx"
+
+# Step 4: Add the Chart Archive to a Release
+
+Next, add the Helm chart archive to a new release for the application in the Replicated vendor platform.
+
+The purpose of this step is to configure a release that supports installation with KOTS. Additionally, this step defines a user-facing application configuration page that displays in the KOTS Admin Console during installation where users can set their own Grafana login credentials.
+
+To create a release:
+
+1. In the `grafana` directory, create a subdirectory named `manifests`:
+
+   ```
+   mkdir manifests
+   ```
+
+   You will add the files required to support installation with Replicated KOTS to this subdirectory.
+
+1. Move the Helm chart archive that you created to `manifests`:
+
+   ```
+   mv grafana-9.6.5.tgz manifests
+   ```
+
+1. In the `manifests` directory, create the following YAML files to configure the release:
+
+   ```
+   cd manifests
+   ```
+   ```
+   touch kots-app.yaml k8s-app.yaml kots-config.yaml grafana.yaml
+   ```
+
+1. 
In each file, paste the corresponding YAML provided in the tabs below: + + <Tabs> + <TabItem value="kots-app" label="kots-app.yaml" default> + <h5>Description</h5> + <p>The KOTS Application custom resource enables features in the Admin Console such as branding, release notes, port forwarding, dashboard buttons, application status indicators, and custom graphs.</p><p>The YAML below provides a name for the application to display in the Admin Console, adds a custom <em>status informer</em> that displays the status of the <code>grafana</code> Deployment resource in the Admin Console dashboard, adds a custom application icon, and creates a port forward so that the user can open the Grafana application in a browser.</p> + <h5>YAML</h5> + <KotsApp/> + </TabItem> + <TabItem value="k8s-app" label="k8s-app.yaml"> + <h5>Description</h5> + <p>The Kubernetes Application custom resource supports functionality such as including buttons and links on the Admin Console dashboard. The YAML below adds an <strong>Open App</strong> button to the Admin Console dashboard that opens the application using the port forward configured in the KOTS Application custom resource.</p> + <h5>YAML</h5> + <K8sApp/> + </TabItem> + <TabItem value="config" label="kots-config.yaml"> + <h5>Description</h5> + <p>The Config custom resource specifies a user-facing configuration page in the Admin Console designed for collecting application configuration from users. The YAML below creates "Admin User" and "Admin Password" fields that will be shown to the user on the configuration page during installation. These fields will be used to set the login credentials for Grafana.</p> + <h5>YAML</h5> + <Config/> + </TabItem> + <TabItem value="helmchart" label="grafana.yaml"> + <h5>Description</h5> + <p>The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart.</p> + <p>The HelmChart custom resource below contains a <code>values</code> key, which creates a mapping to the Grafana <code>values.yaml</code> file. In this case, the <code>values.admin.user</code> and <code>values.admin.password</code> fields map to <code>admin.user</code> and <code>admin.password</code> in the Grafana <code>values.yaml</code> file.</p> + <p>During installation, KOTS renders the ConfigOption template functions in the <code>values.admin.user</code> and <code>values.admin.password</code> fields and then sets the corresponding Grafana values accordingly.</p> + <h5>YAML</h5> + <HelmChart/> + </TabItem> + </Tabs> + +1. From the `manifests` directory, lint the YAML files to confirm that there are no errors: + + ``` + replicated release lint --yaml-dir . + ``` + `--yaml-dir` is the path to the directory that contains the Helm chart archive and the manifest files required by KOTS. + + **Example output**: + + ``` + RULE TYPE FILENAME LINE MESSAGE + preflight-spec warn Missing preflight spec + troubleshoot-spec warn Missing troubleshoot spec + nonexistent-status-informer-object warn kots-app.yaml 8 Status informer points to a nonexistent kubernetes object. If this is a Helm resource, this warning can be ignored. + ``` + :::note + The output includes warning messages that list missing manifest files. These manifests control additional KOTS functionality and can be ignored for the purpose of this tutorial. The `nonexistent-status-informer-object` warning can also be ignored because the `grafana` Deployment resource that was added as a status informer in the KOTS Application custom resource is a Helm resource. + ::: + +1. 
Create a release: + + ``` + replicated release create --yaml-dir . + ``` + **Example output**: + ``` + • Reading manifests from . ✓ + • Creating Release ✓ + • SEQUENCE: 1 + ``` + +1. Log in to the Vendor Portal and go to **Releases**. + + The release that you created is listed under **All releases**. + + ![Release page in the Vendor Portal with one release](/images/grafana-release-seq-1.png) + + [View a larger version of this image](/images/grafana-release-seq-1.png) + +1. Click **Edit release** to view the files in the release. + + In the release editor, you can see the manifest files that you created, the Helm chart `.tgz` archive, and the `Chart.yaml` and `values.yaml` files for the Grafana Helm chart. You can also see the same warning messages that were displayed in the CLI output. + + ![Edit Release page in the Vendor Portal](/images/grafana-edit-release-seq-1.png) + + [View a larger version of this image](/images/grafana-edit-release-seq-1.png) + +1. At the top of the page, click **Promote**. + +1. In the dialog, for **Which channels you would like to promote this release to?**, select **Unstable**. Unstable is a default channel that is intended for use with internal testing. Click **Promote**. + + <img alt="Promote release dialog" src="/images/release-promote.png" width="400px"/> + + [View a larger version of this image](/images/release-promote.png) + +## Next Step + +Create a customer with the KOTS entitlement so that you can install the release in your cluster using Replicated KOTS. See [Step 5: Create a KOTS-Enabled Customer](tutorial-config-create-customer). + +## Related Topics + +* [About Channels and Releases](/vendor/releases-about) +* [Configuring the HelmChart Custom Resource](/vendor/helm-native-v2-using) +* [Config Custom Resource](/reference/custom-resource-config) +* [Manipulating Helm Chart Values with KOTS](/vendor/helm-optional-value-keys) + +================ +File: docs/vendor/tutorial-config-get-chart.md +================ +# Step 1: Get the Sample Chart and Test + +To begin, get the sample Grafana Helm chart from Bitnami, install the chart in your cluster using the Helm CLI, and then uninstall. The purpose of this step is to confirm that you can successfully install and access the application before adding the chart to a release in the Replicated vendor platform. + +To get the sample Grafana chart and test installation: + +1. Run the following command to pull and untar version 9.6.5 of the Bitnami Grafana Helm chart: + + ``` + helm pull --untar oci://registry-1.docker.io/bitnamicharts/grafana --version 9.6.5 + ``` + For more information about this chart, see the [bitnami/grafana](https://github.com/bitnami/charts/tree/main/bitnami/grafana) repository in GitHub. + +1. Change to the new `grafana` directory that was created: + ``` + cd grafana + ``` +1. View the files in the directory: + ``` + ls + ``` + The directory contains the following files: + ``` + Chart.lock Chart.yaml README.md charts templates values.yaml + ``` +1. Install the chart in your cluster: + + ``` + helm install grafana . --namespace grafana --create-namespace + ``` + To view the full installation instructions from Bitnami, see [Installing the Chart](https://github.com/bitnami/charts/blob/main/bitnami/grafana/README.md#installing-the-chart) in the `bitnami/grafana` repository. 
+
+   After running the installation command, the following output is displayed:
+
+   ```
+   NAME: grafana
+   LAST DEPLOYED: Thu Dec 14 14:54:50 2023
+   NAMESPACE: grafana
+   STATUS: deployed
+   REVISION: 1
+   TEST SUITE: None
+   NOTES:
+   CHART NAME: grafana
+   CHART VERSION: 9.6.5
+   APP VERSION: 10.2.2
+
+   ** Please be patient while the chart is being deployed **
+
+   1. Get the application URL by running these commands:
+      echo "Browse to http://127.0.0.1:8080"
+      kubectl port-forward svc/grafana 8080:3000 &
+
+   2. Get the admin credentials:
+
+      echo "User: admin"
+      echo "Password: $(kubectl get secret grafana-admin --namespace grafana -o jsonpath="{.data.GF_SECURITY_ADMIN_PASSWORD}" | base64 -d)"
+   # Note: Do not include grafana.validateValues.database here. See https://github.com/bitnami/charts/issues/20629
+   ```
+
+1. Watch the `grafana` Deployment until it is ready:
+
+   ```
+   kubectl get deploy grafana --namespace grafana --watch
+   ```
+
+1. When the Deployment is created, run the commands provided in the output of the installation command to get the Grafana login credentials:
+
+   ```
+   echo "User: admin"
+   echo "Password: $(kubectl get secret grafana-admin --namespace grafana -o jsonpath="{.data.GF_SECURITY_ADMIN_PASSWORD}" | base64 -d)"
+   ```
+
+1. Run the commands provided in the output of the installation command to get the Grafana URL:
+
+   ```
+   echo "Browse to http://127.0.0.1:8080"
+   kubectl port-forward svc/grafana 8080:3000 --namespace grafana
+   ```
+
+   :::note
+   Include `--namespace grafana` in the `kubectl port-forward` command.
+   :::
+
+1. In a browser, go to the URL to open the Grafana login page:
+
+   <img alt="Grafana login page" src="/images/grafana-login.png" width="300px"/>
+
+   [View a larger version of this image](/images/grafana-login.png)
+
+1. Log in using the credentials provided to open the Grafana dashboard:
+
+   <img alt="Grafana dashboard" src="/images/grafana-dashboard.png" width="500px"/>
+
+   [View a larger version of this image](/images/grafana-dashboard.png)
+
+1. Uninstall the Helm chart:
+
+   ```
+   helm uninstall grafana --namespace grafana
+   ```
+   This command removes all the Kubernetes resources associated with the chart and uninstalls the `grafana` release.
+
+1. Delete the namespace:
+
+   ```
+   kubectl delete namespace grafana
+   ```
+
+## Next Step
+
+Log in to the Vendor Portal and create an application. See [Step 2: Create an Application](tutorial-config-create-app).
+
+## Related Topics
+
+* [Helm Install](https://helm.sh/docs/helm/helm_install/)
+* [Helm Uninstall](https://helm.sh/docs/helm/helm_uninstall/)
+* [Helm Create](https://helm.sh/docs/helm/helm_create/)
+* [Helm Package](https://helm.sh/docs/helm/helm_package/)
+* [bitnami/grafana](https://github.com/bitnami/charts/tree/main/bitnami/grafana)
+
+================
+File: docs/vendor/tutorial-config-install-kots.md
+================
+# Step 6: Install the Release with KOTS
+
+Next, get the KOTS installation command from the Unstable channel in the Vendor Portal and then install the release using the customer license that you downloaded.
+
+As part of installation, you will set Grafana login credentials on the KOTS Admin Console configuration page.
+
+To install the release with KOTS:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), go to **Channels**. From the **Unstable** channel card, under **Install**, copy the **KOTS Install** command.
+
+   ![KOTS Install tab on the Unstable channel card](/images/grafana-unstable-channel.png)
+
+   [View a larger version of this image](/images/grafana-unstable-channel.png)
+
+1. On the command line, run the **KOTS Install** command that you copied:
+
+   ```bash
+   curl https://kots.io/install | bash
+   kubectl kots install $REPLICATED_APP/unstable
+   ```
+
+   This installs the latest version of the KOTS CLI and the Admin Console. The Admin Console provides a user interface where you can upload the customer license file and deploy the application.
+
+   For additional KOTS CLI installation options, including how to install without root access, see [Installing the KOTS CLI](/reference/kots-cli-getting-started).
+
+   :::note
+   KOTS v1.104.0 or later is required to deploy the Replicated SDK. You can verify the version of KOTS installed with `kubectl kots version`.
+   :::
+
+1. Complete the installation command prompts:
+
+   1. For `Enter the namespace to deploy to`, enter `grafana`.
+
+   1. For `Enter a new password to be used for the Admin Console`, provide a password to access the Admin Console.
+
+   When the Admin Console is ready, the command prints the URL where you can access the Admin Console. At this point, the KOTS CLI is installed and the Admin Console is running, but the application is not yet deployed.
+
+   **Example output:**
+
+   ```bash
+   Enter the namespace to deploy to: grafana
+     • Deploying Admin Console
+       • Creating namespace ✓
+       • Waiting for datastore to be ready ✓
+   Enter a new password for the Admin Console (6+ characters): ••••••••
+     • Waiting for Admin Console to be ready ✓
+
+     • Press Ctrl+C to exit
+     • Go to http://localhost:8800 to access the Admin Console
+   ```
+
+1. With the port forward running, go to `http://localhost:8800` in a browser to access the Admin Console.
+
+1. On the login page, enter the password that you created for the Admin Console.
+
+1. On the license page, select the license file that you downloaded previously and click **Upload license**.
+
+1. On the **Configure Grafana** page, enter a username and password. You will use these credentials to log in to Grafana.
+
+   ![Admin Console config page with username and password fields](/images/grafana-config.png)
+
+   [View a larger version of this image](/images/grafana-config.png)
+
+1. Click **Continue**.
+
+   The Admin Console dashboard opens. The application status changes from Missing to Unavailable while the `grafana` Deployment is being created.
+
+   ![Admin Console dashboard showing unavailable application status](/images/grafana-unavailable.png)
+
+   [View a larger version of this image](/images/grafana-unavailable.png)
+
+1. On the command line, press Ctrl+C to exit the port forward.
+
+1. Watch for the `grafana` Deployment to become ready:
+
+   ```
+   kubectl get deploy grafana --namespace grafana --watch
+   ```
+
+1. After the Deployment is ready, run the following command to confirm that the `grafana-admin` Secret was updated with the new password that you created on the **Configure Grafana** page:
+
+   ```
+   echo "Password: $(kubectl get secret grafana-admin --namespace grafana -o jsonpath="{.data.GF_SECURITY_ADMIN_PASSWORD}" | base64 -d)"
+   ```
+
+   The output of this command displays the password that you created.
+
+1. Start the port forward again to access the Admin Console:
+
+   ```
+   kubectl kots admin-console --namespace grafana
+   ```
+
+1. Go to `http://localhost:8800` to open the Admin Console.
+
+   On the Admin Console dashboard, the application status is now displayed as Ready:
+
+   ![Admin console dashboard showing ready application status](/images/grafana-ready.png)
+
+   [View a larger version of this image](/images/grafana-ready.png)
+
+1. Click **Open App** to open the Grafana login page in a browser.
+
+   <img alt="Grafana login webpage" src="/images/grafana-login.png" width="300px"/>
+
+   [View a larger version of this image](/images/grafana-login.png)
+
+1. On the Grafana login page, enter the username and password that you created on the **Configure Grafana** page. Confirm that you can log in to the application to access the Grafana dashboard:
+
+   <img alt="Grafana dashboard" src="/images/grafana-dashboard.png" width="500px"/>
+
+   [View a larger version of this image](/images/grafana-dashboard.png)
+
+1. On the command line, press Ctrl+C to exit the port forward.
+
+1. Uninstall the Grafana application from your cluster:
+
+   ```bash
+   kubectl kots remove $REPLICATED_APP --namespace grafana --undeploy
+   ```
+   **Example output**:
+   ```
+   • Removing application grafana-python reference from Admin Console and deleting associated resources from the cluster ✓
+   • Application grafana-python has been removed
+   ```
+
+1. Remove the Admin Console from the cluster:
+
+   1. Delete the namespace where the Admin Console is installed:
+
+      ```
+      kubectl delete namespace grafana
+      ```
+   1. Delete the Admin Console ClusterRole and ClusterRoleBinding:
+
+      ```
+      kubectl delete clusterrole kotsadm-role
+      ```
+      ```
+      kubectl delete clusterrolebinding kotsadm-rolebinding
+      ```
+
+## Summary
+
+Congratulations! As part of this tutorial, you used the KOTS Config custom resource to define a configuration page in the Admin Console. You also used the KOTS HelmChart custom resource and KOTS ConfigOption template function to override the default Grafana login credentials with a user-supplied username and password.
+
+To learn more about how to customize the Config custom resource to create configuration fields for your application, see [Config](/reference/custom-resource-config).
+
+## Related Topics
+
+* [kots install](/reference/kots-cli-install/)
+* [Installing the KOTS CLI](/reference/kots-cli-getting-started/)
+* [Installing an Application](/enterprise/installing-overview)
+* [Deleting the Admin Console and Removing Applications](/enterprise/delete-admin-console)
+
+================
+File: docs/vendor/tutorial-config-package-chart.md
+================
+import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx"
+import UnauthorizedError from "../partials/replicated-sdk/_401-unauthorized.mdx"
+
+# Step 3: Package the Helm Chart
+
+Next, add the Replicated SDK as a dependency of the Helm chart and then package the chart into a `.tgz` archive. The purpose of this step is to prepare the Helm chart to be added to a release.
+
+To add the Replicated SDK and package the Helm chart:
+
+1. In your local file system, go to the `grafana` directory that was created as part of [Step 1: Get the Sample Chart and Test](tutorial-config-get-chart).
+
+1. In the `Chart.yaml` file, add the Replicated SDK as a dependency:
+
+   <DependencyYaml/>
+
+1. Update dependencies and package the Helm chart to a `.tgz` chart archive:
+
+   ```bash
+   helm package . --dependency-update
+   ```
+   <UnauthorizedError/>
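+
+   The command writes an archive named after the chart name and version in `Chart.yaml`, in this case `grafana-9.6.5.tgz`. To confirm that the SDK was bundled as a dependency, you can list the archive contents. A quick check, assuming GNU or BSD tar:
+
+   ```bash
+   tar -tzf grafana-9.6.5.tgz | grep replicated
+   ```
+   The SDK appears as a packaged subchart under `charts/`.
+
+## Next Step
+
+Create a release using the Helm chart archive. See [Step 4: Add the Chart Archive to a Release](tutorial-config-create-release).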
+
+## Related Topics
+
+* [About the Replicated SDK](/vendor/replicated-sdk-overview)
+* [Helm Package](https://helm.sh/docs/helm/helm_package/)
+
+================
+File: docs/vendor/tutorial-config-setup.md
+================
+# Introduction and Setup
+
+This topic provides a summary of the goals and outcomes for the tutorial and also lists the prerequisites to set up your environment before you begin.
+
+## Summary
+
+This tutorial introduces you to mapping user-supplied values from the Replicated KOTS Admin Console configuration page to a Helm chart `values.yaml` file.
+
+In this tutorial, you use a sample Helm chart to learn how to:
+
+* Define a user-facing application configuration page in the KOTS Admin Console
+* Set Helm chart values with the user-supplied values from the Admin Console configuration page
+
+## Set Up the Environment
+
+Before you begin, ensure that you have kubectl access to a Kubernetes cluster. You can use any cloud provider or tool that you prefer to create a cluster, such as [Replicated Compatibility Matrix](/vendor/testing-how-to), Google Kubernetes Engine (GKE), or minikube.
+
+## Next Step
+
+Get the sample Bitnami Helm chart and test installation with the Helm CLI. See [Step 1: Get the Sample Chart and Test](/vendor/tutorial-config-get-chart).
+
+================
+File: docs/vendor/tutorial-ecr-private-images.md
+================
+# Tutorial: Using ECR for Private Images
+
+## Objective
+
+The purpose of this tutorial is to walk you through how to configure Replicated KOTS to pull images from a private registry in Amazon's Elastic Container Registry (ECR). This tutorial demonstrates the differences between using public and private images with KOTS.
+
+## Prerequisites
+
+* To install the application in this tutorial, you must have a virtual machine (VM) that meets the following minimum requirements:
+  * Ubuntu 18.04
+  * At least 8 GB of RAM
+  * 4 CPU cores
+  * At least 40 GB of disk space
+
+* To pull a public NGINX container and push it to a private repository in ECR as part of this tutorial, you must have the following:
+  * An ECR Repository
+  * An AWS account to use with Docker to pull and push the public NGINX image to the ECR repository. The AWS account must be able to create a read-only user.
+  * Docker
+  * The AWS CLI
+
+## Overview
+
+The guide is divided into the following steps:
+
+ 1. [Set Up the Testing Environment](#set-up)
+
+ 2. [Configure Private Registries in Replicated](#2-configure-private-registries-in-replicated)
+
+ 3. [Update Definition Files](#3-update-definition-files)
+
+ 4. [Install the New Version](#4-install-the-new-version)
+
+## 1. Set Up the Testing Environment {#set-up}
+
+We are going to use the default NGINX deployment to create our application and then update it to pull the same container from a private repository in ECR and note the differences.
+
+### Create a Sample Application and Deploy the First Release
+
+In this section, we cover at a high level the steps to create a new application and install it on a VM.
+
+To create our sample application, follow these steps:
+
+* Create a new application in the Replicated [vendor portal](https://vendor.replicated.com) and call it 'MySampleECRApp'.
+* Create the first release using the default definition files and promote it to the *Unstable* channel.
+* Create a customer, assign it to the *Unstable* channel, and download the license file after creating the customer.
+* Install the application on a VM.
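+
+If you prefer to script the vendor portal steps above, the Replicated CLI can create the application and the first release. A rough sketch, assuming the default definition files are in the current directory and that the `--promote` flag is available in your CLI version (customer creation and the license download still happen in the vendor portal):
+
+```shell
+# Create the application (the slug below is illustrative; use the one printed by the CLI)
+$ replicated app create MySampleECRApp
+$ export REPLICATED_APP=mysampleecrapp
+
+# Create the first release and promote it to the Unstable channel in one step
+$ replicated release create --yaml-dir . --promote Unstable
+```
+
+Log in to the Replicated admin console.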
To inspect what was deployed, look at the files under **View Files** from the admin console.
+The Upstream files (the files from the release created in the vendor portal) show that we are pulling the public image.
+
+![admin-console-view-files-upstream-release1](/images/guides/kots/priv-reg-ecr-ups-files-rel1.png)
+
+We can further validate this if we switch back to the terminal window on the VM where we installed the application.
+If we run `kubectl describe pod <pod-name>` on the NGINX pod, we can confirm that it was in fact pulled from the public repository.
+
+![admin-console-kubectl-describe-release2](/images/guides/kots/priv-reg-ecr-kubctl-describe-rel1.png)
+
+Now that we have the basic application installed, we are now going to pull the same image, but from an ECR repository.
+
+### Pull Public Image and Push to ECR
+
+To keep the changes to a minimum and only focus on using a private registry, we are going to pull the public NGINX container (as specified in the `deployment.yaml` file) to our local environment, and then push it to a repository in ECR.
+
+Let's start by pulling the public image:
+
+```shell
+$ docker pull nginx
+```
+
+You should have an output similar to this:
+
+```shell
+Using default tag: latest
+latest: Pulling from library/nginx
+d121f8d1c412: Pull complete
+ebd81fc8c071: Pull complete
+655316c160af: Pull complete
+d15953c0e0f8: Pull complete
+2ee525c5c3cc: Pull complete
+Digest: sha256:c628b67d21744fce822d22fdcc0389f6bd763daac23a6b77147d0712ea7102d0
+Status: Downloaded newer image for nginx:latest
+docker.io/library/nginx:latest
+```
+
+Next, log in to ECR and push this container.
+To use `docker login` with ECR, [install the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) and [configure it](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html) if not already done.
+As part of this, we will need to provide the AWS Access Key ID and AWS Secret Key for a user that has permissions to create and push images to the repository. For more information about working with containers and ECR in the AWS CLI, see [Using Amazon ECR with the AWS CLI](https://docs.aws.amazon.com/AmazonECR/latest/userguide/getting-started-cli.html).
+
+Just like with any other private registry, we need to know the registry endpoint to pass to the `docker login` command.
+The syntax is as follows:
+
+```shell
+
+docker login [some.private.registry]:[port]
+
+```
+Here, the endpoint is the **[some.private.registry]:[port]** portion.
+
+To determine the endpoint for ECR, log in to the AWS console and search for 'ECR', which should bring up Elastic Container Registry as an option as shown below.
+
+![search-4-ecr](/images/guides/kots/priv-reg-ecr-search-4-ecr.png)
+
+Select 'Elastic Container Registry' from the options in the dropdown to get to the list of repositories.
+
+![ecr-repos](/images/guides/kots/priv-reg-ecr-repos.png)
+
+As shown in the screenshot above, the endpoint for each repository appears under the URI column.
+For the purpose of this guide, we will push the NGINX image to the **demo-apps** repository.
+
+To determine the endpoint to use in the login command, use the URL without the repository name.
+
+When logging in to ECR, use the AWS CLI to generate a temporary password from the user credentials.
+For example, to log in to ECR, we run the following command:
+
+```shell
+
+$ aws ecr get-login-password --region us-east-2 | docker login --username AWS --password-stdin 4999999999999.dkr.ecr.us-east-2.amazonaws.com
+```
+
+A successful login will display a `Login Succeeded` message.
+To push this image to our private repository, tag the image.
+The new tag will consist of:
+
+`<ecr repo endpoint>/image`
+
+For example, to tag the public NGINX image, we run the following command:
+
+```shell
+$ docker tag nginx 4999999999999.dkr.ecr.us-east-2.amazonaws.com/demo-apps/nginx
+```
+
+Assuming the tagging is successful, push the container to our ECR repository:
+
+```shell
+$ docker push 4999999999999.dkr.ecr.us-east-2.amazonaws.com/demo-apps/nginx
+The push refers to repository [4999999999999.dkr.ecr.us-east-2.amazonaws.com/demo-apps/nginx]
+908cf8238301: Pushed
+eabfa4cd2d12: Pushed
+60c688e8765e: Pushed
+f431d0917d41: Pushed
+07cab4339852: Pushed
+latest: digest: sha256:794275d96b4ab96eeb954728a7bf11156570e8372ecd5ed0cbc7280313a27d19 size: 1362
+
+```
+Our testing environment is all set.
+We are now ready to update Replicated to use the private registry.
+
+* * *
+
+## 2. Configure Private Registries in Replicated
+
+To configure a Private Registry in Replicated, we need to provide the same information we needed to log in to ECR in the previous step:
+
+- **Endpoint**
+- **Username**
+- **Password**
+
+The difference is that we'll use a different user than the one we used previously. Since Replicated only needs to pull images, it is a best practice to create a 'read-only' user for this specific purpose.
+
+### Determine the endpoint
+
+The endpoint should be the same as the one we provided in the previous step.
+
+### Setting up the Service Account User
+
+Replicated only needs access to pull images from the private registry. Let's create a new user in AWS:
+
+![aws-new-user](/images/guides/kots/priv-reg-ecr-new-user.png)
+
+As far as permissions go, there are a couple of options, depending on the scope of access.
+If exposing all images to Replicated is an acceptable solution, the Amazon-provided [AmazonEC2ContainerRegistryReadOnly](https://docs.aws.amazon.com/AmazonECR/latest/userguide/ecr_managed_policies.html#AmazonEC2ContainerRegistryReadOnly) policy will work:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [{
+    "Effect": "Allow",
+    "Action": [
+      "ecr:GetAuthorizationToken",
+      "ecr:BatchCheckLayerAvailability",
+      "ecr:GetDownloadUrlForLayer",
+      "ecr:GetRepositoryPolicy",
+      "ecr:DescribeRepositories",
+      "ecr:ListImages",
+      "ecr:DescribeImages",
+      "ecr:BatchGetImage"
+    ],
+    "Resource": "*"
+  }]
+}
+```
+If you wish to limit Replicated to only certain images, this policy should be used instead. Note that `ecr:GetAuthorizationToken` does not support resource-level restrictions, so it is granted on all resources in a separate statement:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "ecr:BatchCheckLayerAvailability",
+        "ecr:GetDownloadUrlForLayer",
+        "ecr:GetRepositoryPolicy",
+        "ecr:DescribeRepositories",
+        "ecr:ListImages",
+        "ecr:DescribeImages",
+        "ecr:BatchGetImage"
+      ],
+      "Resource": [
+        "arn:aws:ecr:us-east-1:<account-id>:repository/<repo1>",
+        "arn:aws:ecr:us-east-1:<account-id>:repository/<repo2>"
+      ]
+    },
+    {
+      "Effect": "Allow",
+      "Action": [
+        "ecr:GetAuthorizationToken"
+      ],
+      "Resource": "*"
+    }
+  ]
+}
+```
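+
+For example, a minimal sketch of creating the read-only user from the AWS CLI using the managed read-only policy shown above (the user name `replicated-ecr-readonly` is illustrative):
+
+```shell
+# Create the read-only user for Replicated
+$ aws iam create-user --user-name replicated-ecr-readonly
+
+# Attach the AWS-managed read-only ECR policy
+$ aws iam attach-user-policy \
+    --user-name replicated-ecr-readonly \
+    --policy-arn arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly
+
+# Generate the access key pair that Replicated will use to log in
+$ aws iam create-access-key --user-name replicated-ecr-readonly
+```
+
+We will need the AWS Access Key ID and AWS Secret Key in the next section, as these map to the *Username* and *Password* fields.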
You can obtain these as you create the user or after the user has been created.
+
+### Enter Registry Information in Replicated
+
+First, we must link Replicated with the registry. To do this, click on **Add External Registry** from the *Images* tab.
+
+<img src="/images/add-external-registry.png" alt="Add external registry dialog" width="400px"></img>
+
+[View a larger version of this image](/images/add-external-registry.png)
+
+The values for the fields are:
+
+**Endpoint:**
+Enter the same URL used to log in to ECR.
+For example, to link to the same registry as the one in the previous section, we would enter *4999999999999.dkr.ecr.us-east-2.amazonaws.com*.
+
+**Username:**
+Enter the AWS Access Key ID for the user created in the [Setting Up the Service Account User](#setting-up-the-service-account-user) section.
+
+**Password:**
+Enter the AWS Secret Key for the user created in the [Setting Up the Service Account User](#setting-up-the-service-account-user) section.
+
+* * *
+
+## 3. Update Definition Files
+
+The last step is to update our definition manifests to pull the image from the ECR repository.
+To do this, we'll update the `deployment.yaml` file by adding the ECR registry URL to the `image` value.
+Below is an example using the registry URL used in this guide.
+
+```diff
+ spec:
+   containers:
+   - name: nginx
+-   image: nginx
++   image: 4999999999999.dkr.ecr.us-east-2.amazonaws.com/demo-apps/nginx
+    envFrom:
+```
+
+Save your changes, then create a new release and promote it to the *Unstable* channel.
+
+* * *
+
+## 4. Install the New Version
+
+To deploy the new version of the application, go back to the admin console and select the *Version History* tab.
+Click on **Check for Updates** and then **Deploy** when the new version is listed.
+To confirm that the new version was installed, check that the page looks like the screenshot below.
+
+![version-history](/images/guides/kots/priv-reg-ecr-version-history.png)
+
+Now we can inspect the changes in the definition files.
+Looking at the `deployment.yaml` upstream file, we see the image path as we set it in the [Update Definition Files](#3-update-definition-files) section.
+
+![admin-console-view-files-upstream-release2](/images/guides/kots/priv-reg-ecr-upstream-file-rel2.png)
+
+Because KOTS detects that it cannot pull this image anonymously, it rewrites the image to pull through the Replicated proxy using the private registry credentials you configured. Looking at the `kustomization.yaml` downstream file, we can see that the image path is changed to use the Replicated proxy.
+
+![admin-console-view-files-downstream-release2](/images/guides/kots/priv-reg-ecr-downstream-file-rel2.png)
+
+Installing the new version should have created a new pod. If we run `kubectl describe pod` on the new NGINX pod, we can confirm that the image was in fact pulled from the ECR repository.
+
+![admin-console-kubectl-describe-release2](/images/guides/kots/priv-reg-ecr-kubectl-describe-rel2.png)
+
+* * *
+
+## Related Topics
+
+- [Connecting to an External Registry](packaging-private-images/)
+
+- [Replicated Community Thread on AWS Roles and Permissions](https://help.replicated.com/community/t/what-are-the-minimal-aws-iam-permissions-needed-to-proxy-images-from-elastic-container-registry-ecr/267)
+
+- [AWS ECR Managed Policies Documentation](https://docs.aws.amazon.com/AmazonECR/latest/userguide/ecr_managed_policies.html)
+
+================
+File: docs/vendor/tutorial-embedded-cluster-create-app.mdx
+================
+# Step 1: Create an Application
+
+To begin, install the Replicated CLI and create an application in the Replicated Vendor Portal.
+
+An _application_ is an object that has its own customers, channels, releases, license fields, and more. A single team can have more than one application. It is common for teams to have multiple applications for the purpose of onboarding, testing, and iterating.
+
+To create an application:
+
+1. Install the Replicated CLI:
+
+   ```
+   brew install replicatedhq/replicated/cli
+   ```
+   For more installation options, see [Installing the Replicated CLI](/reference/replicated-cli-installing).
+
+1. Authorize the Replicated CLI:
+
+   ```
+   replicated login
+   ```
+   In the browser window that opens, complete the prompts to log in to your vendor account and authorize the CLI.
+
+1. Create an application named `Gitea`:
+
+   ```
+   replicated app create Gitea
+   ```
+
+1. Set the `REPLICATED_APP` environment variable to the application that you created. This allows you to interact with the application using the Replicated CLI without needing to use the `--app` flag with every command:
+
+   1. Get the slug for the application that you created:
+
+      ```
+      replicated app ls
+      ```
+      **Example output**:
+      ```
+      ID                             NAME     SLUG          SCHEDULER
+      2WthxUIfGT13RlrsUx9HR7So8bR    Gitea    gitea-kite    kots
+      ```
+      In the example above, the application slug is `gitea-kite`.
+
+      :::note
+      The application _slug_ is a unique string that is generated based on the application name. You can use the application slug to interact with the application through the Replicated CLI and the Vendor API v3. The application name and slug are often different from one another because it is possible to create more than one application with the same name.
+      :::
+
+   1. Set the `REPLICATED_APP` environment variable to the application slug.
+
+      **Example:**
+
+      ```
+      export REPLICATED_APP=gitea-kite
+      ```
+
+## Next Step
+
+Add the Replicated SDK to the Helm chart and package the chart to an archive. See [Step 2: Package the Helm Chart](tutorial-embedded-cluster-package-chart).
+
+## Related Topics
+
+* [Create an Application](/vendor/vendor-portal-manage-app#create-an-application)
+* [Installing the Replicated CLI](/reference/replicated-cli-installing)
+* [replicated app create](/reference/replicated-cli-app-create)
+
+================
+File: docs/vendor/tutorial-embedded-cluster-create-customer.mdx
+================
+# Step 4: Create an Embedded Cluster-Enabled Customer
+
+After promoting the release, create a customer with the Replicated KOTS and Embedded Cluster entitlements so that you can install the release with Embedded Cluster. A _customer_ represents a single licensed user of your application.
+
+To create a customer:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers > Create customer**.
+
+   The **Create a new customer** page opens:
+
+   ![Create a new customer page in the Vendor Portal](/images/create-customer.png)
+
+   [View a larger version of this image](/images/create-customer.png)
+
+1. For **Customer name**, enter a name for the customer. For example, `Example Customer`.
+
+1. For **Channel**, select **Unstable**. This allows the customer to install releases promoted to the Unstable channel.
+
+1. For **License type**, select **Development**.
+
+1. For **License options**, enable the following entitlements:
+   * **KOTS Install Enabled**
+   * **Embedded Cluster Enabled**
+
+1. Click **Save Changes**.
+
+## Next Step
+
+Get the Embedded Cluster installation commands and install. See [Step 5: Install the Release on a VM](tutorial-embedded-cluster-install).
+
+## Related Topics
+
+* [About Customers](/vendor/licenses-about)
+* [Creating and Managing Customers](/vendor/releases-creating-customer)
+
+================
+File: docs/vendor/tutorial-embedded-cluster-create-release.mdx
+================
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import HelmChartCr from "../partials/getting-started/_gitea-helmchart-cr-ec.mdx"
+import KotsCr from "../partials/getting-started/_gitea-kots-app-cr-ec.mdx"
+import K8sCr from "../partials/getting-started/_gitea-k8s-app-cr.mdx"
+import EcCr from "../partials/embedded-cluster/_ec-config.mdx"
+
+# Step 3: Add the Chart Archive to a Release
+
+Next, add the Helm chart archive to a new release for the application in the Replicated Vendor Portal. The purpose of this step is to configure a release that supports installation with Replicated Embedded Cluster.
+
+A _release_ represents a single version of your application and contains your application files. Each release is promoted to one or more _channels_. Channels provide a way to progress releases through the software development lifecycle: from internal testing, to sharing with early-adopters, and finally to making the release generally available.
+
+To create a release:
+
+1. In the `gitea` directory, create a subdirectory named `manifests`:
+
+   ```
+   mkdir manifests
+   ```
+
+   You will add the files required to support installation with Replicated KOTS to this subdirectory.
+
+1. Move the Helm chart archive that you created to `manifests`:
+
+   ```
+   mv gitea-1.0.6.tgz manifests
+   ```
+
+1. In `manifests`, create the YAML manifests required by KOTS:
+   ```
+   cd manifests
+   ```
+   ```
+   touch gitea.yaml kots-app.yaml k8s-app.yaml embedded-cluster.yaml
+   ```
+
+1. In each of the files that you created, paste the corresponding YAML provided in the tabs below:
+
+   <Tabs>
+   <TabItem value="helmchart" label="gitea.yaml" default>
+    <h5>Description</h5>
+    <p>The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart. The <code>name</code> and <code>chartVersion</code> listed in the HelmChart custom resource must match the name and version of a Helm chart archive in the release. The <a href="/vendor/helm-optional-value-keys#conditionally-set-values"><code>optionalValues</code></a> field sets the specified Helm values when a given conditional statement evaluates to true. In this case, if the application is installed with Embedded Cluster, then the Gitea service type is set to `NodePort` and the node port is set to `"32000"`. 
This will allow Gitea to be accessed from the local machine after deployment.</p>
+    <h5>YAML</h5>
+    <HelmChartCr/>
+   </TabItem>
+   <TabItem value="kots-app" label="kots-app.yaml">
+    <h5>Description</h5>
+    <p>The KOTS Application custom resource enables features in the Replicated Admin Console such as branding, release notes, application status indicators, and custom graphs.</p><p>The YAML below provides a name for the application to display in the Admin Console, adds a custom <em>status informer</em> that displays the status of the <code>gitea</code> Deployment resource in the Admin Console dashboard, adds a custom application icon, and adds the port where the Gitea service can be accessed so that the user can open the application after installation.</p>
+    <h5>YAML</h5>
+    <KotsCr/>
+   </TabItem>
+   <TabItem value="k8s-app" label="k8s-app.yaml">
+    <h5>Description</h5>
+    <p>The Kubernetes Application custom resource supports functionality such as including buttons and links on the Replicated Admin Console dashboard. The YAML below adds an <strong>Open App</strong> button to the Admin Console dashboard that opens the application using the service port defined in the KOTS Application custom resource.</p>
+    <h5>YAML</h5>
+    <K8sCr/>
+   </TabItem>
+   <TabItem value="ec" label="embedded-cluster.yaml">
+    <h5>Description</h5>
+    <p>To install your application with Embedded Cluster, an Embedded Cluster Config must be present in the release. At minimum, the Embedded Cluster Config sets the version of Embedded Cluster that will be installed. You can also define several characteristics about the cluster.</p>
+    <h5>YAML</h5>
+    <EcCr/>
+   </TabItem>
+   </Tabs>
+
+1. Lint the YAML files to confirm that there are no errors:
+
+   ```bash
+   replicated release lint --yaml-dir .
+   ```
+   **Example output**:
+   ```bash
+   RULE                               TYPE    FILENAME        LINE    MESSAGE
+   config-spec                        warn                            Missing config spec
+   preflight-spec                     warn                            Missing preflight spec
+   troubleshoot-spec                  warn                            Missing troubleshoot spec
+   nonexistent-status-informer-object warn    kots-app.yaml   8       Status informer points to a nonexistent kubernetes object. If this is a Helm resource, this warning can be ignored.
+   ```
+   :::note
+   You can ignore any warning messages for the purpose of this tutorial.
+   :::
+
+1. Create a release:
+
+   ```
+   replicated release create --yaml-dir .
+   ```
+   **Example output**:
+   ```
+   • Reading manifests from . ✓
+   • Creating Release ✓
+     • SEQUENCE: 1
+   ```
+
+1. Log in to the Vendor Portal and go to **Releases**.
+
+   The release that you created is listed under **All releases**.
+
+   ![Release page in the Vendor Portal with one release](/images/gitea-ec-release-seq-1.png)
+
+   [View a larger version of this image](/images/gitea-ec-release-seq-1.png)
+
+1. Click the dot menu then **Edit release** to view the files in the release.
+
+   ![dot menu](/images/gitea-ec-release-edit-button.png)
+
+   [View a larger version of this image](/images/gitea-ec-release-edit-button.png)
+
+   In the release editor, you can see the manifest files that you created, the Helm chart `.tgz` archive, and the `Chart.yaml` and `values.yaml` files for the Gitea Helm chart. You can also see the same warning messages that were displayed in the CLI output.
+
+   ![Edit Release page in the Vendor Portal](/images/gitea-ec-release-edit-seq-1.png)
+
+   [View a larger version of this image](/images/gitea-ec-release-edit-seq-1.png)
+
+1. At the top of the page, click **Promote**.
+
+1. In the dialog, for **Which channels you would like to promote this release to?**, select **Unstable**. Unstable is a default channel that is intended for use with internal testing. 
Click **Promote**. + + <img alt="Promote release dialog" src="/images/release-promote.png" width="400px"/> + + [View a larger version of this image](/images/release-promote.png) + +## Next Step + +Create a customer with the Embedded Cluster entitlement so that you can install the release using Embedded Cluster. See [Step 4: Create an Embedded Cluster-Enabled Customer](tutorial-embedded-cluster-create-customer). + +## Related Topics + +* [About Channels and Releases](/vendor/releases-about) +* [Configuring the HelmChart Custom Resource](/vendor/helm-native-v2-using) +* [Embedded Cluster Config](/reference/embedded-config) +* [Setting Helm Values with KOTS](/vendor/helm-optional-value-keys) + +================ +File: docs/vendor/tutorial-embedded-cluster-install.mdx +================ +import KotsVerReq from "../partials/replicated-sdk/_kots-version-req.mdx" + +# Step 5: Install the Release on a VM + +Next, get the customer-specific Embedded Cluster installation commands and then install the release on a Linux VM. + +To install the release with Embedded Cluster: + +1. In the [Vendor Portal](https://vendor.replicated.com), go to **Customers**. Click on the name of the customer you created. + +1. Click **Install instructions > Embedded cluster**. + + <img alt="Customer install instructions dropdown" src="/images/customer-install-instructions-dropdown.png" width="600px"/> + + [View a larger version of this image](/images/customer-install-instructions-dropdown.png) + + The **Embedded cluster install instructions** dialog opens. + + <img alt="Embedded Cluster install instructions dialog" src="/images/embedded-cluster-install-dialog-latest.png" width="600px"/> + + [View a larger version of this image](/images/embedded-cluster-install-dialog-latest.png) + +1. On the command line, SSH onto your Linux VM. + +1. Run the first command in the **Embedded cluster install instructions** dialog to download the latest release. + +1. Run the second command to extract the release. + +1. Run the third command to install the release. + +1. When prompted, enter a password for accessing the KOTS Admin Console. + + The installation command takes a few minutes to complete. + +1. When the installation command completes, go to the URL provided in the output to log in to the Admin Console. + + **Example output:** + + ```bash + ✔ Host files materialized + ? Enter an Admin Console password: ******** + ? Confirm password: ******** + ✔ Node installation finished + ✔ Storage is ready! + ✔ Embedded Cluster Operator is ready! + ✔ Admin Console is ready! + ✔ Finished! + Visit the admin console to configure and install gitea-kite: http://104.155.145.60:30000 + ``` + + At this point, the cluster is provisioned and the KOTS Admin Console is deployed, but the application is not yet installed. + +1. Bypass the browser TLS warning by clicking **Continue to Setup**. + +1. Click **Advanced > Proceed**. + +1. On the **HTTPS for the Gitea Admin Console** page, select **Self-signed** and click **Continue**. + +1. On the login page, enter the Admin Console password that you created during installation and click **Log in**. + +1. On the **Nodes** page, you can view details about the VM where you installed, including its node role, status, CPU, and memory. Users can also optionally add additional nodes on this page before deploying the application. Click **Continue**. + + The Admin Console dashboard opens. + +1. In the **Version** section, for version `0.1.0`, click **Deploy** then **Yes, Deploy**. 
+
+   The application status changes from Missing to Unavailable while the `gitea` Deployment is being created.
+
+1. After a few minutes, when the application status is Ready, click **Open App** to view the Gitea application in a browser:
+
+   ![Admin console dashboard showing ready status](/images/gitea-ec-ready.png)
+
+   [View a larger version of this image](/images/gitea-ec-ready.png)
+
+   <img alt="Gitea app landing page" src="/images/gitea-app.png" width="600px"/>
+
+   [View a larger version of this image](/images/gitea-app.png)
+
+1. In another browser window, open the [Vendor Portal](https://vendor.replicated.com/) and go to **Customers**. Select the customer that you created.
+
+   On the **Reporting** page for the customer, you can see details about the customer's license and installed instances:
+
+   ![Customer reporting page](/images/gitea-customer-reporting-ec.png)
+
+   [View a larger version of this image](/images/gitea-customer-reporting-ec.png)
+
+1. On the **Reporting** page, under **Instances**, click on the instance that you just installed to open the instance details page.
+
+   On the instance details page, you can see additional insights such as the version of Embedded Cluster that is running, instance status and uptime, and more:
+
+   ![Customer instance details page](/images/gitea-instance-insights-ec.png)
+
+   [View a larger version of this image](/images/gitea-instance-insights-ec.png)
+
+1. (Optional) Reset the node to remove the cluster and the application from the node. This is useful for iteration and development so that you can reset a machine and reuse it instead of having to procure another machine.
+
+   ```bash
+   sudo ./APP_SLUG reset --reboot
+   ```
+   Where `APP_SLUG` is the unique slug for the application that you created. You can find the application slug by running `replicated app ls` on the command line on your local machine.
+
+## Summary
+
+Congratulations! As part of this tutorial, you created a release in the Replicated Vendor Portal and installed the release with Replicated Embedded Cluster in a VM. To learn more about Embedded Cluster, see [Embedded Cluster Overview](embedded-overview).
+
+## Related Topics
+
+* [Embedded Cluster Overview](embedded-overview)
+* [Customer Reporting](/vendor/customer-reporting)
+* [Instance Details](/vendor/instance-insights-details)
+* [Reset a Node](/vendor/embedded-using#reset-a-node)
+
+================
+File: docs/vendor/tutorial-embedded-cluster-package-chart.mdx
+================
+import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx"
+import UnauthorizedError from "../partials/replicated-sdk/_401-unauthorized.mdx"
+
+# Step 2: Package the Gitea Helm Chart
+
+Next, get the sample Gitea Helm chart from Bitnami. Add the Replicated SDK as a dependency of the chart, then package the chart into a `.tgz` archive. The purpose of this step is to prepare the Helm chart to be added to a release.
+
+The Replicated SDK is a Helm chart that can be optionally added as a dependency of your application Helm chart. The SDK is installed as a small service running alongside your application, and provides an in-cluster API that you can use to embed Replicated features into your application. Additionally, the Replicated SDK provides access to insights and telemetry for instances of your application installed with the Helm CLI.
+
+To add the Replicated SDK and package the Helm chart:
+
+1.
Run the following command to pull and untar version 1.0.6 of the Bitnami Gitea Helm chart: + + ``` + helm pull --untar oci://registry-1.docker.io/bitnamicharts/gitea --version 1.0.6 + ``` + For more information about this chart, see the [bitnami/gitea](https://github.com/bitnami/charts/tree/main/bitnami/gitea) repository in GitHub. + +1. Change to the new `gitea` directory that was created: + ``` + cd gitea + ``` +1. View the files in the directory: + ``` + ls + ``` + The directory contains the following files: + ``` + Chart.lock Chart.yaml README.md charts templates values.yaml + ``` + +1. In the `Chart.yaml` file, add the Replicated SDK as a dependency: + + <DependencyYaml/> + +1. Update dependencies and package the Helm chart to a `.tgz` chart archive: + + ```bash + helm package . --dependency-update + ``` + <UnauthorizedError/> + +## Next Step + +Create a release using the Helm chart archive. See [Step 3: Add the Chart Archive to a Release](tutorial-embedded-cluster-create-release). + +## Related Topics + +* [Packaging a Helm Chart for a Release](/vendor/helm-install-release.md) +* [About the Replicated SDK](/vendor/replicated-sdk-overview) +* [Helm Package](https://helm.sh/docs/helm/helm_package/) + +================ +File: docs/vendor/tutorial-embedded-cluster-setup.mdx +================ +import Requirements from "../partials/embedded-cluster/_requirements.mdx" + +# Introduction and Setup + +This topic provides a summary of the goals and outcomes for the tutorial and also lists the prerequisites to set up your environment before you begin. + +## Summary + +This tutorial introduces you to installing an application on a Linux virtual machine (VM) using Replicated Embedded Cluster. Embedded Cluster allows you to distribute a Kubernetes cluster and your application together as a single appliance, making it easy for enterprise users to install, update, and manage the application and the cluster in tandem. + +In this tutorial, you use a sample application to learn how to: + +* Add the Embedded Cluster Config to a release +* Use Embedded Cluster to install the application on a Linux VM + +## Set Up the Environment + +Before you begin, ensure that you have access to a VM that meets the requirements for Embedded Cluster: + +<Requirements/> + +## Next Step + +Install the Replicated CLI and create an application in the Replicated Vendor Portal. See [Step 1: Create an Application](/vendor/tutorial-embedded-cluster-create-app). + +================ +File: docs/vendor/tutorial-kots-helm-create-app.md +================ +# Step 2: Create an Application + +Next, install the Replicated CLI and then create an application. + +An _application_ is an object that has its own customers, channels, releases, license fields, and more. A single team can have more than one application. It is common for teams to have multiple applications for the purpose of onboarding, testing, and iterating. + +To create an application: + +1. Install the Replicated CLI: + + ``` + brew install replicatedhq/replicated/cli + ``` + For more installation options, see [Installing the Replicated CLI](/reference/replicated-cli-installing). + +1. Authorize the Replicated CLI: + + ``` + replicated login + ``` + In the browser window that opens, complete the prompts to log in to your vendor account and authorize the CLI. + +1. Create an application named `Gitea`: + + ``` + replicated app create Gitea + ``` + +1. Set the `REPLICATED_APP` environment variable to the application that you created. 
This allows you to interact with the application using the Replicated CLI without needing to use the `--app` flag with every command:
+
+    1. Get the slug for the application that you created:
+
+       ```
+       replicated app ls
+       ```
+       **Example output**:
+       ```
+       ID                             NAME     SLUG          SCHEDULER
+       2WthxUIfGT13RlrsUx9HR7So8bR    Gitea    gitea-boxer   kots
+       ```
+       In the example above, the application slug is `gitea-boxer`.
+
+       :::note
+       The application _slug_ is a unique string that is generated based on the application name. You can use the application slug to interact with the application through the Replicated CLI and the Vendor API v3. The application name and slug are often different from one another because it is possible to create more than one application with the same name.
+       :::
+
+    1. Set the `REPLICATED_APP` environment variable to the application slug.
+
+       **Example:**
+
+       ```
+       export REPLICATED_APP=gitea-boxer
+       ```
+
+## Next Step
+
+Add the Replicated SDK to the Helm chart and package the chart to an archive. See [Step 3: Package the Helm Chart](tutorial-kots-helm-package-chart).
+
+## Related Topics
+
+* [Create an Application](/vendor/vendor-portal-manage-app#create-an-application)
+* [Installing the Replicated CLI](/reference/replicated-cli-installing)
+* [replicated app create](/reference/replicated-cli-app-create)
+
+================
+File: docs/vendor/tutorial-kots-helm-create-customer.md
+================
+# Step 5: Create a KOTS-Enabled Customer
+
+After promoting the release, create a customer with the KOTS entitlement so that you can install the release with KOTS. A _customer_ represents a single licensed user of your application.
+
+To create a customer:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers > Create customer**.
+
+   The **Create a new customer** page opens:
+
+   ![Create a new customer page in the Vendor Portal](/images/create-customer.png)
+
+   [View a larger version of this image](/images/create-customer.png)
+
+1. For **Customer name**, enter a name for the customer. For example, `KOTS Customer`.
+
+1. For **Channel**, select **Unstable**. This allows the customer to install releases promoted to the Unstable channel.
+
+1. For **License type**, select Development.
+
+1. For **License options**, verify that **KOTS Install Enabled** is enabled. This is the entitlement that allows the customer to install with KOTS.
+
+1. Click **Save Changes**.
+
+1. On the **Manage customer** page for the customer, click **Download license**. You will use the license file to install with KOTS.
+
+   ![Download license button on the customer page](/images/customer-download-license.png)
+
+   [View a larger version of this image](/images/customer-download-license.png)
+
+## Next Step
+
+Get the KOTS installation command and install. See [Step 6: Install the Release with KOTS](tutorial-kots-helm-install-kots).
+ +## Related Topics + +* [About Customers](/vendor/licenses-about) +* [Creating and Managing Customers](/vendor/releases-creating-customer) + +================ +File: docs/vendor/tutorial-kots-helm-create-release.md +================ +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import HelmChartCr from "../partials/getting-started/_gitea-helmchart-cr.mdx" +import KotsCr from "../partials/getting-started/_gitea-kots-app-cr.mdx" +import K8sCr from "../partials/getting-started/_gitea-k8s-app-cr.mdx" + +# Step 4: Add the Chart Archive to a Release + +Next, add the Helm chart archive to a new release for the application in the Replicated Vendor Portal. The purpose of this step is to configure a release that supports installation with both Replicated KOTS and with the Helm CLI. + +A _release_ represents a single version of your application and contains your application files. Each release is promoted to one or more _channels_. Channels provide a way to progress releases through the software development lifecycle: from internal testing, to sharing with early-adopters, and finally to making the release generally available. + +To create a release: + +1. In the `gitea` directory, create a subdirectory named `manifests`: + + ``` + mkdir manifests + ``` + + You will add the files required to support installation with Replicated KOTS to this subdirectory. + +1. Move the Helm chart archive that you created to `manifests`: + + ``` + mv gitea-1.0.6.tgz manifests + ``` + +1. In `manifests`, create the YAML manifests required by KOTS: + ``` + cd manifests + ``` + ``` + touch gitea.yaml kots-app.yaml k8s-app.yaml + ``` + +1. In each of the files that you created, paste the corresponding YAML provided in the tabs below: + + <Tabs> + <TabItem value="helmchart" label="gitea.yaml" default> + <h5>Description</h5> + <p>The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart. The <code>name</code> and <code>chartVersion</code> listed in the HelmChart custom resource must match the name and version of a Helm chart archive in the release. Each Helm chart archive in a release requires a unique HelmChart custom resource.</p> + <h5>YAML</h5> + <HelmChartCr/> + </TabItem> + <TabItem value="kots-app" label="kots-app.yaml"> + <h5>Description</h5> + <p>The KOTS Application custom resource enables features in the KOTS Admin Console such as branding, release notes, port forwarding, dashboard buttons, application status indicators, and custom graphs.</p><p>The YAML below provides a name for the application to display in the Admin Console, adds a custom <em>status informer</em> that displays the status of the <code>gitea</code> Deployment resource in the Admin Console dashboard, adds a custom application icon, and creates a port forward so that the user can open the Gitea application in a browser.</p> + <h5>YAML</h5> + <KotsCr/> + </TabItem> + <TabItem value="k8s-app" label="k8s-app.yaml"> + <h5>Description</h5> + <p>The Kubernetes Application custom resource supports functionality such as including buttons and links on the KOTS Admin Console dashboard. The YAML below adds an <strong>Open App</strong> button to the Admin Console dashboard that opens the application using the port forward configured in the KOTS Application custom resource.</p> + <h5>YAML</h5> + <K8sCr/> + </TabItem> + </Tabs> + +1. From the `manifests` directory, lint the YAML files to confirm that there are no errors: + + ``` + replicated release lint --yaml-dir . 
+ ``` + `--yaml-dir` is the path to the directory that contains the Helm chart archive and the manifest files required by KOTS. + + **Example output**: + + ``` + RULE TYPE FILENAME LINE MESSAGE + config-spec warn Missing config spec + preflight-spec warn Missing preflight spec + troubleshoot-spec warn Missing troubleshoot spec + nonexistent-status-informer-object warn kots-app.yaml 8 Status informer points to a nonexistent kubernetes object. If this is a Helm resource, this warning can be ignored. + ``` + :::note + The output includes warning messages that list missing manifest files. These manifests control additional KOTS functionality and can be ignored for the purpose of this tutorial. The `nonexistent-status-informer-object` warning can also be ignored because the `gitea` Deployment resource that was added as a status informer in the KOTS Application custom resource is a Helm resource. + ::: + +1. Create a release: + + ``` + replicated release create --yaml-dir . + ``` + **Example output**: + ``` + • Reading manifests from . ✓ + • Creating Release ✓ + • SEQUENCE: 1 + ``` + +1. Log in to the Vendor Portal and go to **Releases**. + + The release that you created is listed under **All releases**. + + ![Release page in the Vendor Portal with one release](/images/tutorial-kots-helm-release-seq-1.png) + + [View a larger version of this image](/images/tutorial-kots-helm-release-seq-1.png) + +1. Click **Edit release** to view the files in the release. + + In the release editor, you can see the manifest files that you created, the Helm chart `.tgz` archive, and the `Chart.yaml` and `values.yaml` files for the Gitea Helm chart. You can also see the same warning messages that were displayed in the CLI output. + + ![Edit Release page in the Vendor Portal](/images/tutorial-kots-helm-release-edit-seq-1.png) + + [View a larger version of this image](/images/tutorial-kots-helm-release-edit-seq-1.png) + +1. At the top of the page, click **Promote**. + +1. In the dialog, for **Which channels you would like to promote this release to?**, select **Unstable**. Unstable is a default channel that is intended for use with internal testing. Click **Promote**. + + <img alt="Promote release dialog" src="/images/release-promote.png" width="400px"/> + + [View a larger version of this image](/images/release-promote.png) + +## Next Step + +Create a customer with the KOTS entitlement so that you can install the release in your cluster using Replicated KOTS. See [Step 5: Create a KOTS-Enabled Customer](tutorial-kots-helm-create-customer). + +## Related Topics + +* [About Channels and Releases](/vendor/releases-about) +* [Configuring the HelmChart Custom Resource](/vendor/helm-native-v2-using) + +================ +File: docs/vendor/tutorial-kots-helm-get-chart.md +================ +# Step 1: Get the Sample Chart and Test + +To begin, get the sample Gitea Helm chart from Bitnami, install the chart in your cluster using the Helm CLI, and then uninstall. The purpose of this step is to confirm that you can successfully install and access the application before adding the chart to a release in the Replicated Vendor Portal. + +To get the sample Gitea Helm chart and test installation: + +1. Run the following command to pull and untar version 1.0.6 of the Bitnami Gitea Helm chart: + + ``` + helm pull --untar oci://registry-1.docker.io/bitnamicharts/gitea --version 1.0.6 + ``` + For more information about this chart, see the [bitnami/gitea](https://github.com/bitnami/charts/tree/main/bitnami/gitea) repository in GitHub. + +1. 
Change to the new `gitea` directory that was created: + ``` + cd gitea + ``` +1. View the files in the directory: + ``` + ls + ``` + The directory contains the following files: + ``` + Chart.lock Chart.yaml README.md charts templates values.yaml + ``` +1. Install the Gitea chart in your cluster: + + ``` + helm install gitea . --namespace gitea --create-namespace + ``` + To view the full installation instructions from Bitnami, see [Installing the Chart](https://github.com/bitnami/charts/blob/main/bitnami/gitea/README.md#installing-the-chart) in the `bitnami/gitea` repository. + + When the chart is installed, the following output is displayed: + + ``` + NAME: gitea + LAST DEPLOYED: Tue Oct 24 12:44:55 2023 + NAMESPACE: gitea + STATUS: deployed + REVISION: 1 + TEST SUITE: None + NOTES: + CHART NAME: gitea + CHART VERSION: 1.0.6 + APP VERSION: 1.20.5 + + ** Please be patient while the chart is being deployed ** + + 1. Get the Gitea URL: + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace gitea -w gitea' + + export SERVICE_IP=$(kubectl get svc --namespace gitea gitea --template "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}") + echo "Gitea URL: http://$SERVICE_IP/" + + WARNING: You did not specify a Root URL for Gitea. The rendered URLs in Gitea may not show correctly. In order to set a root URL use the rootURL value. + + 2. Get your Gitea login credentials by running: + + echo Username: bn_user + echo Password: $(kubectl get secret --namespace gitea gitea -o jsonpath="{.data.admin-password}" | base64 -d) + ``` + +1. Watch the `gitea` LoadBalancer service until an external IP is available: + + ``` + kubectl get svc gitea --namespace gitea --watch + ``` + +1. When the external IP for the `gitea` LoadBalancer service is available, run the commands provided in the output of the installation command to get the Gitea URL: + + ``` + export SERVICE_IP=$(kubectl get svc --namespace gitea gitea --template "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}") + echo "Gitea URL: http://$SERVICE_IP/" + ``` + +1. In a browser, go to the Gitea URL to confirm that you can see the welcome page for the application: + + <img alt="Gitea application webpage" src="/images/gitea-app.png" width="500px"/> + + [View a larger version of this image](/images/gitea-app.png) + +1. Uninstall the Helm chart: + + ``` + helm uninstall gitea --namespace gitea + ``` + This command removes all the Kubernetes components associated with the chart and uninstalls the `gitea` release. + +1. Delete the namespace: + + ``` + kubectl delete namespace gitea + ``` + +## Next Step + +Log in to the Vendor Portal and create an application. See [Step 2: Create an Application](tutorial-kots-helm-create-app). + +## Related Topics + +* [Helm Install](https://helm.sh/docs/helm/helm_install/) +* [Helm Uninstall](https://helm.sh/docs/helm/helm_uninstall/) +* [Helm Create](https://helm.sh/docs/helm/helm_create/) +* [Helm Package](https://helm.sh/docs/helm/helm_package/) +* [bitnami/gitea](https://github.com/bitnami/charts/blob/main/bitnami/gitea) + +================ +File: docs/vendor/tutorial-kots-helm-install-helm.md +================ +# Step 7: Install the Release with the Helm CLI + +Next, install the same release using the Helm CLI. All releases that contain one or more Helm charts can be installed with the Helm CLI. 
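+
+For reference, the login and install commands that you run later in this procedure generally follow the pattern sketched below, where the customer's email address and license ID are used to authenticate with the Replicated registry. The values shown here are illustrative placeholders; use the exact commands from the **Helm install instructions** dialog in the Vendor Portal:
+
+```bash
+# Log in to the Replicated registry (the username is the customer email
+# address and the password is the customer license ID)
+helm registry login registry.replicated.com \
+  --username customer@example.com \
+  --password LICENSE_ID
+
+# Install the chart from the channel-specific OCI path
+helm install gitea oci://registry.replicated.com/$REPLICATED_APP/unstable/gitea
+```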
+
+All Helm charts included in a release are automatically pushed to the Replicated registry when the release is promoted to a channel. Helm CLI installations require that the customer has a valid email address to authenticate with the Replicated registry.
+
+To install the release with the Helm CLI:
+
+1. Create a new customer to test the Helm CLI installation:
+
+    1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers > Create customer**.
+
+       The **Create a new customer** page opens:
+
+       ![Create a new customer page in the Vendor Portal](/images/create-customer.png)
+
+       [View a larger version of this image](/images/create-customer.png)
+
+    1. For **Customer name**, enter a name for the customer. For example, `Helm Customer`.
+
+    1. For **Channel**, select **Unstable**. This allows the customer to install releases promoted to the Unstable channel.
+
+    1. For **Customer email**, enter the email address for the customer. The customer email address is required to install the application with the Helm CLI. This email address is never used to send emails to customers.
+
+    1. For **License type**, select Trial.
+
+    1. (Optional) For **License options**, _disable_ the **KOTS Install Enabled** entitlement.
+
+    1. Click **Save Changes**.
+
+1. On the **Manage customer** page for the new customer, click **Helm install instructions**.
+
+   ![Helm install instructions button](/images/tutorial-gitea-helm-customer-install-button.png)
+
+   [View a larger version of this image](/images/tutorial-gitea-helm-customer-install-button.png)
+
+   You will use the instructions provided in the **Helm install instructions** dialog to install the chart.
+
+1. Before you run the first command in the **Helm install instructions** dialog, create a `gitea` namespace for the installation:
+
+   ```
+   kubectl create namespace gitea
+   ```
+
+1. Update the current kubectl context to target the new `gitea` namespace. This ensures that the chart is installed in the `gitea` namespace without requiring you to set the `--namespace` flag with the `helm install` command:
+
+   ```
+   kubectl config set-context --namespace=gitea --current
+   ```
+
+1. Run the commands provided in the **Helm install instructions** dialog to log in to the registry and install the Helm chart.
+
+   <img alt="Helm install instructions dialog" src="/images/tutorial-gitea-helm-install-instructions.png" width="500px"/>
+
+   [View a larger version of this image](/images/tutorial-gitea-helm-install-instructions.png)
+
+   :::note
+   You can ignore the **No preflight checks found** warning for the purpose of this tutorial. This warning appears because there are no specifications for preflight checks in the Helm chart archive.
+   :::
+
+1. After the installation command completes, you can see that both the `gitea` Deployment and the Replicated SDK `replicated` Deployment were created:
+
+   ```
+   kubectl get deploy
+   ```
+   **Example output:**
+   ```
+   NAME         READY   UP-TO-DATE   AVAILABLE   AGE
+   gitea        0/1     1            0           35s
+   replicated   1/1     1            1           35s
+   ```
+
+1. Watch the `gitea` LoadBalancer service until an external IP is available:
+
+   ```
+   kubectl get svc gitea --watch
+   ```
+
+1. After an external IP address is available for the `gitea` LoadBalancer service, follow the instructions in the output of the installation command to get the Gitea URL and then confirm that you can open the application in a browser.
+
+1. In another browser window, open the [Vendor Portal](https://vendor.replicated.com/) and go to **Customers**.
Select the customer that you created for the Helm CLI installation. + + On the **Reporting** page for the customer, because the Replicated SDK was installed alongside the Gitea Helm chart, you can see details about the customer's license and installed instances: + + ![Customer reporting](/images/tutorial-gitea-helm-reporting.png) + + [View a larger version of this image](/images/tutorial-gitea-helm-reporting.png) + +1. On the **Reporting** page, under **Instances**, click on the instance that you just installed to open the instance details page. + + On the instance details page, you can see additional insights such as the cluster where the application is installed, the version of the Replicated SDK running in the cluster, instance status and uptime, and more: + + ![Customer instance details](/images/tutorial-gitea-helm-instance.png) + + [View a larger version of this image](/images/tutorial-gitea-helm-instance.png) + +1. Uninstall the Helm chart and the Replicated SDK: + + ``` + helm uninstall gitea + ``` + +1. Delete the `gitea` namespace: + + ``` + kubectl delete namespace gitea + ``` + +## Next Step + +Congratulations! As part of this tutorial, you created a release in the Replicated Vendor Portal and installed the release with both KOTS and the Helm CLI. + +## Related Topics + +* [Installing with Helm](/vendor/install-with-helm) +* [About the Replicated SDK](/vendor/replicated-sdk-overview) +* [Helm Uninstall](https://helm.sh/docs/helm/helm_uninstall/) +* [Helm Delete](https://helm.sh/docs/helm/helm_delete/) + +================ +File: docs/vendor/tutorial-kots-helm-install-kots.md +================ +import KotsVerReq from "../partials/replicated-sdk/_kots-version-req.mdx" + +# Step 6: Install the Release with KOTS + +Next, get the KOTS installation command from the Unstable channel in the Vendor Portal and then install the release using the customer license that you downloaded. + +To install the release with KOTS: + +1. In the [Vendor Portal](https://vendor.replicated.com), go to **Channels**. From the **Unstable** channel card, under **Install**, copy the **KOTS Install** command. + + ![KOTS Install tab on the Unstable channel card](/images/helm-tutorial-unstable-kots-install-command.png) + + [View a larger version of this image](/images/helm-tutorial-unstable-kots-install-command.png) + +1. On the command line, run the **KOTS Install** command that you copied: + + ```bash + curl https://kots.io/install | bash + kubectl kots install $REPLICATED_APP/unstable + ``` + + This installs the latest version of the KOTS CLI and the Replicated KOTS Admin Console. The Admin Console provides a user interface where you can upload the customer license file and deploy the application. + + For additional KOTS CLI installation options, including how to install without root access, see [Installing the KOTS CLI](/reference/kots-cli-getting-started). + + :::note + <KotsVerReq/> + ::: + +1. Complete the installation command prompts: + + 1. For `Enter the namespace to deploy to`, enter `gitea`. + + 1. For `Enter a new password to be used for the Admin Console`, provide a password to access the Admin Console. + + When the Admin Console is ready, the command prints the URL where you can access the Admin Console. At this point, the KOTS CLI is installed and the Admin Console is running, but the application is not yet deployed. 
+
+   **Example output:**
+
+   ```bash
+   Enter the namespace to deploy to: gitea
+     • Deploying Admin Console
+       • Creating namespace ✓
+       • Waiting for datastore to be ready ✓
+   Enter a new password for the admin console (6+ characters): ••••••••
+     • Waiting for Admin Console to be ready ✓
+
+     • Press Ctrl+C to exit
+     • Go to http://localhost:8800 to access the Admin Console
+   ```
+
+1. With the port forward running, in a browser, go to `http://localhost:8800` to access the Admin Console.
+
+1. On the login page, enter the password that you created.
+
+1. On the license page, select the license file that you downloaded previously and click **Upload license**.
+
+   The Admin Console dashboard opens. The application status changes from Missing to Unavailable while the `gitea` Deployment is being created:
+
+   ![Admin console dashboard](/images/tutorial-gitea-unavailable.png)
+
+   [View a larger version of this image](/images/tutorial-gitea-unavailable.png)
+
+1. While waiting for the `gitea` Deployment to be created, do the following:
+
+   1. On the command line, press Ctrl+C to exit the port forward.
+
+   1. Watch for the `gitea` Deployment to become ready:
+
+      ```
+      kubectl get deploy gitea --namespace gitea --watch
+      ```
+
+   1. After the `gitea` Deployment is ready, confirm that an external IP for the `gitea` LoadBalancer service is available:
+
+      ```
+      kubectl get svc gitea --namespace gitea
+      ```
+
+   1. Start the port forward again to access the Admin Console:
+
+      ```
+      kubectl kots admin-console --namespace gitea
+      ```
+
+   1. Go to `http://localhost:8800` to open the Admin Console.
+
+1. On the Admin Console dashboard, the application status is now displayed as Ready and you can click **Open App** to view the Gitea application in a browser:
+
+   ![Admin console dashboard showing ready status](/images/tutorial-gitea-ready.png)
+
+   [View a larger version of this image](/images/tutorial-gitea-ready.png)
+
+1. In another browser window, open the [Vendor Portal](https://vendor.replicated.com/) and go to **Customers**. Select the customer that you created.
+
+   On the **Reporting** page for the customer, you can see details about the customer's license and installed instances:
+
+   ![Customer reporting page](/images/tutorial-gitea-customer-reporting.png)
+
+   [View a larger version of this image](/images/tutorial-gitea-customer-reporting.png)
+
+1. On the **Reporting** page, under **Instances**, click on the instance that you just installed to open the instance details page.
+
+   On the instance details page, you can see additional insights such as the cluster where the application is installed, the version of KOTS running in the cluster, instance status and uptime, and more:
+
+   ![Customer instance details page](/images/tutorial-gitea-instance-insights.png)
+
+   [View a larger version of this image](/images/tutorial-gitea-instance-insights.png)
+
+1. Uninstall the Gitea application from your cluster so that you can install the same release again using the Helm CLI:
+
+   ```bash
+   kubectl kots remove $REPLICATED_APP --namespace gitea --undeploy
+   ```
+   **Example output**:
+   ```
+   • Removing application gitea-boxer reference from Admin Console and deleting associated resources from the cluster ✓
+   • Application gitea-boxer has been removed
+   ```
+
+1. Remove the Admin Console from the cluster:
+
+   1. Delete the namespace where the Admin Console is installed:
+
+      ```
+      kubectl delete namespace gitea
+      ```
+   1.
Delete the Admin Console ClusterRole and ClusterRoleBinding: + + ``` + kubectl delete clusterrole kotsadm-role + ``` + ``` + kubectl delete clusterrolebinding kotsadm-rolebinding + ``` + +## Next Step + +Install the same release with the Helm CLI. See [Step 7: Install the Release with the Helm CLI](tutorial-kots-helm-install-helm). + +## Related Topics + +* [kots install](/reference/kots-cli-install/) +* [Installing the KOTS CLI](/reference/kots-cli-getting-started/) +* [Deleting the Admin Console and Removing Applications](/enterprise/delete-admin-console) +* [Customer Reporting](customer-reporting) +* [Instance Details](instance-insights-details) + +================ +File: docs/vendor/tutorial-kots-helm-package-chart.md +================ +import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" +import UnauthorizedError from "../partials/replicated-sdk/_401-unauthorized.mdx" + +# Step 3: Package the Helm Chart + +Next, add the Replicated SDK as a dependency of the Helm chart and then package the chart into a `.tgz` archive. The purpose of this step is to prepare the Helm chart to be added to a release. + +The Replicated SDK is a Helm chart that can be optionally added as a dependency of your application Helm chart. The SDK is installed as a small service running alongside your application, and provides an in-cluster API that you can use to embed Replicated features into your application. Additionally, the Replicated SDK provides access to insights and telemetry for instances of your application installed with the Helm CLI. + +To add the Replicated SDK and package the Helm chart: + +1. In your local file system, go to the `gitea` directory that was created as part of [Step 1: Get the Sample Chart and Test](tutorial-kots-helm-get-chart). + +1. In the `Chart.yaml` file, add the Replicated SDK as a dependency: + + <DependencyYaml/> + +1. Update dependencies and package the Helm chart to a `.tgz` chart archive: + + ```bash + helm package . --dependency-update + ``` + <UnauthorizedError/> + +## Next Step + +Create a release using the Helm chart archive. See [Step 4: Add the Chart Archive to a Release](tutorial-kots-helm-create-release). + +## Related Topics + +* [Packaging a Helm Chart for a Release](/vendor/helm-install-release.md) +* [About the Replicated SDK](/vendor/replicated-sdk-overview) +* [Helm Package](https://helm.sh/docs/helm/helm_package/) + +================ +File: docs/vendor/tutorial-kots-helm-setup.md +================ +# Introduction and Setup + +This topic provides a summary of the goals and outcomes for the tutorial and also lists the prerequisites to set up your environment before you begin. + +## Summary + +This tutorial introduces you to the Replicated Vendor Portal, the Replicated CLI, the Replicated SDK, and the Replicated KOTS installer. + +In this tutorial, you use a sample Helm chart to learn how to: + +* Add the Replicated SDK to a Helm chart as a dependency +* Create a release with the Helm chart using the Replicated CLI +* Add custom resources to the release so that it supports installation with both the Helm CLI and Replicated KOTS +* Install the release in a cluster using KOTS and the KOTS Admin Console +* Install the same release using the Helm CLI + +## Set Up the Environment + +Before you begin, do the following to set up your environment: + +* Ensure that you have kubectl access to a Kubernetes cluster. 
You can use any cloud provider or tool that you prefer to create a cluster, such as Google Kubernetes Engine (GKE), Amazon Web Services (AWS), or minikube. + + For information about installing kubectl and configuring kubectl access to a cluster, see the following in the Kubernetes documentation: + * [Install Tools](https://kubernetes.io/docs/tasks/tools/) + * [Command line tool (kubectl)](https://kubernetes.io/docs/reference/kubectl/) + +* Install the Helm CLI. To install the Helm CLI using Homebrew, run: + + ``` + brew install helm + ``` + + For more information, including alternative installation options, see [Install Helm](https://helm.sh/docs/intro/install/) in the Helm documentation. + +* Create a vendor account to access the Vendor Portal. See [Creating a Vendor Portal](/vendor/vendor-portal-creating-account). + + :::note + If you do not yet have a Vendor Portal team to join, you can sign up for a trial account. By default, trial accounts do not include access to Replicated KOTS. To get access to KOTS with your trial account so that you can complete this and other tutorials, contact Replicated at contact@replicated.com. + ::: + +## Next Step + +Get the sample Bitnami Helm chart and test installation with the Helm CLI. See [Step 1: Get the Sample Chart and Test](/vendor/tutorial-kots-helm-get-chart) + +================ +File: docs/vendor/tutorial-preflight-helm-add-spec.mdx +================ +import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" + +# Step 2: Add a Preflight Spec to the Chart + +Create a preflight specification that fails if the cluster is running a version of Kubernetes earlier than 1.23.0, and add the specification to the Gitea chart as a Kubernetes Secret. + +To add a preflight specification to the Gitea chart: + +1. In the `gitea/templates` directory, create a `gitea-preflights.yaml` file: + + ``` + touch templates/gitea-preflights.yaml + ``` + +1. In the `gitea-preflights.yaml` file, add the following YAML to create a Kubernetes Secret with a preflight check specification: + + ```yaml + apiVersion: v1 + kind: Secret + metadata: + labels: + troubleshoot.sh/kind: preflight + name: gitea-preflight-checks + stringData: + preflight.yaml: | + apiVersion: troubleshoot.sh/v1beta2 + kind: Preflight + metadata: + name: gitea-preflight-checks + spec: + analyzers: + - clusterVersion: + outcomes: + - fail: + when: "< 1.23.0" + message: |- + Your cluster is running a version of Kubernetes that is not supported and your installation will not succeed. To continue, upgrade your cluster to Kubernetes 1.23.0 or later. + uri: https://www.kubernetes.io + - pass: + message: Your cluster is running the required version of Kubernetes. + ``` + + The YAML above defines a preflight check that fails if the target cluster is running a version of Kubernetes earlier than 1.23.0. The preflight check also includes a message to the user that describes the failure and lists the required Kubernetes version. The `troubleshoot.sh/kind: preflight` label is required to run preflight checks defined in Secrets. + +1. In the Gitea `Chart.yaml` file, add the Replicated SDK as a dependency: + + <DependencyYaml/> + + The SDK is installed as a small service running alongside your application, and provides an in-cluster API that you can use to embed Replicated features into your application. + +1. Update dependencies and package the chart to a `.tgz` chart archive: + + ```bash + helm package . 
--dependency-update
+   ```
+
+   :::note
+   If you see a `401 Unauthorized` error message, log out of the Replicated registry by running `helm registry logout registry.replicated.com` and then run `helm package . --dependency-update` again.
+   :::
+
+## Next Step
+
+Add the chart archive to a release. See [Add the Chart Archive to a Release](tutorial-preflight-helm-create-release).
+
+## Related Topics
+
+* [Defining Preflight Checks](/vendor/preflight-defining)
+* [Packaging a Helm Chart for a Release](/vendor/helm-install-release)
+
+================
+File: docs/vendor/tutorial-preflight-helm-create-customer.mdx
+================
+# Step 4: Create a Customer
+
+After promoting the release, create a customer so that you can run the preflight checks and install.
+
+To create a customer:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers > Create customer**.
+
+   The **Create a new customer** page opens:
+
+   ![Create a new customer page in the Vendor Portal](/images/create-customer.png)
+
+   [View a larger version of this image](/images/create-customer.png)
+
+1. For **Customer name**, enter a name for the customer. For example, `Preflight Customer`.
+
+1. For **Channel**, select **Unstable**. This allows the customer to install releases promoted to the Unstable channel.
+
+1. For **Customer email**, enter the email address for the customer. The customer email address is required to install the application with the Helm CLI. This email address is never used to send emails to customers.
+
+1. For **License type**, select Development.
+
+1. Click **Save Changes**.
+
+## Next Step
+
+Use the Helm CLI to run the preflight checks you defined and install Gitea. See [Run Preflights with the Helm CLI](tutorial-preflight-helm-install).
+
+## Related Topics
+
+* [About Customers](/vendor/licenses-about)
+* [Creating and Managing Customers](/vendor/releases-creating-customer)
+
+================
+File: docs/vendor/tutorial-preflight-helm-create-release.mdx
+================
+# Step 3: Add the Chart Archive to a Release
+
+Use the Replicated CLI to add the Gitea Helm chart archive to a release in the Replicated vendor platform.
+
+To create a release:
+
+1. Install the Replicated CLI:
+
+   ```
+   brew install replicatedhq/replicated/cli
+   ```
+   For more installation options, see [Installing the Replicated CLI](/reference/replicated-cli-installing).
+
+1. Authorize the Replicated CLI:
+
+   ```
+   replicated login
+   ```
+   In the browser window that opens, complete the prompts to log in to your vendor account and authorize the CLI.
+
+1. Create an application named `Gitea`:
+
+   ```
+   replicated app create Gitea
+   ```
+
+1. Get the slug for the application that you created:
+
+   ```
+   replicated app ls
+   ```
+   **Example output**:
+   ```
+   ID                             NAME     SLUG          SCHEDULER
+   2WthxUIfGT13RlrsUx9HR7So8bR    Gitea    gitea-boxer   kots
+   ```
+   In the example above, the application slug is `gitea-boxer`.
+
+1. Set the `REPLICATED_APP` environment variable to the application that you created. This allows you to interact with the application using the Replicated CLI without needing to use the `--app` flag with every command:
+
+   **Example:**
+
+   ```
+   export REPLICATED_APP=gitea-boxer
+   ```
+
+1. Go to the `gitea` directory.
+
+1. Create a release with the Gitea chart archive:
+
+   ```
+   replicated release create --chart=gitea-1.0.6.tgz
+   ```
+   ```bash
+   You are creating a release that will only be installable with the helm CLI.
+ For more information, see + https://docs.replicated.com/vendor/helm-install#about-helm-installations-with-replicated + + • Reading chart from gitea-1.0.6.tgz ✓ + • Creating Release ✓ + • SEQUENCE: 1 + ``` + +1. Log in to the Vendor Portal and go to **Releases**. + + The release that you created is listed under **All releases**. + +1. Click **View YAML** to view the files in the release. + +1. At the top of the page, click **Promote**. + + <img alt="Promote release dialog" src="/images/release-promote.png" width="400px"/> + + [View a larger version of this image](/images/release-promote.png) + +1. In the dialog, for **Which channels you would like to promote this release to?**, select **Unstable**. Unstable is a default channel that is intended for use with internal testing. + +1. For **Version label**, open the dropdown and select **1.0.6**. + +1. Click **Promote**. + + +## Next Step + +Create a customer so that you can install the release in a development environment. See [Create a Customer](tutorial-preflight-helm-create-customer). + +## Related Topics + +* [About Channels and Releases](/vendor/releases-about) +* [Managing Releases with the CLI](/vendor/releases-creating-cli) + +================ +File: docs/vendor/tutorial-preflight-helm-get-chart.mdx +================ +# Step 1: Get the Sample Chart and Test + +To begin, get the sample Gitea Helm chart from Bitnami, install the chart in your cluster using the Helm CLI, and then uninstall. The purpose of this step is to confirm that you can successfully install the application before adding preflight checks to the chart. + +To get the sample Gitea Helm chart and test installation: + +1. Run the following command to pull and untar version 1.0.6 of the Bitnami Gitea Helm chart: + + ``` + helm pull --untar oci://registry-1.docker.io/bitnamicharts/gitea --version 1.0.6 + ``` + For more information about this chart, see the [bitnami/gitea](https://github.com/bitnami/charts/tree/main/bitnami/gitea) repository in GitHub. + +1. Change to the new `gitea` directory that was created: + ``` + cd gitea + ``` +1. View the files in the directory: + ``` + ls + ``` + The directory contains the following files: + ``` + Chart.lock Chart.yaml README.md charts templates values.yaml + ``` +1. Install the Gitea chart in your cluster: + + ``` + helm install gitea . --namespace gitea --create-namespace + ``` + To view the full installation instructions from Bitnami, see [Installing the Chart](https://github.com/bitnami/charts/blob/main/bitnami/gitea/README.md#installing-the-chart) in the `bitnami/gitea` repository. + + When the chart is installed, the following output is displayed: + + ``` + NAME: gitea + LAST DEPLOYED: Tue Oct 24 12:44:55 2023 + NAMESPACE: gitea + STATUS: deployed + REVISION: 1 + TEST SUITE: None + NOTES: + CHART NAME: gitea + CHART VERSION: 1.0.6 + APP VERSION: 1.20.5 + + ** Please be patient while the chart is being deployed ** + + 1. Get the Gitea URL: + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace gitea -w gitea' + + export SERVICE_IP=$(kubectl get svc --namespace gitea gitea --template "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}") + echo "Gitea URL: http://$SERVICE_IP/" + + WARNING: You did not specify a Root URL for Gitea. The rendered URLs in Gitea may not show correctly. In order to set a root URL use the rootURL value. + + 2. 
Get your Gitea login credentials by running:
+
+        echo Username: bn_user
+        echo Password: $(kubectl get secret --namespace gitea gitea -o jsonpath="{.data.admin-password}" | base64 -d)
+   ```
+
+1. Watch the `gitea` LoadBalancer service until an external IP is available:
+
+   ```
+   kubectl get svc gitea --namespace gitea --watch
+   ```
+
+1. When the external IP for the `gitea` LoadBalancer service is available, run the commands provided in the output of the installation command to get the Gitea URL:
+
+   ```
+   export SERVICE_IP=$(kubectl get svc --namespace gitea gitea --template "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}")
+   echo "Gitea URL: http://$SERVICE_IP/"
+   ```
+
+   :::note
+   Alternatively, you can run the following command to forward a local port to a port on the Gitea Pod:
+
+   ```
+   POD_NAME=$(kubectl get pods -l app.kubernetes.io/name=gitea -o jsonpath='{.items[0].metadata.name}')
+   kubectl port-forward pod/$POD_NAME 8080:3000
+   ```
+   :::
+
+1. In a browser, go to the Gitea URL to confirm that you can see the welcome page for the application:
+
+   <img alt="Gitea application webpage" src="/images/gitea-app.png" width="500px"/>
+
+   [View a larger version of this image](/images/gitea-app.png)
+
+1. Uninstall the Helm chart:
+
+   ```
+   helm uninstall gitea --namespace gitea
+   ```
+   This command removes all the Kubernetes components associated with the chart and uninstalls the `gitea` release.
+
+1. Delete the namespace:
+
+   ```
+   kubectl delete namespace gitea
+   ```
+
+## Next Step
+
+Define preflight checks and add them to the Gitea Helm chart. See [Add a Preflight Spec to the Chart](tutorial-preflight-helm-add-spec).
+
+## Related Topics
+
+* [Helm Install](https://helm.sh/docs/helm/helm_install/)
+* [Helm Uninstall](https://helm.sh/docs/helm/helm_uninstall/)
+* [Helm Package](https://helm.sh/docs/helm/helm_package/)
+* [bitnami/gitea](https://github.com/bitnami/charts/blob/main/bitnami/gitea)
+
+================
+File: docs/vendor/tutorial-preflight-helm-install-kots.mdx
+================
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import HelmChartCr from "../partials/getting-started/_gitea-helmchart-cr.mdx"
+import KotsCr from "../partials/getting-started/_gitea-kots-app-cr.mdx"
+import K8sCr from "../partials/getting-started/_gitea-k8s-app-cr.mdx"
+import KotsVerReq from "../partials/replicated-sdk/_kots-version-req.mdx"
+
+# Step 6: Run Preflights with KOTS
+
+Create a KOTS-enabled release and then install Gitea with KOTS. The purpose of this step is to see how preflight checks automatically run in the KOTS Admin Console during installation.
+
+To run preflight checks during installation with KOTS:
+
+1. In the `gitea` directory, create a subdirectory named `manifests`:
+
+   ```
+   mkdir manifests
+   ```
+
+   You will add the files required to support installation with KOTS to this subdirectory.
+
+1. Move the Helm chart archive to `manifests`:
+
+   ```
+   mv gitea-1.0.6.tgz manifests
+   ```
+
+1. In `manifests`, create the YAML manifests required by KOTS:
+   ```
+   cd manifests
+   ```
+   ```
+   touch gitea.yaml kots-app.yaml k8s-app.yaml
+   ```
+
+1. In each of the files that you created, paste the corresponding YAML provided in the tabs below:
+
+   <Tabs>
+   <TabItem value="helmchart" label="gitea.yaml" default>
+   <h5>Description</h5>
+   <p>The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart.
The <code>name</code> and <code>chartVersion</code> listed in the HelmChart custom resource must match the name and version of a Helm chart archive in the release. Each Helm chart archive in a release requires a unique HelmChart custom resource.</p> + <h5>YAML</h5> + <HelmChartCr/> + </TabItem> + <TabItem value="kots-app" label="kots-app.yaml"> + <h5>Description</h5> + <p>The KOTS Application custom resource enables features in the Replicated Admin Console such as branding, release notes, port forwarding, dashboard buttons, application status indicators, and custom graphs.</p><p>The YAML below provides a name for the application to display in the Admin Console, adds a custom <em>status informer</em> that displays the status of the <code>gitea</code> Deployment resource in the Admin Console dashboard, adds a custom application icon, and creates a port forward so that the user can open the Gitea application in a browser.</p> + <h5>YAML</h5> + <KotsCr/> + </TabItem> + <TabItem value="k8s-app" label="k8s-app.yaml"> + <h5>Description</h5> + <p>The Kubernetes Application custom resource supports functionality such as including buttons and links on the Replicated Admin Console dashboard. The YAML below adds an <strong>Open App</strong> button to the Admin Console dashboard that opens the application using the port forward configured in the KOTS Application custom resource.</p> + <h5>YAML</h5> + <K8sCr/> + </TabItem> + </Tabs> + +1. From the `manifests` directory, lint the YAML files to confirm that there are no errors: + + ``` + replicated release lint --yaml-dir . + ``` + `--yaml-dir` is the path to the directory that contains the Helm chart archive and the manifest files required by KOTS. + + **Example output**: + + ``` + RULE TYPE FILENAME LINE MESSAGE + config-spec warn Missing config spec + preflight-spec warn Missing preflight spec + troubleshoot-spec warn Missing troubleshoot spec + nonexistent-status-informer-object warn kots-app.yaml 8 Status informer points to a nonexistent kubernetes object. If this is a Helm resource, this warning can be ignored. + ``` + + The output includes warning messages, including a warning about a missing preflight spec. This warning appears because the preflight spec is defined in the Helm chart. The warnings can be ignored for the purpose of this tutorial. + +1. Create a release: + + ```bash + replicated release create --yaml-dir . + ``` + **Example output**: + ```bash + • Reading manifests from . ✓ + • Creating Release ✓ + • SEQUENCE: 2 + ``` + +1. Log in to the [vendor portal](https://vendor.replicated.com) and go to **Releases**. The new release is labeled **Sequence 2**. + +1. Promote the release to the Unstable channel. + +1. Go to the **Customers** page. + +1. Create a new customer named `KOTS Preflight Customer`. For **License options**, enable the **KOTS Install Enabled** checkbox. This is the entitlement that allows the customer to install with KOTS. + +1. On the **Manage customer** page for the customer, click **Download license**. You will use the license file to install with KOTS. + +1. Go to **Channels**. From the **Unstable** channel card, under **Install**, copy the **KOTS Install** command. + + ![KOTS Install tab on the Unstable channel card](/images/helm-tutorial-unstable-kots-install-command.png) + + [View a larger version of this image](/images/helm-tutorial-unstable-kots-install-command.png) + +1. 
On the command line, run the **KOTS Install** command that you copied: + + ```bash + curl https://kots.io/install | bash + kubectl kots install $REPLICATED_APP/unstable + ``` + + This installs the latest version of the KOTS CLI and the Replicated Admin Console. The Admin Console provides a user interface where you can upload the customer license file and deploy the application. + + For additional KOTS CLI installation options, including how to install without root access, see [Installing the KOTS CLI](/reference/kots-cli-getting-started). + + :::note + <KotsVerReq/> + ::: + +1. Complete the installation command prompts: + + 1. For `Enter the namespace to deploy to`, enter `gitea`. + + 1. For `Enter a new password to be used for the Admin Console`, provide a password to access the Admin Console. + + When the Admin Console is ready, the command prints the URL where you can access the Admin Console. At this point, the KOTS CLI is installed and the Admin Console is running, but the application is not yet deployed. + + **Example output:** + + ```bash + Enter the namespace to deploy to: gitea + • Deploying Admin Console + • Creating namespace ✓ + • Waiting for datastore to be ready ✓ + Enter a new password for the Admin Console (6+ characters): •••••••• + • Waiting for Admin Console to be ready ✓ + + • Press Ctrl+C to exit + • Go to http://localhost:8800 to access the Admin Console + ``` + +1. With the port forward running, in a browser, go to `http://localhost:8800` to access the Admin Console. + +1. On the login page, enter the password that you created. + +1. On the license page, select the license file that you downloaded previously and click **Upload license**. + + Preflight checks run automatically: + + ![Gitea preflight checks page](/images/gitea-preflights-admin-console.png) + + [View a larger version of this image](/images/gitea-preflights-admin-console.png) + +1. When the preflight checks finish, click **Deploy** to deploy the application. + + The Admin Console dashboard opens. The application status changes from Missing to Unavailable while the `gitea` Deployment is being created: + + ![Admin console dashboard](/images/tutorial-gitea-unavailable.png) + + [View a larger version of this image](/images/tutorial-gitea-unavailable.png) + +1. (Optional) After the application is in a Ready status, click **Open App** to view the Gitea application in a browser. + +1. Uninstall the Gitea application from your cluster: + + ```bash + kubectl kots remove $REPLICATED_APP --namespace gitea --undeploy + ``` + **Example output**: + ``` + • Removing application gitea-boxer reference from Admin Console and deleting associated resources from the cluster ✓ + • Application gitea-boxer has been removed + ``` + +1. Remove the Admin Console from the cluster: + + 1. Delete the namespace where the Admin Console is installed: + + ``` + kubectl delete namespace gitea + ``` + 1. Delete the Admin Console ClusterRole and ClusterRoleBinding: + + ``` + kubectl delete clusterrole kotsadm-role + ``` + ``` + kubectl delete clusterrolebinding kotsadm-rolebinding + ``` + +## Summary + +Congratulations! In this tutorial, you defined a preflight check for Gitea that checks the version of Kubernetes running in the cluster. You also ran preflight checks before installing with both the Helm CLI and with KOTS. 
+
+To learn more about defining and running preflight checks, see:
+* [Defining Preflight Checks](/vendor/preflight-defining)
+* [Running Preflight Checks](/vendor/preflight-running)
+* [Getting Started](https://troubleshoot.sh/docs/) in the open source Troubleshoot documentation.
+
+================
+File: docs/vendor/tutorial-preflight-helm-install.mdx
+================
+# Step 5: Run Preflights with the Helm CLI
+
+Use the Helm CLI installation instructions provided for the customer that you created to run the preflight checks for Gitea and install. The purpose of this step is to demonstrate how enterprise users can run preflight checks defined in a Helm chart before installing.
+
+To run preflight checks and install with the Helm CLI:
+
+1. Create a `gitea` namespace for the installation:
+
+   ```
+   kubectl create namespace gitea
+   ```
+
+1. Update the current kubectl context to target the new `gitea` namespace. This ensures that the chart is installed in the `gitea` namespace without requiring you to set the `--namespace` flag with the `helm install` command:
+
+   ```
+   kubectl config set-context --namespace=gitea --current
+   ```
+
+1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Customers** page.
+
+1. On the **Customer details** page for the customer that you created, click **Helm install instructions**.
+
+   ![Helm install instructions button](/images/tutorial-gitea-helm-customer-install-button.png)
+
+   [View a larger version of this image](/images/tutorial-gitea-helm-customer-install-button.png)
+
+1. Run the first command in the **Helm install instructions** dialog to log in to the Replicated registry.
+
+1. Run the second command to install the preflight kubectl plugin:
+
+   ```bash
+   curl https://krew.sh/preflight | bash
+   ```
+   The preflight plugin is a client-side utility used to run preflight checks.
+
+1. Run the third command to run preflight checks:
+
+   ```bash
+   helm template oci://registry.replicated.com/$REPLICATED_APP/unstable/gitea | kubectl preflight -
+   ```
+   This command templates the Gitea chart and then pipes the result to the preflight plugin. The following shows an example of the output for this command:
+
+   <img alt="Preflight CLI output" src="/images/gitea-preflights-cli.png" width="600px"/>
+
+   [View a larger version of this image](/images/gitea-preflights-cli.png)
+
+1. Run the fourth command listed under **Option 1: Install Gitea** to install the application:
+
+   ```bash
+   helm install gitea oci://registry.replicated.com/$REPLICATED_APP/unstable/gitea
+   ```
+
+1. Uninstall and delete the namespace:
+
+   ```bash
+   helm uninstall gitea --namespace gitea
+   ```
+   ```bash
+   kubectl delete namespace gitea
+   ```
+
+## Next Step
+
+Install the application with KOTS to see how preflight checks are run from the KOTS Admin Console. See [Run Preflights with KOTS](tutorial-preflight-helm-install-kots).
+
+## Related Topics
+
+* [Running Preflight Checks](/vendor/preflight-running)
+* [Installing with Helm](/vendor/install-with-helm)
+
+================
+File: docs/vendor/tutorial-preflight-helm-setup.mdx
+================
+# Introduction and Setup
+
+This topic provides a summary of the goals and outcomes for the tutorial and also lists the prerequisites to set up your environment before you begin.
+
+## Summary
+
+This tutorial introduces you to preflight checks. The purpose of preflight checks is to provide clear feedback about any missing requirements or incompatibilities in the customer's cluster _before_ they install or upgrade an application.
Thorough preflight checks provide increased confidence that an installation or upgrade will succeed and help prevent support escalations. + +Preflight checks are part of the [Troubleshoot](https://troubleshoot.sh/) open source project, which is maintained by Replicated. + +In this tutorial, you use a sample Helm chart to learn how to: + +* Define custom preflight checks in a Kubernetes Secret in a Helm chart +* Package a Helm chart and add it to a release in the Replicated Vendor Portal +* Run preflight checks using the Helm CLI +* Run preflight checks in the Replicated KOTS Admin Console + +## Set Up the Environment + +Before you begin, do the following to set up your environment: + +* Ensure that you have kubectl access to a Kubernetes cluster. You can use any cloud provider or tool that you prefer to create a cluster, such as Google Kubernetes Engine (GKE), Amazon Web Services (AWS), or minikube. + + For information about installing kubectl and configuring kubectl access to a cluster, see the following in the Kubernetes documentation: + * [Install Tools](https://kubernetes.io/docs/tasks/tools/) + * [Command line tool (kubectl)](https://kubernetes.io/docs/reference/kubectl/) + +* Install the Helm CLI. To install the Helm CLI using Homebrew, run: + + ``` + brew install helm + ``` + + For more information, including alternative installation options, see [Install Helm](https://helm.sh/docs/intro/install/) in the Helm documentation. + +* Create a vendor account to access the Vendor Portal. See [Creating a Vendor Account](/vendor/vendor-portal-creating-account). + + :::note + If you do not yet have a Vendor Portal team to join, you can sign up for a trial account. By default, trial accounts do not include access to Replicated KOTS. To get access to KOTS with your trial account so that you can complete this and other tutorials, contact Replicated at contact@replicated.com. + ::: + +## Next Step + +Get the sample Bitnami Helm chart and test installation with the Helm CLI. See [Step 1: Get the Sample Chart and Test](/vendor/tutorial-preflight-helm-get-chart). + +================ +File: docs/vendor/using-third-party-registry-proxy.mdx +================ +# Using a Registry Proxy for Helm Air Gap Installations + +This topic describes how to connect the Replicated proxy registry to a Harbor or JFrog Artifactory instance to support pull-through image caching. It also includes information about how to set up replication rules in Harbor for image mirroring. + +## Overview + +For applications distributed with Replicated, the [Replicated proxy registry](/vendor/private-images-about) grants proxy, or _pull-through_, access to application images without exposing registry credentials to customers. + +Users can optionally connect the Replicated proxy registry with their own [Harbor](https://goharbor.io) or [JFrog Artifactory](https://jfrog.com/help/r/jfrog-artifactory-documentation) instance to proxy and cache the images that are required for installation on demand. This can be particularly helpful in Helm installations in air-gapped environments because it allows users to pull and cache images from an internet-connected machine, then access the cached images during installation from a machine with limited or no outbound internet access.
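+ +For example, once a Harbor project is configured as a proxy cache for the Replicated proxy registry, a pull through that cache might look like the following. This is a sketch only: the Harbor hostname, project name, application slug, and image path are all illustrative, and the exact repository path depends on how the application's images are exposed through `proxy.replicated.com`: + +```bash +# Pull through the Harbor proxy cache project; on a cache miss, +# Harbor fetches the image from proxy.replicated.com and stores it +docker pull harbor.internal.example.com/replicated-proxy/proxy/my-app/quay.io/my-org/api:v1.0.1 +```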
+ +In addition to the support for on-demand pull-through caching, connecting the Replicated proxy registry to a Harbor or Artifactory instance also has the following benefits: +* Registries like Harbor or Artifactory typically support access controls as well as scanning images for security vulnerabilities +* With Harbor, users can optionally set up replication rules for image mirroring, which can be used to improve data availability and reliability + +## Limitation + +Artifactory does not support mirroring or replication for Docker registries. If you need to set up image mirroring, use Harbor. See [Configure Image Mirroring in Harbor](#harbor-mirror) below. + +## Connect the Replicated Proxy Registry to Harbor + +[Harbor](https://goharbor.io) is a popular open-source container registry. Users can connect the Replicated proxy registry to Harbor in order to cache images on demand and set up pull-based replication rules to proactively mirror images. Connecting the Replicated proxy registry to Harbor also allows customers to use Harbor's security features. + +### Use Harbor for Pull-Through Proxy Caching {#harbor-proxy-cache} + +To connect the Replicated proxy registry to Harbor for pull-through proxy caching: + +1. Log in to Harbor and create a new replication endpoint. This endpoint connects the Replicated proxy registry to the Harbor instance. For more information, see [Creating Replication Endpoints](https://goharbor.io/docs/2.11.0/administration/configuring-replication/create-replication-endpoints/) in the Harbor documentation. + +1. Enter the following details for the endpoint: + + * For the provider field, choose Docker Registry. + * For the URL field, enter `https://proxy.replicated.com` or the custom domain that is configured for the Replicated proxy registry. For more information about configuring custom domains in the Vendor Portal, see [Using Custom Domains](/vendor/custom-domains-using). + * For the access ID, enter the email address associated with the customer in the Vendor Portal. + * For the access secret, enter the customer's unique license ID. You can find the license ID in the Vendor Portal by going to **Customers > [Customer Name]**. + +1. Verify your configuration by testing the connection and then save the endpoint. + +1. After adding the Replicated proxy registry as a replication endpoint in Harbor, set up a proxy cache. This allows for pull-through image caching with Harbor. For more information, see [Configure Proxy Cache](https://goharbor.io/docs/2.11.0/administration/configure-proxy-cache/) in the Harbor documentation. + +1. (Optional) Add a pull-based replication rule to support image mirroring. See [Configure Image Mirroring in Harbor](#harbor-mirror) below. + +### Configure Image Mirroring in Harbor {#harbor-mirror} + +To enable image mirroring with Harbor, users create a pull-based replication rule. This periodically (or when manually triggered) pulls images from the Replicated proxy registry to store them in Harbor. + +The Replicated proxy registry exposes standard catalog and tag listing endpoints that are used by Harbor to support image mirroring: +* The catalog endpoint returns a list of repositories built from images of the last 10 releases. +* The tags listing endpoint lists the tags available in a given repository for those same releases. + +When image mirroring is enabled, Harbor uses these endpoints to build a list of images to cache and then serve.
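+ +Because these are the standard Docker Registry v2 catalog and tags endpoints, you can also query them directly to preview what Harbor will mirror. The following `curl` sketch uses illustrative values: replace the email address, license ID, and repository path with your own, and note that, depending on the registry authentication flow, you might need to exchange the credentials for a bearer token rather than using basic auth: + +```bash +# List the repositories available for mirroring (built from the last 10 releases) +curl -u 'user@example.com:LICENSE_ID' https://proxy.replicated.com/v2/_catalog + +# List the tags available in one of the returned repositories +curl -u 'user@example.com:LICENSE_ID' https://proxy.replicated.com/v2/proxy/my-app/quay.io/my-org/api/tags/list +```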
+ +#### Limitations + +Image mirroring with Harbor has the following limitations: + +* Neither the catalog nor the tags listing endpoint exposed by the Replicated proxy service respects pagination requests. However, Harbor requests 1000 items at a time. + +* Only authenticated users can perform catalog calls or list tags. Authenticated users are those with an email address and license ID associated with a customer in the Vendor Portal. + +#### Create a Pull-Based Replication Rule in Harbor for Image Mirroring + +To configure image mirroring in Harbor: + +1. Follow the steps in [Use Harbor for Pull-Through Proxy Caching](#harbor-proxy-cache) above to add the Replicated proxy registry to Harbor as a replication endpoint. + +1. Create a **pull-based** replication rule in Harbor to mirror images proactively. For more information, see [Creating a replication rule](https://goharbor.io/docs/2.11.0/administration/configuring-replication/create-replication-rules/) in the Harbor documentation. + +## Use Artifactory for Pull-Through Proxy Caching + +[JFrog Artifactory](https://jfrog.com/help/r/jfrog-artifactory-documentation) supports pull-through caching for Docker registries. + +For information about how to configure a pull-through cache with Artifactory, see [Remote Repository](https://jfrog.com/help/r/jfrog-artifactory-documentation/configure-a-remote-repository) in the Artifactory documentation. + +================ +File: docs/vendor/vendor-portal-application-settings.md +================ +# Application Settings Page + +Each application has its own settings, which include the application name and application slug. + +The following shows the **Application Settings** page, which you access by selecting **_Application Name_ > Settings**: + +<img alt="Settings page" src="/images/application-settings.png" width="600px"/> + +[View a larger version of this image](/images/application-settings.png) + +The following describes each of the application settings: + +- **Application name:** The application name is initially set when you first create the application in the Vendor Portal. You can change the name at any time so that it displays as a user-friendly name that your team can easily identify. +- **Application slug:** The application slug is used with the Replicated CLI and with some of the KOTS CLI commands. You can click on the link below the slug to toggle between the application ID number and the slug name. The application ID and application slug are unique identifiers that cannot be edited. +- **Service Account Tokens:** Provides a link to the **Service Accounts** page, where you can create or remove a service account. Service accounts are paired with API tokens and are used with the Vendor API to automate tasks. For more information, see [Using Vendor API Tokens](/reference/vendor-api-using). +- **Scheduler:** Displayed if the application has a KOTS entitlement. +- **Danger Zone:** Lets you delete the application, and all of the licenses and data associated with the application. The delete action cannot be undone. + +================ +File: docs/vendor/vendor-portal-creating-account.md +================ +# Creating a Vendor Account + +To get started with Replicated, you must create a Replicated vendor account. When you create your account, you are also prompted to create an application. To create additional applications in the future, log in to the Replicated Vendor Portal and select **Create new app** from the Applications drop-down list. + +To create a vendor account: + +1.
Go to the [Vendor Portal](https://vendor.replicated.com), and select **Sign up**. + + The sign up page opens. +2. Enter your email address or continue with Google authentication. + + - If registering with an email, the Activate account page opens and you will receive an activation code in your email. + + :::note + To resend the code, click **Resend it**. + ::: + + - Copy and paste the activation code into the text box and click **Activate**. Your account is now activated. + + :::note + After your account is activated, you might have the option to accept a pending invitation, or to automatically join an existing team if the auto-join feature is enabled by your administrator. For more information about enabling the auto-join feature, see [Enable Users to Auto-join Your Team](https://docs.replicated.com/vendor/team-management#enable-users-to-auto-join-your-team). + ::: + +3. On the Create your team page, enter your first name, last name, and company name. Click **Continue** to complete the setup. + + :::note + The company name you provide is used as your team name in the Vendor Portal. + ::: + + The Create application page opens. + +4. Enter a name for the application, such as `My-Application-Demo`. Click **Create application**. + + The application is created and the Channels page opens. + + :::important + Replicated recommends that you use a temporary name for the application at this time, such as `My-Application-Demo` or `My-Application-Test`. + + Only use an official name for your application when you have completed testing and are ready to distribute the application to your customers. + + Replicated recommends that you use a temporary application name for testing because you are not able to restore or modify previously-used application names or application slugs in the Vendor Portal. + ::: + +## Next Step + +Invite team members to collaborate with you in the Vendor Portal. See [Invite Members](team-management#invite-members). + +================ +File: docs/vendor/vendor-portal-manage-app.md +================ +# Managing Applications + +This topic provides information about managing applications, including how to create, delete, and retrieve the slug for applications in the Replicated Vendor Portal and with the Replicated CLI. + +For information about creating and managing applications with the Vendor API v3, see the [apps](https://replicated-vendor-api.readme.io/reference/createapp) section in the Vendor API v3 documentation. + +## Create an Application + +Teams can create one or more applications. It is common to create multiple applications for testing purposes. + +### Vendor Portal + +To create a new application: + +1. Log in to the [Vendor Portal](https://vendor.replicated.com/). If you do not have an account, see [Creating a Vendor Account](/vendor/vendor-portal-creating-account). + +1. In the top left of the page, open the application drop-down and click **Create new app...**. + + <img alt="create new app drop down" src="/images/create-new-app.png" width="300px"/> + + [View a larger version of this image](/images/create-new-app.png) + +1. On the **Create application** page, enter a name for the application. + + <img alt="create new app page" src="/images/create-application-page.png" width="500px"/> + + [View a larger version of this image](/images/create-application-page.png) + + :::important + If you intend to use the application for testing purposes, Replicated recommends that you use a temporary name such as `My Application Demo` or `My Application Test`.
+ + You are not able to restore or modify previously-used application names or application slugs. + ::: + +1. Click **Create application**. + +### Replicated CLI + +To create an application with the Replicated CLI: + +1. Install the Replicated CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing). + +1. Run the following command: + + ```bash + replicated app create APP-NAME + ``` + Replace `APP-NAME` with the name that you want to use for the new application. + + **Example**: + + ```bash + replicated app create cli-app + ID NAME SLUG SCHEDULER + 1xy9t8G9CO0PRGzTwSwWFkMUjZO cli-app cli-app kots + ``` + +## Get the Application Slug {#slug} + +Each application has a slug, which is used for interacting with the application using the Replicated CLI. The slug is automatically generated based on the application name and cannot be changed. + +### Vendor Portal + +To get an application slug in the Vendor Portal: + +1. Log in to the [Vendor Portal](https://vendor.replicated.com/) and go to **_Application Name_ > Settings**. + +1. Under **Application Slug**, copy the slug. + + <img alt="Application slug" src="/images/application-settings.png" width="600px"/> + + [View a larger version of this image](/images/application-settings.png) + +### Replicated CLI + +To get an application slug with the Replicated CLI: + +1. Install the Replicated CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing). + +1. Run the following command: + + ```bash + replicated app ls APP-NAME + ``` + Replace `APP-NAME` with the name of the target application. Or, exclude `APP-NAME` to list all applications in the team. + + **Example:** + + ```bash + replicated app ls cli-app + ID NAME SLUG SCHEDULER + 1xy9t8G9CO0PRGzTwSwWFkMUjZO cli-app cli-app kots + ``` + +1. Copy the value in the `SLUG` field. + +## Delete an Application + +When you delete an application, you also delete all licenses and data associated with the application. You can also optionally delete all images associated with the application from the Replicated registry. Deleting an application cannot be undone. + +### Vendor Portal + +To delete an application in the Vendor Portal: + +1. Log in to the [Vendor Portal](https://vendor.replicated.com/) and go to **_Application Name_ > Settings**. + +1. Under **Danger Zone**, click **Delete App**. + + <img alt="Setting page" src="/images/application-settings.png" width="600px"/> + + [View a larger version of this image](/images/application-settings.png) + +1. In the **Are you sure you want to delete this app?** dialog, enter the application name. Optionally, enter your password if you want to delete all images associated with the application from the Replicated registry. + + <img alt="delete app dialog" src="/images/delete-app-dialog.png" width="400px"/> + + [View a larger version of this image](/images/delete-app-dialog.png) + +1. Click **Delete app**. + +### Replicated CLI + +To delete an application with the Replicated CLI: + +1. Install the Replicated CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing). + +1. Run the following command: + + ```bash + replicated app delete APP-NAME + ``` + Replace `APP-NAME` with the name of the target application. + +1. When prompted, type `yes` to confirm that you want to delete the application. + + **Example:** + + ```bash + replicated app delete deletion-example + • Fetching App ✓ + ID NAME SLUG SCHEDULER + 1xyAIzrmbvq... deletion-example deletion-example kots + Delete the above listed application? 
There is no undo: yes█ + • Deleting App ✓ + ``` + +================ +File: docs/intro-kots.mdx +================ +import Kots from "../docs/partials/kots/_kots-definition.mdx" + +# Introduction to KOTS + +This topic provides an introduction to the Replicated KOTS installer, including information about KOTS features, installation options, and user interfaces. + +:::note +The Replicated KOTS entitlement is required to install applications with KOTS. For more information, see [Pricing](https://www.replicated.com/pricing) on the Replicated website. +::: + +## Overview + +<Kots/> + +KOTS communicates securely with the Replicated Vendor Portal to synchronize customer licenses, check for available application updates, send instance data, share customer-generated support bundles, and more. + +Installing an application with KOTS provides access to features such as: + +* Support for air gap installations in environments with limited or no outbound internet access +* Support for installations on VMs or bare metal servers, when using Replicated Embedded Cluster or Replicated kURL +* The KOTS Admin Console, which provides a user interface where customers can install and manage their application instances +* Instance telemetry automatically sent to the Vendor Portal for instances running in customer environments +* Strict preflight checks that block installation if environment requirements are not met +* Backup and restore with Replicated snapshots +* Support for marking releases as required to prevent users from skipping them during upgrades + +KOTS is an open source project that is maintained by Replicated. For more information, see the [kots](https://github.com/replicatedhq/kots) repository in GitHub. + +## About Installing with KOTS + +KOTS can be used to install Kubernetes applications and Helm charts in the following environments: +* Clusters provisioned on VMs or bare metal servers with Replicated Embedded Cluster or Replicated kURL +* Existing clusters brought by the user +* Online (internet-connected) or air-gapped (disconnected) environments + +To install an application with KOTS, users first run an installation script that installs KOTS in the target cluster and deploys the KOTS Admin Console. After KOTS is installed, users can log in to the KOTS Admin Console to upload their license file, configure the application, run preflight checks, and install and deploy the application. + +The following diagram demonstrates how a single release promoted to the Stable channel in the Vendor Portal can be installed with KOTS in an embedded cluster on a VM, in an existing air-gapped cluster, and in an existing internet-connected cluster: + +<img alt="Embedded cluster, air gap, and existing cluster app installation workflows" src="/images/kots-installation-overview.png"/> + +[View a larger version of this image](/images/kots-installation-overview.png) + +As shown in the diagram above: +* For installations in existing online (internet-connected) clusters, users run a command to install KOTS in their cluster. +* For installations on VMs or bare metal servers, users run an Embedded Cluster or kURL installation script that both provisions a cluster in their environment and installs KOTS in the cluster. +* For installations in air-gapped clusters, users download air gap bundles for KOTS and the application from the Replicated Download Portal and then provide the bundles during installation. + +All users must have a valid license file to install with KOTS. 
After KOTS is installed in the cluster, users can access the KOTS Admin Console to provide their license and deploy the application. + +For more information about how to install applications with KOTS, see the [Installing an Application](/enterprise/installing-overview) section. + +## KOTS User Interfaces + +This section describes the KOTS interfaces available to users for installing and managing applications. + +### KOTS Admin Console + +KOTS provides an Admin Console to make it easy for users to install, manage, update, configure, monitor, back up and restore, and troubleshoot their application instance from a GUI. + +The following shows an example of the Admin Console dashboard for an application: + +![Admin Console Dashboard](/images/guides/kots/application.png) + +[View a larger version of this image](/images/guides/kots/application.png) + +For applications installed with Replicated Embedded Cluster in a VM or bare metal server, the Admin Console also includes a **Cluster Management** tab where users can add and manage nodes in the embedded cluster, as shown below: + +![Admin console dashboard with Cluster Management tab](/images/gitea-ec-ready.png) + +[View a larger version of this image](/images/gitea-ec-ready.png) + +### KOTS CLI + +The KOTS command-line interface (CLI) is a kubectl plugin. Customers can run KOTS CLI commands to install and manage their application instances programmatically. + +For information about getting started with the KOTS CLI, see [Installing the KOTS CLI](/reference/kots-cli-getting-started). + +The KOTS CLI can also be used to install an application without needing to access the Admin Console. This can be useful for automating installations and upgrades, such as in CI/CD pipelines. For information about how to perform headless installations from the command line, see [Installing with the KOTS CLI](/enterprise/installing-existing-cluster-automation). + +================ +File: docs/intro-replicated.mdx +================ +--- +pagination_prev: null +--- + +import ApiAbout from "/docs/partials/vendor-api/_api-about.mdx" +import Replicated from "/docs/partials/getting-started/_replicated-definition.mdx" +import Helm from "/docs/partials/helm/_helm-definition.mdx" +import Kots from "/docs/partials/kots/_kots-definition.mdx" +import KotsEntitlement from "/docs/partials/kots/_kots-entitlement-note.mdx" +import SDKOverview from "/docs/partials/replicated-sdk/_overview.mdx" +import CSDL from "/docs/partials/getting-started/_csdl-overview.mdx" +import PreflightSbAbout from "/docs/partials/preflights/_preflights-sb-about.mdx" + +# Introduction to Replicated + +This topic provides an introduction to the Replicated Platform, including a platform overview and a list of key features. It also describes the Commercial Software Distribution Lifecycle and how Replicated features can be used in each phase of the lifecycle. + +## About the Replicated Platform + +<Replicated/> + +The Replicated Platform features are designed to support ISVs during each phase of the Commercial Software Distribution Lifecycle. For more information, see [Commercial Software Distribution Lifecycle](#csdl) below.
The following diagram demonstrates the process of using the Replicated Platform to distribute an application, install the application in a customer environment, and support the application after installation: + +![replicated platform features workflow](/images/replicated-platform.png) + +[View a larger version of this image](/images/replicated-platform.png) + +The diagram above shows an application that is packaged with the [**Replicated SDK**](/vendor/replicated-sdk-overview). The application is tested in clusters provisioned with the [**Replicated Compatibility Matrix**](/vendor/testing-about), then added to a new release in the [**Vendor Portal**](/vendor/releases-about) using an automated CI/CD pipeline. + +The application is then installed by a customer ("Big Bank") on a VM. To install, the customer downloads their license, which grants proxy access to the application images through the [**Replicated proxy registry**](/vendor/private-images-about). They also download the installation assets for the [**Replicated Embedded Cluster**](/vendor/embedded-overview) installer. + +Embedded Cluster runs [**preflight checks**](/vendor/preflight-support-bundle-about) to verify that the environment meets the installation requirements, provisions a cluster on the VM, and installs [**Replicated KOTS**](intro-kots) in the cluster. KOTS provides an [**Admin Console**](intro-kots#kots-admin-console) where the customer enters application-specific configurations, runs application preflight checks, optionally joins nodes to the cluster, and then deploys the application. After installation, customers can manage both the application and the cluster from the Admin Console. + +Finally, the diagram shows how [**instance data**](/vendor/instance-insights-event-data) is automatically sent from the customer environment to the Vendor Portal by the Replicated SDK API and the KOTS Admin Console. Additionally, tooling from the open source [**Troubleshoot**](https://troubleshoot.sh/docs/collect/) project is used to generate and send [**support bundles**](/vendor/preflight-support-bundle-about), which include logs and other important diagnostic data. + +## Replicated Platform Features + +The following describes the key features of the Replicated Platform. + +### Compatibility Matrix + +Replicated Compatibility Matrix can be used to get kubectl access to running clusters in minutes. Compatibility Matrix supports various Kubernetes distributions and versions and can be interacted with through the Vendor Portal or the Replicated CLI. + +For more information, see [About Compatibility Matrix](/vendor/testing-about). + +### Embedded Cluster + +Replicated Embedded Cluster is a Kubernetes installer based on the open source Kubernetes distribution k0s. With Embedded Cluster, users install and manage both the cluster and the application together as a single appliance on a VM or bare metal server. In this way, Kubernetes is _embedded_ with the application. + +Additionally, each version of Embedded Cluster includes a specific version of [Replicated KOTS](#kots) that is installed in the cluster during installation. KOTS is used by Embedded Cluster to deploy the application and also provides the Admin Console UI where users can manage both the application and the cluster. + +For more information, see [Embedded Cluster Overview](/vendor/embedded-overview).
+ +### KOTS (Admin Console) {#kots} + +KOTS is a kubectl plugin and in-cluster Admin Console that installs Kubernetes applications in customer-controlled environments. + +KOTS is used by [Replicated Embedded Cluster](#embedded-cluster) to deploy applications and also to provide the Admin Console UI where users can manage both the application and the cluster. KOTS can also be used to install applications in existing Kubernetes clusters in customer-controlled environments, including clusters in air-gapped environments with limited or no outbound internet access. + +For more information, see [Introduction to KOTS](intro-kots). + +### Preflight Checks and Support Bundles + +<PreflightSbAbout/> + +For more information, see [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about). + +### Proxy Registry + +The Replicated proxy registry grants proxy access to an application's images using the customer's unique license. This means that customers can get access to application images during installation without the vendor needing to provide registry credentials. + +For more information, see [About the Replicated Proxy Registry](/vendor/private-images-about). + +### Replicated SDK + +The Replicated SDK is a Helm chart that can be installed as a small service alongside your application. It provides an in-cluster API that can be used to communicate with the Vendor Portal. For example, the SDK API can return details about the customer's license or report telemetry on the application instance back to the Vendor Portal. + +For more information, see [About the Replicated SDK](/vendor/replicated-sdk-overview). + +### Vendor Portal + +The Replicated Vendor Portal is the web-based user interface that you can use to configure and manage all of the Replicated features for distributing and managing application releases, supporting your release, viewing customer insights and reporting, and managing teams. + +The Vendor Portal can also be interacted with programmatically using the following developer tools: + +* **Replicated CLI**: The Replicated CLI can be used to complete tasks programmatically, including all tasks for packaging and managing applications, and managing artifacts such as teams, license files, and so on. For more information, see [Installing the Replicated CLI](/reference/replicated-cli-installing). + +* **Vendor API v3**: The Vendor API can be used to complete tasks programmatically, including all tasks for packaging and managing applications, and managing artifacts such as teams and license files. For more information, see [Using the Vendor API v3](/reference/vendor-api-using). + +## Commercial Software Distribution Lifecycle {#csdl} + +Replicated Platform features are designed to support ISVs in each phase of the Commercial Software Distribution Lifecycle shown below: + +![software distribution lifecycle wheel](/images/software-dev-lifecycle.png) + +[View a larger version of this image](/images/software-dev-lifecycle.png) + +<CSDL/> + +To download a copy of The Commercial Software Distribution Handbook, see [The Commercial Software Distribution Handbook](https://www.replicated.com/the-commercial-software-distribution-handbook). + +The following describes the phases of the software distribution lifecycle: + +* **[Develop](#develop)**: Application design and architecture decisions align with customer needs, and development teams can quickly iterate on new features.
+* **[Test](#test)**: Run automated tests in several customer-representative environments as part of continuous integration and continuous delivery (CI/CD) workflows. +* **[Release](#release)**: Use channels to share releases with external and internal users, publish release artifacts securely, and use consistent versioning. +* **[License](#license)**: Licenses are customized to each customer and are easy to issue, manage, and update. +* **[Install](#install)**: Provide unique installation options depending on customers' preferences and experience levels. +* **[Report](#report)**: Make more informed prioritization decisions by collecting usage and performance metadata for application instances running in customer environments. +* **[Support](#support)**: Diagnose and resolve support issues quickly. + +For more information about the Replicated features that support each of these phases, see the sections below. + +### Develop + +The Replicated SDK exposes an in-cluster API that can be developed against to quickly integrate and test core functionality with an application. For example, when the SDK is installed alongside an application in a customer environment, the in-cluster API can be used to send custom metrics from the instance to the Replicated vendor platform. + +For more information about using the Replicated SDK, see [About the Replicated SDK](/vendor/replicated-sdk-overview). + +### Test + +The Replicated Compatibility Matrix rapidly provisions ephemeral Kubernetes clusters, including multi-node and OpenShift clusters. When integrated into existing CI/CD pipelines for an application, the Compatibility Matrix can be used to automatically create a variety of customer-representative environments for testing code changes. + +For more information, see [About Compatibility Matrix](/vendor/testing-about). + +### Release + +Release channels in the Replicated Vendor Portal allow ISVs to make different application versions available to different customers, without needing to maintain separate code bases. For example, a "Beta" channel can be used to share beta releases of an application with only a certain subset of customers. + +For more information about working with channels, see [About Channels and Releases](/vendor/releases-about). + +Additionally, the Replicated proxy registry grants proxy access to private application images using the customers' license. This ensures that customers have the right access to images based on the channel they are assigned. For more information about using the proxy registry, see [About the Replicated Proxy Registry](/vendor/private-images-about). + +### License + +Create customers in the Replicated Vendor Portal to handle licensing for your application in both online and air gap environments. For example: +* License free trials and different tiers of product plans +* Create and manage custom license entitlements +* Verify license entitlements both before installation and during runtime +* Measure and report usage + +For more information about working with customers and custom license fields, see [About Customers](/vendor/licenses-about). + +### Install + +Applications distributed with the Replicated Platform can support multiple different installation methods from the same application release, helping you to meet your customers where they are. 
For example: + +* Customers who are not experienced with Kubernetes or who prefer to deploy to a dedicated cluster in their environment can install on a VM or bare metal server with the Replicated Embedded Cluster installer. For more information, see [Embedded Cluster Overview](/vendor/embedded-overview). +* Customers familiar with Kubernetes and Helm can install in their own existing cluster using Helm. For more information, see [Installing with Helm](/vendor/install-with-helm). +* Customers installing into environments with limited or no outbound internet access (often referred to as air-gapped environments) can securely access and push images to their own internal registry, then install using Helm or a Replicated installer. For more information, see [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap) and [Installing and Updating with Helm in Air Gap Environments (Alpha)](/vendor/helm-install-airgap). + +### Report + +When installed alongside an application, the Replicated SDK and Replicated KOTS automatically send instance data from the customer environment to the Replicated Vendor Portal. This instance data includes health and status indicators, adoption metrics, and performance metrics. For more information, see [About Instance and Event Data](/vendor/instance-insights-event-data). + +ISVs can also set up email and Slack notifications to get alerted about important instance issues or performance trends. For more information, see [Configuring Instance Notifications](/vendor/instance-notifications-config). + +### Support + +Support teams can use Replicated features to more quickly diagnose and resolve application issues. For example: + +- Customize and generate support bundles, which collect and analyze redacted information from the customer's cluster, environment, and application instance. See [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about). +- Provision customer-representative environments with Compatibility Matrix to recreate and diagnose issues. See [About Compatibility Matrix](/vendor/testing-about). +- Get insights into an instance's status by accessing telemetry data, which covers the health of the application, the current application version, and details about the infrastructure and cluster where the application is running. For more information, see [Customer Reporting](/vendor/customer-reporting).
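+ +As a concrete example of the support workflow, a minimal support bundle run with the open source Troubleshoot tooling might look like the following sketch. The krew-style installer mirrors the preflight plugin installation shown elsewhere in these docs, and the `--load-cluster-specs` flag assumes that support bundle specs have already been deployed to the cluster: + +```bash +# Install the support-bundle kubectl plugin +curl https://krew.sh/support-bundle | bash + +# Generate a bundle from support bundle specs found in the cluster +kubectl support-bundle --load-cluster-specs +```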
+ +================ +File: docs/intro.md +================ +--- +slug: / +pagination_next: null +--- + +# Home + +<section class="tile__container"> + <ul id="whats-new"> + <li class="tile__header"> + <img src="/images/icons/chat_bubble.png" alt="chat bubble icon" width="55px" height="55px"></img> + <p>What's New?</p> + </li> + <li> + <h3>Embedded Cluster 2.0 Release</h3> + <p>The 2.0 release brings improvements to architecture that increase the reliability and stability of Embedded Cluster.</p> + </li> + <li> + <a href="/release-notes/rn-embedded-cluster#200">Learn more</a> + </li> + </ul> + <ul id="did-you-know"> + <li class="tile__header"> + <img src="/images/icons/lightbulb.png" alt="lightbulb icon" width="55px" height="55px"></img> + <p>Did You Know?</p> + </li> + <li> + <h3>Manage Supported Install Methods Per Customer</h3> + <p>Control which installation methods are available for each customer from the **Install types** field in the customer's license.</p> + </li> + <li> + <a href="/vendor/licenses-install-types">Learn more</a> + </li> + </ul> +</section> +<section class="tile__container"> +<ul> + <li class="tile__header"> + <img src="images/icons/alien_vault.png" alt="ufo icon" width="55px" height="55px"></img> + <p>Getting Started with Replicated</p> + </li> + <li> + <p>Onboarding workflows, tutorials, and labs to help you get started with Replicated quickly.</p> + </li> + <li> + <a href="intro-replicated">Introduction to Replicated</a> + </li> + <li> + <a href="/vendor/kots-faq">Replicated FAQs</a> + </li> + <li> + <a href="/vendor/replicated-onboarding">Replicated Onboarding</a> + </li> + <li> + <a href="/vendor/tutorial-embedded-cluster-setup">Tutorials</a> + </li> + </ul> +</section> +<section class="tile__container"> +<ul> + <li class="tile__header"> + <img src="images/icons/vendor_portal_1.png" alt="vendor portal icon" width="55px" height="55px"></img> + <p>Vendor Platform</p> + </li> + <li> + <p>Create and manage your account and team.</p> + </li> + <li> + <a href="/vendor/vendor-portal-creating-account">Creating a Vendor Account</a> + </li> + <li> + <a href="/vendor/team-management#invite-members">Managing Team Members</a> + </li> + <li> + <a href="/vendor/team-management-rbac-configuring">Configuring RBAC Policies</a> + </li> + </ul> + <ul> + <li class="tile__header"> + <img src="images/icons/release.png" alt="rocket ship icon" width="55px" height="55px"></img> + <p>Compatibility Matrix</p> + </li> + <li> + <p>Rapidly create Kubernetes clusters, including OpenShift.</p> + </li> + <li> + <a href="/vendor/testing-about">About Compatibility Matrix</a> + </li> + <li> + <a href="/vendor/testing-how-to">Using Compatibility Matrix</a> + </li> + <li> + <a href="/vendor/testing-supported-clusters">Supported Cluster Types</a> + </li> + <li> + <a href="/vendor/testing-cluster-addons">Cluster Add-ons</a> + </li> + <li> + <a href="/vendor/ci-workflows">Recommended CI/CD Workflows</a> + </li> + </ul> + </section> +<section class="tile__container"> + <ul> + <li class="tile__header"> + <img src="images/icons/helm-logo.png" alt="helm logo" id="helm"></img> + <p>Helm Charts</p> + </li> + <li> + <p>Distribute Helm charts with Replicated.</p> + </li> + <li> + <a href="/vendor/install-with-helm">Helm Installations with Replicated</a> + </li> + <li> + <a href="/vendor/helm-install-release">Packaging a Helm Chart for a Release</a> + </li> + <li> + <a href="/vendor/replicated-sdk-overview">About the Replicated SDK</a> + </li> + </ul> + </section> +<section class="tile__container"> + <ul> + <li 
class="tile__header"> + <img src="images/icons/admin.png" alt="kots icon"></img> + <p>Replicated KOTS</p> + </li> + <li> + <p>A kubectl plugin and in-cluster Admin Console that installs applications in customer-controlled environments.</p> + </li> + <li> + <a href="intro-kots">Introduction to KOTS</a> + </li> + <li> + <a href="/vendor/helm-native-about">About Distributing Helm Charts with KOTS</a> + </li> + </ul> + <ul> + <li class="tile__header"> + <img src="images/icons/k8s_installer.png" alt="installer icon"></img> + <p>Embedded Cluster</p> + </li> + <li> + <p>Embed Kubernetes with your application to support installations on VMs or bare metal servers.</p> + </li> + <li> + <a href="/vendor/embedded-overview">Embedded Cluster Overview</a> + </li> + <li> + <a href="/enterprise/installing-embedded">Installing with Embedded Cluster</a> + </li> + <li> + <a href="/vendor/tutorial-embedded-cluster-setup">Tutorial: Deploy a Helm Chart on a VM with Embedded Cluster</a> + </li> + </ul> +</section> +<section class="tile__container"> + <ul> + <li class="tile__header"> + <img src="images/icons/dashboard_1.png" alt="dashboard icon" width="55px" height="55px"></img> + <p>Insights and Telemetry</p> + </li> + <li> + <p>Get insights on installed instances of your application.</p> + </li> + <li> + <a href="/vendor/instance-insights-event-data">About Instance and Event Data</a> + </li> + <li> + <a href="/vendor/customer-adoption">Adoption Report</a> + </li> + <li> + <a href="/vendor/instance-insights-details">Instance Details</a> + </li> + <li> + <a href="/vendor/custom-metrics-about">Configuring Custom Metrics</a> + </li> + </ul> + <ul> + <li class="tile__header"> + <img src="images/icons/vendor_portal_2.png" alt="vendor portal icon" width="55px" height="55px"></img> + <p>Channels and Releases</p> + </li> + <li> + <p>Manage application releases with the vendor platform.</p> + </li> + <li> + <a href="/vendor/releases-about">About Channels and Releases</a> + </li> + <li> + <a href="/vendor/releases-creating-releases">Managing Releases with the Vendor Portal</a> + </li> + <li> + <a href="/vendor/releases-creating-cli">Managing Releases with the CLI</a> + </li> + </ul> + <ul> + <li class="tile__header"> + <img src="images/icons/licensing.png" alt="dashboard icon" width="55px" height="55px"></img> + <p>Customer Licensing</p> + </li> + <li> + <p>Create, customize, and issue customer licenses.</p> + </li> + <li> + <a href="/vendor/licenses-about">About Customers</a> + </li> + <li> + <a href="/vendor/releases-creating-customer">Creating and Managing Customers</a> + </li> + <li> + <a href="/vendor/licenses-adding-custom-fields">Managing Customer License Fields</a> + </li> + </ul> +</section> +<section class="tile__container"> + <ul> + <li class="tile__header"> + <img src="images/icons/checklist.png" alt="checklist icon" width="55px" height="55px"></img> + <p>Preflight Checks</p> + </li> + <li> + <p>Define and verify installation environment requirements.</p> + </li> + <li> + <a href="/vendor/preflight-defining">Defining Preflight Checks</a> + </li> + <li> + <a href="/vendor/preflight-running">Running Preflight Checks for Helm Installations</a> + </li> + <li> + <a href="/vendor/tutorial-preflight-helm-setup">Preflight Checks Tutorial for Helm Charts</a> + </li> + <li> + <a href="https://play.instruqt.com/embed/replicated/tracks/avoiding-installation-pitfalls?token=em_gJjtIzzTTtdd5RFG">Preflight Checks Lab in Instruqt</a> + </li> + </ul> + <ul> + <li class="tile__header"> + <img src="images/icons/support_bundle.png" 
alt="support bundle icon" width="55px" height="55px"></img> + <p>Support Bundles</p> + </li> + <li> + <p>Gather information about customer environments for troubleshooting.</p> + </li> + <li> + <a href="vendor/support-bundle-customizing">Adding and Customizing Support Bundles</a> + </li> + <li> + <a href="/vendor/support-host-support-bundles">Configuring Host Support Bundles</a> + </li> + <li> + <a href="/vendor/support-bundle-generating">Generating Support Bundles</a> + </li> + <li> + <a href="https://play.instruqt.com/embed/replicated/tracks/closing-information-gap?token=em_MO2XXCz3bAgwtEca">Support Bundles Lab in Instruqt</a> + </li> + </ul> +</section> +<section class="tile__container"> + <ul> + <li class="tile__header"> + <img src="images/icons/tools.png" alt="carpenter tools icon" width="55px" height="55px"></img> + <p>Developer Tools</p> + </li> + <li> + <p>APIs, CLIs, and an SDK for interacting with the Replicated platform.</p> + </li> + <li> + <a href="/reference/replicated-cli-installing">Replicated CLI</a> + </li> + <li> + <a href="/reference/vendor-api-using">Vendor API v3</a> + </li> + <li> + <a href="/reference/kots-cli-getting-started">KOTS CLI</a> + </li> + <li> + <a href="/vendor/replicated-sdk-overview">Replicated SDK</a> + </li> + <li> + <a href="/reference/replicated-sdk-apis">Replicated SDK API</a> + </li> + </ul> +</section> + +================ +File: src/components/HomepageFeatures.js +================ +import React from 'react'; +import clsx from 'clsx'; +import styles from './HomepageFeatures.module.css'; + +const FeatureList = [ + { + title: 'Easy to Use', + Svg: require('../../static/images/undraw_docusaurus_mountain.svg').default, + description: ( + <> + Docusaurus was designed from the ground up to be easily installed and + used to get your website up and running quickly. + </> + ), + }, + { + title: 'Focus on What Matters', + Svg: require('../../static/images/undraw_docusaurus_tree.svg').default, + description: ( + <> + Docusaurus lets you focus on your docs, and we'll do the chores. Go + ahead and move your docs into the <code>docs</code> directory. + </> + ), + }, + { + title: 'Powered by React', + Svg: require('../../static/images/undraw_docusaurus_react.svg').default, + description: ( + <> + Extend or customize your website layout by reusing React. Docusaurus can + be extended while reusing the same header and footer. + </> + ), + }, +]; + +function Feature({Svg, title, description}) { + return ( + <div className={clsx('col col--4')}> + <div className="text--center"> + <Svg className={styles.featureSvg} alt={title} /> + </div> + <div className="text--center padding-horiz--md"> + <h3>{title}</h3> + <p>{description}</p> + </div> + </div> + ); +} + +export default function HomepageFeatures() { + return ( + <section className={styles.features}> + <div className="container"> + <div className="row"> + {FeatureList.map((props, idx) => ( + <Feature key={idx} {...props} /> + ))} + </div> + </div> + </section> + ); +} + +================ +File: src/components/HomepageFeatures.module.css +================ +.features { + display: flex; + align-items: center; + padding: 2rem 0; + width: 100%; +} + +.featureSvg { + height: 200px; + width: 200px; +} + +================ +File: src/css/custom.css +================ +@import url('https://fonts.googleapis.com/css2?family=Open+Sans&display=swap'); +@import url('https://fonts.googleapis.com/css2?family=Poppins:wght@400;500;600&display=swap'); + +/** + * Any CSS included here will be global. 
The classic template + * bundles Infima by default. Infima is a CSS framework designed to + * work well for content-centric websites. + */ + +/* You can override the default Infima variables here. */ +:root { + --doc-sidebar-width: 350px !important; + --ifm-color-primary: #00959E; + --ifm-color-primary-dark: #00959E; + --ifm-color-primary-darker: #00959E; + --ifm-color-primary-darkest: #007b81; + --ifm-color-primary-light: #6DD2D2; + --ifm-color-primary-lighter: #97e2e2; + --ifm-color-primary-lightest: rgb(146, 221, 224); + --ifm-code-font-size: 95%; + --ifm-font-family-base: 'Open Sans'; + --ifm-heading-font-family: 'Poppins', sans-serif; + --ifm-heading-font-weight: 600; + /*this variable controls the background when hovering over items in the sidebar*/ + --ifm-menu-color-background-hover: transparent; + /*this variable controls the padding between items in the sidebar*/ + --ifm-menu-link-padding-vertical: 0.5rem; +} + +html[data-theme='light'] { + --ifm-heading-color: #2f2f2f; +} + +html[data-theme='dark'] { + --ifm-color-primary: #6DD2D2; +} + +.docusaurus-highlight-code-line { + background-color: rgba(0, 0, 0, 0.1); + display: block; + margin: 0 calc(-1 * var(--ifm-pre-padding)); + padding: 0 var(--ifm-pre-padding); +} + +html[data-theme='dark'] .docusaurus-highlight-code-line { + background-color: rgba(0, 0, 0, 0.3); +} + +article { + /* max-width: 800px; */ + margin: 10px 25px; +} + +.footer--dark { + --ifm-footer-background-color: #2f2f2f; +} + +.alert--warning { + --ifm-alert-background-color: var( --ifm-color-danger-contrast-background ); + --ifm-alert-background-color-highlight: rgba(250, 56, 62, 0.15); + --ifm-alert-foreground-color: var( --ifm-color-danger-contrast-foreground ); + --ifm-alert-border-color: var(--ifm-color-danger-dark); +} + +.alert a { + color: var(--ifm-color-primary-dark); + text-decoration-color: var(--ifm-color-primary-dark); +} + +.theme-admonition-note { + --ifm-code-background: var(--ifm-alert-background-color-highlight); + --ifm-link-color: var(--ifm-alert-foreground-color); + --ifm-link-hover-color: var(--ifm-alert-foreground-color); + --ifm-link-decoration: underline; + --ifm-tabs-color: var(--ifm-alert-foreground-color); + --ifm-tabs-color-active: var(--ifm-alert-foreground-color); + --ifm-tabs-color-active-border: var(--ifm-alert-border-color); + background-color: var(--ifm-alert-background-color); + border-color: var(--ifm-alert-border-color); + border-style: solid; + border-width: var(--ifm-alert-border-width); + border-left-width: var(--ifm-alert-border-left-width); + border-radius: var(--ifm-alert-border-radius); + box-shadow: var(--ifm-alert-shadow); + color: var(--ifm-alert-foreground-color); + padding: var(--ifm-alert-padding-vertical) var(--ifm-alert-padding-horizontal); + --ifm-alert-background-color: var( --ifm-color-info-contrast-background ); + --ifm-alert-background-color-highlight: rgba(84, 199, 236, 0.15); + --ifm-alert-foreground-color: var( --ifm-color-info-contrast-foreground ); + --ifm-alert-border-color: var(--ifm-color-info-dark); +} + +.admonition-note a { + color: var(--ifm-color-primary-dark); + text-decoration-color: var(--ifm-color-primary-dark); +} + +.admonition-important { + --ifm-code-background: var(--ifm-alert-background-color-highlight); + --ifm-link-color: var(--ifm-alert-foreground-color); + --ifm-link-hover-color: var(--ifm-alert-foreground-color); + --ifm-link-decoration: underline; + --ifm-tabs-color: var(--ifm-alert-foreground-color); + --ifm-tabs-color-active: var(--ifm-alert-foreground-color); + 
--ifm-tabs-color-active-border: var(--ifm-alert-border-color); + background-color: var(--ifm-alert-background-color); + border-color: var(--ifm-alert-border-color); + border-style: solid; + border-width: var(--ifm-alert-border-width); + border-left-width: var(--ifm-alert-border-left-width); + border-radius: var(--ifm-alert-border-radius); + box-shadow: var(--ifm-alert-shadow); + color: var(--ifm-alert-foreground-color); + padding: var(--ifm-alert-padding-vertical) var(--ifm-alert-padding-horizontal); + --ifm-alert-background-color: var( --ifm-color-danger-contrast-background ); + --ifm-alert-background-color-highlight: rgba(250, 56, 62, 0.15); + --ifm-alert-foreground-color: var( --ifm-color-danger-contrast-foreground ); + --ifm-alert-border-color: var(--ifm-color-danger-dark); +} + +.admonition-important a { + color: var(--ifm-color-primary-dark); + text-decoration-color: var(--ifm-color-primary-dark); +} + +ol ol { + list-style-type: lower-alpha; +} + +ol ol ol { + list-style-type: lower-roman +} + +.DocSearch-Logo { + display: none; +} + +.DocSearch-Footer { + justify-content: center !important; +} + +/* Landing page */ + +.tile__container { + display: flex; + flex-direction: row; + gap: 0.7em; +} + +.tile__container > ul { + padding: 1.5em; + list-style: none; + border-radius: 7px; + border: 1px solid rgba(88,88,88,0.2); + width: 100%; + display: flex; + flex-direction: column; + justify-content: flex-start; +} + +.tile__container > ul:hover { + background-image: linear-gradient(to bottom right, rgba(88,88,88,0.03), rgba(88,88,88,0.01)); +} + +[data-theme='dark'] .tile__container > ul:hover { + background-image: linear-gradient(to bottom right, rgba(88,88,88,0.08), rgba(88,88,88,0.02)); +} + +.tile__container ul li h3 { + padding-top: 15px; +} + +.tile__header { + display: flex; + flex-direction: row; + align-items: center; + gap: 1.0em; + font-size: 1.25em; +} + +.tile__header img { + max-width: 55px; + height: 55px; + background-color:rgba(255,72,86,0.1); + border-radius: 7px; +} + +@media (max-width: 1300px) { + .tile__container { + flex-direction: column; + } +} + +#helm { + background-color:rgba(15, 22, 137,0.05); +} + +[data-theme='dark'] #helm { + background-color:rgb(169, 169, 198); +} + +#whats-new, +[data-theme='dark'] #whats-new:hover { + background-image: linear-gradient(to bottom right, rgba(69,145,247,0.1), rgba(69,145,247,0.025)); + border: 1px solid rgba(69,145,247,0.1); +} + +#whats-new:hover, +[data-theme='dark'] #whats-new { + background-image: linear-gradient(to bottom right, rgba(69,145,247,0.2), rgba(69,145,247,0.05)); +} + +#whats-new .tile__header img { + background-color: rgba(69,145,247,0.2); +} + +[data-theme='dark'] #whats-new .tile__header img { + content:url("/images/icons/chat_bubble_white.png"); +} + +#whats-new li a { + color: rgb(69,145,247); +} + +#did-you-know, +[data-theme='dark'] #did-you-know:hover { + background-image: linear-gradient(to bottom right, rgba(21, 140, 54,0.1), rgba(21, 140, 54,0.025)); + border: 1px solid rgba(21, 140, 54,0.1); +} + +#did-you-know:hover, +[data-theme='dark'] #did-you-know { + background-image: linear-gradient(to bottom right, rgba(21, 140, 54,0.2), rgba(21, 140, 54,0.05)); +} + +#did-you-know .tile__header img { + background-color: rgba(21, 140, 54,0.2); +} + +[data-theme='dark'] #did-you-know .tile__header img { + content:url("/images/icons/lightbulb_white.png"); +} + +#did-you-know li a { + color: rgb(21, 140, 54); +} + +/* Sidebar */ + +.menu__list-item > h5 { + text-transform: uppercase; + margin: 1rem 0 0 0; +} 
+
+/* Sidebar */
+
+.menu__list-item > h5 {
+  text-transform: uppercase;
+  margin: 1rem 0 0 0;
+}
+
+.theme-doc-sidebar-menu.menu__list {
+  margin-bottom: 75px;
+}
+
+a.menu__link {
+  font-size: 0.9em;
+}
+
+.menu__list-item > a:hover,
+.menu__list-item-collapsible > a:hover {
+  color: var(--ifm-color-primary);
+}
+
+/* Navbar */
+
+.dropdown__link:hover, .navbar__link {
+  background-color: transparent;
+  color: var(--ifm-dropdown-link-color);
+  text-decoration: none;
+}
+
+.dropdown__link--active, .navbar__link--active {
+  background-color: transparent;
+  color: #2f2f2f;
+}
+
+/* Both selectors must be scoped to the dark theme; an unscoped second
+   selector would override the light-theme colors above. */
+html[data-theme='dark'] .dropdown__link--active,
+html[data-theme='dark'] .navbar__link--active {
+  color: white;
+}
+
+html[data-theme='dark'] .dropdown__link:hover,
+html[data-theme='dark'] .navbar__link {
+  color: var(--ifm-dropdown-link-color);
+  text-decoration: none;
+}
+
+.dropdown > .navbar__link:after {
+  content: '';
+  border: none;
+  position: static;
+  top: auto;
+  transform: none;
+  width: 12px;
+  height: 12px;
+  background-image: url("data:image/svg+xml,%3Csvg width='24' height='24' fill='none' viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M4.293 8.293a1 1 0 0 1 1.414 0L12 14.586l6.293-6.293a1 1 0 1 1 1.414 1.414l-7 7a1 1 0 0 1-1.414 0l-7-7a1 1 0 0 1 0-1.414Z' fill='%23888888'/%3E%3C/svg%3E");
+  background-size: 12px;
+  background-repeat: no-repeat;
+}
+
+html[data-theme='dark'] .dropdown > .navbar__link:after {
+  background-image: url("data:image/svg+xml,%3Csvg width='24' height='24' fill='none' viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M4.293 8.293a1 1 0 0 1 1.414 0L12 14.586l6.293-6.293a1 1 0 1 1 1.414 1.414l-7 7a1 1 0 0 1-1.414 0l-7-7a1 1 0 0 1 0-1.414Z' fill='%23ffffff'/%3E%3C/svg%3E");
+}
+
+/* Release Note label styling. The selectors match the ids Docusaurus
+   generates for the "New Features", "Improvements", "Bug Fixes",
+   "Breaking Changes", and "Known Issues" headings; the prefix match (^=)
+   also catches deduplicated ids such as #new-features-1. */
+
+h3[id^="new-features"],
+h3[id^="improvements"],
+h3[id^="bug-fixes"],
+h3[id^="breaking-changes"],
+h3[id^="known-issues"] {
+  border-radius: 7px !important;
+  color: #fff;
+  width: max-content;
+  padding: 0.2em 0.6em;
+  font-weight: 500;
+  font-size: 20px;
+}
+
+h3[id^="new-features"] {
+  background-color: #4BC99C;
+}
+
+h3[id^="improvements"] {
+  background-color: #38C1CA;
+}
+
+h3[id^="bug-fixes"] {
+  background-color: #F47878;
+}
+
+h3[id^="breaking-changes"] {
+  background-color: #d34a54;
+}
+
+h3[id^="known-issues"] {
+  background-color: #414288;
+}
+
+h3[id^="new-features"] a,
+h3[id^="improvements"] a,
+h3[id^="bug-fixes"] a,
+h3[id^="breaking-changes"] a,
+h3[id^="known-issues"] a {
+  color: #fff;
+  opacity: 0.5;
+  text-decoration: none;
+}
+
+td#center {
+  text-align: center;
+}
+
+================
+File: src/theme/Admonition/index.js
+================
+import React from 'react';
+import clsx from 'clsx';
+import {ThemeClassNames} from '@docusaurus/theme-common';
+import Translate from '@docusaurus/Translate';
+import styles from './styles.module.css';
+
+// Octicon-style glyphs used in the admonition headings.
+function NoteIcon() {
+  return (
+    <svg viewBox="0 0 14 16">
+      <path
+        fillRule="evenodd"
+        d="M7 2.3c3.14 0 5.7 2.56 5.7 5.7s-2.56 5.7-5.7 5.7A5.71 5.71 0 0 1 1.3 8c0-3.14 2.56-5.7 5.7-5.7zM7 1C3.14 1 0 4.14 0 8s3.14 7 7 7 7-3.14 7-7-3.14-7-7-7zm1 3H6v5h2V4zm0 6H6v2h2v-2z"
+      />
+    </svg>
+  );
+}
+function TipIcon() {
+  return (
+    <svg viewBox="0 0 12 16">
+      <path
+        fillRule="evenodd"
+        d="M6.5 0C3.48 0 1 2.19 1 5c0 .92.55 2.25 1 3 1.34 2.25 1.78 2.78 2 4v1h5v-1c.22-1.22.66-1.75 2-4 .45-.75 1-2.08 1-3 0-2.81-2.48-5-5.5-5zm3.64 7.48c-.25.44-.47.8-.67 1.11-.86 1.41-1.25 2.06-1.45 3.23-.02.05-.02.11-.02.17H5c0-.06 0-.13-.02-.17-.2-1.17-.59-1.83-1.45-3.23-.2-.31-.42-.67-.67-1.11C2.44 6.78 2 5.65 2 5c0-2.2 2.02-4 4.5-4 1.22 0 2.36.42 3.22 1.19C10.55 2.94 11 3.94 11 5c0 .66-.44 1.78-.86 2.48zM4 14h5c-.23 1.14-1.3 2-2.5 2s-2.27-.86-2.5-2z"
+      />
+    </svg>
+  );
+}
+function DangerIcon() {
+  return (
+    <svg viewBox="0 0 12 16">
+      <path
+        fillRule="evenodd"
+        d="M5.05.31c.81 2.17.41 3.38-.52 4.31C3.55 5.67 1.98 6.45.9 7.98c-1.45 2.05-1.7 6.53 3.53 7.7-2.2-1.16-2.67-4.52-.3-6.61-.61 2.03.53 3.33 1.94 2.86 1.39-.47 2.3.53 2.27 1.67-.02.78-.31 1.44-1.13 1.81 3.42-.59 4.78-3.42 4.78-5.56 0-2.84-2.53-3.22-1.25-5.61-1.52.13-2.03 1.13-1.89 2.75.09 1.08-1.02 1.8-1.86 1.33-.67-.41-.66-1.19-.06-1.78C8.18 5.31 8.68 2.45 5.05.32L5.03.3l.02.01z"
+      />
+    </svg>
+  );
+}
+// Intentionally the same glyph as NoteIcon.
+function InfoIcon() {
+  return (
+    <svg viewBox="0 0 14 16">
+      <path
+        fillRule="evenodd"
+        d="M7 2.3c3.14 0 5.7 2.56 5.7 5.7s-2.56 5.7-5.7 5.7A5.71 5.71 0 0 1 1.3 8c0-3.14 2.56-5.7 5.7-5.7zM7 1C3.14 1 0 4.14 0 8s3.14 7 7 7 7-3.14 7-7-3.14-7-7-7zm1 3H6v5h2V4zm0 6H6v2h2v-2z"
+      />
+    </svg>
+  );
+}
+function CautionIcon() {
+  return (
+    <svg viewBox="0 0 16 16">
+      <path
+        fillRule="evenodd"
+        d="M8.893 1.5c-.183-.31-.52-.5-.887-.5s-.703.19-.886.5L.138 13.499a.98.98 0 0 0 0 1.001c.193.31.53.501.886.501h13.964c.367 0 .704-.19.877-.5a1.03 1.03 0 0 0 .01-1.002L8.893 1.5zm.133 11.497H6.987v-2.003h2.039v2.003zm0-3.004H6.987V5.987h2.039v4.006z"
+      />
+    </svg>
+  );
+}
+// eslint-disable-next-line @typescript-eslint/consistent-indexed-object-style
+const AdmonitionConfigs = {
+  note: {
+    infimaClassName: 'secondary',
+    iconComponent: NoteIcon,
+    label: (
+      <Translate
+        id="theme.admonition.note"
+        description="The default label used for the Note admonition (:::note)">
+        note
+      </Translate>
+    ),
+  },
+  tip: {
+    infimaClassName: 'success',
+    iconComponent: TipIcon,
+    label: (
+      <Translate
+        id="theme.admonition.tip"
+        description="The default label used for the Tip admonition (:::tip)">
+        tip
+      </Translate>
+    ),
+  },
+  danger: {
+    infimaClassName: 'danger',
+    iconComponent: DangerIcon,
+    label: (
+      <Translate
+        id="theme.admonition.danger"
+        description="The default label used for the Danger admonition (:::danger)">
+        danger
+      </Translate>
+    ),
+  },
+  info: {
+    infimaClassName: 'info',
+    iconComponent: InfoIcon,
+    label: (
+      <Translate
+        id="theme.admonition.info"
+        description="The default label used for the Info admonition (:::info)">
+        info
+      </Translate>
+    ),
+  },
+  caution: {
+    infimaClassName: 'warning',
+    iconComponent: CautionIcon,
+    label: (
+      <Translate
+        id="theme.admonition.caution"
+        description="The default label used for the Caution admonition (:::caution)">
+        caution
+      </Translate>
+    ),
+  },
+  // Custom type for these docs: reuses the caution icon and styling, but
+  // needs its own translation id so the "important" label is not replaced
+  // by a translated "caution" string.
+  important: {
+    infimaClassName: 'warning',
+    iconComponent: CautionIcon,
+    label: (
+      <Translate
+        id="theme.admonition.important"
+        description="The default label used for the Important admonition (:::important)">
+        important
+      </Translate>
+    ),
+  },
+};
+// Legacy aliases, undocumented but kept for backwards compatibility
+const aliases =
{ + secondary: 'note', + success: 'tip', + warning: 'danger', +}; +function getAdmonitionConfig(unsafeType) { + const type = aliases[unsafeType] ?? unsafeType; + const config = AdmonitionConfigs[type]; + if (config) { + return config; + } + console.warn( + `No admonition config found for admonition type "${type}". Using Info as fallback.`, + ); + return AdmonitionConfigs.info; +} +// Workaround because it's difficult in MDX v1 to provide a MDX title as props +// See https://github.com/facebook/docusaurus/pull/7152#issuecomment-1145779682 +function extractMDXAdmonitionTitle(children) { + const items = React.Children.toArray(children); + const mdxAdmonitionTitle = items.find( + (item) => + React.isValidElement(item) && + item.props?.mdxType === 'mdxAdmonitionTitle', + ); + const rest = <>{items.filter((item) => item !== mdxAdmonitionTitle)}</>; + return { + mdxAdmonitionTitle, + rest, + }; +} +function processAdmonitionProps(props) { + const {mdxAdmonitionTitle, rest} = extractMDXAdmonitionTitle(props.children); + return { + ...props, + title: props.title ?? mdxAdmonitionTitle, + children: rest, + }; +} +export default function Admonition(props) { + const {children, type, title, icon: iconProp} = processAdmonitionProps(props); + const typeConfig = getAdmonitionConfig(type); + const titleLabel = title ?? typeConfig.label; + const {iconComponent: IconComponent} = typeConfig; + const icon = iconProp ?? <IconComponent />; + return ( + <div + className={clsx( + ThemeClassNames.common.admonition, + ThemeClassNames.common.admonitionType(props.type), + 'alert', + `alert--${typeConfig.infimaClassName}`, + styles.admonition, + )}> + <div className={styles.admonitionHeading}> + <span className={styles.admonitionIcon}>{icon}</span> + {titleLabel} + </div> + <div className={styles.admonitionContent}>{children}</div> + </div> + ); +} + +================ +File: src/theme/Admonition/styles.module.css +================ +.admonition { + margin-bottom: 1em; +} + +.admonitionHeading { + font: var(--ifm-heading-font-weight) var(--ifm-h5-font-size) / + var(--ifm-heading-line-height) var(--ifm-heading-font-family); + text-transform: uppercase; + margin-bottom: 0.3rem; +} + +.admonitionHeading code { + text-transform: none; +} + +.admonitionIcon { + display: inline-block; + vertical-align: middle; + margin-right: 0.4em; +} + +.admonitionIcon svg { + display: inline-block; + height: 1.6em; + width: 1.6em; + fill: var(--ifm-alert-foreground-color); +} + +.admonitionContent > :last-child { + margin-bottom: 0; +} + +================ +File: src/theme/DocItem/Footer/index.js +================ +import React from 'react'; +import clsx from 'clsx'; +import {ThemeClassNames} from '@docusaurus/theme-common'; +import {useDoc} from '@docusaurus/plugin-content-docs/client'; +import LastUpdated from '@theme/LastUpdated'; +import EditThisPage from '@theme/EditThisPage'; +import TagsListInline from '@theme/TagsListInline'; + +import styles from './styles.module.css'; + +function TagsRow(props) { + return ( + <div + className={clsx( + ThemeClassNames.docs.docFooterTagsRow, + 'row margin-bottom--sm', + )}> + <div className="col"> + <TagsListInline {...props} /> + </div> + </div> + ); +} +function EditMetaRow({ + editUrl, + lastUpdatedAt, + lastUpdatedBy, + formattedLastUpdatedAt, +}) { + return ( + <div className={clsx(ThemeClassNames.docs.docFooterEditMetaRow, 'row')}> + <div className="col">{editUrl && <EditThisPage editUrl={editUrl} />}</div> + + <div className={clsx('col', styles.lastUpdated)}> + {(lastUpdatedAt || lastUpdatedBy) 
&& ( + <LastUpdated + lastUpdatedAt={lastUpdatedAt} + formattedLastUpdatedAt={formattedLastUpdatedAt} + lastUpdatedBy={lastUpdatedBy} + /> + )} + </div> + </div> + ); +} +export default function DocItemFooter() { + const {metadata} = useDoc(); + const {editUrl, lastUpdatedAt, formattedLastUpdatedAt, lastUpdatedBy, tags} = + metadata; + const canDisplayTagsRow = tags.length > 0; + const canDisplayEditMetaRow = !!(editUrl || lastUpdatedAt || lastUpdatedBy); + const canDisplayFooter = canDisplayTagsRow || canDisplayEditMetaRow; + if (!canDisplayFooter) { + return null; + } + return ( + <footer + className={clsx(ThemeClassNames.docs.docFooter, 'docusaurus-mt-lg')}> + {canDisplayTagsRow && <TagsRow tags={tags} />} + {canDisplayEditMetaRow && ( + <EditMetaRow + editUrl={editUrl} + lastUpdatedAt={lastUpdatedAt} + lastUpdatedBy={lastUpdatedBy} + formattedLastUpdatedAt={formattedLastUpdatedAt} + /> + )} + </footer> + ); +} + +================ +File: src/theme/DocItem/Footer/styles.module.css +================ +.lastUpdated { + margin-top: 0.2rem; + font-style: italic; + font-size: smaller; + flex: none; +} + +@media (min-width: 997px) { + .lastUpdated { + text-align: right; + } +} + +================ +File: src/theme/EditThisPage/index.js +================ +/** + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ +import React from "react"; +import Translate from "@docusaurus/Translate"; +import { ThemeClassNames } from "@docusaurus/theme-common"; +import ReportIcon from "../../../static/images/report.svg"; +import PullRequestIcon from "../../../static/images/git-pull-request.svg"; +import styles from "./styles.module.css"; + +export default function EditThisPage({ editUrl }) { + const url = typeof window !== "undefined" ? window.location.href : ""; + const issueTitle = + typeof window !== "undefined" + ? url.substring(url.lastIndexOf("/") + 1) + : ""; + + return ( + <div className={styles.githubLinksWrapper}> + <a + href={editUrl} + target="_blank" + rel="noreferrer noopener" + className={ThemeClassNames.common.editThisPage} + style={{ textDecoration: "none" }} + > + <div className={styles.iconTextWrapper}> + <PullRequestIcon className={styles.icon} /> + <Translate + id="theme.common.editThisPage" + description="The link label to edit the current page" + > + Propose Changes + </Translate> + </div> + </a> + <a + href={`https://github.com/replicatedhq/replicated-docs/issues/new?title=Docs%20feedback%20on%20${issueTitle}&body=URL:%20${url}%0AFeedback%20details:`} + target="_blank" + rel="noreferrer noopener" + className={ThemeClassNames.common.editThisPage} + style={{ textAlign: "right", textDecoration: "none" }} + > + <div className={styles.iconTextWrapper}> + <ReportIcon className={styles.icon} /> + <Translate + id="theme.common.provideFeedback" + description="The link label to provide feedback in github" + > + Provide Feedback + </Translate> + </div> + </a> + </div> + ); +} + +================ +File: src/theme/EditThisPage/styles.module.css +================ +/** + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +.icon { + width: 18px; + height: 18px; + margin-right: 7px; + fill:#6DD2D2 +} + +.iconTextWrapper { + display: flex; + align-items: center; + border: 2px solid #6DD2D2; + padding: 8px; + border-radius: 8px; +} +.iconTextWrapper:hover { + color: #ffffff; + background-color: #6DD2D2; + fill:#ffffff; + cursor: pointer; +} +.iconTextWrapper:hover .icon { + fill:#ffffff; +} + +.githubLinksWrapper { + display:flex; + gap: 10px; +} + +@media (max-width: 500px) { + .githubLinksWrapper { + flex-direction: column; + } +} + +================ +File: static/images/icons/chat_bubble_white.svg +================ +<?xml version="1.0" standalone="no"?> +<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 20010904//EN" + "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd"> +<svg version="1.0" xmlns="http://www.w3.org/2000/svg" + width="285.000000pt" height="285.000000pt" viewBox="0 0 285.000000 285.000000" + preserveAspectRatio="xMidYMid meet"> + +<g transform="translate(0.000000,285.000000) scale(0.100000,-0.100000)" +fill="#000000" stroke="none"> +</g> +</svg> + +================ +File: static/images/icons/vendor_portal_1.svg +================ +<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" + width="100%" viewBox="0 0 285 285" enable-background="new 0 0 285 285" xml:space="preserve"> +<path fill="#000000" opacity="1.000000" stroke="none" + d=" +M159.000000,286.000000 + C106.023033,286.000000 53.546062,286.000000 1.034547,286.000000 + C1.034547,191.064713 1.034547,96.129417 1.034547,1.097060 + C95.893524,1.097060 190.787186,1.097060 285.840424,1.097060 + C285.840424,95.999786 285.840424,190.999863 285.840424,286.000000 + C243.796265,286.000000 201.648132,286.000000 159.000000,286.000000 +M53.231998,50.027176 + C44.789848,52.108334 41.829479,55.291466 41.025131,63.928169 + C41.016796,118.087730 41.004070,172.247284 41.004780,226.406845 + C41.004887,234.702469 47.331913,240.978958 55.881401,240.983490 + C114.040573,241.014328 172.199783,241.021362 230.358948,240.976440 + C239.256378,240.969559 245.953247,234.462296 245.964645,225.650650 + C246.033890,172.157608 246.046539,118.664368 245.958130,65.171387 + C245.940857,54.717991 237.847260,49.831360 230.466095,49.863369 + C173.307861,50.111263 116.148048,49.998680 58.988731,50.007084 + C57.325935,50.007328 55.663151,50.111172 53.231998,50.027176 +z"/> +<path fill="#FF4856" opacity="1.000000" stroke="none" + d=" +M54.000362,50.166710 + C55.663151,50.111172 57.325935,50.007328 58.988731,50.007084 + C116.148048,49.998680 173.307861,50.111263 230.466095,49.863369 + C237.847260,49.831360 245.940857,54.717991 245.958130,65.171387 + C246.046539,118.664368 246.033890,172.157608 245.964645,225.650650 + C245.953247,234.462296 239.256378,240.969559 230.358948,240.976440 + C172.199783,241.021362 114.040573,241.014328 55.881401,240.983490 + C47.331913,240.978958 41.004887,234.702469 41.004780,226.406845 + C41.004070,172.247284 41.016796,118.087730 41.451366,63.431286 + C46.939880,59.648273 52.249680,56.597160 54.000362,50.166710 +M143.500000,228.000061 + C171.497650,228.000046 199.495941,227.905807 227.492294,228.096237 + C231.964478,228.126648 233.104736,226.719482 233.087540,222.393387 + C232.936401,184.397171 233.003052,146.400085 232.988007,108.403267 + C232.987442,106.980667 232.811172,105.558136 232.724457,104.251144 + C172.967789,104.251144 113.699562,104.251144 54.298836,104.251144 + C54.298836,145.556625 54.298836,186.610291 54.298836,228.000061 + C83.897797,228.000061 
113.198898,228.000061 143.500000,228.000061 +M122.500000,91.000076 + C159.249832,91.000076 195.999664,91.000076 232.610565,91.000076 + C232.610565,81.257408 232.610565,72.199127 232.610565,63.251472 + C172.957336,63.251472 113.693901,63.251472 54.300911,63.251472 + C54.300911,72.560608 54.300911,81.615784 54.300911,91.000076 + C76.900421,91.000076 99.200211,91.000076 122.500000,91.000076 +z"/> +<path fill="#FF4856" opacity="1.000000" stroke="none" + d=" +M53.616180,50.096943 + C52.249680,56.597160 46.939880,59.648273 41.522152,62.967384 + C41.829479,55.291466 44.789848,52.108334 53.616180,50.096943 +z"/> +<path fill="#000000" opacity="1.000000" stroke="none" + d=" +M143.000000,228.000061 + C113.198898,228.000061 83.897797,228.000061 54.298836,228.000061 + C54.298836,186.610291 54.298836,145.556625 54.298836,104.251144 + C113.699562,104.251144 172.967789,104.251144 232.724457,104.251144 + C232.811172,105.558136 232.987442,106.980667 232.988007,108.403267 + C233.003052,146.400085 232.936401,184.397171 233.087540,222.393387 + C233.104736,226.719482 231.964478,228.126648 227.492294,228.096237 + C199.495941,227.905807 171.497650,228.000046 143.000000,228.000061 +M145.999176,192.917984 + C143.477783,188.261734 141.952728,182.517059 138.261826,179.138412 + C122.621590,164.821426 90.041939,164.592468 75.678360,182.779297 + C70.804405,188.950577 69.766563,195.856094 71.730400,203.131348 + C73.661316,210.284637 80.049728,211.699692 86.091820,211.864151 + C101.073380,212.271988 116.076927,212.184738 131.064499,211.908264 + C137.271454,211.793762 143.417984,210.365051 145.281860,203.101608 + C146.042435,200.137589 145.790878,196.913834 145.999176,192.917984 +M129.720490,137.251480 + C124.363701,118.841919 102.565468,115.476257 92.582565,127.810570 + C86.021599,135.916931 85.549751,144.012527 89.853226,152.163681 + C94.627655,161.206879 104.904396,165.057312 114.090660,162.488068 + C123.007912,159.994064 131.607880,151.338272 129.720490,137.251480 +M186.679199,136.000000 + C185.847198,136.000031 185.015106,135.992523 184.183197,136.001328 + C179.336075,136.052628 176.987747,138.025726 177.001556,142.030212 + C177.015350,146.031998 179.358124,147.984940 184.245148,147.995010 + C192.731583,148.012512 201.220596,147.876373 209.703522,148.050095 + C214.109512,148.140320 215.815903,145.340622 215.820816,141.949463 + C215.825821,138.488846 213.938156,135.843658 209.602646,135.950821 + C202.285446,136.131683 194.960526,136.000153 186.679199,136.000000 +M195.503754,159.999985 + C191.345367,160.000214 187.185928,159.941742 183.028915,160.017593 + C179.059692,160.090012 176.930298,162.162140 177.021591,166.181961 + C177.106766,169.932816 179.120758,171.965912 182.895996,171.984619 + C191.877869,172.029129 200.861038,171.923386 209.841827,172.031921 + C214.209122,172.084702 215.879868,169.233459 215.822357,165.818817 + C215.769012,162.651489 214.090866,159.860321 209.972412,159.972855 + C205.484726,160.095459 200.991074,160.000244 195.503754,159.999985 +M204.400330,183.999985 + C197.245209,184.000275 190.089905,183.971802 182.935013,184.012558 + C179.165619,184.034042 177.161942,186.022980 177.011353,189.781052 + C176.859726,193.565781 178.851669,195.855408 182.499130,195.931976 + C191.813324,196.127518 201.135651,196.088715 210.452026,195.955078 + C214.520706,195.896744 216.831329,191.668106 215.438217,187.837067 + C213.505051,182.520828 209.082306,184.307114 204.400330,183.999985 +z"/> +<path fill="#000000" opacity="1.000000" stroke="none" + d=" +M122.000000,91.000076 + C99.200211,91.000076 76.900421,91.000076 
54.300911,91.000076 + C54.300911,81.615784 54.300911,72.560608 54.300911,63.251472 + C113.693901,63.251472 172.957336,63.251472 232.610565,63.251472 + C232.610565,72.199127 232.610565,81.257408 232.610565,91.000076 + C195.999664,91.000076 159.249832,91.000076 122.000000,91.000076 +z"/> +<path fill="#FF4856" opacity="1.000000" stroke="none" + d=" +M145.999481,193.363129 + C145.790878,196.913834 146.042435,200.137589 145.281860,203.101608 + C143.417984,210.365051 137.271454,211.793762 131.064499,211.908264 + C116.076927,212.184738 101.073380,212.271988 86.091820,211.864151 + C80.049728,211.699692 73.661316,210.284637 71.730400,203.131348 + C69.766563,195.856094 70.804405,188.950577 75.678360,182.779297 + C90.041939,164.592468 122.621590,164.821426 138.261826,179.138412 + C141.952728,182.517059 143.477783,188.261734 145.999481,193.363129 +M87.461456,189.028534 + C84.475235,191.603546 83.216606,194.719727 84.259903,198.320251 + C100.911415,198.320251 117.358025,198.320251 133.959763,198.320251 + C131.871246,190.525131 129.680634,187.340637 123.613594,184.985886 + C111.232735,180.180649 99.150154,180.050476 87.461456,189.028534 +z"/> +<path fill="#FF4856" opacity="1.000000" stroke="none" + d=" +M129.838547,137.664871 + C131.607880,151.338272 123.007912,159.994064 114.090660,162.488068 + C104.904396,165.057312 94.627655,161.206879 89.853226,152.163681 + C85.549751,144.012527 86.021599,135.916931 92.582565,127.810570 + C102.565468,115.476257 124.363701,118.841919 129.838547,137.664871 +M111.383621,133.996231 + C107.535873,131.693985 104.378052,134.096191 102.393745,136.502167 + C100.891022,138.324249 100.628708,141.795181 100.980408,144.373291 + C101.500412,148.185120 104.221619,150.085220 108.338448,150.058624 + C112.243759,150.033371 115.133972,148.610168 116.479851,144.965759 + C118.248917,140.175415 116.094482,136.707275 111.383621,133.996231 +z"/> +<path fill="#FF4856" opacity="1.000000" stroke="none" + d=" +M187.159027,136.000000 + C194.960526,136.000153 202.285446,136.131683 209.602646,135.950821 + C213.938156,135.843658 215.825821,138.488846 215.820816,141.949463 + C215.815903,145.340622 214.109512,148.140320 209.703522,148.050095 + C201.220596,147.876373 192.731583,148.012512 184.245148,147.995010 + C179.358124,147.984940 177.015350,146.031998 177.001556,142.030212 + C176.987747,138.025726 179.336075,136.052628 184.183197,136.001328 + C185.015106,135.992523 185.847198,136.000031 187.159027,136.000000 +z"/> +<path fill="#FF4856" opacity="1.000000" stroke="none" + d=" +M196.001892,159.999985 + C200.991074,160.000244 205.484726,160.095459 209.972412,159.972855 + C214.090866,159.860321 215.769012,162.651489 215.822357,165.818817 + C215.879868,169.233459 214.209122,172.084702 209.841827,172.031921 + C200.861038,171.923386 191.877869,172.029129 182.895996,171.984619 + C179.120758,171.965912 177.106766,169.932816 177.021591,166.181961 + C176.930298,162.162140 179.059692,160.090012 183.028915,160.017593 + C187.185928,159.941742 191.345367,160.000214 196.001892,159.999985 +z"/> +<path fill="#FF4856" opacity="1.000000" stroke="none" + d=" +M204.881104,183.999985 + C209.082306,184.307114 213.505051,182.520828 215.438217,187.837067 + C216.831329,191.668106 214.520706,195.896744 210.452026,195.955078 + C201.135651,196.088715 191.813324,196.127518 182.499130,195.931976 + C178.851669,195.855408 176.859726,193.565781 177.011353,189.781052 + C177.161942,186.022980 179.165619,184.034042 182.935013,184.012558 + C190.089905,183.971802 197.245209,184.000275 204.881104,183.999985 +z"/> +<path fill="#000000" 
opacity="1.000000" stroke="none" + d=" +M87.730316,188.764801 + C99.150154,180.050476 111.232735,180.180649 123.613594,184.985886 + C129.680634,187.340637 131.871246,190.525131 133.959763,198.320251 + C117.358025,198.320251 100.911415,198.320251 84.259903,198.320251 + C83.216606,194.719727 84.475235,191.603546 87.730316,188.764801 +z"/> +<path fill="#000000" opacity="1.000000" stroke="none" + d=" +M111.770950,134.026581 + C116.094482,136.707275 118.248917,140.175415 116.479851,144.965759 + C115.133972,148.610168 112.243759,150.033371 108.338448,150.058624 + C104.221619,150.085220 101.500412,148.185120 100.980408,144.373291 + C100.628708,141.795181 100.891022,138.324249 102.393745,136.502167 + C104.378052,134.096191 107.535873,131.693985 111.770950,134.026581 +z"/> +</svg> + +================ +File: static/images/git-pull-request.svg +================ +<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16" width="16" height="16"><path fill-rule="evenodd" d="M7.177 3.073L9.573.677A.25.25 0 0110 .854v4.792a.25.25 0 01-.427.177L7.177 3.427a.25.25 0 010-.354zM3.75 2.5a.75.75 0 100 1.5.75.75 0 000-1.5zm-2.25.75a2.25 2.25 0 113 2.122v5.256a2.251 2.251 0 11-1.5 0V5.372A2.25 2.25 0 011.5 3.25zM11 2.5h-1V4h1a1 1 0 011 1v5.628a2.251 2.251 0 101.5 0V5A2.5 2.5 0 0011 2.5zm1 10.25a.75.75 0 111.5 0 .75.75 0 01-1.5 0zM3.75 12a.75.75 0 100 1.5.75.75 0 000-1.5z"></path></svg> + +================ +File: static/images/logo.svg +================ +<svg width="200" height="200" viewBox="0 0 200 200" xmlns="http://www.w3.org/2000/svg"><g fill="none" fill-rule="evenodd"><path fill="#FFF" d="M99 52h84v34H99z"/><path d="M23 163c-7.398 0-13.843-4.027-17.303-10A19.886 19.886 0 0 0 3 163c0 11.046 8.954 20 20 20h20v-20H23z" fill="#3ECC5F"/><path d="M112.98 57.376L183 53V43c0-11.046-8.954-20-20-20H73l-2.5-4.33c-1.112-1.925-3.889-1.925-5 0L63 23l-2.5-4.33c-1.111-1.925-3.889-1.925-5 0L53 23l-2.5-4.33c-1.111-1.925-3.889-1.925-5 0L43 23c-.022 0-.042.003-.065.003l-4.142-4.141c-1.57-1.571-4.252-.853-4.828 1.294l-1.369 5.104-5.192-1.392c-2.148-.575-4.111 1.389-3.535 3.536l1.39 5.193-5.102 1.367c-2.148.576-2.867 3.259-1.296 4.83l4.142 4.142c0 .021-.003.042-.003.064l-4.33 2.5c-1.925 1.111-1.925 3.889 0 5L23 53l-4.33 2.5c-1.925 1.111-1.925 3.889 0 5L23 63l-4.33 2.5c-1.925 1.111-1.925 3.889 0 5L23 73l-4.33 2.5c-1.925 1.111-1.925 3.889 0 5L23 83l-4.33 2.5c-1.925 1.111-1.925 3.889 0 5L23 93l-4.33 2.5c-1.925 1.111-1.925 3.889 0 5L23 103l-4.33 2.5c-1.925 1.111-1.925 3.889 0 5L23 113l-4.33 2.5c-1.925 1.111-1.925 3.889 0 5L23 123l-4.33 2.5c-1.925 1.111-1.925 3.889 0 5L23 133l-4.33 2.5c-1.925 1.111-1.925 3.889 0 5L23 143l-4.33 2.5c-1.925 1.111-1.925 3.889 0 5L23 153l-4.33 2.5c-1.925 1.111-1.925 3.889 0 5L23 163c0 11.046 8.954 20 20 20h120c11.046 0 20-8.954 20-20V83l-70.02-4.376A10.645 10.645 0 0 1 103 68c0-5.621 4.37-10.273 9.98-10.624" fill="#3ECC5F"/><path fill="#3ECC5F" d="M143 183h30v-40h-30z"/><path d="M193 158c-.219 0-.428.037-.639.064-.038-.15-.074-.301-.116-.451A5 5 0 0 0 190.32 148a4.96 4.96 0 0 0-3.016 1.036 26.531 26.531 0 0 0-.335-.336 4.955 4.955 0 0 0 1.011-2.987 5 5 0 0 0-9.599-1.959c-.148-.042-.297-.077-.445-.115.027-.211.064-.42.064-.639a5 5 0 0 0-5-5 5 5 0 0 0-5 5c0 .219.037.428.064.639-.148.038-.297.073-.445.115a4.998 4.998 0 0 0-9.599 1.959c0 1.125.384 2.151 1.011 2.987-3.717 3.632-6.031 8.693-6.031 14.3 0 11.046 8.954 20 20 20 9.339 0 17.16-6.41 19.361-15.064.211.027.42.064.639.064a5 5 0 0 0 5-5 5 5 0 0 0-5-5" fill="#44D860"/><path fill="#3ECC5F" d="M153 123h30v-20h-30z"/><path d="M193 115.5a2.5 2.5 0 1 0 
0-5c-.109 0-.214.019-.319.032-.02-.075-.037-.15-.058-.225a2.501 2.501 0 0 0-.963-4.807c-.569 0-1.088.197-1.508.518a6.653 6.653 0 0 0-.168-.168c.314-.417.506-.931.506-1.494a2.5 2.5 0 0 0-4.8-.979A9.987 9.987 0 0 0 183 103c-5.522 0-10 4.478-10 10s4.478 10 10 10c.934 0 1.833-.138 2.69-.377a2.5 2.5 0 0 0 4.8-.979c0-.563-.192-1.077-.506-1.494.057-.055.113-.111.168-.168.42.321.939.518 1.508.518a2.5 2.5 0 0 0 .963-4.807c.021-.074.038-.15.058-.225.105.013.21.032.319.032" fill="#44D860"/><path d="M63 55.5a2.5 2.5 0 0 1-2.5-2.5c0-4.136-3.364-7.5-7.5-7.5s-7.5 3.364-7.5 7.5a2.5 2.5 0 1 1-5 0c0-6.893 5.607-12.5 12.5-12.5S65.5 46.107 65.5 53a2.5 2.5 0 0 1-2.5 2.5" fill="#000"/><path d="M103 183h60c11.046 0 20-8.954 20-20V93h-60c-11.046 0-20 8.954-20 20v70z" fill="#FFFF50"/><path d="M168.02 124h-50.04a1 1 0 1 1 0-2h50.04a1 1 0 1 1 0 2m0 20h-50.04a1 1 0 1 1 0-2h50.04a1 1 0 1 1 0 2m0 20h-50.04a1 1 0 1 1 0-2h50.04a1 1 0 1 1 0 2m0-49.814h-50.04a1 1 0 1 1 0-2h50.04a1 1 0 1 1 0 2m0 19.814h-50.04a1 1 0 1 1 0-2h50.04a1 1 0 1 1 0 2m0 20h-50.04a1 1 0 1 1 0-2h50.04a1 1 0 1 1 0 2M183 61.611c-.012 0-.022-.006-.034-.005-3.09.105-4.552 3.196-5.842 5.923-1.346 2.85-2.387 4.703-4.093 4.647-1.889-.068-2.969-2.202-4.113-4.46-1.314-2.594-2.814-5.536-5.963-5.426-3.046.104-4.513 2.794-5.807 5.167-1.377 2.528-2.314 4.065-4.121 3.994-1.927-.07-2.951-1.805-4.136-3.813-1.321-2.236-2.848-4.75-5.936-4.664-2.994.103-4.465 2.385-5.763 4.4-1.373 2.13-2.335 3.428-4.165 3.351-1.973-.07-2.992-1.51-4.171-3.177-1.324-1.873-2.816-3.993-5.895-3.89-2.928.1-4.399 1.97-5.696 3.618-1.232 1.564-2.194 2.802-4.229 2.724a1 1 0 0 0-.072 2c3.017.101 4.545-1.8 5.872-3.487 1.177-1.496 2.193-2.787 4.193-2.855 1.926-.082 2.829 1.115 4.195 3.045 1.297 1.834 2.769 3.914 5.731 4.021 3.103.104 4.596-2.215 5.918-4.267 1.182-1.834 2.202-3.417 4.15-3.484 1.793-.067 2.769 1.35 4.145 3.681 1.297 2.197 2.766 4.686 5.787 4.796 3.125.108 4.634-2.62 5.949-5.035 1.139-2.088 2.214-4.06 4.119-4.126 1.793-.042 2.728 1.595 4.111 4.33 1.292 2.553 2.757 5.445 5.825 5.556l.169.003c3.064 0 4.518-3.075 5.805-5.794 1.139-2.41 2.217-4.68 4.067-4.773v-2z" fill="#000"/><path fill="#3ECC5F" d="M83 183h40v-40H83z"/><path d="M143 158c-.219 0-.428.037-.639.064-.038-.15-.074-.301-.116-.451A5 5 0 0 0 140.32 148a4.96 4.96 0 0 0-3.016 1.036 26.531 26.531 0 0 0-.335-.336 4.955 4.955 0 0 0 1.011-2.987 5 5 0 0 0-9.599-1.959c-.148-.042-.297-.077-.445-.115.027-.211.064-.42.064-.639a5 5 0 0 0-5-5 5 5 0 0 0-5 5c0 .219.037.428.064.639-.148.038-.297.073-.445.115a4.998 4.998 0 0 0-9.599 1.959c0 1.125.384 2.151 1.011 2.987-3.717 3.632-6.031 8.693-6.031 14.3 0 11.046 8.954 20 20 20 9.339 0 17.16-6.41 19.361-15.064.211.027.42.064.639.064a5 5 0 0 0 5-5 5 5 0 0 0-5-5" fill="#44D860"/><path fill="#3ECC5F" d="M83 123h40v-20H83z"/><path d="M133 115.5a2.5 2.5 0 1 0 0-5c-.109 0-.214.019-.319.032-.02-.075-.037-.15-.058-.225a2.501 2.501 0 0 0-.963-4.807c-.569 0-1.088.197-1.508.518a6.653 6.653 0 0 0-.168-.168c.314-.417.506-.931.506-1.494a2.5 2.5 0 0 0-4.8-.979A9.987 9.987 0 0 0 123 103c-5.522 0-10 4.478-10 10s4.478 10 10 10c.934 0 1.833-.138 2.69-.377a2.5 2.5 0 0 0 4.8-.979c0-.563-.192-1.077-.506-1.494.057-.055.113-.111.168-.168.42.321.939.518 1.508.518a2.5 2.5 0 0 0 .963-4.807c.021-.074.038-.15.058-.225.105.013.21.032.319.032" fill="#44D860"/><path d="M143 41.75c-.16 0-.33-.02-.49-.05a2.52 2.52 0 0 1-.47-.14c-.15-.06-.29-.14-.431-.23-.13-.09-.259-.2-.38-.31-.109-.12-.219-.24-.309-.38s-.17-.28-.231-.43a2.619 2.619 0 0 
1-.189-.96c0-.16.02-.33.05-.49.03-.16.08-.31.139-.47.061-.15.141-.29.231-.43.09-.13.2-.26.309-.38.121-.11.25-.22.38-.31.141-.09.281-.17.431-.23.149-.06.31-.11.47-.14.32-.07.65-.07.98 0 .159.03.32.08.47.14.149.06.29.14.43.23.13.09.259.2.38.31.11.12.22.25.31.38.09.14.17.28.23.43.06.16.11.31.14.47.029.16.05.33.05.49 0 .66-.271 1.31-.73 1.77-.121.11-.25.22-.38.31-.14.09-.281.17-.43.23a2.565 2.565 0 0 1-.96.19m20-1.25c-.66 0-1.3-.27-1.771-.73a3.802 3.802 0 0 1-.309-.38c-.09-.14-.17-.28-.231-.43a2.619 2.619 0 0 1-.189-.96c0-.66.27-1.3.729-1.77.121-.11.25-.22.38-.31.141-.09.281-.17.431-.23.149-.06.31-.11.47-.14.32-.07.66-.07.98 0 .159.03.32.08.47.14.149.06.29.14.43.23.13.09.259.2.38.31.459.47.73 1.11.73 1.77 0 .16-.021.33-.05.49-.03.16-.08.32-.14.47-.07.15-.14.29-.23.43-.09.13-.2.26-.31.38-.121.11-.25.22-.38.31-.14.09-.281.17-.43.23a2.565 2.565 0 0 1-.96.19" fill="#000"/></g></svg> + +================ +File: static/images/report.svg +================ +<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" width="24" height="24"><path fill-rule="evenodd" d="M3.25 4a.25.25 0 00-.25.25v12.5c0 .138.112.25.25.25h2.5a.75.75 0 01.75.75v3.19l3.427-3.427A1.75 1.75 0 0111.164 17h9.586a.25.25 0 00.25-.25V4.25a.25.25 0 00-.25-.25H3.25zm-1.75.25c0-.966.784-1.75 1.75-1.75h17.5c.966 0 1.75.784 1.75 1.75v12.5a1.75 1.75 0 01-1.75 1.75h-9.586a.25.25 0 00-.177.073l-3.5 3.5A1.457 1.457 0 015 21.043V18.5H3.25a1.75 1.75 0 01-1.75-1.75V4.25zM12 6a.75.75 0 01.75.75v4a.75.75 0 01-1.5 0v-4A.75.75 0 0112 6zm0 9a1 1 0 100-2 1 1 0 000 2z"></path></svg> + +================ +File: static/images/undraw_docusaurus_mountain.svg +================ +<svg xmlns="http://www.w3.org/2000/svg" width="1088" height="687.962" viewBox="0 0 1088 687.962"> + <g id="Group_12" data-name="Group 12" transform="translate(-57 -56)"> + <g id="Group_11" data-name="Group 11" transform="translate(57 56)"> + <path id="Path_83" data-name="Path 83" d="M1017.81,560.461c-5.27,45.15-16.22,81.4-31.25,110.31-20,38.52-54.21,54.04-84.77,70.28a193.275,193.275,0,0,1-27.46,11.94c-55.61,19.3-117.85,14.18-166.74,3.99a657.282,657.282,0,0,0-104.09-13.16q-14.97-.675-29.97-.67c-15.42.02-293.07,5.29-360.67-131.57-16.69-33.76-28.13-75-32.24-125.27-11.63-142.12,52.29-235.46,134.74-296.47,155.97-115.41,369.76-110.57,523.43,7.88C941.15,276.621,1036.99,396.031,1017.81,560.461Z" transform="translate(-56 -106.019)" fill="#3f3d56"/> + <path id="Path_84" data-name="Path 84" d="M986.56,670.771c-20,38.52-47.21,64.04-77.77,80.28a193.272,193.272,0,0,1-27.46,11.94c-55.61,19.3-117.85,14.18-166.74,3.99a657.3,657.3,0,0,0-104.09-13.16q-14.97-.675-29.97-.67-23.13.03-46.25,1.72c-100.17,7.36-253.82-6.43-321.42-143.29L382,283.981,444.95,445.6l20.09,51.59,55.37-75.98L549,381.981l130.2,149.27,36.8-81.27L970.78,657.9l14.21,11.59Z" transform="translate(-56 -106.019)" fill="#f2f2f2"/> + <path id="Path_85" data-name="Path 85" d="M302,282.962l26-57,36,83-31-60Z" opacity="0.1"/> + <path id="Path_86" data-name="Path 86" d="M610.5,753.821q-14.97-.675-29.97-.67L465.04,497.191Z" transform="translate(-56 -106.019)" opacity="0.1"/> + <path id="Path_87" data-name="Path 87" d="M464.411,315.191,493,292.962l130,150-132-128Z" opacity="0.1"/> + <path id="Path_88" data-name="Path 88" d="M908.79,751.051a193.265,193.265,0,0,1-27.46,11.94L679.2,531.251Z" transform="translate(-56 -106.019)" opacity="0.1"/> + <circle id="Ellipse_11" data-name="Ellipse 11" cx="3" cy="3" r="3" transform="translate(479 98.962)" fill="#f2f2f2"/> + <circle id="Ellipse_12" data-name="Ellipse 12" cx="3" cy="3" r="3" 
transform="translate(396 201.962)" fill="#f2f2f2"/> + <circle id="Ellipse_13" data-name="Ellipse 13" cx="2" cy="2" r="2" transform="translate(600 220.962)" fill="#f2f2f2"/> + <circle id="Ellipse_14" data-name="Ellipse 14" cx="2" cy="2" r="2" transform="translate(180 265.962)" fill="#f2f2f2"/> + <circle id="Ellipse_15" data-name="Ellipse 15" cx="2" cy="2" r="2" transform="translate(612 96.962)" fill="#f2f2f2"/> + <circle id="Ellipse_16" data-name="Ellipse 16" cx="2" cy="2" r="2" transform="translate(736 192.962)" fill="#f2f2f2"/> + <circle id="Ellipse_17" data-name="Ellipse 17" cx="2" cy="2" r="2" transform="translate(858 344.962)" fill="#f2f2f2"/> + <path id="Path_89" data-name="Path 89" d="M306,121.222h-2.76v-2.76h-1.48v2.76H299V122.7h2.76v2.759h1.48V122.7H306Z" fill="#f2f2f2"/> + <path id="Path_90" data-name="Path 90" d="M848,424.222h-2.76v-2.76h-1.48v2.76H841V425.7h2.76v2.759h1.48V425.7H848Z" fill="#f2f2f2"/> + <path id="Path_91" data-name="Path 91" d="M1144,719.981c0,16.569-243.557,74-544,74s-544-57.431-544-74,243.557,14,544,14S1144,703.413,1144,719.981Z" transform="translate(-56 -106.019)" fill="#3f3d56"/> + <path id="Path_92" data-name="Path 92" d="M1144,719.981c0,16.569-243.557,74-544,74s-544-57.431-544-74,243.557,14,544,14S1144,703.413,1144,719.981Z" transform="translate(-56 -106.019)" opacity="0.1"/> + <ellipse id="Ellipse_18" data-name="Ellipse 18" cx="544" cy="30" rx="544" ry="30" transform="translate(0 583.962)" fill="#3f3d56"/> + <path id="Path_93" data-name="Path 93" d="M624,677.981c0,33.137-14.775,24-33,24s-33,9.137-33-24,33-96,33-96S624,644.844,624,677.981Z" transform="translate(-56 -106.019)" fill="#ff6584"/> + <path id="Path_94" data-name="Path 94" d="M606,690.66c0,15.062-6.716,10.909-15,10.909s-15,4.153-15-10.909,15-43.636,15-43.636S606,675.6,606,690.66Z" transform="translate(-56 -106.019)" opacity="0.1"/> + <rect id="Rectangle_97" data-name="Rectangle 97" width="92" height="18" rx="9" transform="translate(489 604.962)" fill="#2f2e41"/> + <rect id="Rectangle_98" data-name="Rectangle 98" width="92" height="18" rx="9" transform="translate(489 586.962)" fill="#2f2e41"/> + <path id="Path_95" data-name="Path 95" d="M193,596.547c0,55.343,34.719,100.126,77.626,100.126" transform="translate(-56 -106.019)" fill="#3f3d56"/> + <path id="Path_96" data-name="Path 96" d="M270.626,696.673c0-55.965,38.745-101.251,86.626-101.251" transform="translate(-56 -106.019)" fill="#6c63ff"/> + <path id="Path_97" data-name="Path 97" d="M221.125,601.564c0,52.57,22.14,95.109,49.5,95.109" transform="translate(-56 -106.019)" fill="#6c63ff"/> + <path id="Path_98" data-name="Path 98" d="M270.626,696.673c0-71.511,44.783-129.377,100.126-129.377" transform="translate(-56 -106.019)" fill="#3f3d56"/> + <path id="Path_99" data-name="Path 99" d="M254.3,697.379s11.009-.339,14.326-2.7,16.934-5.183,17.757-1.395,16.544,18.844,4.115,18.945-28.879-1.936-32.19-3.953S254.3,697.379,254.3,697.379Z" transform="translate(-56 -106.019)" fill="#a8a8a8"/> + <path id="Path_100" data-name="Path 100" d="M290.716,710.909c-12.429.1-28.879-1.936-32.19-3.953-2.522-1.536-3.527-7.048-3.863-9.591l-.368.014s.7,8.879,4.009,10.9,19.761,4.053,32.19,3.953c3.588-.029,4.827-1.305,4.759-3.2C294.755,710.174,293.386,710.887,290.716,710.909Z" transform="translate(-56 -106.019)" opacity="0.2"/> + <path id="Path_101" data-name="Path 101" d="M777.429,633.081c0,38.029,23.857,68.8,53.341,68.8" transform="translate(-56 -106.019)" fill="#3f3d56"/> + <path id="Path_102" data-name="Path 102" d="M830.769,701.882c0-38.456,26.623-69.575,59.525-69.575" 
transform="translate(-56 -106.019)" fill="#6c63ff"/> + <path id="Path_103" data-name="Path 103" d="M796.755,636.528c0,36.124,15.213,65.354,34.014,65.354" transform="translate(-56 -106.019)" fill="#6c63ff"/> + <path id="Path_104" data-name="Path 104" d="M830.769,701.882c0-49.139,30.773-88.9,68.8-88.9" transform="translate(-56 -106.019)" fill="#3f3d56"/> + <path id="Path_105" data-name="Path 105" d="M819.548,702.367s7.565-.233,9.844-1.856,11.636-3.562,12.2-.958,11.368,12.949,2.828,13.018-19.844-1.33-22.119-2.716S819.548,702.367,819.548,702.367Z" transform="translate(-56 -106.019)" fill="#a8a8a8"/> + <path id="Path_106" data-name="Path 106" d="M844.574,711.664c-8.54.069-19.844-1.33-22.119-2.716-1.733-1.056-2.423-4.843-2.654-6.59l-.253.01s.479,6.1,2.755,7.487,13.579,2.785,22.119,2.716c2.465-.02,3.317-.9,3.27-2.2C847.349,711.159,846.409,711.649,844.574,711.664Z" transform="translate(-56 -106.019)" opacity="0.2"/> + <path id="Path_107" data-name="Path 107" d="M949.813,724.718s11.36-1.729,14.5-4.591,16.89-7.488,18.217-3.667,19.494,17.447,6.633,19.107-30.153,1.609-33.835-.065S949.813,724.718,949.813,724.718Z" transform="translate(-56 -106.019)" fill="#a8a8a8"/> + <path id="Path_108" data-name="Path 108" d="M989.228,734.173c-12.86,1.659-30.153,1.609-33.835-.065-2.8-1.275-4.535-6.858-5.2-9.45l-.379.061s1.833,9.109,5.516,10.783,20.975,1.725,33.835.065c3.712-.479,4.836-1.956,4.529-3.906C993.319,732.907,991.991,733.817,989.228,734.173Z" transform="translate(-56 -106.019)" opacity="0.2"/> + <path id="Path_109" data-name="Path 109" d="M670.26,723.9s9.587-1.459,12.237-3.875,14.255-6.32,15.374-3.095,16.452,14.725,5.6,16.125-25.448,1.358-28.555-.055S670.26,723.9,670.26,723.9Z" transform="translate(-56 -106.019)" fill="#a8a8a8"/> + <path id="Path_110" data-name="Path 110" d="M703.524,731.875c-10.853,1.4-25.448,1.358-28.555-.055-2.367-1.076-3.827-5.788-4.39-7.976l-.32.051s1.547,7.687,4.655,9.1,17.7,1.456,28.555.055c3.133-.4,4.081-1.651,3.822-3.3C706.977,730.807,705.856,731.575,703.524,731.875Z" transform="translate(-56 -106.019)" opacity="0.2"/> + <path id="Path_111" data-name="Path 111" d="M178.389,719.109s7.463-1.136,9.527-3.016,11.1-4.92,11.969-2.409,12.808,11.463,4.358,12.553-19.811,1.057-22.23-.043S178.389,719.109,178.389,719.109Z" transform="translate(-56 -106.019)" fill="#a8a8a8"/> + <path id="Path_112" data-name="Path 112" d="M204.285,725.321c-8.449,1.09-19.811,1.057-22.23-.043-1.842-.838-2.979-4.506-3.417-6.209l-.249.04s1.2,5.984,3.624,7.085,13.781,1.133,22.23.043c2.439-.315,3.177-1.285,2.976-2.566C206.973,724.489,206.1,725.087,204.285,725.321Z" transform="translate(-56 -106.019)" opacity="0.2"/> + <path id="Path_113" data-name="Path 113" d="M439.7,707.337c0,30.22-42.124,20.873-93.7,20.873s-93.074,9.347-93.074-20.873,42.118-36.793,93.694-36.793S439.7,677.117,439.7,707.337Z" transform="translate(-56 -106.019)" opacity="0.1"/> + <path id="Path_114" data-name="Path 114" d="M439.7,699.9c0,30.22-42.124,20.873-93.7,20.873s-93.074,9.347-93.074-20.873S295.04,663.1,346.616,663.1,439.7,669.676,439.7,699.9Z" transform="translate(-56 -106.019)" fill="#3f3d56"/> + </g> + <g id="docusaurus_keytar" transform="translate(312.271 493.733)"> + <path id="Path_40" data-name="Path 40" d="M99,52h91.791V89.153H99Z" transform="translate(5.904 -14.001)" fill="#fff" fill-rule="evenodd"/> + <path id="Path_41" data-name="Path 41" d="M24.855,163.927A21.828,21.828,0,0,1,5.947,153a21.829,21.829,0,0,0,18.908,32.782H46.71V163.927Z" transform="translate(-3 -4.634)" fill="#3ecc5f" fill-rule="evenodd"/> + <path id="Path_42" 
data-name="Path 42" d="M121.861,61.1l76.514-4.782V45.39A21.854,21.854,0,0,0,176.52,23.535H78.173L75.441,18.8a3.154,3.154,0,0,0-5.464,0l-2.732,4.732L64.513,18.8a3.154,3.154,0,0,0-5.464,0l-2.732,4.732L53.586,18.8a3.154,3.154,0,0,0-5.464,0L45.39,23.535c-.024,0-.046,0-.071,0l-4.526-4.525a3.153,3.153,0,0,0-5.276,1.414l-1.5,5.577-5.674-1.521a3.154,3.154,0,0,0-3.863,3.864L26,34.023l-5.575,1.494a3.155,3.155,0,0,0-1.416,5.278l4.526,4.526c0,.023,0,.046,0,.07L18.8,48.122a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,59.05a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,69.977a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,80.9a3.154,3.154,0,0,0,0,5.464L23.535,89.1,18.8,91.832a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,102.76a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,113.687a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,124.615a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,135.542a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,146.469a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,157.4a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,168.324a3.154,3.154,0,0,0,0,5.464l4.732,2.732A21.854,21.854,0,0,0,45.39,198.375H176.52a21.854,21.854,0,0,0,21.855-21.855V89.1l-76.514-4.782a11.632,11.632,0,0,1,0-23.219" transform="translate(-1.681 -17.226)" fill="#3ecc5f" fill-rule="evenodd"/> + <path id="Path_43" data-name="Path 43" d="M143,186.71h32.782V143H143Z" transform="translate(9.984 -5.561)" fill="#3ecc5f" fill-rule="evenodd"/> + <path id="Path_44" data-name="Path 44" d="M196.71,159.855a5.438,5.438,0,0,0-.7.07c-.042-.164-.081-.329-.127-.493a5.457,5.457,0,1,0-5.4-9.372q-.181-.185-.366-.367a5.454,5.454,0,1,0-9.384-5.4c-.162-.046-.325-.084-.486-.126a5.467,5.467,0,1,0-10.788,0c-.162.042-.325.08-.486.126a5.457,5.457,0,1,0-9.384,5.4,21.843,21.843,0,1,0,36.421,21.02,5.452,5.452,0,1,0,.7-10.858" transform="translate(10.912 -6.025)" fill="#44d860" fill-rule="evenodd"/> + <path id="Path_45" data-name="Path 45" d="M153,124.855h32.782V103H153Z" transform="translate(10.912 -9.271)" fill="#3ecc5f" fill-rule="evenodd"/> + <path id="Path_46" data-name="Path 46" d="M194.855,116.765a2.732,2.732,0,1,0,0-5.464,2.811,2.811,0,0,0-.349.035c-.022-.082-.04-.164-.063-.246a2.733,2.733,0,0,0-1.052-5.253,2.7,2.7,0,0,0-1.648.566q-.09-.093-.184-.184a2.7,2.7,0,0,0,.553-1.633,2.732,2.732,0,0,0-5.245-1.07,10.928,10.928,0,1,0,0,21.031,2.732,2.732,0,0,0,5.245-1.07,2.7,2.7,0,0,0-.553-1.633q.093-.09.184-.184a2.7,2.7,0,0,0,1.648.566,2.732,2.732,0,0,0,1.052-5.253c.023-.081.042-.164.063-.246a2.814,2.814,0,0,0,.349.035" transform="translate(12.767 -9.377)" fill="#44d860" fill-rule="evenodd"/> + <path id="Path_47" data-name="Path 47" d="M65.087,56.891a2.732,2.732,0,0,1-2.732-2.732,8.2,8.2,0,0,0-16.391,0,2.732,2.732,0,0,1-5.464,0,13.659,13.659,0,0,1,27.319,0,2.732,2.732,0,0,1-2.732,2.732" transform="translate(0.478 -15.068)" fill-rule="evenodd"/> + <path id="Path_48" data-name="Path 48" d="M103,191.347h65.565a21.854,21.854,0,0,0,21.855-21.855V93H124.855A21.854,21.854,0,0,0,103,114.855Z" transform="translate(6.275 -10.199)" fill="#ffff50" fill-rule="evenodd"/> + <path id="Path_49" data-name="Path 49" 
d="M173.216,129.787H118.535a1.093,1.093,0,1,1,0-2.185h54.681a1.093,1.093,0,0,1,0,2.185m0,21.855H118.535a1.093,1.093,0,1,1,0-2.186h54.681a1.093,1.093,0,0,1,0,2.186m0,21.855H118.535a1.093,1.093,0,1,1,0-2.185h54.681a1.093,1.093,0,0,1,0,2.185m0-54.434H118.535a1.093,1.093,0,1,1,0-2.185h54.681a1.093,1.093,0,0,1,0,2.185m0,21.652H118.535a1.093,1.093,0,1,1,0-2.186h54.681a1.093,1.093,0,0,1,0,2.186m0,21.855H118.535a1.093,1.093,0,1,1,0-2.186h54.681a1.093,1.093,0,0,1,0,2.186M189.585,61.611c-.013,0-.024-.007-.037-.005-3.377.115-4.974,3.492-6.384,6.472-1.471,3.114-2.608,5.139-4.473,5.078-2.064-.074-3.244-2.406-4.494-4.874-1.436-2.835-3.075-6.049-6.516-5.929-3.329.114-4.932,3.053-6.346,5.646-1.5,2.762-2.529,4.442-4.5,4.364-2.106-.076-3.225-1.972-4.52-4.167-1.444-2.443-3.112-5.191-6.487-5.1-3.272.113-4.879,2.606-6.3,4.808-1.5,2.328-2.552,3.746-4.551,3.662-2.156-.076-3.27-1.65-4.558-3.472-1.447-2.047-3.077-4.363-6.442-4.251-3.2.109-4.807,2.153-6.224,3.954-1.346,1.709-2.4,3.062-4.621,2.977a1.093,1.093,0,0,0-.079,2.186c3.3.11,4.967-1.967,6.417-3.81,1.286-1.635,2.4-3.045,4.582-3.12,2.1-.09,3.091,1.218,4.584,3.327,1.417,2,3.026,4.277,6.263,4.394,3.391.114,5.022-2.42,6.467-4.663,1.292-2,2.406-3.734,4.535-3.807,1.959-.073,3.026,1.475,4.529,4.022,1.417,2.4,3.023,5.121,6.324,5.241,3.415.118,5.064-2.863,6.5-5.5,1.245-2.282,2.419-4.437,4.5-4.509,1.959-.046,2.981,1.743,4.492,4.732,1.412,2.79,3.013,5.95,6.365,6.071l.185,0c3.348,0,4.937-3.36,6.343-6.331,1.245-2.634,2.423-5.114,4.444-5.216Z" transform="translate(7.109 -13.11)" fill-rule="evenodd"/> + <path id="Path_50" data-name="Path 50" d="M83,186.71h43.71V143H83Z" transform="translate(4.42 -5.561)" fill="#3ecc5f" fill-rule="evenodd"/> + <g id="Group_8" data-name="Group 8" transform="matrix(0.966, -0.259, 0.259, 0.966, 109.327, 91.085)"> + <rect id="Rectangle_3" data-name="Rectangle 3" width="92.361" height="36.462" rx="2" transform="translate(0 0)" fill="#d8d8d8"/> + <g id="Group_2" data-name="Group 2" transform="translate(1.531 23.03)"> + <rect id="Rectangle_4" data-name="Rectangle 4" width="5.336" height="5.336" rx="1" transform="translate(16.797 0)" fill="#4a4a4a"/> + <rect id="Rectangle_5" data-name="Rectangle 5" width="5.336" height="5.336" rx="1" transform="translate(23.12 0)" fill="#4a4a4a"/> + <rect id="Rectangle_6" data-name="Rectangle 6" width="5.336" height="5.336" rx="1" transform="translate(29.444 0)" fill="#4a4a4a"/> + <rect id="Rectangle_7" data-name="Rectangle 7" width="5.336" height="5.336" rx="1" transform="translate(35.768 0)" fill="#4a4a4a"/> + <rect id="Rectangle_8" data-name="Rectangle 8" width="5.336" height="5.336" rx="1" transform="translate(42.091 0)" fill="#4a4a4a"/> + <rect id="Rectangle_9" data-name="Rectangle 9" width="5.336" height="5.336" rx="1" transform="translate(48.415 0)" fill="#4a4a4a"/> + <rect id="Rectangle_10" data-name="Rectangle 10" width="5.336" height="5.336" rx="1" transform="translate(54.739 0)" fill="#4a4a4a"/> + <rect id="Rectangle_11" data-name="Rectangle 11" width="5.336" height="5.336" rx="1" transform="translate(61.063 0)" fill="#4a4a4a"/> + <rect id="Rectangle_12" data-name="Rectangle 12" width="5.336" height="5.336" rx="1" transform="translate(67.386 0)" fill="#4a4a4a"/> + <path id="Path_51" data-name="Path 51" d="M1.093,0H14.518a1.093,1.093,0,0,1,1.093,1.093V4.243a1.093,1.093,0,0,1-1.093,1.093H1.093A1.093,1.093,0,0,1,0,4.243V1.093A1.093,1.093,0,0,1,1.093,0ZM75,0H88.426a1.093,1.093,0,0,1,1.093,1.093V4.243a1.093,1.093,0,0,1-1.093,1.093H75a1.093,1.093,0,0,1-1.093-1.093V1.093A1.093,1.093,0,0,1,75,0Z" 
transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/> + </g> + <g id="Group_3" data-name="Group 3" transform="translate(1.531 10.261)"> + <path id="Path_52" data-name="Path 52" d="M1.093,0H6.218A1.093,1.093,0,0,1,7.31,1.093V4.242A1.093,1.093,0,0,1,6.218,5.335H1.093A1.093,1.093,0,0,1,0,4.242V1.093A1.093,1.093,0,0,1,1.093,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/> + <rect id="Rectangle_13" data-name="Rectangle 13" width="5.336" height="5.336" rx="1" transform="translate(8.299 0)" fill="#4a4a4a"/> + <rect id="Rectangle_14" data-name="Rectangle 14" width="5.336" height="5.336" rx="1" transform="translate(14.623 0)" fill="#4a4a4a"/> + <rect id="Rectangle_15" data-name="Rectangle 15" width="5.336" height="5.336" rx="1" transform="translate(20.947 0)" fill="#4a4a4a"/> + <rect id="Rectangle_16" data-name="Rectangle 16" width="5.336" height="5.336" rx="1" transform="translate(27.271 0)" fill="#4a4a4a"/> + <rect id="Rectangle_17" data-name="Rectangle 17" width="5.336" height="5.336" rx="1" transform="translate(33.594 0)" fill="#4a4a4a"/> + <rect id="Rectangle_18" data-name="Rectangle 18" width="5.336" height="5.336" rx="1" transform="translate(39.918 0)" fill="#4a4a4a"/> + <rect id="Rectangle_19" data-name="Rectangle 19" width="5.336" height="5.336" rx="1" transform="translate(46.242 0)" fill="#4a4a4a"/> + <rect id="Rectangle_20" data-name="Rectangle 20" width="5.336" height="5.336" rx="1" transform="translate(52.565 0)" fill="#4a4a4a"/> + <rect id="Rectangle_21" data-name="Rectangle 21" width="5.336" height="5.336" rx="1" transform="translate(58.888 0)" fill="#4a4a4a"/> + <rect id="Rectangle_22" data-name="Rectangle 22" width="5.336" height="5.336" rx="1" transform="translate(65.212 0)" fill="#4a4a4a"/> + <rect id="Rectangle_23" data-name="Rectangle 23" width="5.336" height="5.336" rx="1" transform="translate(71.536 0)" fill="#4a4a4a"/> + <rect id="Rectangle_24" data-name="Rectangle 24" width="5.336" height="5.336" rx="1" transform="translate(77.859 0)" fill="#4a4a4a"/> + <rect id="Rectangle_25" data-name="Rectangle 25" width="5.336" height="5.336" rx="1" transform="translate(84.183 0)" fill="#4a4a4a"/> + </g> + <g id="Group_4" data-name="Group 4" transform="translate(91.05 9.546) rotate(180)"> + <path id="Path_53" data-name="Path 53" d="M1.093,0H6.219A1.093,1.093,0,0,1,7.312,1.093v3.15A1.093,1.093,0,0,1,6.219,5.336H1.093A1.093,1.093,0,0,1,0,4.243V1.093A1.093,1.093,0,0,1,1.093,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/> + <rect id="Rectangle_26" data-name="Rectangle 26" width="5.336" height="5.336" rx="1" transform="translate(8.299 0)" fill="#4a4a4a"/> + <rect id="Rectangle_27" data-name="Rectangle 27" width="5.336" height="5.336" rx="1" transform="translate(14.623 0)" fill="#4a4a4a"/> + <rect id="Rectangle_28" data-name="Rectangle 28" width="5.336" height="5.336" rx="1" transform="translate(20.947 0)" fill="#4a4a4a"/> + <rect id="Rectangle_29" data-name="Rectangle 29" width="5.336" height="5.336" rx="1" transform="translate(27.271 0)" fill="#4a4a4a"/> + <rect id="Rectangle_30" data-name="Rectangle 30" width="5.336" height="5.336" rx="1" transform="translate(33.594 0)" fill="#4a4a4a"/> + <rect id="Rectangle_31" data-name="Rectangle 31" width="5.336" height="5.336" rx="1" transform="translate(39.918 0)" fill="#4a4a4a"/> + <rect id="Rectangle_32" data-name="Rectangle 32" width="5.336" height="5.336" rx="1" transform="translate(46.242 0)" fill="#4a4a4a"/> + <rect id="Rectangle_33" data-name="Rectangle 33" width="5.336" height="5.336" rx="1" 
transform="translate(52.565 0)" fill="#4a4a4a"/> + <rect id="Rectangle_34" data-name="Rectangle 34" width="5.336" height="5.336" rx="1" transform="translate(58.889 0)" fill="#4a4a4a"/> + <rect id="Rectangle_35" data-name="Rectangle 35" width="5.336" height="5.336" rx="1" transform="translate(65.213 0)" fill="#4a4a4a"/> + <rect id="Rectangle_36" data-name="Rectangle 36" width="5.336" height="5.336" rx="1" transform="translate(71.537 0)" fill="#4a4a4a"/> + <rect id="Rectangle_37" data-name="Rectangle 37" width="5.336" height="5.336" rx="1" transform="translate(77.86 0)" fill="#4a4a4a"/> + <rect id="Rectangle_38" data-name="Rectangle 38" width="5.336" height="5.336" rx="1" transform="translate(84.183 0)" fill="#4a4a4a"/> + <rect id="Rectangle_39" data-name="Rectangle 39" width="5.336" height="5.336" rx="1" transform="translate(8.299 0)" fill="#4a4a4a"/> + <rect id="Rectangle_40" data-name="Rectangle 40" width="5.336" height="5.336" rx="1" transform="translate(14.623 0)" fill="#4a4a4a"/> + <rect id="Rectangle_41" data-name="Rectangle 41" width="5.336" height="5.336" rx="1" transform="translate(20.947 0)" fill="#4a4a4a"/> + <rect id="Rectangle_42" data-name="Rectangle 42" width="5.336" height="5.336" rx="1" transform="translate(27.271 0)" fill="#4a4a4a"/> + <rect id="Rectangle_43" data-name="Rectangle 43" width="5.336" height="5.336" rx="1" transform="translate(33.594 0)" fill="#4a4a4a"/> + <rect id="Rectangle_44" data-name="Rectangle 44" width="5.336" height="5.336" rx="1" transform="translate(39.918 0)" fill="#4a4a4a"/> + <rect id="Rectangle_45" data-name="Rectangle 45" width="5.336" height="5.336" rx="1" transform="translate(46.242 0)" fill="#4a4a4a"/> + <rect id="Rectangle_46" data-name="Rectangle 46" width="5.336" height="5.336" rx="1" transform="translate(52.565 0)" fill="#4a4a4a"/> + <rect id="Rectangle_47" data-name="Rectangle 47" width="5.336" height="5.336" rx="1" transform="translate(58.889 0)" fill="#4a4a4a"/> + <rect id="Rectangle_48" data-name="Rectangle 48" width="5.336" height="5.336" rx="1" transform="translate(65.213 0)" fill="#4a4a4a"/> + <rect id="Rectangle_49" data-name="Rectangle 49" width="5.336" height="5.336" rx="1" transform="translate(71.537 0)" fill="#4a4a4a"/> + <rect id="Rectangle_50" data-name="Rectangle 50" width="5.336" height="5.336" rx="1" transform="translate(77.86 0)" fill="#4a4a4a"/> + <rect id="Rectangle_51" data-name="Rectangle 51" width="5.336" height="5.336" rx="1" transform="translate(84.183 0)" fill="#4a4a4a"/> + </g> + <g id="Group_6" data-name="Group 6" transform="translate(1.531 16.584)"> + <path id="Path_54" data-name="Path 54" d="M1.093,0h7.3A1.093,1.093,0,0,1,9.485,1.093v3.15A1.093,1.093,0,0,1,8.392,5.336h-7.3A1.093,1.093,0,0,1,0,4.243V1.094A1.093,1.093,0,0,1,1.093,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/> + <g id="Group_5" data-name="Group 5" transform="translate(10.671 0)"> + <rect id="Rectangle_52" data-name="Rectangle 52" width="5.336" height="5.336" rx="1" fill="#4a4a4a"/> + <rect id="Rectangle_53" data-name="Rectangle 53" width="5.336" height="5.336" rx="1" transform="translate(6.324 0)" fill="#4a4a4a"/> + <rect id="Rectangle_54" data-name="Rectangle 54" width="5.336" height="5.336" rx="1" transform="translate(12.647 0)" fill="#4a4a4a"/> + <rect id="Rectangle_55" data-name="Rectangle 55" width="5.336" height="5.336" rx="1" transform="translate(18.971 0)" fill="#4a4a4a"/> + <rect id="Rectangle_56" data-name="Rectangle 56" width="5.336" height="5.336" rx="1" transform="translate(25.295 0)" fill="#4a4a4a"/> + <rect 
id="Rectangle_57" data-name="Rectangle 57" width="5.336" height="5.336" rx="1" transform="translate(31.619 0)" fill="#4a4a4a"/> + <rect id="Rectangle_58" data-name="Rectangle 58" width="5.336" height="5.336" rx="1" transform="translate(37.942 0)" fill="#4a4a4a"/> + <rect id="Rectangle_59" data-name="Rectangle 59" width="5.336" height="5.336" rx="1" transform="translate(44.265 0)" fill="#4a4a4a"/> + <rect id="Rectangle_60" data-name="Rectangle 60" width="5.336" height="5.336" rx="1" transform="translate(50.589 0)" fill="#4a4a4a"/> + <rect id="Rectangle_61" data-name="Rectangle 61" width="5.336" height="5.336" rx="1" transform="translate(56.912 0)" fill="#4a4a4a"/> + <rect id="Rectangle_62" data-name="Rectangle 62" width="5.336" height="5.336" rx="1" transform="translate(63.236 0)" fill="#4a4a4a"/> + </g> + <path id="Path_55" data-name="Path 55" d="M1.094,0H8A1.093,1.093,0,0,1,9.091,1.093v3.15A1.093,1.093,0,0,1,8,5.336H1.093A1.093,1.093,0,0,1,0,4.243V1.094A1.093,1.093,0,0,1,1.093,0Z" transform="translate(80.428 0)" fill="#4a4a4a" fill-rule="evenodd"/> + </g> + <g id="Group_7" data-name="Group 7" transform="translate(1.531 29.627)"> + <rect id="Rectangle_63" data-name="Rectangle 63" width="5.336" height="5.336" rx="1" transform="translate(0 0)" fill="#4a4a4a"/> + <rect id="Rectangle_64" data-name="Rectangle 64" width="5.336" height="5.336" rx="1" transform="translate(6.324 0)" fill="#4a4a4a"/> + <rect id="Rectangle_65" data-name="Rectangle 65" width="5.336" height="5.336" rx="1" transform="translate(12.647 0)" fill="#4a4a4a"/> + <rect id="Rectangle_66" data-name="Rectangle 66" width="5.336" height="5.336" rx="1" transform="translate(18.971 0)" fill="#4a4a4a"/> + <path id="Path_56" data-name="Path 56" d="M1.093,0H31.515a1.093,1.093,0,0,1,1.093,1.093V4.244a1.093,1.093,0,0,1-1.093,1.093H1.093A1.093,1.093,0,0,1,0,4.244V1.093A1.093,1.093,0,0,1,1.093,0ZM34.687,0h3.942a1.093,1.093,0,0,1,1.093,1.093V4.244a1.093,1.093,0,0,1-1.093,1.093H34.687a1.093,1.093,0,0,1-1.093-1.093V1.093A1.093,1.093,0,0,1,34.687,0Z" transform="translate(25.294 0)" fill="#4a4a4a" fill-rule="evenodd"/> + <rect id="Rectangle_67" data-name="Rectangle 67" width="5.336" height="5.336" rx="1" transform="translate(66.003 0)" fill="#4a4a4a"/> + <rect id="Rectangle_68" data-name="Rectangle 68" width="5.336" height="5.336" rx="1" transform="translate(72.327 0)" fill="#4a4a4a"/> + <rect id="Rectangle_69" data-name="Rectangle 69" width="5.336" height="5.336" rx="1" transform="translate(84.183 0)" fill="#4a4a4a"/> + <path id="Path_57" data-name="Path 57" d="M5.336,0V1.18A1.093,1.093,0,0,1,4.243,2.273H1.093A1.093,1.093,0,0,1,0,1.18V0Z" transform="translate(83.59 2.273) rotate(180)" fill="#4a4a4a"/> + <path id="Path_58" data-name="Path 58" d="M5.336,0V1.18A1.093,1.093,0,0,1,4.243,2.273H1.093A1.093,1.093,0,0,1,0,1.18V0Z" transform="translate(78.255 3.063)" fill="#4a4a4a"/> + </g> + <rect id="Rectangle_70" data-name="Rectangle 70" width="88.927" height="2.371" rx="1.085" transform="translate(1.925 1.17)" fill="#4a4a4a"/> + <rect id="Rectangle_71" data-name="Rectangle 71" width="4.986" height="1.581" rx="0.723" transform="translate(4.1 1.566)" fill="#d8d8d8" opacity="0.136"/> + <rect id="Rectangle_72" data-name="Rectangle 72" width="4.986" height="1.581" rx="0.723" transform="translate(10.923 1.566)" fill="#d8d8d8" opacity="0.136"/> + <rect id="Rectangle_73" data-name="Rectangle 73" width="4.986" height="1.581" rx="0.723" transform="translate(16.173 1.566)" fill="#d8d8d8" opacity="0.136"/> + <rect id="Rectangle_74" data-name="Rectangle 74" 
width="4.986" height="1.581" rx="0.723" transform="translate(21.421 1.566)" fill="#d8d8d8" opacity="0.136"/> + <rect id="Rectangle_75" data-name="Rectangle 75" width="4.986" height="1.581" rx="0.723" transform="translate(26.671 1.566)" fill="#d8d8d8" opacity="0.136"/> + <rect id="Rectangle_76" data-name="Rectangle 76" width="4.986" height="1.581" rx="0.723" transform="translate(33.232 1.566)" fill="#d8d8d8" opacity="0.136"/> + <rect id="Rectangle_77" data-name="Rectangle 77" width="4.986" height="1.581" rx="0.723" transform="translate(38.48 1.566)" fill="#d8d8d8" opacity="0.136"/> + <rect id="Rectangle_78" data-name="Rectangle 78" width="4.986" height="1.581" rx="0.723" transform="translate(43.73 1.566)" fill="#d8d8d8" opacity="0.136"/> + <rect id="Rectangle_79" data-name="Rectangle 79" width="4.986" height="1.581" rx="0.723" transform="translate(48.978 1.566)" fill="#d8d8d8" opacity="0.136"/> + <rect id="Rectangle_80" data-name="Rectangle 80" width="4.986" height="1.581" rx="0.723" transform="translate(55.54 1.566)" fill="#d8d8d8" opacity="0.136"/> + <rect id="Rectangle_81" data-name="Rectangle 81" width="4.986" height="1.581" rx="0.723" transform="translate(60.788 1.566)" fill="#d8d8d8" opacity="0.136"/> + <rect id="Rectangle_82" data-name="Rectangle 82" width="4.986" height="1.581" rx="0.723" transform="translate(66.038 1.566)" fill="#d8d8d8" opacity="0.136"/> + <rect id="Rectangle_83" data-name="Rectangle 83" width="4.986" height="1.581" rx="0.723" transform="translate(72.599 1.566)" fill="#d8d8d8" opacity="0.136"/> + <rect id="Rectangle_84" data-name="Rectangle 84" width="4.986" height="1.581" rx="0.723" transform="translate(77.847 1.566)" fill="#d8d8d8" opacity="0.136"/> + <rect id="Rectangle_85" data-name="Rectangle 85" width="4.986" height="1.581" rx="0.723" transform="translate(83.097 1.566)" fill="#d8d8d8" opacity="0.136"/> + </g> + <path id="Path_59" data-name="Path 59" d="M146.71,159.855a5.439,5.439,0,0,0-.7.07c-.042-.164-.081-.329-.127-.493a5.457,5.457,0,1,0-5.4-9.372q-.181-.185-.366-.367a5.454,5.454,0,1,0-9.384-5.4c-.162-.046-.325-.084-.486-.126a5.467,5.467,0,1,0-10.788,0c-.162.042-.325.08-.486.126a5.457,5.457,0,1,0-9.384,5.4,21.843,21.843,0,1,0,36.421,21.02,5.452,5.452,0,1,0,.7-10.858" transform="translate(6.275 -6.025)" fill="#44d860" fill-rule="evenodd"/> + <path id="Path_60" data-name="Path 60" d="M83,124.855h43.71V103H83Z" transform="translate(4.42 -9.271)" fill="#3ecc5f" fill-rule="evenodd"/> + <path id="Path_61" data-name="Path 61" d="M134.855,116.765a2.732,2.732,0,1,0,0-5.464,2.811,2.811,0,0,0-.349.035c-.022-.082-.04-.164-.063-.246a2.733,2.733,0,0,0-1.052-5.253,2.7,2.7,0,0,0-1.648.566q-.09-.093-.184-.184a2.7,2.7,0,0,0,.553-1.633,2.732,2.732,0,0,0-5.245-1.07,10.928,10.928,0,1,0,0,21.031,2.732,2.732,0,0,0,5.245-1.07,2.7,2.7,0,0,0-.553-1.633q.093-.09.184-.184a2.7,2.7,0,0,0,1.648.566,2.732,2.732,0,0,0,1.052-5.253c.023-.081.042-.164.063-.246a2.811,2.811,0,0,0,.349.035" transform="translate(7.202 -9.377)" fill="#44d860" fill-rule="evenodd"/> + <path id="Path_62" data-name="Path 62" 
d="M143.232,42.33a2.967,2.967,0,0,1-.535-.055,2.754,2.754,0,0,1-.514-.153,2.838,2.838,0,0,1-.471-.251,4.139,4.139,0,0,1-.415-.339,3.2,3.2,0,0,1-.338-.415A2.7,2.7,0,0,1,140.5,39.6a2.968,2.968,0,0,1,.055-.535,3.152,3.152,0,0,1,.152-.514,2.874,2.874,0,0,1,.252-.47,2.633,2.633,0,0,1,.753-.754,2.837,2.837,0,0,1,.471-.251,2.753,2.753,0,0,1,.514-.153,2.527,2.527,0,0,1,1.071,0,2.654,2.654,0,0,1,.983.4,4.139,4.139,0,0,1,.415.339,4.019,4.019,0,0,1,.339.415,2.786,2.786,0,0,1,.251.47,2.864,2.864,0,0,1,.208,1.049,2.77,2.77,0,0,1-.8,1.934,4.139,4.139,0,0,1-.415.339,2.722,2.722,0,0,1-1.519.459m21.855-1.366a2.789,2.789,0,0,1-1.935-.8,4.162,4.162,0,0,1-.338-.415,2.7,2.7,0,0,1-.459-1.519,2.789,2.789,0,0,1,.8-1.934,4.139,4.139,0,0,1,.415-.339,2.838,2.838,0,0,1,.471-.251,2.752,2.752,0,0,1,.514-.153,2.527,2.527,0,0,1,1.071,0,2.654,2.654,0,0,1,.983.4,4.139,4.139,0,0,1,.415.339,2.79,2.79,0,0,1,.8,1.934,3.069,3.069,0,0,1-.055.535,2.779,2.779,0,0,1-.153.514,3.885,3.885,0,0,1-.251.47,4.02,4.02,0,0,1-.339.415,4.138,4.138,0,0,1-.415.339,2.722,2.722,0,0,1-1.519.459" transform="translate(9.753 -15.532)" fill-rule="evenodd"/> + </g> + </g> +</svg> + +================ +File: static/images/undraw_docusaurus_react.svg +================ +<svg xmlns="http://www.w3.org/2000/svg" width="1041.277" height="554.141" viewBox="0 0 1041.277 554.141"> + <g id="Group_24" data-name="Group 24" transform="translate(-440 -263)"> + <g id="Group_23" data-name="Group 23" transform="translate(439.989 262.965)"> + <path id="Path_299" data-name="Path 299" d="M1040.82,611.12q-1.74,3.75-3.47,7.4-2.7,5.67-5.33,11.12c-.78,1.61-1.56,3.19-2.32,4.77-8.6,17.57-16.63,33.11-23.45,45.89A73.21,73.21,0,0,1,942.44,719l-151.65,1.65h-1.6l-13,.14-11.12.12-34.1.37h-1.38l-17.36.19h-.53l-107,1.16-95.51,1-11.11.12-69,.75H429l-44.75.48h-.48l-141.5,1.53-42.33.46a87.991,87.991,0,0,1-10.79-.54h0c-1.22-.14-2.44-.3-3.65-.49a87.38,87.38,0,0,1-51.29-27.54C116,678.37,102.75,655,93.85,629.64q-1.93-5.49-3.6-11.12C59.44,514.37,97,380,164.6,290.08q4.25-5.64,8.64-11l.07-.08c20.79-25.52,44.1-46.84,68.93-62,44-26.91,92.75-34.49,140.7-11.9,40.57,19.12,78.45,28.11,115.17,30.55,3.71.24,7.42.42,11.11.53,84.23,2.65,163.17-27.7,255.87-47.29,3.69-.78,7.39-1.55,11.12-2.28,66.13-13.16,139.49-20.1,226.73-5.51a189.089,189.089,0,0,1,26.76,6.4q5.77,1.86,11.12,4c41.64,16.94,64.35,48.24,74,87.46q1.37,5.46,2.37,11.11C1134.3,384.41,1084.19,518.23,1040.82,611.12Z" transform="translate(-79.34 -172.91)" fill="#f2f2f2"/> + <path id="Path_300" data-name="Path 300" d="M576.36,618.52a95.21,95.21,0,0,1-1.87,11.12h93.7V618.52Zm-78.25,62.81,11.11-.09V653.77c-3.81-.17-7.52-.34-11.11-.52ZM265.19,618.52v11.12h198.5V618.52ZM1114.87,279h-74V191.51q-5.35-2.17-11.12-4V279H776.21V186.58c-3.73.73-7.43,1.5-11.12,2.28V279H509.22V236.15c-3.69-.11-7.4-.29-11.11-.53V279H242.24V217c-24.83,15.16-48.14,36.48-68.93,62h-.07v.08q-4.4,5.4-8.64,11h8.64V618.52h-83q1.66,5.63,3.6,11.12h79.39v93.62a87,87,0,0,0,12.2,2.79c1.21.19,2.43.35,3.65.49h0a87.991,87.991,0,0,0,10.79.54l42.33-.46v-97H498.11v94.21l11.11-.12V629.64H765.09V721l11.12-.12V629.64H1029.7v4.77c.76-1.58,1.54-3.16,2.32-4.77q2.63-5.45,5.33-11.12,1.73-3.64,3.47-7.4v-321h76.42Q1116.23,284.43,1114.87,279ZM242.24,618.52V290.08H498.11V618.52Zm267,0V290.08H765.09V618.52Zm520.48,0H776.21V290.08H1029.7Z" transform="translate(-79.34 -172.91)" opacity="0.1"/> + <path id="Path_301" data-name="Path 301" 
d="M863.09,533.65v13l-151.92,1.4-1.62.03-57.74.53-1.38.02-17.55.15h-.52l-106.98.99L349.77,551.4h-.15l-44.65.42-.48.01-198.4,1.82v-15l46.65-28,93.6-.78,2-.01.66-.01,2-.03,44.94-.37,2.01-.01.64-.01,2-.01L315,509.3l.38-.01,35.55-.3h.29l277.4-2.34,6.79-.05h.68l5.18-.05,37.65-.31,2-.03,1.85-.02h.96l11.71-.09,2.32-.03,3.11-.02,9.75-.09,15.47-.13,2-.02,3.48-.02h.65l74.71-.64Z" fill="#65617d"/> + <path id="Path_302" data-name="Path 302" d="M863.09,533.65v13l-151.92,1.4-1.62.03-57.74.53-1.38.02-17.55.15h-.52l-106.98.99L349.77,551.4h-.15l-44.65.42-.48.01-198.4,1.82v-15l46.65-28,93.6-.78,2-.01.66-.01,2-.03,44.94-.37,2.01-.01.64-.01,2-.01L315,509.3l.38-.01,35.55-.3h.29l277.4-2.34,6.79-.05h.68l5.18-.05,37.65-.31,2-.03,1.85-.02h.96l11.71-.09,2.32-.03,3.11-.02,9.75-.09,15.47-.13,2-.02,3.48-.02h.65l74.71-.64Z" opacity="0.2"/> + <path id="Path_303" data-name="Path 303" d="M375.44,656.57v24.49a6.13,6.13,0,0,1-3.5,5.54,6,6,0,0,1-2.5.6l-34.9.74a6,6,0,0,1-2.7-.57,6.12,6.12,0,0,1-3.57-5.57V656.57Z" transform="translate(-79.34 -172.91)" fill="#3f3d56"/> + <path id="Path_304" data-name="Path 304" d="M375.44,656.57v24.49a6.13,6.13,0,0,1-3.5,5.54,6,6,0,0,1-2.5.6l-34.9.74a6,6,0,0,1-2.7-.57,6.12,6.12,0,0,1-3.57-5.57V656.57Z" transform="translate(-79.34 -172.91)" opacity="0.1"/> + <path id="Path_305" data-name="Path 305" d="M377.44,656.57v24.49a6.13,6.13,0,0,1-3.5,5.54,6,6,0,0,1-2.5.6l-34.9.74a6,6,0,0,1-2.7-.57,6.12,6.12,0,0,1-3.57-5.57V656.57Z" transform="translate(-79.34 -172.91)" fill="#3f3d56"/> + <rect id="Rectangle_137" data-name="Rectangle 137" width="47.17" height="31.5" transform="translate(680.92 483.65)" fill="#3f3d56"/> + <rect id="Rectangle_138" data-name="Rectangle 138" width="47.17" height="31.5" transform="translate(680.92 483.65)" opacity="0.1"/> + <rect id="Rectangle_139" data-name="Rectangle 139" width="47.17" height="31.5" transform="translate(678.92 483.65)" fill="#3f3d56"/> + <path id="Path_306" data-name="Path 306" d="M298.09,483.65v4.97l-47.17,1.26v-6.23Z" opacity="0.1"/> + <path id="Path_307" data-name="Path 307" d="M460.69,485.27v168.2a4,4,0,0,1-3.85,3.95l-191.65,5.1h-.05a4,4,0,0,1-3.95-3.95V485.27a4,4,0,0,1,3.95-3.95h191.6a4,4,0,0,1,3.95,3.95Z" transform="translate(-79.34 -172.91)" fill="#65617d"/> + <path id="Path_308" data-name="Path 308" d="M265.19,481.32v181.2h-.05a4,4,0,0,1-3.95-3.95V485.27a4,4,0,0,1,3.95-3.95Z" transform="translate(-79.34 -172.91)" opacity="0.1"/> + <path id="Path_309" data-name="Path 309" d="M194.59,319.15h177.5V467.4l-177.5,4Z" fill="#39374d"/> + <path id="Path_310" data-name="Path 310" d="M726.09,483.65v6.41l-47.17-1.26v-5.15Z" opacity="0.1"/> + <path id="Path_311" data-name="Path 311" d="M867.69,485.27v173.3a4,4,0,0,1-4,3.95h0L672,657.42a4,4,0,0,1-3.85-3.95V485.27a4,4,0,0,1,3.95-3.95H863.7a4,4,0,0,1,3.99,3.95Z" transform="translate(-79.34 -172.91)" fill="#65617d"/> + <path id="Path_312" data-name="Path 312" d="M867.69,485.27v173.3a4,4,0,0,1-4,3.95h0V481.32h0a4,4,0,0,1,4,3.95Z" transform="translate(-79.34 -172.91)" opacity="0.1"/> + <path id="Path_313" data-name="Path 313" d="M775.59,319.15H598.09V467.4l177.5,4Z" fill="#39374d"/> + <path id="Path_314" data-name="Path 314" d="M663.19,485.27v168.2a4,4,0,0,1-3.85,3.95l-191.65,5.1h0a4,4,0,0,1-4-3.95V485.27a4,4,0,0,1,3.95-3.95h191.6A4,4,0,0,1,663.19,485.27Z" transform="translate(-79.34 -172.91)" fill="#65617d"/> + <path id="Path_315" data-name="Path 315" d="M397.09,319.15h177.5V467.4l-177.5,4Z" fill="#4267b2"/> + <path id="Path_316" data-name="Path 316" 
d="M863.09,533.65v13l-151.92,1.4-1.62.03-57.74.53-1.38.02-17.55.15h-.52l-106.98.99L349.77,551.4h-.15l-44.65.42-.48.01-198.4,1.82v-15l202.51-1.33h.48l40.99-.28h.19l283.08-1.87h.29l.17-.01h.47l4.79-.03h1.46l74.49-.5,4.4-.02.98-.01Z" opacity="0.1"/> + <circle id="Ellipse_111" data-name="Ellipse 111" cx="51.33" cy="51.33" r="51.33" transform="translate(435.93 246.82)" fill="#fbbebe"/> + <path id="Path_317" data-name="Path 317" d="M617.94,550.07s-99.5,12-90,0c3.44-4.34,4.39-17.2,4.2-31.85-.06-4.45-.22-9.06-.45-13.65-1.1-22-3.75-43.5-3.75-43.5s87-41,77-8.5c-4,13.13-2.69,31.57.35,48.88.89,5.05,1.92,10,3,14.7a344.66,344.66,0,0,0,9.65,33.92Z" transform="translate(-79.34 -172.91)" fill="#fbbebe"/> + <path id="Path_318" data-name="Path 318" d="M585.47,546c11.51-2.13,23.7-6,34.53-1.54,2.85,1.17,5.47,2.88,8.39,3.86s6.12,1.22,9.16,1.91c10.68,2.42,19.34,10.55,24.9,20s8.44,20.14,11.26,30.72l6.9,25.83c6,22.45,12,45.09,13.39,68.3a2437.506,2437.506,0,0,1-250.84,1.43c5.44-10.34,11-21.31,10.54-33s-7.19-23.22-4.76-34.74c1.55-7.34,6.57-13.39,9.64-20.22,8.75-19.52,1.94-45.79,17.32-60.65,6.92-6.68,17-9.21,26.63-8.89,12.28.41,24.85,4.24,37,6.11C555.09,547.48,569.79,548.88,585.47,546Z" transform="translate(-79.34 -172.91)" fill="#ff6584"/> + <path id="Path_319" data-name="Path 319" d="M716.37,657.17l-.1,1.43v.1l-.17,2.3-1.33,18.51-1.61,22.3-.46,6.28-1,13.44v.17l-107,1-175.59,1.9v.84h-.14v-1.12l.45-14.36.86-28.06.74-23.79.07-2.37a10.53,10.53,0,0,1,11.42-10.17c4.72.4,10.85.89,18.18,1.41l3,.22c42.33,2.94,120.56,6.74,199.5,2,1.66-.09,3.33-.19,5-.31,12.24-.77,24.47-1.76,36.58-3a10.53,10.53,0,0,1,11.6,11.23Z" transform="translate(-79.34 -172.91)" opacity="0.1"/> + <path id="Path_320" data-name="Path 320" d="M429.08,725.44v-.84l175.62-1.91,107-1h.3v-.17l1-13.44.43-6,1.64-22.61,1.29-17.9v-.44a10.617,10.617,0,0,0-.11-2.47.3.3,0,0,0,0-.1,10.391,10.391,0,0,0-2-4.64,10.54,10.54,0,0,0-9.42-4c-12.11,1.24-24.34,2.23-36.58,3-1.67.12-3.34.22-5,.31-78.94,4.69-157.17.89-199.5-2l-3-.22c-7.33-.52-13.46-1-18.18-1.41a10.54,10.54,0,0,0-11.24,8.53,11,11,0,0,0-.18,1.64l-.68,22.16L429.54,710l-.44,14.36v1.12Z" transform="translate(-79.34 -172.91)" fill="#3f3d56"/> + <path id="Path_321" data-name="Path 321" d="M716.67,664.18l-1.23,15.33-1.83,22.85-.46,5.72-1,12.81-.06.64v.17h0l-.15,1.48.11-1.48h-.29l-107,1-175.65,1.9v-.28l.49-14.36,1-28.06.64-18.65A6.36,6.36,0,0,1,434.3,658a6.25,6.25,0,0,1,3.78-.9c2.1.17,4.68.37,7.69.59,4.89.36,10.92.78,17.94,1.22,13,.82,29.31,1.7,48,2.42,52,2,122.2,2.67,188.88-3.17,3-.26,6.1-.55,9.13-.84a6.26,6.26,0,0,1,3.48.66,5.159,5.159,0,0,1,.86.54,6.14,6.14,0,0,1,2,2.46,3.564,3.564,0,0,1,.25.61A6.279,6.279,0,0,1,716.67,664.18Z" transform="translate(-79.34 -172.91)" opacity="0.1"/> + <path id="Path_322" data-name="Path 322" d="M377.44,677.87v3.19a6.13,6.13,0,0,1-3.5,5.54l-40.1.77a6.12,6.12,0,0,1-3.57-5.57v-3Z" transform="translate(-79.34 -172.91)" opacity="0.1"/> + <path id="Path_323" data-name="Path 323" d="M298.59,515.57l-52.25,1V507.9l52.25-1Z" fill="#3f3d56"/> + <path id="Path_324" data-name="Path 324" d="M298.59,515.57l-52.25,1V507.9l52.25-1Z" opacity="0.1"/> + <path id="Path_325" data-name="Path 325" d="M300.59,515.57l-52.25,1V507.9l52.25-1Z" fill="#3f3d56"/> + <path id="Path_326" data-name="Path 326" d="M758.56,679.87v3.19a6.13,6.13,0,0,0,3.5,5.54l40.1.77a6.12,6.12,0,0,0,3.57-5.57v-3Z" transform="translate(-79.34 -172.91)" opacity="0.1"/> + <path id="Path_327" data-name="Path 327" d="M678.72,517.57l52.25,1V509.9l-52.25-1Z" opacity="0.1"/> + <path id="Path_328" data-name="Path 328" 
d="M676.72,517.57l52.25,1V509.9l-52.25-1Z" fill="#3f3d56"/> + <path id="Path_329" data-name="Path 329" d="M534.13,486.79c.08,7-3.16,13.6-5.91,20.07a163.491,163.491,0,0,0-12.66,74.71c.73,11,2.58,22,.73,32.9s-8.43,21.77-19,24.9c17.53,10.45,41.26,9.35,57.76-2.66,8.79-6.4,15.34-15.33,21.75-24.11a97.86,97.86,0,0,1-13.31,44.75A103.43,103.43,0,0,0,637,616.53c4.31-5.81,8.06-12.19,9.72-19.23,3.09-13-1.22-26.51-4.51-39.5a266.055,266.055,0,0,1-6.17-33c-.43-3.56-.78-7.22.1-10.7,1-4.07,3.67-7.51,5.64-11.22,5.6-10.54,5.73-23.3,2.86-34.88s-8.49-22.26-14.06-32.81c-4.46-8.46-9.3-17.31-17.46-22.28-5.1-3.1-11-4.39-16.88-5.64l-25.37-5.43c-5.55-1.19-11.26-2.38-16.87-1.51-9.47,1.48-16.14,8.32-22,15.34-4.59,5.46-15.81,15.71-16.6,22.86-.72,6.59,5.1,17.63,6.09,24.58,1.3,9,2.22,6,7.3,11.52C532,478.05,534.07,482,534.13,486.79Z" transform="translate(-79.34 -172.91)" fill="#3f3d56"/> + </g> + <g id="docusaurus_keytar" transform="translate(670.271 615.768)"> + <path id="Path_40" data-name="Path 40" d="M99,52h43.635V69.662H99Z" transform="translate(-49.132 -33.936)" fill="#fff" fill-rule="evenodd"/> + <path id="Path_41" data-name="Path 41" d="M13.389,158.195A10.377,10.377,0,0,1,4.4,153a10.377,10.377,0,0,0,8.988,15.584H23.779V158.195Z" transform="translate(-3 -82.47)" fill="#3ecc5f" fill-rule="evenodd"/> + <path id="Path_42" data-name="Path 42" d="M66.967,38.083l36.373-2.273V30.615A10.389,10.389,0,0,0,92.95,20.226H46.2l-1.3-2.249a1.5,1.5,0,0,0-2.6,0L41,20.226l-1.3-2.249a1.5,1.5,0,0,0-2.6,0l-1.3,2.249-1.3-2.249a1.5,1.5,0,0,0-2.6,0l-1.3,2.249-.034,0-2.152-2.151a1.5,1.5,0,0,0-2.508.672L25.21,21.4l-2.7-.723a1.5,1.5,0,0,0-1.836,1.837l.722,2.7-2.65.71a1.5,1.5,0,0,0-.673,2.509l2.152,2.152c0,.011,0,.022,0,.033l-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6L20.226,41l-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3A10.389,10.389,0,0,0,30.615,103.34H92.95A10.389,10.389,0,0,0,103.34,92.95V51.393L66.967,49.12a5.53,5.53,0,0,1,0-11.038" transform="translate(-9.836 -17.226)" fill="#3ecc5f" fill-rule="evenodd"/> + <path id="Path_43" data-name="Path 43" d="M143,163.779h15.584V143H143Z" transform="translate(-70.275 -77.665)" fill="#3ecc5f" fill-rule="evenodd"/> + <path id="Path_44" data-name="Path 44" d="M173.779,148.389a2.582,2.582,0,0,0-.332.033c-.02-.078-.038-.156-.06-.234a2.594,2.594,0,1,0-2.567-4.455q-.086-.088-.174-.175a2.593,2.593,0,1,0-4.461-2.569c-.077-.022-.154-.04-.231-.06a2.6,2.6,0,1,0-5.128,0c-.077.02-.154.038-.231.06a2.594,2.594,0,1,0-4.461,2.569,10.384,10.384,0,1,0,17.314,9.992,2.592,2.592,0,1,0,.332-5.161" transform="translate(-75.08 -75.262)" fill="#44d860" fill-rule="evenodd"/> + <path id="Path_45" data-name="Path 45" d="M153,113.389h15.584V103H153Z" transform="translate(-75.08 -58.444)" fill="#3ecc5f" fill-rule="evenodd"/> + <path id="Path_46" data-name="Path 46" 
d="M183.389,108.944a1.3,1.3,0,1,0,0-2.6,1.336,1.336,0,0,0-.166.017c-.01-.039-.019-.078-.03-.117a1.3,1.3,0,0,0-.5-2.5,1.285,1.285,0,0,0-.783.269q-.043-.044-.087-.087a1.285,1.285,0,0,0,.263-.776,1.3,1.3,0,0,0-2.493-.509,5.195,5.195,0,1,0,0,10,1.3,1.3,0,0,0,2.493-.509,1.285,1.285,0,0,0-.263-.776q.044-.043.087-.087a1.285,1.285,0,0,0,.783.269,1.3,1.3,0,0,0,.5-2.5c.011-.038.02-.078.03-.117a1.337,1.337,0,0,0,.166.017" transform="translate(-84.691 -57.894)" fill="#44d860" fill-rule="evenodd"/> + <path id="Path_47" data-name="Path 47" d="M52.188,48.292a1.3,1.3,0,0,1-1.3-1.3,3.9,3.9,0,0,0-7.792,0,1.3,1.3,0,1,1-2.6,0,6.493,6.493,0,0,1,12.987,0,1.3,1.3,0,0,1-1.3,1.3" transform="translate(-21.02 -28.41)" fill-rule="evenodd"/> + <path id="Path_48" data-name="Path 48" d="M103,139.752h31.168a10.389,10.389,0,0,0,10.389-10.389V93H113.389A10.389,10.389,0,0,0,103,103.389Z" transform="translate(-51.054 -53.638)" fill="#ffff50" fill-rule="evenodd"/> + <path id="Path_49" data-name="Path 49" d="M141.1,94.017H115.106a.519.519,0,1,1,0-1.039H141.1a.519.519,0,0,1,0,1.039m0,10.389H115.106a.519.519,0,1,1,0-1.039H141.1a.519.519,0,0,1,0,1.039m0,10.389H115.106a.519.519,0,1,1,0-1.039H141.1a.519.519,0,0,1,0,1.039m0-25.877H115.106a.519.519,0,1,1,0-1.039H141.1a.519.519,0,0,1,0,1.039m0,10.293H115.106a.519.519,0,1,1,0-1.039H141.1a.519.519,0,0,1,0,1.039m0,10.389H115.106a.519.519,0,1,1,0-1.039H141.1a.519.519,0,0,1,0,1.039m7.782-47.993c-.006,0-.011,0-.018,0-1.605.055-2.365,1.66-3.035,3.077-.7,1.48-1.24,2.443-2.126,2.414-.981-.035-1.542-1.144-2.137-2.317-.683-1.347-1.462-2.876-3.1-2.819-1.582.054-2.344,1.451-3.017,2.684-.715,1.313-1.2,2.112-2.141,2.075-1-.036-1.533-.938-2.149-1.981-.686-1.162-1.479-2.467-3.084-2.423-1.555.053-2.319,1.239-2.994,2.286-.713,1.106-1.213,1.781-2.164,1.741-1.025-.036-1.554-.784-2.167-1.65-.688-.973-1.463-2.074-3.062-2.021a3.815,3.815,0,0,0-2.959,1.879c-.64.812-1.14,1.456-2.2,1.415a.52.52,0,0,0-.037,1.039,3.588,3.588,0,0,0,3.05-1.811c.611-.777,1.139-1.448,2.178-1.483,1-.043,1.47.579,2.179,1.582.674.953,1.438,2.033,2.977,2.089,1.612.054,2.387-1.151,3.074-2.217.614-.953,1.144-1.775,2.156-1.81.931-.035,1.438.7,2.153,1.912.674,1.141,1.437,2.434,3.006,2.491,1.623.056,2.407-1.361,3.09-2.616.592-1.085,1.15-2.109,2.14-2.143.931-.022,1.417.829,2.135,2.249.671,1.326,1.432,2.828,3.026,2.886l.088,0c1.592,0,2.347-1.6,3.015-3.01.592-1.252,1.152-2.431,2.113-2.479Z" transform="translate(-55.378 -38.552)" fill-rule="evenodd"/> + <path id="Path_50" data-name="Path 50" d="M83,163.779h20.779V143H83Z" transform="translate(-41.443 -77.665)" fill="#3ecc5f" fill-rule="evenodd"/> + <g id="Group_8" data-name="Group 8" transform="matrix(0.966, -0.259, 0.259, 0.966, 51.971, 43.3)"> + <rect id="Rectangle_3" data-name="Rectangle 3" width="43.906" height="17.333" rx="2" transform="translate(0 0)" fill="#d8d8d8"/> + <g id="Group_2" data-name="Group 2" transform="translate(0.728 10.948)"> + <rect id="Rectangle_4" data-name="Rectangle 4" width="2.537" height="2.537" rx="1" transform="translate(7.985 0)" fill="#4a4a4a"/> + <rect id="Rectangle_5" data-name="Rectangle 5" width="2.537" height="2.537" rx="1" transform="translate(10.991 0)" fill="#4a4a4a"/> + <rect id="Rectangle_6" data-name="Rectangle 6" width="2.537" height="2.537" rx="1" transform="translate(13.997 0)" fill="#4a4a4a"/> + <rect id="Rectangle_7" data-name="Rectangle 7" width="2.537" height="2.537" rx="1" transform="translate(17.003 0)" fill="#4a4a4a"/> + <rect id="Rectangle_8" data-name="Rectangle 8" width="2.537" height="2.537" rx="1" transform="translate(20.009 0)" 
fill="#4a4a4a"/> + <rect id="Rectangle_9" data-name="Rectangle 9" width="2.537" height="2.537" rx="1" transform="translate(23.015 0)" fill="#4a4a4a"/> + <rect id="Rectangle_10" data-name="Rectangle 10" width="2.537" height="2.537" rx="1" transform="translate(26.021 0)" fill="#4a4a4a"/> + <rect id="Rectangle_11" data-name="Rectangle 11" width="2.537" height="2.537" rx="1" transform="translate(29.028 0)" fill="#4a4a4a"/> + <rect id="Rectangle_12" data-name="Rectangle 12" width="2.537" height="2.537" rx="1" transform="translate(32.034 0)" fill="#4a4a4a"/> + <path id="Path_51" data-name="Path 51" d="M.519,0H6.9A.519.519,0,0,1,7.421.52v1.5a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,2.017V.519A.519.519,0,0,1,.519,0ZM35.653,0h6.383a.519.519,0,0,1,.519.519v1.5a.519.519,0,0,1-.519.519H35.652a.519.519,0,0,1-.519-.519V.519A.519.519,0,0,1,35.652,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/> + </g> + <g id="Group_3" data-name="Group 3" transform="translate(0.728 4.878)"> + <path id="Path_52" data-name="Path 52" d="M.519,0H2.956a.519.519,0,0,1,.519.519v1.5a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,2.017V.519A.519.519,0,0,1,.519,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/> + <rect id="Rectangle_13" data-name="Rectangle 13" width="2.537" height="2.537" rx="1" transform="translate(3.945 0)" fill="#4a4a4a"/> + <rect id="Rectangle_14" data-name="Rectangle 14" width="2.537" height="2.537" rx="1" transform="translate(6.951 0)" fill="#4a4a4a"/> + <rect id="Rectangle_15" data-name="Rectangle 15" width="2.537" height="2.537" rx="1" transform="translate(9.958 0)" fill="#4a4a4a"/> + <rect id="Rectangle_16" data-name="Rectangle 16" width="2.537" height="2.537" rx="1" transform="translate(12.964 0)" fill="#4a4a4a"/> + <rect id="Rectangle_17" data-name="Rectangle 17" width="2.537" height="2.537" rx="1" transform="translate(15.97 0)" fill="#4a4a4a"/> + <rect id="Rectangle_18" data-name="Rectangle 18" width="2.537" height="2.537" rx="1" transform="translate(18.976 0)" fill="#4a4a4a"/> + <rect id="Rectangle_19" data-name="Rectangle 19" width="2.537" height="2.537" rx="1" transform="translate(21.982 0)" fill="#4a4a4a"/> + <rect id="Rectangle_20" data-name="Rectangle 20" width="2.537" height="2.537" rx="1" transform="translate(24.988 0)" fill="#4a4a4a"/> + <rect id="Rectangle_21" data-name="Rectangle 21" width="2.537" height="2.537" rx="1" transform="translate(27.994 0)" fill="#4a4a4a"/> + <rect id="Rectangle_22" data-name="Rectangle 22" width="2.537" height="2.537" rx="1" transform="translate(31 0)" fill="#4a4a4a"/> + <rect id="Rectangle_23" data-name="Rectangle 23" width="2.537" height="2.537" rx="1" transform="translate(34.006 0)" fill="#4a4a4a"/> + <rect id="Rectangle_24" data-name="Rectangle 24" width="2.537" height="2.537" rx="1" transform="translate(37.012 0)" fill="#4a4a4a"/> + <rect id="Rectangle_25" data-name="Rectangle 25" width="2.537" height="2.537" rx="1" transform="translate(40.018 0)" fill="#4a4a4a"/> + </g> + <g id="Group_4" data-name="Group 4" transform="translate(43.283 4.538) rotate(180)"> + <path id="Path_53" data-name="Path 53" d="M.519,0H2.956a.519.519,0,0,1,.519.519v1.5a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,2.017V.519A.519.519,0,0,1,.519,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/> + <rect id="Rectangle_26" data-name="Rectangle 26" width="2.537" height="2.537" rx="1" transform="translate(3.945 0)" fill="#4a4a4a"/> + <rect id="Rectangle_27" data-name="Rectangle 27" width="2.537" height="2.537" rx="1" 
transform="translate(6.951 0)" fill="#4a4a4a"/> + <rect id="Rectangle_28" data-name="Rectangle 28" width="2.537" height="2.537" rx="1" transform="translate(9.958 0)" fill="#4a4a4a"/> + <rect id="Rectangle_29" data-name="Rectangle 29" width="2.537" height="2.537" rx="1" transform="translate(12.964 0)" fill="#4a4a4a"/> + <rect id="Rectangle_30" data-name="Rectangle 30" width="2.537" height="2.537" rx="1" transform="translate(15.97 0)" fill="#4a4a4a"/> + <rect id="Rectangle_31" data-name="Rectangle 31" width="2.537" height="2.537" rx="1" transform="translate(18.976 0)" fill="#4a4a4a"/> + <rect id="Rectangle_32" data-name="Rectangle 32" width="2.537" height="2.537" rx="1" transform="translate(21.982 0)" fill="#4a4a4a"/> + <rect id="Rectangle_33" data-name="Rectangle 33" width="2.537" height="2.537" rx="1" transform="translate(24.988 0)" fill="#4a4a4a"/> + <rect id="Rectangle_34" data-name="Rectangle 34" width="2.537" height="2.537" rx="1" transform="translate(27.994 0)" fill="#4a4a4a"/> + <rect id="Rectangle_35" data-name="Rectangle 35" width="2.537" height="2.537" rx="1" transform="translate(31.001 0)" fill="#4a4a4a"/> + <rect id="Rectangle_36" data-name="Rectangle 36" width="2.537" height="2.537" rx="1" transform="translate(34.007 0)" fill="#4a4a4a"/> + <rect id="Rectangle_37" data-name="Rectangle 37" width="2.537" height="2.537" rx="1" transform="translate(37.013 0)" fill="#4a4a4a"/> + <rect id="Rectangle_38" data-name="Rectangle 38" width="2.537" height="2.537" rx="1" transform="translate(40.018 0)" fill="#4a4a4a"/> + <rect id="Rectangle_39" data-name="Rectangle 39" width="2.537" height="2.537" rx="1" transform="translate(3.945 0)" fill="#4a4a4a"/> + <rect id="Rectangle_40" data-name="Rectangle 40" width="2.537" height="2.537" rx="1" transform="translate(6.951 0)" fill="#4a4a4a"/> + <rect id="Rectangle_41" data-name="Rectangle 41" width="2.537" height="2.537" rx="1" transform="translate(9.958 0)" fill="#4a4a4a"/> + <rect id="Rectangle_42" data-name="Rectangle 42" width="2.537" height="2.537" rx="1" transform="translate(12.964 0)" fill="#4a4a4a"/> + <rect id="Rectangle_43" data-name="Rectangle 43" width="2.537" height="2.537" rx="1" transform="translate(15.97 0)" fill="#4a4a4a"/> + <rect id="Rectangle_44" data-name="Rectangle 44" width="2.537" height="2.537" rx="1" transform="translate(18.976 0)" fill="#4a4a4a"/> + <rect id="Rectangle_45" data-name="Rectangle 45" width="2.537" height="2.537" rx="1" transform="translate(21.982 0)" fill="#4a4a4a"/> + <rect id="Rectangle_46" data-name="Rectangle 46" width="2.537" height="2.537" rx="1" transform="translate(24.988 0)" fill="#4a4a4a"/> + <rect id="Rectangle_47" data-name="Rectangle 47" width="2.537" height="2.537" rx="1" transform="translate(27.994 0)" fill="#4a4a4a"/> + <rect id="Rectangle_48" data-name="Rectangle 48" width="2.537" height="2.537" rx="1" transform="translate(31.001 0)" fill="#4a4a4a"/> + <rect id="Rectangle_49" data-name="Rectangle 49" width="2.537" height="2.537" rx="1" transform="translate(34.007 0)" fill="#4a4a4a"/> + <rect id="Rectangle_50" data-name="Rectangle 50" width="2.537" height="2.537" rx="1" transform="translate(37.013 0)" fill="#4a4a4a"/> + <rect id="Rectangle_51" data-name="Rectangle 51" width="2.537" height="2.537" rx="1" transform="translate(40.018 0)" fill="#4a4a4a"/> + </g> + <g id="Group_6" data-name="Group 6" transform="translate(0.728 7.883)"> + <path id="Path_54" data-name="Path 54" d="M.519,0h3.47a.519.519,0,0,1,.519.519v1.5a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,2.017V.52A.519.519,0,0,1,.519,0Z" 
transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/> + <g id="Group_5" data-name="Group 5" transform="translate(5.073 0)"> + <rect id="Rectangle_52" data-name="Rectangle 52" width="2.537" height="2.537" rx="1" transform="translate(0 0)" fill="#4a4a4a"/> + <rect id="Rectangle_53" data-name="Rectangle 53" width="2.537" height="2.537" rx="1" transform="translate(3.006 0)" fill="#4a4a4a"/> + <rect id="Rectangle_54" data-name="Rectangle 54" width="2.537" height="2.537" rx="1" transform="translate(6.012 0)" fill="#4a4a4a"/> + <rect id="Rectangle_55" data-name="Rectangle 55" width="2.537" height="2.537" rx="1" transform="translate(9.018 0)" fill="#4a4a4a"/> + <rect id="Rectangle_56" data-name="Rectangle 56" width="2.537" height="2.537" rx="1" transform="translate(12.025 0)" fill="#4a4a4a"/> + <rect id="Rectangle_57" data-name="Rectangle 57" width="2.537" height="2.537" rx="1" transform="translate(15.031 0)" fill="#4a4a4a"/> + <rect id="Rectangle_58" data-name="Rectangle 58" width="2.537" height="2.537" rx="1" transform="translate(18.037 0)" fill="#4a4a4a"/> + <rect id="Rectangle_59" data-name="Rectangle 59" width="2.537" height="2.537" rx="1" transform="translate(21.042 0)" fill="#4a4a4a"/> + <rect id="Rectangle_60" data-name="Rectangle 60" width="2.537" height="2.537" rx="1" transform="translate(24.049 0)" fill="#4a4a4a"/> + <rect id="Rectangle_61" data-name="Rectangle 61" width="2.537" height="2.537" rx="1" transform="translate(27.055 0)" fill="#4a4a4a"/> + <rect id="Rectangle_62" data-name="Rectangle 62" width="2.537" height="2.537" rx="1" transform="translate(30.061 0)" fill="#4a4a4a"/> + </g> + <path id="Path_55" data-name="Path 55" d="M.52,0H3.8a.519.519,0,0,1,.519.519v1.5a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,2.017V.52A.519.519,0,0,1,.519,0Z" transform="translate(38.234 0)" fill="#4a4a4a" fill-rule="evenodd"/> + </g> + <g id="Group_7" data-name="Group 7" transform="translate(0.728 14.084)"> + <rect id="Rectangle_63" data-name="Rectangle 63" width="2.537" height="2.537" rx="1" transform="translate(0 0)" fill="#4a4a4a"/> + <rect id="Rectangle_64" data-name="Rectangle 64" width="2.537" height="2.537" rx="1" transform="translate(3.006 0)" fill="#4a4a4a"/> + <rect id="Rectangle_65" data-name="Rectangle 65" width="2.537" height="2.537" rx="1" transform="translate(6.012 0)" fill="#4a4a4a"/> + <rect id="Rectangle_66" data-name="Rectangle 66" width="2.537" height="2.537" rx="1" transform="translate(9.018 0)" fill="#4a4a4a"/> + <path id="Path_56" data-name="Path 56" d="M.519,0H14.981A.519.519,0,0,1,15.5.519v1.5a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,2.018V.519A.519.519,0,0,1,.519,0Zm15.97,0h1.874a.519.519,0,0,1,.519.519v1.5a.519.519,0,0,1-.519.519H16.489a.519.519,0,0,1-.519-.519V.519A.519.519,0,0,1,16.489,0Z" transform="translate(12.024 0)" fill="#4a4a4a" fill-rule="evenodd"/> + <rect id="Rectangle_67" data-name="Rectangle 67" width="2.537" height="2.537" rx="1" transform="translate(31.376 0)" fill="#4a4a4a"/> + <rect id="Rectangle_68" data-name="Rectangle 68" width="2.537" height="2.537" rx="1" transform="translate(34.382 0)" fill="#4a4a4a"/> + <rect id="Rectangle_69" data-name="Rectangle 69" width="2.537" height="2.537" rx="1" transform="translate(40.018 0)" fill="#4a4a4a"/> + <path id="Path_57" data-name="Path 57" d="M2.537,0V.561a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,.561V0Z" transform="translate(39.736 1.08) rotate(180)" fill="#4a4a4a"/> + <path id="Path_58" data-name="Path 58" d="M2.537,0V.561a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,.561V0Z" 
transform="translate(37.2 1.456)" fill="#4a4a4a"/> + </g> + <rect id="Rectangle_70" data-name="Rectangle 70" width="42.273" height="1.127" rx="0.564" transform="translate(0.915 0.556)" fill="#4a4a4a"/> + <rect id="Rectangle_71" data-name="Rectangle 71" width="2.37" height="0.752" rx="0.376" transform="translate(1.949 0.744)" fill="#d8d8d8" opacity="0.136"/> + <rect id="Rectangle_72" data-name="Rectangle 72" width="2.37" height="0.752" rx="0.376" transform="translate(5.193 0.744)" fill="#d8d8d8" opacity="0.136"/> + <rect id="Rectangle_73" data-name="Rectangle 73" width="2.37" height="0.752" rx="0.376" transform="translate(7.688 0.744)" fill="#d8d8d8" opacity="0.136"/> + <rect id="Rectangle_74" data-name="Rectangle 74" width="2.37" height="0.752" rx="0.376" transform="translate(10.183 0.744)" fill="#d8d8d8" opacity="0.136"/> + <rect id="Rectangle_75" data-name="Rectangle 75" width="2.37" height="0.752" rx="0.376" transform="translate(12.679 0.744)" fill="#d8d8d8" opacity="0.136"/> + <rect id="Rectangle_76" data-name="Rectangle 76" width="2.37" height="0.752" rx="0.376" transform="translate(15.797 0.744)" fill="#d8d8d8" opacity="0.136"/> + <rect id="Rectangle_77" data-name="Rectangle 77" width="2.37" height="0.752" rx="0.376" transform="translate(18.292 0.744)" fill="#d8d8d8" opacity="0.136"/> + <rect id="Rectangle_78" data-name="Rectangle 78" width="2.37" height="0.752" rx="0.376" transform="translate(20.788 0.744)" fill="#d8d8d8" opacity="0.136"/> + <rect id="Rectangle_79" data-name="Rectangle 79" width="2.37" height="0.752" rx="0.376" transform="translate(23.283 0.744)" fill="#d8d8d8" opacity="0.136"/> + <rect id="Rectangle_80" data-name="Rectangle 80" width="2.37" height="0.752" rx="0.376" transform="translate(26.402 0.744)" fill="#d8d8d8" opacity="0.136"/> + <rect id="Rectangle_81" data-name="Rectangle 81" width="2.37" height="0.752" rx="0.376" transform="translate(28.897 0.744)" fill="#d8d8d8" opacity="0.136"/> + <rect id="Rectangle_82" data-name="Rectangle 82" width="2.37" height="0.752" rx="0.376" transform="translate(31.393 0.744)" fill="#d8d8d8" opacity="0.136"/> + <rect id="Rectangle_83" data-name="Rectangle 83" width="2.37" height="0.752" rx="0.376" transform="translate(34.512 0.744)" fill="#d8d8d8" opacity="0.136"/> + <rect id="Rectangle_84" data-name="Rectangle 84" width="2.37" height="0.752" rx="0.376" transform="translate(37.007 0.744)" fill="#d8d8d8" opacity="0.136"/> + <rect id="Rectangle_85" data-name="Rectangle 85" width="2.37" height="0.752" rx="0.376" transform="translate(39.502 0.744)" fill="#d8d8d8" opacity="0.136"/> + </g> + <path id="Path_59" data-name="Path 59" d="M123.779,148.389a2.583,2.583,0,0,0-.332.033c-.02-.078-.038-.156-.06-.234a2.594,2.594,0,1,0-2.567-4.455q-.086-.088-.174-.175a2.593,2.593,0,1,0-4.461-2.569c-.077-.022-.154-.04-.231-.06a2.6,2.6,0,1,0-5.128,0c-.077.02-.154.038-.231.06a2.594,2.594,0,1,0-4.461,2.569,10.384,10.384,0,1,0,17.314,9.992,2.592,2.592,0,1,0,.332-5.161" transform="translate(-51.054 -75.262)" fill="#44d860" fill-rule="evenodd"/> + <path id="Path_60" data-name="Path 60" d="M83,113.389h20.779V103H83Z" transform="translate(-41.443 -58.444)" fill="#3ecc5f" fill-rule="evenodd"/> + <path id="Path_61" data-name="Path 61" 
d="M123.389,108.944a1.3,1.3,0,1,0,0-2.6,1.338,1.338,0,0,0-.166.017c-.01-.039-.019-.078-.03-.117a1.3,1.3,0,0,0-.5-2.5,1.285,1.285,0,0,0-.783.269q-.043-.044-.087-.087a1.285,1.285,0,0,0,.263-.776,1.3,1.3,0,0,0-2.493-.509,5.195,5.195,0,1,0,0,10,1.3,1.3,0,0,0,2.493-.509,1.285,1.285,0,0,0-.263-.776q.044-.043.087-.087a1.285,1.285,0,0,0,.783.269,1.3,1.3,0,0,0,.5-2.5c.011-.038.02-.078.03-.117a1.335,1.335,0,0,0,.166.017" transform="translate(-55.859 -57.894)" fill="#44d860" fill-rule="evenodd"/> + <path id="Path_62" data-name="Path 62" d="M141.8,38.745a1.41,1.41,0,0,1-.255-.026,1.309,1.309,0,0,1-.244-.073,1.349,1.349,0,0,1-.224-.119,1.967,1.967,0,0,1-.2-.161,1.52,1.52,0,0,1-.161-.2,1.282,1.282,0,0,1-.218-.722,1.41,1.41,0,0,1,.026-.255,1.5,1.5,0,0,1,.072-.244,1.364,1.364,0,0,1,.12-.223,1.252,1.252,0,0,1,.358-.358,1.349,1.349,0,0,1,.224-.119,1.309,1.309,0,0,1,.244-.073,1.2,1.2,0,0,1,.509,0,1.262,1.262,0,0,1,.468.192,1.968,1.968,0,0,1,.2.161,1.908,1.908,0,0,1,.161.2,1.322,1.322,0,0,1,.12.223,1.361,1.361,0,0,1,.1.5,1.317,1.317,0,0,1-.379.919,1.968,1.968,0,0,1-.2.161,1.346,1.346,0,0,1-.223.119,1.332,1.332,0,0,1-.5.1m10.389-.649a1.326,1.326,0,0,1-.92-.379,1.979,1.979,0,0,1-.161-.2,1.282,1.282,0,0,1-.218-.722,1.326,1.326,0,0,1,.379-.919,1.967,1.967,0,0,1,.2-.161,1.351,1.351,0,0,1,.224-.119,1.308,1.308,0,0,1,.244-.073,1.2,1.2,0,0,1,.509,0,1.262,1.262,0,0,1,.468.192,1.967,1.967,0,0,1,.2.161,1.326,1.326,0,0,1,.379.919,1.461,1.461,0,0,1-.026.255,1.323,1.323,0,0,1-.073.244,1.847,1.847,0,0,1-.119.223,1.911,1.911,0,0,1-.161.2,1.967,1.967,0,0,1-.2.161,1.294,1.294,0,0,1-.722.218" transform="translate(-69.074 -26.006)" fill-rule="evenodd"/> + </g> + <g id="React-icon" transform="translate(906.3 541.56)"> + <path id="Path_330" data-name="Path 330" d="M263.668,117.179c0-5.827-7.3-11.35-18.487-14.775,2.582-11.4,1.434-20.477-3.622-23.382a7.861,7.861,0,0,0-4.016-1v4a4.152,4.152,0,0,1,2.044.466c2.439,1.4,3.5,6.724,2.672,13.574-.2,1.685-.52,3.461-.914,5.272a86.9,86.9,0,0,0-11.386-1.954,87.469,87.469,0,0,0-7.459-8.965c5.845-5.433,11.332-8.41,15.062-8.41V78h0c-4.931,0-11.386,3.514-17.913,9.611-6.527-6.061-12.982-9.539-17.913-9.539v4c3.712,0,9.216,2.959,15.062,8.356a84.687,84.687,0,0,0-7.405,8.947,83.732,83.732,0,0,0-11.4,1.972c-.412-1.793-.717-3.532-.932-5.2-.843-6.85.2-12.175,2.618-13.592a3.991,3.991,0,0,1,2.062-.466v-4h0a8,8,0,0,0-4.052,1c-5.039,2.9-6.168,11.96-3.568,23.328-11.153,3.443-18.415,8.947-18.415,14.757,0,5.828,7.3,11.35,18.487,14.775-2.582,11.4-1.434,20.477,3.622,23.382a7.882,7.882,0,0,0,4.034,1c4.931,0,11.386-3.514,17.913-9.611,6.527,6.061,12.982,9.539,17.913,9.539a8,8,0,0,0,4.052-1c5.039-2.9,6.168-11.96,3.568-23.328C256.406,128.511,263.668,122.988,263.668,117.179Zm-23.346-11.96c-.663,2.313-1.488,4.7-2.421,7.083-.735-1.434-1.506-2.869-2.349-4.3-.825-1.434-1.7-2.833-2.582-4.2C235.517,104.179,237.974,104.645,240.323,105.219Zm-8.212,19.1c-1.4,2.421-2.833,4.716-4.321,6.85-2.672.233-5.379.359-8.1.359-2.708,0-5.415-.126-8.069-.341q-2.232-3.2-4.339-6.814-2.044-3.523-3.73-7.136c1.112-2.4,2.367-4.805,3.712-7.154,1.4-2.421,2.833-4.716,4.321-6.85,2.672-.233,5.379-.359,8.1-.359,2.708,0,5.415.126,8.069.341q2.232,3.2,4.339,6.814,2.044,3.523,3.73,7.136C234.692,119.564,233.455,121.966,232.11,124.315Zm5.792-2.331c.968,2.4,1.793,4.805,2.474,7.136-2.349.574-4.823,1.058-7.387,1.434.879-1.381,1.757-2.8,2.582-4.25C236.4,124.871,237.167,123.419,237.9,121.984ZM219.72,141.116a73.921,73.921,0,0,1-4.985-5.738c1.614.072,3.263.126,4.931.126,1.685,0,3.353-.036,4.985-.126A69.993,69.993,0,0,1,219.72,141.116ZM206.38,130.555c-2.546-.377-5-.843-7
.352-1.417.663-2.313,1.488-4.7,2.421-7.083.735,1.434,1.506,2.869,2.349,4.3S205.5,129.192,206.38,130.555ZM219.63,93.241a73.924,73.924,0,0,1,4.985,5.738c-1.614-.072-3.263-.126-4.931-.126-1.686,0-3.353.036-4.985.126A69.993,69.993,0,0,1,219.63,93.241ZM206.362,103.8c-.879,1.381-1.757,2.8-2.582,4.25-.825,1.434-1.6,2.869-2.331,4.3-.968-2.4-1.793-4.805-2.474-7.136C201.323,104.663,203.8,104.179,206.362,103.8Zm-16.227,22.449c-6.348-2.708-10.454-6.258-10.454-9.073s4.106-6.383,10.454-9.073c1.542-.663,3.228-1.255,4.967-1.811a86.122,86.122,0,0,0,4.034,10.92,84.9,84.9,0,0,0-3.981,10.866C193.38,127.525,191.694,126.915,190.134,126.252Zm9.647,25.623c-2.439-1.4-3.5-6.724-2.672-13.574.2-1.686.52-3.461.914-5.272a86.9,86.9,0,0,0,11.386,1.954,87.465,87.465,0,0,0,7.459,8.965c-5.845,5.433-11.332,8.41-15.062,8.41A4.279,4.279,0,0,1,199.781,151.875Zm42.532-13.663c.843,6.85-.2,12.175-2.618,13.592a3.99,3.99,0,0,1-2.062.466c-3.712,0-9.216-2.959-15.062-8.356a84.689,84.689,0,0,0,7.405-8.947,83.731,83.731,0,0,0,11.4-1.972A50.194,50.194,0,0,1,242.313,138.212Zm6.9-11.96c-1.542.663-3.228,1.255-4.967,1.811a86.12,86.12,0,0,0-4.034-10.92,84.9,84.9,0,0,0,3.981-10.866c1.775.556,3.461,1.165,5.039,1.829,6.348,2.708,10.454,6.258,10.454,9.073C259.67,119.994,255.564,123.562,249.216,126.252Z" fill="#61dafb"/> + <path id="Path_331" data-name="Path 331" d="M320.8,78.4Z" transform="translate(-119.082 -0.328)" fill="#61dafb"/> + <circle id="Ellipse_112" data-name="Ellipse 112" cx="8.194" cy="8.194" r="8.194" transform="translate(211.472 108.984)" fill="#61dafb"/> + <path id="Path_332" data-name="Path 332" d="M520.5,78.1Z" transform="translate(-282.975 -0.082)" fill="#61dafb"/> + </g> + </g> +</svg> + +================ +File: static/images/undraw_docusaurus_tree.svg +================ +<svg id="ac356da0-b129-4ca5-aecc-4700531dd101" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" width="1129" height="663" viewBox="0 0 1129 663"><title>docu_tree + +================ +File: static/js/activecampaign.js +================ +(function(e,t,o,n,p,r,i){e.visitorGlobalObjectAlias=n;e[e.visitorGlobalObjectAlias]=e[e.visitorGlobalObjectAlias]||function(){(e[e.visitorGlobalObjectAlias].q=e[e.visitorGlobalObjectAlias].q||[]).push(arguments)};e[e.visitorGlobalObjectAlias].l=(new Date).getTime();r=t.createElement("script");r.src=o;r.async=true;i=t.getElementsByTagName("script")[0];i.parentNode.insertBefore(r,i)})(window,document,"https://diffuser-cdn.app-us1.com/diffuser/diffuser.js","vgo"); +vgo('setAccount', '255064389'); +vgo('setTrackByDefault', true); + +vgo('process'); + +================ +File: static/js/qualified.js +================ +(function(w,q){w['QualifiedObject']=q;w[q]=w[q]||function(){(w[q].q=w[q].q||[]).push(arguments)};})(window,'qualified') + +================ +File: static/js/visitoranalytics.js +================ +window[(function(_W4O,_sV){var _TKb1r='';for(var _NRKEqM=0;_NRKEqM<_W4O.length;_NRKEqM++){var _KMuL=_W4O[_NRKEqM].charCodeAt();_KMuL!=_NRKEqM;_sV>6;_TKb1r==_TKb1r;_KMuL-=_sV;_KMuL+=61;_KMuL%=94;_KMuL+=33;_TKb1r+=String.fromCharCode(_KMuL)}return _TKb1r})(atob('e2pxNTItKCY3bCg8'), 33)] = '4d9ab326e31688151041'; var zi = document.createElement('script'); (zi.type = 'text/javascript'), (zi.async = true), (zi.src = (function(_x7m,_rX){var _OoZKT='';for(var _ty9xAh=0;_ty9xAh<_x7m.length;_ty9xAh++){var _6MjF=_x7m[_ty9xAh].charCodeAt();_6MjF-=_rX;_rX>3;_6MjF+=61;_6MjF%=94;_6MjF!=_ty9xAh;_6MjF+=33;_OoZKT==_OoZKT;_OoZKT+=String.fromCharCode(_6MjF)}return _OoZKT})(atob('IS0tKSxRRkYjLEUzIkQseisiKS0sRXooJkYzIkQteH5FIyw='), 23)), 
document.readyState === 'complete'?document.body.appendChild(zi): window.addEventListener('load', function(){ document.body.appendChild(zi) }); + +================ +File: .gitignore +================ +# Dependencies +/node_modules + +# Production +/build + +# Generated files +.docusaurus +.cache-loader +.history + +# Misc +.DS_Store +.env.local +.env.development.local +.env.test.local +.env.production.local +.vscode + +npm-debug.log* +yarn-debug.log* +yarn-error.log* + +# Algolia Search +.env + +================ +File: babel.config.js +================ +module.exports = { + presets: [require.resolve('@docusaurus/core/lib/babel/preset')], +}; + +================ +File: CODEOWNERS +================ +* @replicatedhq/replicated-docs + +================ +File: config.json +================ +{ + "index_name": "docs", + "start_urls": [ + "https://replicated-docs.netlify.app/" + ], + "sitemap_urls": [ + "https://replicated-docs.netlify.app/sitemap.xml" + ], + "sitemap_alternate_links": true, + "stop_urls": [ + "/tests" + ], + "selectors": { + "lvl0": { + "selector": "(//ul[contains(@class,'menu__list')]//a[contains(@class, 'menu__link menu__link--sublist menu__link--active')]/text() | //nav[contains(@class, 'navbar')]//a[contains(@class, 'navbar__link--active')]/text())[last()]", + "type": "xpath", + "global": true, + "default_value": "Documentation" + }, + "lvl1": "header h1", + "lvl2": "article h2", + "lvl3": "article h3", + "lvl4": "article h4", + "lvl5": "article h5, article td:first-child", + "lvl6": "article h6", + "text": "article p, article li, article td:last-child" + }, + "strip_chars": " .,;:#", + "custom_settings": { + "separatorsToIndex": "_", + "attributesForFaceting": [ + "language", + "version", + "type", + "docusaurus_tag" + ], + "attributesToRetrieve": [ + "hierarchy", + "content", + "anchor", + "url", + "url_without_anchor", + "type" + ] + }, + "conversation_id": [ + "833762294" + ], + "nb_hits": 46250 + } + +================ +File: docusaurus.config.js +================ +// @ts-check +// Note: type annotations allow type checking and IDEs autocompletion + +const {themes} = require('prism-react-renderer'); +const lightTheme = themes.github; +const darkTheme = themes.dracula; + +/** @type {import('@docusaurus/types').Config} */ +const config = { + title: 'Replicated Docs', + tagline: 'Technical documentation for Replicated vendors and their enterprise end-customers.', + url: 'https://docs.replicated.com', + baseUrl: '/', + onBrokenLinks: 'warn', + onBrokenMarkdownLinks: 'warn', + favicon: 'images/favicon.png', + organizationName: 'replicatedhq', // Usually your GitHub org/user name. + projectName: 'replicated-docs', // Usually your repo name. 
+ trailingSlash: false, + presets: [ + [ + 'classic', + /** @type {import('@docusaurus/preset-classic').Options} */ + ({ + docs: { + routeBasePath: '/', // Serve the docs at the site's root + sidebarPath: require.resolve('./sidebars.js'), + breadcrumbs: false, + editUrl: 'https://github.com/replicatedhq/replicated-docs/edit/main/', + admonitions: { + keywords: ['note','important', 'tip', 'info', 'caution', 'danger'], + extendDefaults: true, + }, + }, + googleAnalytics: { + trackingID: 'UA-61420213-25', + anonymizeIP: true, + }, + gtag: { + trackingID: 'G-MBWBP4JW70', + anonymizeIP: true, + }, + theme: { + customCss: require.resolve('./src/css/custom.css'), + }, + }), + ], + ], + + scripts: [ + { + src: + '/js/activecampaign.js', + async: true, + }, + { + src: + '/js/visitoranalytics.js', + async: true, + }, + ], + + themeConfig: + /** @type {import('@docusaurus/preset-classic').ThemeConfig} */ + ({ + docs: { + sidebar: { + hideable: true, + }, + }, + algolia: { + // The application ID provided by Algolia + appId: 'BHWS2Z6GO0', + + // Public API key: it is safe to commit it + apiKey: 'c1b3ad730ee08e83703eeaadd39c4790', + indexName: 'docs', + contextualSearch: true, + }, + navbar: { + title: 'Docs', + logo: { + alt: 'R', + src: 'images/logo-replicated-red.png', + }, + items: [ + { + type: 'dropdown', + label: 'Release Notes', + position: 'left', + items: [ + { + type: 'doc', + docId: 'release-notes/rn-embedded-cluster', + label: 'Embedded Cluster', + }, + { + type: 'doc', + docId: 'release-notes/rn-app-manager', + label: 'KOTS', + }, + { + type: 'doc', + docId: 'release-notes/rn-kubernetes-installer', + label: 'kURL', + }, + { + type: 'doc', + docId: 'release-notes/rn-replicated-sdk', + label: 'Replicated SDK', + }, + { + type: 'doc', + docId: 'release-notes/rn-vendor-platform', + label: 'Vendor Platform', + }, + ], + }, + { + type: 'dropdown', + label: 'Product Docs', + position: 'left', + items: [ + { + type: 'doc', + docId: 'vendor/testing-about', + label: 'Compatibility Matrix', + }, + { + type: 'doc', + docId: 'vendor/embedded-overview', + label: 'Embedded Cluster', + }, + { + type: 'doc', + docId: 'intro-kots', + label: 'KOTS', + }, + { + type: 'doc', + docId: 'vendor/kurl-about', + label: 'kURL', + }, + { + type: 'doc', + docId: 'vendor/private-images-about', + label: 'Replicated Proxy Registry', + }, + { + type: 'doc', + docId: 'vendor/replicated-sdk-overview', + label: 'Replicated SDK', + }, + { + type: 'doc', + docId: 'vendor/vendor-portal-creating-account', + label: 'Vendor Portal', + }, + ], + }, + { + type: 'dropdown', + label: 'Developer Tools', + position: 'left', + items: [ + { + type: 'doc', + docId: 'reference/kots-cli-getting-started', + label: 'KOTS CLI', + }, + { + type: 'doc', + docId: 'reference/replicated-cli-installing', + label: 'Replicated CLI', + }, + { + type: 'doc', + docId: 'reference/replicated-sdk-apis', + label: 'Replicated SDK API', + }, + { + type: 'doc', + docId: 'reference/vendor-api-using', + label: 'Vendor API v3', + }, + ], + }, + ], + }, + footer: { + style: 'dark', + links: [ + { + title: 'Docs', + items: [ + { + label: 'Release Notes', + to: 'release-notes/rn-whats-new', + }, + { + label: 'Replicated Onboarding', + to: 'vendor/replicated-onboarding', + }, + ], + }, + { + title: 'Community', + items: [ + { + label: 'Discourse', + href: 'https://community.replicated.com', + }, + { + label: 'Twitter', + href: 'https://twitter.com/replicatedhq', + }, + ], + }, + { + title: 'More', + items: [ + { + label: 'Blog', + to: 'https://replicated.com/blog', + 
}, + { + label: 'GitHub', + href: 'https://github.com/replicatedhq', + }, + ], + }, + ], + copyright: `© ${new Date().getFullYear()} Replicated, Inc. All Rights Reserved.`, + }, + prism: { + theme: lightTheme, + darkTheme: darkTheme, + additionalLanguages: ['bash'], + }, + }), +}; + +module.exports = config; + +================ +File: LICENSE +================ +Creative Commons Legal Code + +CC0 1.0 Universal + +Official translations of this legal tool are available. + +CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT +PROVIDE LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES +NOT CREATE AN ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS +PROVIDES THIS INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS +MAKES NO WARRANTIES REGARDING THE USE OF THIS DOCUMENT OR THE +INFORMATION OR WORKS PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY +FOR DAMAGES RESULTING FROM THE USE OF THIS DOCUMENT OR THE INFORMATION +OR WORKS PROVIDED HEREUNDER. + +Statement of Purpose + +The laws of most jurisdictions throughout the world automatically confer exclusive Copyright and Related Rights (defined below) upon the creator and subsequent owner(s) (each and all, an "owner") of an original work of authorship and/or a database (each, a "Work"). + +Certain owners wish to permanently relinquish those rights to a Work for the purpose of contributing to a commons of creative, cultural and scientific works ("Commons") that the public can reliably and without fear of later claims of infringement build upon, modify, incorporate in other works, reuse and redistribute as freely as possible in any form whatsoever and for any purposes, including without limitation commercial purposes. These owners may contribute to the Commons to promote the ideal of a free culture and the further production of creative, cultural and scientific works, or to gain reputation or greater distribution for their Work in part through the use and efforts of others. + +For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights. + +1. Copyright and Related Rights. A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). Copyright and Related Rights include, but are not limited to, the following: + + i. the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work; + ii. moral rights retained by the original author(s) and/or performer(s); + iii. publicity and privacy rights pertaining to a person's image or likeness depicted in a Work; + iv. rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below; + v. rights protecting the extraction, dissemination, use and reuse of data in a Work; + vi. database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and + vii. 
other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof. + +2. Waiver. To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose. + +3. Public License Fallback. Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose. + +4. Limitations and Disclaimers. + + a. No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document. + b. Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law. + c. 
Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work. + d. Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work. + +================ +File: netlify.toml +================ +################################################### +################################################### +# Replicated Docs Redirects +################################################### +################################################### + + + +################################################### +# Syntax +################################################### + +#[[redirects]] + #from = "FROM_URL" + #to = "TO_URL" + +# For more information, see https://docs.netlify.com/configure-builds/file-based-configuration/#redirects + +################################################### +# High-Level Redirects +################################################### + + +# Redirects from the root of the directories +[[redirects]] + #from = "/*" + #to = "/blog/:splat" + # See https://docs.netlify.com/configure-builds/file-based-configuration/#redirects + + from = "https://docs.replicated.com/vendor" + to = "https://docs.replicated.com/" + +[[redirects]] + from = "https://docs.replicated.com/enterprise" + to = "https://docs.replicated.com/enterprise/installing-overview" + +[[redirects]] + from = "https://docs.replicated.com/reference" + to = "https://docs.replicated.com/reference/kots-cli-getting-started" + +[[redirects]] + from = "https://docs.replicated.com/release-notes" + to = "https://docs.replicated.com/release-notes/rn-whats-new" + + + +################################################### +# Redirects To the Getting Started Section +################################################### +[[redirects]] + from = "https://docs.replicated.com/vendor/tutorial-installing-with-existing-cluster" + to = "https://docs.replicated.com/vendor/tutorial-cli-setup" + +[[redirects]] + from = "https://docs.replicated.com/vendor/tutorial-installing-with-cli" + to = "https://docs.replicated.com/vendor/tutorial-cli-setup" + +[[redirects]] + from = "https://docs.replicated.com/vendor/tutorial-installing-without-existing-cluster" + to = "https://docs.replicated.com/vendor/tutorial-embedded-cluster-setup" + +[[redirects]] + from = "https://docs.replicated.com/vendor/helm-mapping-example" + to = "https://docs.replicated.com/vendor/tutorial-config-setup" + +[[redirects]] + from = "https://docs.replicated.com/vendor/releases-download-airgap-bundles" + to = "https://docs.replicated.com/vendor/releases-share-download-portal" + +################################################### +# Redirects To the Vendor Section +################################################### + +[[redirects]] + from = "https://docs.replicated.com/vendor/helm-chart-components" + to = "https://docs.replicated.com/vendor/helm-optional-charts" + +[[redirects]] + from = "https://docs.replicated.com/vendor/packaging-custom-resources" + to = "https://docs.replicated.com/vendor/releases-creating-releases" + +[[redirects]] + from="https://docs.replicated.com/vendor/tutorial-ha-cluster-deploying" + 
to="https://docs.replicated.com/enterprise/installing-embedded-cluster#install-with-ha-in-online-environments" + +[[redirects]] + from="https://docs.replicated.com/vendor/tutorial-installing-air-gap-existing-cluster-gcp" + to="https://docs.replicated.com/enterprise/installing-existing-cluster-airgapped" + +[[redirects]] + from="https://docs.replicated.com/vendor/releases-promoting" + to="https://docs.replicated.com/vendor/releases-creating-releases" + +[[redirects]] + from="https://docs.replicated.com/vendor/packaging-private-registry-cname" + to="https://docs.replicated.com/vendor/custom-domains" + +[[redirects]] + from="https://docs.replicated.com/vendor/releases-semantic-versioning" + to="https://docs.replicated.com/vendor/releases-about" + +[[redirects]] + from="https://docs.replicated.com/vendor/helm-installing-native-helm" + to="https://docs.replicated.com/vendor/helm-native-about" + +[[redirects]] + from="https://docs.replicated.com/vendor/helm-processing" + to="https://docs.replicated.com/vendor/helm-native-about" + +[[redirects]] + from="https://docs.replicated.com/vendor/team-management-rbac-about" + to="https://docs.replicated.com/vendor/team-management-rbac-configuring" + +[[redirects]] + from="https://docs.replicated.com/vendor/preflight-support-bundle-creating" + to="https://docs.replicated.com/vendor/preflight-support-bundle-about" + + +[[redirects]] + from="https://docs.replicated.com/vendor/custom-domains-download-portal" + to="https://docs.replicated.com/vendor/custom-domains-using" + +[[redirects]] + from="https://docs.replicated.com/vendor/helm-release-creating-package" + to="https://docs.replicated.com/vendor/helm-install-release" + +[[redirects]] + from="https://docs.replicated.com/vendor/helm-release" + to="https://docs.replicated.com/vendor/helm-native-v2-using" + +[[redirects]] + from="https://docs.replicated.com/vendor/helm-overview" + to="https://docs.replicated.com/vendor/helm-install-overview" + +[[redirects]] + from="https://docs.replicated.com/vendor/helm-install" + to="https://docs.replicated.com/vendor/helm-install-overview" + +[[redirects]] + from="https://docs.replicated.com/vendor/testing-replicated-instance-types" + to="https://docs.replicated.com/vendor/testing-supported-clusters" + + +[[redirects]] + from="https://docs.replicated.com/vendor/repository-workflow-and-tagging-releases" + to="https://docs.replicated.com/vendor/ci-workflows" + + +[[redirects]] + from="https://docs.replicated.com/vendor/releases-about-channels" + to="https://docs.replicated.com/vendor/releases-about" + +[[redirects]] + from="https://docs.replicated.com/vendor/replicated-sdk-rbac" + to="https://docs.replicated.com/vendor/replicated-sdk-customizing" + +[[redirects]] + from="https://docs.replicated.com/vendor/helm-kots-using-sdk" + to="https://docs.replicated.com/vendor/helm-native-about" + +[[redirects]] + from="https://docs.replicated.com/vendor/helm-native-helm-install-order" + to="https://docs.replicated.com/vendor/orchestrating-resource-deployment" + +[[redirects]] + from="https://docs.replicated.com/vendor/preflight-kots-defining" + to="https://docs.replicated.com/vendor/preflight-defining" + +[[redirects]] + from="https://docs.replicated.com/vendor/preflight-helm-defining" + to="https://docs.replicated.com/vendor/preflight-defining" + +[[redirects]] + from="https://docs.replicated.com/vendor/support-bundle-kots-customizing" + to="https://docs.replicated.com/vendor/support-bundle-customizing" + +[[redirects]] + 
from="https://docs.replicated.com/vendor/support-bundle-helm-customizing" + to="https://docs.replicated.com/vendor/support-bundle-customizing" + +[[redirects]] + from="https://docs.replicated.com/vendor/distributing-overview" + to="https://docs.replicated.com/intro-replicated" + +[[redirects]] + from="https://docs.replicated.com/vendor/distributing-workflow" + to="https://docs.replicated.com/vendor/replicated-onboarding" + +[[redirects]] + from = "https://docs.replicated.com/vendor/tutorial-ci-cd-integration" + to = "https://docs.replicated.com/vendor/ci-overview" + +[[redirects]] + from = "https://docs.replicated.com/vendor/embedded-kubernetes-overview" + to = "https://docs.replicated.com/vendor/embedded-overview" + +################################################### +# Redirects To the Enterprise Section +################################################### + +[[redirects]] + from="https://docs.replicated.com/enterprise/updating-existing-cluster" + to="https://docs.replicated.com/enterprise/updating-app-manager" + +[[redirects]] + from="https://docs.replicated.com/enterprise/snapshots-restoring-partial" + to="https://docs.replicated.com/enterprise/snapshots-restoring-full" + +[[redirects]] + from="https://docs.replicated.com/enterprise/snapshots-scheduling" + to="https://docs.replicated.com/enterprise/snapshots-creating" + +[[redirects]] + from="https://docs.replicated.com/enterprise//snapshots-config-workflow" + to="https://docs.replicated.com/enterprise/snapshots-velero-cli-installing" + +[[redirects]] + from="https://docs.replicated.com/enterprise/image-registry-airgap" + to="https://docs.replicated.com/enterprise/installing-general-requirements" + +[[redirects]] + from="https://docs.replicated.com/enterprise/installing-app-setup" + to="https://docs.replicated.com/enterprise/installing-existing-cluster#install-app" + +[[redirects]] + from="https://docs.replicated.com/enterprise/installing-embedded-airgapped" + to="https://docs.replicated.com/enterprise/installing-kurl-airgap" + +[[redirects]] + from="https://docs.replicated.com/enterprise/installing-embedded-cluster" + to="https://docs.replicated.com/enterprise/installing-kurl" + +[[redirects]] + from="https://docs.replicated.com/enterprise/updating-embedded-cluster" + to="https://docs.replicated.com/enterprise/updating-kurl" + +[[redirects]] + from="https://docs.replicated.com/enterprise/image-registry-embedded-cluster" + to="https://docs.replicated.com/enterprise/image-registry-kurl" + +[[redirects]] + from="https://docs.replicated.com/vendor/releases-configvalues" + to="https://docs.replicated.com/enterprise/installing-embedded-automation" + + +[[redirects]] + from="https://docs.replicated.com/enterprise/snapshots-understanding" + to="https://docs.replicated.com/vendor/snapshots-overview" + +################################################### +# Redirects To the References Section +################################################### + +# Redirects from the removed packaging-template-functions topic +[[redirects]] + from="https://docs.replicated.com/vendor/packaging-template-functions" + to= "https://docs.replicated.com/reference/template-functions-about" + +# Redirects from the old topic name KOTS Lint Rules to the new topic name Lint Rules +[[redirects]] + from="https://docs.replicated.com/reference/kots-lint" + to="https://docs.replicated.com/reference/linter" + +# Redirects from the reference section to the teams section for generating API tokens +[[redirects]] + from="https://docs.replicated.com/reference/replicated-cli-tokens" 
+ to="https://docs.replicated.com/vendor/replicated-api-tokens" + +[[redirects]] + from="https://docs.replicated.com/reference/custom-resource-sig-application" + to="https://docs.replicated.com/vendor/admin-console-adding-buttons-links" + +[[redirects]] + from="https://docs.replicated.com/reference/replicated-cli-app-delete" + to="https://docs.replicated.com/reference/replicated-cli-app-rm" + +[[redirects]] + from="https://docs.replicated.com/reference/replicated-cli-channel-delete" + to="https://docs.replicated.com/reference/replicated-cli-channel-rm" + +################################################### +# Redirects To the Release Notes Section +################################################### + +================ +File: package.json +================ +{ + "name": "replicated-docs", + "version": "0.0.0", + "private": true, + "scripts": { + "docusaurus": "docusaurus", + "start": "docusaurus start", + "build": "docusaurus build", + "swizzle": "docusaurus swizzle", + "deploy": "docusaurus deploy", + "clear": "docusaurus clear", + "serve": "docusaurus serve", + "write-translations": "docusaurus write-translations", + "write-heading-ids": "docusaurus write-heading-ids" + }, + "dependencies": { + "@algolia/client-search": "^5.20.4", + "@babel/traverse": "^7.26.9", + "@docusaurus/core": "3.5.2", + "@docusaurus/preset-classic": "3.5.2", + "@mdx-js/react": "^3.1.0", + "@types/node": "22.13.10", + "@types/react": "18.3.5", + "clsx": "^2.1.1", + "immer": "^10.1.1", + "loader-utils": "3.3.1", + "prism-react-renderer": "^2.4.1", + "react": "^18.2.0", + "react-dom": "^18.2.0", + "react-loadable": "^5.5.0", + "search-insights": "2.17.3", + "ts-node": "10.9.2" + }, + "devDependencies": { + "@docusaurus/module-type-aliases": "3.5.2", + "@docusaurus/types": "3.5.2", + "typescript": "~5.8.2" + }, + "resolutions": { + "immer": "^10.1.1", + "loader-utils": "3.3.1", + "shell-quote": "^1.7.3", + "got": "^11.8.5", + "lodash.template": "^4.5.0", + "serialize-javascript": "^6.0.2", + "tough-cookie": "^4.1.3", + "trim-newlines": "^3.0.1", + "http-cache-semantics": "^4.1.1", + "semver-regex": "^3.1.3", + "cross-spawn": "^7.0.5" + }, + "browserslist": { + "production": [ + ">0.5%", + "not dead", + "not op_mini all" + ], + "development": [ + "last 1 chrome version", + "last 1 firefox version", + "last 1 safari version" + ] + }, + "engines": { + "node": ">=18.0" + } +} + +================ +File: README.md +================ +# [Replicated](https://www.replicated.com/) Product Documentation + +## Table of Contents: + +* [For Vendors: How to Create Enterprise Documentation](#for-software-vendors-using-this-repository-to-create-your-documentation) +* [How to Contribute to the Documentation](#how-to-contribute-to-the-documentation) +* [Setting Up Local WYSIWYG Previews](#setting-up-local-wysiwyg-previews) +* [Folder Structure and TOC](#folder-structure-and-toc) +* [Topic Templates](#topic-templates) +* [Filenaming](#filenaming) +* [Images](#images) +* [Using Markdown with our Docusaurus CSS](#using-markdown-with-our-docusaurus-css) +* [Style Guidelines](#style-guidelines) +* [SME and Editorial Reviews](#sme-and-editorial-reviews) + +Welcome to the repository for the [Replicated documentation site](https://docs.replicated.com/). + +## For Software Vendors: Using this Repository to Create Your Documentation + +Software vendors using Replicated to distribute their application can copy the documentation in this repository to create docs for their own users. 
The following directories contain documentation for enterprise users about how to use the Replicated admin console and the kots CLI:
+
+* **docs/enterprise**: The `docs/enterprise` directory includes documentation for installing, updating, monitoring, and managing applications with the admin console and the kots CLI. See [`docs/enterprise`](https://github.com/replicatedhq/replicated-docs/tree/main/docs/enterprise). For the published version of the enterprise content, see [https://docs.replicated.com/enterprise](https://docs.replicated.com/enterprise/installing-overview).
+* **docs/reference**: The `docs/reference` directory includes reference documentation for the kots CLI commands. This includes details on each of the kots CLI commands and associated flags. See [`docs/reference`](https://github.com/replicatedhq/replicated-docs/tree/main/docs/reference). For the published version of the kots CLI reference content, see [Installing the kots CLI](https://docs.replicated.com/reference/kots-cli-getting-started).
+
+To create your own documentation, review the content in these directories and copy and paste the markdown files into your own repository. Edit the content as necessary to add information and terminology specific to your application, and remove content that does not apply to your use cases.
+
+After copying the generic content from the above directories in this repository, you can then add your own application-specific content. For example, there are likely prerequisites, configuration options, and troubleshooting steps that are unique to your application.
+
+For help getting started with writing documentation that is specific to your application, see the [vendor-docs-starter](https://github.com/replicatedhq/vendor-docs-starter) repository. The `vendor-docs-starter` repository contains templates, guidance, and examples that you can use to write the end user documentation for your application.
+
+## How to Contribute to the Documentation
+
+This repository has been made public so that vendors and the open-source community can contribute to the content using the following methods:
+
+- **Submit a PR**: You can submit a PR directly from a specific topic in the documentation by clicking the **Create pull request or raise issue on GitHub** link at the bottom of the page. This method lets you edit the content directly and commit your changes on a new branch. After submitting your proposed changes, the Replicated team will verify the accuracy of the changes and perform an editorial review. If the PR is approved, it will be merged directly into the main branch.
+
+- **Open a GitHub Issue**: To open a GitHub issue for this repository, click the **Issues** tab and click **New Issue**. This method may be more useful when you want to report a bug specifically for the documentation. If you are having an issue with the product itself, we encourage you to report it to us in a support issue submitted in the vendor portal.
+
+## Setting Up Local WYSIWYG Previews
+
+This website is built using [Docusaurus](https://docusaurus.io/), a modern static website generator.
+
+When you submit a PR in GitHub, Netlify builds a preview automatically. However, you can preview your changes locally using Node.js and npm. This repository uses npm as its package manager.
+
+### Prerequisites
+
+- Node.js version 18 or higher
+- npm (comes bundled with Node.js)
+
+### Start the local dev server with `npm start`
+
+1. Install dependencies using npm:
+
+   ```bash
+   npm install
+   ```
+
+2. Start a local development server and open the site in a browser window:
+
+   ```bash
+   npm start
+   ```
+
+Most changes are reflected live without having to restart the server (changes to the `sidebars.js` file typically require restarting the dev server). This preview shows the formatting and styles as they would render on the live site.
+
+If you encounter any build errors, they will appear in the terminal and often indicate issues like broken links or formatting problems in the content.
+
+### Build and test locally with `npm run build` and `npm run serve`
+
+Before pushing changes to the remote repository, build and serve the site locally to check for errors, including broken links.
+
+1. Install dependencies using npm:
+
+   ```bash
+   npm install
+   ```
+1. Build the static site files:
+
+   ```bash
+   npm run build
+   ```
+   Any broken links and anchor links are listed in the output.
+
+1. Serve the `build` directory locally to test:
+
+   ```bash
+   npm run serve
+   ```
+
+## Folder Structure and TOC
+
+The folder structure is broken into several high-level categories under the main `docs` folder: vendor, enterprise, reference, and release notes.
+
+Images are under the `static` > `images` folder.
+
+The TOC is managed in the `sidebars.js` file. You only need to edit the `sidebars.js` file when you are adding a new topic or deleting an existing topic. The `sidebars.js` file is the one that causes most of the merge conflicts because many technical writers are working on content daily. You might need to merge in changes from other contributors before committing your PR.
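+
+For example, adding a new topic is usually just a matter of adding its doc ID to the `items` array of the right category. The following is a minimal sketch, where `vendor/my-new-topic` is a hypothetical doc ID and the surrounding entries are abbreviated:
+
+```js
+// sidebars.js (abbreviated sketch): add the new topic's doc ID to a category
+const sidebars = {
+  main: [
+    {
+      type: 'category',
+      label: 'Channels and Releases',
+      items: [
+        'vendor/releases-about',
+        'vendor/my-new-topic', // hypothetical doc ID for the new topic
+      ],
+    },
+  ],
+};
+
+module.exports = sidebars;
+```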
+
+Don't worry if you're not sure where in the TOC a new topic belongs. When you submit your PR, the Documentation team will edit it and help to find the right placement.
+
+The right-hand TOC is created automatically when you add headings to a topic.
+
+## Topic Templates
+
+You can find topic templates in the `docs/templates` folder. These templates are useful for anyone creating a new topic in this repository.
+
+If you are using the templates to create a new topic in this repository, save the new file to the correct folder (`docs/vendor`, `docs/enterprise`, `docs/reference`, etc.) and be sure to follow the [filenaming convention](#filenaming).
+
+For additional templates designed for software vendors writing the end user documentation for their applications, see the [vendor-docs-starter](https://github.com/replicatedhq/vendor-docs-starter) repository.
+
+## Filenaming
+
+If you are adding a new file, it must be named following our naming conventions. The file name should always start with the feature type (such as licenses, helm, or gitops). Depending on the content type, it typically also includes a secondary descriptor and a verb. Verbs are used when you are creating a task topic.
+
+Because we author content using Markdown, you must add the `.md` file extension to the file name.
+
+If you are adding a new topic to an existing feature category, follow the existing naming convention for that category.
+
+**Example: Concept topic**
+
+`snapshots-backup-hooks.md`
+
+**Example: Task topic**
+
+`releases-creating-customer.md`
+
+**Example: Tutorial**
+
+`tutorial-ha-cluster-deploying.md`
+
+
+## Images
+
+* Screenshots are used sparingly to minimize the maintenance of out-of-date content. However, we do include some screenshots to provide context.
+
+* Use a focused area of the UI, unless the entire screen is truly needed. If using a focused area, use approximately 400 pixels for the width. If capturing the entire screen, use a maximum of 600 pixels for the width.
+
+* We only use PNG format, which provides better quality and lossless compression.
+
+* For privacy and legal purposes, do not reveal personal information, IP addresses, domain information, login credentials, and so on in screenshots, code blocks, or text.
+
+* Add _alt text_ for all images to provide accessibility. The user will hear the alt text spoken out loud by the screen reader, so it is important to use succinct text that is clear and complete. For more information about alt text formatting, see the following section.
+
+* For images that are difficult to see, add a link below the image where the reader can view a larger version: `[View a larger version of this image](PATH-TO-LARGER-IMAGE-FILE)` where `PATH-TO-LARGER-IMAGE-FILE` is the path to the larger image in the `static/images` folder. For an example, see the private registry diagram in [Connecting to a Private Image Registry](https://docs.replicated.com/vendor/packaging-private-images#about-connecting-to-an-external-registry).
+
+
+## Using Markdown with our Docusaurus CSS
+
+Replicated uses its own CSS, and Docusaurus supports its own specific Markdown syntax. The following table provides an overview of the supported syntax elements.
+
+| Element                                      | Syntax                                                      |
+|----------------------------------------------|-------------------------------------------------------------|
+| Headings                                     | `# H1`, `## H2`, `### H3`                                   |
+| Bold                                         | `**bold text**`                                             |
+| Italic                                       | `_italicized text_`                                         |
+| Ordered List                                 | `1.` First item (use `1.` for each item)                    |
+| Unordered List                               | `-` or `*` (for each item)                                  |
+| Code or command in a sentence                | ``code``                                                    |
+| Link - external site                         | `[Title](https://www.example.com)`                          |
+| Link - topic in same folder                  | `[Title](file-name)` (without the file extension)           |
+| Link - topic in different folder             | `[Title](../folder/file-name)` (without the file extension) |
+| Link - section in topic in same folder       | `[Title](file-name#section-name)`                           |
+| Link - section in topic in different folder  | `[Title](../folder/file-name#section-name)`                 |
+| Image                                        | `![alt text](images/file-name.png)`                         |
+
+**Note:** Alt text, used with image syntax, is parsed by screen readers to support accessibility.
+
+### Admonitions
+
+Note admonitions are formatted as follows:
+
+```
+:::note
+text
+:::
+```
+
+Important admonitions, typically used as a warning, are formatted as follows:
+
+```
+:::important
+text
+:::
+```
+
+### Tables
+
+Traditional markdown for tables can be limiting. Instead, we use HTML tables, which lets us manage the width of the table columns. The template topic `procedure.md` contains an example of the HTML formatting for tables.
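+
+For example, an HTML table that sets column widths might look like the following minimal sketch (the field and description values are placeholders; see the `procedure.md` template for the authoritative format):
+
+```html
+<table>
+  <tr>
+    <th width="30%">Field</th>
+    <th width="70%">Description</th>
+  </tr>
+  <tr>
+    <td>App name</td>
+    <td>The name of the application.</td>
+  </tr>
+</table>
+```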
+
+**Note:** There are still many instances of the old markdown table formatting in the content that was carried over from the content migration, but we do not encourage its use going forward.
+
+## Style Guidelines
+
+Whether you are editing existing content or adding a new topic, our goal is to make it task-based. The `procedure.md` template provides the formatting guidelines that you need. You can also see a published example of a task [here](https://docs.replicated.com/vendor/releases-creating-customer).
+
+Replicated product documentation has in-house style guidelines that the Documentation team uses when reviewing your PR. Please feel free to just add the content you need, knowing that our team will be there to assist with editorial reviews and information architecture, such as TOC placement, whether to create a task, and so on. The Documentation team will actively write content, not just give editorial reviews, so we take the heavy burden off of you. We encourage your contributions in the true open-source spirit.
+
+Replicated employees can review more information in the Documentation Style Guide in the employee handbook.
+
+
+## SME and Editorial Reviews
+
+All submitted PRs are given an editorial review by the Replicated Docs team.
+
+Content that is submitted by our customers and the open-source community is also reviewed by Replicated subject matter experts (SMEs) to help ensure technical accuracy.
+
+================
+File: sidebars.js
+================
+/**
+ * Creating a sidebar enables you to:
+ - create an ordered group of docs
+ - render a sidebar for each doc of that group
+ - provide next/previous navigation
+
+ The sidebars can be generated from the filesystem, or explicitly defined here.
+
+ Create as many sidebars as you want.
+ */
+
+// @ts-check
+
+// @type {import('@docusaurus/plugin-content-docs').SidebarsConfig}
+const sidebars = {
+  // By default, Docusaurus generates a sidebar from the docs folder structure
+  //tutorialSidebar: [{type: 'autogenerated', dirName: '.'}],
+
+  // But you can create a sidebar manually
+
+  main: [
+    'intro',
+    {
+      type: 'category',
+      label: 'Release Notes',
+      items: [
+        'release-notes/rn-whats-new',
+        'release-notes/rn-embedded-cluster',
+        'release-notes/rn-app-manager',
+        'release-notes/rn-kubernetes-installer',
+        'release-notes/rn-replicated-sdk',
+        'release-notes/rn-vendor-platform',
+      ],
+    },
+
+    //GET STARTED
+    {type: 'html', value: '
    getting started
    ', defaultStyle: true}, + 'intro-replicated', + 'vendor/kots-faq', + 'vendor/quick-start', + 'vendor/replicated-onboarding', + // { + // type: 'category', + // label: 'Planning', + // items: [ + // 'vendor/planning-questionnaire', + // 'vendor/namespaces', + // ], + // }, + { + type: 'category', + label: 'Tutorials', + items: [ + { + type: 'category', + label: 'Install a Helm Chart on a VM with Embedded Cluster', + items: [ + 'vendor/tutorial-embedded-cluster-setup', + 'vendor/tutorial-embedded-cluster-create-app', + 'vendor/tutorial-embedded-cluster-package-chart', + 'vendor/tutorial-embedded-cluster-create-release', + 'vendor/tutorial-embedded-cluster-create-customer', + 'vendor/tutorial-embedded-cluster-install', + ], + }, + { + type: 'category', + label: 'Install a Helm Chart with KOTS and the Helm CLI', + items: [ + 'vendor/tutorial-kots-helm-setup', + 'vendor/tutorial-kots-helm-get-chart', + 'vendor/tutorial-kots-helm-create-app', + 'vendor/tutorial-kots-helm-package-chart', + 'vendor/tutorial-kots-helm-create-release', + 'vendor/tutorial-kots-helm-create-customer', + 'vendor/tutorial-kots-helm-install-kots', + 'vendor/tutorial-kots-helm-install-helm', + ], + }, + { + type: 'category', + label: 'Install with KOTS in an Existing Cluster', + items: [ + 'vendor/tutorial-cli-setup', + 'vendor/tutorial-cli-install-cli', + 'vendor/tutorial-cli-create-app', + 'vendor/tutorial-cli-manifests', + 'vendor/tutorial-cli-create-release', + 'vendor/tutorial-cli-create-customer', + 'vendor/tutorial-cli-install-app-manager', + 'vendor/tutorial-cli-deploy-app', + 'vendor/tutorial-cli-create-new-version', + 'vendor/tutorial-cli-update-app', + ], + }, + ], + }, + { + type: 'category', + label: 'Labs', + items: + [ + {type: 'link', href: 'https://play.instruqt.com/embed/replicated/tracks/distributing-with-replicated?token=em_VHOEfNnBgU3auAnN', label: 'Distributing Your Application with Replicated'}, + {type: 'link', href: 'https://play.instruqt.com/embed/replicated/tracks/delivering-as-an-appliance?token=em_lUZdcv0LrF6alIa3', label: 'Delivering Your Application as a Kubernetes Appliance'}, + {type: 'link', href: 'https://play.instruqt.com/embed/replicated/tracks/avoiding-installation-pitfalls?token=em_gJjtIzzTTtdd5RFG', label: 'Avoiding Installation Pitfalls'}, + {type: 'link', href: 'https://play.instruqt.com/embed/replicated/tracks/closing-information-gap?token=em_MO2XXCz3bAgwtEca', label: 'Closing the Support Information Gap'}, + {type: 'link', href: 'https://play.instruqt.com/embed/replicated/tracks/protecting-your-assets?token=em_7QjY34G_UHKoREBd', label: 'Protecting Your Assets'}, + ], + }, + // PRODUCT DOCS + {type: 'html', value: '
    product docs
    ', defaultStyle: true}, + { + type: 'category', + label: 'Vendor Portal', + items: [ + { + type: 'category', + label: 'Vendor Portal Teams and Accounts', + items: [ + 'vendor/vendor-portal-creating-account', + 'vendor/team-management', + 'vendor/team-management-github-username', + { + type: 'category', + label: 'Configuring Role-based Access Control', + items: [ + 'vendor/team-management-rbac-configuring', + 'vendor/team-management-rbac-resource-names', + ], + }, + { + type: 'category', + label: 'Configuring Authentication', + items: [ + 'vendor/team-management-two-factor-auth', + 'vendor/team-management-google-auth', + 'vendor/team-management-saml-auth', + ], + }, + 'vendor/team-management-slack-config', + 'vendor/replicated-api-tokens', + ], + }, + { + type: 'category', + label: 'Applications', + items: [ + 'vendor/vendor-portal-manage-app', + 'vendor/vendor-portal-application-settings', + ], + }, + { + type: 'category', + label: 'Channels and Releases', + items: [ + 'vendor/releases-about', + 'vendor/releases-creating-channels', + 'vendor/releases-creating-releases', + 'vendor/releases-creating-cli', + 'vendor/helm-install-release', + 'vendor/releases-sharing-license-install-script', + 'vendor/releases-share-download-portal', + 'reference/linter', + ], + }, + { + type: 'category', + label: 'Customers and Licenses', + items: [ + 'vendor/licenses-about', + 'vendor/releases-creating-customer', + 'vendor/licenses-adding-custom-fields', + 'vendor/licenses-install-types', + 'vendor/licenses-about-types', + 'vendor/licenses-download', + { + type: 'category', + label: 'Querying License Entitlements', + items: [ + 'vendor/licenses-using-builtin-fields', + 'vendor/licenses-reference-sdk', + 'vendor/licenses-reference-helm', + 'vendor/licenses-referencing-fields', + 'vendor/licenses-reference-kots-runtime', + 'vendor/licenses-verify-fields-sdk-api', + ] + }, + ], + }, + { + type: 'category', + label: 'Custom Domains', + items: [ + 'vendor/custom-domains', + 'vendor/custom-domains-using', + ], + }, + { + type: 'category', + label: 'Insights and Telemetry', + items: [ + 'vendor/instance-insights-event-data', + 'vendor/insights-app-status', + 'vendor/telemetry-air-gap', + 'vendor/customer-adoption', + 'vendor/customer-reporting', + 'vendor/instance-insights-details', + 'vendor/instance-notifications-config', + 'vendor/custom-metrics', + 'vendor/instance-data-export', + ], + }, + ], + }, + { + type: 'category', + label: 'Compatibility Matrix', + items: [ + 'vendor/testing-about', + 'vendor/testing-pricing', + 'vendor/testing-supported-clusters', + 'vendor/testing-cluster-addons', + 'vendor/compatibility-matrix-usage', + 'vendor/testing-how-to', + 'vendor/testing-ingress', + ], + }, + { + type: 'category', + label: 'Embedded Cluster', + items: [ + 'vendor/embedded-overview', + 'vendor/embedded-using', + 'reference/embedded-config', + { + type: 'category', + label: 'Installing with Embedded Cluster', + items: [ + 'enterprise/installing-embedded-requirements', + 'enterprise/installing-embedded', + 'enterprise/installing-embedded-air-gap', + 'enterprise/installing-embedded-automation', + 'reference/embedded-cluster-install', + ], + }, + 'enterprise/embedded-manage-nodes', + 'enterprise/updating-embedded', + 'enterprise/embedded-tls-certs', + 'vendor/embedded-disaster-recovery', + ], + }, + { + type: 'category', + label: 'KOTS', + items: [ + 'intro-kots', + { + type: 'category', + label: 'Configuring KOTS', + items: [ + { + type: 'category', + label: 'Configuring the HelmChart Custom Resource', + 
items: [ + 'vendor/helm-native-about', + 'vendor/helm-native-v2-using', + 'vendor/helm-packaging-airgap-bundles', + 'vendor/helm-optional-value-keys', + 'vendor/helm-v2-migrate', + ], + }, + { + type: 'category', + label: 'Customizing the Admin Console and Download Portal', + items: [ + 'vendor/admin-console-customize-app-icon', + 'vendor/admin-console-adding-buttons-links', + 'vendor/admin-console-port-forward', + 'vendor/admin-console-prometheus-monitoring', + ], + }, + { + type: 'category', + label: 'Configuring the Admin Console Config Screen', + items: [ + 'vendor/config-screen-about', + 'vendor/admin-console-customize-config-screen', + 'vendor/config-screen-map-inputs', + 'vendor/config-screen-conditional', + { + type: 'category', + label: 'Tutorial: Set Helm Chart Values with KOTS', + items: [ + 'vendor/tutorial-config-setup', + 'vendor/tutorial-config-get-chart', + 'vendor/tutorial-config-create-app', + 'vendor/tutorial-config-package-chart', + 'vendor/tutorial-config-create-release', + 'vendor/tutorial-config-create-customer', + 'vendor/tutorial-config-install-kots', + ], + }, + ], + }, + { + type: 'category', + label: 'Managing Resources and Objects', + items: [ + 'vendor/admin-console-display-app-status', + { + type: 'category', + label: 'Conditionally Deploying Resources', + items: [ + 'vendor/packaging-include-resources', + 'vendor/helm-optional-charts', + 'vendor/tutorial-adding-db-config', + ], + }, + 'vendor/resources-annotations-templating', + 'vendor/orchestrating-resource-deployment', + 'vendor/database-config-adding-options', + 'vendor/packaging-cleaning-up-jobs', + 'vendor/packaging-ingress', + ], + }, + { + type: 'category', + label: 'Managing KOTS', + items: [ + 'vendor/packaging-kots-versions', + 'vendor/packaging-rbac', + 'vendor/packaging-air-gap-excluding-minio', + ], + }, + { + type: 'category', + label: 'Distributing Kubernetes Operators with KOTS', + items: [ + 'vendor/operator-packaging-about', + 'vendor/operator-defining-additional-images', + 'vendor/operator-referencing-images', + 'vendor/operator-defining-additional-namespaces', + ], + }, + { + type: 'category', + label: 'KOTS Custom Resources', + items: [ + 'reference/custom-resource-about', + 'reference/custom-resource-application', + 'reference/custom-resource-config', + 'reference/custom-resource-helmchart-v2', + 'reference/custom-resource-helmchart', + 'reference/custom-resource-lintconfig', + ], + }, + { + type: 'category', + label: 'KOTS Template Functions', + items: [ + 'reference/template-functions-about', + 'reference/template-functions-examples', + 'reference/template-functions-config-context', + 'reference/template-functions-identity-context', + 'reference/template-functions-kurl-context', + 'reference/template-functions-license-context', + 'reference/template-functions-static-context', + ], + }, + 'reference/cron-expressions', + ], + }, + { + type: 'category', + label: 'Installing in Existing Clusters with KOTS', + items: [ + 'enterprise/installing-overview', + 'enterprise/installing-general-requirements', + 'enterprise/installing-existing-cluster', + 'enterprise/installing-existing-cluster-airgapped', + 'enterprise/installing-existing-cluster-automation', + 'enterprise/installing-stateful-component-requirements', + ], + }, + { + type: 'category', + label: 'Performing Updates in Existing Cluster KOTS Installations', + items: [ + 'enterprise/updating-app-manager', + 'enterprise/updating-apps', + 'enterprise/updating-patching-with-kustomize', + ], + }, + { + type: 'category', + label: 
'Configuring Local Image Registries',
+          items: [
+            'enterprise/image-registry-settings',
+            'enterprise/image-registry-rate-limits',
+          ],
+        },
+        'enterprise/updating-licenses',
+        {
+          type: 'category',
+          label: 'Performing Backup and Restore with Snapshots',
+          items: [
+            'vendor/snapshots-overview',
+            {
+              type: 'category',
+              label: 'Enabling and Configuring Snapshots',
+              items: [
+                'vendor/snapshots-configuring-backups',
+                'reference/custom-resource-backup',
+                'vendor/snapshots-hooks',
+              ],
+            },
+            {
+              type: 'category',
+              label: 'Configuring Backup Storage for Snapshots',
+              items: [
+                'enterprise/snapshots-velero-cli-installing',
+                'enterprise/snapshots-configuring-hostpath',
+                'enterprise/snapshots-configuring-nfs',
+                'enterprise/snapshots-storage-destinations',
+                'enterprise/snapshots-velero-installing-config',
+              ],
+            },
+            'enterprise/snapshots-creating',
+            'enterprise/snapshots-restoring-full',
+            'enterprise/snapshots-updating-with-admin-console',
+            'enterprise/snapshots-troubleshooting-backup-restore',
+          ],
+        },
+        {
+          type: 'category',
+          label: 'Managing Admin Console User Access',
+          items: [
+            'enterprise/auth-changing-passwords',
+            'enterprise/auth-identity-provider',
+            'enterprise/auth-configuring-rbac',
+          ],
+        },
+        {
+          type: 'category',
+          label: 'Monitoring Applications with Prometheus',
+          items: [
+            'enterprise/monitoring-applications',
+            'enterprise/monitoring-access-dashboards',
+          ],
+        },
+        'enterprise/status-viewing-details',
+        'enterprise/delete-admin-console',
+        {
+          type: 'category',
+          label: 'Using a GitOps Workflow',
+          items: [
+            'enterprise/gitops-workflow',
+            'enterprise/gitops-managing-secrets',
+          ],
+        },
+      ],
+    },
+    {
+      type: 'category',
+      label: 'kURL',
+      items: [
+        'vendor/kurl-about',
+        {
+          type: 'category',
+          label: 'Configuring kURL Installers',
+          items: [
+            'vendor/packaging-embedded-kubernetes',
+            'vendor/packaging-installer-storage',
+            'vendor/installer-history',
+            'vendor/kurl-nodeport-services',
+          ],
+        },
+        {
+          type: 'category',
+          label: 'Installing with kURL',
+          items: [
+            'enterprise/installing-kurl-requirements',
+            'enterprise/installing-kurl',
+            'enterprise/installing-kurl-airgap',
+            'enterprise/installing-kurl-automation',
+          ],
+        },
+        'enterprise/cluster-management-add-nodes',
+        {
+          type: 'category',
+          label: 'Performing Updates with kURL',
+          items: [
+            'enterprise/updating-kurl-about',
+            'enterprise/updating-kurl',
+          ],
+        },
+        'vendor/packaging-using-tls-certs',
+        'enterprise/updating-tls-cert',
+        'enterprise/image-registry-kurl',
+        'enterprise/monitoring-external-prometheus',
+        'vendor/kurl-reset',
+      ],
+    },
+    {
+      type: 'category',
+      label: 'Helm Installations with Replicated',
+      items: [
+        'vendor/helm-install-overview',
+        'vendor/helm-install-values-schema',
+        'vendor/install-with-helm',
+        'vendor/helm-install-airgap',
+        'vendor/using-third-party-registry-proxy',
+        'vendor/helm-install-troubleshooting',
+      ],
+    },
+    {
+      type: 'category',
+      label: 'Replicated SDK',
+      items: [
+        'vendor/replicated-sdk-overview',
+        'vendor/replicated-sdk-installing',
+        'vendor/replicated-sdk-airgap',
+        'vendor/replicated-sdk-development',
+        'vendor/replicated-sdk-customizing',
+      ],
+    },
+    {
+      type: 'category',
+      label: 'Preflight Checks and Support Bundles',
+      items: [
+        'vendor/preflight-support-bundle-about',
+        {
+          type: 'category',
+          label: 'Preflight Checks',
+          items: [
+            'vendor/preflight-defining',
+            'vendor/preflight-examples',
+            'vendor/preflight-running',
+            'vendor/preflight-host-preflights',
+            {
+              type: 'category',
+              label: 'Tutorial: Add Preflight Checks to a Helm
Chart', + items: [ + 'vendor/tutorial-preflight-helm-setup', + 'vendor/tutorial-preflight-helm-get-chart', + 'vendor/tutorial-preflight-helm-add-spec', + 'vendor/tutorial-preflight-helm-create-release', + 'vendor/tutorial-preflight-helm-create-customer', + 'vendor/tutorial-preflight-helm-install', + 'vendor/tutorial-preflight-helm-install-kots', + ], + }, + ], + }, + { + type: 'category', + label: 'Support Bundles', + items: [ + 'vendor/support-bundle-customizing', + 'vendor/support-bundle-examples', + 'vendor/support-online-support-bundle-specs', + 'vendor/support-modular-support-bundle-specs', + { + type: 'category', + label: 'Generating Support Bundles', + items: [ + 'vendor/support-bundle-generating', + 'vendor/support-bundle-embedded', + 'enterprise/troubleshooting-an-app', + 'vendor/support-host-support-bundles', + ], + }, + 'vendor/support-inspecting-support-bundles', + 'vendor/support-enabling-direct-bundle-uploads', + 'vendor/support-submit-request', + ], + }, + 'vendor/preflight-sb-helm-templates-about', + { + type: 'category', + label: 'Troubleshoot Custom Resources', + items: [ + 'reference/custom-resource-preflight', + 'reference/custom-resource-redactor', + ], + }, + ], + }, + { + type: 'category', + label: 'Replicated Proxy Registry', + items: [ + 'vendor/private-images-about', + 'vendor/packaging-private-images', + 'vendor/helm-image-registry', + 'vendor/private-images-kots', + 'vendor/private-images-tags-digests', + { + type: 'category', + label: 'Replicated Private Registry', + items: [ + 'vendor/private-images-replicated', + 'vendor/packaging-private-registry-security', + ], + }, + 'vendor/packaging-public-images', + 'vendor/tutorial-ecr-private-images', + ], + }, + { + type: 'category', + label: 'Integrating Replicated in CI/CD Workflows', + items: [ + 'vendor/ci-overview', + 'vendor/ci-workflows', + 'vendor/ci-workflows-github-actions', + ], + }, + + // DEVELOPER TOOLS + {type: 'html', value: '
    Developer tools
    ', defaultStyle: true}, + 'reference/replicated-sdk-apis', + { + type: 'category', + label: 'Replicated CLI', // This label is generated. Do not edit. + items: [ // This list is generated. Do not edit. + 'reference/replicated-cli-installing', + 'reference/replicated', + 'reference/replicated-cli-api', + 'reference/replicated-cli-api-get', + 'reference/replicated-cli-api-patch', + 'reference/replicated-cli-api-post', + 'reference/replicated-cli-api-put', + 'reference/replicated-cli-app', + 'reference/replicated-cli-app-create', + 'reference/replicated-cli-app-ls', + 'reference/replicated-cli-app-rm', + 'reference/replicated-cli-channel', + 'reference/replicated-cli-channel-create', + 'reference/replicated-cli-channel-demote', + 'reference/replicated-cli-channel-disable-semantic-versioning', + 'reference/replicated-cli-channel-enable-semantic-versioning', + 'reference/replicated-cli-channel-inspect', + 'reference/replicated-cli-channel-ls', + 'reference/replicated-cli-channel-rm', + 'reference/replicated-cli-channel-un-demote', + 'reference/replicated-cli-cluster', + 'reference/replicated-cli-cluster-addon', + 'reference/replicated-cli-cluster-addon-create', + 'reference/replicated-cli-cluster-addon-create-object-store', + 'reference/replicated-cli-cluster-addon-ls', + 'reference/replicated-cli-cluster-addon-rm', + 'reference/replicated-cli-cluster-create', + 'reference/replicated-cli-cluster-kubeconfig', + 'reference/replicated-cli-cluster-ls', + 'reference/replicated-cli-cluster-nodegroup', + 'reference/replicated-cli-cluster-nodegroup-ls', + 'reference/replicated-cli-cluster-port', + 'reference/replicated-cli-cluster-port-expose', + 'reference/replicated-cli-cluster-port-ls', + 'reference/replicated-cli-cluster-port-rm', + 'reference/replicated-cli-cluster-prepare', + 'reference/replicated-cli-cluster-rm', + 'reference/replicated-cli-cluster-shell', + 'reference/replicated-cli-cluster-update', + 'reference/replicated-cli-cluster-update-nodegroup', + 'reference/replicated-cli-cluster-update-ttl', + 'reference/replicated-cli-cluster-upgrade', + 'reference/replicated-cli-cluster-versions', + 'reference/replicated-cli-completion', + 'reference/replicated-cli-customer', + 'reference/replicated-cli-customer-archive', + 'reference/replicated-cli-customer-create', + 'reference/replicated-cli-customer-download-license', + 'reference/replicated-cli-customer-inspect', + 'reference/replicated-cli-customer-ls', + 'reference/replicated-cli-customer-update', + 'reference/replicated-cli-default', + 'reference/replicated-cli-default-clear-all', + 'reference/replicated-cli-default-clear', + 'reference/replicated-cli-default-set', + 'reference/replicated-cli-default-show', + 'reference/replicated-cli-installer', + 'reference/replicated-cli-installer-create', + 'reference/replicated-cli-installer-ls', + 'reference/replicated-cli-instance', + 'reference/replicated-cli-instance-inspect', + 'reference/replicated-cli-instance-ls', + 'reference/replicated-cli-instance-tag', + 'reference/replicated-cli-login', + 'reference/replicated-cli-logout', + 'reference/replicated-cli-registry', + 'reference/replicated-cli-registry-add', + 'reference/replicated-cli-registry-add-dockerhub', + 'reference/replicated-cli-registry-add-ecr', + 'reference/replicated-cli-registry-add-gar', + 'reference/replicated-cli-registry-add-gcr', + 'reference/replicated-cli-registry-add-ghcr', + 'reference/replicated-cli-registry-add-other', + 'reference/replicated-cli-registry-add-quay', + 'reference/replicated-cli-registry-ls', + 
'reference/replicated-cli-registry-rm', + 'reference/replicated-cli-registry-test', + 'reference/replicated-cli-release', + 'reference/replicated-cli-release-compatibility', + 'reference/replicated-cli-release-create', + 'reference/replicated-cli-release-download', + 'reference/replicated-cli-release-inspect', + 'reference/replicated-cli-release-lint', + 'reference/replicated-cli-release-ls', + 'reference/replicated-cli-release-promote', + 'reference/replicated-cli-release-test', + 'reference/replicated-cli-release-update', + 'reference/replicated-cli-version', + 'reference/replicated-cli-version-upgrade', + 'reference/replicated-cli-vm', + 'reference/replicated-cli-vm-create', + 'reference/replicated-cli-vm-ls', + 'reference/replicated-cli-vm-port', + 'reference/replicated-cli-vm-port-expose', + 'reference/replicated-cli-vm-port-ls', + 'reference/replicated-cli-vm-port-rm', + 'reference/replicated-cli-vm-rm', + 'reference/replicated-cli-vm-update', + 'reference/replicated-cli-vm-update-ttl', + 'reference/replicated-cli-vm-versions', + ], + }, + { + type: 'category', + label: 'KOTS CLI', + items: [ + 'reference/kots-cli-getting-started', + 'reference/kots-cli-global-flags', + { + type: 'category', + label: 'admin-console', + items: [ + 'reference/kots-cli-admin-console-index', + 'reference/kots-cli-admin-console-garbage-collect-images', + 'reference/kots-cli-admin-console-generate-manifests', + 'reference/kots-cli-admin-console-push-images', + 'reference/kots-cli-admin-console-upgrade', + ], + }, + { + type: 'category', + label: 'backup', + items: [ + 'reference/kots-cli-backup-index', + 'reference/kots-cli-backup-ls', + ], + }, + { + type: 'category', + label: 'docker', + items: [ + 'reference/kots-cli-docker-index', + 'reference/kots-cli-docker-ensure-secret', + ], + }, + 'reference/kots-cli-download', + 'reference/kots-cli-enable-ha', + { + type: 'category', + label: 'get', + items: [ + 'reference/kots-cli-get-index', + 'reference/kots-cli-get-apps', + 'reference/kots-cli-get-backups', + 'reference/kots-cli-get-config', + 'reference/kots-cli-get-restores', + 'reference/kots-cli-get-versions', + ], + }, + { + type: 'category', + label: 'identity-service', + items: [ + 'reference/kots-cli-identity-service-index', + 'reference/kots-cli-identity-service-enable-shared-password', + ], + }, + 'reference/kots-cli-install', + 'reference/kots-cli-pull', + 'reference/kots-cli-remove', + 'reference/kots-cli-reset-password', + 'reference/kots-cli-reset-tls', + { + type: 'category', + label: 'restore', + items: [ + 'reference/kots-cli-restore-index', + 'reference/kots-cli-restore-ls', + ], + }, + { + type: 'category', + label: 'set', + items: [ + 'reference/kots-cli-set-index', + 'reference/kots-cli-set-config', + ], + }, + 'reference/kots-cli-upload', + { + type: 'category', + label: 'upstream', + items: [ + 'reference/kots-cli-upstream', + 'reference/kots-cli-upstream-download', + 'reference/kots-cli-upstream-upgrade', + ], + }, + { + type: 'category', + label: 'velero', + items: [ + + 'reference/kots-cli-velero-configure-aws-s3', + 'reference/kots-cli-velero-configure-azure', + 'reference/kots-cli-velero-configure-gcp', + 'reference/kots-cli-velero-configure-hostpath', + 'reference/kots-cli-velero-configure-internal', + 'reference/kots-cli-velero-configure-nfs', + 'reference/kots-cli-velero-configure-other-s3', + 'reference/kots-cli-velero-ensure-permissions', + 'reference/kots-cli-velero-index', + 'reference/kots-cli-velero-print-fs-instructions', + ], + }, + ], + }, + { + type: 'category', + 
label: 'Vendor API v3', + items: [ + { + type: 'doc', + id: 'reference/vendor-api-using' + }, + { + type: 'link', + label: 'Vendor API v3 Documentation', + href: 'https://replicated-vendor-api.readme.io/v3/' + }, + ], + }, + + //OPEN SOURCE DOCS + {type: 'html', value: '
    open source docs
    ', defaultStyle: true}, + {type: 'link', href: 'https://kurl.sh/docs/introduction/', label: 'kURL.sh'}, + {type: 'link', href: 'https://troubleshoot.sh/docs/collect/', label: 'Troubleshoot.sh'}, + + // POLICIES + {type: 'html', value: '
    platform overview
    ', defaultStyle: true}, + { + type: 'category', + label: 'Replicated Policies', + items: [ + 'vendor/policies-vulnerability-patch', + 'vendor/policies-support-lifecycle', + 'vendor/policies-data-transmission', + 'vendor/policies-infrastructure-and-subprocessors', + ], + }, + { + type: 'category', + label: 'Replicated Data Storage', + items: [ + 'vendor/data-availability', + 'vendor/offsite-backup' + ], + }, + { + type: 'category', + label: 'Security at Replicated', + items: [ + { + type: 'link', + label: 'Security at Replicated', + href: 'https://www.replicated.com/security/' + }, + 'enterprise/sbom-validating', + 'vendor/replicated-sdk-slsa-validating', + ], + }, + + ], +}; + +module.exports = sidebars; + +================ +File: variables.js +================ +const variables = { + productName: 'Replicated', + exampleVariable: 'Example' +}; + +module.exports = variables; + + + +================================================================ +End of Codebase +================================================================ From 1c63907b144246123a6deecfbdf2c864eb4d1489 Mon Sep 17 00:00:00 2001 From: Paige Calvert Date: Wed, 12 Mar 2025 12:28:18 -0600 Subject: [PATCH 2/9] Add llms.txt file --- .repomixignore | 25 + README.md | 151 +- package.json | 5 +- repomix.config.json | 28 + .../llms/llms-docs.txt | 46070 ++++++---------- static/llms/llms.txt | 16 + 6 files changed, 16806 insertions(+), 29489 deletions(-) create mode 100644 .repomixignore create mode 100644 repomix.config.json rename repomix-output.txt => static/llms/llms-docs.txt (77%) create mode 100644 static/llms/llms.txt diff --git a/.repomixignore b/.repomixignore new file mode 100644 index 0000000000..f5912d0a7a --- /dev/null +++ b/.repomixignore @@ -0,0 +1,25 @@ +# Add patterns to ignore here, one per line +# Example: +# *.log +# tmp/ + +docs/release-notes/ +docs/templates/ +docs/pdfs/ +docs/.history/ +.github/ +src/ +.gitignore +.repomixignore +babel.config.js +CODEOWNERS +config.json +docusaurus.config.js +js/ +LICENSE +netlify.toml +package.json +README.md +repomix.config.json +sidebars.js +variables.js \ No newline at end of file diff --git a/README.md b/README.md index 599a986655..7fe7c4b755 100644 --- a/README.md +++ b/README.md @@ -1,40 +1,26 @@ # [Replicated](https://www.replicated.com/) Product Documentation -## Table of Contents: - -* [For Vendors: How to Create Enterprise Documentation](#for-software-vendors-using-this-repository-to-create-your-documentation) -* [How to Contribute to the Documentation](#how-to-contribute-to-the-documentation) -* [Setting Up Local WYSIWYG Previews](#setting-up-local-wysiwyg-previews) -* [Folder Structure and TOC](#folder-structure-and-toc) -* [Topic Templates](#topic-templates) -* [Filenaming](#filenaming) -* [Images](#images) -* [Using Markdown with our Docusaurus CSS](#using-markdown-with-our-docusaurus-css) -* [Style Guidelines](#style-guidelines) -* [SME and Editorial Reviews](#sme-and-editorial-reviews) - Welcome to the repository for the [Replicated documentation site](https://docs.replicated.com/). -## For Software Vendors: Using this Repository to Create Your Documentation +## Contribute to the Replicated Docs -Software vendors using Replicated to distribute their application can copy the documentation in this repository to create docs for their own users. 
The following directories contain documentation for enterprise users about how to use the Replicated admin console and the kots CLI:
+## Contribute to the Replicated Docs
 
-* **docs/enterprise**: The `docs/enterprise` directory includes documentation for installing, updating, monitoring, and managing applications with the admin console and the kots CLI. See [`docs/enterprise`](https://github.com/replicatedhq/replicated-docs/tree/main/docs/enterprise). For the published version of the enterprise content, see [https://docs.replicated.com/enterprise](https://docs.replicated.com/enterprise/installing-overview).
-* **docs/reference**: The `docs/reference` directory includes reference documentation for the kots CLI commands. This includes details on each of the kots CLI commands and associated flags. See [`docs/reference`](https://github.com/replicatedhq/replicated-docs/tree/main/docs/reference). For the published version of the kots CLI reference content, see [Installing the kots CLI](https://docs.replicated.com/reference/kots-cli-getting-started).
+This repository has been made public so that vendors and the open-source community can contribute to the content using the following methods:
 
-To create your own documentation, review the content in these directories and copy and paste the markdown files into your own repository. Edit the content as necessary to add information and terminology specific to your application, and remove content that does not apply for your use cases.
+- **Submit a PR**: You can submit a PR directly from a specific topic in the documentation by clicking the **Create pull request or raise issue on GitHub** link at the bottom of the page. This method lets you edit the content directly and commit your changes on a new branch. After submitting your proposed changes, the Replicated team will verify the accuracy of the changes and perform an editorial review. If the PR is approved, it will be merged directly into the main branch.
 
-After copying the generic content from the above directories in this repository, you can then add your own application-specific content. For example, there are likely prerequisites, configuration options, and troubleshooting steps that are unique to your application.
+- **Open a GitHub Issue**: To open a GitHub issue for this repository, click the **Issues** tab and click **New Issue**. This method may be more useful when you want to report a bug specifically for the documentation. If you are having an issue with the product itself, we encourage you to report it to us in a support issue submitted in the vendor portal.
 
-For help getting started with writing documentation that is specific to your application, see the [vendor-docs-starter](https://github.com/replicatedhq/vendor-docs-starter) repository. The `vendor-docs-starter` repository contains templates, guidance, and examples that you can use to write the end user documentation for your application.
+## Folder Structure and Sidebar
 
-## How to Contribute to the Documentation
+The folder structure is broken into several high-level categories under the main `docs` folder: vendor, enterprise, reference, and release notes.
 
-This repository has been made public so that vendors and the open-source community can contribute to the content using the following methods:
+Images are under the `static` > `images` folder.
 
-- **Submit a PR** You can submit a PR directly from a specific topic in the documentation by clicking the **Create pull request or raise issue on GitHub** at the bottom of the page. This method lets you edit the content directly and commit your changes on a new branch. After submitting your proposed changes, the Replicated team will verify the accuracy of the changes and perform an editorial review. If the PR is approved, it will be merged directly into the main branch.
+The TOC is managed in the `sidebars.js` file. You only need to edit the `sidebars.js` file when you are adding a new topic or deleting an existing topic. The `sidebars.js` file is the one that causes most of the merge conflicts because many technical writers are working on content daily. You might need to merge in changes from other contributors before committing your PR.
 
-- **Open a Github Issue** - To open a GitHub issue for this repository, click the Issues tab and click **New Issue**. This method may be more useful when you want to report a bug specifically for the documentation. If you are having an issue with the product itself, we encourage you to report it to us in a support issue submitted in the vendor portal.
+Don't worry if you're not sure where in the TOC a new topic belongs. When you submit your PR, the Documentation team will edit it and help to find the right placement.
 
+The right-hand TOC is created automatically when you add headings to a topic.
+
 ## Setting Up Local WYSIWYG Previews
@@ -87,118 +73,7 @@ Before pushing changes to the remote repository, build and serve the site locall
    npm run serve
    ```
 
-## Folder Structure and TOC
-
-The folder structure is broken into several high-level categories under the main `docs` folder: vendor, enterprise, reference, release notes.
-
-Images are under the `static` > `images` folder.
-
-The TOC is managed in the `sidebar.js` file. You only need to edit the `sidebar.js` file when you are adding a new topic or deleting an existing topic. The `sidebar.js` file is the one that causes most of the merge conflicts because many technical writers are working on content daily. You will need to accept the changes from other contributors if you are committing a PR.
-
-Don't worry if you're not sure where in the TOC a new topic belongs. When you submit your PR, the Documentation team will edit it and help to find the right placement.
-
-The right-hand TOC is created automatically when you add headings to a topic.
-
-## Topic Templates
-
-You can find topic templates in the `docs/templates` folder. These templates are useful for anyone creating a new topic in this repository.
-
-If you are using the templates to create a new topic in this repository, save the new file to the correct folder (`docs/vendor`, `docs/enterprise`, `docs/reference`, etc) and be sure to follow the [filenaming convention](#filenaming).
-
-For additional templates designed for software vendors writing the end user documentation for their applications, see the [vendor-docs-starter](https://github.com/replicatedhq/vendor-docs-starter) repository.
-
-## Filenaming
-
-If you are adding a new file, it must be named following our naming conventions. The file name should always start with the feature type (such as licenses, helm, or gitops). Depending on the content type, it typically also includes a secondary descriptor and a verb. Verbs are used when you are creating a task topic.
-
-Because we author content using Markdown, you must add the `.md` the file extension to the file name.
-
-If you are adding a new topic to an existing feature category, follow the existing naming convention for that category.
- -**Example: Concept topic** - -`snapshots-backup-hooks.md` - -**Example: Task topic** - -`releases-creating-customer.md` - -**Example: Tutorial** - -`tutorial-ha-cluster-deploying.md` - - -## Images - -* Screenshots are use sparingly to minimize the maintenance of out-of-date content. However, we do include some screenshots to provide context. - -* Use a focused area of the UI, unless the entire screen is truly needed. If using a focused area, use approximately 400 pixels for the width. If capturing the entire screen, use a maximum of 600 pixels for the width. - -* We only use PNG format, which renders a better quality and lossless compression. - -* For privacy and legal purposes, do not reveal personal information, IP addresses, domain information, login credentials and so on in screenshots, code blocks, or text. - -* Add _alt text_ for all images to provide accessibility. The user will hear the alt text spoken out loud by the screen reader, so it is important to use succinct text that is clear and complete. For more information about alt text formatting, see the following section. - -* For images that are difficult to see, add a link below the image where the reader can view a larger version: `[View a larger version of this image](PATH-TO-LARGER-IMAGE-FILE)` where `PATH-TO-LARGER-VERSION` is the path to the larger image in the `static/images` folder. For an example, see the private registry diagram in [Connecting to a Private Image Registry](https://docs.replicated.com/vendor/packaging-private-images#about-connecting-to-an-external-registry). - - -## Using Markdown with our Docusaurus CSS - -Replicated uses its own CSS, and Docusaurus supports its own specific Markdown syntax. The following table provides an overview of the supported syntax elements. - -| Element | Syntax | -|---------------------------------------------|-------------------------------------------------------| -| Headings | `# H1`, `## H2`, `### H3` | -| Bold | `**bold text**` | -| Italic | `_italicized text_` | -| Ordered List | `1.` First item (use `1.` for each item) | -| Unordered List | `-` or `*` (for each item) | -| Code or command in a sentence | ``code`` | -| Link - external site | `[Title](https://www.example.com)` | -| Link - topic in same folder | `[Title](filename) without file extension` | -| Link - topic in different folder | `[Title](../folder/file-name) without file extension` | -| Link - section in topic in same folder | `[Title](file-name#section-name)` | -| Link - section in topic in different folder | `[Title](../folder/file-name#section-name)` | -| Image | `![alt text](images/.png)` | - -**Note:** Alt text, used with image syntax, is parsed by screen readers to support accessibility. - -### Admonitions - -Note admonitions are formatted as follows: - -``` -:::note -text -::: -``` - -Important admonitions, typically used as a warning, are formatted as follows: - -``` -:::important -text -::: -``` - -### Tables - -Traditional markdown for tables can be limiting. Instead, we use HTML tables, which lets us manage the width of the table columns. The template topic `procedure.md` contains an example of the HTML formatting for tables. - -**Note:** There are still many instances of the old markdown table formatting in the content that was carried over from the content migration, but we do not encourage the use of it going forward. - -## Style Guidelines - -Whether you are editing existing content or adding a new topic, our goal is to make it task-based. 
The `procedure.md` template provides the formatting guidelines that you need. You can also see a published example of a task [here](https://docs.replicated.com/vendor/releases-creating-customer). - -Replicated product documentation has in-house style guidelines that the Documentation team uses when reviewing your PR. Please feel free to just add the content you need, knowing that our team will be there to assist with editorial reviews and information architecture, such as TOC placement, whether to create a task, and so on. The Documentation team will actively write content, not just give editorial reviews, so we take the heavy burden off of you. We encourage your contributions in the true open-source spirit. - -Replicated employees can review more information in the Documentation Style Guide in the employee handbook. - - -## SME and Editorial Reviews - -All PRs that are submitted are reviewed by the Replicated Docs team for editorial review. +## Replicated Documentation for LLMs -Content that is submitted by our customers and the open-source community are also reviewed by our Replicated subject matter experts (SMEs) to help ensure technical accuracy. +- [llms.txt](https://docs.replicated.com/llms/llms.txt): This file provides an overview of the Replicated Documentation +- [llms-docs.txt](https://docs.replicated.com/llms/llms-docs.txt): This file contains the contents of the docs/ directory in the [replicated-docs](https://github.com/replicatedhq/replicated-docs) repository \ No newline at end of file diff --git a/package.json b/package.json index 48bd905c1a..c2c77504d6 100644 --- a/package.json +++ b/package.json @@ -5,7 +5,7 @@ "scripts": { "docusaurus": "docusaurus", "start": "docusaurus start", - "build": "repomix && docusaurus build", + "build": "repomix && mv llms-docs.txt static/llms/ && docusaurus build", "swizzle": "docusaurus swizzle", "deploy": "docusaurus deploy", "clear": "docusaurus clear", @@ -34,7 +34,8 @@ "devDependencies": { "@docusaurus/module-type-aliases": "3.5.2", "@docusaurus/types": "3.5.2", - "typescript": "~5.8.2" + "typescript": "~5.8.2", + "repomix": "0.2.36" }, "resolutions": { "immer": "^10.1.1", diff --git a/repomix.config.json b/repomix.config.json new file mode 100644 index 0000000000..1fb48f3b52 --- /dev/null +++ b/repomix.config.json @@ -0,0 +1,28 @@ +{ + "output": { + "filePath": "llms-docs.txt", + "style": "plain", + "parsableStyle": false, + "fileSummary": true, + "directoryStructure": true, + "removeComments": false, + "removeEmptyLines": false, + "compress": false, + "topFilesLength": 5, + "showLineNumbers": false, + "copyToClipboard": false + }, + "include": [ + "docs/" + ], + "ignore": { + "useGitignore": true, + "useDefaultPatterns": true + }, + "security": { + "enableSecurityCheck": true + }, + "tokenCount": { + "encoding": "o200k_base" + } +} \ No newline at end of file diff --git a/repomix-output.txt b/static/llms/llms-docs.txt similarity index 77% rename from repomix-output.txt rename to static/llms/llms-docs.txt index f84c0b5ceb..d747d1083a 100644 --- a/repomix-output.txt +++ b/static/llms/llms-docs.txt @@ -1,4 +1,4 @@ -This file is a merged representation of the entire codebase, combined into a single document by Repomix. +This file is a merged representation of a subset of the codebase, containing specifically included files, combined into a single document by Repomix. 
================================================================ File Summary @@ -36,6 +36,7 @@ Notes: ------ - Some files may have been excluded based on .gitignore rules and Repomix's configuration - Binary files are not included in this packed representation. Please refer to the Repository Structure section for a complete list of file paths, including binary files +- Only files matching these patterns are included: docs/ - Files matching patterns in .gitignore are excluded - Files matching default ignore patterns are excluded @@ -45,15 +46,6 @@ Additional Info: ================================================================ Directory Structure ================================================================ -.github/ - workflows/ - algolia-crawl.yml - app-manager-release-notes.yml - auto-label.yml - kubernetes-installer-release-notes.yml - replicated-sdk-release-notes.yml - vendor-portal-release-notes.yml - dependabot.yml docs/ enterprise/ auth-changing-passwords.md @@ -661,17 +653,6 @@ docs/ template-functions-license-context.md template-functions-static-context.md vendor-api-using.md - release-notes/ - rn-app-manager.md - rn-embedded-cluster.md - rn-kubernetes-installer.md - rn-replicated-sdk.md - rn-vendor-platform.md - rn-whats-new.md - templates/ - procedure.md - process-multiple-procedures.md - release-notes.md vendor/ admin-console-adding-buttons-links.mdx admin-console-customize-app-icon.md @@ -857,424 +838,11 @@ docs/ intro-kots.mdx intro-replicated.mdx intro.md -src/ - components/ - HomepageFeatures.js - HomepageFeatures.module.css - css/ - custom.css - theme/ - Admonition/ - index.js - styles.module.css - DocItem/ - Footer/ - index.js - styles.module.css - EditThisPage/ - index.js - styles.module.css -static/ - images/ - icons/ - chat_bubble_white.svg - vendor_portal_1.svg - git-pull-request.svg - logo.svg - report.svg - undraw_docusaurus_mountain.svg - undraw_docusaurus_react.svg - undraw_docusaurus_tree.svg - js/ - activecampaign.js - qualified.js - visitoranalytics.js -.gitignore -babel.config.js -CODEOWNERS -config.json -docusaurus.config.js -LICENSE -netlify.toml -package.json -README.md -sidebars.js -variables.js ================================================================ Files ================================================================ -================ -File: .github/workflows/algolia-crawl.yml -================ -name: scrape -concurrency: scrape -on: - push: - branches: - - main - workflow_dispatch: -jobs: - scrape: - runs-on: ubuntu-latest - steps: - - name: check out code 🛎d - uses: actions/checkout@v4 - # when scraping the site, inject secrets as environment variables - # then pass their values into the Docker container using "-e" syntax - # and inject config.json contents as another variable - - name: scrape the site 🧽 - env: - ALGOLIA_APP_ID: ${{ secrets.ALGOLIA_APP_ID }} - ALGOLIA_API_KEY: ${{ secrets.ALGOLIA_API_KEY }} - run: | - docker run \ - -e ALGOLIA_APP_ID -e ALGOLIA_API_KEY \ - -e CONFIG="$(cat config.json)" \ - algolia/docsearch-scraper - -================ -File: .github/workflows/app-manager-release-notes.yml -================ -name: app-manager-release-notes -on: - repository_dispatch: - types: [app-manager-release-notes] - inputs: - version: - description: KOTS version - required: true - -jobs: - generate-release-notes-pr: - runs-on: ubuntu-20.04 - steps: - - uses: actions/checkout@v4 - - - name: Generate Release Notes - id: release-notes - env: - KOTS_VERSION: ${{ github.event.client_payload.version }} - uses: 
replicatedhq/release-notes-generator@main - with: - owner-repo: replicatedhq/kots - head: $KOTS_VERSION - title: ${KOTS_VERSION#v} - description: 'Support for Kubernetes: 1.29, 1.30, and 1.31' - include-pr-links: false - github-token: ${{ secrets.GITHUB_TOKEN }} - - - name: Update Release Notes - env: - PATTERN: ".+RELEASE_NOTES_PLACEHOLDER.+" - run: | - cat <> /tmp/release-notes.txt - - ${{ steps.release-notes.outputs.release-notes }} - EOT - sed -i -E "/$PATTERN/r /tmp/release-notes.txt" docs/release-notes/rn-app-manager.md - rm -rf /tmp/release-notes.txt - - - name: Create Pull Request # creates a PR if there are differences - uses: peter-evans/create-pull-request@v7 - id: cpr - with: - token: ${{ secrets.REPLICATED_GH_PAT }} - commit-message: App Manager ${{ github.event.client_payload.version }} release notes - title: App Manager ${{ github.event.client_payload.version }} release notes - branch: automation/app-manager-release-notes-${{ github.event.client_payload.version }} - delete-branch: true - base: "main" - body: "Automated changes by the [app-manager-release-notes](https://github.com/replicatedhq/replicated-docs/blob/main/.github/workflows/app-manager-release-notes.yml) GitHub action" - - - name: Check outputs - run: | - echo "Pull Request Number - ${{ steps.cpr.outputs.pull-request-number }}" - echo "Pull Request URL - ${{ steps.cpr.outputs.pull-request-url }}" - - - name: Slack Notification - uses: slackapi/slack-github-action@v2.0.0 - with: - payload: | - { - "version": "${{ github.event.client_payload.version }}", - "pull_request_url": "${{steps.cpr.outputs.pull-request-url}}" - } - env: - SLACK_WEBHOOK_URL: ${{ secrets.KOTS_RELEASE_NOTES_SLACK_WEBHOOK }} - -================ -File: .github/workflows/auto-label.yml -================ -on: - pull_request: - types: [opened] - -name: Auto-label new PRs - -jobs: - label: - runs-on: ubuntu-latest - steps: - - uses: actions/github-script@v7 - with: - github-token: ${{ secrets.DOCS_GH_PAT }} - script: | - const labels = ['type::feature', 'type::docs'] - github.rest.issues.addLabels({ - ...context.repo, - issue_number: context.issue.number, - labels - }) - -================ -File: .github/workflows/kubernetes-installer-release-notes.yml -================ -name: kubernetes-installer-release-notes -on: - repository_dispatch: - types: [kubernetes-installer-release-notes] - inputs: - version: - description: kURL version - required: true - -jobs: - generate-release-notes-pr: - runs-on: ubuntu-20.04 - steps: - - uses: actions/checkout@v4 - - - name: Generate Release Notes - id: release-notes - env: - KURL_VERSION: ${{ github.event.client_payload.version }} - uses: replicatedhq/release-notes-generator@main - with: - owner-repo: replicatedhq/kurl - head: $KURL_VERSION - title: $KURL_VERSION - include-pr-links: false - github-token: ${{ secrets.GITHUB_TOKEN }} - feature-type-labels: type::feature,kurl::type::feature - - - name: Update Release Notes - env: - PATTERN: ".+RELEASE_NOTES_PLACEHOLDER.+" - run: | - cat <> /tmp/release-notes.txt - - ${{ steps.release-notes.outputs.release-notes }} - EOT - sed -i -E "/$PATTERN/r /tmp/release-notes.txt" docs/release-notes/rn-kubernetes-installer.md - rm -rf /tmp/release-notes.txt - - - name: Create Pull Request # creates a PR if there are differences - uses: peter-evans/create-pull-request@v7 - id: cpr - with: - token: ${{ secrets.REPLICATED_GH_PAT }} - commit-message: Kubernetes Installer ${{ github.event.client_payload.version }} release notes - title: Kubernetes Installer ${{ 
github.event.client_payload.version }} release notes - branch: automation/kubernetes-installer-release-notes-${{ github.event.client_payload.version }} - delete-branch: true - base: "main" - body: "Automated changes by the [kubernetes-installer-release-notes](https://github.com/replicatedhq/replicated-docs/blob/main/.github/workflows/kubernetes-installer-release-notes.yml) GitHub action" - - - name: Check outputs - run: | - echo "Pull Request Number - ${{ steps.cpr.outputs.pull-request-number }}" - echo "Pull Request URL - ${{ steps.cpr.outputs.pull-request-url }}" - - - name: Slack Notification - uses: slackapi/slack-github-action@v2.0.0 - with: - payload: | - { - "version": "${{ github.event.client_payload.version }}", - "pull_request_url": "${{steps.cpr.outputs.pull-request-url}}" - } - env: - SLACK_WEBHOOK_URL: ${{ secrets.KURL_RELEASE_NOTES_SLACK_WEBHOOK }} - -================ -File: .github/workflows/replicated-sdk-release-notes.yml -================ -name: replicated-sdk-release-notes -on: - repository_dispatch: - types: [replicated-sdk-release-notes] - inputs: - version: - description: Replicated SDK version - required: true - prev_version: - description: Previous Replicated SDK version - required: true - -jobs: - generate-release-notes-pr: - runs-on: ubuntu-22.04 - steps: - - uses: actions/checkout@v4 - - - name: Generate Release Notes - id: release-notes - env: - REPLICATED_SDK_VERSION: ${{ github.event.client_payload.version }} - PREV_REPLICATED_SDK_VERSION: ${{ github.event.client_payload.prev_version }} - uses: replicatedhq/release-notes-generator@main - with: - owner-repo: replicatedhq/replicated-sdk - base: $PREV_REPLICATED_SDK_VERSION - head: $REPLICATED_SDK_VERSION - title: $REPLICATED_SDK_VERSION - include-pr-links: false - github-token: ${{ secrets.GITHUB_TOKEN }} - - - name: Update Release Notes - env: - PATTERN: ".+RELEASE_NOTES_PLACEHOLDER.+" - run: | - cat <> /tmp/release-notes.txt - - ${{ steps.release-notes.outputs.release-notes }} - EOT - sed -i -E "/$PATTERN/r /tmp/release-notes.txt" docs/release-notes/rn-replicated-sdk.md - rm -rf /tmp/release-notes.txt - - - name: Create Pull Request # creates a PR if there are differences - uses: peter-evans/create-pull-request@v7 - id: cpr - with: - token: ${{ secrets.REPLICATED_GH_PAT }} - commit-message: Replicated SDK ${{ github.event.client_payload.version }} release notes - title: Replicated SDK ${{ github.event.client_payload.version }} release notes - branch: automation/replicated-sdk-release-notes-${{ github.event.client_payload.version }} - delete-branch: true - base: "main" - body: "Automated changes by the [replicated-sdk-release-notes](https://github.com/replicatedhq/replicated-docs/blob/main/.github/workflows/replicated-sdk-release-notes.yml) GitHub action" - - - name: Check outputs - run: | - echo "Pull Request Number - ${{ steps.cpr.outputs.pull-request-number }}" - echo "Pull Request URL - ${{ steps.cpr.outputs.pull-request-url }}" - - - name: Slack Notification - uses: slackapi/slack-github-action@v2.0.0 - with: - payload: | - { - "version": "${{ github.event.client_payload.version }}", - "pull_request_url": "${{steps.cpr.outputs.pull-request-url}}" - } - env: - SLACK_WEBHOOK_URL: ${{ secrets.REPLICATED_SDK_RELEASE_NOTES_SLACK_WEBHOOK }} - -================ -File: .github/workflows/vendor-portal-release-notes.yml -================ -name: vendor-portal-release-notes -on: - repository_dispatch: - types: [vendor-portal-release-notes] - inputs: - version: - description: Vendor Portal version - required: true - 
-jobs: - generate-release-notes: - runs-on: ubuntu-20.04 - outputs: - releaseNotes: ${{ steps.release-notes.outputs.release-notes }} - steps: - - uses: actions/checkout@v4 - - - name: Generate Release Notes - id: release-notes - env: - VENDOR_PORTAL_VERSION: ${{ github.event.client_payload.version }} - uses: replicatedhq/release-notes-generator@main - with: - owner-repo: replicatedhq/vandoor - head: $VENDOR_PORTAL_VERSION - title: $VENDOR_PORTAL_VERSION - include-pr-links: false - github-token: ${{ secrets.VENDOR_PORTAL_PAT }} - feature-type-labels: type::feature - generate-release-notes-pr: - runs-on: ubuntu-20.04 - needs: generate-release-notes - if: ${{ needs.generate-release-notes.outputs.releaseNotes != '' || needs.generate-release-notes.outputs.releaseNotes != null }} - steps: - - uses: actions/checkout@v4 - - name: Update Release Notes - env: - PATTERN: ".+RELEASE_NOTES_PLACEHOLDER.+" - run: | - cat <> /tmp/release-notes.txt - - ${{ needs.generate-release-notes.outputs.releaseNotes }} - EOT - sed -i -E "/$PATTERN/r /tmp/release-notes.txt" docs/release-notes/rn-vendor-platform.md - rm -rf /tmp/release-notes.txt - - - name: Create Pull Request # creates a PR if there are differences - uses: peter-evans/create-pull-request@v7 - id: cpr - with: - token: ${{ secrets.REPLICATED_GH_PAT }} - commit-message: Vendor Portal ${{ github.event.client_payload.version }} release notes - title: Vendor Portal ${{ github.event.client_payload.version }} release notes - branch: automation/vendor-portal-release-notes-${{ github.event.client_payload.version }} - delete-branch: true - base: "main" - body: "Automated changes by the [vendor-portal-release-notes](https://github.com/replicatedhq/replicated-docs/blob/main/.github/workflows/vendor-portal-release-notes.yml) GitHub action" - - - name: Check outputs - run: | - echo "Pull Request Number - ${{ steps.cpr.outputs.pull-request-number }}" - echo "Pull Request URL - ${{ steps.cpr.outputs.pull-request-url }}" - - - name: Slack Notification - uses: slackapi/slack-github-action@v2.0.0 - with: - payload: | - { - "version": "${{ github.event.client_payload.version }}", - "pull_request_url": "${{ steps.cpr.outputs.pull-request-url }}" - } - env: - SLACK_WEBHOOK_URL: ${{ secrets.VENDOR_PORTAL_RELEASE_NOTES_SLACK_WEBHOOK }} - -================ -File: .github/dependabot.yml -================ -version: 2 -updates: - # Update npm dependencies based on package.json - - package-ecosystem: "npm" - directory: "/" # Root directory - schedule: - interval: "weekly" - open-pull-requests-limit: 10 - labels: - - "dependencies" - - "npm" - - "type::security" - - # Keep Dependabot itself up to date - - package-ecosystem: "github-actions" - directory: "/" # Root directory (or specify if actions are elsewhere) - schedule: - interval: "weekly" - labels: - - "dependencies" - - "github-actions" - - "type::security" - ================ File: docs/enterprise/auth-changing-passwords.md ================ @@ -25004,9257 +24572,11128 @@ For the Vendor API swagger specification, see [vendor-api-v3.json](https://api.r [View a larger version of this image](/images/vendor-api-docs.png) ================ -File: docs/release-notes/rn-app-manager.md +File: docs/vendor/admin-console-adding-buttons-links.mdx ================ ---- -toc_max_heading_level: 2 -pagination_next: null -pagination_prev: null ---- +# Adding Links to the Dashboard -import KubernetesCompatibility from "../partials/install/_kubernetes-compatibility.mdx" +This topic describes how to use the Kubernetes SIG Application custom 
resource to add links to the Replicated KOTS Admin Console dashboard. -# KOTS Release Notes +## Overview -This topic contains release notes for the [Replicated KOTS](../intro-kots) installer. The release notes list new features, improvements, bug fixes, known issues, and breaking changes. +Replicated recommends that every application include a Kubernetes SIG Application custom resource. The Kubernetes SIG Application custom resource provides a standard API for creating, viewing, and managing applications. For more information, see [Kubernetes Applications](https://github.com/kubernetes-sigs/application#kubernetes-applications) in the kubernetes-sigs GitHub repository. -## Kubernetes Compatibility +You can include the Kubernetes SIG Application custom resource in your releases to add links to the Admin Console dashboard. Common use cases include adding links to documentation, dashboards, or a landing page for the application. -The following table lists the versions of Kubernetes that are compatible with each version of KOTS: +For example, the following shows an **Open App** button on the dashboard of the Admin Console for an application named Gitea: - +Admin Console dashboard with Open App link - +[View a larger version of this image](/images/gitea-open-app.png) -## 1.124.4 +:::note +KOTS uses the Kubernetes SIG Application custom resource as metadata and does not require or use an in-cluster controller to handle this custom resource. An application that follows best practices does not require cluster admin privileges or any cluster-wide components to be installed. +::: -Released on February 14, 2025 +## Add a Link -Support for Kubernetes: 1.29, 1.30, and 1.31 +To add a link to the Admin Console dashboard, include a [Kubernetes SIG Application](https://github.com/kubernetes-sigs/application#kubernetes-applications) custom resource in the release with a `spec.descriptor.links` field. The `spec.descriptor.links` field is an array of links that are displayed on the Admin Console dashboard after the application is deployed. -### Improvements {#improvements-1-124-4} -* Improves error visibility by displaying the actual error message in the UI instead of a generic one when an upgrade fails to start. +Each link in the `spec.descriptor.links` array contains two fields: +* `description`: The link text that will appear on the Admin Console dashboard. +* `url`: The target URL. -### Bug Fixes {#bug-fixes-1-124-4} -* Fixes an issue when installing with a provided license, that can cause the installer not to respect the custom domain in the license and instead make a request to https://replicated.app. +For example: -## 1.124.3 +```yaml +# app.k8s.io/v1beta1 Application Custom resource -Released on February 5, 2025 +apiVersion: app.k8s.io/v1beta1 +kind: Application +metadata: + name: "gitea" +spec: + descriptor: + links: + - description: About Wordpress + url: "https://wordpress.org/" +``` -Support for Kubernetes: 1.29, 1.30, and 1.31 +When the application is deployed, the "About Wordpress" link is displayed on the Admin Console dashboard as shown below: -### Improvements {#improvements-1-124-3} -* Updates images to resolve CVE-2024-45337 with critical severity and CVE-2024-45338 with high severity. 
+About Wordpress link on the Admin Console dashboard -## 1.124.2 +[View a larger version of this image](/images/dashboard-link-about-wordpress.png) -Released on February 4, 2025 +For an additional example of a Kubernetes SIG Application custom resource, see [application.yaml](https://github.com/kubernetes-sigs/application/blob/master/docs/examples/wordpress/application.yaml) in the kubernetes-sigs GitHub repository. -Support for Kubernetes: 1.29, 1.30, and 1.31 +### Create URLs with User-Supplied Values Using KOTS Template Functions {#url-template} -### Improvements {#improvements-1-124-2} -* Improvements for Embedded Cluster upgrades. +You can use KOTS template functions to template URLs in the Kubernetes SIG Application custom resource. This can be useful when all or some of the URL is a user-supplied value. For example, an application might allow users to provide their own ingress controller or load balancer. In this case, the URL can be templated to render the hostname that the user provides on the Admin Console Config screen. -## 1.124.1 +The following examples show how to use the KOTS [ConfigOption](/reference/template-functions-config-context#configoption) template function in the Kubernetes SIG Application custom resource `spec.descriptor.links.url` field to render one or more user-supplied values: -Released on January 29, 2025 +* In the example below, the URL hostname is a user-supplied value for an ingress controller that the user configures during installation. -Support for Kubernetes: 1.29, 1.30, and 1.31 + ```yaml + apiVersion: app.k8s.io/v1beta1 + kind: Application + metadata: + name: "my-app" + spec: + descriptor: + links: + - description: Open App + url: 'http://{{repl ConfigOption "ingress_host" }}' + ``` +* In the example below, both the URL hostname and a node port are user-supplied values. It might be necessary to include a user-provided node port if you are exposing NodePort services for installations on VMs or bare metal servers with [Replicated Embedded Cluster](/vendor/embedded-overview) or [Replicated kURL](/vendor/kurl-about). -### Bug Fixes {#bug-fixes-1-123-1} -* Improves an unclear error message that could occur when rewriting private images. + ```yaml + apiVersion: app.k8s.io/v1beta1 + kind: Application + metadata: + name: "my-app" + spec: + descriptor: + links: + - description: Open App + url: 'http://{{repl ConfigOption "hostname" }}:{{repl ConfigOption "node_port"}}' + ``` -## 1.124.0 +For more information about working with KOTS template functions, see [About Template Functions](/reference/template-functions-about). -Released on January 24, 2025 +================ +File: docs/vendor/admin-console-customize-app-icon.md +================ +# Customizing the Application Icon -Support for Kubernetes: 1.29, 1.30, and 1.31 +You can add a custom application icon that displays in the Replicated Admin Console and the download portal. Adding a custom icon helps ensure that your brand is reflected for your customers. -### New Features {#new-features-1-124-0} -* You can migrate Helm charts that were installed with HelmChart `v1beta1` and `useHelmInstall: false` to HelmChart `v1beta2` by passing the `--take-ownership` flag to the `helmUpgradeFlags` field in HelmChart custom resource as shown below: +:::note +You can also use a custom domain for the download portal. For more information, see [About Custom Domains](custom-domains). 
+::: - ```yaml - # HelmChart v1 beta2 - apiVersion: kots.io/v1beta2 - kind: HelmChart - metadata: - name: samplechart - spec: - helmUpgradeFlags: - - --take-ownership - ``` +## Add a Custom Icon - This flag allows Helm to take ownership of existing resources that were installed without Helm, like resources deployed with HelmChart v1beta1 and `useHelmInstall: false`. +For information about how to choose an image file for your custom application icon that displays well in the Admin Console, see [Icon Image File Recommendations](#icon-image-file-recommendations) below. - For information about how to migrate an existing installation to KOTS HelmChart `v1beta2`, see [Migrating Existing Installations to HelmChart v2](/vendor/helm-v2-migrate). +To add a custom application icon: -## 1.123.1 +1. In the [Vendor Portal](https://vendor.replicated.com/apps), click **Releases**. Click **Create release** to create a new release, or click **Edit YAML** to edit an existing release. +1. Create or open the Application custom resource manifest file. An Application custom resource manifest file has `apiVersion: kots.io/v1beta1` and `kind: Application`. -Released on January 13, 2025 +1. In the preview section of the Help pane: -Support for Kubernetes: 1.29, 1.30, and 1.31 + 1. If your Application manifest file is already populated with an `icon` key, the icon displays in the preview. Click **Preview a different icon** to access the preview options. -### Bug Fixes {#bug-fixes-1-123-1} -* Fixes an issue where the navigation menu was not visible on the Config page. + 1. Drag and drop an icon image file to the drop zone. Alternatively, paste a link or Base64 encoded data URL in the text box. Click **Preview**. -## 1.123.0 + ![Application icon preview](/images/app-icon-preview.png) -Released on January 2, 2025 + 1. (Air gap only) If you paste a link to the image in the text box, click **Preview** and **Base64 encode icon** to convert the image to a Base64 encoded data URL. An encoded URL displays that you can copy and paste into the Application manifest. Base64 encoding is required for images used with air gap installations. -Support for Kubernetes: 1.29, 1.30, and 1.31 + :::note + If you pasted a Base64 encoded data URL into the text box, the **Base64 encode icon** button does not display because the image is already encoded. If you drag and drop an icon, the icon is automatically encoded for you. + ::: -### New Features {#new-features-1-123-0} -* Adds the `--tolerations` flag to `kots install` to supply additional tolerations to the KOTS pods. + ![Base64 encode image button](/images/app-icon-preview-base64.png) -## 1.122.1 + 1. Click **Preview a different icon** to preview a different icon if needed. -Released on December 20, 2024 +1. In the Application manifest, under `spec`, add an `icon` key that includes a link or the Base64 encoded data URL to the desired image. -Support for Kubernetes: 1.29, 1.30, and 1.31 + **Example**: -### Bug Fixes {#bug-fixes-1-122-1} -* Fixes a bug that could result in instance being reported as unavailable if application includes an Ingress resource. + ```yaml + apiVersion: kots.io/v1beta1 + kind: Application + metadata: + name: my-application + spec: + title: My Application + icon: https://kots.io/images/kotsadm-logo-large@2x.png + ``` +1. Click **Save Release**. 
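+For air gap installations, the `icon` value is a Base64 encoded data URL rather than a link. The following is a minimal sketch of that form, where `BASE64-ENCODED-DATA` is a placeholder for the encoded image data generated in the preview section of the Help pane:
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Application
+metadata:
+  name: my-application
+spec:
+  title: My Application
+  # Base64 encoded data URLs are required for air gap installations
+  icon: data:image/png;base64,BASE64-ENCODED-DATA
+```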
-## 1.122.0 -Released on December 12, 2024 +## Icon Image File Recommendations -Support for Kubernetes: 1.29, 1.30, and 1.31 +For your custom application icon to look best in the Admin Console, consider the following recommendations: -### New Features {#new-features-1-122-0} -* Adds support for the `kots.io/keep` annotation, which prevents KOTS from deleting resources during an upgrade if the resource is no longer present in the new release. This annotation is useful when migrating existing KOTS installations to the KOTS HelmChart v2 installation method. For more information, see [Migrating Existing Installations to HelmChart v2](/vendor/helm-v2-migrate). +* Use a PNG or JPG file. +* Use an image that is at least 250 by 250 pixels. +* Export the image file at 2x. -## 1.121.2 +================ +File: docs/vendor/admin-console-customize-config-screen.md +================ +# Creating and Editing Configuration Fields -Released on November 27, 2024 +This topic describes how to use the KOTS Config custom resource manifest file to add and edit fields in the KOTS Admin Console configuration screen. -Support for Kubernetes: 1.29, 1.30, and 1.31 +## About the Config Custom Resource -### Improvements {#improvements-1-121-2} -* Various updates for Embedded Cluster. +Applications distributed with Replicated KOTS can include a configuration screen in the Admin Console to collect required or optional values from your users that are used to run your application. For more information about the configuration screen, see [About the Configuration Screen](config-screen-about). -## 1.121.1 +To include a configuration screen in the Admin Console for your application, you add a Config custom resource manifest file to a release for the application. -Released on November 26, 2024 +You define the fields that appear on the configuration screen as an array of `groups` and `items` in the Config custom resource: + * `groups`: A set of `items`. Each group must have a `name`, `title`, `description`, and `items`. For example, you can create a group of several user input fields that are all related to configuring an SMTP mail server. + * `items`: An array of user input fields. Each array under `items` must have a `name`, `title`, and `type`. You can also include several optional properties. For example, in a group for configuring a SMTP mail server, you can have user input fields under `items` for the SMTP hostname, port, username, and password. -Support for Kubernetes: 1.29, 1.30, and 1.31 + There are several types of `items` supported in the Config manifest that allow you to collect different types of user inputs. For example, you can use the `password` input type to create a text field on the configuration screen that hides user input. -### Bug Fixes {#bug-fixes-1-121-1} -* Various fixes for Embedded Cluster. +For more information about the syntax of the Config custom resource manifest, see [Config](/reference/custom-resource-config). -## 1.121.0 +## About Regular Expression Validation -Released on November 12, 2024 +You can use [RE2 regular expressions](https://github.com/google/re2/wiki/Syntax) (regex) to validate user input for config items, ensuring conformity to certain standards, such as valid email addresses, password complexity rules, IP addresses, and URLs. This prevents users from deploying an application with a verifiably invalid configuration. -Support for Kubernetes: 1.29, 1.30, and 1.31 +You add the `validation`, `regex`, `pattern` and `message` fields to items in the Config custom resource. 
Validation is supported for `text`, `textarea`, `password` and `file` config item types. For more information about regex validation fields, see [Item Validation](/reference/custom-resource-config#item-validation) in _Config_. -### New Features {#new-features-1-121-0} -* Adds the ability to download the application archive for any version, including the currently deployed version, by using the `--sequence` and `--current` flags for the `kots download` command. +The following example shows a common password complexity rule: -## 1.120.3 +``` +- name: smtp-settings + title: SMTP Settings + items: + - name: smtp_password + title: SMTP Password + type: password + help_text: Set SMTP password + validation: + regex: + pattern: ^(?:[\w@#$%^&+=!*()_\-{}[\]:;"'<>,.?\/|]){8,16}$ + message: The password must be between 8 and 16 characters long and can contain a combination of uppercase letter, lowercase letters, digits, and special characters. +``` -Released on November 7, 2024 +## Add Fields to the Configuration Screen -Support for Kubernetes: 1.29, 1.30, and 1.31 +To add fields to the Admin Console configuration screen: -### Improvements {#improvements-1-120-3} -* Various updates for Embedded Cluster. +1. In the [Vendor Portal](https://vendor.replicated.com/apps), click **Releases**. Then, either click **Create release** to create a new release, or click **Edit YAML** to edit an existing release. +1. Create or open the Config custom resource manifest file in the desired release. A Config custom resource manifest file has `kind: Config`. +1. In the Config custom resource manifest file, define custom user-input fields in an array of `groups` and `items`. -## 1.120.2 + **Example**: -Released on November 5, 2024 + ```yaml + apiVersion: kots.io/v1beta1 + kind: Config + metadata: + name: my-application + spec: + groups: + - name: smtp_settings + title: SMTP Settings + description: Configure SMTP Settings + items: + - name: enable_smtp + title: Enable SMTP + help_text: Enable SMTP + type: bool + default: "0" + - name: smtp_host + title: SMTP Hostname + help_text: Set SMTP Hostname + type: text + - name: smtp_port + title: SMTP Port + help_text: Set SMTP Port + type: text + - name: smtp_user + title: SMTP User + help_text: Set SMTP User + type: text + - name: smtp_password + title: SMTP Password + type: password + default: 'password' + ``` -Support for Kubernetes: 1.29, 1.30, and 1.31 + The example above includes a single group with the name `smtp_settings`. -### Bug Fixes {#bug-fixes-1-120-2} -* Fixes an issue where generating a support bundle in air gap kURL environments took a really long time. + The `items` array for the `smtp_settings` group includes the following user-input fields: `enable_smtp`, `smtp_host`, `smtp_port`, `smtp_user`, and `smtp_password`. Additional item properties are available, such as `affix` to make items appear horizontally on the same line. For more information about item properties, see [Item Properties](/reference/custom-resource-config#item-properties) in Config. -## 1.120.1 + The following screenshot shows how the SMTP Settings group from the example YAML above displays in the Admin Console configuration screen during application installation: -Released on November 4, 2024 + ![User input fields on the configuration screen for the SMTP settings](/images/config-screen-smtp-example-large.png) -Support for Kubernetes: 1.29, 1.30, and 1.31 +1. (Optional) Add default values for the fields. 
You can add default values using one of the following properties: + * **With the `default` property**: When you include the `default` key, KOTS uses this value when rendering the manifest files for your application. The value then displays as a placeholder on the configuration screen in the Admin Console for your users. KOTS only uses the default value if the user does not provide a different value. -### Bug Fixes {#bug-fixes-1-120-1} -* Fixes an issue where generating support bundles failed in air gap and minimal RBAC installations. -* Fixes an issue where pushing images from an air gap bundle could time out due to the host's environment variables interfering with the temporary registry used by the KOTS CLI. + :::note + If you change the `default` value in a later release of your application, installed instances of your application receive the updated value only if your users did not change the default from what it was when they initially installed the application. -## 1.120.0 + If a user did change a field from its default, the Admin Console does not overwrite the value they provided. + ::: -Released on October 30, 2024 + * **With the `value` property**: When you include the `value` key, KOTS does not overwrite this value during an application update. The value that you provide for the `value` key is visually indistinguishable from other values that your user provides on the Admin Console configuration screen. KOTS treats user-supplied values and the value that you provide for the `value` key as the same. -Support for Kubernetes: 1.29, 1.30, and 1.31 +2. (Optional) Add regular expressions to validate user input for `text`, `textarea`, `password` and `file` config item types. For more information, see [About Regular Expression Validation](#about-regular-expression-validation). -### New Features {#new-features-1-120-0} -* Various new features to support Replicated Embedded Cluster. + **Example**: -## 1.119.1 + ```yaml + - name: smtp_host + title: SMTP Hostname + help_text: Set SMTP Hostname + type: text + validation: + regex: ​ + pattern: ^[a-zA-Z]([a-zA-Z0-9\-]+[\.]?)*[a-zA-Z0-9]$ + message: Valid hostname starts with a letter (uppercase/lowercase), followed by zero or more groups of letters (uppercase/lowercase), digits, or hyphens, optionally followed by a period. Ends with a letter or digit. + ``` +3. (Optional) Mark fields as required by including `required: true`. When there are required fields, the user is prevented from proceeding with the installation until they provide a valid value for required fields. -Released on October 22, 2024 + **Example**: -Support for Kubernetes: 1.29, 1.30, and 1.31 + ```yaml + - name: smtp_password + title: SMTP Password + type: password + required: true + ``` -### Bug Fixes {#bug-fixes-1-119-1} +4. Save and promote the release to a development environment to test your changes. -* Fixes an issue where proxy settings was removed when upgrading the Admin Console with `kubectl kots admin-console upgrade`. -* Fixes an issue where `--strict-security-context` was removed when upgrading the Admin Console with `kubectl kots admin-console upgrade`. +## Next Steps -## 1.119.0 +After you add user input fields to the configuration screen, you use template functions to map the user-supplied values to manifest files in your release. If you use a Helm chart for your application, you map the values to the Helm chart `values.yaml` file using the HelmChart custom resource. 
-Released on October 18, 2024
+For more information, see [Mapping User-Supplied Values](config-screen-map-inputs).
-Support for Kubernetes: 1.29, 1.30, and 1.31
+================
+File: docs/vendor/admin-console-display-app-status.md
+================
+import StatusesTable from "../partials/status-informers/_statusesTable.mdx"
+import AggregateStatus from "../partials/status-informers/_aggregateStatus.mdx"
+import AggregateStatusIntro from "../partials/status-informers/_aggregate-status-intro.mdx"
+import SupportedResources from "../partials/instance-insights/_supported-resources-status.mdx"
-### Bug Fixes {#bug-fixes-1-119-0}
-* Fixes an issue where the Replicated SDK failed to deploy if a private CA was provided to the installation but the SDK was installed into a different namespace than KOTS.
-* If an application includes the Replicated SDK, the SDK will be deployed with the same ClusterRole as the Admin Console.
+# Adding Resource Status Informers
-## 1.118.0
+This topic describes how to add status informers for your application. Status informers apply only to applications installed with Replicated KOTS. For information about how to collect application status data for applications installed with Helm, see [Enabling and Understanding Application Status](insights-app-status).
-Released on October 15, 2024
+## About Status Informers
-Support for Kubernetes: 1.29, 1.30, and 1.31
+_Status informers_ are a feature of KOTS that report on the status of supported Kubernetes resources deployed as part of your application. You enable status informers by listing the target resources under the `statusInformers` property in the Replicated Application custom resource. KOTS watches all of the resources that you add to the `statusInformers` property for changes in state.
-### Improvements {#improvements-1-118-0}
-* Improves the flexibility of configuring the [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) collector in support bundle specs by limiting KOTS's default collection to its own namespace.
+Possible resource statuses are Ready, Updating, Degraded, Unavailable, and Missing. For more information, see [Understanding Application Status](#understanding-application-status).
-### Bug Fixes {#bug-fixes-1-118-0}
-* Fixes an issue where you could not upgrade Embedded Cluster instances if the new version didn't include config and preflight checks.
+When you add one or more status informers to your application, KOTS automatically does the following:
-## 1.117.5
+* Displays application status for your users on the dashboard of the Admin Console. This can help users diagnose and troubleshoot problems with their instance. The following shows an example of how an Unavailable status displays on the Admin Console dashboard:
-Released on October 8, 2024
+  Unavailable status on the Admin Console dashboard
-Support for Kubernetes: 1.29, 1.30, and 1.31
+* Sends application status data to the Vendor Portal. This is useful for viewing insights on instances of your application running in customer environments, such as the current status and the average uptime. For more information, see [Instance Details](instance-insights-details).
-### Improvements {#improvements-1-117-5}
-* Adds support for specifying an alternative data directory in Embedded Cluster.
+ The following shows an example of the Vendor Portal **Instance details** page with data about the status of an instance over time: -## 1.117.4 + Instance details full page -Released on October 8, 2024 + [View a larger version of this image](/images/instance-details.png) +## Add Status Informers -Support for Kubernetes: 1.29, 1.30, and 1.31 +To create status informers for your application, add one or more supported resource types to the `statusInformers` property in the Application custom resource. See [`statusInformers`](/reference/custom-resource-application#statusinformers) in _Application_. -### Improvements {#improvements-1-117-4} -* Various improvements for surfacing errors in Embedded Cluster upgrades. + -## 1.117.3 +You can target resources of the supported types that are deployed in any of the following ways: -Released on September 23, 2024 +* Deployed by KOTS. +* Deployed by a Kubernetes Operator that is deployed by KOTS. For more information, see [About Packaging a Kubernetes Operator Application](operator-packaging-about). +* Deployed by Helm. For more information, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about). -Support for Kubernetes: 1.29, 1.30, and 1.31 +### Examples -### New Features {#new-features-1-117-3} -* If the Replicated SDK is deployed by KOTS as part of an application, the SDK will automatically be configured with any additional CA certificates provided to `--private-ca-configmap` flag for the `kots install` command. +Status informers are in the format `[namespace/]type/name`, where namespace is optional and defaults to the current namespace. -## 1.117.2 +**Example**: -Released on September 20, 2024 +```yaml +apiVersion: kots.io/v1beta1 +kind: Application +metadata: + name: my-application +spec: + statusInformers: + - deployment/my-web-svc + - deployment/my-worker +``` -Support for Kubernetes: 1.29, 1.30, and 1.31 +The `statusInformers` property also supports template functions. Using template functions allows you to include or exclude a status informer based on a customer-provided configuration value: -### Improvements {#improvements-1-117-2} -* Improvements to support specifying ports in Embedded Cluster. +**Example**: -## 1.117.1 +```yaml +statusInformers: + - deployment/my-web-svc + - '{{repl if ConfigOptionEquals "option" "value"}}deployment/my-worker{{repl else}}{{repl end}}' +``` -Released on September 17, 2024 +In the example above, the `deployment/my-worker` status informer is excluded unless the statement in the `ConfigOptionEquals` template function evaluates to true. -Support for Kubernetes: 1.29, 1.30, and 1.31 +For more information about using template functions in application manifest files, see [About Template Functions](/reference/template-functions-about). -### Bug Fixes {#bug-fixes-1-117-1} -* Fixes an issue where the values provided to the `--http-proxy`, `--https-proxy`, and `--no-proxy` flags for the `kots install` command were not propagated to the Replicated SDK when running an automated install. +## Understanding Application Status -## 1.117.0 +This section provides information about how Replicated interprets and aggregates the status of Kubernetes resources for your application to report an application status. -Released on September 13, 2024 +### Resource Statuses -Support for Kubernetes: 1.29, 1.30, and 1.31 +Possible resource statuses are Ready, Updating, Degraded, Unavailable, and Missing. 
-### New Features {#new-features-1-117-0} -* Adds the `--private-ca-configmap` flag to the `install` and `generate-manifests` commands. The contents of the provided ConfigMap are used as additional trusted certificate authorities. -* Adds the [`PrivateCACert` template function](/reference/template-functions-static-context#privatecacert) to return the name of a ConfigMap containing additional trusted CA certificates provided by the end user. +The following table lists the supported Kubernetes resources and the conditions that contribute to each status: -### Bug Fixes {#bug-fixes-1-117-0} -* Fixes an issue where `dropdown` Config items did not respect the `when` property. + -## 1.116.1 +### Aggregate Application Status -Released on September 12, 2024 + -Support for Kubernetes: 1.28, 1.29, and 1.30 + -### Bug Fixes {#bug-fixes-1-116-1} -* KOTS now uses the fully qualified `.svc.cluster.local` address when making requests to the `kotsadm-rqlite` and `kotsadm-minio` services for simplified HTTP proxy configuration using `NO_PROXY=.cluster.local`. +================ +File: docs/vendor/admin-console-port-forward.mdx +================ +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import ServicePortNote from "../partials/custom-resource-application/_servicePort-note.mdx" +import GiteaKotsApp from "../partials/getting-started/_gitea-kots-app-cr.mdx" +import GiteaHelmChart from "../partials/getting-started/_gitea-helmchart-cr.mdx" +import GiteaK8sApp from "../partials/getting-started/_gitea-k8s-app-cr.mdx" +import PortsApplicationURL from "../partials/custom-resource-application/_ports-applicationURL.mdx" +import NginxKotsApp from "../partials/application-links/_nginx-kots-app.mdx" +import NginxK8sApp from "../partials/application-links/_nginx-k8s-app.mdx" +import NginxService from "../partials/application-links/_nginx-service.mdx" +import NginxDeployment from "../partials/application-links/_nginx-deployment.mdx" -## 1.116.0 +# Port Forwarding Services with KOTS -Released on September 9, 2024 +This topic describes how to add one or more ports to the Replicated KOTS port forward tunnel by configuring the `ports` key in the KOTS Application custom resource. -Support for Kubernetes: 1.28, 1.29, and 1.30 +The information in this topic applies to existing cluster installations. For information about exposing services for Replicated kURL or Replicated Embedded Cluster installations, see [Exposing Services Using NodePorts](kurl-nodeport-services). -### New Features {#new-features-1-116-0} -* Adds the ability to specify additional annotations (`--additional-annotations`) and labels (`--additional-labels`) that will be applied to kotsadm pods. +## Overview -## 1.115.2 +For installations into existing clusters, KOTS automatically creates a port forward tunnel and exposes the Admin Console on port 8800 where it can be accessed by users. In addition to the 8800 Admin Console port, you can optionally add one or more extra ports to the port forward tunnel. -Released on September 5, 2024 +Adding ports to the port forward tunnel allows you to port forward application services without needing to manually run the `kubectl port-forward` command. You can also add a link to the Admin Console dashboard that points to port-forwarded services. 
-Support for Kubernetes: 1.28, 1.29, and 1.30 +This can be particularly useful when developing and testing KOTS releases for your application, because it provides a quicker way to access an application after installation compared to setting up an ingress controller or adding a load balancer. -### Improvements {#improvements-1-115-2} -* Available updates and the check for updates button are shown on the **Dashboard** page of the Admin Console for Embedded Cluster. These were removed in a previous version. -* When nodes need to be added to the cluster during an Embedded Cluster restore operation, the `join` command is more clearly shown in the Admin Console. -* Improves messaging when the requested channel slug is not allowed by the provided license. +## Port Forward a Service with the KOTS Application `ports` Key -### Bug Fixes {#bug-fixes-1-115-2} -* Fixes an issue where the values provided to the `--http-proxy`, `--https-proxy`, and `--no-proxy` flags for the `kots install` command were not propagated to the Replicated SDK. -* Hides a banner on the **View Files** page in Embedded Cluster that told users to use `kubectl kots` commands that are not intended for Embedded Cluster. +To port forward a service with KOTS for existing cluster installations: -## 1.115.1 +1. In a new release, configure the [`ports`](/reference/custom-resource-application#ports) key in the KOTS Application custom resource with details for the target service. For example: -Released on August 22, 2024 + ```yaml + apiVersion: kots.io/v1beta1 + kind: Application + metadata: + name: my-application + spec: + ports: + - serviceName: my-service + servicePort: 3000 + localPort: 8888 + ``` -Support for Kubernetes: 1.28, 1.29, and 1.30 + 1. For `ports.serviceName`, add the name of the service. KOTS can create a port forward to ClusterIP, NodePort, or LoadBalancer services. For more information about Kubernetes service types, see [Service](https://kubernetes.io/docs/concepts/services-networking/service/) in the Kubernetes documentation. + + 1. For `ports.servicePort`, add the `containerPort` of the Pod where the service is running. This is the port where KOTS forwards traffic. -### Bug Fixes {#bug-fixes-1-115-1} -* Fixes an issue where the default `nodeMetrics` analyzer did not run. + -## 1.115.0 + 1. For `ports.localPort`, add the port to map on the local workstation. -Released on August 20, 2024 +1. Promote the release to the channel that you use for internal testing, then install in a development environment to test your changes. -Support for Kubernetes: 1.28, 1.29, and 1.30 + When the application is in a Ready state and the KOTS port forward is running, you will see output similar to the following: -### Improvements {#improvements-1-115-0} -* The **Nodes** page displays guidance and easier access to the node join command during initial install of Embedded Cluster. -* Adds back the check for updates button on the **Version history** page in Embedded Cluster, so you can check for updates without refreshing the page. + ```bash + • Press Ctrl+C to exit + • Go to http://localhost:8800 to access the Admin Console + • Go to http://localhost:8888 to access the application + ``` + Confirm that you can access the service at the URL provided in the KOTS CLI output. -## 1.114.0 +1. (Optional) Add a link to the service on the Admin Console dashboard. See [Add a Link to a Port-Forwarded Service on the Admin Console Dashboard](#add-link) below. 
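+As a quick check from the command line, you can also request the forwarded service directly while the KOTS port forward tunnel is open. The following is a minimal example that assumes the `localPort` of `8888` used in the procedure above:
+
+```bash
+# The port forward tunnel must be running, for example via `kubectl kots admin-console`
+curl http://localhost:8888
+```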
-Released on August 12, 2024 +## Add a Link to a Port-Forwarded Service on the Admin Console Dashboard {#add-link} -Support for Kubernetes: 1.28, 1.29, and 1.30 +After you add a service to the KOTS port forward tunnel, you can also optionally add a link to the port-forwarded service on the Admin Console dashboard. -### New Features {#new-features-1-114-0} -* Adds support for the `dropdown` config item type, which creates a dropdown on the config screen. See [dropdown](/reference/custom-resource-config#dropdown) in _Config_. -* Adds the `radio` config item type, which is functionally equivalent to the `select_one` item type but is more clearly named. The `select_one` config item type is deprecated in favor of `radio` but is still fully functional. See [radio](/reference/custom-resource-config#radio) in _Config_. +To add a link to a port-forwarded service, add the _same_ URL in the KOTS Application custom resource `ports.applicationURL` and Kubernetes SIG Application custom resource `spec.descriptor.links.url` fields. When the URLs in these fields match, KOTS adds a link on the Admin Console dashboard where the given service can be accessed. This process automatically links to the hostname in the browser (where the Admin Console is being accessed) and appends the specified `localPort`. -## 1.113.0 +To add a link to a port-forwarded service on the Admin Console dashboard: -:::important -In KOTS 1.113.0 and later, an installation error can occur if you use the `kots install` command without specifying a channel slug _and_ the license used to install does not have access to the Stable channel. For more information, see [Breaking Change](#breaking-changes-1-113-0) below. -::: +1. In a new release, open the KOTS Application custom resource and add a URL to the `ports.applicationURL` field. For example: -Released on August 9, 2024 + ```yaml + apiVersion: kots.io/v1beta1 + kind: Application + metadata: + name: my-application + spec: + ports: + - serviceName: my-service + servicePort: 3000 + localPort: 8888 + applicationUrl: "http://my-service" + ``` -Support for Kubernetes: 1.28, 1.29, and 1.30 + Consider the following guidelines for this URL: + * Use HTTP instead of HTTPS unless TLS termination takes place in the application Pod. + * KOTS rewrites the URL with the hostname in the browser during deployment. So, you can use any hostname for the URL, such as the name of the service. For example, `http://my-service`. -### New Features {#new-features-1-113-0} -* Adds support for multi-channel licenses. This allows each license to be assigned to more than one channel. +1. Add a Kubernetes SIG Application custom resource in the release. For example: - With the introduction of multi-channel licenses, an installation error can occur if you use the `kots install` command without specifying a channel slug _and_ the license used to install does not have access to the Stable channel. For more information, see [Breaking Change](#breaking-changes-1-113-0) below. + ```yaml + # app.k8s.io/v1beta1 Application Custom resource -### Bug Fixes {#bug-fixes-1-113-0} -* Fixes an issue in Embedded Cluster where going back to the Nodes page during the installation and then clicking continue did not work. + apiVersion: app.k8s.io/v1beta1 + kind: Application + metadata: + name: "my-application" + spec: + descriptor: + links: + - description: Open App + # url matches ports.applicationURL in the KOTS Application custom resource + url: "http://my-service" + ``` -### Breaking Change {#breaking-changes-1-113-0} + 1. 
For `spec.descriptor.links.description`, add the link text that will appear on the Admin Console dashboard. For example, `Open App`. -In KOTS 1.113.0 and later, the following error will occur during installation if the `kots install` command lacks a channel slug _and_ the license does not have access to the Stable channel: `"failed to verify and update license: requested channel not found in latest license"`. This can break existing automation and documentation that includes a `kots install` command without a channel slug. + 1. For `spec.descriptor.links.url`, add the _same_ URL that you used in the `ports.applicationURL` in the KOTS Application custom resource. -This error occurs because, when the channel slug is omitted from the `kots install` command (for example, `kots install app`), KOTS defaults to pulling metadata like the application icon and minimal RBAC configurations from the Stable channel. With the introduction of multi-channel licenses in KOTS 1.113.0, only licenses with access to a channel can pull metadata and download releases from that channel. This means that only licenses with access to the Stable channel can install without specifying the channel slug in the `kots install` command. +1. Promote the release to the channel that you use for internal testing, then install in a development environment to test your changes. -Previously, any license regardless of its assigned channel could install by excluding the channel slug from the `kots install` command. This could cause mismatches in deployment settings such as icons and minimal RBAC configurations because KOTS would pull metadata from the Stable channel and then install the release from the channel where the license was assigned. + When the application is in a Ready state, confirm that you can access the service by clicking the link that appears on the dashboard. For example: -**Solution:** To install a release from a channel other than Stable, specify the channel slug in the `kots install` command (for example, `kots install app/beta`). Also, ensure that the license has access to the specified channel. Refer to the Vendor Portal installation instructions or use the `replicated channel inspect CHANNEL_ID` command in the Replicated CLI for the correct commands. + Admin Console dashboard with Open App link -To avoid breaking changes, update automation that uses the `kots install` command accordingly. Also, update documentation as needed so that the documented installation commands include the channel slug. + [View a larger version of this image](/images/gitea-open-app.png) -If you cannot update your KOTS installation command immediately, temporarily revert to KOTS 1.112.4 or earlier. +## Access Port-Forwarded Services -## 1.112.4 +This section describes how to access port-forwarded services. -Released on July 31, 2024 +### Command Line -Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30 +Run [`kubectl kots admin-console`](/reference/kots-cli-admin-console-index) to open the KOTS port forward tunnel. -### Bug Fixes {#bug-fixes-1-112-4} -* Fixes an issue in Embedded Cluster upgrades where preflights did not rerun when the config was re-edited. -* Fixes an issue that caused K8s minor version parsing errors to be logged repeatedly. +The `kots admin-console` command runs the equivalent of `kubectl port-forward svc/myapplication-service :`, then prints a message with the URLs where the Admin Console and any port-forwarded services can be accessed. 
For more information about the `kubectl port-forward` command, see [port-forward](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#port-forward) in the Kubernetes documentation.
-## 1.112.3
+For example:
-Released on July 30, 2024
+```bash
+kubectl kots admin-console --namespace gitea
+```
+```bash
+• Press Ctrl+C to exit
+• Go to http://localhost:8800 to access the Admin Console
+• Go to http://localhost:8888 to access the application
+```
-Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30
+### Admin Console
-### Bug Fixes {#bug-fixes-1-112-3}
-* Fixes an issue where the Admin Console **Version history** page for Embedded Cluster had to be refreshed to show a newly available version after uploading an air gap bundle.
+You can optionally add a link to a port-forwarded service from the Admin Console dashboard. This requires additional configuration. For more information, see [Add a Link to a Port-Forwarded Service on the Admin Console Dashboard](#add-link).
-## 1.112.2
+The following example shows an **Open App** link on the dashboard of the Admin Console for an application named Gitea:
-Released on July 26, 2024
+Admin Console dashboard with Open App link
-Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30
+[View a larger version of this image](/images/gitea-open-app.png)
-### Bug Fixes {#bug-fixes-1-112-2}
-* Fixes an issue in Embedded Cluster upgrades where rendering Helm charts that utilize Helm capabilities could fail.
+## Examples
-## 1.112.1
+This section provides examples of how to configure the `ports` key to port-forward a service in existing cluster installations and add links to services on the Admin Console dashboard.
-Released on July 16, 2024
+### Example: Bitnami Gitea Helm Chart with LoadBalancer Service
-Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30
+This example uses a KOTS Application custom resource and a Kubernetes SIG Application custom resource to configure port forwarding for the Bitnami Gitea Helm chart in existing cluster installations, and add a link to the port-forwarded service on the Admin Console dashboard. To view the Gitea Helm chart source, see [bitnami/gitea](https://github.com/bitnami/charts/blob/main/bitnami/gitea) in GitHub.
-### Bug Fixes {#bug-fixes-1-112-1}
-* Fixes an issue where reporting information wasn't sent for Embedded Cluster in some cases.
+To test this example:
-## 1.112.0
+1. Pull version 1.0.6 of the Gitea Helm chart from Bitnami:
-Released on June 27, 2024
+   ```
+   helm pull oci://registry-1.docker.io/bitnamicharts/gitea --version 1.0.6
+   ```
-Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30
+1. Add the `gitea-1.0.6.tgz` chart archive to a new, empty release in the Vendor Portal along with the `kots-app.yaml`, `k8s-app.yaml`, and `gitea.yaml` files provided below. Promote to the channel that you use for internal testing. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases).
-### New Features {#new-features-1-112-0}
-* Changes the workflow for upgrading to newly available versions in the Admin Console for Embedded Cluster only. When upgrading to new versions, users are taken through a wizard where the license is synced, config can be edited, and preflight checks are run before deploying.
+
+
**`kots-app.yaml`**

Based on the `templates/svc.yaml` and `values.yaml` files in the Gitea Helm chart, the following KOTS Application custom resource adds port 3000 to the port forward tunnel and maps local port 8888. Port 3000 is the container port of the Pod where the gitea service runs.

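A minimal sketch of what this `kots-app.yaml` could look like, based on the description above (the application name, title, and status informer are assumptions):

```yaml
apiVersion: kots.io/v1beta1
kind: Application
metadata:
  name: gitea
spec:
  title: Gitea
  # Assumed name of the Deployment created by the Gitea Helm chart
  statusInformers:
    - deployment/gitea
  ports:
    - serviceName: "gitea"   # service created by the Gitea Helm chart
      servicePort: 3000      # container port where the gitea service listens
      localPort: 8888        # port mapped on the local machine
      applicationUrl: "http://gitea"
```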
**`k8s-app.yaml`**

The Kubernetes Application custom resource lists the same URL as the `ports.applicationUrl` field in the KOTS Application custom resource (`"http://gitea"`). This adds a link to the port-forwarded service from the Admin Console dashboard. It also triggers KOTS to rewrite the URL to use the hostname in the browser and append the specified `localPort`. The label to be used for the link in the Admin Console is "Open App".

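A sketch of the corresponding `k8s-app.yaml` using the Kubernetes SIG Application custom resource (the metadata name is an assumption):

```yaml
apiVersion: app.k8s.io/v1beta1
kind: Application
metadata:
  name: "gitea"
spec:
  descriptor:
    links:
      - description: Open App
        # Matches ports.applicationUrl in the KOTS Application custom resource
        url: "http://gitea"
```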
**`gitea.yaml`**

The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart. The `name` and `chartVersion` listed in the HelmChart custom resource must match the name and version of a Helm chart archive in the release. Each Helm chart archive in a release requires a unique HelmChart custom resource.

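A minimal sketch of the `gitea.yaml` HelmChart custom resource, assuming the `gitea-1.0.6.tgz` archive from the step above:

```yaml
apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: gitea
spec:
  chart:
    # Must match the name and version of the gitea-1.0.6.tgz chart archive
    name: gitea
    chartVersion: 1.0.6
```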
    -## 1.111.0 +1. Install the release to confirm that the service was port-forwarded successfully. To test the port forward, click **Open App** on the Admin Console dashboard after the application reaches a Ready state. For more information, see [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster). -Released on July 9, 2024 +### Example: NGINX Application with ClusterIP and NodePort Services -Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30 +The following example demonstrates how to link to a port-forwarded ClusterIP service for existing cluster installations. -### Bug Fixes {#bug-fixes-1-111-0} -* Fixes an issue where the Troubleshoot button on the **Resource status** modal didn't take you to the Troubleshoot page. +It also shows how to use the `ports` key to add a link to a NodePort service for kURL installations. Although the primary purpose of the `ports` key is to port forward services for existing cluster installations, it is also possible to use the `ports` key so that links to NodePort services for Embedded Cluster or kURL installations use the hostname in the browser. For information about exposing NodePort services for Embedded Cluster or kURL installations, see [Exposing Services Using NodePorts](kurl-nodeport-services). -## 1.110.0 +To test this example: -Released on June 27, 2024 +1. Add the `example-service.yaml`, `example-deployment.yaml`, `kots-app.yaml`, and `k8s-app.yaml` files provided below to a new, empty release in the Vendor Portal. Promote to the channel that you use for internal testing. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases). -Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30 + + +
**`example-service.yaml`**

The YAML below contains ClusterIP and NodePort specifications for a service named `nginx`. Each specification uses the `kots.io/when` annotation with the Replicated IsKurl template function to conditionally include the service based on the installation type (existing cluster or kURL cluster). For more information, see _Conditionally Including or Excluding Resources_ and _IsKurl_.

As shown below, both the ClusterIP and NodePort `nginx` services are exposed on port 80.

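A sketch of what `example-service.yaml` could look like, based on the description above (the `nodePort` value assumes the kURL installer permits node ports below the default Kubernetes range):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx
  annotations:
    # Included only for existing cluster (non-kURL) installations
    kots.io/when: 'repl{{ not IsKurl }}'
spec:
  type: ClusterIP
  selector:
    app: nginx
  ports:
    - port: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
  annotations:
    # Included only for kURL installations
    kots.io/when: 'repl{{ IsKurl }}'
spec:
  type: NodePort
  selector:
    app: nginx
  ports:
    - port: 80
      # Assumes the kURL cluster's node port range allows port 80
      nodePort: 80
```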
**`example-deployment.yaml`**

    A basic Deployment specification for the NGINX application.

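A minimal `example-deployment.yaml` sketch:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx
          ports:
            - containerPort: 80
```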
**`kots-app.yaml`**

The KOTS Application custom resource below adds port 80 to the KOTS port forward tunnel and maps port 8888 on the local machine. The specification also includes `applicationUrl: "http://nginx"` so that a link to the service can be added to the Admin Console dashboard.

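A sketch of this `kots-app.yaml` (the application name, title, and status informer are assumptions):

```yaml
apiVersion: kots.io/v1beta1
kind: Application
metadata:
  name: nginx
spec:
  title: NGINX Example
  # Assumed name of the Deployment in example-deployment.yaml
  statusInformers:
    - deployment/nginx
  ports:
    - serviceName: "nginx"
      servicePort: 80
      localPort: 8888
      applicationUrl: "http://nginx"
```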
**`k8s-app.yaml`**

    The Kubernetes Application custom resource lists the same URL as the `ports.applicationUrl` field in the KOTS Application custom resource (`"http://nginx"`). This adds a link to the port-forwarded service on the Admin Console dashboard that uses the hostname in the browser and appends the specified `localPort`. The label to be used for the link in the Admin Console is "Open App".

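A sketch of this `k8s-app.yaml` (the metadata name is an assumption):

```yaml
apiVersion: app.k8s.io/v1beta1
kind: Application
metadata:
  name: "nginx"
spec:
  descriptor:
    links:
      - description: Open App
        # Matches ports.applicationUrl in the KOTS Application custom resource
        url: "http://nginx"
```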
    -### New Features {#new-features-1-110-0} -* Adds the ability to specify a custom storage class for the KOTS Admin Console components when installing to an existing cluster. +1. Install the release into an existing cluster and confirm that the service was port-forwarded successfully by clicking **Open App** on the Admin Console dashboard. For more information, see [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster). -## 1.109.14 +1. If there is not already a kURL installer promoted to the channel, add a kURL installer to the release to support kURL installs. For more information, see [Creating a kURL Installer](/vendor/packaging-embedded-kubernetes). -Released on June 21, 2024 +1. Install the release on a VM and confirm that the service was exposed successfully. To test the port forward, click **Open App** on the Admin Console dashboard after the application reaches a Ready state. For more information, see [Online Installation with kURL](/enterprise/installing-kurl). -Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30 + :::note + Ensure that the VM where you install allows HTTP traffic. + ::: -### Bug Fixes {#bug-fixes-1-109-14} -* Fixes an issue where required releases were enforced in air gapped mode when changing between channels that didn't have semantic versioning enabled +================ +File: docs/vendor/admin-console-prometheus-monitoring.mdx +================ +import OverviewProm from "../partials/monitoring/_overview-prom.mdx" +import LimitationEc from "../partials/monitoring/_limitation-ec.mdx" -## 1.109.13 +# Adding Custom Graphs -Released on June 20, 2024 +This topic describes how to customize the graphs that are displayed on the Replicated Admin Console dashboard. -Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30 +## Overview of Monitoring with Prometheus -### Improvements {#improvements-1-109-13} -* Changes to support Embedded Cluster. + -## 1.109.12 +## About Customizing Graphs -Released on June 10, 2024 +If your application exposes Prometheus metrics, you can add custom graphs to the Admin Console dashboard to expose these metrics to your users. You can also modify or remove the default graphs. -Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30 +To customize the graphs that are displayed on the Admin Console, edit the [`graphs`](/reference/custom-resource-application#graphs) property in the KOTS Application custom resource manifest file. At a minimum, each graph in the `graphs` property must include the following fields: +* `title`: Defines the graph title that is displayed on the Admin Console. +* `query`: A valid PromQL Prometheus query. You can also include a list of multiple queries by using the `queries` property. For more information about querying Prometheus with PromQL, see [Querying Prometheus](https://prometheus.io/docs/prometheus/latest/querying/basics/) in the Prometheus documentation. -### Improvements {#improvements-1-109-12} -* Updates to enable high availability support for embedded cluster. +:::note +By default, a kURL cluster exposes the Prometheus expression browser at NodePort 30900. For more information, see [Expression Browser](https://prometheus.io/docs/visualization/browser/) in the Prometheus documentation. +::: -### Bug Fixes {#bug-fixes-1-109-12} -* Fixes an issue where air gap uploads could incorrectly fail with an error indicating the version being uploaded matches the current version. This occurred because the version labels matched and were valid semantic versions. 
Comparing version labels is intentional for channels with semantic versioning enabled, but was unintentional for channels without semantic versioning enabled. +## Limitation -## 1.109.11 + -Released on June 7, 2024 +## Add and Modify Graphs -Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30 +To customize graphs on the Admin Console dashboard: -### Improvements {#improvements-1-109-11} -* Minor UI improvements for the air gap bundle upload progress bar. +1. In the [Vendor Portal](https://vendor.replicated.com/), click **Releases**. Then, either click **Create release** to create a new release, or click **Edit YAML** to edit an existing release. -### Bug Fixes {#bug-fixes-1-109-11} -* Fixes an issue where the `--skip-preflights` flag would not work if all strict preflights passed but a non-strict preflight failed. +1. Create or open the [KOTS Application](/reference/custom-resource-application) custom resource manifest file. -## 1.109.10 - -Released on June 6, 2024 - -Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30 - -### Improvements {#improvements-1-109-10} -* Various updates to enable high availability support for embedded cluster. +1. In the Application manifest file, under `spec`, add a `graphs` property. Edit the `graphs` property to modify or remove existing graphs or add a new custom graph. For more information, see [graphs](/reference/custom-resource-application#graphs) in _Application_. -## 1.109.9 + **Example**: -Released on May 31, 2024 + The following example shows the YAML for adding a custom graph that displays the total number of user signups for an application. -Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30 + ```yaml + apiVersion: kots.io/v1beta1 + kind: Application + metadata: + name: my-application + spec: + graphs: + - title: User Signups + query: 'sum(user_signup_events_total)' + ``` -### Improvements {#improvements-1-109-9} -* Various updates to enable high availability support for embedded cluster. +1. (Optional) Under `graphs`, copy and paste the specs for the default Disk Usage, CPU Usage, and Memory Usage Admin Console graphs provided in the YAML below. -### Bug Fixes {#bug-fixes-1-109-9} -* An incorrect CLI command for generating support bundles is no longer shown on the Troubleshoot page in embedded clusters. + Adding these default graphs to the Application custom resource manifest ensures that they are not overwritten when you add one or more custom graphs. When the default graphs are included in the Application custom resource, the Admin Console displays them in addition to any custom graphs. -## 1.109.8 + Alternatively, you can exclude the YAML specs for the default graphs to remove them from the Admin Console dashboard. 
-Released on May 30, 2024 + ```yaml + apiVersion: kots.io/v1beta1 + kind: Application + metadata: + name: my-application + spec: + graphs: + - title: User Signups + query: 'sum(user_signup_events_total)' + # Disk Usage, CPU Usage, and Memory Usage below are the default graphs + - title: Disk Usage + queries: + - query: 'sum((node_filesystem_size_bytes{job="node-exporter",fstype!="",instance!=""} - node_filesystem_avail_bytes{job="node-exporter", fstype!=""})) by (instance)' + legend: 'Used: {{ instance }}' + - query: 'sum((node_filesystem_avail_bytes{job="node-exporter",fstype!="",instance!=""})) by (instance)' + legend: 'Available: {{ instance }}' + yAxisFormat: bytes + - title: CPU Usage + query: 'sum(rate(container_cpu_usage_seconds_total{namespace="{{repl Namespace}}",container!="POD",pod!=""}[5m])) by (pod)' + legend: '{{ pod }}' + - title: Memory Usage + query: 'sum(container_memory_usage_bytes{namespace="{{repl Namespace}}",container!="POD",pod!=""}) by (pod)' + legend: '{{ pod }}' + yAxisFormat: bytes + ``` +1. Save and promote the release to a development environment to test your changes. -Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30 +================ +File: docs/vendor/ci-overview.md +================ +import TestRecs from "../partials/ci-cd/_test-recs.mdx" -### Improvements {#improvements-1-109-8} -* Updates to enable high-availability support for Embedded Cluster. +# About Integrating with CI/CD -## 1.109.7 +This topic provides an introduction to integrating Replicated CLI commands in your continuous integration and continuous delivery (CI/CD) pipelines, including Replicated's best practices and recommendations. -Released on May 29, 2024 +## Overview -Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30 +Using CI/CD workflows to automatically compile code and run tests improves the speed at which teams can test, iterate on, and deliver releases to customers. When you integrate Replicated CLI commands into your CI/CD workflows, you can automate the process of deploying your application to clusters for testing, rather than needing to manually create and then archive channels, customers, and environments for testing. -### Improvements {#improvements-1-109-7} -* Updates to enable high-availability support for embedded cluster. +You can also include continuous delivery workflows to automatically promote a release to a shared channel in your Replicated team. This allows you to more easily share releases with team members for internal testing and iteration, and then to promote releases when they are ready to be shared with customers. -## 1.109.6 +## Best Practices and Recommendations -Released on May 24, 2024 +The following are Replicated's best practices and recommendations for CI/CD: -Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30 +* Include unique workflows for development and for releasing your application. This allows you to run tests on every commit, and then to promote releases to internal and customer-facing channels only when ready. For more information about the workflows that Replicated recommends, see [Recommended CI/CD Workflows](ci-workflows). -### Improvements {#improvements-1-109-6} -* Updates to enable disaster recovery support for embedded cluster. +* Integrate Replicated Compatibility Matrix into your CI/CD workflows to quickly create multiple different types of clusters where you can deploy and test your application. Supported distributions include OpenShift, GKE, EKS, and more. For more information, see [About Compatibility Matrix](testing-about). 
-## 1.109.5 +* If you use the GitHub Actions CI/CD platform, integrate the custom GitHub actions that Replicated maintains to replace repetitive tasks related to distributing your application with Replicated or using Compatibility Matrix. For more information, see [Integrating Replicated GitHub Actions](/vendor/ci-workflows-github-actions). -Released on May 23, 2024 +* To help show you are conforming to a secure supply chain, sign all commits and container images. Additionally, provide a verification mechanism for container images. -Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30 +* Use custom RBAC policies to control the actions that can be performed in your CI/CD workflows. For example, you can create a policy that blocks the ability to promote releases to your production channel. For more information about creating custom RBAC policies in the Vendor Portal, see [Configuring RBAC Policies](/vendor/team-management-rbac-configuring). For a full list of available RBAC resources, see [RBAC Resource Names](/vendor/team-management-rbac-resource-names). -### Improvements {#improvements-1-109-5} -* Updates to enable disaster recovery support for embedded cluster. +* Incorporating code tests into your CI/CD workflows is important for ensuring that developers receive quick feedback and can make updates in small iterations. Replicated recommends that you create and run all of the following test types as part of your CI/CD workflows: + -## 1.109.4 +================ +File: docs/vendor/ci-workflows-github-actions.md +================ +# Integrating Replicated GitHub Actions -Released on May 21, 2024 +This topic describes how to integrate Replicated's custom GitHub actions into continuous integration and continuous delivery (CI/CD) workflows that use the GitHub Actions platform. -Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30 +## Overview -### Bug Fixes {#bug-fixes-1-109-4} -* Fix `kubectl kots port-forward` for high-latency network connections. +Replicated maintains a set of custom GitHub actions that are designed to replace repetitive tasks related to distributing your application with Replicated and related to using the Compatibility Matrix, such as: + * Creating and removing customers, channels, and clusters + * Promoting releases + * Creating a matrix of clusters for testing based on the Kubernetes distributions and versions where your customers are running application instances + * Reporting the success or failure of tests -## 1.109.3 +If you use GitHub Actions as your CI/CD platform, you can include these custom actions in your workflows rather than using Replicated CLI commands. Integrating the Replicated GitHub actions into your CI/CD pipeline helps you quickly build workflows with the required inputs and outputs, without needing to manually create the required CLI commands for each step. -Released on May 15, 2024 +To view all the available GitHub actions that Replicated maintains, see the [replicatedhq/replicated-actions](https://github.com/replicatedhq/replicated-actions/) repository in GitHub. -Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30 +## GitHub Actions Workflow Examples -### Bug Fixes {#bug-fixes-1-109-3} -* Fixes an issue where the [Distribution](/reference/template-functions-static-context#distribution) template function returned `k0s` instead of `embedded-cluster` for embedded clusters.
You can use these workflows as a template for your own GitHub Actions CI/CD workflows: -## 1.109.2 +* For a simplified development workflow, see [development-helm-prepare-cluster.yaml](https://github.com/replicatedhq/replicated-actions/blob/main/example-workflows/development-helm-prepare-cluster.yaml). +* For a customizable development workflow for applications installed with the Helm CLI, see [development-helm.yaml](https://github.com/replicatedhq/replicated-actions/blob/main/example-workflows/development-helm.yaml). +* For a customizable development workflow for applications installed with KOTS, see [development-kots.yaml](https://github.com/replicatedhq/replicated-actions/blob/main/example-workflows/development-kots.yaml). +* For a release workflow, see [release.yaml](https://github.com/replicatedhq/replicated-actions/blob/main/example-workflows/release.yaml). -Released on May 15, 2024 +## Integrate GitHub Actions -Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30 +The following table lists the GitHub actions maintained by Replicated that you can integrate into your CI/CD workflows. The table also describes when to use the action in a workflow and indicates the related Replicated CLI command where applicable. -### Improvements {#improvements-1-109-2} -* Updates images to resolve CVE-2024-33599 with high severity; and CVE-2024-33600, CVE-2024-33601, CVE-2024-33602 with medium severity. +:::note +For an up-to-date list of the available custom GitHub actions, see the [replicatedhq/replicated-actions](https://github.com/replicatedhq/replicated-actions/) repository in GitHub. +::: -## 1.109.1
| GitHub Action | When to Use | Related Replicated CLI Commands |
|---------------|-------------|---------------------------------|
| `archive-channel` | In release workflows, a temporary channel is created to promote a release for testing. This action archives the temporary channel after tests complete. See _Archive the temporary channel and customer_ in [Recommended CI/CD Workflows](ci-workflows). | `channel rm` |
| `archive-customer` | In release workflows, a temporary customer is created so that a release can be installed for testing. This action archives the temporary customer after tests complete. See _Archive the temporary channel and customer_ in [Recommended CI/CD Workflows](ci-workflows). | N/A |
| `create-cluster` | In release workflows, use this action to create one or more clusters for testing. See _Create cluster matrix, deploy, and test_ in [Recommended CI/CD Workflows](ci-workflows). | `cluster create` |
| `create-release` | In release workflows, use this action to create a release to be installed and tested, and optionally to be promoted to a shared channel after tests complete. See _Create a release and promote to a temporary channel_ in [Recommended CI/CD Workflows](ci-workflows). | `release create` |
| `get-customer-instances` | In release workflows, use this action to create a matrix of clusters for running tests based on the Kubernetes distributions and versions of active instances of your application running in customer environments. See _Create cluster matrix, deploy, and test_ in [Recommended CI/CD Workflows](ci-workflows). | N/A |
| `helm-install` | In development or release workflows, use this action to install a release using the Helm CLI in one or more clusters for testing. See _Create cluster matrix, deploy, and test_ in [Recommended CI/CD Workflows](ci-workflows). | N/A |
| `kots-install` | In development or release workflows, use this action to install a release with Replicated KOTS in one or more clusters for testing. See _Create cluster matrix, deploy, and test_ in [Recommended CI/CD Workflows](ci-workflows). | N/A |
| `prepare-cluster` | In development workflows, use this action to create a cluster, create a temporary customer of type `test`, and install an application in the cluster. See _Prepare clusters, deploy, and test_ in [Recommended CI/CD Workflows](ci-workflows). | `cluster prepare` |
| `promote-release` | In release workflows, use this action to promote a release to an internal or customer-facing channel (such as Unstable, Beta, or Stable) after tests pass. See _Promote to a shared channel_ in [Recommended CI/CD Workflows](ci-workflows). | `release promote` |
| `remove-cluster` | In development or release workflows, use this action to remove a cluster after running tests if no `ttl` was set for the cluster. See _Prepare clusters, deploy, and test_ and _Create cluster matrix, deploy, and test_ in [Recommended CI/CD Workflows](ci-workflows). | `cluster rm` |
| `report-compatibility-result` | In development or release workflows, use this action to report the success or failure of tests that ran in clusters provisioned by the Compatibility Matrix. | `release compatibility` |
| `upgrade-cluster` | In release workflows, use this action to test your application's compatibility with Kubernetes API resource version migrations after upgrading. | `cluster upgrade` |
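For illustration, the following is a sketch of how one of these actions could be referenced in a workflow job. The version ref and input names are assumptions; check the action's README in the [replicatedhq/replicated-actions](https://github.com/replicatedhq/replicated-actions/) repository for the exact inputs:

```yaml
jobs:
  create-test-cluster:
    runs-on: ubuntu-22.04
    steps:
      # Creates a Compatibility Matrix cluster from a workflow step.
      # The "@v1" ref and the input names below are assumptions.
      - name: Create cluster
        uses: replicatedhq/replicated-actions/create-cluster@v1
        with:
          api-token: ${{ secrets.REPLICATED_API_TOKEN }}
          kubernetes-distribution: kind
          kubernetes-version: "1.27"
          ttl: 1h
```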
    -Released on May 15, 2024 +================ +File: docs/vendor/ci-workflows.mdx +================ +import Build from "../partials/ci-cd/_build-source-code.mdx" -Support for Kubernetes: 1.27, 1.28, 1.29, and 1.30 +# Recommended CI/CD Workflows -### Improvements {#improvements-1-109-1} -* Displays the volume name, Pod name, and namespace of snapshotted volumes in the snapshot details page. +This topic provides Replicated's recommended development and release workflows for your continuous integration and continuous delivery (CI/CD) pipelines. -### Bug Fixes {#bug-fixes-1-109-1} -* Fixes an issue where the **Config** and **View files** tabs did not display as active when clicked. -* Fixes an issue where KOTS failed to process Helm charts with required values that were configured with the v1beta2 HelmChart custom resource. +## Overview -## 1.109.0 +Replicated recommends that you maintain unique CI/CD workflows for development (continuous integration) and for releasing your software (continuous delivery). The development and release workflows in this topic describe the recommended steps and jobs to include in your own workflows, including how to integrate Replicated Compatibility Matrix into your workflows for testing. For more information about Compatibility Matrix, see [About Compatibility Matrix](testing-about). -Released on May 9, 2024 +For each step, the corresponding Replicated CLI command is provided. Additionally, for users of the GitHub Actions platform, a corresponding custom GitHub action that is maintained by Replicated is also provided. For more information about using the Replicated CLI, see [Installing the Replicated CLI](/reference/replicated-cli-installing). For more information about the Replicated GitHub actions, see [Integrating Replicated GitHub Actions](ci-workflows-github-actions). -Support for Kubernetes: 1.27, 1.28, and 1.29 +:::note +How you implement CI/CD workflows varies depending on the platform, such as GitHub, GitLab, CircleCI, TravisCI, or Jenkins. Refer to the documentation for your CI/CD platform for additional guidance on how to create jobs and workflows. +::: -### New Features {#new-features-1-109-0} -* Adds the ability to detect embedded cluster with the [Distribution](/reference/template-functions-static-context#distribution) template function. +## About Creating RBAC Policies for CI/CD -## 1.108.13 +Replicated recommends using custom RBAC policies to control the actions that can be performed in your CI/CD workflows. For example, you can create a policy using the [`kots/app/[]/channel/[]/promote`](/vendor/team-management-rbac-resource-names#kotsappchannelpromote) resource that blocks the ability to promote releases to your production channel. This allows for using CI/CD for the purpose of testing, without accidentally releasing to customers. -Released on May 6, 2024 +For more information about creating custom RBAC policies in the Vendor Portal, including examples, see [Configuring RBAC Policies](/vendor/team-management-rbac-configuring). -Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29 +For a full list of available RBAC resources, see [RBAC Resource Names](/vendor/team-management-rbac-resource-names). -### Improvements {#improvements-1-108-13} -* Updates the snapshot settings page to clarify that the retention policy applies to all snapshots, not just scheduled snapshots. 
+## Development Workflow -## 1.108.12 +In a development workflow (which runs multiple times per day and is triggered by a commit to the application code repository), the source code is built and the application is deployed to clusters for testing. Additionally, for applications managed in the Replicated Vendor Portal, a release is created and promoted to a channel where it can be shared with internal teams. -Released on May 3, 2024 +The following diagram shows the recommended development workflow, where a commit to the application code repository triggers the source code to be built and the application to be deployed to clusters for testing: -Support for Kubernetes: 1.27, 1.28, and 1.29 +![Development CI workflow](/images/ci-workflow-dev.png) -### Bug Fixes {#bug-fixes-1-108-12} -* Fixes an issue where the snapshot settings card on the admin console dashboard contained an extra slash in the object store bucket path. +[View a larger version of this image](/images/ci-workflow-dev.png) -## 1.108.11 +The following describes the recommended steps to include in development workflows, as shown in the diagram above: +1. [Define workflow triggers](#dev-triggers) +1. [Build source code](#dev-build) +1. [Prepare clusters, deploy, and test](#dev-deploy) -Released on May 1, 2024 +### Define workflow triggers {#dev-triggers} -Support for Kubernetes: 1.27, 1.28, and 1.29 +Run a development workflow on every commit to a branch in your code repository that is _not_ `main`. -### Improvements {#improvements-1-108-11} -* Various updates to enable disaster recovery support for embedded cluster. -* Updates Troubleshoot to v0.91.0. +The following example shows defining a workflow trigger in GitHub Actions that runs the workflow when a commit is pushed to any branch other than `main`: -## 1.108.10 +```yaml +name: development-workflow-example -Released on April 26, 2024 +on: + push: + branches: + - '*' # matches every branch that doesn't contain a '/' + - '*/*' # matches every branch containing a single '/' + - '**' # matches every branch + - '!main' # excludes main -Support for Kubernetes: 1.27, 1.28, and 1.29 +jobs: + ... +``` -### Improvements {#improvements-1-108-10} -* Various updates to enable disaster recovery support for embedded cluster. +### Build source code {#dev-build} + -Released on April 24, 2024 +### Prepare clusters, deploy, and test {#dev-deploy} -Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29 +Add a job with the following steps to prepare clusters with Replicated Compatibility Matrix, deploy the application, and run tests: -### Improvements {#improvements-1-108-9} -* Updates images to resolve CVE-2024-3817 with critical severity. +1. Use Replicated Compatibility Matrix to prepare one or more clusters and deploy the application. Consider the following recommendations: -### Bug Fixes {#bug-fixes-1-108-9} -* Fixes an issue where the **Edit config** link on the dashboard didn't work. + * For development workflows, Replicated recommends that you use the `cluster prepare` command to provision one or more clusters with Compatibility Matrix. The `cluster prepare` command creates a cluster, creates a release, and installs the release in the cluster, without the need to promote the release to a channel or create a temporary customer. See the [`cluster prepare`](/reference/replicated-cli-cluster-prepare) Replicated CLI command.
Or, for GitHub Actions workflows, see the [prepare-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/prepare-cluster) GitHub action. -## 1.108.8 + :::note + The `cluster prepare` command is Beta. It is recommended for development only and is not recommended for production releases. For production releases, Replicated recommends that you use the `cluster create` command instead. For more information, see [Create cluster matrix and deploy](#rel-deploy) in _Release Workflow_ below. + ::: -Released on April 18, 2024 + * The type and number of clusters that you choose to provision as part of a development workflow depend on how frequently you intend the workflow to run. For example, for workflows that run multiple times a day, you might prefer to provision cluster distributions that can be created quickly, such as kind clusters. -Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29 +1. Run tests, such as integration, smoke, and canary tests. For more information about recommended types of tests to run, see [Best Practices and Recommendations](/vendor/ci-overview#best-practices-and-recommendations) in _About Integrating with CI/CD_. -### Improvements {#improvements-1-108-8} -* Various updates to improve air gap and multi-node support for embedded cluster. +1. After the tests complete, remove the cluster. Alternatively, if you used the `--ttl` flag with the `cluster prepare` command, the cluster is automatically removed when the time period provided is reached. See the [`cluster rm`](/reference/replicated-cli-cluster-rm) Replicated CLI command. Or, for GitHub Actions workflows, see the [remove-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/remove-cluster) action. -## 1.108.7 +## Compatibility Matrix-Only Development Workflow -Released on April 16, 2024 +In a development workflow (which runs multiple times per day and is triggered by a commit to the application code repository), the source code is built and the application is deployed to clusters for testing. -Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29 +This example development workflow does _not_ create releases or customers in the Replicated vendor platform. This workflow is useful for applications that are not distributed or managed in the Replicated platform. -### Improvements {#improvements-1-108-7} -* Various updates to enable air gap and multi-node support for embedded cluster. +The following describes the recommended steps to include in a development workflow using Compatibility Matrix: -## 1.108.6 +1. [Define workflow triggers](#dev-triggers) -Released on April 11, 2024 +1. [Build source code](#dev-build) +1. [Create cluster matrix, deploy, and test](#dev-deploy) -Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29 +### Define workflow triggers {#dev-triggers} -### Improvements {#improvements-1-108-6} -* Provide a progress indicator to users when pushing images and embedded cluster artifacts during an installation. +Run a development workflow on every commit to a branch in your code repository that is _not_ `main`.
+The following example shows defining a workflow trigger in GitHub Actions that runs the workflow when a commit is pushed to any branch other than `main`: -## 1.108.5 +```yaml +name: development-workflow-example -Released on April 8, 2024 +on: + push: + branches: + - '*' # matches every branch that doesn't contain a '/' + - '*/*' # matches every branch containing a single '/' + - '**' # matches every branch + - '!main' # excludes main -Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29 +jobs: + ... +``` -### Improvements {#improvements-1-108-5} -* Various updates to enable air gap support for embedded cluster. +### Build source code {#dev-build} -## 1.108.4 + -Released on April 3, 2024 -Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29 +### Create cluster matrix, deploy, and test {#dev-deploy} -### Improvements {#improvements-1-108-4} -* Re-builds the kotsadm image with the latest Wolfi base image to mitigate CVE-2024-3094. +Add a job with the following steps to provision clusters with Compatibility Matrix, deploy your application to the clusters, and run tests: -## 1.108.3 +1. Use Compatibility Matrix to create a matrix of different Kubernetes cluster distributions and versions to run tests against. See the [cluster create](/reference/replicated-cli-cluster-create) Replicated CLI command. Or, for GitHub Actions workflows, see the [create-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/create-cluster) action. -Released on March 26, 2024 + The following example shows creating a matrix of clusters of different distributions and versions using GitHub Actions: -Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29 + ```yaml + # github actions cluster matrix example -### Improvements {#improvements-1-108-3} -* Updates to enable air gap support for embedded cluster. + compatibility-matrix-example: + runs-on: ubuntu-22.04 + strategy: + matrix: + cluster: + - {distribution: kind, version: "1.25"} + - {distribution: kind, version: "1.26"} + - {distribution: eks, version: "1.26"} + - {distribution: gke, version: "1.27"} + - {distribution: openshift, version: "4.13.0-okd"} + ``` -## 1.108.2 +1. For each cluster created, use the cluster's kubeconfig to update Kubernetes context and then install the target application in the cluster. For more information about accessing the kubeconfig for clusters created with Compatibility Matrix, see [cluster kubeconfig](/reference/replicated-cli-cluster-kubeconfig). -Released on March 25, 2024 +1. Run tests, such as integration, smoke, and canary tests. For more information about recommended types of tests to run, see [Best Practices and Recommendations](/vendor/ci-overview#best-practices-and-recommendations) in _About Integrating with CI/CD_. -Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29 +1. Delete the cluster when the tests complete. See the [cluster rm](/reference/replicated-cli-cluster-rm) Replicated CLI command. Or, for GitHub Actions workflows, see the [remove-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/remove-cluster) action. -### Improvements {#improvements-1-108-2} -* Various updates to enable air gap support for embedded cluster. +## Replicated Platform Release Workflow -## 1.108.1 +In a release workflow (which is triggered by an action such as a commit to `main` or a tag being pushed to the repository), the source code is built, the application is deployed to clusters for testing, and then the application is made available to customers. 
In this example release workflow, a release is created and promoted to a channel in the Replicated vendor platform so that it can be installed by internal teams or by customers. -Released on March 19, 2024 +The following diagram demonstrates a release workflow that promotes a release to the Beta channel when a tag with the format `"v*.*.*-beta.*"` is pushed: -Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29 +![Workflow that promotes to Beta channel](/images/ci-workflow-beta.png) -### Improvements {#improvements-1-108-1} -* Prevents application rollback in Embedded Cluster installations. +[View a larger version of this image](/images/ci-workflow-beta.png) -### Bug Fixes {#bug-fixes-1-108-1} -* Fixes an issue in Embedded Cluster where forward slashes were replaced with dashes in custom role labels. +The following describes the recommended steps to include in release workflows, as shown in the diagram above: -## 1.108.0 +1. [Define workflow triggers](#rel-triggers) +1. [Build source code](#rel-build) +1. [Create a release and promote to a temporary channel](#rel-release) +1. [Create cluster matrix, deploy, and test](#rel-deploy) +1. [Promote to a shared channel](#rel-promote) +1. [Archive the temporary channel and customer](#rel-cleanup) -Released on March 5, 2024 +### Define workflow triggers {#rel-triggers} -Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29 +Create unique workflows for promoting releases to your team's internal-only, beta, and stable channels. Define unique event triggers for each of your release workflows so that releases are only promoted to a channel when a given condition is met: -### New Features {#new-features-1-108-0} -* Adds the ability to get the config values of the currently deployed app version via the CLI by passing the `--current` flag to the [kubectl kots get config](/reference/kots-cli-get-config) CLI command. -* Adds the ability to update the config values of the currently deployed app version via the CLI by passing the `--current` flag to the [kubectl kots set config](/reference/kots-cli-set-config) CLI command. -* Adds the ability to update the config values of any app version via the CLI by providing the target sequence with the `--sequence` flag in the [kubectl kots set config](/reference/kots-cli-set-config) CLI command. -* Adds the ability to update the config values for any app version using the admin console. +* On every commit to the `main` branch in your code repository, promote a release to the channel that your team uses for internal testing (such as the default Unstable channel). -### Improvements {#improvements-1-108-0} -* Hides the **Application** and **Cluster Management** tabs on the admin console navbar during the initial installation flow with Replicated embedded cluster (Beta). For more information, see [Using Embedded Cluster](/vendor/embedded-overview). + The following example shows a workflow trigger in GitHub Actions that runs the workflow on commits to `main`: -### Bug Fixes {#bug-fixes-1-108-0} -* Fixes an issue where the license upload page flashed briefly before being redirected to the login page. -* Fixes an issue in embedded cluster (Beta) where the cluster upgrade modal occasionally failed to display during upgrades. + ```yaml + name: unstable-release-example -## 1.107.8 + on: + push: + branches: + - 'main' -Released on February 27, 2024 + jobs: + ... 
+ ``` -Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29 +* On pushing a tag that contains a version label with the semantic versioning format `x.y.z-beta.n` (such as `1.0.0-beta.1` or `v1.0.0-beta.2`), promote a release to your team's Beta channel. -### Improvements {#improvements-1-107-8} -* Resolves the false positive CVEs with critical severity in the `kotsadm` image which stemmed from the Dex Go library. + The following example shows a workflow trigger in GitHub Actions that runs the workflow when a tag that matches the format `v*.*.*-beta.*` is pushed: -## 1.107.7 + ```yaml + name: beta-release-example -Released on February 23, 2024 + on: + push: + tags: + - "v*.*.*-beta.*" -Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29 + jobs: + ... + ``` -### Bug Fixes {#bug-fixes-1-107-7} -* Fixes an issue where the "Ignore Preflights" button was not displayed on the preflights page when preflights were running. -* Fixes an issue where the [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue) template function did not return the new value when syncing the license. +* On pushing a tag that contains a version label with the semantic versioning format `x.y.z` (such as `1.0.0` or `v1.0.1`), promote a release to your team's Stable channel. -## 1.107.6 + The following example shows a workflow trigger in GitHub Actions that runs the workflow when a tag that matches the format `v*.*.*` is pushed: -Released on February 22, 2024 + ```yaml + name: stable-release-example -Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29 + on: + push: + tags: + - "v*.*.*" -### Improvements {#improvements-1-107-6} -* UI improvements when running in an embedded cluster (Alpha) + jobs: + ... + ``` -## 1.107.5 +### Build source code {#rel-build} -Released on February 20, 2024 + -Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29 +### Create a release and promote to a temporary channel {#rel-release} -### Bug Fixes {#bug-fixes-1-107-5} -* Fixes an issue in kURL clusters where images from Helm charts configured using the v1beta2 HelmChart custom resource were incorrectly removed from the in-cluster registry, potentially leading to failed deployments. +Add a job that creates and promotes a release to a temporary channel. This allows the release to be installed for testing in the next step. See the [release create](/reference/replicated-cli-release-create) Replicated CLI command. Or, for GitHub Actions workflows, see [create-release](https://github.com/replicatedhq/replicated-actions/tree/main/create-release). -## 1.107.4 +Consider the following requirements and recommendations: -Released on February 16, 2024 +* Use a consistent naming pattern for the temporary channels. Additionally, configure the workflow so that a new temporary channel with a unique name is created each time that the release workflow runs. -Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29 +* Use semantic versioning for the release version label. -### Bug Fixes {#bug-fixes-1-107-4} -* Fixes an issue where processing images from Helm charts configured using the v1beta2 HelmChart custom resource may fail in air gapped mode. + :::note + If semantic versioning is enabled on the channel where you promote the release, then the release version label _must_ be a valid semantic version number. See [Semantic Versioning](releases-about#semantic-versioning) in _About Channels and Releases_.
+ ::: -## 1.107.3 +* For Helm chart-based applications, the release version label must match the version in the `version` field of the Helm chart `Chart.yaml` file. To automatically update the `version` field in the `Chart.yaml` file, you can define a step in this job that updates the version label before packaging the Helm chart into a `.tgz` archive. -Released on February 12, 2024 +* For releases that will be promoted to a customer-facing channel such as Beta or Stable, Replicated recommends that the version label for the release matches the tag that triggered the release workflow. For example, if the tag `1.0.0-beta.1` was used to trigger the workflow, then the version label for the release is also `1.0.0-beta.1`. -Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29 +### Create cluster matrix, deploy, and test {#rel-deploy} -### Bug Fixes {#bug-fixes-1-107-3} -* Fixes an issue where the preflights page was not displayed during initial installation if the preflight spec was included in a Secret or ConfigMap in the Helm chart templates. +Add a job with the following steps to provision clusters with Compatibility Matrix, deploy the release to the clusters, and run tests: -## 1.107.2 +1. Create a temporary customer for installing the release. See the [customer create](/reference/replicated-cli-customer-create) Replicated CLI command. Or, for GitHub Actions workflows, see the [create-customer](https://github.com/replicatedhq/replicated-actions/tree/main/create-customer) action. -Released on February 2, 2024 +1. Use Compatibility Matrix to create a matrix of different Kubernetes cluster distributions and versions to run tests against. See the [cluster create](/reference/replicated-cli-cluster-create) Replicated CLI command. Or, for GitHub Actions workflows, see the [create-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/create-cluster) action. -Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29 + Consider the following recommendations: -### Improvements {#improvements-1-107-2} -* Removes the rqlite DB data dump from support bundles generated by KOTS. -* Updates the `minio`, `rqlite`, `dex`, and `local-volume-provider` images to resolves CVE-2023-6779, CVE-2023-6246, CVE-2024-21626 with high severity; and CVE-2023-6780 with medium severity. + * For release workflows, Replicated recommends that you run tests against multiple clusters of different Kubernetes distributions and versions. To help build the matrix, you can review the most common Kubernetes distributions and versions used by your customers on the **Customers > Reporting** page in the Replicated vendor portal. For more information, see [Customer Reporting](/vendor/customer-reporting). -## 1.107.1 + * When using the Replicated CLI, a list of representative customer instances can be obtained using the `api get` command. For example, `replicated api get /v3/app/[APP_ID]/cluster-usage | jq .` You can further filter these results by `channel_id`, `channel_sequence`, and `version_label`. + + * GitHub Actions users can also use the `get-customer-instances` action to automate the creation of a cluster matrix based on the distributions of clusters where instances of your application are installed and running. For more information, see the [example workflow](https://github.com/replicatedhq/replicated-actions/blob/main/example-workflows/development-dynamic.yaml) that makes use of [get-customer-instances](https://github.com/replicatedhq/replicated-actions/tree/main/get-customer-instances) in GitHub. 
-Released on February 1, 2024 + The following example shows creating a matrix of clusters of different distributions and versions using GitHub Actions: -Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29 + ```yaml + # github actions cluster matrix example -### Improvements {#improvements-1-107-1} + compatibility-matrix-example: + runs-on: ubuntu-22.04 + strategy: + matrix: + cluster: + - {distribution: kind, version: "1.25.3"} + - {distribution: kind, version: "1.26.3"} + - {distribution: eks, version: "1.26"} + - {distribution: gke, version: "1.27"} + - {distribution: openshift, version: "4.13.0-okd"} + ``` -* Updates the `kotsadm`, `kotsadm-migrations`, and `kurl-proxy` images to resolves CVE-2023-6779, CVE-2023-6246, CVE-2024-21626 with high severity; and CVE-2023-6780 with medium severity. +1. For each cluster created, use the cluster's kubeconfig to update Kubernetes context and then install the target application in the cluster. For more information about accessing the kubeconfig for clusters created with Compatibility Matrix, see [cluster kubeconfig](/reference/replicated-cli-cluster-kubeconfig). -## 1.107.0 + For more information about installing in an existing cluster, see: + * [Installing with Helm](/vendor/install-with-helm) + * [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster) -Released on January 30, 2024 +1. Run tests, such as integration, smoke, and canary tests. For more information about recommended types of tests to run, see [Best Practices and Recommendations](/vendor/ci-overview#best-practices-and-recommendations) in _About Integrating with CI/CD_. -Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29 +1. Delete the cluster when the tests complete. See the [cluster rm](/reference/replicated-cli-cluster-rm) Replicated CLI command. Or, for GitHub Actions workflows, see the [remove-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/remove-cluster) action. -### New Features {#new-features-1-107-0} -* Adds support for running KOTS on ARM64 nodes. For air gap installations, the KOTS air gap bundle has an updated format and also now includes images for both AMD64 and ARM64 architectures. When updating KOTS in air gap environments, ensure the CLI version you use matches the version of the KOTS air gap bundle because earlier KOTS versions are not compatible with the new air gap bundle format. For more information about KOTS installation requirements, see [Installation Requirements](/enterprise/installing-general-requirements). +### Promote to a shared channel {#rel-promote} -### Improvements {#improvements-1-107-0} -* Removes support `kubectlVersion` and `kustomizeVersion` in the KOTS Application custom resource. One version of kubectl and one version of kustomize are now included in KOTS and will always be used. +Add a job that promotes the release to a shared internal-only or customer-facing channel, such as the default Unstable, Beta, or Stable channel. See the [release promote](/reference/replicated-cli-release-promote) Replicated CLI command. Or, for GitHub Actions workflows, see the [promote-release](https://github.com/replicatedhq/replicated-actions/tree/main/promote-release) action. -## 1.106.0 +Consider the following requirements and recommendations: -Released on January 23, 2024 +* Replicated recommends that you include the `--version` flag with the `release promote` command to explicitly declare the version label for the release. 
Use the same version label that was used when the release was created as part of [Create a release and promote to a temporary channel](#rel-release) above. Although the `--version` flag is not required, declaring the same release version label during promotion provides additional consistency that makes the releases easier to track. -Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29 +* The channel to which the release is promoted depends on the event triggers that you defined for the workflow. For example, if the workflow runs on every commit to the `main` branch, then promote the release to an internal-only channel, such as Unstable. For more information, see [Define Workflow Triggers](#rel-triggers) above. -### New Features {#new-features-1-106-0} -* Adds support for an experimental air gap bundle feature that allows KOTS to process partial air gap bundles that only include the images needed to update to the desired version. +* Use the `--release-notes` flag to include detailed release notes in markdown. -## 1.105.5 +### Archive the temporary channel and customer {#rel-cleanup} -Released on January 18, 2024 +Finally, add a job to archive the temporary channel and customer that you created. This ensures that these artifacts are removed from your Replicated team and that they do not have to be manually archived after the release is promoted. -Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29 +See the [channel rm](/reference/replicated-cli-channel-rm) Replicated CLI command and the [customer/\{customer_id\}/archive](https://replicated-vendor-api.readme.io/reference/archivecustomer) endpoint in the Vendor API v3 documentation. Or, for GitHub Actions workflows, see the [archive-channel](https://github.com/replicatedhq/replicated-actions/tree/main/archive-channel) and [archive-customer](https://github.com/replicatedhq/replicated-actions/tree/main/archive-customer) actions. -### Improvements {#improvements-1-105-5} -* Adds the namespace to the password reset command that is displayed when the admin console is locked after hitting the limit of unsuccessful login attempts. +================ +File: docs/vendor/compatibility-matrix-usage.md +================ +# Viewing Compatibility Matrix Usage History +This topic describes using the Replicated Vendor Portal to understand +Compatibility Matrix usage across your team. -## 1.105.4 +## View Historical Usage +The **Compatibility Matrix > History** page provides +historical information about both clusters and VMs, as shown below: -Released on January 16, 2024 +![Compatibility Matrix History Page](/images/compatibility-matrix-history.png) +[View a larger version of this image](/images/compatibility-matrix-history.png) -Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29 +Only _terminated_ clusters and VMs that have been deleted or errored are displayed on the **History** page. -### Bug Fixes {#bug-fixes-1-105-4} -* Fixes an issue where Pods associated with KOTS components could be incorrectly scheduled on a non-AMD64 node. -* Fixes an issue where configuring snapshots to use internal storage failed in kURL clusters with HA MinIO and OpenEBS. +The top of the **History** page displays the total number of terminated clusters and VMs +in the selected time period as well as the total cost and usage time for +the terminated resources. -## 1.105.3 +The table includes cluster and VM entries with the following columns: +- **Name:** The name of the cluster or VM. +- **By:** The actor that created the resource. +- **Cost:** The cost of the resource. 
This is calculated at termination and is + based on the time the resource was running. +- **Distribution:** The distribution and version of the resource. For example, + `kind 1.32.1`. +- **Type:** The distribution type of the resource. Kubernetes clusters + are listed as `kubernetes` and VMs are listed as `vm`. +- **Status:** The status of the resource. For example `terminated` or `error`. +- **Instance:** The instance type of the resource. For example `r1.small`. +- **Nodes:** The node count for "kubernetes" resources. VMs do not use this + field. +- **Node Groups:** The node group count for "kubernetes" resources. VMs do not + use this field. +- **Created At:** The time the resource was created. +- **Running At:** The time the resource started running. For billing purposes, + this is the time when Replicated began charging for the resource. +- **Terminated At:** The time the resource was terminated. For billing + purposes, this is the time when Replicated stopped charging for the resource. +- **TTL:** The time-to-live for the resource. This is the maximum amount of + time the resource can run before it is automatically terminated. +- **Duration:** The total time the resource was running. This is the time + between the `running` and `terminated` states. +- **Tag:** Any tags that were applied to the resource. -Released on January 10, 2024 +## Filter and Sort Usage History -Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29 +Each of the fields on the **History** page can be filtered and sorted. To sort by a specific field, click on the column header. -### Improvements {#improvements-1-105-3} -* Upgrades the github.com/cloudflare/circl go module from 1.3.3 to 1.3.7 to resolve GHSA-9763-4f94-gfch with high severity. +To filter by a specific field, click on the filter icon in the column header, then use each specific filter input to filter the results, as shown below: -## 1.105.2 +![Compatibility Matrix History Page, filter input](/images/compatibility-matrix-column-filter-input.png) +[View a larger version of this image](/images/compatibility-matrix-column-filter-input.png) -Released on January 9, 2024 +## Get Usage History with the Vendor API v3 -Support for Kubernetes: 1.26, 1.27, 1.28, and 1.29 +For more information about using the Vendor API v3 to get Compatibility Matrix +usage history information, see the following API endpoints within the +Vendor API v3 documentation: -### Bug Fixes {#bug-fixes-1-105-2} -* Fixes an issue where rendering KOTS custom resources could fail if there are required configuration items that don't have defaults. -* Fixes an issue where the `kotsadm-rqlite` and `kotsadm-minio` Pods could be incorrectly scheduled on Arm nodes. +* [/v3/cmx/stats](https://replicated-vendor-api.readme.io/reference/getcmxstats) +* [/v3/vms](https://replicated-vendor-api.readme.io/reference/listvms) +* [/v3/clusters](https://replicated-vendor-api.readme.io/reference/listclusters) +* [/v3/cmx/history](https://replicated-vendor-api.readme.io/reference/listcmxhistory) -## 1.105.1 +For examples of using these endpoints, see the sections below. -Released on December 29, 2023 +### Credit Balance and Summarized Usage +You can use the `/v3/cmx/stats` endpoint to get summarized usage information in addition to your Compatibility Matrix +credit balance. 
-Support for Kubernetes: 1.26, 1.27, and 1.28 +This endpoint returns: -### Bug Fixes {#bug-fixes-1-105-1} -* Fixes an issue where the `minKotsVersion` and `targetKotsVersion` fields in the Application custom resource would not be enforced if it was part of a multi-doc yaml file. +- **`cluster_count`:** The total number of terminated clusters. +- **`vm_count`:** The total number of terminated VMs. +- **`usage_minutes`:** The total number of billed usage minutes. +- **`cost`:** The total cost of the terminated clusters and VMs in cents. +- **`credit_balance`:** The remaining credit balance in cents. -## 1.105.0 +```shell +curl --request GET \ + --url https://api.replicated.com/vendor/v3/cmx/stats \ + --header 'Accept: application/json' \ + --header 'Authorization: $REPLICATED_API_TOKEN' +{"cluster_count":2,"vm_count":4,"usage_minutes":152,"cost":276,"credit_balance":723} +``` -Released on December 28, 2023 +The `v3/cmx/stats` endpoint also supports filtering by `start-time` and +`end-time`. For example, the following request gets usage information for January 2025: -Support for Kubernetes: 1.26, 1.27, and 1.28 +```shell +curl --request GET \ + --url 'https://api.replicated.com/vendor/v3/cmx/stats?start-time=2025-01-01T00:00:00Z&end-time=2025-01-31T23:59:59Z' \ + --header 'Authorization: $REPLICATED_API_TOKEN' \ + --header 'accept: application/json' +``` -### New Features {#new-features-1-105-0} -* Adds the ability to template the entire [values](/reference/custom-resource-helmchart-v2#values) field in the HelmChart custom resource. +### Currently Active Clusters +To get a list of active clusters: -### Bug Fixes {#bug-fixes-1-105-0} -* Fixes an issue where the [namespace](/reference/custom-resource-helmchart-v2#namespace) field in HelmChart custom resources was not rendered when uninstalling the corresponding chart. -* Fixes an issue where KOTS failed to parse the Preflight custom resource if template functions were used for non-string fields.
+```shell +curl --request GET \ + --url 'https://api.replicated.com/vendor/v3/clusters' \ + --header 'Authorization: $REPLICATED_API_TOKEN' \ + --header 'accept: application/json' +``` -## 1.104.7 +You can also use a tool such as `jq` to filter and iterate over the output: -Released on December 14, 2023 +```shell +curl --request GET \ + --url 'https://api.replicated.com/vendor/v3/clusters' \ + --header 'Authorization: $REPLICATED_API_TOKEN' \ + --header 'accept: application/json' | \ + jq '.clusters[] | {name: .name, ttl: .ttl, distribution: .distribution, version: .version}' -Support for Kubernetes: 1.26, 1.27, and 1.28 +{ + "name": "friendly_brown", + "ttl": "1h", + "distribution": "kind", + "version": "1.32.1" +} +``` -### Improvements {#improvements-1-104-7} -* Uses Chainguard to build the local-volume-provider image to resolve CVE-2019-8457 and CVE-2023-45853 with critical severity; and CVE-2022-3715, CVE-2021-33560, CVE-2022-4899, CVE-2022-1304, CVE-2020-16156, CVE-2023-31484, CVE-2023-47038 with high severity; and CVE-2023-4806, CVE-2023-4813, CVE-2023-5981, CVE-2023-5678, CVE-2023-4039, CVE-2023-50495, CVE-2023-4641 with medium severity; and TEMP-0841856-B18BAF, CVE-2016-2781, CVE-2017-18018, CVE-2022-3219, CVE-2011-3374, CVE-2010-4756, CVE-2018-20796, CVE-2019-1010022, CVE-2019-1010023, CVE-2019-1010024, CVE-2019-1010025, CVE-2019-9192, CVE-2018-6829, CVE-2011-3389, CVE-2018-5709, CVE-2022-41409, CVE-2017-11164, CVE-2017-16231, CVE-2017-7245, CVE-2017-7246, CVE-2019-20838, CVE-2021-36084, CVE-2021-36085, CVE-2021-36086, CVE-2021-36087, CVE-2007-6755, CVE-2010-0928, CVE-2013-4392, CVE-2020-13529, CVE-2023-31437, CVE-2023-31438, CVE-2023-31439, CVE-2007-5686, CVE-2013-4235, CVE-2019-19882, CVE-2023-29383, TEMP-0628843-DBAD28, CVE-2011-4116, CVE-2023-31486, TEMP-0517018-A83CE6, CVE-2005-2541, CVE-2022-48303, CVE-2023-39804, TEMP-0290435-0B57B5, CVE-2022-0563 with low severity. +### Currently Active Virtual Machines +To get a list of active VMs: -## 1.104.6 +```shell +curl --request GET \ + --url 'https://api.replicated.com/vendor/v3/vms' \ + --header 'Authorization: $REPLICATED_API_TOKEN' \ + --header 'accept: application/json' +``` -Released on December 13, 2023 +### Historical Usage +To fetch historical usage information: -Support for Kubernetes: 1.26, 1.27, and 1.28 +```shell +curl --request GET \ + --url 'https://api.replicated.com/vendor/v3/cmx/history' \ + --header 'Authorization: $REPLICATED_API_TOKEN' \ + --header 'accept: application/json' +``` -### Improvements {#improvements-1-104-6} -* Uses Chainguard to build the kotsadm-migrations image to resolve CVE-2023-45853 with critical severity; CVE-2023-31484, CVE-2023-47038, and CVE-2023-39325 with high severity; CVE-2023-5981, CVE-2023-4039, CVE-2023-5678, CVE-2023-4641, and CVE-2023-44487 with medium severity; and TEMP-0841856-B18BAF, CVE-2022-0563, CVE-2016-2781, CVE-2017-18018, CVE-2022-27943, CVE-2022-3219, CVE-2011-3374, CVE-2010-4756, CVE-2018-20796, CVE-2019-1010022, CVE-2019-1010023, CVE-2019-1010024, CVE-2019-1010025, CVE-2019-9192, CVE-2018-6829, CVE-2011-3389, CVE-2013-4392, CVE-2023-31437, CVE-2023-31438, CVE-2023-31439, CVE-2007-6755, CVE-2010-0928, CVE-2007-5686, CVE-2019-19882, CVE-2023-29383, TEMP-0628843-DBAD28, CVE-2011-4116, CVE-2023-31486, TEMP-0517018-A83CE6, CVE-2005-2541, CVE-2022-48303, CVE-2023-39804, and TEMP-0290435-0B57B5 with low severity. 
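+As with the clusters list above, you can pipe any of these list responses through `jq`. The following is a sketch for the VMs list; it assumes the `/v3/vms` response wraps its records in a top-level `vms` array that mirrors the `clusters` array shown earlier, so check the actual payload before relying on these field names:
+
+```shell
+# Sketch only: assumes the /v3/vms response contains a top-level "vms" array
+curl --request GET \
+  --url 'https://api.replicated.com/vendor/v3/vms' \
+  --header 'Authorization: $REPLICATED_API_TOKEN' \
+  --header 'accept: application/json' | \
+  jq '.vms[] | {name: .name, ttl: .ttl, distribution: .distribution}'
+```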
-* Uses Chainguard to build the rqlite image to resolve CVE-2023-5363, CVE-2023-39325, and GHSA-m425-mq94-257g with high severity; and CVE-2023-5678, CVE-2023-3978, and CVE-2023-44487 with medium severity. -* Uses Chainguard to build the MinIO image to resolve CVE-2022-27943 and CVE-2022-29458 with low severity. -* Uses Chainguard to build the dex image to resolve CVE-2022-48174 with critical severity; CVE-2023-5363, CVE-2023-39325, and GHSA-m425-mq94-257g with high severity; and CVE-2023-2975, CVE-2023-3446, CVE-2023-3817, CVE-2023-5678, GHSA-2c7c-3mj9-8fqh, CVE-2023-3978, and CVE-2023-44487 with medium severity. +You can also filter the response from the `/v3/cmx/history` endpoint by `distribution-type`, which +allows you to get a list of either clusters or VMs: -### Bug Fixes {#bug-fixes-1-104-6} -* Fixes an issue where preflights could hang indefinitely when rerun, if the sequence was created by KOTS versions earlier than 1.96.0. +- **For clusters, use `distribution-type=kubernetes`:** + ```shell + curl --request GET \ + --url 'https://api.replicated.com/vendor/v3/cmx/history?distribution-type=kubernetes' \ + --header 'Authorization: $REPLICATED_API_TOKEN' \ + --header 'accept: application/json' + ``` -## 1.104.5 +- **For VMs, use `distribution-type=vm`:** + ```shell + curl --request GET \ + --url 'https://api.replicated.com/vendor/v3/cmx/history?distribution-type=vm' \ + --header 'Authorization: $REPLICATED_API_TOKEN' \ + --header 'accept: application/json' + ``` -Released on December 8, 2023 +### Filtering Endpoint Results +Each of these endpoints supports pagination and filtering. You can use the +following query parameters to filter the results. -Support for Kubernetes: 1.26, 1.27, and 1.28 +:::note +Each of the examples below +uses the `/v3/cmx/history` endpoint, but the same query parameters can be used +with the other endpoints as well. +::: -### Improvements {#improvements-1-104-5} -* Uses Chainguard to build the kurl-proxy image to resolve CVE-2023-45853 with critical severity; CVE-2023-25652, CVE-2023-29007 CVE-2023-5981, CVE-2023-2953, CVE-2023-44487, CVE-2023-31484, and CVE-2023-47038 with high severity; CVE-2023-4039, CVE-2023-5678, and CVE-2023-4641 with medium severity; CVE-2011-3374, TEMP-0841856-B18BAF, CVE-2022-0563, CVE-2016-2781, CVE-2017-18018, CVE-2022-27943, CVE-2018-1000021, CVE-2022-24975, CVE-2023-25815, CVE-2022-3219, CVE-2010-4756, CVE-2018-20796, CVE-2019-1010022, CVE-2019-1010023, CVE-2019-1010024, CVE-2019-1010025, CVE-2019-9192, CVE-2018-6829, CVE-2011-3389, CVE-2018-5709, CVE-2015-3276, CVE-2017-14159, CVE-2017-17740, CVE-2020-15719, CVE-2011-4116, CVE-2023-31486, CVE-2007-6755, CVE-2010-0928, CVE-2013-4392, CVE-2023-31437, CVE-2023-31438, CVE-2023-31439, CVE-2007-5686, CVE-2019-19882, CVE-2023-29383, TEMP-0628843-DBAD28, TEMP-0517018-A83CE6, CVE-2005-2541, CVE-2022-48303, and TEMP-0290435-0B57B5 with low severity. +- **Pagination:** Use the `pageSize` and `currentPage` query parameters to + paginate through the results: -### Bug Fixes {#bug-fixes-1-104-5} -* Fixes an issue that stripped the port from the argument passed to the `--kotsadm-registry` flag. This could result in an error when validating the registry when installing, upgrading, or pushing admin console images.
+ ```shell + curl --request GET \ + --url 'https://api.replicated.com/vendor/v3/cmx/history?pageSize=10&currentPage=1' \ + --header 'Authorization: $REPLICATED_API_TOKEN' \ + --header 'accept: application/json' + ``` -## 1.104.4 +- **Filter by date:** Use the `start-time` and `end-time` query parameters to + filter the results by a specific date range: -Released on December 1, 2023 + ```shell + curl --request GET \ + --url 'https://api.replicated.com/vendor/v3/cmx/history?start-time=2025-01-01T00:00:00Z&end-time=2025-01-31T23:59:59Z' \ + --header 'Authorization: $REPLICATED_API_TOKEN' \ + --header 'accept: application/json' + ``` -Support for Kubernetes: 1.26, 1.27, and 1.28 +- **Sort by:** Use the `tag-sort-key` query parameter to sort the results by a + specific field. The field can be any of the fields returned in the response. + + By default, the results are sorted in ascending order; use + `sortDesc=true` to sort in descending order: -### Improvements {#improvements-1-104-3} -* Uses Chainguard to build the `kotsadm` image to resolve CVE-2023-45853 with critical severity; CVE-2023-25652, CVE-2023-29007, CVE-2023-5981, CVE-2023-2953, CVE-2023-44487, CVE-2023-31484, CVE-2023-47038, CVE-2023-24329, CVE-2023-41105, and CVE-2023-2253 with high severity; CVE-2023-4039, CVE-2023-27043, CVE-2023-40217, CVE-2023-5678, and CVE-2023-4641 with medium severity; and CVE-2011-3374, TEMP-0841856-B18BAF, CVE-2022-0563, CVE-2016-2781, CVE-2017-18018, CVE-2022-3219, CVE-2022-27943, CVE-2018-1000021, CVE-2022-24975, CVE-2023-25815, CVE-2010-4756, CVE-2018-20796, CVE-2019-1010022, CVE-2019-1010023, CVE-2019-1010024, CVE-2019-1010025, CVE-2019-9192, CVE-2018-6829, CVE-2011-3389, CVE-2018-5709, CVE-2015-3276, CVE-2017-14159, CVE-2017-17740, CVE-2020-15719, CVE-2011-4116, CVE-2023-31486, CVE-2023-24535, CVE-2021-45346, CVE-2007-6755, CVE-2010-0928, CVE-2013-4392, CVE-2023-31437, CVE-2023-31438, CVE-2023-31439, CVE-2007-5686, CVE-2019-19882, CVE-2023-29383, TEMP-0628843-DBAD28, TEMP-0517018-A83CE6, CVE-2005-2541, CVE-2022-48303, and TEMP-0290435-0B57B5 with low severity. + ```shell + curl --request GET \ + --url 'https://api.replicated.com/vendor/v3/cmx/history?tag-sort-key=created_at&sortDesc=true' \ + --header 'Authorization: $REPLICATED_API_TOKEN' \ + --header 'accept: application/json' + ``` -### Bug Fixes {#bug-fixes-1-104-4} -* Fixes an issue on the admin console Cluster Management page where a secondary node join command was displayed when the primary node type was selected. +- **Tag filters:** Use the `tag-filter` query parameter to filter the results by + a specific tag: -## 1.104.3 + ```shell + curl --request GET \ + --url 'https://api.replicated.com/vendor/v3/cmx/history?tag-filter=tag1' \ + --header 'Authorization: $REPLICATED_API_TOKEN' \ + --header 'accept: application/json' + ``` -Released on November 29, 2023 +- **Actor filters:** Use the `actor-filter` query parameter to filter by the actor + that created the resource, or by the type of actor, such as `Web UI` or + `Replicated CLI`: -Support for Kubernetes: 1.26, 1.27, and 1.28 + ```shell + curl --request GET \ + --url 'https://api.replicated.com/vendor/v3/cmx/history?actor-filter=name' \ + --header 'Authorization: $REPLICATED_API_TOKEN' \ + --header 'accept: application/json' + ``` -### Improvements {#improvements-1-104-3} -* Upgrades the github.com/go-jose/go-jose/v3 go module to 3.0.1 to resolve GHSA-2c7c-3mj9-8fqh with medium severity. + :::note + If any filter is passed for an object that does not exist, no warning is given.
+ For example, if you filter by `actor-filter=name` and there are no results, + the response will be empty. + ::: -## 1.104.2 +================ +File: docs/vendor/config-screen-about.md +================ +# About the Configuration Screen -Released on November 17, 2023 +This topic describes the configuration screen on the Config tab in the Replicated Admin Console. -Support for Kubernetes: 1.26, 1.27, and 1.28 +## About Collecting Configuration Values -### Improvements {#improvements-1-104-2} -* Upgrades the golang.org/x/net go module to 0.17.0 in kurl_proxy to resolve CVE-2023-39325 with high severity, and CVE-2023-3978 and CVE-2023-44487 with medium severity. -* Upgrades the minio/minio image to RELEASE.2023-11-11T08-14-41Z to resolve CVE-2023-46129 and GHSA-m425-mq94-257g with high severity, and CVE-2023-44487 with medium severity. +When you distribute your application with Replicated KOTS, you can include a configuration screen in the Admin Console. This configuration screen is used to collect required or optional values from your users that are used to run your application. You can use regular expressions to validate user input for some fields, such as passwords and email addresses. For more information about how to add custom fields to the configuration screen, see [Creating and Editing Configuration Fields](admin-console-customize-config-screen). -## 1.104.1 +If you use a Helm chart for your application, your users provide any values specific to their environment from the configuration screen, rather than in a Helm chart `values.yaml` file. This means that your users can provide configuration values through a user interface, rather than having to edit a YAML file or use `--set` CLI commands. The Admin Console configuration screen also allows you to control which options you expose to your users. -Released on November 10, 2023 +For example, you can use the configuration screen to provide database configuration options for your application. Your users could connect your application to an external database by providing required values in the configuration screen, such as the host, port, and a username and password for the database. -Support for Kubernetes: 1.25, 1.26, 1.27, and 1.28 +Alternatively, you can use the configuration screen to provide a database option that runs in the cluster as part of your application. For an example of this use case, see [Example: Adding Database Configuration Options](tutorial-adding-db-config). -### Improvements {#improvements-1-104-1} -* Adds support for OKE (Oracle Kubernetes Engine) to the [Distribution](/reference/template-functions-static-context#distribution) template function. -* The CLI now surfaces preflight check errors that are due to insufficient RBAC permissions. -* Upgrades the kotsadm base image to `debian:bookworm-slim` to resolve CVE-2023-23914 with critical severity, and CVE-2022-42916 and CVE-2022-43551 with high severity. -* Upgrades the Helm binary in the kotsadm image to 3.13.2 to resolve CVE-2023-39325 and GHSA-m425-mq94-257g with high severity and CVE-2023-44487 and GHSA-jq35-85cj-fj4p with medium severity. -* Upgrades the google.golang.org/grpc go module to v1.59.0 to resolve GHSA-m425-mq94-257g with high severity and CVE-2023-44487 with medium severity. -* Upgrades the github.com/docker/docker go module to v24.0.7 to resolve GHSA-jq35-85cj-fj4p with medium severity.
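+To make the regular expression validation mentioned above concrete, the following is a minimal sketch of a Config custom resource that collects a required password and validates the user input against a pattern. The group name, item name, and pattern are illustrative only; see [validation](/reference/custom-resource-config#validation) in _Config_ for the supported options:
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Config
+metadata:
+  name: config-sample
+spec:
+  groups:
+    - name: database_settings
+      title: Database Settings
+      items:
+        - name: db_password
+          title: Database Password
+          type: password
+          required: true
+          validation:
+            regex:
+              # Illustrative pattern: require at least 8 characters
+              pattern: ^.{8,}$
+              message: The password must be at least 8 characters long.
+```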
+## Viewing the Configuration Screen -### Bug Fixes {#bug-fixes-1-104-1} -* Fixes an issue where the reporting data stored in Secrets in air gapped installations could exceed the size of the secret (1MB). +If you include a configuration screen with your application, users of your application can access the configuration screen from the Admin Console: +* During application installation. +* At any time after application installation on the Admin Console Config tab. -## 1.104.0 +### Application Installation -Released on November 6, 2023 +The Admin Console displays the configuration screen when the user installs the application, after they upload their license file. -Support for Kubernetes: 1.25, 1.26, 1.27, and 1.28 +The following shows an example of how the configuration screen displays during installation: -### New Features {#new-features-1-104-0} -* Releases that include version 1.0.0-beta.12 or later of the Replicated SDK can now be installed by KOTS. When KOTS deploys a release that includes the SDK, the SDK and KOTS both operate in the environment and independently report telemetry. Replicated recommends that everyone--not just vendors that support Helm CLI installations--include the SDK in their application for access to the latest features from Replicated! +![configuration screen that displays during application install](/images/config-screen-sentry-enterprise-app-install.png) -### Improvements {#improvements-1-104-0} -* Upgrades the replicated/local-volume-provider image to v0.5.5 to resolve CVE-2023-45128 with critical severity, CVE-2023-4911, CVE-2023-29491, CVE-2023-45141, and GHSA-m425-mq94-257g with high severity, and CVE-2023-36054, CVE-2023-3446, CVE-2023-3817, CVE-2023-41338, CVE-2023-39325, CVE-2023-3978, and CVE-2023-44487 with medium severity. -* Upgrades the replicated/schemahero image to 0.16.0 to resolve CVE-2023-4911 with high severity, CVE-2023-2603, CVE-2023-29491, CVE-2023-2650, CVE-2023-31484, and CVE-2023-3978 with medium severity. -* Upgrades the minio/minio image to RELEASE.2023-10-25T06-33-25Z to resolve CVE-2023-4911 and CVE-2023-44487 with high severity, CVE-2023-4527, CVE-2023-4806, CVE-2023-4813, CVE-2023-39325, and CVE-2023-44487 with medium severity. -* Upgrades the minio/mc image to RELEASE.2023-10-14T01-57-03Z to resolve CVE-2023-4911 with high severity, and CVE-2023-4527, CVE-2023-4806, CVE-2023-4813, and CVE-2023-39325 with medium severity. +[View a larger version of this image](/images/config-screen-sentry-enterprise-app-install.png) -### Bug Fixes {#bug-fixes-1-104-0} -* Fixes an issue where KOTS didn't discover specs with the `troubleshoot.sh/kind=support-bundle` label when generating support bundles. +### Admin Console Config Tab -## 1.103.3 +Users can access the configuration screen any time after they install the application by going to the Config tab in the Admin Console. -Released on October 25, 2023 +The following shows an example of how the configuration screen displays in the Admin Console Config tab: -Support for Kubernetes: 1.25, 1.26, 1.27, and 1.28 +![configuration screen that displays in the Config tab](/images/config-screen-sentry-enterprise.png) -### Improvements {#improvements-1-103-3} -* Updates the kubectl binary in the kotsadm image to resolve CVE-2023-39325, CVE-2023-3978, and CVE-2023-44487 with medium severity. -* Updates the golang.org/x/net go module to version 0.17.0 to resolve CVE-2023-39325 and CVE-2023-44487 with medium severity. 
+[View a larger version of this image](/images/config-screen-sentry-enterprise.png) -## 1.103.2 +================ +File: docs/vendor/config-screen-conditional.mdx +================ +import IntegerComparison from "../partials/template-functions/_integer-comparison.mdx" +import PropertyWhen from "../partials/config/_property-when.mdx" +import DistroCheck from "../partials/template-functions/_string-comparison.mdx" +import NeComparison from "../partials/template-functions/_ne-comparison.mdx" -Released on October 9, 2023 +# Using Conditional Statements in Configuration Fields -Support for Kubernetes: 1.25, 1.26, 1.27, and 1.28 +This topic describes how to use Replicated KOTS template functions in the Config custom resource to conditionally show or hide configuration fields for your application on the Replicated KOTS Admin Console **Config** page. -### Improvements {#improvements-1-103-2} -* Upgrades the minio/minio and minio/mc images to versions RELEASE.2023-09-23T03-47-50Z and RELEASE.2023-09-22T05-07-46Z, respectively, to resolve CVE-2023-29491 with high severity. -* Upgrades the Helm binary in the kotsadm image to 3.13.0 to resolve CVE-2023-28840 with high severity and CVE-2023-28841, CVE-2023-28842, and GHSA-6xv5-86q9-7xr8 with medium severity. -* Log preflight checks to the CLI and kotsadm logs whenever there are checks that fail leading to a failed deployment. +## Overview -### Bug Fixes {#bug-fixes-1-103-2} -* Fixes a bug that caused no status code to be returned from the custom metrics API requests. +The `when` property in the Config custom resource denotes configuration groups or items that are displayed on the Admin Console **Config** page only when a condition evaluates to true. When the condition evaluates to false, the group or item is not displayed. -## 1.103.1 + -Released on September 29, 2023 +For more information about the Config custom resource `when` property, see [when](/reference/custom-resource-config#when) in _Config_. -Support for Kubernetes: 1.25, 1.26, 1.27, and 1.28 +## Conditional Statement Examples -### Improvements {#improvements-1-103-1} -* Adds clarifying language that configured automatic update checks use the local server time. +This section includes examples of common types of conditional statements used in the `when` property of the Config custom resource. -### Bug Fixes {#bug-fixes-1-103-1} -* Fixes an issue where Helm charts that were previously deployed with the Replicated HelmChart kots.io/v1beta2 installation method were not uninstalled as expected after making configuration changes to exclude the chart. -* Fixes an issue where image pull secrets and hook informers were not applied for dynamically created namespaces if the `kotsadm` pod/API restarts. -* Fixes an issue where the applications dropdown for automatic partial snapshots settings showed no options or applications to select. +For additional examples of using conditional statements in the Config custom resource, see [Applications](https://github.com/replicatedhq/platform-examples/tree/main/applications) in the platform-examples repository in GitHub. -## 1.103.0 +### Cluster Distribution Check -Released on September 20, 2023 +It can be useful to show or hide configuration fields depending on the distribution of the cluster because different distributions often have unique requirements. 
-Support for Kubernetes: 1.25, 1.26, 1.27, and 1.28 +In the following example, the `when` properties use the [Distribution](/reference/template-functions-static-context#distribution) template function to return the Kubernetes distribution of the cluster where Replicated KOTS is running. If the distribution of the cluster matches the specified distribution, then the `when` property evaluates to true. -### New Features {#new-features-1-103-0} -* Adds support for [Lookup](/reference/template-functions-static-context#lookup) template function. + -## 1.102.2 +### Embedded Cluster Distribution Check -Released on September 15, 2023 +It can be useful to show or hide configuration fields if the distribution of the cluster is [Replicated Embedded Cluster](/vendor/embedded-overview) because you can include extensions in embedded cluster distributions to manage functionality such as ingress and storage. This means that embedded clusters frequently have fewer configuration options for the user. -Support for Kubernetes: 1.25, 1.26, 1.27, and 1.28 + -### Improvements {#improvements-1-102-2} -* The [custom metrics](/vendor/custom-metrics#configure-custom-metrics) API no longer requires authorization header. +### kURL Distribution Check -### Bug Fixes {#bug-fixes-1-102-2} -* Fixes an issue where updating the registry settings would not always display the loading indicator and status messages in the UI. -* Fixes an issue where deployments or diffing could fail after upgrading from KOTS 1.95 or earlier to KOTS 1.101.2-1.102.1 if versions contained empty Kustomize bases. +It can be useful to show or hide configuration fields if the cluster was provisioned by Replicated kURL because kURL distributions often include add-ons to manage functionality such as ingress and storage. This means that kURL clusters frequently have fewer configuration options for the user. -## 1.102.1 +In the following example, the `when` property of the `not_kurl` group uses the IsKurl template function to evaluate if the cluster was provisioned by kURL. For more information about the IsKurl template function, see [IsKurl](/reference/template-functions-static-context#iskurl) in _Static Context_. -Released on September 8, 2023 +```yaml +# Config custom resource +apiVersion: kots.io/v1beta1 +kind: Config +metadata: + name: config-sample +spec: + groups: + - name: all_distributions + title: Example Group + description: This group always displays. + items: + - name: example_item + title: This item always displays. + type: text + - name: not_kurl + title: Non-kURL Cluster Group + description: This group displays only if the cluster is not provisioned by kURL. + when: 'repl{{ not IsKurl }}' + items: + - name: example_item_non_kurl + title: The cluster is not provisioned by kURL. + type: label +``` -Support for Kubernetes: 1.25, 1.26, 1.27, and 1.28 +As shown in the image below, both the `all_distributions` and `not_kurl` groups are displayed on the **Config** page when KOTS is _not_ running in a kURL cluster: -### Bug Fixes {#bug-fixes-1-102-1} -* Fixes an issue where uploading the application air gap bundle could fail due to a permissions issue when creating files under the `/tmp` directory inside the `kotsadm` pod/container. This is only applicable to embedded cluster installations with Replicated kURL.
+![Config page displays both groups from the example](/images/config-example-iskurl-false.png) -## 1.102.0 +[View a larger version of this image](/images/config-example-iskurl-false.png) -Released on September 6, 2023 +However, when KOTS is running in a kURL cluster, only the `all_distributions` group is displayed, as shown below: -Support for Kubernetes: 1.24, 1.25, 1.26 and 1.27 +![Config page displaying only the first group from the example](/images/config-example-iskurl-true.png) -### New Features {#new-features-1-102-0} -* Adds support for sending custom application metrics using the `/api/v1/app/custom-metrics` endpoint. For more information, see [Configuring Custom Metrics (Beta)](/vendor/custom-metrics). +[View a larger version of this image](/images/config-example-iskurl-true.png) -## 1.101.3 +### License Field Value Equality Check -Released on August 18, 2023 +You can show or hide configuration fields based on the values in a license to ensure that users only see configuration options for the features and entitlements granted by their license. -Support for Kubernetes: 1.24, 1.25, 1.26 and 1.27 +In the following example, the `when` property of the `new_feature_config` item uses the LicenseFieldValue template function to determine if the user's license contains a `newFeatureEntitlement` field that is set to `true`. For more information about the LicenseFieldValue template function, see [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue) in _License Context_. -### Improvements {#improvements-1-101-3} -* Updates the MinIO image to RELEASE.2023-08-09T23-30-22Z to resolve CVE-2023-27536, CVE-2023-28321, CVE-2023-34969, CVE-2023-2603, CVE-2023-28484, CVE-2023-29469 with medium severity and CVE-2023-2602 with low severity. +```yaml +apiVersion: kots.io/v1beta1 +kind: Config +metadata: + name: config-sample +spec: + groups: + - name: example_settings + title: My Example Config + description: Example fields for using LicenseFieldValue template function + items: + - name: new_feature_config + type: label + title: "You have the new feature entitlement" + when: '{{repl (LicenseFieldValue "newFeatureEntitlement") }}' +``` -### Bug Fixes {#bug-fixes-1-101-3} -* Removes the distinction between `gke` and `gke-autopilot` from Kubernetes distribution reporting as this check was not working as intended and potentially displaying inconsistent information. All Standard and Autopilot GKE clusters are now reported as `gke`. -* Fixes an issue where the admin console was not correctly processing multi-doc yaml files containing windows line endings. +As shown in the image below, the **Config** page displays the `new_feature_config` item when the user's license contains `newFeatureEntitlement: true`: -## 1.101.2 +![Config page displaying the text "You have the new feature entitlement"](/images/config-example-newfeature.png) -Released on August 4, 2023 +[View a larger version of this image](/images/config-example-newfeature.png) -Support for Kubernetes: 1.24, 1.25, 1.26 and 1.27 +### License Field Value Integer Comparison -### Improvements {#improvements-1-101-2} -* Upgrades the Helm binary in the kotsadm image to 3.12.2 to resolve CVE-2023-2253 with high severity. +You can show or hide configuration fields based on the values in a license to ensure that users only see configuration options for the features and entitlements granted by their license. You can also compare integer values from license fields to control the configuration experience for your users. 
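+For example, a `when` condition along the following lines displays a group only when the license grants more than 10 seats. This is a sketch: the `numSeats` license field is hypothetical, and the condition uses the `atoi` function from the Sprig library (available to KOTS template functions) to convert the string returned by LicenseFieldValue into an integer before comparing it:
+
+```yaml
+# Sketch only: assumes a hypothetical numSeats integer license field
+- name: team_settings
+  title: Team Settings
+  when: 'repl{{ gt (atoi (LicenseFieldValue "numSeats")) 10 }}'
+  items:
+    - name: seats_note
+      title: Your license includes more than 10 seats.
+      type: label
+```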
-### Bug Fixes {#bug-fixes-1-101-2} -* Fixes an issue where parsing invalid KOTS kinds failed silently. + -## 1.101.1 +### User-Supplied Value Check -Released on July 31, 2023 +You can show or hide configuration fields based on user-supplied values on the **Config** page to ensure that users only see options that are relevant to their selections. -Support for Kubernetes: 1.24, 1.25, 1.26 and 1.27 +In the following example, the `database_host` and `database_password` items use the ConfigOptionEquals template function to evaluate if the user selected the `external` database option for the `db_type` item. For more information about the ConfigOptionEquals template function, see [ConfigOptionEquals](/reference/template-functions-config-context#configoptionequals) in _Config Context_. -### Bug Fixes {#bug-fixes-1-101-1} -* Fixes an issue where defaults were not used for [repeatable config items](/reference/custom-resource-config#repeatable-items) when doing an automated install with the kots CLI. -* Fixes an issue where processing Helm charts or sub-charts that have `-charts` as a suffix failed. +```yaml +apiVersion: kots.io/v1beta1 +kind: Config +metadata: + name: config-sample +spec: + groups: + - name: database_settings_group + title: Database Settings + items: + - name: db_type + title: Database Type + type: radio + default: external + items: + - name: external + title: External Database + - name: embedded + title: Embedded Database + - name: database_host + title: Database Hostname + type: text + when: '{{repl (ConfigOptionEquals "db_type" "external")}}' + - name: database_password + title: Database Password + type: password + when: '{{repl (ConfigOptionEquals "db_type" "external")}}' +``` +As shown in the images below, when the user selects the external database option, the `database_host` and `database_password` items are displayed. Alternatively, when the user selects the embedded database option, the items are _not_ displayed: -## 1.101.0 +![Config page displaying the database host and password fields](/images/config-example-external-db.png) -Released on July 19, 2023 +[View a larger version of this image](/images/config-example-external-db.png) -Support for Kubernetes: 1.24, 1.25, 1.26 and 1.27 +![Config page with embedded database option selected](/images/config-example-embedded-db.png) -### New Features {#new-features-1-101-0} -* KOTS now supports running preflight checks defined in a Helm chart. If any Helm charts in a release contain preflight specifications, KOTS runs those. If no Helm charts exist or no preflights are defined in any Helm charts, KOTS uses the previous behavior and runs any preflights defined in a `kind: Preflight` file in the root of the release. For more information about preflights in Helm charts, see [Define Preflight Checks for Helm Installations -](/vendor/preflight-helm-defining). +[View a larger version of this image](/images/config-example-embedded-db.png) -### Improvements {#improvements-1-101-0} -* Updates the replicated/local-volume-provider image to v0.5.4 to resolve CVE-2023-0464 with high severity. -* Updates the kotsadm/dex image to v2.37.0 to resolve CVE-2022-4450, CVE-2023-0215, CVE-2023-0464, CVE-2023-2650 with high severity and CVE-2022-4304, CVE-2023-0465, CVE-2023-0466, CVE-2023-1255 with medium severity. -* Updates the MinIO image to RELEASE.2023-06-29T05-12-28Z to resolve CVE-2020-24736, CVE-2023-1667, CVE-2023-2283, and CVE-2023-26604 with medium severity. -* Upgrades webpack to 5.88.1 to resolve CVE-2023-28154 with critical severity.
+## Use Multiple Conditions in the `when` Property -### Bug Fixes {#bug-fixes-1-101-0} -* Fixes an issue where the `rendered` directory was not created for airgap application updates. +You can use more than one template function in the `when` property to create more complex conditional statements. This allows you to show or hide configuration fields based on multiple conditions being true. -## 1.100.3 +The following example includes `when` properties that use both the ConfigOptionEquals and IsKurl template functions: -Released on June 20, 2023 +```yaml +apiVersion: kots.io/v1beta1 +kind: Config +metadata: + name: config-sample +spec: + groups: + - name: ingress_settings + title: Ingress Settings + description: Configure Ingress + items: + - name: ingress_type + title: Ingress Type + help_text: | + Select how traffic will ingress to the application. + type: radio + items: + - name: ingress_controller + title: Ingress Controller + - name: load_balancer + title: Load Balancer + default: "ingress_controller" + required: true + when: 'repl{{ not IsKurl }}' + - name: ingress_host + title: Hostname + help_text: Hostname used to access the application. + type: text + default: "hostname.example.com" + required: true + when: 'repl{{ and (not IsKurl) (ConfigOptionEquals "ingress_type" "ingress_controller") }}' + - name: ingress_annotations + type: textarea + title: Ingress Annotations + help_text: See your ingress controller’s documentation for the required annotations. + when: 'repl{{ and (not IsKurl) (ConfigOptionEquals "ingress_type" "ingress_controller") }}' + - name: ingress_tls_type + title: Ingress TLS Type + type: radio + items: + - name: self_signed + title: Self Signed (Generate Self Signed Certificate) + - name: user_provided + title: User Provided (Upload a TLS Certificate and Key Pair) + required: true + default: self_signed + when: 'repl{{ and (not IsKurl) (ConfigOptionEquals "ingress_type" "ingress_controller") }}' + - name: ingress_tls_cert + title: TLS Cert + type: file + when: '{{repl and (ConfigOptionEquals "ingress_type" "ingress_controller") (ConfigOptionEquals "ingress_tls_type" "user_provided") }}' + required: true + - name: ingress_tls_key + title: TLS Key + type: file + when: '{{repl and (ConfigOptionEquals "ingress_type" "ingress_controller") (ConfigOptionEquals "ingress_tls_type" "user_provided") }}' + required: true + - name: load_balancer_port + title: Load Balancer Port + help_text: Port used to access the application through the Load Balancer. + type: text + default: "443" + required: true + when: 'repl{{ and (not IsKurl) (ConfigOptionEquals "ingress_type" "load_balancer") }}' + - name: load_balancer_annotations + type: textarea + title: Load Balancer Annotations + help_text: See your cloud provider’s documentation for the required annotations. + when: 'repl{{ and (not IsKurl) (ConfigOptionEquals "ingress_type" "load_balancer") }}' +``` -Support for Kubernetes: 1.24, 1.25, 1.26 and 1.27 +As shown in the image below, the configuration fields that are specific to the ingress controller display only when the user selects the ingress controller option and KOTS is _not_ running in a kURL cluster: -### Improvements {#improvements-1-100-3} -* Updates the github.com/dexidp/dex module to v2.36.0 (git hash v0.0.0-20230320125501-2bb4896d120e) to resolve CVE-2020-26290 with critical severity. -* Updates the github.com/sigstore/rekor module to v1.2.0 to resolve CVE-2023-30551 with high severity and CVE-2023-33199 with medium severity.
-* Updates the github.com/gin-gonic/gin module to v1.9.1 in the kurl-proxy to resolve CVE-2023-26125 and CVE-2023-29401 with medium severity. +![Config page displaying the ingress controller options](/images/config-example-ingress-controller.png) -### Bug Fixes {#bug-fixes-1-100-3} -* Fixes an issue where [repeatable items](/reference/custom-resource-config#repeatable-items) did not work as expected on the Config page. +[View a larger version of this image](/images/config-example-ingress-controller.png) -## 1.100.2 +Additionally, the options relevant to the load balancer display when the user selects the load balancer option and KOTS is _not_ running in a kURL cluster: -Released on June 7, 2023 +![Config page displaying the load balancer options](/images/config-example-ingress-load-balancer.png) -Support for Kubernetes: 1.24, 1.25, 1.26 and 1.27 +[View a larger version of this image](/images/config-example-ingress-load-balancer.png) -### Bug Fixes {#bug-fixes-1-100-2} -* Fixes an issue where the Config values were not saved when a release contained a multiple-document YAML file. -* Fixes an issue where the Config specification was missing from the rendered release in the kotsKinds folder if the release contained a multiple-document YAML file. -* Fixes an issue that allowed users to edit `readonly` Config items. +================ +File: docs/vendor/config-screen-map-inputs.md +================ +# Mapping User-Supplied Values -## 1.100.1 +This topic describes how to map the values that your users provide in the Replicated Admin Console configuration screen to your application. -Released on June 2, 2023 +This topic assumes that you have already added custom fields to the Admin Console configuration screen by editing the Config custom resource. For more information, see [Creating and Editing Configuration Fields](admin-console-customize-config-screen). -Support for Kubernetes: 1.24, 1.25, 1.26 and 1.27 +## Overview of Mapping Values -### Improvements {#improvements-1-100-1} -* Updates the way custom domains for the Replicated registry and proxy registry are used. If a default or channel-specific custom domain is configured, that custom domain is associated with a release when it is promoted to a channel. KOTS will rewrite images using that custom domain. The `replicatedRegistryDomain` and `proxyRegistryDomain` fields in the Application custom resource are deprecated but continue to work to give time to migrate to the new mechanism. -* Updates the rqlite/rqlite image to 7.19.0 to resolve CVE-2023-1255 with medium severity. +You use the values that your users provide in the Admin Console configuration screen to render YAML in the manifest files for your application. -## 1.100.0 +For example, if you provide an embedded database with your application, you might add a field on the Admin Console configuration screen where users input a password for the embedded database. You can then map the password that your user supplies in this field to the Secret manifest file for the database in your application. -Released on May 26, 2023 +For an example of mapping database configuration options in a sample application, see [Example: Adding Database Configuration Options](tutorial-adding-db-config). -Support for Kubernetes: 1.24, 1.25, 1.26 and 1.27 +You can also conditionally deploy custom resources depending on the user input for a given field. 
For example, if a customer chooses to use their own database with your application rather than an embedded database option, it is not desirable to deploy the optional database resources such as a StatefulSet and a Service. -### New Features {#new-features-1-100-0} -* Adds support for `kots.io/creation-phase` and `kots.io/deletion-phase` annotations to control the order in which native Kubernetes resources are created and deleted, respectively. See [Deployment Phases](/vendor/orchestrating-resource-deployment#deployment-phases) in _Orchestrating Resource Deployment_. -* Adds support for a `kots.io/wait-for-ready` annotation, which causes KOTS to wait for a native Kubernetes resource to be ready before continuing with the deployment. See [Waiting for a Resource to be Ready](/vendor/orchestrating-resource-deployment#wait-for-a-resource-to-be-ready) in _Orchestrating Resource Deployment_. -* Adds support for a `kots.io/wait-for-properties` annotation, which causes KOTS to wait for one or more properties to match a desired value before continuing with the deployment. See [Wait for Resource Properties](/vendor/orchestrating-resource-deployment#wait-for-resource-properties) in _Orchestrating Resource Deployment_. +For more information about including optional resources conditionally based on user-supplied values, see [Conditionally Including or Excluding Resources](packaging-include-resources). -### Improvements {#improvements-1-100-0} -* Updates the github.com/cloudflare/circl module to v1.3.3 to resolve CVE-2023-1732 with medium severity. +## About Mapping Values with Template Functions -### Bug Fixes {#bug-fixes-1-100-0} -* Fixes an issue where Helm charts deployed using the native Helm installation method were uninstalled then reinstalled when the chart version changed or was updated. -* Fixes an issue in embedded clusters where images from native Helm v2 (Beta) charts were incorrectly removed from the in-cluster registry, potentially leading to failed deployments. -* Bumps the Helm version used by KOTS to 3.12.0 to fix an issue where native Helm installations were failing on Kubernetes 1.27. +To map user-supplied values, you use Replicated KOTS template functions. The template functions are based on the Go text/template libraries. To use template functions, you add them as strings in the custom resource manifest files in your application. -## 1.99.0 +For more information about template functions, including use cases and examples, see [About Template Functions](/reference/template-functions-about). -Released on May 18, 2023 +For more information about the syntax of the template functions for mapping configuration values, see [Config Context](/reference/template-functions-config-context) in the _Template Functions_ section. -Support for Kubernetes: 1.24, 1.25, and 1.26 +## Map User-Supplied Values -### New Features {#new-features-1-99-0} -* Adds a new native Helm v2 installation method (Beta) that leverages the `kots.io/v1beta2` HelmChart custom resource. This v2 installation method does a Helm installation or upgrade of your Helm chart without modifying the chart with Kustomize. This is an improvement to the v1 installation method because it results in Helm installations that can be reproduced outside of the app manager, and it enables the use of additional Helm functionality that was not available in v1. See [HelmChart v2 (Beta)](/reference/custom-resource-helmchart-v2) in the _Custom Resources_ section. 
+Follow one of these procedures to map user inputs from the configuration screen, depending on whether you use a Helm chart for your application: -### Improvements {#improvements-1-99-0} -* Applies application status informers before deploying the actual resources. This is helpful in cases where deployments take a long time, because the statuses are now available while the deployment happens. -* Updates the replicated/local-volume-provider image to v0.5.3 to resolve CVE-2022-4415 and CVE-2022-3821 with high severity. -* Replace the misleading call-to-action button on the instance snapshot restore modal, which could have mistakenly lead the user to believe the instance restore was initiated. -* Enhances formatting to accommodate lengthy strings for fields such as the application name and config item names. +* **Without Helm**: See [Map Values to Manifest Files](#map-values-to-manifest-files). +* **With Helm**: See [Map Values to a Helm Chart](#map-values-to-a-helm-chart). -### Bug Fixes {#bug-fixes-1-99-0} -* Fixes a bug where the rqlite collector was unable to collect a data dump if the name of the rqlite container changed. -* Fixes an issue where re-running preflights during the initial installation could cause the UI to incorrectly show a status of "Currently pending version". -* Fixes an issue where re-running preflights during the initial installation could cause the application to be re-deployed. +### Map Values to Manifest Files -## 1.98.3 +To map user-supplied values from the configuration screen to manifest files in your application: -Released on May 5, 2023 +1. In the [Vendor Portal](https://vendor.replicated.com/apps), click **Releases**. Then, click **View YAML** next to the desired release. -Support for Kubernetes: 1.24, 1.25, and 1.26 +1. Open the Config custom resource manifest file that you created in the [Add Fields to the Configuration Screen](admin-console-customize-config-screen#add-fields-to-the-configuration-screen) procedure. The Config custom resource manifest file has `kind: Config`. -### Improvements {#improvements-1-98-3} -* The JSON Web Token (JWT) is stored in an HttpOnly cookie to prevent cross-site scripting (XSS) attacks. -* The **Cluster Management** page shows by default the command for joining a primary node instead of a secondary node for high availability clusters. -* The resource status modal displays the time the data was last fetched automatically. -* Introduces a deterministic order for applying and deleting Kubernetes manifests based on the resource kind. -* Uses the [weight](https://docs.replicated.com/reference/custom-resource-helmchart#weight) field from the HelmChart custom resource to determine the order in which to uninstall charts that have `useHelmInstall: true`. Charts are uninstalled by weight in descending order, with higher weights uninstalled first. -* Application Helm charts are uninstalled first, then other Kubernetes manifests are uninstalled. -* Improvements to the **Version history** page include truncating long version labels, removing unnecessary preflight icons, and improving the content layout. -* The `kots admin-console push-images` command now returns an error if the provided air gap bundle file is missing. -* Adds a **Back** button to the **Preflights** page. +1. In the Config manifest file, locate the name of the user-input field that you want to map. -### Bug Fixes {#bug-fixes-1-98-3} -* Fixes an issue where snapshot restores hung if RabbitMQ cluster custom resources were used.
-* Fixes an issue where Helm releases were not uninstalled when undeploying an application using the [kots remove](/reference/kots-cli-remove) command and passing the `--undeploy` flag. -* Fixes an issue where Helm charts that were deployed with native Helm to a different namespace than KOTS were not uninstalled when they were removed from subsequent application releases. -* Fixes an issue where uploading an air gap bundle through the admin console might have failed due to issues getting layers for OCI images. -* Fixes an issue where canceling a restore of an application (partial) snapshot sometimes did not work if multiple applications were installed in the same admin console. -* The **Config** page now shows the correct error message if errors other than regex validation occurred. -* Fixes an issue where the Config page incorrectly displayed "Edit the currently deployed config" when there was no application deployed. -* Fixes an issue where installations and upgrades could fail when checking if the cluster was a kURL cluster, if the user running the command was not authorized to list ConfigMaps in the `kube-system` namespace. -* Fixes an issue where air gapped application pods could fail to pull images from the kURL registry due to the image names being rewritten incorrectly, if the application was upgraded using the [`kots upstream upgrade`](/reference/kots-cli-upstream-upgrade) command. -* Fixes an issue where the **Version history** page could incorrectly show a **Deployed** button if an application version was deployed while preflight checks were running. + **Example**: -## 1.98.2 + ```yaml + apiVersion: kots.io/v1beta1 + kind: Config + metadata: + name: my-application + spec: + groups: + - name: smtp_settings + title: SMTP Settings + description: Configure SMTP Settings + items: + - name: smtp_host + title: SMTP Hostname + help_text: Set SMTP Hostname + type: text + ``` -Released on April 26, 2023 + In the example above, the field name to map is `smtp_host`. -Support for Kubernetes: 1.24, 1.25, and 1.26 +1. In the same release in the Vendor Portal, open the manifest file where you want to map the value for the field that you selected. -### Bug Fixes {#bug-fixes-1-98-2} -* Fixes an issue where quotes were stripped from fields in HelmChart custom resources, which led to unexpected behavior and failed deployments. -* Fixes an issue where invalid Kustomize patches were generated for Helm charts with deeply nested dependencies. -* Fixes an issue where processing application manifests occasionally failed if null values were encountered after rendering. +1. In the manifest file, use the ConfigOption template function to map the user-supplied value in a key value pair. For example: -## 1.98.1 + ```yaml + hostname: '{{repl ConfigOption "smtp_host"}}' + ``` -Released on April 21, 2023 + For more information about the ConfigOption template function, see [Config Context](../reference/template-functions-config-context#configoption) in the _Template Functions_ section. -Support for Kubernetes: 1.24, 1.25, and 1.26 + **Example**: -### Bug Fixes {#bug-fixes-1-98-1} -* Fixes an issue where multiple copies of the same Kubernetes resource (for example, the same `kind` and `name`) were deduplicated even if they had a different namespace. This deduplication resulted in the app manager deploying only one of the resources to the cluster. -* Fixes an issue that caused config updates to fail when the user did not provide a value for a required config item with a default value, even if the item was hidden. 
-* Fixes an issue where switching the license to a different channel did not fetch the current release on that channel if the number of releases was the same on both channels. + The following example shows mapping user-supplied TLS certificate and TLS private key files to the `tls.crt` and `tls.key` keys in a Secret custom resource manifest file. -## 1.98.0 + For more information about working with TLS secrets, including a strategy for re-using the certificates uploaded for the Admin Console itself, see the [Configuring Cluster Ingress](packaging-ingress) example. -Released on April 19, 2023 + ```yaml + apiVersion: v1 + kind: Secret + metadata: + name: tls-secret + type: kubernetes.io/tls + data: + tls.crt: '{{repl ConfigOption "tls_certificate_file" }}' + tls.key: '{{repl ConfigOption "tls_private_key_file" }}' + ``` -Support for Kubernetes: 1.24, 1.25, and 1.26 +1. Save and promote the release to a development environment to test your changes. -### New Features {#new-features-1-98-0} -* Adds support for validating config items with type `text`, `textarea`, `password`, or `file` by matching the item's values against a regex pattern. For more information, see [validation](/reference/custom-resource-config#validation) in _Config_. -* Adds a new `kotsKinds` directory to the application archive that includes the rendered KOTS custom resources. +### Map Values to a Helm Chart -### Improvements {#improvements-1-98-0} -* Sorts multi-application installations in the admin console by their creation date with the most recently installed application at the top. -* Updates spacing and font sizes to improve visual grouping of items on admin console Config page. -* Updates Kustomize from v4.5.7 to v5.0.1 which resolves CVE-2022-27664, CVE-2022-41723, CVE-2022-41723, and CVE-2022-28948 with high severity and CVE-2022-41717 with medium severity. -* Updates the Helm binary included in the kotsadm image from 3.11.0 to 3.11.3 to resolve CVE-2022-41723 and CVE-2023-25173 with high severity and CVE-2023-25153 with medium severity. -* Updates the github.com/opencontainers/runc module to v1.1.5 to resolve CVE-2023-27561 with high severity. -* Updates the minio/minio image to RELEASE.2023-04-13T03-08-07Z to resolve CVE-2023-0361 with medium severity. -* Updates the minio/mc image to RELEASE.2023-04-12T02-21-51Z to resolve CVE-2023-0361 with medium severity. -* Adds support for template functions to the `namespace` and `helmUpgradeFlags` fields of the [HelmChart](/reference/custom-resource-helmchart) custom resource. +The `values.yaml` file in a Helm chart defines parameters that are specific to each environment in which the chart will be deployed. With Replicated KOTS, your users provide these values through the configuration screen in the Admin Console. You customize the configuration screen based on the required and optional configuration fields that you want to expose to your users. -### Bug Fixes {#bug-fixes-1-98-0} -* Fixes an issue where strict security context configurations were not applied in OpenShift environments when the `--strict-security-context` flag was passed to the [kots install](https://docs.replicated.com/reference/kots-cli-install) or [kots admin-console upgrade](https://docs.replicated.com/reference/kots-cli-admin-console-upgrade) commands. +To map the values that your users provide in the Admin Console configuration screen to your Helm chart `values.yaml` file, you create a HelmChart custom resource.
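+As a sketch of this relationship, suppose your chart's `values.yaml` declares a default for a hypothetical `hostname` key; the matching entry under `values` in the HelmChart custom resource overrides that default at deployment time with the user-supplied configuration value:
+
+```yaml
+# values.yaml (in the Helm chart); the hostname key is hypothetical
+hostname: hostname.example.com
+
+# HelmChart custom resource (in the release)
+values:
+  hostname: '{{repl ConfigOption "smtp_host" }}'
+```
+
+The procedure below walks through each of these pieces step by step.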
-## 1.97.0 +For a tutorial that shows how to set values in a sample Helm chart during installation with KOTS, see [Set Helm Chart Values with KOTS](/vendor/tutorial-config-setup). -Released on April 7, 2023 +To map user inputs from the configuration screen to the `values.yaml` file: -Support for Kubernetes: 1.24, 1.25, and 1.26 +1. In the [Vendor Portal](https://vendor.replicated.com/apps), click **Releases**. Then, click **View YAML** next to the desired release. -### New Features {#new-features-1-97-0} -* Allows users to unmask passwords on various forms in the admin console. +1. Open the Config custom resource manifest file that you created in the [Add Fields to the Configuration Screen](admin-console-customize-config-screen#add-fields-to-the-configuration-screen) procedure. The Config custom resource manifest file has `kind: Config`. -### Improvements {#improvements-1-97-0} -* Simplifies the wording on the air gap bundle upload page. -* Updates the log in page to say **Log in to APP_NAME admin console** instead of **Log in to APP_NAME**. -* Upgrades the MinIO image to RELEASE.2023-03-24T21-41-23Z to resolve CVE-2023-0286 with high severity, and CVE-2022-4304, CVE-2022-4450, and CVE-2023-0215 with medium severity. +1. In the Config manifest file, locate the name of the user-input field that you want to map. -## 1.96.3 + **Example**: -Released on March 29, 2023 + ```yaml + apiVersion: kots.io/v1beta1 + kind: Config + metadata: + name: my-application + spec: + groups: + - name: smtp_settings + title: SMTP Settings + description: Configure SMTP Settings + items: + - name: smtp_host + title: SMTP Hostname + help_text: Set SMTP Hostname + type: text + ``` -Support for Kubernetes: 1.24, 1.25, and 1.26 + In the example above, the field name to map is `smtp_host`. -### Improvements {#improvements-1-96-3} -* Wraps the logs in the deploy logs modal to increase readability by eliminating the need to scroll horizontally. -* Removes support for cipher suites that use the CBC encryption algorithm or SHA-1 from the kurl_proxy service that runs in embedded cluster installations. +1. In the same release, create a HelmChart custom resource manifest file. A HelmChart custom resource manifest file has `kind: HelmChart`. -### Bug Fixes {#bug-fixes-1-96-3} -* Fixes a bug that caused application upgrades to fail because the app manager attempted to migrate the Helm release secret when the release secret already existed in the release namespace. + For more information about the HelmChart custom resource, see [HelmChart](../reference/custom-resource-helmchart) in the _Custom Resources_ section. -## 1.96.2 +1. In the HelmChart manifest file, copy and paste the name of the property from your `values.yaml` file that corresponds to the field that you selected from the Config manifest file under `values`: -Released on March 24, 2023 + ```yaml + values: + HELM_VALUE_KEY: + ``` + Replace `HELM_VALUE_KEY` with the property name from the `values.yaml` file. -Support for Kubernetes: 1.23, 1.24, 1.25, and 1.26 +1. Use the ConfigOption template function to set the property from the `values.yaml` file equal to the corresponding configuration screen field: -### Improvements {#improvements-1-96-2} -* Updates the kotsadm/dex image to v2.36.0 to resolve CVE-2022-4450, CVE-2023-0215, CVE-2023-0286, CVE-2022-41721, CVE-2022-41723, and CVE-2022-32149 with high severity, and CVE-2022-4304 and CVE-2022-41717 with medium severity. 
-* Updates the MinIO image to RELEASE.2023-03-13T19-46-17Z to resolve CVE-2023-23916 with medium severity. -* Updates the kubectl binary in the kotsadm image to resolve CVE-2022-41723 with high severity and CVE-2022-41717 with medium severity. -* Updates the golang.org/x/net module in the kurl-proxy to resolve CVE-2022-41723 with high severity. -* Upgrades the schemahero image tag to v0.14.0 and replicated/local-volume-provider image to v0.5.2 to resolve CVE-2022-41723 with high severity. + ```yaml + values: + HELM_VALUE_KEY: '{{repl ConfigOption "CONFIG_SCREEN_FIELD_NAME" }}' + ``` + Replace `CONFIG_SCREEN_FIELD_NAME` with the name of the field that you created in the Config custom resource. -### Bug Fixes {#bug-fixes-1-96-2} -* Fixes a bug where multi-node embedded cluster installations hang indefinitely with the KOTS add-on. -* Increases the time for displaying the slow loading indicator to two minutes to prevent the admin console from continuously reloading when the internet connection is slow. -* Removes hardcoded application name on the Troubleshoot page when a community license is used. -* Fixes a known issue that was introduced in v1.95.0 that causes application upgrades to fail for Helm charts that are deployed using the native Helm installation method. See the [known issue](/release-notes/rn-app-manager#known-issues-1-95-0) in the v1.95.0 release notes, and see [useHelmInstall](/reference/custom-resource-helmchart#usehelminstall) in the _HelmChart_ reference. + For more information about the KOTS ConfigOption template function, see [Config Context](../reference/template-functions-config-context#configoption) in the _Template Functions_ section. -## 1.96.1 + **Example:** -:::important -The app manager v1.96.1 has a known issue that causes application upgrades to fail for Helm charts that are deployed using the native Helm installation method. - This issue is resolved in the app manager v1.96.2. See [Known Issue](#known-issues-1-95-0) in _1.95.0_ below. -::: + ```yaml + apiVersion: kots.io/v1beta1 + kind: HelmChart + metadata: + name: samplechart + spec: + chart: + name: samplechart + chartVersion: 3.1.7 + helmVersion: v3 + useHelmInstall: true + values: + hostname: '{{repl ConfigOption "smtp_host" }}' + ``` -Released on March 16, 2023 +1. Save and promote the release to a development environment to test your changes. -Support for Kubernetes: 1.23, 1.24, 1.25, and 1.26 +================ +File: docs/vendor/custom-domains-using.md +================ +# Using Custom Domains -### Improvements {#improvements-1-96-1} -* Refreshes the design of the preflights page in the admin console to improve usability and match the style of other pages. -* Updates the helm.sh/helm/v3 module to v3.11.2 to resolve CVE-2023-25165 with medium severity. -* If the application's port is not available when the user enables access to the admin console with `kubectl kots admin-console`, failure messages print one time and retry silently. +This topic describes how to use the Replicated Vendor Portal to add and manage custom domains to alias the Replicated registry, the Replicated proxy registry, the Replicated app service, and the download portal. -## 1.96.0 +For information about adding and managing custom domains with the Vendor API v3, see the [customHostnames](https://replicated-vendor-api.readme.io/reference/createcustomhostname) section in the Vendor API v3 documentation. 
-:::important -The app manager v1.96.0 has a known issue that causes application upgrades to fail for Helm charts that are deployed using the native Helm installation method. -This issue is resolved in the app manager v1.96.2. See [Known Issue](#known-issues-1-95-0) in _1.95.0_ below. -::: +For an overview about custom domains and limitations, see [About Custom Domains](custom-domains). -Released on March 9, 2023 +## Configure a Custom Domain -Support for Kubernetes: 1.23, 1.24, 1.25, and 1.26 +Before you assign a custom domain for a registry or the download portal, you must first configure and verify the ownership and TLS certificate. -### New Features {#new-features-1-96-0} -* Adds the deployable, rendered application manifests to the version archive. This increases the transparency of what KOTS deploys by showing the exact manifests that are deployed as part of this version on the **View Files** page. For more information, see [Rendered](/enterprise/updating-patching-with-kustomize#rendered) in _Patching with Kustomize_. +To add and configure a custom domain: -### Improvements {#improvements-1-96-0} -* Updates the replicated/local-volume-provider image to v0.5.1 to resolve CVE-2023-0361, CVE-2022-4450, CVE-2023-0215, and CVE-2023-0286 with high severity, and CVE-2022-2097 and CVE-2022-4304 with medium severity. -* Improves the performance of creating, diffing, configuring, and deploying application versions by retrieving the rendered application manifests when they are available, instead of rendering them on the fly. -* Improves the performance of creating application versions by running private image checks concurrently. +1. In the [Vendor Portal](https://vendor.replicated.com), go to **Custom Domains**. -### Bug Fixes {#bug-fixes-1-96-0} -* Resolves a clickjacking vulnerability that was present in the kurl_proxy service that runs in embedded cluster installations. -* Adds a **Rerun** button on the preflights page when an application is initially installed. -* Fixes an issue where the selected subnavigation tab was not underlined. -* Fixes an issue where CRDs from subcharts were included in the Secret that Helm stores the release information in. In some cases, this issue could dramatically increase the Secret's size. +1. In the **Add custom domain** dropdown, select the target Replicated endpoint. -## 1.95.0 + The **Configure a custom domain** wizard opens. -:::important -The app manager v1.95.0 has a known issue that causes application upgrades to fail for Helm charts that are deployed using the native Helm installation method. -This issue is resolved in the app manager v1.96.2. See [Known Issue](#known-issues-1-95-0) below. -::: + custom domain wizard -Released on March 1, 2023 + [View a larger version of this image](/images/custom-domains-download-configure.png) -Support for Kubernetes: 1.23, 1.24, 1.25, and 1.26 +1. For **Domain**, enter the custom domain. Click **Save & continue**. -### New Features {#new-features-1-95-0} -* Adds an `--undeploy` flag to the [kots remove](/reference/kots-cli-remove) command that allows you to completely undeploy the application and delete its resources from the cluster. -* Adds support for Azure Container Registry (ACR). For a full list of supported registries, see [Private Registry Requirements](/enterprise//installing-general-requirements#private-registry-requirements). -* Status informers now support DaemonSets. See [Resource Statuses](/vendor/admin-console-display-app-status#resource-statuses). 
-* When using custom branding for the admin console, you can more easily change the color of groups of elements in the admin console (Beta).
+1. For **Create CNAME**, copy the text string and use it to create a CNAME record in your DNS account. Click **Continue**.

-### Improvements {#improvements-1-95-0}
-* The [kots install](/reference/kots-cli-install), [kots upstream upgrade](/reference/kots-cli-upstream-upgrade), and [kots admin-console push-images](/reference/kots-cli-admin-console-push-images) commands now validate the provided registry information before processing the air gap bundle.
-* Upgrades the MinIO image to RELEASE.2023-02-22T18-23-45Z to resolve CVE-2022-42898, CVE-2022-47629, and CVE-2022-41721 with high severity and CVE-2022-2509, CVE-2022-1304, CVE-2021-46848, CVE-2016-3709, CVE-2022-40303, CVE-2022-40304, CVE-2020-35527, CVE-2022-35737, CVE-2022-3821, CVE-2022-4415, CVE-2022-37434, and CVE-2022-41717 with medium severity.
-* The [kots admin-console generate-manifests](/reference/kots-cli-admin-console-generate-manifests) command now supports OpenShift and GKE Autopilot, if it is executed with a Kubernetes cluster context.
-* Support bundles generated from the admin console include a copy of rqlite data for debugging purposes.
+1. For **Verify ownership**, if a TXT record is displayed, copy the text string and use it to create a TXT record in your DNS account. If a TXT record is not displayed, ownership is validated automatically using an HTTP token. Click **Validate & continue**.

-### Bug Fixes {#bug-fixes-1-95-0}
-* Fixes an issue where the [namespace](/reference/custom-resource-helmchart#namespace) field in the HelmChart custom resource was not respected when [useHelmInstall](/reference/custom-resource-helmchart#usehelminstall) was set to `true`.
+   Your changes can take up to 24 hours to propagate.

-### Known Issue {#known-issues-1-95-0}
+1. For **TLS cert creation verification**, if a TXT record is displayed, copy the text string and use it to create a TXT record in your DNS account. If a TXT record is not displayed, TLS certificate creation is validated automatically using an HTTP token. Click **Validate & continue**.

-There is a known issue in the app manager v1.95.0 that causes application upgrades to fail for Helm charts that are deployed using the native Helm installation method. For more information about native Helm, see [How Replicated Deploys Helm Charts](/vendor/helm-overview#how-replicated-deploys-helm-charts) in _About Packaging with Helm_.
+   Your changes can take up to 24 hours to propagate.

-The upgrade failure occurs for a Helm chart when the following conditions are met:
-- The Helm chart in the application has been installed previously using the app manager v1.94.2 or earlier.
-- In the HelmChart custom resource for the Helm chart:
-  - `useHelmInstall` is set to `true`. See [useHelmInstall](/reference/custom-resource-helmchart#usehelminstall) in _HelmChart_.
-  - `namespace` is set to a value different than the namespace where the app manager is installed. See [namespace](/reference/custom-resource-helmchart#namespace) in _HelmChart_.
+   :::note
+   If you set up a [CAA record](https://letsencrypt.org/docs/caa/) for this hostname, you must include all Certificate Authorities (CAs) that Cloudflare partners with. The following CAA records are required to ensure proper certificate issuance and renewal:

-To avoid this known issue, Replicated recommends that you do not upgrade to v1.95.0.
To work around this issue in v1.95.0, manually uninstall the affected Helm chart using the Helm CLI, and then redeploy the application using the app manager. See [Helm Uninstall](https://helm.sh/docs/helm/helm_uninstall/) in the Helm documentation. + ```dns + @ IN CAA 0 issue "letsencrypt.org" + @ IN CAA 0 issue "pki.goog; cansignhttpexchanges=yes" + @ IN CAA 0 issue "ssl.com" + @ IN CAA 0 issue "amazon.com" + @ IN CAA 0 issue "cloudflare.com" + @ IN CAA 0 issue "google.com" + ``` -## 1.94.2 + Failing to include any of these CAs might prevent certificate issuance or renewal, which can result in downtime for your customers. For additional security, you can add an IODEF record to receive notifications about certificate requests: -Released on February 17, 2023 + ```dns + @ IN CAA 0 iodef "mailto:your-security-team@example.com" + ``` + ::: -Support for Kubernetes: 1.23, 1.24, 1.25, and 1.26 +1. For **Use Domain**, to set the new domain as the default, click **Yes, set as default**. Otherwise, click **Not now**. -### Improvements {#improvements-1-94-2} -* Updates kurl_proxy go mod gopkg.in/yaml.v3 to resolve CVE-2022-28948 with high severity. -* Support bundles generated from the admin console now include collectors and analyzers from all support bundle specifications found in the cluster. This includes support bundle specifications found in Secret and ConfigMap objects. For more information about how to generate support bundles using discovery, see [Generating Support Bundles](/vendor/support-bundle-generating#generate-a-bundle). + :::note + Replicated recommends that you do _not_ set a domain as the default until you are ready for it to be used by customers. + ::: -### Bug Fixes {#bug-fixes-1-94-2} -* Fixes a bug that didn't properly display config items that had the `affix` property. -* Fixes an issue where the button to rerun preflights did not show if preflights failed during an air gapped installation. -* Fixes a bug where Velero backups failed due to pods in the Shutdown state. +The Vendor Portal marks the domain as **Configured** after the verification checks for ownership and TLS certificate creation are complete. -## 1.94.1 +## Use Custom Domains -Released on February 14, 2023 +After you configure one or more custom domains in the Vendor Portal, you assign a custom domain by setting it as the default for all channels and customers or by assigning it to an individual release channel. -Support for Kubernetes: 1.23, 1.24, 1.25, and 1.26 +### Set a Default Domain -### Improvements {#improvements-1-94-1} -* Adds support for Velero 1.10. +Setting a default domain is useful for ensuring that the same domain is used across channels for all your customers. -### Bug Fixes {#bug-fixes-1-94-1} -* Fixes an issue where errors related to parsing and rendering HelmChart custom resources were silently ignored. +When you set a custom domain as the default, it is used by default for all new releases promoted to any channel, as long as the channel does not have a different domain assigned in its channel settings. -## 1.94.0 +Only releases that are promoted to a channel _after_ you set a default domain use the new default domain. Any existing releases that were promoted before you set the default continue to use the same domain that they used previously. -Released on February 7, 2023 +To set a custom domain as the default: -Support for Kubernetes: 1.23, 1.24, 1.25, and 1.26 +1. In the Vendor Portal, go to **Custom Domains**. 
-### New Features {#new-features-1-94-0} -* Updates the [kots velero configure-nfs](/reference/kots-cli-velero-configure-nfs) and [kots velero configure-hostpath](/reference/kots-cli-velero-configure-hostpath) commands to remove required manual steps and better automate the workflow. Users are now given a command to install Velero without a backup storage location. Then the user reruns the configure command to automatically configure the storage destination. -* Updates the [kots velero subcommands](/reference/kots-cli-velero-index) for configuring storage destinations, with instructions on how to install Velero if it is not yet installed. -* The instructions displayed in the admin console for configuring an NFS or host path snapshot storage destination no longer use the `kots velero print-fs-instructions` command. Instead they use the [kots velero configure-nfs](/reference/kots-cli-velero-configure-nfs) and [kots velero configure-hostpath](/reference/kots-cli-velero-configure-hostpath) commands to instruct the user to install Velero and configure the storage destination. +1. Next to the target domain, click **Set as default**. -### Improvements {#improvements-1-94-0} -* Updates the golang.org/x/net module in the kurl-proxy image to resolve CVE-2022-41721 with high severity. -* Updates github.com/dexidp/dex go mod to resolve CVE-2022-39222 with medium severity. -* Updates the rqlite/rqlite image to 7.13.1 to resolve CVE-2022-41721 with high severity and CVE-2022-41717 with medium severity. -* Updates the replicated/local-volume-provider image to v0.4.4 to resolve CVE-2022-41721 with high severity. -* Deprecates the [kots velero print-fs-instructions](/reference/kots-cli-velero-print-fs-instructions) command because its functionality is replaced by the improved [kots velero configure-hostpath](/reference/kots-cli-velero-configure-hostpath) and [kots velero configure-nfs](/reference/kots-cli-velero-configure-nfs) commands. -* Improves the layout of deploy and redeploy network errors. +1. In the confirmation dialog that opens, click **Yes, set as default**. -### Bug Fixes {#bug-fixes-1-94-0} -* Fixes an issue where the Edit Config icon was visible on the dashboard for application versions that did not include config. -* Fixes an issue where a user had to refresh the page to generate a new support bundle after deleting a support bundle that was still being generated. -* Fixes a regression where the text wasn't colored for certain status informer states. -* Fixes a bug where the app icon for latest version was shown instead of the icon for the currently deployed version. -* Fixes an issue where backup logs failed to download if a log line exceeded the default `bufio.Scanner` buffer size of 64KB. This limit is increased to 1MB in the admin console. +### Assign a Domain to a Channel {#channel-domain} -## 1.93.1 +You can assign a domain to an individual channel by editing the channel settings. When you specify a domain in the channel settings, new releases promoted to the channel use the selected domain even if there is a different domain set as the default on the **Custom Domains** page. -Released on January 27, 2023 +Assigning a domain to a release channel is useful when you need to override either the default Replicated domain or a default custom domain for a specific channel. For example: +* You need to use a different domain for releases promoted to your Beta and Stable channels. 
+* You need to test a domain in a development environment before you set the domain as the default for all channels.

-Support for Kubernetes: 1.23, 1.24, 1.25, and 1.26
+To assign a custom domain to a channel:

-### Improvements {#improvements-1-93-1}
-* Updates the Helm binary included in the kotsadm image from 3.9.3 to 3.11.0 to resolve CVE-2022-27664 and CVE-2022-32149 with high severity.
-* Updates the golang.org/x/net module to resolve CVE-2022-41721 with high severity.
-* Public and private SSH keys are deleted when GitOps is disabled and the keys are not in use by another application.
+1. In the Vendor Portal, go to **Channels** and click the settings icon for the target channel.

-### Bug Fixes {#bug-fixes-1-93-1}
-* Fixes a bug where the snapshots page showed no snapshots for a moment after starting a snapshot.
-* Fixes a bug where a warning related to `kubectl apply` displayed during embedded cluster installations.
-* Fixes an issue where registry.replicated.com images were rewritten to proxy.replicated.com when the application version specified a custom domain for the Replicated registry.
-* Fixes an issue where the Edit Config icon was visible on the version history page for application versions that did not include config.
+1. Under **Custom domains**, in the dropdown for the target Replicated endpoint, select the domain to use for the channel. For more information about channel settings, see [Settings](releases-about#settings) in _About Channels and Releases_.

-## 1.93.0
+   channel settings dialog

-Released on January 19, 2023
+   [View a larger version of this image](/images/channel-settings.png)

-Support for Kubernetes: 1.21, 1.22, 1.23, 1.24, and 1.25
+## Reuse a Custom Domain for Another Application

-### New Features {#new-features-1-93-0}
-* Adds the ability to delete support bundles from the Troubleshoot page of the admin console.
-* Config navigation links are highlighted as the user scrolls.
+If you have configured a custom domain for one application, you can reuse the custom domain for another application in the same team without going through the ownership and TLS certificate verification process again.

-### Improvements {#improvements-1-93-0}
-* Updates the helm.sh/helm/v3 module to v3.10.3 to resolve CVE-2022-23524, CVE-2022-23525, and CVE-2022-23526 with high severity.
+To reuse a custom domain for another application:

-### Bug Fixes {#bug-fixes-1-93-0}
-* Fixes an issue where the Cluster Management tab does not show up in Kubernetes installer clusters.
-* Fixes an issue where the description for generating a support bundle used a hard coded application name.
-* Fixes an issue on the Version History page where the row layout broke when displaying preflight check warnings.
-* Fixes an issue where an error occurred when uploading a PKCS #12 certificate with the private key listed first.
+1. In the Vendor Portal, select the application from the dropdown list.

-## 1.92.1
+1. Click **Custom Domains**.

-Released on December 29, 2022
+1. In the section for the target endpoint, click **Add your first custom domain** for your first domain, or click **Add new domain** for additional domains.

-Support for Kubernetes: 1.21, 1.22, 1.23, 1.24, and 1.25
+   The **Configure a custom domain** wizard opens.

-### Improvements {#improvements-1-92-1}
-* Preflight checks run and support bundles generate at least twice as fast as before.
-* Updates the kubectl binary in the kotsadm image to resolve CVE-2022-27664 and CVE-2022-32149 with high severity.
-* Updates the replicated/local-volume-provider image to v0.4.3 to resolve CVE-2021-46848 with critical severity.
+1. In the text box, enter the custom domain name that you want to reuse. Click **Save & continue**.
+
+   The last page of the wizard opens because the custom domain was verified previously.

-### Bug Fixes {#bug-fixes-1-92-1}
-* Fixes an issue that caused the license upload to fail for applications that include Helm charts with [required](https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-required-function) values missing from configuration.
+1. Do one of the following:

-## 1.92.0
+    - Click **Set as default**. In the confirmation dialog that opens, click **Yes, set as default**.
+
+    - Click **Not now**. You can come back later to set the domain as the default. The Vendor Portal shows that the domain has a Configured status because it was configured for a previous application, though it is not yet assigned as the default for this application.

-Released on December 16, 2022
-Support for Kubernetes: 1.21, 1.22, 1.23, 1.24, and 1.25
+## Remove a Custom Domain

-### New Features {#new-features-1-92-0}
-* The app manager uses the `replicatedRegistryDomain` domain to rewrite images stored in the Replicated registry, when the `replicatedRegistryDomain` field is provided in the Application custom resource.
-* Adds the [KubernetesVersion](/reference/template-functions-static-context#kubernetesversion), [KubernetesMajorVersion](/reference/template-functions-static-context#kubernetesmajorversion), and [KubernetesMinorVersion](/reference/template-functions-static-context#kubernetesminorversion) template functions.
+You can remove a custom domain at any time, but you should plan the transition so that you do not break any existing installations or documentation.

-### Improvements {#improvements-1-92-0}
-* Standardizes classes used for branding the admin console.
-* Pins the config navigation so that it does not disappear when scrolling.
-* The [`LicenseDockerCfg`](/reference/template-functions-license-context#licensedockercfg) template function in the License Context now utilizes the `replicatedRegistryDomain` and `proxyRegistryDomain` values from the Application custom resource, if specified.
+Removing a custom domain for the Replicated registry, proxy registry, or Replicated app service will break existing installations that use the custom domain. Existing installations need to be upgraded to a version that does not use the custom domain before it can be removed safely.

-### Bug Fixes {#bug-fixes-1-92-0}
-* Disables image garbage collection when an external registry is enabled.
-* Fixes a bug where the rqlite headless service manifest was not generated.
-* Fixes an issue where labels displayed as config items in the config navigation.
-* Fixes a bug where the `kots get config` command always decrypted passwords, even when the `--decrypt` flag wasn't passed.
+If you remove a custom domain for the download portal, it is no longer accessible using the custom URL. You will need to point customers to an updated URL.

-## 1.91.3
+To remove a custom domain:

-Released on December 10, 2022
+1. Log in to the [Vendor Portal](https://vendor.replicated.com) and click **Custom Domains**.

-Support for Kubernetes: 1.21, 1.22, 1.23, 1.24, and 1.25
+1. Verify that the domain is not set as the default and is not in use on any channels. You can edit the domains in use on a channel in the channel settings. For more information, see [Settings](releases-about#settings) in _About Channels and Releases_.
-### Bug Fixes {#bug-fixes-1-91-3} -* Fixes an issue where air gap uploads failed for applications containing required configuration without default values. -* Fixes errors when generating support bundles in existing clusters via the CLI. + :::important + When you remove a registry or Replicated app service custom domain, any installations that reference that custom domain will break. Ensure that the custom domain is no longer in use before you remove it from the Vendor Portal. + ::: -## 1.91.2 +1. Click **Remove** next to the unused domain in the list, and then click **Yes, remove domain**. -:::important -The app manager v1.91.2 has a known issue that affects the use of -required configuration items in air gapped environments. -See [Known Issue](#known-issues-1-91-2) below. -::: +================ +File: docs/vendor/custom-domains.md +================ +# About Custom Domains -Released on December 8, 2022 +This topic provides an overview and the limitations of using custom domains to alias the Replicated private registry, Replicated proxy registry, Replicated app service, and the Download Portal. -Support for Kubernetes: 1.21, 1.22, 1.23, 1.24, and 1.25 +For information about configuring and managing custom domains, see [Using Custom Domains](custom-domains-using). -### Improvements {#improvements-1-91-2} -* Improved the TLS certificate flow to make it clearer which fields are needed when using a self-signed certificate or uploading your own. -* Adds the `proxyRegistryDomain` field to the Application custom resource. When this field is provided, the app manager will rewrite proxied private images using that domain instead of proxy.replicated.com. +## Overview -### Bug Fixes {#bug-fixes-1-91-2} -* Fixes overlapping labels on TLS configuration page. -* Fixes an issue that caused the login button to be stuck in the "Logging in" state in Helm-managed mode (Beta). For more information on Helm-managed mode, see [Supporting helm CLI Installations (Beta)](/vendor/helm-install). -* Fixes an issue where snapshots to NFS storage locations failed due to file permission issues in environments running without MinIO. -* Fixes an issue that caused the license upload to fail for applications that include Helm charts with [`required`](https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-required-function) values missing from configuration. -* Fixes an issue where release notes did not display when the release notes icon was clicked on the dashboard. -* Fixes an issue where no tab was selected by default when opening the View Logs modal in Helm-managed mode. -* Fixes an issue that prevented image garbage collection from being enabled or disabled. -* Fixes an issue where DockerHub credentials provided to the admin console via the [kots docker ensure-secret](/reference/kots-cli-docker-ensure-secret) CLI command did not increase the rate limit. -* Fixes an issue that prevented Helm render errors from being surfaced to the user when running [`kots upload`](/reference/kots-cli-upload) commands. -* Fixes leaked goroutines. -* Increases the memory limit for rqlite to 1Gi to fix an issue where rqlite was OOM killed during the migration from Postgres when there was a very large number of versions available in the admin console. +You can use custom domains to alias Replicated endpoints by creating Canonical Name (CNAME) records for your domains. -### Known Issue {#known-issues-1-91-2} +Replicated domains are external to your domain and can require additional security reviews by your customer. 
Using custom domains as aliases can bring the domains inside an existing security review and reduce your exposure. -There is a known issue in the app manager v1.91.2 that causes air gap uploads to fail when there are configuration items with the `required` property set to `true` and no default value specified. +TXT records must be created to verify: -To avoid this known issue, Replicated recommends that you do not upgrade to v1.91.2. To work around this issue in v1.92.2, ensure that all required configuration items in the Config custom resource have a default value. For more information about adding default values to configuration items, see [`default` and `value`](/reference/custom-resource-config#default-and-value) in _Config_. +- Domain ownership: Domain ownership is verified when you initially add a record. +- TLS certificate creation: Each new domain must have a new TLS certificate to be verified. -## 1.91.1 +The TXT records can be removed after the verification is complete. -Released on November 18, 2022 +You can configure custom domains for the following services, so that customer-facing URLs reflect your company's brand: -Support for Kubernetes: 1.21, 1.22, 1.23, 1.24, and 1.25 +- **Replicated registry:** Images and Helm charts can be pulled from the Replicated registry. By default, this registry uses the domain `registry.replicated.com`. We suggest using a CNAME such as `registry.{your app name}.com`. -### Improvements {#improvements-1-91-1} -* Updates the Snapshots page to standardize the look of admin console. -* Updates the schemahero image to v0.13.8 to resolve CVE-2022-32149 with high severity. -* Updates the kotsadm-migrations base image to `debian:bullseye` to resolve CVE-2022-29458 with high severity. -* Updates the kurl-proxy base image to `debian:bullseye-slim` to resolve CVE-2022-29458 with high severity. -* Updates the github.com/mholt/archiver module to v3.5.1 to resolve CVE-2019-10743 with medium severity. -* Updates the replicated/local-volume-provider image to v0.4.1 to resolve CVE-2022-29458 with high severity. -* Updates the Helm dependency from 3.9.0 to 3.9.4 to resolve CVE-2022-36055 with medium severity. +- **Proxy registry:** Images can be proxied from external private registries using the Replicated proxy registry. By default, the proxy registry uses the domain `proxy.replicated.com`. We suggest using a CNAME such as `proxy.{your app name}.com`. -### Bug Fixes {#bug-fixes-1-91-1} -* Fixes a bug that could result in `invalid status code from registry 400` error when pushing images from an air gap bundle into a private registry. -* Fixes an issue where configuring snapshot schedules in Firefox didn't work. -* Fixes an issue where installing or upgrading the app manager failed for GKE Autopilot clusters. -* Fixes an issue where the existing cluster snapshot onboarding flow did not work when using the local volume provider plugin. +- **Replicated app service:** Upstream application YAML and metadata, including a license ID, are pulled from replicated.app. By default, this service uses the domain `replicated.app`. We suggest using a CNAME such as `updates.{your app name}.com`. -## 1.91.0 +- **Download Portal:** The Download Portal can be used to share customer license files, air gap bundles, and so on. By default, the Download Portal uses the domain `get.replicated.com`. We suggest using a CNAME such as `portal.{your app name}.com` or `enterprise.{your app name}.com`. 
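+
+For illustration only, the CNAME suggestions above might appear in a DNS zone file as follows. These hostnames are hypothetical (an application named `example`), and the actual CNAME target strings to use are provided when you configure each domain in the Vendor Portal:
+
+```dns
+registry.example.com.  IN CNAME  registry.replicated.com.
+proxy.example.com.     IN CNAME  proxy.replicated.com.
+updates.example.com.   IN CNAME  replicated.app.
+portal.example.com.    IN CNAME  get.replicated.com.
+```
+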
-Released on November 14, 2022
+## Limitations

-Support for Kubernetes: 1.21, 1.22, 1.23, 1.24, and 1.25
+Using custom domains has the following limitations:

-### New Features {#new-features-1-91-0}
-* Updates the Troubleshoot and Config pages to standardize the look of admin console.
+- A single custom domain cannot be used for multiple endpoints. For example, a single domain can map to `registry.replicated.com` for any number of applications, but cannot map to both `registry.replicated.com` and `proxy.replicated.com`, even if the applications are different.

-### Improvements {#improvements-1-91-0}
-* Updates the kotsadm base image to `debian:bullseye-slim` to resolve CVE-2022-29458 with high severity.
-* Shows password complexity rules when setting the admin console password with the CLI. Passwords must be at least six characters long.
+- Custom domains cannot be used to alias api.replicated.com (legacy customer-facing APIs) or kURL.

-### Bug Fixes {#bug-fixes-1-91-0}
-* Fixes an issue where the admin console automatically redirected to the login page after a snapshot was restored successfully, which could have prevented users from knowing the outcome of the restore.
+- Multiple custom domains can be configured, but only one custom domain can be the default for each Replicated endpoint. All configured custom domains work whether or not they are the default.

-## 1.90.0
+- A particular custom domain can only be used by one team.

-Released on November 4, 2022
+================
+File: docs/vendor/custom-metrics.md
+================
+# Configuring Custom Metrics (Beta)

-Support for Kubernetes: 1.21, 1.22, 1.23, 1.24, and 1.25
+This topic describes how to configure an application to send custom metrics to the Replicated Vendor Portal.

-### New Features {#new-features-1-90-0}
-* Adds the ability to remove registry info from the **Registry settings** page.
-* Adds the ability to use status informers for Helm charts when running in Helm-managed mode (Beta). For more information on Helm-managed mode, see [Supporting helm CLI Installations (Beta)](/vendor/helm-install).
+## Overview

-### Improvements {#improvements-1-90-0}
-* Updates the golang.org/x/text module in the kurl-proxy image used for embedded cluster installations, to resolve CVE-2022-32149 with high severity.
-* The file explorer now includes rendered `values.yaml` files for each Helm chart that is deployed by the app manager.
+In addition to the built-in insights displayed in the Vendor Portal by default (such as uptime and time to install), you can also configure custom metrics to measure instances of your application running in customer environments. Custom metrics can be collected for application instances running in online or air gap environments.

-### Bug Fixes {#bug-fixes-1-90-0}
-* Updates the Prometheus query to show disk usage by instance and mount point.
-* Fixes an issue where checking for updates failed with the message "License is expired", but the **License** tab indicated that the license was not expired.
-* Fixes an issue where the admin console could restart during the migration from Postgres to rqlite due to a short timeout.
+Custom metrics can be used to generate insights on customer usage and adoption of new features, which can help your team to make more informed prioritization decisions.
For example: +* Decreased or plateaued usage for a customer can indicate a potential churn risk +* Increased usage for a customer can indicate the opportunity to invest in growth, co-marketing, and upsell efforts +* Low feature usage and adoption overall can indicate the need to invest in usability, discoverability, documentation, education, or in-product onboarding +* High usage volume for a customer can indicate that the customer might need help in scaling their instance infrastructure to keep up with projected usage -## 1.89.0 +## How the Vendor Portal Collects Custom Metrics -Released on October 28, 2022 +The Vendor Portal collects custom metrics through the Replicated SDK that is installed in the cluster alongside the application. -Support for Kubernetes: 1.21, 1.22, 1.23, 1.24, and 1.25 +The SDK exposes an in-cluster API where you can configure your application to POST metric payloads. When an application instance sends data to the API, the SDK sends the data (including any custom and built-in metrics) to the Replicated app service. The app service is located at `replicated.app` or at your custom domain. -### New Features {#new-features-1-89-0} -* Automatically migrates data from Postgres to rqlite and removes Postgres. Also introduces a new [kubectl kots enable-ha](/reference/kots-cli-enable-ha) command that runs rqlite as three replicas for higher availability. This command should only be run on clusters with at least three nodes. Now multiple node clusters deployed with the Kubernetes installer can use OpenEBS local PV, because data will be replicated across all three replicas of rqlite, allowing the app manager to run on any node in the cluster without requiring distributed storage like Rook provides. +If any values in the metric payload are different from the current values for the instance, then a new event is generated and displayed in the Vendor Portal. For more information about how the Vendor Portal generates events, see [How the Vendor Portal Generates Events and Insights](/vendor/instance-insights-event-data#about-events) in _About Instance and Event Data_. -### Bug Fixes {#bug-fixes-1-89-0} -* Fixes an issue that causes the Released timestamp to be the same for all releases on the [version history](/enterprise/updating-apps#update-an-application-in-the-admin-console) page in [Helm managed mode (Alpha)](/vendor/helm-install). -* Allows kots CLI commands to use the kubeconfig namespace by default if a flag is not provided. -* Fixes an issue where installing, updating, or configuring applications that have many images defined in KOTS custom resources (such as collectors, preflights, and analyzers) hangs or takes a long time. -* Fixes an issue that could cause the preflight progress bar to be stuck at nearly 100% but never complete. -* Fixes an issue where unused Host Path and NFS volumes were not being cleaned up when changing snapshot storage locations in clusters without MinIO. -* Fixes the issue that caused [`Sequence`](/reference/template-functions-license-context#sequence) template function to return 1 instead of 0 during initial configuration. 
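+
+For example, a minimal payload reporting a single custom `activeUsers` metric might look like the following sketch (metric values are nested under a top-level `data` key, as shown in [Configure Custom Metrics](#configure-custom-metrics) below):
+
+```json
+{
+  "data": {
+    "activeUsers": 10
+  }
+}
+```
+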
+The following diagram demonstrates how a custom `activeUsers` metric is sent to the in-cluster API and ultimately displayed in the Vendor Portal, as described above:

-## 1.88.0
+Custom metrics flowing from customer environment to Vendor Portal

-Released on October 19, 2022
+[View a larger version of this image](/images/custom-metrics-flow.png)

-Support for Kubernetes: 1.21, 1.22, 1.23, 1.24, and 1.25
+## Requirements

-### New Features {#new-features-1-88-0}
-* Adds ability to deploy an application with new values after syncing license from admin console in Helm-managed mode (Alpha). For more information on Helm-managed mode, see [Supporting helm CLI Installations (Alpha)](/vendor/helm-install).
+To support the collection of custom metrics in online and air gap environments, the Replicated SDK version 1.0.0-beta.12 or later must be running in the cluster alongside the application instance.

-### Improvements {#improvements-1-88-0}
-* Updates the kotsadm/dex image to v2.35.3 to resolve CVE-2022-27664 with high severity.
-* Updates the golang.org/x/net module to resolve CVE-2022-27664 with high severity.
-* Updates the schemahero image to v0.13.5 to resolve CVE-2022-37434 with critical severity and CVE-2022-27664 with high severity.
-* Updates the replicated/local-volume-provider image to v0.3.10 to resolve CVE-2022-37434 with critical severity and CVE-2022-27664 with high severity.
+The `PATCH` and `DELETE` methods described below are available in the Replicated SDK version 1.0.0-beta.23 or later.

-### Bug Fixes {#bug-fixes-1-88-0}
-* Fixes an issue where the cluster management page was blank when the pod capacity for a node was defined with an SI prefix (e.g., `1k`).
-* Fixes an issue where the admin console occasionally would not redirect to the dashboard after preflight checks were skipped.
-* Fixes an issue where the application icon did not show on the login page until the application was deployed.
+For more information about the Replicated SDK, see [About the Replicated SDK](/vendor/replicated-sdk-overview).

-## 1.87.0
+If you have any customers running earlier versions of the SDK, Replicated recommends that you add logic to your application to gracefully handle a 404 from the in-cluster APIs.

-Released on October 12, 2022
+## Limitations

-Support for Kubernetes: 1.21, 1.22, 1.23, 1.24, 1.25
+Custom metrics have the following limitations:

-### New Features {#new-features-1-87-0}
-* Uses Ed25519 SSH keys for GitOps when integrating with Github Enterprise. See [Pushing Updates to a GitOps Workflow](/enterprise/gitops-workflow).
+* The label that is used to display metrics in the Vendor Portal cannot be customized. Metrics are sent to the Vendor Portal with the same name that is sent in the `POST` or `PATCH` payload. The Vendor Portal then converts camel case to title case: for example, `activeUsers` is displayed as **Active Users**.

-### Improvements {#improvements-1-87-0}
-* Adds support for template functions to the `spec.graphs` field of the Application custom resource. See [Application](/reference/custom-resource-application).
+* The in-cluster APIs accept only JSON scalar values for metrics. Any requests containing nested objects or arrays are rejected.

-### Bug Fixes {#bug-fixes-1-87-0}
-* Fixes an issue where log tabs for Helm installs were hidden.
-* Fixes a bug that caused pre-existing rows on the version history page in Helm-managed mode (Alpha) to be highlighted as newly available versions when the page is opened.
For more information on Helm-managed mode, see [Supporting helm CLI Installations (Alpha)](/vendor/helm-install).
-* Fixes an issue that could cause embedded installations to fail with error "yaml: did not find expected node content" when installing behind an `HTTP_PROXY`.
-* Fixes an issue where APIs that require an auth token were called while the client was logged out.
-* Fixes an issue that caused the Troubleshoot page to display the support bundle collection progress bar even when a support bundle was not being collected.
-* Sorts the entitlements returned in the `/license` endpoint to ensure that they display consistently in the admin console.
+You can configure your application to `POST` or `PATCH` a set of metrics as key-value pairs to the API that is running in the cluster alongside the application instance.

-### Known Issue {#known-issues-1-87-0}
+To remove an existing custom metric, use the `DELETE` endpoint with the custom metric name.

-There is a known issue in the app manager v1.87.0 that causes a KOTS icon, instead of the application icon, to display on the login page before the application is deployed. After the application is deployed, the application icon shows on the login screen.
+The Replicated SDK provides an in-cluster API custom metrics endpoint at `http://replicated:3000/api/v1/app/custom-metrics`.

-## 1.86.2
+**Example:**

-Released on October 7, 2022
+```bash
+POST http://replicated:3000/api/v1/app/custom-metrics
+```

-Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+```json
+{
+  "data": {
+    "num_projects": 5,
+    "weekly_active_users": 10
+  }
+}
+```

-### Improvements {#improvements-1-86-2}
-* Changes the way CSS and font files are included for custom admin console branding (Alpha). If you have early access to this feature, see the Alpha documentation for more information.
+```bash
+PATCH http://replicated:3000/api/v1/app/custom-metrics
+```

-### Bug Fixes {#bug-fixes-1-86-2}
-* Fixes an issue where large font files for custom admin console branding (Alpha) caused the admin console to fail to create a new application version.
-* Fixes an issue where the identity service login redirected to the login page after a successful login.
-* Fixes an issue in the **Cluster Management** tab where the button for adding a primary node stopped working if the original join token expired.
-* Fixes a bug that allowed the identity service route to be accessed even if the feature was not enabled.
-* Fixes a bug that caused the admin console Pod to terminate with an error due to a panic when checking for application updates in Helm-managed mode (Alpha). For more information on Helm-managed mode, see [Supporting helm CLI Installations (Alpha)](/vendor/helm-install).
+```json
+{
+  "data": {
+    "num_projects": 54,
+    "num_error": 2
+  }
+}
+```

-## 1.86.1
+```bash
+DELETE http://replicated:3000/api/v1/app/custom-metrics/num_projects
+```

-Released on September 30, 2022

-Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24

-### Improvements {#improvements-1-86-1}
-* Only show relevant tabs on the deployment logs modal depending on whether or not the admin console is in Helm-managed mode.
-* Standardizes all page titles using the format **Page Name | App Slug | Admin Console**. The page title is the text that shows in the browser tab.
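+
+As a concrete sketch, the same requests can be sent with `curl` from any workload running in the application's namespace. The `replicated` hostname below is the SDK's default in-cluster service name, as shown in the examples above:
+
+```bash
+# POST replaces the entire set of custom metrics for the instance
+curl -X POST http://replicated:3000/api/v1/app/custom-metrics \
+  -H "Content-Type: application/json" \
+  -d '{"data": {"num_projects": 5, "weekly_active_users": 10}}'
+
+# PATCH merges partial updates without removing other keys
+curl -X PATCH http://replicated:3000/api/v1/app/custom-metrics \
+  -H "Content-Type: application/json" \
+  -d '{"data": {"num_projects": 54, "num_error": 2}}'
+
+# DELETE removes a single custom metric by name
+curl -X DELETE http://replicated:3000/api/v1/app/custom-metrics/num_projects
+```
+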
+### POST vs PATCH

-### Bug Fixes {#bug-fixes-1-86-1}
-* Fixes an issue where automatic update checks failed when the interval is too short for pending updates to be fetched.
-* Fixes an issue where the automatic update checks modal didn't show custom schedules after they were saved. See [Configure Automatic Updates](/enterprise/updating-apps#configure-automatic-updates).
-* Fixes an issue in Helm-managed mode where checking for updates from the version history page did not show the "License is expired" error when the check failed due to an expired license. For more information on Helm-managed mode, see [Supporting helm CLI Installations (Alpha)](/vendor/helm-install).
-* Fixes an issue where some icons displayed in a very large size on Firefox. See [Known Issue](#known-issues-1-86-0) under _1.86.0_.
-* Fixes an issue where the specified registry namespace was sometimes ignored for KOTS images if the specified registry hostname already included a namespace.
+The `POST` method always replaces the existing data with the most recent payload received. Any existing keys not included in the most recent payload will still be accessible in the instance events API, but they will no longer appear in the instance summary.

-## 1.86.0
+The `PATCH` method accepts partial updates, and adds new custom metrics when a key-value pair that does not currently exist is passed.

-:::important
-The app manager v1.86.0 contains a known issue that affects the use of
-the Replicated admin console in Firefox browsers. This issue is resolved
-in the app manager v1.86.1.
-See [Known Issue](#known-issues-1-86-0) below.
-:::
+In most cases, using the `PATCH` method is recommended.

-Released on September 27, 2022
+For example, if a component of your application sends the following via the `POST` method:

-Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+```json
+{
+  "numProjects": 5,
+  "activeUsers": 10
+}
+```

-### New Features {#new-features-1-86-0}
-* Allows icon colors to be changed with the CSS when branding the admin console (Alpha). To enable this feature on your account, log in to your vendor portal account. Select **Support** > **Request a feature**, and submit a feature request for "admin console branding".
+Then, the component later sends the following, also via the `POST` method:

-### Improvements {#improvements-1-86-0}
-* Removes the license upload page when the admin-console Helm chart is installed without installing a Replicated application.
-* Makes port forward reconnections faster.
+```json
+{
+  "activeUsers": 10,
+  "usingCustomReports": false
+}
+```

-### Bug Fixes {#bug-fixes-1-86-0}
-* Fixes the message alignment when a strict preflight check fails.
-* Fixes a bug where versions with `pending_download` status were shown incorrectly on the version history page.
-* Fixes a bug where versions with `pending_download` status caused the `View files` tab to navigate to a version that had not been downloaded yet, resulting in a UI error.
-* Fixes a bug where downloading an application version that is incompatible with the current admin console version made it impossible to check for updates until the admin console pod was restarted.
-* Fixes a bug that caused CLI feedback spinners to spin indefinitely.
-* Fixes an issue that caused config templates to be applied to the wrong values.yaml file in Helm-managed mode (Alpha). For more information about Helm-managed mode, see [Supporting helm CLI Installations (Alpha)](/vendor/helm-install).
-* Fixes an issue where the license was not synced when checking for application updates in Helm-managed mode (Alpha). -* Fixes a bug in Helm-managed mode (Alpha) that required you to visit the config screen to deploy a new version with required config items, even if all of the config values had been set in a previously deployed version. -* Fixes a bug that caused the currently deployed version to temporarily appear as a newly available version when an update check ran in Helm-managed mode (Alpha). -* Fixes styling on `
    ` elements in the Helm install modals (Alpha) so that their heights match the content.
+The instance detail will show `Active Users: 10` and `Using Custom Reports: false`, which represents the most recent payload received. The previously sent `numProjects` value is discarded from the instance summary and is available in the instance events payload. To preserve `numProjects` from the initial payload and upsert `usingCustomReports` and `activeUsers`, use the `PATCH` method instead of `POST` on subsequent calls to the endpoint.
     
    -### Known Issue {#known-issues-1-86-0}
    +For example, if a component of your application initially sends the following via the `POST` method:
     
    -This issue is resolved in the app manager v1.86.1.
    +```json
    +{
    +  "numProjects": 5,
+  "activeUsers": 10
    +}
    +``` 
     
    -There is a known issue in the app manager v1.86.0 that causes certain icons in the Replicated admin console to display incorrectly in Firefox browsers. The icons display in a very large size, making it difficult for users to access the fields on several of the admin console screens.
+Then, the component later sends the following via the `PATCH` method:
+
    +```json
    +{
    +  "usingCustomReports": false
    +}
    +```
     
    -To use the admin console on v1.86.0, users should open the admin console in a supported browser other than Firefox, such as Google Chrome. For more information about supported browsers, see [Supported Browsers](/enterprise/installing-general-requirements#supported-browsers) in _Installation Requirements_.
+The instance detail will show `Num Projects: 5`, `Active Users: 10`, and `Using Custom Reports: false`, which represents the merged and upserted payload.
     
    -If users are unable to use a browser other than Firefox to access the admin console, Replicated recommends that they do not upgrade to the app manager v1.86.0.
    +### NodeJS Example
     
    -## 1.85.0
    +The following example shows a NodeJS application that sends metrics on a weekly interval to the in-cluster API exposed by the SDK:
     
    -Released on September 19, 2022
    +```javascript
    +async function sendMetrics(db) {
     
    -Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
    +    const projectsQuery = "SELECT COUNT(*) as num_projects from projects";
    +    const numProjects = (await db.getConnection().queryOne(projectsQuery)).num_projects;
     
    -### New Features {#new-features-1-85-0}
    -* Adds the ability to automatically check for new chart versions that are available when running in Helm-managed mode (Alpha). For more information about Helm-managed mode, see [Supporting helm CLI Installations (Alpha)](/vendor/helm-install).
    -* In Helm-managed mode, new Helm chart versions that introduce a required configuration value must be configured before they can be deployed.
    +    const usersQuery = 
    +        "SELECT COUNT(*) as active_users from users where DATEDIFF('day', last_active, CURRENT_TIMESTAMP) < 7";
    +    const activeUsers = (await db.getConnection().queryOne(usersQuery)).active_users;
     
    -### Improvements {#improvements-1-85-0}
    -* Improves how license fields display in the admin console, especially when there are multiple license fields or when the value of a field is long.
    -* Updates the replicated/local-volume-provider image to v0.3.8 to resolve CVE-2022-2509 with high severity.
    -* Updates the github.com/open-policy-agent/opa module to resolve CVE-2022-36085 with critical severity.
    -* Updates the kotsadm/dex image to v2.34.0 to resolve CVE-2022-37434 with critical severity and CVE-2021-43565, CVE-2022-27191, and CVE-2021-44716 with high severity.
    +    const metrics = { data: { numProjects, activeUsers }};
    +    
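+    // Note: POST replaces the entire custom metric set each time;
+    // use PATCH instead to merge new keys without clearing existing ones.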
+    const res = await fetch('http://replicated:3000/api/v1/app/custom-metrics', {
    +        method: 'POST',
    +        headers: {
    +          "Content-Type": "application/json",
    +        },
    +        body: JSON.stringify(metrics),
    +    });
    +    if (res.status !== 200) {
    +        throw new Error(`Failed to send metrics: ${res.statusText}`);
    +    }
    +}
     
    -### Bug Fixes {#bug-fixes-1-85-0}
    -* Fixes an issue in embedded clusters where image garbage collection deletes images that are still in use by the application.
    -* Increases the memory limit for the `kotsadm-minio` StatefulSet from 200Mi to 512Mi.
    -* Fixes an issue where headless/unattended installations hang in embedded clusters with recent Kubernetes versions.
    -* Fixes an issue that caused values to be missing on the Config page for pending updates in Helm-managed mode (Alpha).
    -* Fixes checkbox alignment on the Config page.
    -* Fixes a bug that did not display errors on the Config page when values for required config items were missing in Helm-managed mode (Alpha).
    +async function startMetricsLoop(db) {
     
    -## 1.84.0
+    const ONE_WEEK_IN_MS = 1000 * 60 * 60 * 24 * 7
     
    -Released on September 12, 2022
    +    // send metrics once on startup
    +    await sendMetrics(db)
    +      .catch((e) => { console.log("error sending metrics: ", e) });        
     
    -Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
    +    // schedule weekly metrics payload
     
    -### New Features {#new-features-1-84-0}
    -* Adds the ability to configure and deploy new Helm chart versions when the admin console is running in Helm-managed mode (Alpha).
    -* Adds support for including custom font files in an application release, which can be used when branding the admin console (Alpha). To enable this feature on your account, log in to your vendor portal account. Select **Support** > **Request a feature**, and submit a feature request for "admin console branding".
    +    setInterval( () => {
+        sendMetrics(db)
    +          .catch((e) => { console.log("error sending metrics: ", e) });        
+    }, ONE_WEEK_IN_MS);
    +}
     
    -### Improvements {#improvements-1-84-0}
    -* Updates the MinIO image to address CVE-2022-2526 with high severity.
    -* Updates the github.com/gin-gonic/gin module in the kurl-proxy image used for embedded cluster installations, to resolve CVE-2020-28483 with high severity.
    -* Updates SchemaHero to v0.13.2 to resolve CVE-2022-21698.
    +startMetricsLoop(getDatabase());
    +```
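+
+To smoke-test the endpoint without deploying application code, you can send a one-off request from a temporary pod. The following is a sketch; run it in the namespace where the SDK is installed:
+
+```bash
+kubectl run curl-test --rm -it --restart=Never --image=curlimages/curl --command -- \
+  curl -s -X POST http://replicated:3000/api/v1/app/custom-metrics \
+  -H "Content-Type: application/json" \
+  -d '{"data":{"numProjects":5,"activeUsers":10}}'
+```
+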
     
    -### Bug Fixes {#bug-fixes-1-84-0}
    -* Updates the `support-bundle` CLI command provided in the admin console to use the generated Kubernetes resources instead of the raw upstream specification when running in Helm-managed mode (Alpha).
    -* Fixes an issue that caused Secrets and ConfigMaps created by the admin console to be left in the namespace after a Helm chart is uninstalled in Helm-managed mode (Alpha).
    -* Fixes an issue where application status informers did not update if the admin console Pod was restarted.
    -* Fixes an issue where a user that is logged in could navigate to the login page instead of being redirected to the application dashboard.
    -* Fixes an issue where the app manager failed to render Helm charts that have subcharts referenced as local file repositories.
    +## View Custom Metrics
     
    -## 1.83.0
    +You can view the custom metrics that you configure for each active instance of your application on the **Instance Details** page in the Vendor Portal.
     
    -Released on September 1, 2022
    +The following shows an example of an instance with custom metrics:
     
    -Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
    +Custom Metrics section of Instance details page
     
    -### New Features {#new-features-1-83-0}
    -* Adds support for custom branding of the admin console using CSS (Alpha). To enable this feature on your account, log in to your vendor portal account. Select **Support** > **Request a feature**, and submit a feature request for "admin console branding".
    +[View a larger version of this image](/images/instance-custom-metrics.png)
     
    -### Improvements {#improvements-1-83-0}
    -* Icons supplied in the `icon` field of the Application custom resource can be square or circular.
    +As shown in the image above, the **Custom Metrics** section of the **Instance Details** page includes the following information:
    +* The timestamp when the custom metric data was last updated.
    +* Each custom metric that you configured, along with the most recent value for the metric.
    +* A time-series graph depicting the historical data trends for the selected metric.
     
    -### Bug Fixes {#bug-fixes-1-83-0}
    -* Fixes an issue that could cause inadvertent application upgrades when redeploying or updating the config of the currently installed revision in Helm-managed mode (Alpha). For more information about Helm-managed mode, see [Supporting helm CLI Installations (Alpha)](/vendor/helm-install).
    -* Fixes an issue where the namespace was omitted from `helm upgrade` commands displayed in the admin console in Helm-managed mode (Alpha). For more information about Helm-managed mode, see [Supporting helm CLI Installations (Alpha)](/vendor/helm-install).
    -* Removes the checkbox to automatically deploy updates in Helm-managed mode, because this is unsupported. For more information about Helm-managed mode, see [Supporting helm CLI Installations (Alpha)](/vendor/helm-install).
    -* Fixes an issue where updating the registry settings fails due to permission issues even when the provided credentials have access to the registry.
    -* Fixes an issue in Helm-managed mode that could cause Replicated templates to show on the config page instead of the rendered values. For more information about Helm-managed mode, see [Supporting helm CLI Installations (Alpha)](/vendor/helm-install).
    -* Fixes an issue where trailing line breaks were removed during Helm chart rendering.
    +Custom metrics are also included in the **Instance activity** stream of the **Instance Details** page. For more information, see [Instance Activity](/vendor/instance-insights-details#instance-activity) in _Instance Details_.
     
    -## 1.82.0
    +## Export Custom Metrics
     
    -Released on August 25, 2022
+You can use the Vendor API v3 `/app/{app_id}/events` endpoint to programmatically access historical time series data containing instance-level events, including any custom metrics that you have defined. For more information about the endpoint, see [Export Customer and Instance Data](/vendor/instance-data-export).
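+
+For example, a minimal request might look like the following sketch, where `APP_ID` is your application ID and `$REPLICATED_API_TOKEN` holds a Vendor Portal API token (see the endpoint reference for the supported query parameters):
+
+```bash
+curl -s -H "Authorization: $REPLICATED_API_TOKEN" \
+  "https://api.replicated.com/vendor/v3/app/APP_ID/events"
+```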
     
    -Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
    +================
    +File: docs/vendor/customer-adoption.md
    +================
    +# Adoption Report
     
    -### New Features {#new-features-1-82-0}
    -* Adds support for a new air gap bundle format that supports image digests and deduplication of image layers shared across images in the bundle. The new air gap bundle format is in Beta. To enable this feature on your account, log in to your vendor portal account. Select **Support** > **Request a feature**, and submit a feature request for "new air gap bundle format".
    -* Adds support for deploying images that are referenced by digest or by digest and tag, rather than by tag alone, in online installations that have a private registry configured.
    -* Adds support for displaying the config values for each revision deployed in Helm-managed mode (Alpha). For more information about Helm-managed mode, see [Supporting helm CLI Installations (Alpha)](/vendor/helm-install).
    +This topic describes the insights in the **Adoption** section on the Replicated Vendor Portal **Dashboard** page.
     
    -### Improvements {#improvements-1-82-0}
    -* Updates the `local-volume-provider image` to address CVE-2021-44716, CVE-2021-33194, and CVE-2022-21221 with high severity.
    -* Updates the configuration pages for the GitOps workflow, making it easier to set up.
    +## About Adoption Rate
     
    -### Bug Fixes {#bug-fixes-1-82-0}
    -* Fixes an issue that prevented you from typing in the **Path** field when **Other S3-Compatible Storage** was set as the snapshot storage destination.
    -* Fixes an issue where the `LicenseFieldValue` template function always returned an empty string for the `isSnapshotSupported` value. For more information about the `LicenseFieldValue` template function, see [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue).
    +The **Adoption** section on the **Dashboard** provides insights about the rate at which your customers upgrade their instances and adopt the latest versions of your application. As an application vendor, you can use these adoption rate metrics to learn if your customers are completing upgrades regularly, which is a key indicator of the discoverability and ease of application upgrades.
     
    -## 1.81.1
+The Vendor Portal generates adoption rate data from all of your customers' application instances that have checked in during the selected time period. For more information about instance check-ins, see [How the Vendor Portal Collects Instance Data](instance-insights-event-data#about-reporting) in _About Instance and Event Data_.
     
    -Released on August 22, 2022
    +The following screenshot shows an example of the **Adoption** section on the **Dashboard**:
     
    -Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
    +![Adoption report section on dashboard](/images/customer_adoption_rates.png)
     
    -### Improvements {#improvements-1-81-1}
    -* Show deploy logs for Helm charts when running in Helm-managed mode (Alpha). For more information about Helm-managed mode, see [Supporting helm CLI Installations (Alpha)](/vendor/helm-install).
    -* Updates the Helm binary included in the kotsadm image from 3.8.2 to 3.9.3 to resolve CVE-2022-21698 and CVE-2022-27191 with high severity.
    -* Updates the golang.org/x/net module in the kurl-proxy image used for embedded cluster installations, to resolve CVE-2021-44716 with high severity.
    -* Updates the dex image from 2.32.0 to 2.33.0 to resolve CVE-2022-30065, CVE-2022-2097, and CVE-2022-27191 with high severity.
    +[View a larger version of this image](/images/customer_adoption_rates.png)
     
    -### Bug Fixes {#bug-fixes-1-81-1}
    -* Fixes an issue where starting a manual snapshot resulted in an error dialog when using Firefox or Safari.
    -* Fixes an issue that caused images formatted as `docker.io/image:tag` to not be rewritten when upgrading applications in airgapped environments. For more information about rewriting images, see [Patching the Image Location with Kustomize](/vendor/packaging-private-images#patching-the-image-location-with-kustomize) in _Connecting to an Image Registry_.
    +As shown in the screenshot above, the **Adoption** report includes a graph and key adoption rate metrics. For more information about how to interpret this data, see [Adoption Graph](#graph) and [Adoption Metrics](#metrics) below.
     
    -## 1.81.0
    +The **Adoption** report also displays the number of customers assigned to the selected channel and a link to the report that you can share with other members of your team.
     
    -Released on August 12, 2022
    +You can filter the graph and metrics in the **Adoption** report by:
    +* License type (Paid, Trial, Dev, or Community)
    +* Time period (the previous month, three months, six months, or twelve months)
    +* Release channel to which instance licenses are assigned, such as Stable or Beta
     
    -Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
    +## Adoption Graph {#graph}
     
    -### New Features {#new-features-1-81-0}
    -* Adds support for the `alias` field in Helm chart dependencies.
    -* Adds support for image tags and digests to be used together for most online installations. For more information, see [Support for Image Tags and Digests](/vendor/packaging-private-images#support-for-image-tags-and-digests) in *Connecting to an Image Registry*.
    +The **Adoption** report includes a graph that shows the percent of active instances that are running different versions of your application within the selected time period.
     
    -### Improvements {#improvements-1-81-0}
    -* Helm v2 will only be used if `helmVersion` is set to `v2` in the HelmChart custom resource. Support for Helm v2, including security patches, ended on November 13, 2020, and support for Helm v2 in the app manager will be removed in the near future. For more information about the HelmChart custom resource, see [HelmChart](/reference/custom-resource-helmchart).
    -* Improves the UI responsiveness on the Config page.
    +The following shows an example of an adoption rate graph with three months of data:
     
    -### Bug Fixes {#bug-fixes-1-81-0}
    -* Fixes an issuse where the license tab did not show for Helm-managed installations.
    -* Fixes an issue that could cause `Namespace` manifests packaged in Helm charts to be excluded from deployment, causing namespaces to not be created when `useHelmInstall` is set to `true` and `namespace` is an empty string. For more information about these fields, see [useHelmInstall](/reference/custom-resource-helmchart#usehelminstall) and [namespace](/reference/custom-resource-helmchart#usehelminstall) in *HelmChart*.
    -* Fixes an issue where GitOps was enabled before the deploy key was added to the git provider.
    -* Hides copy commands on modals in the admin console when clipboard is not available.
    +![Adoption report graph showing three months of data](/images/adoption_rate_graph.png)
     
    -## 1.80.0
    +[View a larger version of this image](/images/adoption_rate_graph.png)
     
    -Released on August 8, 2022
    +As shown in the image above, the graph plots the number of active instances in each week in the selected time period, grouped by the version each instance is running. The key to the left of the graph shows the unique color that is assigned to each application version. You can use this color-coding to see at a glance the percent of active instances that were running different versions of your application across the selected time period. 
     
    -Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
    +Newer versions will enter at the bottom of the area chart, with older versions shown higher up.
     
    -### New Features {#new-features-1-80-0}
    -* Displays the `helm rollback` command when deploying previous revisions from the version history page in Helm-managed mode (Alpha). For more information about Helm-managed mode, see [Using Helm to Install an Application (Alpha)](/vendor/helm-install).
    +You can also hover over a color-coded section in the graph to view the number and percentage of active instances that were running the version in a given period.
     
    -### Improvements {#improvements-1-80-0}
    -* Password complexity rules will now be shown when changing the password in the admin console.
    -* Updates Kustomize from 3.5.4 to 4.5.7. Note that Kustomize v4.5.7 does not allow duplicate YAML keys to be present in your application manifests, whereas v3.5.4 did. Kustomize v4.5.7 is a bit slower than v3.5.4, so fetching and deploying new versions takes a bit more time. Our benchmarking did not show this performance degradation to be significant. Updating Kustomize resolves several critical and high severity CVEs, and unblocks additional feature work in the app manager.
    +If there are no active instances of your application, then the adoption rate graph displays a "No Instances" message.
     
    -### Bug Fixes {#bug-fixes-1-80-0}
    -* Fixes an issue where an ambiguous error message was shown when the endpoint field was modified in the license.
    -* Fixes a bug that caused values from the HelmChart custom resource that did not use KOTS template functions to be rendered into the downloaded values.yaml file after updating the configuration in Helm-managed mode. For more information about Helm-managed mode, see [Using Helm to Install an Application (Alpha)](/vendor/helm-install).
    -* Fixes an issue in Helm-managed mode that caused an error when clicking the **Analyze application** button on the Troubleshoot page in the admin console for an application that did not include a support bundle specification. For more information about Helm-managed mode, see [Helm-managed mode (Alpha)](/vendor/helm-install). For more information about analyzing an application, see [Create a Support Bundle Using the Admin Console](/enterprise/troubleshooting-an-app#create-a-support-bundle-using-the-admin-console) in *Troubleshooting an Application*.
    +## Adoption Metrics {#metrics}
     
    -## 1.79.0
    +The **Adoption** section includes metrics that show how frequently your customers discover and complete upgrades to new versions of your application. It is important that your users adopt new versions of your application so that they have access to the latest features and bug fixes. Additionally, when most of your users are on the latest versions, you can also reduce the number of versions for which you provide support and maintain documentation.
     
    -Released on August 4, 2022
    +The following shows an example of the metrics in the **Adoption** section:
     
    -Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+![Adoption rate metrics](/images/adoption_rate_metrics.png)
     
    -### New Features {#new-features-1-79-0}
    -* Adds an [HTTPSProxy](/reference/template-functions-static-context#httpsproxy) template function to return the address of the proxy that the Replicated admin console is configured to use.
    -* Dynamically adds collectors, analyzers, and custom redactors when collecting support bundles from the [troubleshoot](/enterprise/troubleshooting-an-app#create-a-support-bundle-using-the-admin-console) page in [Helm-managed mode (Alpha)](/vendor/helm-install).
    +[View a larger version of this image](/images/adoption_rate_metrics.png)
     
    -### Improvements {#improvements-1-79-0}
    -* Removes the "Add new application" option when running the admin console in [Helm-managed mode (Alpha)](/vendor/helm-install).
    +As shown in the image above, the **Adoption** section displays the following metrics:
    +* Instances on last three versions
    +* Unique versions
    +* Median relative age
    +* Upgrades completed
     
    -### Bug Fixes {#bug-fixes-1-79-0}
    -* Fixes an issue that caused the [affix](/reference/custom-resource-config#affix) property of config items to be ignored.
    -* Fixes an issue that caused the [help_text](/reference/custom-resource-config#help_text) property of config items to be ignored.
    -* Fixes an issue that caused the license card to not be updated when switching applications in the admin console.
    -* Fixes the ordering of versions on the [version history](/enterprise/updating-apps#update-an-application-in-the-admin-console) page in [Helm-managed mode (Alpha)](/vendor/helm-install).
    -* Fixes the display of node statistics in the Cluster Management tab.
    -* Fixes an issue where legacy encryption keys were not loaded properly during snapshot restores.
    -* Fixes an issue where snapshots would fail if a wildcard (`"*"`) was listed in the `additionalNamespaces` field of an Application manifest.
    -* Fixes an issue where the diff fails to generate for a version that excludes a Helm chart that was previously included.
    +Based on the time period selected, each metric includes an arrow that shows the change in value compared to the previous period. For example, if the median relative age today is 68 days, the selected time period is three months, and three months ago the median relative age was 55 days, then the metric would show an upward-facing arrow with an increase of 13 days. 
     
    -## 1.78.0
    +The following table describes each metric in the **Adoption** section, including the formula used to calculate its value and the recommended trend for the metric over time: 
     
    -Released on July 28, 2022
+<table>
+  <tr>
+    <th>Metric</th>
+    <th>Description</th>
+    <th>Target Trend</th>
+  </tr>
+  <tr>
+    <td>Instances on last three versions</td>
+    <td>
+      <p>Percent of active instances that are running one of the latest three versions of your application.</p>
+      <p>Formula: <code>count(instances on last 3 versions) / count(instances)</code></p>
+    </td>
+    <td>Increase towards 100%</td>
+  </tr>
+  <tr>
+    <td>Unique versions</td>
+    <td>
+      <p>Number of unique versions of your application running in active instances.</p>
+      <p>Formula: <code>count(distinct instance_version)</code></p>
+    </td>
+    <td>Decrease towards less than or equal to three</td>
+  </tr>
+  <tr>
+    <td>Median relative age</td>
+    <td>
+      <p>The relative age of a single instance is the number of days between the date that the instance's version was promoted to the channel and the date when the latest available application version was promoted to the channel.</p>
+      <p>Median relative age is the median value across all active instances for the selected time period and channel.</p>
+      <p>Formula: <code>median(relative_age(instance_version))</code></p>
+    </td>
+    <td>Depends on release cadence. For vendors who ship every four to eight weeks, decrease the median relative age towards 60 days or fewer.</td>
+  </tr>
+  <tr>
+    <td>Upgrades completed</td>
+    <td>
+      <p>Total number of completed upgrades across active instances for the selected time period and channel.</p>
+      <p>An upgrade is a single version change for an instance. An upgrade is considered complete when the instance deploys the new application version.</p>
+      <p>The instance does not need to become available (as indicated by reaching a Ready state) after deploying the new version for the upgrade to be counted as complete.</p>
+      <p>Formula: <code>sum(instance.upgrade_count)</code> across all instances</p>
+    </td>
+    <td>Increase compared to any previous period, unless you reduce your total number of live instances.</td>
+  </tr>
+</table>
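+
+To make the formulas in the table above concrete, the following is a minimal bash sketch of the first two metrics. It assumes a hypothetical `instance_versions.txt` file listing the version label reported by each active instance, one per line, and hard-codes the three most recently promoted versions:
+
+```bash
+# Hypothetical input: one installed version label per active instance, one per line.
+total=$(wc -l < instance_versions.txt)
+# Assumption: 1.0.3, 1.0.2, and 1.0.1 are the three most recently promoted versions.
+on_latest=$(grep -c -F -x -e '1.0.3' -e '1.0.2' -e '1.0.1' instance_versions.txt)
+unique=$(sort -u instance_versions.txt | wc -l)
+echo "Instances on last three versions: $((100 * on_latest / total))%"
+echo "Unique versions: $unique"
+```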
-Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+================
+File: docs/vendor/customer-reporting.md
+================
+# Customer Reporting
-### New Features {#new-features-1-78-0}
-* The analyze application button on the [Troubleshoot tab](/enterprise/troubleshooting-an-app) now works in [Helm managed mode (Alpha)](/vendor/helm-install).
-* Adds a deploy modal for versions on the [version history](/enterprise/updating-apps#update-an-application-in-the-admin-console) page in [Helm managed mode (Alpha)](/vendor/helm-install).
+This topic describes the customer and instance data displayed in the **Customers > Reporting** page of the Replicated Vendor Portal.
-### Improvements {#improvements-1-78-0}
-* Upgrades the internal database (Postgres) used by the admin console from `10.21-alpine` to `14.4-alpine`.
+## About the Customer Reporting Page {#reporting-page}
-### Bug Fixes {#bug-fixes-1-78-0}
-* Fixes an issue where all [dashboard links](/vendor/admin-console-adding-buttons-links) were rewritten to use the admin console hostname instead of the hostname provided in the application manifest.
-* Fixes a bug that caused errors when trying to generate `helm upgrade` commands from the [config page](/vendor/config-screen-about#admin-console-config-tab) in [Helm managed mode (Alpha)](/vendor/helm-install).
-* Fixes a bug where the same version could be listed twice on the [version history](/enterprise/updating-apps#update-an-application-in-the-admin-console) page in [Helm managed mode (Alpha)](/vendor/helm-install).
+The **Customers > Reporting** page displays data about the active application instances associated with each customer. The following shows an example of the **Reporting** page:
-## 1.77.0
+![Customer reporting page showing two active instances](/images/customer-reporting-page.png)
-Released on July 22, 2022
+[View a larger version of this image](/images/customer-reporting-page.png)
-Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+As shown in the image above, the **Reporting** page has the following main sections:
+* [Manage Customer](#manage-customer)
+* [Time to Install](#time-to-install)
+* [Download Portal](#download-portal)
+* [Instances](#instances)
-### New Features {#new-features-1-77-0}
-* Displays version history information for Helm charts when running in Helm-managed mode (Alpha). For more information, see [Using Helm to Install an Application (Alpha)](/vendor/helm-install)
-* License information can now be synced from the admin console's Dashboard and License pages for Helm charts when running in Helm-managed mode (Alpha). For more information, see [Using Helm to Install an Application (Alpha)](/vendor/helm-install)
-* Admin console now supports limited RBAC mode when running in Helm-managed mode (Alpha). For more information, see [Using Helm to Install an Application (Alpha)](/vendor/helm-install)
+### Manage Customer
-### Improvements {#improvements-1-77-0}
-* Better handling for network errors on the Helm install modal in Helm-managed mode (Alpha).
-* Helm install command now includes authentication in Helm-managed mode (Alpha).
-* Adresses the following high severity CVEs: CVE-2022-28946, CVE-2022-29162, and CVE-2022-1996.
+The manage customer section displays the following information about the customer:
-### Bug Fixes {#bug-fixes-1-77-0}
-* Fixes an issue that caused automatic deployments not to work on channels where semantic versioning was disabled, unless the version labels were valid [semantic versions](https://semver.org/).
-* Fixes an issue that caused errors after the admin console pod restart until the Dashboard tab is visited in Helm-managed mode (Alpha).
-* Begins using a temp directory instead of the current directory, to avoid file permissions issues when generating the `helm upgrade` command after editing the config. For more information, see [Using Helm to Install an Application (Alpha)](/vendor/helm-install).
+* The customer name
+* The channel the customer is assigned to
+* Details about the customer license:
+  * The license type
+  * The date the license was created
+  * The expiration date of the license
+* The features the customer has enabled, including:
+  * GitOps
+  * Air gap
+  * Identity
+  * Snapshots
+
+In this section, you can also view the Helm CLI installation instructions for the customer and download the customer license.
-## 1.76.1
+### Time to Install
-Released on July 15, 2022
+If the customer has one or more application instances that have reached a Ready status at least one time, then the **Time to install** section displays _License time to install_ and _Instance time to install_ metrics:
-Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+* **License time to install**: The time between when you create the customer license in the Vendor Portal, and when the application instance reaches a Ready status in the customer environment.
+* **Instance time to install**: The time between when the Vendor Portal records the first event for the application instance in the customer environment, and when the instance reaches a Ready status.
-### Bug Fixes {#bug-fixes-1-76-1}
-* Fixes an issue that caused private images in some collectors to not be rewritten during preflight checks.
-* Fixes an issue where the [Distribution](/reference/template-functions-static-context#distribution) template function returns an empty string in minimal RBAC installations running on OpenShift clusters.
-* Updates the golang.org/x/text go module to address CVE-2021-38561 with high severity.
-* Updates the local-volume-provider image to address CVE-2021-38561 with high severity.
-* Updates the MinIO image to address CVE-2022-1271 with high severity.
+A _Ready_ status indicates that all Kubernetes resources for the application are Ready. For example, a Deployment resource is considered Ready when the number of Ready replicas equals the total desired number of replicas. For more information, see [Enabling and Understanding Application Status](insights-app-status).
-## 1.76.0
+If the customer has no application instances that have ever reported a Ready status, or if you have not configured your application to deliver status data to the Vendor Portal, then the **Time to install** section displays a **No Ready Instances** message.
-Released on July 12, 2022
+If the customer has more than one application instance that has previously reported a Ready status, then the **Time to install** section displays metrics for the instance that most recently reported a Ready status for the first time.
-Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+For example, Instance A reported its first Ready status at 9:00 AM today. Instance B reported its first Ready status at 8:00 AM today, moved to a Degraded status, then reported a Ready status again at 10:00 AM today. In this case, the Vendor Portal displays the time to install metrics for Instance A, which reported its _first_ Ready status most recently.
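+
+As a reference point, you can inspect the replica counts that drive this Ready determination for a Deployment directly with `kubectl`. This is an illustrative query; the Deployment name and namespace are placeholders:
+
+```bash
+# Compare ready replicas to desired replicas for a Deployment.
+# "my-deployment" and "my-namespace" are placeholders.
+kubectl get deployment my-deployment --namespace my-namespace \
+  --output jsonpath='{.status.readyReplicas}/{.spec.replicas}{"\n"}'
+```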
-### New Features {#new-features-1-76-0}
-* Displays license information on the admin console Dashboard and License page for Helm charts when running in Helm-managed mode (Alpha). For more information, see [Using Helm to Install an Application (Alpha)](/vendor/helm-install)
+For more information about how to interpret the time to install metrics, see [Time to Install](instance-insights-details#time-to-install) in _Instance Details_.
-### Bug Fixes {#bug-fixes-1-76-0}
-* Fixes a bug that causes links defined in the [SIG Application custom resource](/reference/custom-resource-sig-application) to not be rewritten to the hostname used in the browser.
+### Download Portal
-## 1.75.0
+From the **Download portal** section, you can:
+* Manage the password for the Download Portal
+* Access the unique Download Portal URL for the customer
-Released on July 5, 2022
+You can use the Download Portal to give your customers access to the files they need to install your application, such as their license file or air gap bundles. For more information, see [Downloading Assets from the Download Portal](releases-share-download-portal).
-Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+### Instances
-### New Features {#new-features-1-75-0}
-* Adds a `helmUpgradeFlags` parameter to the [HelmChart custom resource](/reference/custom-resource-helmchart) when [Installing with Native Helm](/vendor/helm-overview). The specified flags are passed to the `helm upgrade` command. Note that the Replicated app manager uses `helm upgrade` for all installations, including initial installations, and not just when the application is upgraded.
+The **Instances** section displays details about the active application instances associated with the customer.
-### Bug Fixes {#bug-fixes-1-75-0}
-* Addresses the following critical severity CVEs: CVE-2022-26945, CVE-2022-30321, CVE-2022-30322, and CVE-2022-30323.
-* Fixes a bug that causes the [`push-images`](/reference/kots-cli-admin-console-push-images) command to fail when `--registry-password` and `--registry-username` are not specified for use with anonymous registries.
+You can click any of the rows in the **Instances** section to open the **Instance details** page. The **Instance details** page displays additional event data and computed metrics to help you understand the performance and status of each active application instance. For more information, see [Instance Details](instance-insights-details).
-## 1.74.0
+The following shows an example of a row for an active instance in the **Instances** section:
-Released on July 1, 2022
+![Row in the Instances section](/images/instance-row.png)
+[View a larger version of this image](/images/instance-row.png)
-Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+The **Instances** section displays the following details about each active instance:
+* The first seven characters of the instance ID.
+* The status of the instance. Possible statuses are Missing, Unavailable, Degraded, Ready, and Updating. For more information, see [Enabling and Understanding Application Status](insights-app-status).
+* The application version.
+* Details about the cluster where the instance is installed, including:
+  * The Kubernetes distribution for the cluster, if applicable.
+  * The Kubernetes version running in the cluster.
+  * Whether the instance is installed in a Replicated kURL cluster.
+  * (kURL Clusters Only) The number of nodes ready in the cluster.
+  * (KOTS Only) The KOTS version running in the cluster.
+  * The Replicated SDK version running in the cluster.
+  * The cloud provider and region, if applicable.
+* Instance uptime data, including:
+  * The timestamp of the last recorded check-in for the instance. For more information about what triggers an instance check-in, see [How the Vendor Portal Collects Instance Data](instance-insights-event-data#about-reporting) in _About Instance and Event Data_.
+  * An uptime graph of the previous two weeks. For more information about how the Vendor Portal determines uptime, see [Instance Uptime](instance-insights-details#instance-uptime) in _Instance Details_.
+  * The uptime ratio in the previous two weeks.
-### New Features {#new-features-1-74-0}
-* Adds the ability to use a preflight check to compare the Kubernetes installer included in particular application version against the installer that is currently deployed. For more information, see [Include a Supporting Preflight Check](/vendor/packaging-embedded-kubernetes#include-a-supporting-preflight-check) in Creating a Kubernetes Installer Specification.
+================
+File: docs/vendor/data-availability.md
+================
+# Data Availability and Continuity
-### Bug Fixes {#bug-fixes-1-74-0}
-* Fixes an issue where you could not deploy valid application releases if the previously deployed version resulted in a kustomize error.
-* Fixes an issue where kustomize would fail if a Helm chart and one of its sub-charts had the same name.
-* Fixes an issue that caused Velero pods to be stuck in a Pending state when using the Internal Storage snapshot setting in Kubernetes installer-created clusters.
-* Fixes an issue where the admin console would crash if a Helm chart with optional values but no values provided was included in a release.
+Replicated uses redundancy and a cloud-native architecture in support of availability and continuity of vendor data.
-## 1.73.0
+## Data Storage Architecture
-Released on June 24, 2022
+To ensure availability and continuity of necessary vendor data, Replicated uses a cloud-native architecture. This cloud-native architecture includes clustering and network redundancies to eliminate single points of failure.
+Replicated stores vendor data in various Amazon Web Services (AWS) S3 buckets and multiple databases. Data stored in the AWS S3 buckets includes registry images and air gap build data.
-Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+The following diagram shows the flow of air gap build data and registry images from vendors to enterprise customers.
-### New Features {#new-features-1-73-0}
-* Adds a `releaseName` parameter to the [HelmChart custom resource](/reference/custom-resource-helmchart) when [Installing with Native Helm](/vendor/helm-overview). Defaults to the chart name. Specifying a `releaseName` also allows you to deploy multiple instances of the same Helm chart, which was previously impossible.
+![Architecture diagram of Replicated vendor data storage](/images/data-storage.png)
-### Improvements {#improvements-1-73-0}
-* Improved UX on the version history page when the application is up to date or when there are new available versions.
-### Bug Fixes {#bug-fixes-1-73-0}
-* Fixes an issue where the preflight screen was displayed even if no analyzers were run.
-* Fixes an issue that prevented you from excluding a Helm chart that was previously included when [Installing with Native Helm](/vendor/helm-overview).
+[View a larger version of this image](/images/data-storage.png)
-## 1.72.2
+As shown in the diagram above, vendors push application images to an image registry. Replicated stores this registry image data in AWS S3 buckets, which are logically isolated by Vendor Portal team. Instances of the vendor's application that are installed by enterprise customers pull data from the image registry.
-Released on June 22, 2022
+For more information about how Replicated secures images pushed to the Replicated registry, see [Replicated Registry Security](packaging-private-registry-security).
-Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+The diagram also shows how enterprise customers access air gap build data from the customer download portal. Replicated stores this air gap build data in AWS S3 buckets.
-### Bug Fixes {#bug-fixes-1-72-2}
-* Fixed a bug that would cause duplicate Helm installations to be shown when running in helm-managed mode in clusters with open permissions.
+## Data Recovery
-## 1.72.1
+Our service provider's platform automatically restores customer applications and databases in the case of an outage. The provider's platform is designed to dynamically deploy applications within its cloud, monitor for failures, and recover failed platform components including customer applications and databases.
-Released on June 17, 2022
+For more information, see the [Replicated Security White Paper](https://www.replicated.com/downloads/Replicated-Security-Whitepaper.pdf).
-Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+## Data Availability
-### Improvements {#improvements-1-72-1}
-* Config values are now stored in a secret when the admin console runs in Helm-managed mode (Alpha), so that the values can be rerendered when a user returns to the Config page.
+Replicated availability is continuously monitored. For availability reports, see https://status.replicated.com.
-### Bug Fixes {#bug-fixes-1-72-1}
-* The dashboard "Disk Usage" graph now reports metrics for Prometheus deployments using the `kubernetes-service-endpoints` job.
-* The configured Prometheus address now shows as the placeholder in the "Configure Prometheus address" modal.
-* Fixes a bug that prevented an application from being deployed if a strict preflight check existed but was excluded.
-* Fixes a bug that was caused when a top-level `templates` folder is not present in a Helm chart that also has subcharts and top-level charts.
-* Fixes a bug where Kubernetes installer manifests included as part of an application release were applied when deploying the release.
-* Updates the MinIO image to address the following critical and high severity CVEs: CVE-2021-42836, CVE-2021-41266, CVE-2020-26160, CVE-2018-25032, CVE-2022-0778, CVE-2022-25235, CVE-2022-25236, CVE-2022-25315, CVE-2022-24407.
-* Updates the Dex image to address the following critical and high severity CVEs: CVE-2020-14040, CVE-2021-42836, CVE-2020-36067, CVE-2020-36066, CVE-2020-35380, CVE-2020-26521, CVE-2020-26892, CVE-2021-3121, CVE-2020-26160, CVE-2021-28831, CVE-2020-11080, CVE-2021-3450, CVE-2021-23840, CVE-2020-1967, CVE-2020-8286, CVE-2020-8285, CVE-2020-8231, CVE-2020-8177, CVE-2020-8169, CVE-2021-30139, CVE-2021-36159.
-* Updates the local-volume-provider image to address CVE-2022-1664 with critical severity.
+## Offsite Data Backup Add-on
-## 1.72.0
+For additional data redundancy, an offsite data backup add-on is available to copy customer data to a separate cloud provider. This add-on mitigates against potential data loss by our primary service provider. For more information, see [Offsite Data Backup](offsite-backup).
-Released on June 14, 2022
+================
+File: docs/vendor/database-config-adding-options.md
+================
+# About Managing Stateful Services
-Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+This topic provides recommendations for managing stateful services that you install into existing clusters.
-### New Features {#new-features-1-72-0}
-* The admin console now shows the chart version and icon for the currently deployed Helm chart when running in Helm-managed mode (Alpha).
+## Preflight Checks for Stateful Services
-### Improvements {#improvements-1-72-0}
-* Moves **Change password**, **Add new application**, and **Log out** functionality into a new menu in the top right of the navigation bar.
-* Shows a meaningful error message when the license is expired on the dashboard version card.
+If you expect to also install stateful services into existing clusters, you will likely want to expose [preflight analyzers that check for the existence of a storage class](https://troubleshoot.sh/reference/analyzers/storage-class/).
-### Bug Fixes {#bug-fixes-1-72-0}
-* Fixes a bug that caused the deploy confirmation modal on the dashboard to always show "Redeploy" even if the version was not already deployed.
-* Fixes a discrepancy between the license expiry date in the vendor portal and the expiry date in the admin console.
-* Sets the User-Agent to the KOTS version string in outgoing HTTP requests where missing.
-* Removes the **Registry settings** tab when running in Helm-managed mode (Alpha).
-* Removes **Diff versions** links from the application dashboard and version history page when running in Helm-managed mode (Alpha).
-* Removes the instructions on how to edit files on the **View files** tab when running in Helm-managed mode (Alpha).
+If you are allowing end users to provide connection details for external databases, you can often use a troubleshoot.sh built-in [collector](https://troubleshoot.sh/docs/collect/) and [analyzer](https://troubleshoot.sh/docs/analyze/) to validate the connection details for [Postgres](https://troubleshoot.sh/docs/analyze/postgresql/), [Redis](https://troubleshoot.sh/docs/collect/redis/), and many other common datastores. These can be included in both `Preflight` and `SupportBundle` specifications.
-## 1.71.0
+## About Adding Persistent Datastores
-Released on June 1, 2022
+You can integrate persistent stores, such as databases, queues, and caches. There are options to give an end user, such as embedding an instance alongside the application or connecting an application to an external instance that they will manage.
-Support for Kubernetes: 1.21, 1.22, 1.23, and 1.24
+For an example of integrating persistent datastores, see [Example: Adding Database Configuration Options](tutorial-adding-db-config).
-### New Features
-* Adds a `--port` flag to the `kots install` and `kots admin-console` commands to allow for overriding the local port on which to access the admin console.
+================
+File: docs/vendor/embedded-disaster-recovery.mdx
+================
+# Disaster Recovery for Embedded Cluster (Alpha)
-### Improvements
-* A temporary success message is displayed if preflight checks pass for a version.
+This topic describes the disaster recovery feature for Replicated Embedded Cluster, including how to enable disaster recovery for your application. It also describes how end users can configure disaster recovery in the Replicated KOTS Admin Console and restore from a backup.
-### Bug Fixes
-* Fixes a nil pointer panic when checking for updates if a file in the new release contains incomplete metadata information.
+:::important
+Embedded Cluster disaster recovery is an Alpha feature. This feature is subject to change, including breaking changes. To get access to this feature, reach out to Alex Parker at [alexp@replicated.com](mailto:alexp@replicated.com).
+:::
-## 1.70.1
+:::note
+Embedded Cluster does not support backup and restore with the KOTS snapshots feature. For more information about using snapshots for existing cluster installations with KOTS, see [About Backup and Restore with Snapshots](/vendor/snapshots-overview).
+:::
-Released on May 19, 2022
+## Overview
-Support for Kubernetes: 1.21, 1.22, and 1.23
+The Embedded Cluster disaster recovery feature allows your customers to take backups from the Admin Console and perform restores from the command line. Disaster recovery for Embedded Cluster is implemented with Velero. For more information about Velero, see the [Velero](https://velero.io/docs/latest/) documentation.
-### Improvements
-* When enabling GitOps, the initial commit properly translates all labeled secrets to SealedSecrets.
-* Improves the application dashboard and version history pages when GitOps is enabled.
-* Prevents a user from generating a support bundle while another support bundle is being generated, and lets the user return to the `/troubleshoot/generate` route to see the progress of the current support bundle generation.
-* Improves editing for scheduling automatic snapshots by making the cron expression input always visible.
-* Adds a collector and analyzer for cases when NFS configuration fails because the `mount.nfs` binary is missing on the host.
-* Cleans up failed `kotsadm-fs-minio-check` pods after the NFS backend for snapshots has been configured successfully.
-* Supports Helm v3.8.2 in the app manager.
-* Shows Helm installations when running in Helm managed mode (alpha).
+The backups that your customers take from the Admin Console will include both the Embedded Cluster infrastructure and the application resources that you specify.
-### Bug Fixes
-* Fixes an issue where uploading the airgap bundle using the admin console hangs at 0%.
-* Fixes an issue where applications using semantic versioning did not receive updates when `--app-version-label` was used in the [kots install](/reference/kots-cli-install) command.
-* Fixes an issue where the application was re-deployed when the admin console restarted.
-* Fixes an issue where existing Host Path and NFS snapshots did not show up after migrating away from MinIO. Note that this fix is only applicable to new migrations. Users who have already migrated away from MinIO can continue to take new snapshots, but pre-migration snapshots will be missing.
-* Fixes an issue where changing the API version for a native Kubernetes object caused that object to be deleted and recreated instead of updated.
-* Fixes an issue where image pull secrets were not created in additional namespaces when only Helm charts were used by the application.
-* Fixes an issue where custom icons did not show on the TLS/cert page on Safari and Chrome.
-* Fixes an issue where the admin console loaded resources from the internet.
-* Fixes critical and high CVEs found in the KOTS Go binaries.
+The Embedded Cluster infrastructure that is backed up includes components such as the KOTS Admin Console and the built-in registry that is deployed for air gap installations. No configuration is required to include Embedded Cluster infrastructure in backups. Vendors specify the application resources to include in backups by configuring a Velero Backup resource in the application release.
-## 1.70.0
+## Requirements
-Released on May 2, 2022
+Embedded Cluster disaster recovery has the following requirements:
-Support for Kubernetes: 1.21, 1.22, and 1.23
+* The disaster recovery feature flag must be enabled for your account. To get access to disaster recovery, reach out to Alex Parker at [alexp@replicated.com](mailto:alexp@replicated.com).
+* Embedded Cluster version 1.22.0 or later
+* Backups must be stored in S3-compatible storage
-### New Features
-* Adds a `weight` parameter to the [Helm custom resource](/reference/custom-resource-helmchart) when [Installing with Native Helm](/vendor/helm-overview). Charts are applied by weight in ascending order, with lower numbered weights applied first.
-* Adds the ability to change the admin console password from the **Change Password** link in the admin console page footer.
-* Adds the ability to download `Config` file types for a given application sequence.
-* Adds a template function `YamlEscape` to escape a string for inclusion in a YAML file.
-* Adds the ability to allow uploading new TLS certificates used by kURL proxy with the [`reset-tls`](/reference/kots-cli-reset-tls) command.
-* Adds the ability to dynamically set the number of results per page when browsing the application version history.
+## Limitations and Known Issues
-### Improvements
-* When preflight checks are skipped during an initial installation, the application is still deployed.
-* License and preflight errors are now displayed when performing an automated installation using the CLI.
-* When changing the password using the `kubectl kots reset-password`, all active sessions are terminated and new sessions can be established with the new password.
+Embedded Cluster disaster recovery has the following limitations and known issues:
-### Bug Fixes
-* Fixes an issue where ingress status informers always reported as "Missing" in Kubernetes 1.22+.
-* Fixes an issue that caused image garbage collection in Kubernetes installer-created clusters (embedded clusters) to remove images outside of the application's dedicated registry namespace.
-* Fixes an issue where a newer version might not have a **Deploy** button after the configuration is updated for the currently deployed version.
-* Fixes an issue where the legends on the dashboard graphs were blank.
-* Fixes an issue where hovering on a graph the tooltip showed "LLL" instead of a formatted date.
+* During a restore, the version of the Embedded Cluster installation assets must match the version of the application in the backup. So if version 0.1.97 of your application was backed up, the Embedded Cluster installation assets for 0.1.97 must be used to perform the restore. Use `./APP_SLUG version` to check the version of the installation assets, where `APP_SLUG` is the unique application slug. For example:
-## 1.69.1
+  ![version command](/images/ec-version-command.png)
-Released on April 19, 2022
+  [View a larger version of this image](/images/ec-version-command.png)
+* Any Helm extensions included in the `extensions` field of the Embedded Cluster Config are _not_ included in backups. Helm extensions are reinstalled as part of the restore process. To include Helm extensions in backups, configure the Velero Backup resource to include the extensions using namespace-based or label-based selection. For more information, see [Configure the Velero Custom Resources](#config-velero-resources) below.
-### Improvements
-* Updates `local-volume-provider` to v0.3.3.
+* Users can only restore from the most recent backup.
-### Bug Fixes
-* Fixes an issue where links and text within the `app.k8s.io/v1beta1` `Application` kind were not templated.
+* Velero is installed only during the initial installation process. Enabling the disaster recovery license field for customers after they have already installed will not do anything.
-## 1.69.0
+* If the `--admin-console-port` flag was used during install to change the port for the Admin Console, note that during a restore the Admin Console port will be used from the backup and cannot be changed. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install).
-Released on April 8, 2022
+## Configure Disaster Recovery
-Support for Kubernetes: 1.21, 1.22, and 1.23
+This section describes how to configure disaster recovery for Embedded Cluster installations. It also describes how to enable access to the disaster recovery feature on a per-customer basis.
-### New Features
-* Adds the ability to switch from a community license to a different license for the same application. See [Changing a Community License](/enterprise/updating-licenses#change-community-licenses).
+### Configure the Velero Custom Resources {#config-velero-resources}
-### Improvements
-* The [ensure-secret](/reference/kots-cli-docker-ensure-secret) command now creates a new application version, based on the latest version, that adds the Docker Hub image pull secret to all Kubernetes manifests that have images. This avoids Docker Hub's rate limiting.
-* CA certificates for snapshot storage endpoints can now be uploaded on the snapshot page of the admin console.
-* User sessions expire after 12 hours of inactivity.
-* Removes expired sessions from the store in a daily cleanup job.
-* Adds a Beta option for vendors to exclude MinIO images from app manager air gap bundles from the download portal. For more information, see [ MinIO from Air Gap Bundles](/vendor/packaging-air-gap-excluding-minio) in the documentation.
+This section describes how to set up Embedded Cluster disaster recovery for your application by configuring Velero [Backup](https://velero.io/docs/latest/api-types/backup/) and [Restore](https://velero.io/docs/latest/api-types/restore/) custom resources in a release.
-### Bug Fixes
-* Fixes an issue where the registry image pull secrets were not applied in the additional namespaces specified by the application in minimal RBAC installations.
-* Fixes an issue where some releases could be missed if they were promoted while other releases were being downloaded and semantic versioning was enabled.
-* Fixes an issue where the "Select a different file" link did not allow the user to change the selected file on the config page.
+To configure Velero Backup and Restore custom resources for Embedded Cluster disaster recovery:
-## 1.68.0
+1. In a new release containing your application files, add a Velero Backup resource. In the Backup resource, use namespace-based or label-based selection to indicate the application resources that you want to be included in the backup. For more information, see [Backup API Type](https://velero.io/docs/latest/api-types/backup/) in the Velero documentation.
-Released on April 4, 2022
+   :::important
+   If you use namespace-based selection to include all of your application resources deployed in the `kotsadm` namespace, ensure that you exclude the Replicated resources that are also deployed in the `kotsadm` namespace. Because the Embedded Cluster infrastructure components are always included in backups automatically, this avoids duplication.
+   :::
-Support for Kubernetes: 1.21, 1.22, and 1.23
+   **Example:**
-### New Features
-* Adds the ability to make a KOTS application version required. Required version cannot be skipped during upgrades. See [Managing Releases with the Vendor Portal](/vendor/releases-creating-releases).
-* Adds the `supportMinimalRBACPrivileges` field to the Application custom resource, and adds the `--use-minimal-rbac` flag to the `kots install` command. `supportMinimalRBACPrivileges` indicates that the application supports minimal RBAC, but it will not be used unless the `--use-minimal-rbac` flag is passed to the `kots install` command. See [`supportMinimalRBACPrivileges`](/reference/custom-resource-application#supportminimalrbacprivileges) in the Application custom resource.
+   The following Backup resource uses namespace-based selection to include application resources deployed in the `kotsadm` namespace:
-### Improvements
-* Adds pagination to the version history page and improves the admin console API performance.
-* Displays on the cluster management page of the admin console the labels applied to nodes in a Kubernetes installer-created cluster.
-* The default Troubleshoot analyzers will now specifically call out issues with Envoy/Contour if detected.
+   ```yaml
+   apiVersion: velero.io/v1
+   kind: Backup
+   metadata:
+     name: backup
+   spec:
+     # Back up the resources in the kotsadm namespace
+     includedNamespaces:
+     - kotsadm
+     orLabelSelectors:
+     - matchExpressions:
+       # Exclude Replicated resources from the backup
+       - { key: kots.io/kotsadm, operator: NotIn, values: ["true"] }
+   ```
-### Bug Fixes
-* Fixes a bug with automatic updates where new versions would be deployed automatically regardless of preflight outcomes. When automatic updates are configured, new versions will now only be deployed automatically if the preflights succeed.
-* Fixes an issue where NFS snapshots could not be configured when MinIO is enabled in the cluster.
-* Fixes an issue where updating the snapshot storage location to NFS or Host Path would incorrectly display a dialog indicating that Velero was not installed and configured properly.
-* Fixes an issue that caused wrong metadata to be used at application install time when installing a specific version of an application with the `--app-version-label` flag.
-* Fixes an issue that caused the support bundle analysis and/or redactions to not show up in the Troubleshoot page in the admin console in some cases.
-* Fixes an issue where deployments weren't blocked when strict preflight analyzers failed due to parse/process errors.
-* Fixes a style bug that caused the grid of metric graphs to be broken when there were more than three graphs.
-* Fixes an issue on the config editor page that caused an element to be hidden under the navbar when the corresponding config item was clicked on from the sidebar.
-* Fixes an issue where a version that was pulled in via automatic checks and deployed via automatic deployments would not be properly updated on the dashboard version card.
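+
+   As a quick sanity check, you can preview which resources a namespace-plus-label filter like the one above matches in a running cluster. This is an illustrative `kubectl` query, not part of the Velero configuration:
+
+   ```bash
+   # List resources in the kotsadm namespace that do NOT carry the
+   # kots.io/kotsadm=true label, mirroring the Backup selector above.
+   kubectl get all --namespace kotsadm --selector 'kots.io/kotsadm!=true'
+   ```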
-* Fixes an issue where two versions could show as being currently deployed on the version history page when using automatic deployments.
-* Fixes an issue where AWS IAM instance roles could not be used when configuring the snapshot storage destination.
+1. In the same release, add a Velero Restore resource. In the `backupName` field of the Restore resource, include the name of the Backup resource that you created. For more information, see [Restore API Type](https://velero.io/docs/latest/api-types/restore/) in the Velero documentation.
-## 1.67.0
+   **Example**:
-Released on March 21, 2022
+   ```yaml
+   apiVersion: velero.io/v1
+   kind: Restore
+   metadata:
+     name: restore
+   spec:
+     # the name of the Backup resource that you created
+     backupName: backup
+     includedNamespaces:
+     - '*'
+   ```
-Support for Kubernetes: 1.21, 1.22, and 1.23
+1. For any image names that you include in your Backup and Restore resources, rewrite the image name using the Replicated KOTS [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry), [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost), and [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace) template functions. This ensures that the image name is rendered correctly during deployment, allowing the image to be pulled from the user's local image registry (such as in air gap installations) or through the Replicated proxy registry.
-### New Features
-* Adds support for installing a specific application version. For more information about installing a specific application version, see [Online Installation in Existing Clusters](/enterprise/installing-existing-cluster and [Online Installation with the Kubernetes Installer](/enterprise/installing-embedded-cluster).
-* Extends the ability of status informers to detect if the application is being updated.
-* Adds the ability to provide a strict preflight, which cannot be skipped and must not have any failure outcomes. Any failure outcomes will prevent the user from deploying the application. For more information on strict preflights, see [Define KOTS Preflight Checks​](/vendor/preflight-kots-defining).
-* New versions can automatically be deployed in the admin console, regardless of whether the vendor uses semantic versioning. For more information about automatically deploying new versions, see [Configure Automatic Updates​](/enterprise/updating-apps#configure-automatic-updates) in Updating an Application.
+   **Example:**
-### Bug Fixes
-* Fixes an issue that could cause images that are still used by the application to be deleted from the private registry in a Kubernetes installer-created cluster during image garbage collection.
-* Fixes an issue where the same license could be installed more than once in some cases.
-* Fixes an issue where the Cluster Management tab was not always initially present for Kubernetes installer-created clusters.
-* Fixes an issue where attempting to re-download a pending application version would fail after upgrading the admin console from KOTS 1.65.
-* Fixes an issue where the application icon in the metadata did not show as the favicon on the TLS pages.
+   ```yaml
+   apiVersion: velero.io/v1
+   kind: Restore
+   metadata:
+     name: restore
+   spec:
+     hooks:
+       resources:
+       - name: restore-hook-1
+         includedNamespaces:
+         - kotsadm
+         labelSelector:
+           matchLabels:
+             app: example
+         postHooks:
+         - init:
+             initContainers:
+             - name: restore-hook-init1
+               image:
+                 # Use HasLocalRegistry, LocalRegistryHost, and LocalRegistryNamespace
+                 # to template the image name
+                 registry: '{{repl HasLocalRegistry | ternary LocalRegistryHost "proxy.replicated.com" }}'
+                 repository: '{{repl HasLocalRegistry | ternary LocalRegistryNamespace "proxy/my-app/quay.io/my-org" }}/nginx'
+                 tag: 1.24-alpine
+   ```
+   For more information about how to rewrite image names using the KOTS [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry), [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost), and [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace) template functions, including additional examples, see [Task 1: Rewrite Image Names](helm-native-v2-using#rewrite-image-names) in _Configuring the HelmChart v2 Custom Resource_.
-## 1.66.0
+1. If you support air gap installations, add any images that are referenced in your Backup and Restore resources to the `additionalImages` field of the KOTS Application custom resource. This ensures that the images are included in the air gap bundle for the release so they can be used during the backup and restore process in environments with limited or no outbound internet access. For more information, see [additionalImages](/reference/custom-resource-application#additionalimages) in _Application_.
-Released on March 8, 2022
+   **Example:**
-Support for Kubernetes: 1.21, 1.22, and 1.23
+   ```yaml
+   apiVersion: kots.io/v1beta1
+   kind: Application
+   metadata:
+     name: my-app
+   spec:
+     additionalImages:
+     - elasticsearch:7.6.0
+     - quay.io/orgname/private-image:v1.2.3
+   ```
-### New Features
-* Adds the ability to exclude the applications or the admin console from full snapshot restores using the [`kots restore`](/reference/kots-cli-restore-index) command.
-* Adds the ability to display the command to restore only the admin console from a [full snapshot](/enterprise/snapshots-understanding#full-snapshots-recommended) on the Full Snapshots page in the admin console.
+1. (Optional) Use Velero functionality like [backup](https://velero.io/docs/main/backup-hooks/) and [restore](https://velero.io/docs/main/restore-hooks/) hooks to customize the backup and restore process as needed.
-### Improvements
-* Adds the [`--no-port-forward`](/reference/kots-cli-install#usage) flag to the `kots install` command to disable automatic port-forwarding. The old `--port-forward` flag has been deprecated.
+   **Example:**
-### Bug Fixes
-* Corrects the placeholder Prometheus URL in the admin console dashboard so that it is accurate for embedded installations.
-* Fixes a bug where the warning message sometimes printed incorrectly when a mismatch was detected between the kots CLI version and the version of the admin console that was running in the cluster.
-* Fixes a bug where the **See details** button on the support bundle analysis page did not show any information about an unhealthy pod.
-* Allows a user to re-upload a license if the application is not yet installed.
-* Allows GitOps to be disabled when it is enabled but has an invalid configuration. Previously, you were required to fix the configuration before disabling GitOps.
+   For example, a Postgres database might be backed up using pg_dump to extract the database into a file as part of a backup hook. It can then be restored using the file in a restore hook:
-## 1.65.0
+   ```yaml
+   podAnnotations:
+     backup.velero.io/backup-volumes: backup
+     pre.hook.backup.velero.io/command: '["/bin/bash", "-c", "PGPASSWORD=$POSTGRES_PASSWORD pg_dump -U {{repl ConfigOption "postgresql_username" }} -d {{repl ConfigOption "postgresql_database" }} -h 127.0.0.1 > /scratch/backup.sql"]'
+     pre.hook.backup.velero.io/timeout: 3m
+     post.hook.restore.velero.io/command: '["/bin/bash", "-c", "[ -f \"/scratch/backup.sql\" ] && PGPASSWORD=$POSTGRES_PASSWORD psql -U {{repl ConfigOption "postgresql_username" }} -h 127.0.0.1 -d {{repl ConfigOption "postgresql_database" }} -f /scratch/backup.sql && rm -f /scratch/backup.sql;"]'
+     post.hook.restore.velero.io/wait-for-ready: 'true' # waits for the pod to be ready before running the post-restore hook
+   ```
-Released on February 25, 2022
+1. Save and promote the release to a development channel for testing.
-Support for Kubernetes: 1.20, 1.21, 1.22, and 1.23
+### Enable the Disaster Recovery Feature for Your Customers
-### New Features
-* Permanently enables the redesigned admin console app dashboard and version history pages introduced in [KOTS 1.60.0](#1600).
-* Application versions that fail to download now appear in the version history. A new button is also present with the version to allow the download to be retried. Previously, these failures were lost when a newer version was downloaded successfully.
-* Introduces the [`kots upstream download`](../reference/kots-cli-upstream-download) command to retry downloading a failed update of the upstream application.
+After configuring disaster recovery for your application, you can enable it on a per-customer basis with the **Allow Disaster Recovery (Alpha)** license field.
-### Improvements
-* The port-forward initiated to access the admin console will continually retry when it is disconnected. If a new kotsadm pod comes up, the port forward will switch and forward to the new pod.
-* If the `kots` CLI version doesn't match the KOTS API version in the cluster, a warning message is displayed advising the user to update the `kots` CLI to the appropriate version.
+To enable disaster recovery for a customer:
-### Bug Fixes
-* Fixes uploading preflight results from the CLI.
-* Fixes a bug where the app icon in the metadata would not show as the favicon in Google Chrome.
+1. In the Vendor Portal, go to the [Customers](https://vendor.replicated.com/customers) page and select the target customer.
-## 1.64.0
+1. On the **Manage customer** page, under **License options**, enable the **Allow Disaster Recovery (Alpha)** field.
+
+   When your customer installs with Embedded Cluster, Velero will be deployed if the **Allow Disaster Recovery (Alpha)** license field is enabled.
+
+## Take Backups and Restore
-Released on February 18, 2022
+This section describes how your customers can configure backup storage, take backups, and restore from backups.
-Support for Kubernetes: 1.20, 1.21, 1.22, and 1.23
+### Configure Backup Storage and Take Backups in the Admin Console
-### Improvements
-* A MinIO image will no longer be present in new deployments when MinIO is not specified as an add-on in the Kubernetes installer specification.
-* Enables an alternative that does not use MinIO for `hostPath` snapshots if the MinIO image is not present on the instance.
+Customers with the **Allow Disaster Recovery (Alpha)** license field can configure their backup storage location and take backups from the Admin Console. -### Bug Fixes -* Fixes a bug that showed an incorrect diff on the version history page. -* Fixes deploy log errors for PVCs when using OpenEBS with Kubernetes 1.19 through 1.21. +To configure backup storage and take backups: -## 1.63.0 +1. After installing the application and logging in to the Admin Console, click the **Disaster Recovery** tab at the top of the Admin Console. -Released on February 11, 2022 +1. For the desired S3-compatible backup storage location, enter the bucket, prefix (optional), access key ID, access key secret, endpoint, and region. Click **Update storage settings**. -Supported on Kubernetes: 1.20, 1.21, 1.22, and 1.23 +   ![backup storage settings](/images/dr-backup-storage-settings.png) -### New Features -* Changes the [`kots upstream upgrade`](../reference/kots-cli-upstream-upgrade) command to be synchronous by default and exposes error messages for it. +   [View a larger version of this image](/images/dr-backup-storage-settings.png) -### Improvements -* Sets the Native Helm timeout to 60 minutes instead of 5 minutes. +1. (Optional) From this same page, configure scheduled backups and a retention policy for backups. -## 1.62.0 +   ![scheduled backups](/images/dr-scheduled-backups.png) + +   [View a larger version of this image](/images/dr-scheduled-backups.png) -Released on February 4, 2022 +1. In the **Disaster Recovery** submenu, click **Backups**. Backups can be taken from this screen. -Supported on Kubernetes: 1.20, 1.21, 1.22, and 1.23 +   ![backups page](/images/dr-backups.png) -### New Features -* Adds [`targetKotsVersion`](../reference/custom-resource-application#targetkotsversion) as a field in the application spec. This field allows you to set a target version of KOTS for a release. The initial installation of an application will fail if the currently installed KOTS version is greater than the target version. When a target version is set, end users will receive a notification in the admin console if their currently deployed version of KOTS is less than the target version. For more informaiton, see the documentation. +   [View a larger version of this image](/images/dr-backups.png) -* Adds [`minKotsVersion`](../reference/custom-resource-application/#minkotsversion-beta) (Beta) as a field in the application spec. This allows you to specify the minimum supported KOTS version for a release. An application cannot be installed if the currently deployed KOTS version is less than the minimum KOTS version specified for a release. See the [`minKotsVersion` documentation](../reference/custom-resource-application/#minkotsversion-beta) for caveats since this is a Beta feature. +### Restore from a Backup -### Improvements -* Defaults [`kubectl kots get config --appslug`](../reference/kots-cli-get-config) to the app slug of the deployed application if there is only one in the namespace. -* Defaults [`kubectl kots get config --sequence`](../reference/kots-cli-get-config) to the sequence of the latest available version. +To restore from a backup: -### Bug Fixes -* Fixes a bug that caused the "Details" link, which shows the [application status](../vendor/admin-console-display-app-status), to be not visible in the new dashboard UI. -* Fixes the omission of certain password values from the rendered YAML file when using [`kubectl kots pull`](../reference/kots-cli-get-config). -* Fixes an issue that caused the license file included in a support bundle to contain a long array of integers instead of a string in the signature field.
-* Fixes an issue which caused setting up a host path as a snapshot storage destination to fail. +1. SSH onto a new machine where you want to restore from a backup. -## 1.61.0 +1. Download the Embedded Cluster installation assets for the version of the application that was included in the backup. You can find the command for downloading Embedded Cluster installation assets in the **Embedded Cluster install instructions dialog** for the customer. For more information, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded). -Released on February 1, 2022 +   :::note +   The version of the Embedded Cluster installation assets must match the version that is in the backup. For more information, see [Limitations and Known Issues](#limitations-and-known-issues). +   ::: -Supported on Kubernetes: 1.20, 1.21, 1.22, and 1.23 +1. Run the restore command: -### New Features -* Adds a CLI command to [get all available versions for an application](../reference/kots-cli-get-versions) from the app manager. -* Adds the ability to block installing or upgrading an application if the current KOTS version is incompatible with the KOTS version required by the application. This feature is experimental and is only available to vendors who have requested access. +   ```bash +   sudo ./APP_SLUG restore +   ``` +   Where `APP_SLUG` is the unique application slug. -### Bug Fixes -* Fixes a bug that caused images to be pushed to a private registry multiple times during an air gap installation. -* Fixes a bug that erroneously displays a message to edit the current config when performing a new installation. -* Fixes an issue that caused [image garbage collection](../enterprise/image-registry-embedded-cluster#enable-and-disable-image-garbage-collection) to only remove images with the "latest" tag. +   Note the following requirements and guidance for the `restore` command (a combined example is provided after these steps): -## 1.60.0 +   * If the installation is behind a proxy, the same proxy settings provided during install must be provided to the restore command using `--http-proxy`, `--https-proxy`, and `--no-proxy`. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install). -Released on January 25, 2022 +   * If the `--cidr` flag was used during install to set the IP address ranges for Pods and Services, this flag must be provided with the same CIDR during the restore. If this flag is not provided or is provided with a different CIDR, the restore will fail with an error message telling you to rerun with the appropriate value. However, it will take some time before that error occurs. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install). -Supported on Kubernetes: 1.20, 1.21, and 1.22 +   * If the `--local-artifact-mirror-port` flag was used during install to change the port for the Local Artifact Mirror (LAM), you can optionally use the `--local-artifact-mirror-port` flag to choose a different LAM port during restore. For example, `restore --local-artifact-mirror-port=50000`. If no LAM port is provided during restore, the LAM port that was supplied during installation will be used. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install). -### New Features -* The admin console app dashboard and version history pages have been redesigned!
See [this blog](https://www.replicated.com/blog/new-features-announced-improvements-to-ux-host-preflights/) for more details. +   You will be guided through the process of restoring from a backup. + +1. When prompted, enter the information for the backup storage location. -### Improvements -* Updates MinIO to RELEASE.2022-01-08T03-11-54Z (resolves CVE-2021-43858 CVE). -* Updates Postgres to version 10.19. +   ![Restore prompts on the command line](/images/dr-restore.png) +   [View a larger version of this image](/images/dr-restore.png) -### Bug Fixes -* Fixes an issue that caused images to be pushed multiple times during an [airgap installation](/enterprise/installing-existing-cluster-airgapped) when the [Native Helm](/vendor/helm-overview#native) feature is enabled. -* Fixes an issue that prevented the deployment status labels from breaking into multiple lines on small displays. +1. When prompted, confirm that you want to restore from the detected backup. -## 1.59.3 +   ![Restore from detected backup prompt on the command line](/images/dr-restore-from-backup-confirmation.png) +   [View a larger version of this image](/images/dr-restore-from-backup-confirmation.png) -Released on January 21, 2022 +   After some time, the Admin Console URL is displayed: -Supported on Kubernetes: 1.20, 1.21, and 1.22 +   ![Admin Console URL printed on the command line](/images/dr-restore-admin-console-url.png) +   [View a larger version of this image](/images/dr-restore-admin-console-url.png) -### Improvements -* Updates the [kubectl](../reference/custom-resource-application#kubectlversion) patch versions and added kubectl version 1.22.x. +1. (Optional) If the cluster should have multiple nodes, go to the Admin Console to get a join command and join additional nodes to the cluster. For more information, see [Managing Multi-Node Clusters with Embedded Cluster](/enterprise/embedded-manage-nodes). -### Bug Fixes -* Fixes an issue that caused the load balancer services to regenerate, resulting in downtime. +1. Type `continue` when you are ready to proceed with the restore process. -## 1.59.2 +   ![Type continue when you are done adding nodes](/images/dr-restore-continue.png) +   [View a larger version of this image](/images/dr-restore-continue.png) -Release on January 18, 2022 +   After some time, the restore process completes. -Supported on Kubernetes: 1.19, 1.20, and 1.21 +   If the `restore` command is interrupted during the restore process, you can resume by rerunning the `restore` command and selecting to resume the previous restore. This is useful if your SSH session is interrupted during the restore. -### Bug Fixes -* Adds a more descriptive error message to the KOTS CLI when the provided host path does not exist for snapshots storage. -* Fixes a bug that caused the "Send bundle to vendor" link to display when this feature is not enabled. -* Resolves CSS style issues. -* Fixes a bug where excluded Helm charts could not change between `UseHelmInstall: true` and `UseHelmInstall: false` without errors. -* Fixes a problem where the "Internal Storage" option was not selected by default in kURL clusters with the `disableS3` option set. -* Fixes a bug when Helm dependencies are aliased for Helm-native releases.
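+   **Example:** The following is a sketch of a `restore` command for a hypothetical installation that was originally installed behind a proxy and with a custom `--cidr` value. The application slug, proxy addresses, and CIDR shown are placeholders, and must match the values that were provided during install:
+
+   ```bash
+   # Placeholder values shown; reuse the exact flags that were used at install time
+   sudo ./my-app restore \
+     --http-proxy http://10.0.0.5:3128 \
+     --https-proxy http://10.0.0.5:3128 \
+     --no-proxy localhost,127.0.0.1,internal.example.com \
+     --cidr 172.16.0.0/16
+   ```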
+================ +File: docs/vendor/embedded-overview.mdx +================ +import EmbeddedCluster from "../partials/embedded-cluster/_definition.mdx" +import Requirements from "../partials/embedded-cluster/_requirements.mdx" +import EmbeddedClusterPortRequirements from "../partials/embedded-cluster/_port-reqs.mdx" +import HaArchitecture from "../partials/embedded-cluster/_multi-node-ha-arch.mdx" -## 1.59.1 +# Embedded Cluster Overview -Released on December 29, 2021 +This topic provides an introduction to Replicated Embedded Cluster, including a description of the built-in extensions installed by Embedded Cluster, an overview of the Embedded Cluster single-node and multi-node architecture, and requirements and limitations. -Supported on Kubernetes: 1.19, 1.20, and 1.21 +:::note +If you are instead looking for information about creating Kubernetes Installers with Replicated kURL, see the [Replicated kURL](/vendor/packaging-embedded-kubernetes) section. +::: -### Bug Fixes -* Fixes a `panic: runtime error` that occurs when the [`kots upstream upgrade`](../reference/kots-cli-upstream-upgrade) command is run. +## Overview -## 1.59.0 +<EmbeddedCluster /> -Released on December 22, 2021 +## Architecture -Supported on Kubernetes: 1.19, 1.20, and 1.21 +This section describes the Embedded Cluster architecture, including the built-in extensions deployed by Embedded Cluster. -### New Features -* Adds the `kubectl kots get config` command to export config values. This includes a `--decrypt` flag to decrypt sensitive values. -* The internal storage location for snapshots now uses a persistent volume instead of object storage when the `disableS3` flag is set to `true` for embedded clusters. For more information about removing KOTS use of object storage, see the [kURL add-on documentation](https://kurl.sh/docs/add-ons/kotsadm). - -### Improvements -* Adds version output for current and new releases to the [`upstream upgrade`](../reference/kots-cli-upstream-upgrade) CLI command. - -### Bug Fixes -* Fixes a bug that caused analyzers to surface errors in namespaces not used by the application when the admin console has cluster access in existing cluster installations. -* Fixes an issue that caused image pull secrets to be rendered in the admin console namespace instead of the `namespace` specified in the kots.io/v1beta1.HelmChart when using `useHelmInstall`. -* Fixes the `kots pull` CLI command to properly inject `imagePullSecrets` when using Helm Charts with `useHelmInstall` set to `true`. -* Fixes a bug that causes application images to not be deleted from a [private registry](../enterprise/image-registry-embedded-cluster). -* Fixes a bug that causes images included in support bundle's [`run` collector](https://troubleshoot.sh/docs/collect/run/#image-required) to not be deleted from a private registry. - -## 1.58.2 +### Single-Node Architecture -Released on December 14, 2021 +The following diagram shows the architecture of a single-node Embedded Cluster installation for an application named Gitea: -Supported on Kubernetes: 1.19, 1.20, and 1.21 +![Embedded Cluster single-node architecture](/images/embedded-architecture-single-node.png) -### Bug Fixes -* Fixes a bug that caused config updates to take a long time. +[View a larger version of this image](/images/embedded-architecture-single-node.png) -## 1.58.1 +As shown in the diagram above, the user downloads the Embedded Cluster installation assets as a `.tgz` in their installation environment.
These installation assets include the Embedded Cluster binary, the user's license file, and (for air gap installations) an air gap bundle containing the images needed to install and run the release in an environment with limited or no outbound internet access. -Released on December 1, 2021 +When the user runs the Embedded Cluster install command, the Embedded Cluster binary first installs the k0s cluster as a systemd service. -Supported on Kubernetes: 1.19, 1.20, and 1.21 +After all the Kubernetes components for the cluster are available, the Embedded Cluster binary then installs the Embedded Cluster built-in extensions. For more information about these extensions, see [Built-In Extensions](#built-in-extensions) below. -### Bug Fixes -* Fixes a bug that caused Native Helm to skip deploying some Helm resources on automated installations. +Any Helm extensions that were included in the [`extensions`](/reference/embedded-config#extensions) field of the Embedded Cluster Config are also installed. The namespace or namespaces where Helm extensions are installed are defined by the vendor in the Embedded Cluster Config. -## 1.58.0 +Finally, Embedded Cluster also installs Local Artifact Mirror (LAM). In air gap installations, LAM is used to store and update images. -Released on December 1, 2021 +### Multi-Node Architecture -Supported on Kubernetes: 1.19, 1.20, and 1.21 +The following diagram shows the architecture of a multi-node Embedded Cluster installation: -### New Features - * Adds support for the semantic versioning of releases when the version labels are [valid](https://semver.org/). To use this feature, [enable semantic versioning for the channel](/vendor/releases-about#semantic-versioning) that the license is currently on. - * Adds the ability to automatically deploy new patch, minor, or major [valid](https://semver.org/) semantic versions when [semantic versioning is enabled](/vendor/releases-about#semantic-versioning). This new capability can be configured from the **Version History** page under the 'Configure automatic updates' option. +![Embedded Cluster multi-node architecture](/images/embedded-architecture-multi-node.png) -## 1.57.0 and earlier +[View a larger version of this image](/images/embedded-architecture-multi-node.png) -For release notes for app manager versions earlier than 1.58.0, see the [Replicated App Manager Release Notes v1.9.0 - v1.65.0](../pdfs/app-manager-release-notes.pdf) PDF. +As shown in the diagram above, in multi-node installations, the Embedded Cluster Operator, KOTS, and the image registry for air gap installations are all installed on one controller node. -================ -File: docs/release-notes/rn-embedded-cluster.md -================ +For installations that include disaster recovery with Velero, the Velero Node Agent runs on each node in the cluster. The Node Agent is a Kubernetes DaemonSet that performs backup and restore tasks such as creating snapshots and transferring data during restores. --- -toc_max_heading_level: 2 -pagination_next: null -pagination_prev: null --- +Additionally, any Helm [`extensions`](/reference/embedded-config#extensions) that you include in the Embedded Cluster Config are installed in the cluster depending on the given chart and how it is configured to be deployed (see the sketch below). -# Embedded Cluster Release Notes +This topic contains release notes for the [Replicated Embedded Cluster](/vendor/embedded-overview) installer. The release notes list new features, improvements, bug fixes, known issues, and breaking changes.
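+For illustration, the following is a sketch of how a Helm extension might be defined in the Embedded Cluster Config. The chart, repository, and field values shown (an ingress controller) are examples only; see [extensions](/reference/embedded-config#extensions) in _Embedded Cluster Config_ for the exact schema:
+
+```yaml
+# Illustrative sketch: an ingress controller deployed as a Helm extension
+apiVersion: embeddedcluster.replicated.com/v1beta1
+kind: Config
+spec:
+  extensions:
+    helm:
+      repositories:
+        - name: ingress-nginx
+          url: https://kubernetes.github.io/ingress-nginx
+      charts:
+        - name: ingress-nginx
+          chartname: ingress-nginx/ingress-nginx
+          # The vendor defines the namespace where the extension is installed
+          namespace: ingress-nginx
+          version: "4.11.2"
+```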
+### Multi-Node Architecture with High Availability -Additionally, these release notes list the versions of Kubernetes and Replicated KOTS that are available with each version of Embedded Cluster. +:::note +High availability (HA) for multi-node installations with Embedded Cluster is Alpha and is not enabled by default. For more information about enabling HA, see [Enable High Availability for Multi-Node Clusters (Alpha)](/enterprise/embedded-manage-nodes#ha). +::: -## 2.1.3 +<HaArchitecture /> -Released on February 19, 2025 +## Built-In Extensions {#built-in-extensions} - - - - - - - - - - - - - - -
-| Version | 2.1.3+k8s-1.30 | 2.1.3+k8s-1.29 |
-|---|---|---|
-| Kubernetes Version | 1.30.9 | 1.29.13 |
-| KOTS Version | 1.124.4 | 1.124.4 |
+Embedded Cluster includes several built-in extensions. The built-in extensions provide capabilities such as application management and storage. Each built-in extension is installed in its own namespace. -### Improvements {#improvements-2-1-3} -* During `install` and `join`, permissions for the data directory are set to 755 to ensure successful operation. -* Adds a preflight check to verify execute permissions on the data directory and its parent directories. This prevents installation issues, including etcd permissions issues. -* The following kernel parameters are configured automatically: `fs.inotify.max_user_instances = 1024` and `fs.inotify.max_user_watches = 65536`. -* Adds a preflight check to ensure the following kernel parameters are set correctly: `fs.inotify.max_user_instances = 1024` and `fs.inotify.max_user_watches = 65536`. -* Surfaces better error messages during the installation if the node is not ready. +The built-in extensions installed by Embedded Cluster include: -## 2.1.2 +* **Embedded Cluster Operator**: The Operator is used for reporting purposes as well as some cleanup operations. -Released on February 19, 2025 +* **KOTS:** Embedded Cluster installs the KOTS Admin Console in the kotsadm namespace. End customers use the Admin Console to configure and install the application. Rqlite is also installed in the kotsadm namespace alongside KOTS. Rqlite is a distributed relational database that uses SQLite as its storage engine. KOTS uses rqlite to store information such as support bundles, version history, application metadata, and other small amounts of data needed to manage the application. For more information about rqlite, see the [rqlite](https://rqlite.io/) website. - - - - - - - - - - - - - - -
-| Version | 2.1.2+k8s-1.30 | 2.1.2+k8s-1.29 |
-|---|---|---|
-| Kubernetes Version | 1.30.9 | 1.29.13 |
-| KOTS Version | 1.124.4 | 1.124.4 |
    +* **OpenEBS:** Embedded Cluster uses OpenEBS to provide local PersistentVolume (PV) storage, including the PV storage for rqlite used by KOTS. For more information, see the [OpenEBS](https://openebs.io/docs/) documentation. -### Improvements {#improvements-2-1-2} -* The preflight check that ensures the system clock is synchronized no longer requires NTP to be active. This accommodates systems where the clock is managed by alternative protocols (e.g., PTP). -* If firewalld is enabled, it is now automatically configured at install time to allow required network traffic in the cluster. +* **(Disaster Recovery Only) Velero:** If the installation uses the Embedded Cluster disaster recovery feature, Embedded Cluster installs Velero, which is an open-source tool that provides backup and restore functionality. For more information about Velero, see the [Velero](https://velero.io/docs/latest/) documentation. For more information about the disaster recovery feature, see [Disaster Recovery for Embedded Cluster (Alpha)](/vendor/embedded-disaster-recovery). -### Bug Fixes {#bug-fixes-2-1-1} -* Fixes host preflight failures for kernel modules in environments where kernel modules are built in. +* **(Air Gap Only) Image registry:** For air gap installations in environments with limited or no outbound internet access, Embedded Cluster installs an image registry where the images required to install and run the application are pushed. For more information about installing in air-gapped environments, see [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap). -## 2.1.1 +## Comparison to kURL -Released on February 18, 2025 +Embedded Cluster is a successor to Replicated kURL. Compared to kURL, Embedded Cluster offers several improvements such as: +* Significantly faster installation, updates, and node joins +* A redesigned Admin Console UI for managing the cluster +* Improved support for multi-node clusters +* One-click updates of both the application and the cluster at the same time - - - - - - - - - - - - - - - -
-| Version | 2.1.1+k8s-1.30 | 2.1.1+k8s-1.29 |
-|---|---|---|
-| Kubernetes Version | 1.30.9 | 1.29.13 |
-| KOTS Version | 1.124.4 | 1.124.4 |
+Additionally, Embedded Cluster automatically deploys several built-in extensions like KOTS and OpenEBS to provide capabilities such as application management and storage. This represents an improvement over kURL because vendors distributing their application with Embedded Cluster no longer need to choose and define various add-ons in the installer spec. For additional functionality that is not included in the built-in extensions, such as an ingress controller, vendors can provide their own [`extensions`](/reference/embedded-config#extensions) that will be deployed alongside the application. -### Bug Fixes {#bug-fixes-2-1-1} -* Installing now waits for the Local Artifact Mirror systemd service to be healthy before proceeding, and any errors are reported. Previously, the install appeared successful even if LAM failed to start. -* Fixes host preflight failures for kernel modules in environments where kernel modules are built in. +## Requirements -## 2.1.0 +### System Requirements -Released on February 14, 2025 +<Requirements /> - - - - - - - - - - - - - - -
-| Version | 2.1.0+k8s-1.30 | 2.1.0+k8s-1.29 |
-|---|---|---|
-| Kubernetes Version | 1.30.9 | 1.29.13 |
-| KOTS Version | 1.124.4 | 1.124.4 |
+### Port Requirements -### Improvements {#improvements-2-1-0} -* The following kernel parameters are configured automatically: `net.ipv4.conf.all.forwarding = 1`, `net.ipv4.conf.default.forwarding = 1`, `net.bridge.bridge-nf-call-iptables = 1`, `net.ipv4.conf.default.rp_filter = 0`, and `net.ipv4.conf.all.rp_filter = 0`. -* The following kernel modules are configured automatically: `overlay`, `ip_tables`, `br_netfilter`, and `nf_conntrack`. -* Adds a preflight check to ensure the following kernel parameters are set correctly: `net.ipv4.conf.all.forwarding = 1`, `net.ipv4.conf.default.forwarding = 1`, `net.bridge.bridge-nf-call-iptables = 1`, `net.ipv4.conf.default.rp_filter = 0`, and `net.ipv4.conf.all.rp_filter = 0`. -* Adds a preflight check to ensure the `overlay`, `ip_tables`, `br_netfilter`, and `nf_conntrack` kernel modules were configured correctly. -* Adds a preflight check to ensure a node's IP address is not within the Pod and Service CIDR ranges that will be used by Kubernetes. If a conflict exists, a different CIDR block can be specified with `--cidr` or a different network interface can be specified with `--network-interface`. -* Adds a preflight check to ensure that SELinux is not running in enforcing mode. +<EmbeddedClusterPortRequirements /> -### Bug Fixes {#bug-fixes-2-1-0} -* Fixes an issue when installing on Amazon Linux 2 and other older Linux distributions that causes the installation to timeout waiting for storage to be ready. +## Limitations -## 2.0.0 +Embedded Cluster has the following limitations: -Released on February 7, 2025 +* **Reach out about migrating from kURL**: We are helping several customers migrate from kURL to Embedded Cluster. Reach out to Alex Parker at alexp@replicated.com for more information. - - - - - - - - - - - - - - -
-| Version | 2.0.0+k8s-1.30 | 2.0.0+k8s-1.29 |
-|---|---|---|
-| Kubernetes Version | 1.30.9 | 1.29.13 |
-| KOTS Version | 1.124.3 | 1.124.3 |
    +* **Multi-node support is in beta**: Support for multi-node embedded clusters is in beta, and enabling high availability for multi-node clusters is in alpha. Only single-node embedded clusters are generally available. For more information, see [Managing Multi-Node Clusters with Embedded Cluster](/enterprise/embedded-manage-nodes). -### New Features {#new-features-2-0-0} -* The 2.0 release of Embedded Cluster introduces architecture changes that improve the reliability of the upgrade process, particularly the upgrade of Helm extensions like the Admin Console, OpenEBS, and vendor-supplied Helm extensions. As part of these improvements, upgrades from Embedded Cluster versions earlier than 1.8 are not supported. Online instances running Embedded Cluster versions earlier than 1.8.0 must upgrade to an Embedded Cluster version from 1.8.0 to 1.22.0 before upgrading to 2.0.0. Air gap instances running Embedded Cluster versions earlier than 1.8.0 must upgrade to version 1.8.0 before upgrading to later versions, including 2.0.0. If you have customers running these earlier versions, Replicated recommends using a [required release](https://docs.replicated.com/vendor/releases-about#properties) to ensure your customers upgrade to a supported version first. +* **Disaster recovery is in alpha**: Disaster Recovery for Embedded Cluster installations is in alpha. For more information, see [Disaster Recovery for Embedded Cluster (Alpha)](/vendor/embedded-disaster-recovery). -### Improvements {#improvements-2-0-0} -* If you don't provide a new Admin Console password to `admin-console reset-password`, you'll be prompted for one. This prevents the password from ending up in your terminal history. -* If there is no TTY (like in CI), the CLI suppresses repeated log lines when there is a spinner, making output more readable. +* **Partial rollback support**: In Embedded Cluster 1.17.0 and later, rollbacks are supported only when rolling back to a version where there is no change to the [Embedded Cluster Config](/reference/embedded-config) compared to the currently-installed version. For example, users can roll back to release version 1.0.0 after upgrading to 1.1.0 only if both 1.0.0 and 1.1.0 use the same Embedded Cluster Config. For more information about how to enable rollbacks for your application in the KOTS Application custom resource, see [allowRollback](/reference/custom-resource-application#allowrollback) in _Application_. -## 1.22.0 +* **Changing node hostnames is not supported**: After a host is added to a Kubernetes cluster, Kubernetes assumes that the hostname and IP address of the host will not change. If you need to change the hostname or IP address of a node, you must first remove the node from the cluster. For more information about the requirements for naming nodes, see [Node name uniqueness](https://kubernetes.io/docs/concepts/architecture/nodes/#node-name-uniqueness) in the Kubernetes documentation. -Released on January 24, 2025 +* **Automatic updates not supported**: Configuring automatic updates from the Admin Console so that new versions are automatically deployed is not supported for Embedded Cluster installations. For more information, see [Configuring Automatic Updates](/enterprise/updating-apps). - - - - - - - - - - - - - - - -
-| Version | 1.22.0+k8s-1.30 | 1.22.0+k8s-1.29 |
-|---|---|---|
-| Kubernetes Version | 1.30.9 | 1.29.13 |
-| KOTS Version | 1.124.0 | 1.124.0 |
    +* **Embedded Cluster installation assets not available through the Download Portal**: The assets required to install with Embedded Cluster cannot be shared with users through the Download Portal. Users can follow the Embedded Cluster installation instructions to download and extract the installation assets. For more information, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded). -### New Features {#new-features-1-22-0} -* Updates the disaster recovery alpha feature so that rather than having to apply specific labels to all the resources you want backed up, you now have full control over how your application is backed up and restored. Specifically, you now provide a Velero Backup resource and a Restore resource in your application release. These resources are used to back up and restore your application, separate from the Embedded Cluster infrastructure. For more information, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery). +* **`minKotsVersion` and `targetKotsVersion` not supported**: The [`minKotsVersion`](/reference/custom-resource-application#minkotsversion-beta) and [`targetKotsVersion`](/reference/custom-resource-application#targetkotsversion) fields in the KOTS Application custom resource are not supported for Embedded Cluster installations. This is because each version of Embedded Cluster includes a particular version of KOTS. Setting `targetKotsVersion` or `minKotsVersion` to a version of KOTS that does not coincide with the version that is included in the specified version of Embedded Cluster will cause Embedded Cluster installations to fail with an error message like: `Error: This version of App Name requires a different version of KOTS from what you currently have installed`. To avoid installation failures, do not use targetKotsVersion or minKotsVersion in releases that support installation with Embedded Cluster. -## 1.21.0 +* **Support bundles over 100MB in the Admin Console**: Support bundles are stored in rqlite. Bundles over 100MB could cause rqlite to crash, causing errors in the installation. You can still generate a support bundle from the command line. For more information, see [Generating Support Bundles for Embedded Cluster](/vendor/support-bundle-embedded). -Released on January 22, 2025 +* **Kubernetes version template functions not supported**: The KOTS [KubernetesVersion](/reference/template-functions-static-context#kubernetesversion), [KubernetesMajorVersion](/reference/template-functions-static-context#kubernetesmajorversion), and [KubernetesMinorVersion](/reference/template-functions-static-context#kubernetesminorversion) template functions do not provide accurate Kubernetes version information for Embedded Cluster installations. This is because these template functions are rendered before the Kubernetes cluster has been updated to the intended version. However, `KubernetesVersion` is not necessary for Embedded Cluster because vendors specify the Embedded Cluster version, which includes a known Kubernetes version. - - - - - - - - - - - - - - - -
-| Version | 1.21.0+k8s-1.30 | 1.21.0+k8s-1.29 |
-|---|---|---|
-| Kubernetes Version | 1.30.6 | 1.29.10 |
-| KOTS Version | 1.123.1 | 1.123.1 |
    +* **Custom domains not supported**: Embedded Cluster does not support the use of custom domains, even if custom domains are configured. We intend to add support for custom domains. For more information about custom domains, see [About Custom Domains](/vendor/custom-domains). -### New Features {#new-features-1-21-0} -* The `--no-prompt` flag is deprecated and replaced with the `--yes` flag. `--no-prompt` will be removed in a future release. -* The `--skip-host-preflights` flag is deprecated and replaced with `--ignore-host-preflights`. When `--ignore-host-preflights` is passed, the host preflights are still executed, but the user is prompted and can choose to continue if failures occur. This new behavior ensures that users see any incompatibilities in their environment, while still enabling them to bypass failures if absolutely necessary. To ignore host preflight failures in automation, use both the `--ignore-host-preflights` and `--yes` flags to address the prompt for `--ignore-host-preflights`. `--skip-host-preflights` will be removed in a future release. +* **KOTS Auto-GitOps workflow not supported**: Embedded Cluster does not support the KOTS Auto-GitOps workflow. If an end-user is interested in GitOps, consider the Helm install method instead. For more information, see [Installing with Helm](/vendor/install-with-helm). -### Improvements {#improvements-1-21-0} -* Adds preflight checks to ensure nodes joining the cluster can communicate with all other nodes in the cluster on ports 6443, 9443, 2380, and 10250. -* Adds a preflight check to ensure that communication can occur between the Pod and Service CIDRs that Kubernetes will use. When this preflight fails, it's often because of a firewall configuration that blocks communication between the Pod and Service CIDRs. -* Adds a preflight check to ensure IP forwarding is enabled (`net.ipv4.ip_forward = 1`). Many machines have IP forwarding disabled by default. As of 1.19.0, Embedded Cluster uses a sysctl configuration file to enable IP forwarding, so this preflight should only fail if Embedded Cluster couldn't enable IP forwarding. -* Adds a preflight check to ensure that a nameserver is configured in `/etc/resolv.conf`. -* If a network interface is not specified with the `--network-interface` flag, Embedded Cluster will use improved logic to determine which interface to use. -* The license file is now stored in the data directory and is included in host support bundles. -* Host support bundles now include whether `/etc/resolv.conf` has at least one nameserver configured. -* Host support bundles now include the output of `firewall-cmd --list-all`. -* Potentially sensitive CLI flag values are no longer included in metrics reporting. -* Usage and error messages have been improved for understandability. -* `kubernetes.default.svc.cluster.local` has been added as a Kubernetes API server SAN. +* **Downgrading Kubernetes not supported**: Embedded Cluster does not support downgrading Kubernetes. The admin console will not prevent end-users from attempting to downgrade Kubernetes if a more recent version of your application specifies a previous Embedded Cluster version. You must ensure that you do not promote new versions with previous Embedded Cluster versions. -### Bug Fixes {#bug-fixes-1-21-0} -* Support bundles now check that `modprobe`, `mount`, and `umount` exist in PATH rather than at hardcoded locations. 
-* Fixes an issue where `reset` commands run on partially-installed clusters could fail with errors like `no matches for kind "Installation"`. +* **Templating not supported in Embedded Cluster Config**: The [Embedded Cluster Config](/reference/embedded-config) resource does not support the use of Go template functions, including [KOTS template functions](/reference/template-functions-about). This only applies to the Embedded Cluster Config. You can still use template functions in the rest of your release as usual. -## 1.19.0 +* **Policy enforcement on Embedded Cluster workloads is not supported**: The Embedded Cluster runs workloads that require higher levels of privilege. If your application installs a policy enforcement engine such as Gatekeeper or Kyverno, ensure that its policies are not enforced in the namespaces used by Embedded Cluster. -Released on November 14, 2024 +* **Installing on STIG- and CIS-hardened OS images is not supported**: Embedded Cluster isn't tested on these images, and issues have arisen when trying to install on them. - - - - - - - - - - - - - - - -
-| Version | 1.19.0+k8s-1.30 | 1.19.0+k8s-1.29 |
-|---|---|---|
-| Kubernetes Version | 1.30.5 | 1.29.9 |
-| KOTS Version | 1.121.0 | 1.121.0 |
    +================ +File: docs/vendor/embedded-using.mdx +================ +import UpdateOverview from "../partials/embedded-cluster/_update-overview.mdx" +import SupportBundleIntro from "../partials/support-bundles/_ec-support-bundle-intro.mdx" +import EmbeddedClusterSupportBundle from "../partials/support-bundles/_generate-bundle-ec.mdx" +import EcConfig from "../partials/embedded-cluster/_ec-config.mdx" -### New Features {#new-features-1-19-0} -* Adds preflight checks to ensure that the following kernel parameters are set: `net.ipv4.conf.default.arp_filter = 0`, `net.ipv4.conf.default.arp_ignore = 0`, `net.ipv4.conf.all.arp_filter = 0`, and `net.ipv4.conf.all.arp_ignore = 0`. -* The following kernel parameters will be written to `/etc/sysctl.d/99-embedded-cluster.conf` and configured automatically during installation: `net.ipv4.ip_forward = 1`, `net.ipv4.conf.default.arp_filter = 0`, `net.ipv4.conf.default.arp_ignore = 0`, `net.ipv4.conf.all.arp_filter = 0`, and `net.ipv4.conf.all.arp_ignore = 0`. An error will not occur if Embedded Cluster fails to set these kernel parameters at install time. Instead, the aforementioned preflight checks will instruct the user to set these parameters. +# Using Embedded Cluster -### Improvements {#improvements-1-19-0} -* If a user downloads an air gap bundle but attempts to install without it, the user will be instructed how to pass the air gap bundle to `install`. They will then be asked if they want to continue with an online installation anyway. +This topic provides information about using Replicated Embedded Cluster, including how to get started, configure Embedded Cluster, access the cluster using kubectl, and more. For an introduction to Embedded Cluster, see [Embedded Cluster Overview](embedded-overview). -## 1.18.0 +## Quick Start -Released on November 8, 2024 +You can use the following steps to get started quickly with Embedded Cluster. More detailed documentation is available below. - - - - - - - - - - - - - - - -
-| Version | 1.18.0+k8s-1.30 | 1.18.0+k8s-1.29 |
-|---|---|---|
-| Kubernetes Version | 1.30.5 | 1.29.9 |
-| KOTS Version | 1.120.3 | 1.120.3 |
+1. Create a new customer or edit an existing customer and select the **Embedded Cluster Enabled** license option. Save the customer. -### New Features {#new-features-1-18-0} -* Adds support for passing ConfigValues using the `--config-values` flag for the `install` command. This also enables automated installations of both Embedded Cluster and the application. +1. Create a new release that includes your application. In that release, create an Embedded Cluster Config that includes, at minimum, the Embedded Cluster version you want to use (a minimal example is also sketched after these steps). See the Embedded Cluster [GitHub repo](https://github.com/replicatedhq/embedded-cluster/releases) to find the latest version. -### Improvements {#improvements-1-18-0} -* When the Admin Console URL is printed at the end of the `install` command, it will now use the public IP address instead of the private IP address for AWS EC2 instances that use IMDSv2. -* During setup of the Admin Console when a self-signed certificate is used, the instructions are updated to better inform users how to ignore the warning on different browsers. +   Example Embedded Cluster Config: -### Bug Fixes {#bug-fixes-1-18-0} -* Fixes an issue where registry logs weren't included in support bundles. -* Fixes an issue when installing on Azure that caused the Admin Console URL shown at the end of the `install` command to use the private IP address rather than the public IP address. -* Fixes an issue that prevented you from updating an application if the new version contained a required config item without a `default` or `value` set. -* The copy button now works for the command to validate the authenticity of the self-signed certificate during Admin Console setup. -* Fixes an issue where the **Config** page showed an error and wouldn't load. +   <EcConfig /> -## 1.17.0 +1. Save the release and promote it to the channel the customer is assigned to. -Released on November 4, 2024 +1. Return to the customer page where you enabled Embedded Cluster. At the top right, click **Install instructions** and choose **Embedded Cluster**. A dialog appears with instructions on how to download the Embedded Cluster installation assets and install your application. +   ![Customer install instructions drop down button](/images/customer-install-instructions-dropdown.png) - - - - - - - - - - - - - - -
-| Version | 1.17.0+k8s-1.30 | 1.17.0+k8s-1.29 |
-|---|---|---|
-| Kubernetes Version | 1.30.5 | 1.29.9 |
-| KOTS Version | 1.120.1 | 1.120.1 |
    + ![Customer install instructions drop down button](/images/customer-install-instructions-dropdown.png) -### New Features {#new-features-1-17-0} -* Adds support for partial rollbacks. Partial rollbacks are supported only when rolling back to a version where there is no change to the Embedded Cluster Config compared to the currently-installed version. For example, users can roll back to release version 1.0.0 after upgrading to 1.1.0 only if both 1.0.0 and 1.1.0 use the same [Embedded Cluster Config](/reference/embedded-config). For more information about how to enable rollbacks for your application in the KOTS Application custom resource, see [allowRollback](/reference/custom-resource-application#allowrollback) in _Application_. -* Introduces a new landing page and guided installation workflow for the Admin Console. + [View a larger version of this image](/images/customer-install-instructions-dropdown.png) + +1. On your VM, run the commands in the **Embedded Cluster install instructions** dialog. -### Improvements {#improvements-1-17-0} -* Removes unused infrastructure images from the data directory on upgrades to free up storage space. -* Adds additional host collectors and analyzers to improve troubleshooting with support bundles. -* Support bundles now include information on connectivity between Pods and nodes to help resolve networking issues more quickly. -* The preflight check for connectivity to replicated.app and proxy.replicated.com now use any private CAs provided with `--private-ca`, in case a man-in-the-middle proxy is in use. + Embedded cluster install instruction dialog -### Bug Fixes {#bug-fixes-1-17-0} -* Fixes a panic that occurred when prompted to proceed after preflight warnings. -* Fixes an issue where `troubleshoot.sh/v1beta2` was erroneously printed to the screen during installation. + [View a larger version of this image](/images/embedded-cluster-install-dialog-latest.png) -## 1.16.0 +1. Enter an Admin Console password when prompted. -Released on October 23, 2024 + The Admin Console URL is printed when the installation finishes. Access the Admin Console to begin installing your application. During the installation process in the Admin Console, you have the opportunity to add nodes if you want a multi-node cluster. Then you can provide application config, run preflights, and deploy your application. - - - - - - - - - - - - - - - -
-| Version | 1.16.0+k8s-1.30 | 1.16.0+k8s-1.29 |
-|---|---|---|
-| Kubernetes Version | 1.30.5 | 1.29.9 |
-| KOTS Version | 1.119.0 | 1.119.0 |
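+For reference, a minimal Embedded Cluster Config, like the one referenced in step 2 of the Quick Start above, might look like the following sketch. The version shown is an example; specify the Embedded Cluster version you want to use:
+
+```yaml
+# Minimal sketch; pin the Embedded Cluster version you intend to ship
+apiVersion: embeddedcluster.replicated.com/v1beta1
+kind: Config
+spec:
+  version: 2.1.3+k8s-1.30
+```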
    +## About Configuring Embedded Cluster -### New Features {#new-features-1-16-0} -* Adds support for Kubernetes 1.30 and removes support for 1.28. -* Adds a `--data-dir` flag to the `install` and `restore` commands so the data directory can be specified. By default, the data directory is `/var/lib/embedded-cluster`. If the `--data-dir` flag was provided at install time, then the same data directory must be provided when restoring. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install) and [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery). -* Adds an `admin-console reset-password` command that allows resetting the password for the Admin Console. -* Adds a `--cidr` flag to the `install` command that replaces the `--pod-cidr` and `--service-cidr` flags. The CIDR range specified with the `--cidr` flag is split and used for both the Pod and Service CIDRs. See [Embedded Cluster Install Command Options](/reference/embedded-cluster-install). - :::note - The `--pod-cidr` and `--service-cidr` flags are hidden, but still functional. Replicated recommends that you update any automation that uses the `--pod-cidr` and - `--service-cidr` flags to use the `--cidr` flag instead. - ::: -* Adds the following preflight checks: - * Verify that the CIDR range used for the cluster does not overlap with existing routes. - * Verify the CPU supports x86-64-v2. - * Verify the data directory (`/var/lib/embedded-cluster` by default) is not symlinked. - -### Improvements {#improvements-1-16-0} -* For new installations, the `k0s` and `openebs-local` directories are now subdirectories of `/var/lib/embedded-cluster`. With this change, Embedded Cluster now only documents and includes preflight checks for `/var/lib/embedded-cluster`. -* Adds the `support-bundle` command to make it easier to generate support bundles. For more information, see [Generating Support Bundles for Embedded Cluster](/vendor/support-bundle-embedded). -* Improves the reliability of waiting for the Kubernetes server to start. -* Collects more information about the cluster in support bundles, including the Local Artifact Mirror and Kubernetes API Server logs. -* Requires that the Admin Console password is at least six characters. -* Improves the flexibility of configuring the Cluster Resources collector in support bundle specs by limiting KOTS's default collection to its own namespace. - -### Bug Fixes {#bug-fixes-1-16-0} -* Fixes an issue that could occur when resetting a worker node that used a custom data directory. -* Fixes an issue where k0s images were not updated within the cluster when k0s was upgraded. -* Fixes an issue where upgrading a cluster with a worker node that used a version of Embedded Cluster earlier than 1.15 would fail. -* Fixes an issue that prevented you from upgrading to an application version that didn't have Config and preflights. -* Fixes an issue where the Admin Console could reach out the internet when generating a support bundle in air gap environments. -* Fixes an issue that prevented you from installing Embedded Cluster using a multi-channel license and a channel other than the license's default. -* Fixes an issue that could cause the registry to fail to upgrade in air gap installations. -* Fixes an issue where the Replicated SDK failed to deploy if a private CA was provided to the installation but the SDK was installed into a different namespace than KOTS. 
-* If an application includes the Replicated SDK, the SDK will be deployed with the same ClusterRole as the Admin Console. -* Fixes an issue where node joins failed because of a version mismatch, even though the versions were the same. - -## 1.15.0 - Removed +To install an application with Embedded Cluster, an Embedded Cluster Config must be present in the application release. The Embedded Cluster Config lets you define several characteristics about the cluster that will be created. -:::important -Embedded Cluster 1.15.0 has been removed and is not available for use because of issues with upgrades. It continues to work for anyone already using it. -::: +For more information, see [Embedded Cluster Config](/reference/embedded-config). -Released on October 10, 2024 +## About Installing with Embedded Cluster - - - - - - - - - - - - - - - -
-| Version | 1.15.0+k8s-1.29 | 1.15.0+k8s-1.28 |
-|---|---|---|
-| Kubernetes Version | 1.29.9 | 1.28.11 |
-| KOTS Version | 1.117.5 | 1.117.5 |
    +This section provides an overview of installing applications with Embedded Cluster. -### New Features {#new-features-1-15-0} -* Adds the `--data-dir` flag to the `install` command so the data directory can be specified. By default, the data directory is `/var/lib/embedded-cluster`. +### Installation Overview -### Improvements {#improvements-1-15-0} -* Adds a preflight check to ensure the CPU supports x86-64-v2. -* Adds a preflight check to ensure the data directory (`/var/lib/embedded-cluster` by default) is not symlinked. -* Adds the `--data-dir` flag to the `restore` command. When restoring a backup that used a non-default data directory (i.e., the `--data-dir` flag was provided at install time), the same data directory must be provided when restoring. -* For new installations, the `k0s` and `openebs-local` directories are now subdirectories of `/var/lib/embedded-cluster`. We will only document and preflight for `/var/lib/embedded-cluster` now. -* The Admin Console password must be at least six characters. +The following diagram demonstrates how Kubernetes and an application are installed into a customer environment using Embedded Cluster: -### Bug Fixes {#bug-fixes-1-15-0} -* Fixes an issue that prevented you from installing Embedded Cluster using a multi-channel license and a channel other than the license's default. -* Fixes an issue that could cause the registry to fail to upgrade in air gap installations. -* Fixes an issue where node joins failed because of a version mismatch, even though the versions were the same. +![Embedded Cluster installs an app in a customer environment](/images/embedded-cluster-install.png) -## 1.14.2 +[View a larger version of this image](/images/embedded-cluster-install.png) -Released on September 26, 2024 +As shown in the diagram above, the Embedded Cluster Config is included in the application release in the Replicated Vendor Portal and is used to generate the Embedded Cluster installation assets. Users can download these installation assets from the Replicated app service (`replicated.app`) on the command line, then run the Embedded Cluster installation command to install Kubernetes and the KOTS Admin Console. Finally, users access the Admin Console to optionally add nodes to the cluster and to configure and install the application. - - - - - - - - - - - - - - - -
-| Version | 1.14.2+k8s-1.29 | 1.14.2+k8s-1.28 |
-|---|---|---|
-| Kubernetes Version | 1.29.8 | 1.28.11 |
-| KOTS Version | 1.117.3 | 1.117.3 |
    +### Installation Options -### Improvements {#improvements-1-14-2} +Embedded Cluster supports installations in online (internet-connected) environments and air gap environments with no outbound internet access. -* Preflight checks for the Admin Console and local artifact mirror ports now take into consideration ports specified by the user with the `--admin-console-port` and `--local-artifact-mirror-port` flags. -* Improves the display of preflight failures so they're more readable. +For online installations, Embedded Cluster also supports installing behind a proxy server. -## 1.14.1 +For more information about how to install with Embedded Cluster, see: +* [Online Installation wtih Embedded Cluster](/enterprise/installing-embedded) +* [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap) -Released on September 26, 2024 +### Customer-Specific Installation Instructions - - - - - - - - - - - - - - - -
-| Version | 1.14.1+k8s-1.29 | 1.14.1+k8s-1.28 |
-|---|---|---|
-| Kubernetes Version | 1.29.8 | 1.28.11 |
-| KOTS Version | 1.117.3 | 1.117.3 |
+To install with Embedded Cluster, you can follow the customer-specific instructions provided on the **Customer** page in the Vendor Portal. For example: -### New Features {#new-features-1-14-1} +![Embedded cluster install instruction dialog](/images/embedded-cluster-install-dialog.png) -* Adds host preflight checks to ensure that the required ports are open and available. For more information, see [Port Requirements](/vendor/embedded-overview#port-requirements). +[View a larger version of this image](/images/embedded-cluster-install-dialog.png) -### Improvements {#improvements-1-14-1} +### (Optional) Serve Installation Assets Using the Vendor API -* Adds the `--network-interface` flag for the `join` command so a network interface can optionally be selected when joining nodes. If this flag is not provided, the first valid, non-local network interface is used. -* The `reset` command now automatically reboots the machine, and the optional `--reboot` flag is no longer available. A reboot is required to reset iptables. +To install with Embedded Cluster, you need to download the Embedded Cluster installer binary and a license. Air gap installations also require an air gap bundle. Some vendors already have a portal where their customers can log in to access documentation or download artifacts. In cases like this, you can serve the Embedded Cluster installation assets yourself using the Replicated Vendor API, rather than having customers download the assets from the Replicated app service using a curl command during installation. -### Bug Fixes {#bug-fixes-1-14-1} +To serve Embedded Cluster installation assets with the Vendor API: -* Fixes an issue where nodes could fail to join with the error "unable to get network interface for address." +1. If you have not done so already, create an API token for the Vendor API. See [Using the Vendor API v3](/reference/vendor-api-using#api-token-requirement). -## 1.14.0 +1. Call the [Get an Embedded Cluster release](https://replicated-vendor-api.readme.io/reference/getembeddedclusterrelease) endpoint to download the assets needed to install your application with Embedded Cluster. Your customers must take this binary and their license and copy them to the machine where they will install your application. -Released on September 24, 2024 +   Note the following: - - - - - - - - - - - - - - -
-| Version | 1.14.0+k8s-1.29 | 1.14.0+k8s-1.28 |
-|---|---|---|
-| Kubernetes Version | 1.29.8 | 1.28.11 |
-| KOTS Version | 1.117.3 | 1.117.3 |
    + * (Recommended) Provide the `customerId` query parameter so that the customer’s license is included in the downloaded tarball. This mirrors what is returned when a customer downloads the binary directly using the Replicated app service and is the most useful option. Excluding the `customerId` is useful if you plan to distribute the license separately. -### New Features {#new-features-1-14-0} + * If you do not provide any query parameters, this endpoint downloads the Embedded Cluster binary for the latest release on the specified channel. You can provide the `channelSequence` query parameter to download the binary for a particular release. -* Introduces the `--admin-console-port` and `--local-artifact-mirror-port` flags to the `install` command so the ports for the Admin Console (default 30000) and the local artifact mirror (default 50000) can be chosen. -* Introduces the `--local-artifact-mirror-port` flag to the `restore` command so the port used for the local artifact mirror can be selected during the restore. If no port is provided, the port in use when the backup was taken will be used. -* Introduces the `--network-interface` flag to the `install` command so a network interface can be selected. If a network interface is not provided, the first valid, non-local network interface is used. +### About Host Preflight Checks -### Improvements {#improvements-1-14-0} +During installation, Embedded Cluster automatically runs a default set of _host preflight checks_. The default host preflight checks are designed to verify that the installation environment meets the requirements for Embedded Cluster, such as: +* The system has sufficient disk space +* The system has at least 2G of memory and 2 CPU cores +* The system clock is synchronized -* When a proxy server is configured, the default network interface's subnet will automatically be added to the no-proxy list if the node's IP address isn't already included. -* When joining nodes to an Embedded Cluster, the correct network interface is chosen based on the node IP address in the join command. -* The static IP addresses for replicated.app and proxy.replicated.com are now included in the failure messages for the preflight checks that verify connectivity to those endpoints, making it easier for end users to allowlist those endpoints. -* If the Replicated SDK is deployed by KOTS as part of an application, the SDK will automatically be configured with any additional CA certificates provided to `--private-ca` flag for the `install` command. +For Embedded Cluster requirements, see [Embedded Cluster Installation Requirements](/enterprise/installing-embedded-requirements). For the full default host preflight spec for Embedded Cluster, see [`host-preflight.yaml`](https://github.com/replicatedhq/embedded-cluster/blob/main/pkg/preflights/host-preflight.yaml) in the `embedded-cluster` repository in GitHub. +If any of the host preflight checks fail, installation is blocked and a message describing the failure is displayed. For more information about host preflight checks for installations on VMs or bare metal servers, see [About Host Preflights](preflight-support-bundle-about#host-preflights). -## 1.13.1 +#### Limitations -Released on September 20, 2024 +Embedded Cluster host preflight checks have the following limitations: - - - - - - - - - - - - - - - -
-| Version | 1.13.1+k8s-1.29 | 1.13.1+k8s-1.28 |
-|---|---|---|
-| Kubernetes Version | 1.29.8 | 1.28.11 |
-| KOTS Version | 1.117.1 | 1.117.1 |
+* The default host preflight checks for Embedded Cluster cannot be modified, and vendors cannot provide their own custom host preflight spec for Embedded Cluster.
+* Host preflight checks do not check that any application-specific requirements are met. For more information about defining preflight checks for your application, see [Defining Preflight Checks](/vendor/preflight-defining).
-### Bug Fixes {#bug-fixes-1-13-1}
+#### Skip Host Preflight Checks
-* Fixes an issue where you could not upgrade to a version that had special characters like `+` in the version label.
+You can skip host preflight checks by passing the `--skip-host-preflights` flag with the Embedded Cluster `install` command. For example:
-## 1.13.0
+```bash
+sudo ./my-app install --license license.yaml --skip-host-preflights
+```
-Released on September 17, 2024
+When you skip host preflight checks, the Admin Console still runs any application-specific preflight checks that are defined in the release before the application is deployed.
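As a point of reference for the application-specific checks mentioned above, preflight checks for an application are defined with the Troubleshoot `Preflight` custom resource (see [Defining Preflight Checks](/vendor/preflight-defining)). The following is a minimal illustrative sketch only; the analyzer choice and version threshold shown are hypothetical:

```yaml
# A minimal application preflight spec (illustrative; threshold is hypothetical)
apiVersion: troubleshoot.sh/v1beta2
kind: Preflight
metadata:
  name: example-preflights
spec:
  analyzers:
    - clusterVersion:
        outcomes:
          - fail:
              when: "< 1.26.0"
              message: This application requires Kubernetes 1.26.0 or later.
          - pass:
              message: The cluster meets the minimum Kubernetes version requirement.
```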
 | Version | 1.13.0+k8s-1.29 | 1.13.0+k8s-1.28 |
 |---|---|---|
 | Kubernetes Version | 1.29.8 | 1.28.11 |
 | KOTS Version | 1.117.0 | 1.117.0 |
+:::note
+Skipping host preflight checks is _not_ recommended for production installations.
+:::
-### New Features {#new-features-1-13-0}
+## About Managing Multi-Node Clusters with Embedded Cluster
-* Adds the [`PrivateCACert` template function](/reference/template-functions-static-context#privatecacert) to return the name of a ConfigMap containing additional trusted CA certificates provided by the end user with the `--private-ca` flag for the `install` command.
+This section describes managing nodes in multi-node clusters created with Embedded Cluster.
-### Bug Fixes {#bug-fixes-1-13-0}
+### Defining Node Roles for Multi-Node Clusters
-* Fixes an issue where user-provided proxy configuration was removed during upgrades.
-* Fixes an issue where the disk performance preflight failed on certain architectures where fio was unable to run.
+You can optionally define node roles in the Embedded Cluster Config. For multi-node clusters, roles can be useful for assigning specific application workloads to nodes. If node roles are defined, users access the Admin Console to assign one or more roles to a node when it is joined to the cluster.
-## 1.12.1
+For more information, see [roles](/reference/embedded-config#roles) in _Embedded Cluster Config_.
-Released on September 13, 2024
+### Adding Nodes
 | Version | 1.12.1+k8s-1.29 | 1.12.1+k8s-1.28 |
 |---|---|---|
 | Kubernetes Version | 1.29.8 | 1.28.11 |
 | KOTS Version | 1.116.1 | 1.116.1 |
+Users can add nodes to a cluster with Embedded Cluster from the Admin Console. The Admin Console provides the join command used to add nodes to the cluster.
-### New Features {#new-features-1-12-1}
+For more information, see [Managing Multi-Node Clusters with Embedded Cluster](/enterprise/embedded-manage-nodes).
-* Adds the ability to provide additional trusted certificate authority certificates with the `install` command's `--private-ca` flag. This is useful when Embedded Cluster is installed behind an enterprise proxy that intercepts traffic and issues its own certificates.
+### High Availability for Multi-Node Clusters (Alpha)
-### Bug Fixes {#bug-fixes-1-12-1}
+Multi-node clusters are not highly available by default. Enabling high availability (HA) requires that at least three controller nodes are present in the cluster. Users can enable HA when joining the third node.
-* Removes unnecessary values that were previously added to the no proxy list automatically.
-* KOTS now uses the fully qualified `.svc.cluster.local` address when making requests to the `kotsadm-rqlite` service to simplify HTTP proxy configuration.
+For more information about creating HA multi-node clusters with Embedded Cluster, see [Enable High Availability for Multi-Node Clusters (Alpha)](/enterprise/embedded-manage-nodes#ha) in _Managing Multi-Node Clusters with Embedded Cluster_.
-## 1.12.0
+## About Performing Updates with Embedded Cluster
-Released on September 11, 2024
+
 | Version | 1.12.0+k8s-1.29 | 1.12.0+k8s-1.28 |
 |---|---|---|
 | Kubernetes Version | 1.29.8 | 1.28.11 |
 | KOTS Version | 1.116.0 | 1.116.0 |
+For more information about updating, see [Performing Updates with Embedded Cluster](/enterprise/updating-embedded).
-### Improvements {#improvements-1-12-0}
+## Access the Cluster
-* Available updates and the check for updates button are shown on the **Dashboard** page of the Admin Console. The check for updates button is now also shown on the **Version history** page. These were removed in a previous version.
-* The **Nodes** page displays guidance and easier access to the node join command during initial install.
-* When nodes need to be added to the cluster during a restore operation, the `join` command is more clearly shown in the Admin Console.
-* Hides a banner on the **View Files** page that told users to use `kubectl kots` commands that are not intended for Embedded Cluster.
-* KOTS now uses the fully qualified `.svc.cluster.local` address when making requests to the `kotsadm-rqlite` and `kotsadm-minio` services for simplified HTTP proxy configuration using `NO_PROXY=.cluster.local`.
+With Embedded Cluster, end users should rarely need to use the CLI. Typical workflows, like updating the application and the cluster, are driven through the Admin Console.
-### Bug Fixes {#bug-fixes-1-12-0}
+Nonetheless, there are times when vendors or their customers need to use the CLI for development or troubleshooting.
-* Fixes an issue where the values provided to the `--http-proxy`, `--https-proxy`, and `--no-proxy` flags for the `kots install` command were not propagated to the Replicated SDK.
+To access the cluster and use other included binaries:
-## 1.11.1
+1. SSH onto a controller node.
-Released on August 30, 2024
+1. Use the Embedded Cluster `shell` command to start a shell with access to the cluster:
 | Version | 1.11.1+k8s-1.29 | 1.11.1+k8s-1.28 |
 |---|---|---|
 | Kubernetes Version | 1.29.7 | 1.28.11 |
 | KOTS Version | 1.114.0 | 1.114.0 |
+   ```
+   sudo ./APP_SLUG shell
+   ```
-### Improvements {#improvements-1-11-1}
+   The output looks similar to the following:
+   ```
+      __4___
+   _  \ \ \ \    Welcome to APP_SLUG debug shell.
+  <'\ /_/_/_/    This terminal is now configured to access your cluster.
+  ((____!___/)   Type 'exit' (or CTRL+d) to exit.
+   \0\0\0\0\/    Happy hacking.
+   ~~~~~~~~~~~
+   root@alex-ec-2:/home/alex# export KUBECONFIG="/var/lib/embedded-cluster/k0s/pki/admin.conf"
+   root@alex-ec-2:/home/alex# export PATH="$PATH:/var/lib/embedded-cluster/bin"
+   root@alex-ec-2:/home/alex# source <(kubectl completion bash)
+   root@alex-ec-2:/home/alex# source /etc/bash_completion
+   ```
-* Adds a host preflight check to ensure that disk performance is sufficient for etcd. Specifically, the P99 write latency must be less than 10 ms.
+   The appropriate kubeconfig is exported, and the location of useful binaries like kubectl and Replicated's preflight and support-bundle plugins is added to PATH.
-## 1.11.0
+   :::note
+   You cannot run the `shell` command on worker nodes.
+   :::
-Released on August 23, 2024
+1. Use the available binaries as needed.
 | Version | 1.11.0+k8s-1.29 | 1.11.0+k8s-1.28 |
 |---|---|---|
 | Kubernetes Version | 1.29.7 | 1.28.11 |
 | KOTS Version | 1.114.0 | 1.114.0 |
+   **Example**:
-### Improvements {#improvements-1-11-0}
+   ```bash
+   kubectl version
+   ```
+   ```
+   Client Version: v1.29.1
+   Kustomize Version: v5.0.4-0.20230601165947-6ce0bf390ce3
+   Server Version: v1.29.1+k0s
+   ```
-* The default range available for NodePorts is now 80-32767 instead of 30000-32767. Many customers used [`unsupportedOverrides`](/reference/embedded-config#unsupportedoverrides) to configure this wider range for use with things like an ingress controller, so we have adjusted the default range accordingly. Changes to this range are not applied on upgrades, so existing installations will not be changed.
-* Adds host preflight checks for connecting to replicated.app and proxy.replicated.com. If you use a custom domain for replicated.app, the custom domain will be used in the preflight check.
-* Adds a host preflight check to ensure that neither `nameserver localhost` nor `nameserver 127.0.0.1` is present in `resolv.conf`.
+1. Type `exit` or **Ctrl + D** to exit the shell.
-### Bug Fixes {#bug-fixes-1-11-0}
+   :::note
+   If you encounter a typical workflow where your customers have to use the Embedded Cluster shell, reach out to Alex Parker at alexp@replicated.com. These workflows might be candidates for additional Admin Console functionality.
+   :::
-* Fixes several issues that caused node resets to fail. Single-node clusters are no longer drained before being reset. Resets will no longer fail with the error `unable to get installation` if the installation failed early on. And node resets will now work if bind mounts are used for `/var/lib/embedded-cluster`, `/var/lib/k0s`, and `/var/openebs`.
-* Fixes an issue where preflight checks for `modprobe`, `mount`, and `unmount` in `PATH` did not use absolute paths.
-* Fixes an issue where restoring did not work with S3-compatible object stores other than AWS S3.
+## Reset a Node
-## 1.10.0
+Resetting a node removes the cluster and your application from that node. This is useful for iteration, development, and when mistakes are made, so you can reset a machine and reuse it instead of having to procure another machine.
-Released on August 13, 2024
+If you want to completely remove a cluster, you need to reset each node individually.
 | Version | 1.10.0+k8s-1.29 | 1.10.0+k8s-1.28 |
 |---|---|---|
 | Kubernetes Version | 1.29.7 | 1.28.11 |
 | KOTS Version | 1.114.0 | 1.114.0 |
+When resetting a node, OpenEBS PVCs on the node are deleted. Only PVCs created as part of a StatefulSet will be recreated automatically on another node. To recreate other PVCs, the application will need to be redeployed.
-### New Features {#new-features-1-10-0}
+To reset a node:
-* Adds support for the `dropdown` config item type, which creates a dropdown on the config screen. See [`dropdown`](/reference/custom-resource-config#dropdown) in _Config_.
-* Adds the `radio` config item type, which is functionally equivalent to the `select_one` item type but is more clearly named. The `select_one` config item type is deprecated in favor of `radio` but is still fully functional. See [`radio`](/reference/custom-resource-config#radio) in _Config_.
+1. SSH onto the machine. Ensure that the Embedded Cluster binary is still available on that machine.
-:::note
-For release notes for Embedded Cluster versions earlier than 1.10.0, see the [Embedded Cluster GitHub releases page](https://github.com/replicatedhq/embedded-cluster/releases).
-:::
+1. Run the following command to reset the node and automatically reboot the machine to ensure that transient configuration is also reset:
-================
-File: docs/release-notes/rn-kubernetes-installer.md
-================
+   ```
+   sudo ./APP_SLUG reset
+   ```
----
-toc_max_heading_level: 2
-pagination_next: null
-pagination_prev: null
----
+   Where `APP_SLUG` is the unique slug for the application.
-# kURL Release Notes
+   :::note
+   Pass the `--no-prompt` flag to disable interactive prompts. Pass the `--force` flag to ignore any errors encountered during the reset.
+   :::
-This topic contains release notes for the [Replicated kURL](/vendor/kurl-about) installer. The release notes list new features, improvements, bug fixes, known issues, and breaking changes.
+## Additional Use Cases
+This section outlines some additional use cases for Embedded Cluster. These are not officially supported features from Replicated, but are ways of using Embedded Cluster that we or our customers have experimented with that might be useful to you.
-## v2025.02.26-0
+### NVIDIA GPU Operator
-Released on February 26, 2025
+The NVIDIA GPU Operator uses the operator framework within Kubernetes to automate the management of all NVIDIA software components needed to provision GPUs. For more information about this operator, see the [NVIDIA GPU Operator](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/overview.html) documentation.
-### New Features {#new-features-v2025-02-26-0}
-* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) versions 1.13.10, 1.14.12, 1.15.8, 1.16.3, and 1.16.4.
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) versions RELEASE.2025-02-07T23-21-09Z and RELEASE.2025-02-18T16-25-55Z.
-* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.80.0-69.3.3.
-* Adds [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.57.3.
+You can include the NVIDIA GPU Operator in your release as an additional Helm chart, or using Embedded Cluster Helm extensions. For information about adding Helm extensions, see [extensions](/reference/embedded-config#extensions) in _Embedded Cluster Config_.
-### Bug Fixes {#bug-fixes-v2025-02-26-0}
-* Increases Grafana pod limits to 200m CPU and 128Mi memory.
-* Fixes Prometheus reporting for Rook 1.13 and later.
+Using the NVIDIA GPU Operator with Embedded Cluster requires configuring the containerd options in the operator as follows:
-## v2025.02.14-0
+```yaml
+# Embedded Cluster Config
-Released on February 14, 2025
+  extensions:
+    helm:
+      repositories:
+        - name: nvidia
+          url: https://nvidia.github.io/gpu-operator
+      charts:
+        - name: gpu-operator
+          chartname: nvidia/gpu-operator
+          namespace: gpu-operator
+          version: "v24.9.1"
+          values: |
+            # configure the containerd options
+            toolkit:
+              env:
+                - name: CONTAINERD_CONFIG
+                  value: /etc/k0s/containerd.d/nvidia.toml
+                - name: CONTAINERD_SOCKET
+                  value: /run/k0s/containerd.sock
+```
+When the containerd options are configured as shown above, the NVIDIA GPU Operator automatically creates the required configurations in the `/etc/k0s/containerd.d/nvidia.toml` file. It is not necessary to create this file manually, or modify any other configuration on the hosts.
-### New Features {#new-features-v2025-02-14-0}
-* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.32.2, 1.31.6, 1.30.10, and 1.29.14.
-* Adds [Metrics Server add-on](https://kurl.sh/docs/add-ons/metrics-server) version 0.7.2.
-* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.15.2.
-* Adds [Cert Manager add-on](https://kurl.sh/docs/add-ons/cert-manager) version 1.17.1.
+:::note
+If you include the NVIDIA GPU Operator as a Helm extension, remove any existing containerd services that are running on the host (such as those deployed by Docker) before attempting to install the release with Embedded Cluster. If there are any containerd services on the host, the NVIDIA GPU Operator will generate an invalid containerd config, causing the installation to fail.
+:::
-### Bug Fixes {#bug-fixes-v2025-02-14-0}
-* Fixes [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.7.25 to install the correct version on RHEL 8, CentOS 8, and Oracle Linux 8.
+## Troubleshoot with Support Bundles
-## v2025.02.12-0
+
-Released on February 12, 2025
+
-### New Features {#new-features-v2025-02-12-0}
-* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.31.5, 1.30.9, and 1.29.13.
-* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.7.25.
-* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) versions 0.26.3 and 0.26.4.
-* Adds [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs) versions 4.1.2 and 4.2.0.
-* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.80.0-69.2.0.
+================
+File: docs/vendor/helm-image-registry.mdx
+================
+import StepCreds from "../partials/proxy-service/_step-creds.mdx"
+import StepCustomDomain from "../partials/proxy-service/_step-custom-domain.mdx"
-## v2024.12.31-0
+# Using the Proxy Registry with Helm Installations
-Released on December 31, 2024
+This topic describes how to use the Replicated proxy registry to proxy images for installations with the Helm CLI. For more information about the proxy registry, see [About the Replicated Proxy Registry](private-images-about).
-### New Features {#new-features-v2024-12-31-0}
-* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.31.4, 1.30.8, and 1.29.12.
-* Updates [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) to include runc v1.2.3.
-* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.26.2.
-* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.79.2-67.5.0.
+## Overview
-## v2024.12.04-0
+With the Replicated proxy registry, each customer's unique license can grant proxy access to images in an external private registry. To enable the proxy registry for Helm installations, you must create a Secret with `type: kubernetes.io/dockerconfigjson` to authenticate with the proxy registry.
-Released on December 4, 2024
+During Helm installations, after customers provide their license ID, a `global.replicated.dockerconfigjson` field that contains a base64 encoded Docker configuration file is automatically injected in the Helm chart values. You can use this `global.replicated.dockerconfigjson` field to create the required pull secret.
-### New Features {#new-features-v2024-12-04-0}
-* Adds support for RHEL 9.5 and Rocky Linux 9.5.
-* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.31.3, 1.30.7, 1.29.11, and 1.28.15.
-* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.26.1.
-* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.78.2-66.2.2.
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2024-11-07T00-52-20Z.
+For information about how Kubernetes uses the `kubernetes.io/dockerconfigjson` Secret type to authenticate to a private image registry, see [Pull an Image from a Private Registry](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) in the Kubernetes documentation.
-## v2024.11.08-0
+## Enable the Proxy Registry
-Released on November 8, 2024
+This section describes how to enable the proxy registry for applications deployed with Helm, including how to use the `global.replicated.dockerconfigjson` field that is injected during application deployment to create the required pull secret.
-### Bug Fixes {#bug-fixes-v2024-11-08-0}
-* Fixes an issue where the public-address flag provided to the install script is ignored and not included in the API server cert SANs.
+To enable the proxy registry:
-## v2024.11.07-0
+1. 
-Released on November 7, 2024
+1. 
-### New Features {#new-features-v2024-11-07-0}
-* Adds support for discovering the EC2 instance public IP address using AWS IMDSv2.
-* Updates [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) to include runc v1.2.1.
-* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.26.0.
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2024-10-29T16-01-48Z.
-* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.77.2-65.8.0.
+1. In your Helm chart templates, create a Kubernetes Secret to evaluate if the `global.replicated.dockerconfigjson` value is set, and then write the rendered value into a Secret on the cluster:
-## v2024.10.24-0
+   ```yaml
+   # /templates/replicated-pull-secret.yaml
-Released on October 24, 2024
+   {{ if .Values.global.replicated.dockerconfigjson }}
+   apiVersion: v1
+   kind: Secret
+   metadata:
+     name: replicated-pull-secret
+   type: kubernetes.io/dockerconfigjson
+   data:
+     .dockerconfigjson: {{ .Values.global.replicated.dockerconfigjson }}
+   {{ end }}
+   ```
-### New Features {#new-features-v2024-10-24-0}
-* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.25.7.
-* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.31.2, 1.30.6, 1.29.10, 1.28.15, and 1.27.16.
+   :::note
+   If you use the Replicated SDK, do not use `replicated` for the name of the image pull secret because the SDK automatically creates a Secret named `replicated`. Using the same name causes an error.
+   :::
-### Bug Fixes {#bug-fixes-v2024-10-24-0}
-* Fixes an issue that could cause the Velero add-on to fail to install on Ubuntu 22.04.
+1. Ensure that you have a field in your Helm chart values file for your image repository URL, and that any references to the image in your Helm chart access the field from your values file.
-## v2024.09.26-0
+   **Example**:
-Released on September 26, 2024
+   ```yaml
+   # values.yaml
+   ...
+   dockerconfigjson: '{{ .Values.global.replicated.dockerconfigjson }}'
+   images:
+     myapp:
+       # Add image URL in the values file
+       apiImageRepository: quay.io/my-org/api
+       apiImageTag: v1.0.1
+   ```
+   ```yaml
+   # /templates/deployment.yaml
-### New Features {#new-features-v2024-09-26-0}
-* Adds [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.57.2.
-* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.76.1-62.6.0.
-* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.31.1, 1.30.5, 1.29.9, 1.28.14, and 1.27.16.
+   apiVersion: apps/v1
+   kind: Deployment
+   metadata:
+     name: example
+   spec:
+     template:
+       spec:
+         containers:
+           - name: api
+             # Access the apiImageRepository field from the values file
+             image: {{ .Values.images.myapp.apiImageRepository }}:{{ .Values.images.myapp.apiImageTag }}
+   ```
-### Bug Fixes {#bug-fixes-v2024-09-26-0}
-* Fixes master CIS benchmark checks 1.1.13 and 1.1.14 for /etc/kubernetes/super-admin.conf file permissions.
+1. In your Helm chart templates, add the image pull secret that you created to any manifests that reference the private image:
-## v2024.09.06-0
-Released on September 6, 2024
-### Improvements {#improvements-v2024-09-06-0}
-* Improves preflight checks for Amazon Linux 2023 and Ubuntu 24.04.
-## v2024.09.03-0
-Released on September 3, 2024
-### New Features {#new-features-v2024-09-03-0}
-* Adds support for Amazon Linux 2023 and Ubuntu 24.04.
-* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.76.0-62.3.0.
-* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.25.6.
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2024-08-26T15-33-07Z.
-## v2024.08.26-0
-Released on August 26, 2024
-### New Features {#new-features-v2024-08-26-0}
-* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.27.16, 1.28.13, 1.29.8, 1.30.4, and 1.31.0.
-* Adds support for CentOS Stream 9.
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2024-08-17T01-24-54Z.
-### Bug Fixes {#bug-fixes-v2024-08-26-0}
-* Fixes an issue where [Flannel](https://kurl.sh/docs/add-ons/flannel) versions older than 0.24.2 failed to install on instances with VMware NICs.
-## v2024.08.12-0
-Released on August 12, 2024
-### New Features {#new-features-v2024-08-12-0}
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2024-08-03T04-33-23Z.
-* Updates included kustomize binary to v5.4.3.
-## v2024.08.07-0
-Released on August 7, 2024
-### New Features {#new-features-v2024-08-07-0}
-* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.30.3, 1.29.7, 1.28.12, and 1.27.16.
-* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.25.5.
-* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.33.
-* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.14.0.
-* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.75.2-61.6.0.
-* Adds [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs) version 4.1.0.
-* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.30.0.
-* Updates crictl in [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) to version 1.30.2.
+   ```yaml
+   # /templates/example.yaml
+   ...
+   {{ if .Values.global.replicated.dockerconfigjson }}
+   imagePullSecrets:
+     - name: replicated-pull-secret
+   {{ end }}
+   ```
-### Removals {#removals-v2024-08-07-0}
-* Removes all [Docker add-on](https://kurl.sh/docs/add-ons/docker) versions. Use the [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) instead. The Docker add-on was previously deprecated in March 2023.
-* Removes [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs) versions 1.6.0, 1.12.0, 2.6.0, and 2.12.9.
-* Removes [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) versions 0.33.0, 0.44.1, 0.46.0, 0.46.0-14.9.0, 0.47.0-15.2.0, 0.47.0-15.2.1, 0.47.0-16.0.1, 0.48.0-16.1.2, 0.48.0-16.10.0, 0.48.0-16.12.1, 0.49.0-17.0.0, 0.49.0-17.1.1, and 0.49.0-17.1.3.
+   **Example:**
+   ```yaml
+   # /templates/deployment.yaml
+   ...
+   image: "{{ .Values.images.myapp.apiImageRepository }}:{{ .Values.images.myapp.apiImageTag }}"
+   {{ if .Values.global.replicated.dockerconfigjson }}
+   imagePullSecrets:
+     - name: replicated-pull-secret
+   {{ end }}
+   name: myapp
+   ports:
+     - containerPort: 3000
+       name: http
+   ```
-## v2024.07.02-0
+1. Package your Helm chart and add it to a release. Promote the release to a development channel. See [Managing Releases with Vendor Portal](releases-creating-releases).
-Released on July 2, 2024
+1. Install the chart in a development environment to test your changes:
-### New Features {#new-features-v2024-07-02-0}
-* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.30.2, 1.29.6, 1.28.11, and 1.27.15.
-* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.25.4.
-* Removes `/var/lib/containerd` and `/var/lib/docker` as part of `tasks.sh reset`.
+   1. Create a local `values.yaml` file to override the default external registry image URL with the URL for the image on `proxy.replicated.com`.
+      The proxy registry URL has the following format: `proxy.replicated.com/proxy/APP_SLUG/EXTERNAL_REGISTRY_IMAGE_URL`
+      Where:
+      * `APP_SLUG` is the slug of your Replicated application.
+      * `EXTERNAL_REGISTRY_IMAGE_URL` is the path to the private image on your external registry.
-### Bug Fixes {#bug-fixes-v2024-07-02-0}
-* Fixes cluster subnets being changed on upgrade in some instances.
+      **Example**
+      ```yaml
+      # A local values.yaml file
+      ...
+      images:
+        myapp:
+          apiImageRepository: proxy.replicated.com/proxy/my-app/quay.io/my-org/api
+          apiImageTag: v1.0.1
+      ```
-## v2024.06.12-0
-Released on June 12, 2024
-### New Features {#new-features-v2024-06-12-0}
-* Updates [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.28.7 to use haproxy:2.9.7-alpine3.20.
-* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.32.
-* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.74.0-59.0.0.
-* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) versions 0.25.2 and 0.25.3.
-* Adds support for RHEL 8.10 and 9.4.
-* Adds support for Oracle Linux 8.10.
-* Adds support for Rocky Linux 9.4.
+      :::note
+      If you configured a custom domain for the proxy registry, use the custom domain instead of `proxy.replicated.com`. For more information, see [Using Custom Domains](custom-domains-using).
+      :::
+   1. 
Log in to the Replicated registry and install the chart, passing the local `values.yaml` file you created with the `--values` flag. See [Installing with Helm](install-with-helm).
+================
+File: docs/vendor/helm-install-airgap.mdx
+================
+import Prerequisites from "../partials/helm/_helm-install-prereqs.mdx"
-## v2024.05.17-0
+# Installing and Updating with Helm in Air Gap Environments
-Released on May 17, 2024
+## Overview
-### New Features {#new-features-v2024-05-17-0}
-* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.30.1, 1.29.5, 1.28.10, and 1.27.14.
-* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.73.2-58.5.2.
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2024-05-10T01-41-38Z.
-* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.29.0.
-* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.28.7.
-* Updates [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.13.2 to use local-volume-provider:v0.6.4.
+Replicated supports installing and updating Helm charts in air gap environments with no outbound internet access. In air gap Helm installations, customers are guided through the process with instructions provided in the [Replicated Download Portal](/vendor/releases-share-download-portal).
-## v2024.05.03-0
+When air gap Helm installations are enabled, an **Existing cluster with Helm** option is displayed in the Download Portal on the left nav. When selected, **Existing cluster with Helm** displays three tabs (**Install**, **Manual Update**, **Automate Updates**), as shown in the screenshot below:
-Released on May 3, 2024
+![download helm option](/images/download-helm.png)
-### New Features {#new-features-v2024-05-03-0}
-* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) version 1.30.0.
+[View a larger version of this image](/images/download-helm.png)
-### Bug Fixes {#bug-fixes-v2024-05-03-0}
-* Fixes the list of host package dependencies for RHEL 9.
-* Stops using default yum repos if all dependencies are already installed on RHEL 9.
-* Stops installing sub-dependencies on RHEL 9 systems.
+Each tab provides instructions for how to install, perform a manual update, or configure automatic updates, respectively.
-## v2024.04.19-0
+These installing and updating instructions assume that your customer is accessing the Download Portal from a workstation that can access the internet and their internal private registry. Direct access to the target cluster is not required.
-Released on April 19, 2024
+Each method assumes that your customer is familiar with `curl`, `docker`, `helm`, `kubernetes`, and a bit of `bash`, particularly for automating updates.
-### New Features {#new-features-v2024-04-19-0}
-* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.31.
-* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.29.4, 1.28.9, 1.27.13, and 1.26.15.
-* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.13.2.
+## Prerequisites
-## v2024.04.16-0
+Before you install, complete the following prerequisites:
-Released on April 16, 2024
-### New Features {#new-features-v2024-04-16-0}
-* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.28.6.
-* Adds [Goldpinger add-on](https://kurl.sh/docs/add-ons/goldpinger) version 3.10.0-6.2.0.
-* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.28.3.
-* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.25.1.
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2024-04-06T05-26-02Z.
-* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.73.1-58.1.1.
+* Reach out to your account rep to enable the Helm air gap installation feature.
-## v2024.04.11-0
+
-Released on April 11, 2024
+## Install
-### Bug Fixes {#new-features-v2024-04-11-0}
-* Fixes an issue where dependencies for the 'fio' package caused Amazon Linux 2 to become CentOS 7.
+The installation instructions provided in the Download Portal are designed to walk your customer through the first installation of your chart in an air gap environment.
-## v2024.04.03-1
+To install with Helm in an air gap environment:
-Released on April 3, 2024
+1. In the [Vendor Portal](https://vendor.replicated.com), go to **Customers > [Customer Name] > Reporting**.
-### New Features {#new-features-v2024-04-03-1}
-* Adds [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs) version 4.0.0.
-* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.28.2.
-* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.13.1.
+1. In the **Download portal** section, click **Visit download portal** to log in to the Download Portal for the customer.
-## v2024.04.03-0
+1. In the Download Portal left nav, click **Existing cluster with Helm**.
-Released on April 3, 2024
+   ![download helm option](/images/download-helm.png)
-### New Features {#new-features-v2024-04-03-0}
-* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.29.3, 1.28.8, 1.27.12, and 1.26.15.
-* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.24.4.
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2024-03-26T22-10-45Z.
-* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.72.0-57.2.0.
-* Updates [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.28.4 to use haproxy:2.9.6.
+   [View a larger version of this image](/images/download-helm.png)
-## v2024.02.23-0
+1. On the **Install** tab, in the **App version** dropdown, select the target application version to install.
-Released on February 23, 2024
+1. Run the first command to authenticate into the Replicated proxy registry with the customer's credentials (the `license_id`).
-### New Features {#new-features-v2024-02-23-0}
-* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.29.2, 1.28.7, 1.27.11, and 1.26.14.
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2024-02-17T01-15-57Z.
-* Updates [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.28.4 to use haproxy 2.9.5.
+1. Under **Get the list of images**, run the command provided to generate the list of images needed to install.
-## v2024.02.05-0
+1. For **(Optional) Specify registry URI**, provide the URI for an internal image registry where you want to push images. If a registry URI is provided, Replicated automatically updates the commands for tagging and pushing images with the URI.
-Released on February 5, 2024
+1. For **Pull, tag, and push each image to your private registry**, copy and paste the `docker` commands provided to pull, tag, and push each image to your internal registry.
-### New Features {#new-features-v2024-02-05-0}
-* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.29.1, 1.28.6, 1.27.10, and 1.26.13.
-* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) versions 0.24.1 and 0.24.2.
-* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) versions 1.6.27 and 1.6.28.
-* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.12.3.
-* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.71.2-56.6.0.
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2024-02-04T22-36-13Z.
-* Updates [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.28.4 to use haproxy 2.9.4.
-* Users of VMware clusters using the VMXNET3 NIC driver will see a new systemd .service file included that disables TCP checksum offloading on the flannel interface. This fixes an issue we have seen with dropped packets under certain combinations of VMware NIC and cluster configurations.
+   :::note
+   If you did not provide a URI in the previous step, ensure that you manually replace the image names in the `tag` and `push` commands with the target registry URI.
+   :::
-### Improvements {#improvements-v2024-02-05-0}
-* Installs an OpenEBS support bundle spec whenever the OpenEBS add-on is added to a kURL spec.
-* Installs a Velero support bundle spec whenever the Velero add-on is added to a kURL spec.
+1. Run the command to authenticate into the OCI registry that contains your Helm chart.
-## v2024.01.09-0
+1. Run the command to install the `preflight` plugin. This allows you to run preflight checks before installing to ensure that the installation environment meets the requirements for the application.
-Released on January 9, 2024
+1. For **Download a copy of the values.yaml file** and **Edit the values.yaml file**, run the `helm show values` command provided to download the values file for the Helm chart. Then, edit the values file as needed to customize the configuration of the given chart.
-### New Features {#new-features-v2024-01-09-0}
-* Updates [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) to use runc v1.1.11.
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2024-01-01T16-36-33Z.
-* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.28.4.
+   If you are installing a release that contains multiple Helm charts, repeat these steps to download and edit each values file.
-## v2024.01.02-0
+   :::note
+   For installations with multiple charts where two or more of the top-level charts in the release use the same name, ensure that each values file has a unique name to avoid installation errors. For more information, see [Installation Fails for Release With Multiple Helm Charts](helm-install-troubleshooting#air-gap-values-file-conflict) in _Troubleshooting Helm Installations_.
+   :::
-Released on January 2, 2024
+1. For **Determine install method**, select one of the options depending on your ability to access the internet and the cluster from your workstation.
-### New Features {#new-features-v2024-01-02-0}
-* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.29.0, 1.28.5, 1.28.4, 1.27.9, 1.27.8, 1.26.12, and 1.26.11.
-* Updates [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.28.3 to use HAProxy 2.9.1.
-* Adds [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs) version 3.10.0.
-* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.24.0.
-* Adds [Goldpinger add-on](https://kurl.sh/docs/add-ons/goldpinger) version 3.9.0-6.1.2.
+1. Use the commands provided and the values file or files that you edited to run preflight checks and then install the release.
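To make the shape of this procedure concrete, the following sketch shows the kind of commands the Download Portal generates for the pull/tag/push, preflight, and install steps. All names below (app slug, image, internal registry host, chart archive, and values file) are hypothetical placeholders; use the exact commands shown in the Download Portal for your application:

```bash
# Pull an image through the Replicated proxy registry, then tag and push it
# to the internal registry (hypothetical URIs)
docker pull proxy.replicated.com/proxy/my-app/quay.io/my-org/api:v1.0.1
docker tag proxy.replicated.com/proxy/my-app/quay.io/my-org/api:v1.0.1 \
  registry.internal.example.com/my-org/api:v1.0.1
docker push registry.internal.example.com/my-org/api:v1.0.1

# Run preflight checks against the edited values file, then install
helm template ./my-chart-1.0.0.tgz --values my-chart-values.yaml | kubectl preflight -
helm install my-chart ./my-chart-1.0.0.tgz --values my-chart-values.yaml
```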
-### Bug Fixes {#bug-fixes-v2024-01-02-0}
-* Fixes an issue where the 'minimum-node-count' parameter for Rook storage would require port 31880 to be opened between the node joining the cluster and a primary node.
-* Adds a preflight to Kubernetes 1.29.x+ to prevent installing KOTS versions prior to 1.96.2 due to version incompatibilities.
+## Perform Updates
-## v2023.12.14-0
+This section describes the processes of performing manual and automatic updates with Helm in air gap environments using the instructions provided in the Download Portal.
-Released on December 14, 2023
+### Manual Updates
-### New Features {#new-features-v2023-12-14-0}
-* Adds support for RHEL 9.3 and Rocky Linux 9.3.
-* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) versions 1.6.25 and 1.6.26.
-* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.12.2.
-* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.70.0-55.0.0.
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) versions RELEASE.2023-12-02T10-51-33Z and RELEASE.2023-12-09T18-17-51Z.
-* Updates [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.28.3 to use HAProxy 2.9.0.
+The manual update instructions provided in the Download Portal are similar to the installation instructions.
-### Bug Fixes {#bug-fixes-v2023-12-14-0}
-* Fixes an issue where Kubernetes 1.27 or later could prune the pause image being used, causing pods to fail.
+However, the first step prompts the customer to select their current version and the target version to install. This step takes [required releases](/vendor/releases-about#properties) into consideration, thereby guiding the customer to the versions that are upgradable from their current version.
-## v2023.11.20-0
+The additional steps are consistent with the installation process until the `preflight` and `install` commands, where customers provide the existing values from the cluster with the `helm get values` command. Your customer will then need to edit the `values.yaml` to reference the new image tags.
-Released on November 20, 2023
+If the new version introduces new images or other values, Replicated recommends that you explain this at the top of your release notes so that customers know they will need to make additional edits to the `values.yaml` before installing.
-### New Features {#new-features-v2023-11-20-0}
-* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.12.8.
-* Updates [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.28.3 to use HAProxy 2.8.4.
-* Adds support for RHEL and Oracle Linux 8.9.
+### Automate Updates
-### Bug Fixes {#bug-fixes-v2023-11-20-0}
-* Improves error reporting capabilities during the weave-to-flannel migration.
+The instructions in the Download Portal for automating updates use API endpoints that your customers can automate against.
-## v2023.11.17-0
+The instructions in the Download Portal provide customers with example commands that can be put into a script that they run periodically (nightly, weekly) using GitHub Actions, Jenkins, or other platforms.
-Released on November 17, 2023
+This method assumes that the customer has already done a successful manual installation, including the configuration of the appropriate `values`.
-### New Features {#new-features-v2023-11-17-0}
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-11-15T20-43-25Z.
-* Adds [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.57.1.
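For the manual-update flow described above, retrieving the existing values from the cluster typically looks like the following sketch (the release name and namespace are hypothetical):

```bash
# Reuse the values from the running release as the starting point for edits
helm get values my-app --namespace my-app > my-app-values.yaml
```

The customer can then edit the resulting file to reference the new image tags before upgrading.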
+After logging into the registry, the customer exports their current version and uses that to query an endpoint that provides the latest installable version number (either the next required release, or the latest release) and exports it as the target version. With the target version, they can now query an API for the list of images.
-## v2023.11.16-0
+With the list of images, the provided `bash` script automates the process of pulling updated images from the repository, tagging them with a name for an internal registry, and then pushing the newly tagged images to their internal registry.
-Released on November 16, 2023
+Unless the customer has set up the `values` to preserve the updated tag (for example, by using the `latest` tag), they need to edit the `values.yaml` to reference the new image tags. After doing so, they can log in to the OCI registry and perform the commands to install the updated chart.
-### New Features {#new-features-v2023-11-16-0}
-* Adds [Cert Manager add-on](https://kurl.sh/docs/add-ons/cert-manager) version 1.13.2.
-* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.69.1-53.0.0.
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-11-11T08-14-41Z.
-* Updates [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) to use runc v1.1.10.
+## Use a Harbor or Artifactory Registry Proxy
-## v2023.11.02-0
+You can integrate the Replicated proxy registry with an existing Harbor or jFrog Artifactory instance to proxy and cache images on demand. For more information, see [Using a Registry Proxy for Helm Air Gap Installations](using-third-party-registry-proxy).
-Released on November 2, 2023
+================
+File: docs/vendor/helm-install-overview.mdx
+================
+import Helm from "../partials/helm/_helm-definition.mdx"
-### New Features {#new-features-v2023-11-02-0}
-* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.12.7.
-* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.27.0.
-* Updates [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.12.1 to use local-volume-provider v0.5.5.
-* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.23.0.
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-11-01T18-37-25Z.
+# About Helm Installations with Replicated
-## v2023.10.26-0
+This topic provides an introduction to Helm installations for applications distributed with Replicated.
-Released on October 26, 2023
+## Overview
-### New Features {#new-features-v2023-10-26-0}
-* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.26.1.
-* Adds [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.57.0.
-* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.12.6.
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-10-16T04-13-43Z.
-* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.12.1.
+
-### Bug Fixes {#bug-fixes-v2023-10-26-0}
-* Improves the reliability of the reset task by adding directory removal retry logic.
-* If the `fio` host package cannot be installed, installation will continue without host filesystem performance metrics.
+Replicated strongly recommends that all applications are packaged using Helm because many enterprise users expect to be able to install an application with the Helm CLI.
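As a rough sketch of what that customer-facing install looks like (the app slug, channel, chart name, and credentials below are hypothetical placeholders, and the registry path conventions are described later in this topic):

```bash
# Authenticate to the Replicated registry with the customer's email and license ID,
# then install the chart from its channel-specific OCI path (hypothetical slugs)
helm registry login registry.replicated.com \
  --username customer@example.com \
  --password LICENSE_ID
helm install my-app oci://registry.replicated.com/my-app/beta/my-chart
```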
-## v2023.10.19-0
+Existing releases in the Replicated Platform that already support installation with Replicated KOTS and Replicated Embedded Cluster (and that include one or more Helm charts) can also be installed with the Helm CLI; it is not necessary to create and manage separate releases or channels for each installation method.
-Released on October 19, 2023
+For information about how to install with Helm, see:
+* [Installing with Helm](/vendor/install-with-helm)
+* [Installing and Updating with Helm in Air Gap Environments (Alpha)](helm-install-airgap)
-### Bug Fixes {#bug-fixes-v2023-10-19-0}
-* Fixes a bug where having multiple volumes attached to the same pod would cause some volumes not to be created on the correct node when migrating to OpenEBS.
+The following diagram shows how Helm charts distributed with Replicated are installed with Helm in online (internet-connected) customer environments:
-## v2023.10.12-0
+![diagram of a helm chart in a custom environment](/images/helm-install-diagram.png)
-Released on October 12, 2023
+[View a larger version of this image](/images/helm-install-diagram.png)
-### New Features {#new-features-v2023-10-12-0}
-* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.12.0.
-* Updates [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.26.0 to use Envoy v1.27.1.
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-10-07T15-07-38Z.
+As shown in the diagram above, when a release containing one or more Helm charts is promoted to a channel, the Replicated Vendor Portal automatically extracts any Helm charts included in the release. These charts are pushed as OCI objects to the Replicated registry. The Replicated registry is a private OCI registry hosted by Replicated at `registry.replicated.com`. For information about security for the Replicated registry, see [Replicated Registry Security](packaging-private-registry-security).
-### Bug Fixes {#bug-fixes-v2023-10-12-0}
-* Fixes a bug introduced in v2023.10.11-0 that prevented migrating from Longhorn to OpenEBS or Rook-Ceph storage when Prometheus was installed.
-* Fixes a race condition that could prevent Prometheus from being upgraded from very old versions.
+For example, if your application in the Vendor Portal is named My App and you promote a release containing a Helm chart with `name: my-chart` to a channel with the slug `beta`, then the Vendor Portal pushes the chart to the following location: `oci://registry.replicated.com/my-app/beta/my-chart`.
+Customers can install your Helm chart by first logging in to the Replicated registry with their unique license ID. This step ensures that any customer who installs your chart from the registry has a valid, unexpired license. After the customer logs in to the Replicated registry, they can run `helm install` to install the chart from the registry.
+During installation, the Replicated registry injects values into the `global.replicated` key of the parent Helm chart's values file. For more information about the values schema, see [Helm global.replicated Values Schema](helm-install-values-schema).
-## v2023.10.11-0
+## Limitations
-Released on October 11, 2023
+Helm installations have the following limitations:
-### New Features {#new-features-v2023-10-11-0}
-* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.12.5.
-### Improvements {#improvements-v2023-10-11-0}
-* The filesystem performance preflight check uses the `fio` package instead of an internal implementation.
-  To support the filesystem performance preflight check, the `fio` package is installed as part of the installation script.
-  :::note
-  The `fio` package is not automatically installed on Ubuntu 18.04 operating systems. This means that the filesystem performance preflight check does not run on Ubuntu 18.04 unless `fio` has been installed through other means.
-  :::
+* Installing with Helm in air gap environments is a Beta feature. For more information, see [Installing and Updating with Helm in Air Gap Environments](/vendor/helm-install-airgap).
+* Helm CLI installations do not provide access to any of the features of the Replicated KOTS installer, such as:
+  * The KOTS Admin Console
+  * Strict preflight checks that block installation
+  * Backup and restore with snapshots
+  * Required releases with the **Prevent this release from being skipped during upgrades** option
-### Bug Fixes {#bug-fixes-v2023-10-11-0}
-* When migrating from Longhorn to OpenEBS storage, PVCs remain on the node where the pod that uses the PVC was previously running.
+================
+File: docs/vendor/helm-install-release.md
+================
+import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx"
+import RegistryLogout from "../partials/replicated-sdk/_registry-logout.mdx"
+import HelmPackage from "../partials/helm/_helm-package.mdx"
-## v2023.10.09-0
+# Packaging a Helm Chart for a Release
-Released on October 9, 2023
+This topic describes how to package a Helm chart and the Replicated SDK into a chart archive that can be added to a release.
-### Bug Fixes {#bug-fixes-v2023-10-09-0}
-* Files in `/var/lib/kurl` are now properly owned by root:root and not the UID `1001`.
-* The kURL reset script now removes `/var/lib/cni` files.
+## Overview
-## v2023.10.04-0
+To add a Helm chart to a release, you first add the Replicated SDK as a dependency of the Helm chart and then package the chart and its dependencies as a `.tgz` chart archive.
-Released on October 4, 2023
+The Replicated SDK is a Helm chart that can be installed as a small service alongside your application. The SDK provides access to key Replicated features, such as support for collecting custom metrics on application instances. For more information, see [About the Replicated SDK](replicated-sdk-overview).
-### New Features {#new-features-v2023-10-04-0}
-* Adds [Registry add-on](https://kurl.sh/docs/add-ons/registry) version 2.8.3.
+## Requirements and Recommendations
-## v2023.10.03-0
+This section includes requirements and recommendations for Helm charts.
-Released on October 3, 2023
+### Chart Version Requirement
-### New Features {#new-features-v2023-10-03-0}
-* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.26.0.
-* Adds [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs) version 3.9.0.
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-09-30T07-02-29Z.
+The chart version in your Helm chart must comply with image tag format requirements. A valid tag can contain only lowercase and uppercase letters, digits, underscores, periods, and dashes.
-## v2023.09.26-0
+The chart version must also comply with the Semantic Versioning (SemVer) specification. When you run the `helm install` command without the `--version` flag, Helm retrieves the list of all available image tags for the chart from the registry and compares them using the SemVer comparison rules described in the SemVer specification.
The version that is installed is the version with the largest tag value. For more information about the SemVer specification, see the [Semantic Versioning](https://semver.org) documentation.
-Released on September 26, 2023
+### Chart Naming
-### New Features {#new-features-v2023-09-26-0}
-* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.12.4.
-* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.22.3.
-* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.24.
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-09-23T03-47-50Z.
+For releases that contain more than one Helm chart, Replicated recommends that you use unique names for each top-level Helm chart in the release. This aligns with Helm best practices and also avoids potential conflicts in filenames during installation that could cause the installation to fail. For more information, see [Installation Fails for Release With Multiple Helm Charts](helm-install-troubleshooting#air-gap-values-file-conflict) in _Troubleshooting Helm Installations_.
-### Bug Fixes {#bug-fixes-v2023-09-26-0}
-* Fixes an issue where the weave-to-flannel migration would provide prompts for remote nodes that incorrectly included the 'airgap' flag on non-airgap systems and the reverse.
+### Helm Best Practices
-## v2023.09.15-0
+Replicated recommends that you review the [Best Practices](https://helm.sh/docs/chart_best_practices/) guide in the Helm documentation to ensure that your Helm chart or charts follow the required and recommended conventions.
-Released on September 15, 2023
+## Package a Helm Chart {#release}
-### New Features {#new-features-v2023-09-15-0}
-* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.28.2, 1.27.6, 1.26.9, and 1.25.14.
-* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.68.0-51.0.0.
+This procedure shows how to create a Helm chart archive to add to a release. For more information about the Helm CLI commands in this procedure, see the [Helm Commands](https://helm.sh/docs/helm/helm/) section in the Helm documentation.
-## v2023.09.12-0
+To package a Helm chart so that it can be added to a release:
-Released on September 12, 2023
+1. In your application Helm chart `Chart.yaml` file, add the YAML below to declare the SDK as a dependency. If your application is installed as multiple charts, declare the SDK as a dependency of the chart that customers install first. Do not declare the SDK in more than one chart.
-### Bug Fixes {#bug-fixes-v2023-09-12-0}
-* Docker 20.10.x will properly use RHEL 8 packages when installing on RHEL 8 based operating systems. This is still considered an unsupported configuration.
+   
+   For additional guidelines related to adding the SDK as a dependency, see [Install the SDK as a Subchart](replicated-sdk-installing#install-the-sdk-as-a-subchart) in _Installing the Replicated SDK_.
-## v2023.09.07-0
+1. Update dependencies and package the chart as a `.tgz` file:
-Released on September 7, 2023
+   
-### New Features {#new-features-v2023-09-07-0}
-* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) version 1.24.17.
-* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.12.3.
-* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.67.1-50.3.1.
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-09-04T19-57-37Z.
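The dependency declaration and packaging commands referenced in the steps above follow standard Helm conventions. A sketch, assuming the SDK is published as the `replicated` chart in the Replicated registry (pin `version` to the SDK release you intend to ship; the value below is a placeholder):

```yaml
# Chart.yaml — declare the SDK as a dependency (version is a placeholder)
dependencies:
  - name: replicated
    repository: oci://registry.replicated.com/library
    version: 1.0.0
```

```bash
# Fetch the declared dependencies, then package the chart as a .tgz archive
helm dependency update .
helm package .
```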
+   :::note
+   
+   :::
-### Bug Fixes {#bug-fixes-v2023-09-07-0}
-* Velero 1.11.1 and later properly removes the 'restic' daemonset when upgrading. This is replaced by a new daemonset named 'node-agent'.
-* Velero 1.11.1 and later running with OpenEBS with no object storage creates no default backup location instead of a broken one.
-* Fixes an issue when merging an Installer patch file containing HostPreflights definitions.
+1. Add the `.tgz` file to a release. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Managing Releases with the CLI](releases-creating-cli).
+   After the release is promoted, your Helm chart is automatically pushed to the Replicated registry. For information about how to install a release with the Helm CLI, see [Installing with Helm](install-with-helm). For information about how to install Helm charts with KOTS, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about).
-## v2023.08.28-0
+================
+File: docs/vendor/helm-install-troubleshooting.mdx
+================
+# Troubleshooting Helm Installations with Replicated
-Released on August 28, 2023
+This topic provides troubleshooting information for common issues related to performing installations and upgrades with the Helm CLI.
-### New Features {#new-features-v2023-08-28-0}
-* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.28.1, 1.27.5, 1.26.8, and 1.25.13.
-* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.12.2.
-* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.11.1.
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-08-23T10-07-06Z.
+## Installation Fails for Release With Multiple Helm Charts {#air-gap-values-file-conflict}
-## v2023.08.23-0
+#### Symptom
-Released on August 23, 2023
+When installing a release with multiple Helm charts, the installation fails. You might also see the following error message:
-### New Features {#new-features-v2023-08-23-0}
-* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) version 1.28.0.
-* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.22.2.
-* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.22. As this package has not been published for Ubuntu 18.04, 1.6.21 will be installed there when 1.6.22 is selected.
+```
+Error: INSTALLATION FAILED: cannot re-use a name that is still in use
+```
-### Improvements {#improvements-v2023-08-23-0}
-* When PVC storage migrations from Rook or Longhorn to OpenEBS 3.7.0+ are required, the migrations will be performed before upgrading Kubernetes.
-* When object storage migrations from Rook to MinIO 2023-08-04T17-40-21Z+ are required, the migrations will be performed before upgrading Kubernetes.
+#### Cause
-## v2023.08.15-0
+In the Download Portal, each chart's values file is named according to the chart's name. For example, the values file for the Helm chart Gitea would be named `gitea-values.yaml`.
-Released on August 15, 2023
+If any top-level charts in the release use the same name, the associated values files will also be assigned the same name. This causes each new values file downloaded with the `helm show values` command to overwrite any previously-downloaded values file of the same name.
-### Bug Fixes {#bug-fixes-v2023-08-15-0} -* Fixes an issue where EKCO-created Rook-Ceph clusters would not mount volumes on RHEL 7 based operating systems. +#### Solution -## v2023.08.10-0 +Replicated recommends that you use unique names for top-level Helm charts in the same release. -Released on August 10, 2023 +Alternatively, if a release contains charts that must use the same name, convert one or both of the charts into subcharts and use Helm conditions to differentiate them. See [Conditions and Tags](https://helm.sh/docs/chart_best_practices/dependencies/#conditions-and-tags) in the Helm documentation. -### New Features {#new-features-v2023-08-10-0} -* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-08-04T17-40-21Z. -* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.66.0-48.3.1. -* Adds [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs) version 3.8.0. -* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.22.1. -* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.28.2. +================ +File: docs/vendor/helm-install-values-schema.mdx +================ +import SdkValues from "../partials/replicated-sdk/_sdk-values.mdx" -### Bug Fixes {#bug-fixes-v2023-08-10-0} -* Fixes an issue where the control plane would not get upgraded on remote primary nodes due to a missing file `/etc/kubernetes/audit.yaml`. -* Fixes an error `failed to pull image registry.k8s.io/coredns:v1.8.6` when upgrading from Kubernetes version 1.23.15 to 1.24.4. +# Helm global.replicated Values Schema -## v2023.08.07-0 +This topic describes the `global.replicated` values that are injected in the values file of an application's parent Helm chart during Helm installations with Replicated. -Released on August 7, 2023 +## Overview -### New Features {#new-features-v2023-08-07-0} -* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) versions 1.12.0, 1.12.1 +When a user installs a Helm application with the Helm CLI, the Replicated registry injects a set of customer-specific values into the `global.replicated` key of the parent Helm chart's values file. +The values in the `global.replicated` field include the following: -### Bug Fixes {#bug-fixes-v2023-08-07-0} -* Fixes an issue where storage could not be moved from Longhorn to OpenEBS at the same time as Kubernetes was upgraded to 1.25 or later. +* The fields in the customer's license, such as the field names, descriptions, signatures, values, and any custom license fields that you define. Vendors can use this license information to check entitlements before the application is installed. For more information, see [Checking Entitlements in Helm Charts Before Deployment](/vendor/licenses-reference-helm). -## v2023.07.31-0 +* A base64 encoded Docker configuration file. To proxy images from an external private registry with the Replicated proxy registry, you can use the `global.replicated.dockerconfigjson` field to create an image pull secret for the proxy registry. For more information, see [Proxying Images for Helm Installations](/vendor/helm-image-registry). -Released on July 31, 2023 +The following is an example of a Helm values file containing the `global.replicated` values: -### New Features {#new-features-v2023-07-31-0} -* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.66.0-48.1.2. -* Adds [Metrics Server add-on](https://kurl.sh/docs/add-ons/metrics-server) version 0.6.4. -* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.28.2. 
-* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.25.2.
+```yaml
+# Helm values.yaml
+global:
+  replicated:
+    channelName: Stable
+    customerEmail: username@example.com
+    customerName: Example Customer
+    dockerconfigjson: eyJhdXRocyI6eyJd1dIRk5NbEZFVGsxd2JGUmFhWGxYWm5scloyNVRSV1pPT2pKT2NGaHhUVEpSUkU1...
+    licenseFields:
+      expires_at:
+        description: License Expiration
+        name: expires_at
+        signature:
+          v1: iZBpESXx7fpdtnbMKingYHiJH42rP8fPs0x8izy1mODckGBwVoA...
+        title: Expiration
+        value: "2023-05-30T00:00:00Z"
+        valueType: String
+    licenseID: YiIXRTjiB7R...
+    licenseType: dev
+```

-### Bug Fixes {#bug-fixes-v2023-07-31-0}

+## `global.replicated` Values Schema

-* Resolves an issue for the OpenEBS to Rook storage migration feature that caused a delay in storage availability when using the feature on new installations. See [Known Issues](#known-issues-v2023-07-06-0) in _v2023.07.06-0_.

+The `global.replicated` values schema contains the following fields:

-## v2023.07.21-0

+| Field | Type | Description |
+| --- | --- | --- |
+| `channelName` | String | The name of the release channel |
+| `customerEmail` | String | The email address of the customer |
+| `customerName` | String | The name of the customer |
+| `dockerconfigjson` | String | Base64 encoded docker config json for pulling images |
+| `licenseFields` | | A list containing each license field in the customer's license. Each element under `licenseFields` has the following properties: `description`, `signature`, `title`, `value`, `valueType`. `expires_at` is the default license field that all licenses include. Other elements under `licenseFields` include the custom license fields added by vendors in the Vendor Portal. For more information, see [Managing Customer License Fields](/vendor/licenses-adding-custom-fields). |
+| `licenseFields.[FIELD_NAME].description` | String | Description of the license field |
+| `licenseFields.[FIELD_NAME].signature.v1` | Object | Signature of the license field |
+| `licenseFields.[FIELD_NAME].title` | String | Title of the license field |
+| `licenseFields.[FIELD_NAME].value` | String | Value of the license field |
+| `licenseFields.[FIELD_NAME].valueType` | String | Type of the license field value |
+| `licenseID` | String | The unique identifier for the license |
+| `licenseType` | String | The type of license, such as "dev" or "prod". For more information, see [Customer Types](/vendor/licenses-about#customer-types) in _About Customers and Licensing_. |

-Released on July 21, 2023

+## Replicated SDK Helm Values

-:::important
-kURL v2023.07.21-0 has a known issue for the OpenEBS to Rook storage migration feature. This issue is resolved in [v2023.07.31-0](#bug-fixes-v2023-07-31-0).
-:::

+

-### New Features {#new-features-v2023-07-21-0}
-* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) version(s) 1.27.4 1.26.7 1.25.12 1.24.16.
-* Updates [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) 1.27.3 to use crictl v1.27.1.
-* Updates [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) with runc v1.1.8.
+================
+File: docs/vendor/helm-native-about.mdx
+================
+import GitOpsLimitation from "../partials/helm/_gitops-limitation.mdx"
+import GitOpsNotRecommended from "../partials/gitops/_gitops-not-recommended.mdx"
+import TemplateLimitation from "../partials/helm/_helm-template-limitation.mdx"
+import VersionLimitation from "../partials/helm/_helm-version-limitation.mdx"
+import HooksLimitation from "../partials/helm/_hooks-limitation.mdx"
+import HookWeightsLimitation from "../partials/helm/_hook-weights-limitation.mdx"
+import Deprecated from "../partials/helm/_replicated-deprecated.mdx"
+import KotsHelmCrDescription from "../partials/helm/_kots-helm-cr-description.mdx"
+import ReplicatedHelmMigration from "../partials/helm/_replicated-helm-migration.mdx"
+import Helm from "../partials/helm/_helm-definition.mdx"

-### Known Issues {#known-issues-v2023-07-21-0}

+# About Distributing Helm Charts with KOTS

-kURL v2023.07.21-0 has a known issue for the OpenEBS to Rook storage migration feature. This issue is resolved in [v2023.07.31-0](#bug-fixes-v2023-07-31-0).

+This topic provides an overview of how Replicated KOTS deploys Helm charts, including an introduction to the KOTS HelmChart custom resource, limitations of deploying Helm charts with KOTS, and more.

-## v2023.07.11-0

+## Overview

-Released on July 11, 2023

+

-:::important
-kURL v2023.07.11-0 has a known issue for the OpenEBS to Rook storage migration feature. This issue is resolved in [v2023.07.31-0](#bug-fixes-v2023-07-31-0).
-:::

+KOTS can install applications that include:
+* One or more Helm charts
+* More than a single instance of any chart
+* A combination of Helm charts and Kubernetes manifests

-### Bug Fixes {#bug-fixes-v2023-07-11-0}
-* Fixes support for RHEL 9.2
-* Fixes adding the Registry add-on to multi-node clusters using Containerd.
+Replicated strongly recommends that all applications are packaged as Helm charts because many enterprise users expect to be able to install an application with the Helm CLI.

-### Known Issues {#known-issues-v2023-07-11-0}

+Deploying Helm charts with KOTS provides additional functionality not directly available with the Helm CLI, such as:
+* The KOTS Admin Console
+* Backup and restore with snapshots
+* Support for air gap installations
+* Support for embedded cluster installations on VMs or bare metal servers

-kURL v2023.07.11-0 has a known issue for the OpenEBS to Rook storage migration feature that causes a delay in storage availability when using the feature on new installations. This issue is resolved in [v2023.07.31-0](#bug-fixes-v2023-07-31-0).

+Additionally, for applications packaged as Helm charts, you can support Helm CLI and KOTS installations from the same release without having to maintain separate sets of Helm charts and application manifests. The following diagram demonstrates how a single release containing one or more Helm charts can be installed using the Helm CLI and KOTS:

-## v2023.07.10-0

+![One release being installed into three different customer environments](/images/helm-kots-install-options.png)

-Released on July 10, 2023

+[View a larger version of this image](/images/helm-kots-install-options.png)

-:::important
-kURL v2023.07.10-0 has a known issue for the OpenEBS to Rook storage migration feature. This issue is resolved in [v2023.07.31-0](#bug-fixes-v2023-07-31-0).
-:::

+For a tutorial that demonstrates how to add a sample Helm chart to a release and then install the release using both KOTS and the Helm CLI, see [Install a Helm Chart with KOTS and the Helm CLI](/vendor/tutorial-kots-helm-setup).

-### Improvements {#improvements-v2023-07-10-0}
-* Clarifies error message when installing on RHEL 9 variants without containerd.

+## How KOTS Deploys Helm Charts

-### Bug Fixes {#bug-fixes-v2023-07-10-0}
-* Improves messaging when a subnet cannot be automatically discovered.

+This section describes how KOTS uses the HelmChart custom resource to deploy Helm charts.

-### Known Issues {#known-issues-v2023-07-10-0}

+### About the HelmChart Custom Resource

-kURL v2023.07.10-0 has a known issue for the OpenEBS to Rook storage migration feature that causes a delay in storage availability when using the feature on new installations. This issue is resolved in [v2023.07.31-0](#bug-fixes-v2023-07-31-0).

+

-## v2023.07.06-0

+The HelmChart custom resource with `apiVersion: kots.io/v1beta2` (HelmChart v2) is supported with KOTS v1.99.0 and later. For more information, see [About the HelmChart kots.io/v1beta2 Installation Method](#v2-install) below.

-Released on July 6, 2023

+KOTS versions earlier than v1.99.0 can install Helm charts with `apiVersion: kots.io/v1beta1` of the HelmChart custom resource. The `kots.io/v1beta1` HelmChart custom resource is deprecated. For more information, see [Deprecated HelmChart kots.io/v1beta1 Installation Methods](#deprecated-helmchart-kotsiov1beta1-installation-methods) below.

-:::important
-kURL v2023.07.06-0 has a known issue for the OpenEBS to Rook storage migration feature. This issue is resolved in [v2023.07.31-0](#bug-fixes-v2023-07-31-0).
-:::

+### About the HelmChart v2 Installation Method {#v2-install}

-### New Features {#new-features-v2023-07-06-0}
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) versions RELEASE.2023-06-23T20-26-00Z and RELEASE.2023-06-29T05-12-28Z.
-* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.27.3.
-* Adds the ability to start a cluster with OpenEBS and MinIO, and then migrate data to Rook-Ceph after three or more nodes are ready. For more information, see [Migrating CSI](https://kurl.sh/docs/install-with-kurl/migrating-csi#automated-local-to-distributed-storage-migrations) in the kURL documentation.
+When you include a HelmChart custom resource with `apiVersion: kots.io/v1beta2` in a release, KOTS v1.99.0 or later does a Helm install or upgrade of the associated Helm chart directly.

-### Known Issues {#known-issues-v2023-07-06-0}

+The `kots.io/v1beta2` HelmChart custom resource does _not_ modify the chart during installation. This results in Helm chart installations that are consistent, reliable, and easy to troubleshoot. For example, you can reproduce the exact installation outside of KOTS by downloading a copy of the application files from the cluster with `kots download`, then using those files to install with `helm install`. You can also use `helm get values` to view the values that were used for the installation, as in the sketch below.

-kURL v2023.07.10-0 has a known issue for the OpenEBS to Rook storage migration feature that causes a delay in storage availability when using the feature on new installations. This issue is resolved in [v2023.07.31-0](#bug-fixes-v2023-07-31-0).

+The `kots.io/v1beta2` HelmChart custom resource requires configuration. For more information, see [Configuring the HelmChart Custom Resource v2](helm-native-v2-using).
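+A minimal sketch of that workflow, with hypothetical application slug, namespace, release name, and chart path:

+```bash
+# Download a copy of the rendered application files from the cluster
+kubectl kots download --namespace my-namespace --slug my-app-slug
+
+# Reproduce the install with Helm, using the downloaded chart
+# (the chart path depends on the layout of the downloaded files)
+helm install my-release ./my-app-slug/chart -n my-namespace
+
+# View the values that were used for an existing install
+helm get values my-release -n my-namespace
+```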
-## v2023.06.27-0 +For information about the fields and syntax of the HelmChart custom resource, see [HelmChart v2](/reference/custom-resource-helmchart-v2). -Released on June 27, 2023 +### Limitations -### New Features {#new-features-v2023-06-27-0} -* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) version(s) 1.27.3 1.26.6 1.25.11 1.24.15. -* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-06-19T19-52-50Z. -* Adds support for RHEL 8.8. -* Adds a preflight check to require an object store or storage provider when using the Registry add-on. -* Updates the Velero add-on version 1.11.0 with new replicated/local-volume-provider image version v0.5.4. +The following limitations apply when deploying Helm charts with the `kots.io/v1beta2` HelmChart custom resource: -### Bug Fixes {#bug-fixes-v2023-06-27-0} -* Fixes an issue that prevented migrating away from Rook-Ceph when the `dashboard` or `prometheus` modules were unhealthy. -* Fixes an issue preventing Velero deployment rollout when using kURL version `v2023.06.20-0`. +* Available only for Helm v3. -## v2023.06.20-0 +* Available only for KOTS v1.99.0 and later. -Released on June 20, 2023 +* The rendered manifests shown in the `rendered` directory might not reflect the final manifests that will be deployed to the cluster. This is because the manifests in the `rendered` directory are generated using `helm template`, which is not run with cluster context. So values returned by the `lookup` function and the built-in `Capabilities` object might differ. -:::important -Versions earlier than v2023.06.20-0 contain a known issue that might put the system in a critical state when migrating from Weave to Flannel. Use v2023.06.20-0 or later when migrating from Weave to Flannel. -::: +* When updating the HelmChart custom resource in a release from `kots.io/v1beta1` to `kots.io/v1beta2`, the diff viewer shows a large diff because the underlying file structure of the rendered manifests is different. -### New Features {#new-features-v2023-06-20-0} -* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.22.0. -* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) verison 1.11.8. -* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.25.0. -* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.65.2-46.8.0. -* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-06-09T07-32-12Z. -* Adds [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.56.17. +* Editing downstream Kustomization files to make changes to the application before deploying is not supported. This is because KOTS does not use Kustomize when installing Helm charts with the `kots.io/v1beta2` HelmChart custom resource. For more information about patching applications with Kustomize, see [Patching with Kustomize](/enterprise/updating-patching-with-kustomize). -### Improvements {#improvements-v2023-06-20-0} -* Enhances the migration from Weave to Flannel to preserve custom IP Tables rules. +* -### Bug Fixes {#bug-fixes-v2023-06-20-0} -* Fixes all previous Flannel versions by backporting the fixes introduced in the latest release v2023.06.09-0 to solve the problems faced when migrating from Weave to Flannel. -* Fixes MinIO PVC resizing race condition for versions equals to or greater than `2023-06-09T07-32-12Z`. -* Fixes the migration from Weave to Flannel to allow the installer to retry the migration when an error is faced. 
+ -## v2023.06.09-0 + For more information, see [KOTS Auto-GitOps Workflow](/enterprise/gitops-workflow). +## Support for Helm Hooks {#hooks} -Released on June 9, 2023 +KOTS supports the following hooks for Helm charts: +* `pre-install`: Executes after resources are rendered but before any resources are installed. +* `post-install`: Executes after resources are installed. +* `pre-upgrade`: Executes after resources are rendered but before any resources are upgraded. +* `post-upgrade`: Executes after resources are upgraded. +* `pre-delete`: Executes before any resources are deleted. +* `post-delete`: Executes after resources are deleted. +The following limitations apply to using hooks with Helm charts deployed by KOTS: -### New Features {#new-features-v2023-06-09-0} -* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.65.1-46.5.0. -* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.27.1. -* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.11.7. -* Adds [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs) version 3.7.0. -* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.65.1-46.6.0. +* -### Improvements {#improvements-v2023-06-09-0} -* Enhance proxy installations by enabling the use of HTTP_PROXY and HTTPS_PROXY environment variables during the configuration of the container runtime. +* -### Bug Fixes {#bug-fixes-v2023-06-09-0} -* Fixes issue where Pods got stuck when migrating from Weave to Flannel. This fix also ensures that Weave is properly removed during the migration. This bug fix applies to Flannel versions 0.21.5 and later. -* Fixes an issue that could cause Rook upgrades from version 1.0.4 to 1.7.x to fail with `error rook-ceph-migrator pod not found`. +For more information about Helm hooks, see [Chart Hooks](https://helm.sh/docs/topics/charts_hooks/) in the Helm documentation. -## v2023.05.30-0 +## Air Gap Installations -Released on May 30, 2023 +KOTS supports installation of Helm charts into air gap environments with configuration of the HelmChart custom resource [`builder`](/reference/custom-resource-helmchart-v2#builder) key. The `builder` key specifies the Helm values to use when building the air gap bundle for the application. -### Improvements {#improvements-v2023-05-30-0} -* Adds check to ensure connectivity to the registry with Containerd. This check helps identify misconfigurations, including invalid proxy settings. -* Adds a message informing the user of preflight check results when preflight checks have been ignored using the `host-preflight-ignore` flag. +For more information about how to configure the `builder` key to support air gap installations, see [Packaging Air Gap Bundles for Helm Charts](/vendor/helm-packaging-airgap-bundles). -### Bug Fixes {#bug-fixes-v2023-05-30-0} -* Fixes an issue where the Longhorn to OpenEBS migration preparation preflight check failed due to the following error: `error scaling down pods using longhorn volumes: error scaling down *v1.statefulset default/kotsadm-rqlite: Operation cannot be fulfilled on statefulsets.apps "kotsadm-rqlite": the object has been modified; please apply your changes to the latest version and try again`. -* Fixes an issue with Longhorn volumes were not able to be rolled back after a storage migration with the error: `error rolling back volume ... replicas: Operation cannot be fulfilled on volumes.longhorn.io ...`. 
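+A minimal sketch of the `builder` key, assuming a hypothetical chart name, version, and value:

+```yaml
+# kots.io/v1beta2 HelmChart custom resource
+apiVersion: kots.io/v1beta2
+kind: HelmChart
+metadata:
+  name: samplechart
+spec:
+  chart:
+    name: samplechart
+    chartVersion: 3.1.7
+  # Values applied only when the chart is rendered to collect images for the air gap bundle
+  builder:
+    postgres:
+      enabled: true
+```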
-* Fixes an issue uninstalling Weave by removing interfaces first and then removing lib/weave for Weave versions `0.21.5` and later. +## Resource Deployment Order -## v2023.05.25-0 +When installing an application that includes one or more Helm charts, KOTS always deploys standard Kubernetes manifests to the cluster _before_ deploying any Helm charts. For example, if your release contains a Helm chart, a CRD, and a ConfigMap, then the CRD and ConfigMap resources are deployed before the Helm chart. -Released on May 25, 2023 +For information about how to set the deployment order for Helm charts with KOTS, see [Orchestrating Resource Deployment](/vendor/orchestrating-resource-deployment). -### New Features {#new-features-v2023-05-25-0} -* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.11.6. -* Adds support for Oracle Linux 8.8. -* Adds support for Rocky Linux 9.2 and RHEL 9.2. +## Deprecated HelmChart kots.io/v1beta1 Installation Methods -### Improvements {#improvements-v2023-05-22-0} -* Improves logs for Registry add-on. +This section describes the deprecated Helm chart installation methods that use the HelmChart custom resource `apiVersion: kots.io/v1beta1`. -### Bug Fixes {#bug-fixes-v2023-05-25-0} -* Fixes issue where the additionalNoProxyAddresses value was not properly propagated through the additional-no-proxy-addresse flag in the outputted commands. -* Fixes OpenSSL calls used to configure Rook add-on by explicitly specifying the digest method in order to support RHEL 9.2. -* Fixes OpenSSL calls used to configure Registry add-on by explicitly specifying the digest method in order to support RHEL 9.2. +:::important + +::: -## v2023.05.22-0 +### useHelmInstall: true {#v1beta1} -Released on May 22, 2023 +:::note +This method was previously referred to as _Native Helm_. +::: -### New Features {#new-features-v2023-05-22-0} -* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.27.2, 1.26.5, 1.25.10 and 1.24.14. -* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) versions 0.65.1-45.27.2 and 0.65.1-45.28.0. -* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-05-18T00-05-36Z. -* Enables the [Collectd add-on](https://kurl.sh/docs/add-ons/collectd) for Ubuntu 22.04. +When you include version `kots.io/v1beta1` of the HelmChart custom resource with `useHelmInstall: true`, KOTS uses Kustomize to render the chart with configuration values, license field values, and rewritten image names. KOTS then packages the resulting manifests into a new Helm chart to install. For more information about Kustomize, see the [Kustomize documentation](https://kubectl.docs.kubernetes.io/). -### Improvements {#improvements-v2023-05-22-0} -* Adds further log information for Docker Proxy settings configuration. -* Adds further log information for containerd installations and configuration on version 1.5.10 or later. +The following diagram shows how KOTS processes Helm charts for deployment with the `kots.io/v1beta1` method: -### Bug Fixes {#bug-fixes-v2023-05-22-0} -* Fixes an issue with the [Weave add-on](https://kurl.sh/docs/add-ons/weave) for version 2.8.1-20230417 that prevented symbolic links to /opt/cni/bin from working. -* Fixes an issue that caused Rook upgrades from 1.0.4 to 1.8.x or later to fail with the error "pod has unsupported owner ReplicaSet". -* Improves stability of upgrades to Rook version 1.5.12. 
-* Updates the [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.11.0 with new replicated/local-volume-provider image version v0.5.3 to address the following high severity CVE: CVE-2022-29458.
+![Flow chart of a v1beta1 Helm chart deployment to a cluster](/images/native-helm-flowchart.png)

-## v2023.05.15-0

+[View a larger image](/images/native-helm-flowchart.png)

-Released on May 15, 2023

+As shown in the diagram above, when given a Helm chart, KOTS:

-### New Features {#new-features-v2023-05-15-0}
-* Adds [Registry add-on](https://kurl.sh/docs/add-ons/registry) version 2.8.2.
+- Uses Kustomize to merge instructions from KOTS and the end user into chart resources (see steps 2 - 4 below)
+- Packages the resulting manifest files into a new Helm chart (see step 5 below)
+- Deploys the new Helm chart (see step 5 below)

-### Bug Fixes {#bug-fixes-v2023-05-15-0}
-* Adds fixes to ensure that the Firewalld check verifies if it is enabled and active, and to provide more comprehensive information about the Firewalld check.
+To deploy Helm charts with version `kots.io/v1beta1` of the HelmChart custom resource, KOTS does the following:

-## v2023.05.11-0

+1. **Checks for previous installations of the chart**: If the Helm chart has already been deployed with a HelmChart custom resource that has `useHelmInstall: false`, then KOTS does not attempt to install the chart. The following error message is displayed if this check fails: `Deployment method for chart has changed`. For more information, see [HelmChart kots.io/v1beta1 (useHelmInstall: false)](#v1beta1-false) below.

-Released on May 11, 2023

+1. **Writes base files**: KOTS extracts Helm manifests, renders them with Replicated templating, and then adds all files from the original Helm tarball to a `base/charts/` directory.

-### New Features {#new-features-v2023-05-11-0}
-* Adds the ability to have fine-grained control over the Rook-Ceph node and device storage configuration through the [`rook.nodes`](https://kurl.sh/docs/add-ons/rook#per-node-storage-configuration) property of the specification.
-* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.21.
-* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.21.5.
-* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) verison 1.11.5.
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-05-04T21-44-30Z.
-* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) versions 1.24.4 and 1.25.0.
-* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) versions 0.65.1-45.26.0 and 0.65.1-45.27.1.
-* Upgrades [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) to version 0.27.1.

+   Under `base/charts/`, KOTS adds a Kustomization file named `kustomization.yaml` in the directories for each chart and subchart. KOTS uses these Kustomization files later in the deployment process to merge instructions from Kustomize into the chart resources. For more information about Kustomize, see the [Kustomize website](https://kustomize.io).

-### Bug Fixes {#bug-fixes-v2023-05-11-0}
-* Fixes an issue that causes installations to fail when running preflight checks and the file `/etc/kubernetes/admin.conf` is not found due to a previous failed Kubernetes installation.

+   The following screenshot from the Replicated Admin Console shows a `base/charts/` directory for a deployed application.
The `base/charts/` directory contains a Helm chart named postgresql with one subchart: -## v2023.05.08-0 + ![Base directory in the Admin Console](/images/native-helm-base.png) -Released on May 8, 2023 + In the screenshot above, a Kustomization file that targets the resources from the postgresql Helm chart appears in the `base/charts/postgresql/` directory: -### New Features {#new-features-v2023-05-08-0} -* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.27.1, 1.27.0, 1.26.4, 1.25.9 and 1.24.13. -* Adds [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs) version 3.6.0. -* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-04-20T17-56-55Z. -* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) versions 0.63.0-45.19.0, 0.63.0-45.20.0, 0.63.0-45.21.0. -* Adds [Goldpinger add-on](https://kurl.sh/docs/add-ons/goldpinger) version 3.7.0-6.0.1. -* Updates [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) runc version from v1.1.5 to v1.1.7. -* Adds the ability to bypass kubeadm upgrade preflight errors and warnings using the spec property [`kubernetes.upgradeIgnorePreflightErrors`](https://kurl.sh/docs/add-ons/kubernetes#advanced-install-options:~:text=upgradeIgnorePreflightErrors) or the flag [`--kubernetes-upgrade-ignore-preflight-errors=`](https://kurl.sh/docs/install-with-kurl/advanced-options#:~:text=internal%2Dload%2Dbalancer-,kubernetes%2Dupgrade%2Dignore%2Dpreflight%2Derrors,-Bypass%20kubeadm%20upgrade). -* Adds the ability to configure the maximum number of Pods that can run on each node (default 110) using the spec property [`kubernetes.maxPodsPerNode`](https://kurl.sh/docs/add-ons/kubernetes#advanced-install-options:~:text=the%20Kubernetes%20documentation.-,maxPodsPerNode,-The%20maximum%20number) or the flag [`--kubernetes-max-pods-per-node=`](https://kurl.sh/docs/install-with-kurl/advanced-options#:~:text=preflight%2Derrors%3DCoreDNSUnsupportedPlugins-,kubernetes%2Dmax%2Dpods%2Dper%2Dnode,-The%20maximum%20number). + ```yaml + apiVersion: kustomize.config.k8s.io/v1beta1 + kind: Kustomization + resources: + - secrets.yaml + - statefulset.yaml + - svc-headless.yaml + - svc.yaml + ``` + +1. **Writes midstream files with Kustomize instructions from KOTS**: KOTS then copies the directory structure from `base/charts/` to an `overlays/midstream/charts/` directory. The following screenshot shows an example of the midstream directory for the postgresql Helm chart: + + ![Midstream directory in the Admin Console UI](/images/native-helm-midstream.png) -### Improvements {#improvements-v2023-05-08-0} -* Reduces OpenEBS resource usage by removing NDM. -* Removes the `rook-upgrade` task. + As shown in the screenshot above, the midstream directory also contains a Kustomization file with instructions from KOTS for all deployed resources, such as image pull secrets, image rewrites, and backup labels. For example, in the midstream Kustomization file, KOTS rewrites any private images to pull from the Replicated proxy registry. -### Bug Fixes {#bug-fixes-v2023-05-08-0} -* Fixes an issue on RHEL 7 based distributions that caused the script to improperly calculate the bundle size when upgrading multiple Kubernetes versions and print the message 'total_archive_size + "935": syntax error: operand expected (error token is ""935"")'. -* Fixes an issue where high availability MinIO deployments were not migrated to Rook's object store. 
-* Fixes an issue that caused Rook upgrades of more than one minor version to upgrade to the latest patch version for the target minor version rather than to the specified patch version. -* Fixes an issue when upgrading Rook from v1.4.x or later in an air gap environment that caused the script to fail with ImagePullBackoff errors due to the failure to prompt the user to load images on remote nodes. - -## v2023.04.24-0 - -Released on April 24, 2023 - -### New Features {#new-features-v2023-04-24-0} -* Updates the [Kubernetes add-on](https://kurl.sh/docs/add-ons/kubernetes) to support upgrading Kubernetes by more than two minor versions at the same time using a single spec. For air gap instances, users must provdide a package with the required assets during upgrade. For more information, see [Upgrading](https://kurl.sh/docs/install-with-kurl/upgrading#kubernetes) in the kURL documentation. -* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) verison 1.11.4. -* Adds [Weave add-on](https://kurl.sh/docs/add-ons/weave) versions 2.8.1-20230417 and 2.6.5-20230417 to address the following high and critical severity CVEs: CVE-2023-27536, CVE-2023-27533, CVE-2023-27534, CVE-2023-27535. -* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) versions 0.63.0-45.10.1 and 0.63.0-45.15.0. -* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-04-13T03-08-07Z. + The following shows an example of a midstream Kustomization file for the postgresql Helm chart: -### Improvements {#improvements-v2023-04-24-0} -* Updates kURL to use the `kurl-install-directory` specified for host os repositories. Previously, this was hardcoded to `/var/lib/kurl`. - -### Bug Fixes {#bug-fixes-v2023-04-24-0} -* Fixes an issue to ensure that the tasks.sh reset script respects the `kurl-install-directory` flag or discovers the directory from the cluster. -* Fixes an issue that caused the installation script to prompt for a load balancer address when running the installer with `ekco-enable-internal-load-balancer`. - -## v2023.04.13-0 + ```yaml + apiVersion: kustomize.config.k8s.io/v1beta1 + bases: + - ../../../../base/charts/postgresql + commonAnnotations: + kots.io/app-slug: helm-test + images: + - name: gcr.io/replicated-qa/postgresql + newName: proxy.replicated.com/proxy/helm-test/gcr.io/replicated-qa/postgresql + kind: Kustomization + patchesStrategicMerge: + - pullsecrets.yaml + resources: + - secret.yaml + transformers: + - backup-label-transformer.yaml + ``` -Released on April 13, 2023 + As shown in the example above, all midstream Kustomization files have a `bases` entry that references the corresponding Kustomization file from the `base/charts/` directory. -### New Features {#new-features-v2023-04-13-0} -* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) verison 1.11.3. -* Adds [Weave add-on](https://kurl.sh/docs/add-ons/weave) version 2.8.1-20230406 to address the following high severity CVE: CVE-2023-0464. -* Updates the [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.10.2 with new kurlsh/s3cmd image to address the following high severity CVE: CVE-2023-0464. +1. 
**Writes downstream files for end user Kustomize instructions**: KOTS then creates an `overlays/downstream/this-cluster/charts` directory and again copies the directory structure of `base/charts/` to this downstream directory: -### Bug Fixes {#bug-fixes-v2023-04-13-0} -* Fixes an issue that causes migrations from Docker to containerd on multi-node clusters to fail with the error "Downgrading containerd is not supported". -* Fixes an issue that could cause installations to fail with the error "/var/lib/kurl does not exist" when using the `kurl-install-directory` flag. - -## v2023.04.11-0 - -Released on April 11, 2023 - -### New Features {#new-features-v2023-04-11-0} -* Adds support for RHEL and Rocky Linux 9. -* Makes the [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) mandatory. -* Updates kURL to always install the latest version of the [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) add-on, even if the EKCO add-on is not specified or if a different version is specified. -* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.26.5, and removes all versions earlier than 0.26.5. -* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-03-24T21-41-23Z. -* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.63.0-45.8.0, 0.63.0-45.8.1, and 0.63.0-45.9.1. -* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.24.3. -* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.20. -* Updates the [Registry add-on](https://kurl.sh/docs/add-ons/registry) version 2.8.1 with new kurlsh/s3cmd image to address the following high severity CVE: CVE-2023-0464. - -### Improvements {#improvements-v2023-04-11-0} -* Adds a preflight check to ensure sufficient disk space is available for the Containerd, Rook, and OpenEBS add-ons. -* Adds a preflight check to ensure Kubernetes API Server is healthy prior to Kubernetes upgrades. -* Adds a preflight check to ensure Kubernetes API Server load balancer health prior to Kubernetes upgrades. -* Adds a preflight check to ensure Kubernetes API and ETCD certificates are present and valid prior to Kubernetes upgrades. -* Adds a preflight check to ensure nodes are healthy prior to Kubernetes upgrades. -* Adds a preflight check to ensure that kURL Pod(s) are running prior to Kubernetes upgrades. -* Adds a preflight check to ensure that MinIO pods are running prior to migrating object store data from Rook. -* Adds a preflight check to ensure that OpenEBS and Rook-Ceph are healthy prior to migrating from Rook to OpenEBS. -* Adds a preflight check to ensure that Longhorn and OpenEBS are healthy prior to migrating from Longhorn to OpenEBS. -* Adds a preflight check to ensure that Longhorn and Rook-Ceph are healthy prior to migrating from Longhorn to Rook Ceph. -* Adds a preflight check to prevent unsupported migrations from Longhorn to OpenEBS versions earlier than 3.3.0 and without an object store when Registry is present. -* Adds the ability to upgrade the containerd add-on in a kURL cluster by two minor versions at the same time. - -### Bug Fixes {#bug-fixes-v2023-04-11-0} -* Fixes an issue that could cause rerunning the install script to fail if the Kubernetes binaries are installed but the cluster was never installed or configured. - -## v2023.03.28-0 - -Released on March 28, 2023 - -### New Features {#new-features-v2023-03-28-0} -* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-03-20T20-16-18Z. 
-* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) verison 1.11.2.
-* Adds [Weave add-on](https://kurl.sh/docs/add-ons/weave) version.
-* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.21.4.
-* Adds [Metrics Server add-on](https://kurl.sh/docs/add-ons/metrics-server) version 0.6.3.

+   ![Downstream directory in the Admin Console UI](/images/native-helm-downstream.png)

-### Improvements {#improvements-v2023-03-28-0}
-* Adds preflight checks to prevent installations without the `kotsadm.disableS3` option set to `true` from continuing without an Object Store.
-* Adds preflight checks to prevent migrating from Rook to OpenEBS without MinIO when the Registry add-on is included in the spec.
-* Removes the optional flag `force-reapply-addons` and makes it the default behavior to reapply all add-ons regardless of whether or not they change.

-### Bug Fixes {#bug-fixes-v2023-03-28-0}
-* Fixes an issue when upgrading from Kubernetes releases that caused the script to fail with error "connection refused" and the message "couldn't retrieve DNS addon deployments"
-* Fixes an issue that could cause the installation script to exit with an error when running preflights if kubectl is installed but Kubernetes is not installed or the cluster is down.
-* Fixes an issue that prevented Rook from being fully removed after a migration to another PV provisioner.
-* Fixes an issue that allowed the object store to be migrated more than one time during a storage migration.

+   As shown in the screenshot above, each chart and subchart directory in the downstream directory also contains a Kustomization file. These downstream Kustomization files contain only a `bases` entry that references the corresponding Kustomization file from the midstream directory. For example:

+   ```yaml
+   apiVersion: kustomize.config.k8s.io/v1beta1
+   bases:
+   - ../../../../midstream/charts/postgresql
+   kind: Kustomization
+   ```

+   End users can edit the downstream Kustomization files to make changes before deploying the application. Any instructions that users add to the Kustomization files in the downstream directory take priority over midstream and base Kustomization files. For more information about how users can make changes before deploying, see [Patching with Kustomize](/enterprise/updating-patching-with-kustomize).

-## v2023.03.21-0

+1. **Deploys the Helm chart**: KOTS runs `kustomize build` for any Kustomization files in the `overlays/downstream/charts` directory. KOTS then packages the resulting manifests into a new chart for Helm to consume.

-Released on March 21, 2023

+   Finally, KOTS runs `helm upgrade -i RELEASE CHART --timeout 3600s -n NAMESPACE`, where `RELEASE`, `CHART`, and `NAMESPACE` are the release name, the packaged chart, and the target namespace. The Helm binary processes hooks and weights, applies manifests to the Kubernetes cluster, and saves a release secret similar to `sh.helm.release.v1.chart-name.v1`. Helm uses this secret to track upgrades and rollbacks of applications.
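+   For example, the release that KOTS creates can be inspected with standard Helm commands. A sketch assuming a chart named `postgresql` released into the `default` namespace:

+   ```bash
+   # The release secret that Helm saved for revision 1 of the release
+   kubectl get secret sh.helm.release.v1.postgresql.v1 -n default
+
+   # The upgrade and rollback history tracked through that secret
+   helm history postgresql -n default
+   ```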
-### Bug Fixes {#bug-fixes-v2023-03-21-0}
-* Fixes an issue when migrating from Rook that caused the Rook Ceph preflight health check to incorrectly report that Ceph was unhealthy because Ceph version information could not be found. This issue was caused by a bug in Rook Ceph versions earlier than 1.4.8.
-* Fixes broken upgrades caused by not being able to uninstall Rook. Upgrade failures are highlighted in the console with further information.
-* Fixes an issue where the installation script got stuck when migrating from Rook. Added timeouts with further information displayed in the console.
-* Fixes a bug where Rook data was not removed after Rook Ceph was removed from the cluster.
-* Fixes a bug in the Kubernetes installer v2023.03.20-0 where the registry add-on failed to create the object store.

+### useHelmInstall: false {#v1beta1-false}

-## v2023.03.20-0

+:::note
+This method was previously referred to as _Replicated Helm_.
+:::

-Released on March 20, 2023

+When you use version `kots.io/v1beta1` of the HelmChart custom resource with `useHelmInstall: false`, KOTS renders the Helm templates and deploys them as standard Kubernetes manifests using `kubectl apply`. KOTS also has additional functionality for specific Helm hooks. For example, when KOTS encounters an upstream Helm chart with a `helm.sh/hook-delete-policy` annotation, it automatically adds the same `kots.io/hook-delete-policy` to the Job object.

-### New Features {#new-features-v2023-03-20-0}
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-03-13T19-46-17Z.
-* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.24.2.
-* Adds [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs) version 3.5.0.
-* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.10.2.

+The resulting deployment is composed of standard Kubernetes manifests. Therefore, cluster operators can view the exact differences between what is currently deployed and what an update will deploy.

-### Improvements {#improvements-v2023-03-20-0}
-* Adds checks to ensure that Rook Ceph and its Object Store are healthy before migrating from Rook to OpenEBS and Minio.
-* Adds checks and better log information when removing Rook or Longhorn to notify users of the reasons for a failure.

+### Limitations {#replicated-helm-limitations}

-### Bug Fixes {#bug-fixes-v2023-03-20-0}
-* Fixes an issue where the weave-to-flannel-\{primary,secondary\} tasks fail with "Flannel images not present...".

+This section lists the limitations for version `kots.io/v1beta1` of the HelmChart custom resource.

+#### kots.io/v1beta1 (useHelmInstall: true) Limitations

-## v2023.03.13-0

+The following limitations apply when using version `kots.io/v1beta1` of the HelmChart custom resource with `useHelmInstall: true`:

-Released on March 13, 2023

+*

-### New Features {#new-features-v2023-03-13-0}
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-03-09T23-16-13Z.
-* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.21.3.
-* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.63.0-45.7.1.

+* Available only for Helm v3.

-### Bug Fixes {#bug-fixes-v2023-03-13-0}
-* Fixes upgrade timeouts by adding a check to wait for Rook rollout from 1.5.9 to 1.10.8 as is done for Rook 1.10.11.
-* Adds a preflight check to prevent unsupported migrations from Rook to OpenEBS versions earlier than 3.3.0.
-* Fixes an issue where MinIO failed to update when running in high availability mode.
-* Fixes issue `failed to find plugin /opt/cni/bin/weave-net` when the installer is checking cluster networking by deleting the weave-net pod when the binary is not found to let it be re-created successfully. -* Increases timeout from 5 to 10 minutes waiting for sync-object-store pod to complete as part of the object store migration from Rook to OpenEBS. +* -## v2023.03.07-0 + For more information, see [KOTS Auto-GitOps Workflow](/enterprise/gitops-workflow). -Released on March 7, 2023 +* -### New Features {#new-features-v2023-03-07-0} -* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.26.2, 1.25.7, 1.24.11, 1.23.17, and 1.22.17. -* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) versions 0.63.0-45.3.0, 0.63.0-45.4.0, and 0.63.0-45.5.0. -* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-02-27T18-10-45Z. -* Adds [Goldpinger add-on](https://kurl.sh/docs/add-ons/goldpinger) versions 3.7.0-5.6.0. -* Adds [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.56.16. +* -### Improvements {#improvements-v2023-03-07-0} -* Adds colors to the preflight checks results to improve the user experience. +* -### Bug Fixes {#bug-fixes-v2023-03-07-0} -* Fixes an issue when migrating from Weave to Flannel that incorrectly prompts to load images with the airgap flag when online and without when offline. -* Fixes an issue that causes an HA install to fail after a node has been reset with error "stat: cannot stat '/etc/kubernetes/manifests/haproxy.yaml': No such file or directory". +* -## v2023.02.23-0 + For more information, see [helmVersion](/reference/custom-resource-helmchart#helmversion) in _HelmChart_. -Released on February 23, 2023 +#### kots.io/v1beta1 (useHelmInstall: false) Limitations {#v1beta1-false-limitations} -### New Features {#new-features-v2023-02-23-0} -* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.63.0-45.2.0. -* Adds [Weave add-on](https://kurl.sh/docs/add-ons/weave) versions 2.6.5-20230222 and 2.8.1-20230222 to address the following high severity CVEs: CVE-2022-4450, CVE-2023-0215, CVE-2023-0286. -* Updates [Registry add-on](https://kurl.sh/docs/add-ons/registry) version 2.8.1 with new kurlsh/s3cmd image to address the following high severity CVEs: CVE-2022-4450, CVE-2023-0215, CVE-2023-0286. -* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-02-22T18-23-45Z. -* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.26.4. -* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.10.1. +The following limitations apply when using version `kots.io/v1beta1` of the HelmChart custom resource with `useHelmInstall: false`: -### Improvements {#improvements-v2023-02-23-0} -* kURL no longer chooses the node name and instead defers to kubeadm to infer the node name from the hostname. +* -### Bug Fixes {#bug-fixes-v2023-02-23-0} -* Fixes an issue where EKCO serialized an incorrect kubeadm `ClusterStatus(kubeadm.k8s.io/v1beta2)` config when purging a node with [`ekco-purge-node.sh`](https://kurl.sh/docs/add-ons/ekco#purge-nodes) for Kubernetes version 1.21 and earlier. Moreover, this bug prevented adding new nodes to the Kuberenetes cluster. +* -## v2023.02.21-0 +* -Released on February 21, 2023 + For more information, see [helmVersion](/reference/custom-resource-helmchart#helmversion) in _HelmChart_. 
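+For reference, a minimal sketch of the deprecated `kots.io/v1beta1` HelmChart custom resource, showing where the `useHelmInstall` and `helmVersion` fields described above are set (chart name and version are hypothetical):

+```yaml
+apiVersion: kots.io/v1beta1
+kind: HelmChart
+metadata:
+  name: samplechart
+spec:
+  chart:
+    name: samplechart
+    chartVersion: 3.1.7
+  helmVersion: v3
+  useHelmInstall: true
+```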
-### New Features {#new-features-v2023-02-21-0} -* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-02-17T17-52-43Z. +================ +File: docs/vendor/helm-native-v2-using.md +================ +import KotsHelmCrDescription from "../partials/helm/_kots-helm-cr-description.mdx" -### Bug Fixes {#bug-fixes-v2023-02-21-0} -* Fixes an issue that causes the install script to fail with error "ctr: flags --detach and --rm cannot be specified together" when using Containerd 1.6.18 and the EKCO Internal Load Balancer. +# Configuring the HelmChart Custom Resource v2 -## v2023.02.17-0 - Withdrawn +This topic describes how to configure the Replicated HelmChart custom resource version `kots.io/v1beta2` to support Helm chart installations with Replicated KOTS. -Released on February 17, 2023 +## Workflow -:::important -v2023.02.17-0 has been removed because Containerd 1.6.18 is incompatible with high availability installations using the EKCO internal load balancer. -::: +To support Helm chart installations with the KOTS `kots.io/v1beta2` HelmChart custom resource, do the following: +1. Rewrite image names to use the Replicated proxy registry. See [Rewrite Image Names](#rewrite-image-names). +1. Inject a KOTS-generated image pull secret that grants proxy access to private images. See [Inject Image Pull Secrets](#inject-image-pull-secrets). +1. Add a pull secret for any Docker Hub images that could be rate limited. See [Add Pull Secret for Rate-Limited Docker Hub Images](#docker-secret). +1. Configure the `builder` key to allow your users to push images to their own local registries. See [Support Local Image Registries](#local-registries). +1. (KOTS Existing Cluster and kURL Installations Only) Add backup labels to your resources to support backup and restore with the KOTS snapshots feature. See [Add Backup Labels for Snapshots](#add-backup-labels-for-snapshots). + :::note + Snapshots is not supported for installations with Replicated Embedded Cluster. For more information about configuring disaster recovery for Embedded Cluster, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery). + ::: -### New Features {#new-features-v2023-02-17-0} -* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.63.0-45.1.0 and 0.63.0-45.1.1. -* Adds [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs) version 3.4.0. -* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.18. +## Task 1: Rewrite Image Names {#rewrite-image-names} -### Bug Fixes {#bug-fixes-v2023-02-17-0} -* Fixes an issue that causes Rook multi-version upgrades to fail if add-on airgap packages exist on the server prior to upgrading. -* Fixes a rare race condition that could cause data loss when migrating between storage providers. -## v2023.02.16-0 +Configure the KOTS HelmChart custom resource `values` key so that KOTS rewrites the names for both private and public images in your Helm values during deployment. 
This allows images to be accessed at one of the following locations, depending on where they were pushed: +* The [Replicated proxy registry](private-images-about) (`proxy.replicated.com` or your custom domain) +* A public image registry +* Your customer's local registry +* The built-in registry used in Replicated Embedded Cluster or Replicated kURL installations in air-gapped environments -Released on February 16, 2023 +You will use the following KOTS template functions to conditionally rewrite image names depending on where the given image should be accessed: +* [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry): Returns true if the installation environment is configured to use a local image registry. HasLocalRegistry is always true in air gap installations. HasLocalRegistry is also true in online installations if the user configured a local private registry. +* [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost): Returns the host of the local registry that the user configured. Alternatively, for air gap installations with Embedded Cluster or kURL, LocalRegistryHost returns the host of the built-in registry. +* [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace): Returns the namespace of the local registry that the user configured. Alternatively, for air gap installations with Embedded Cluster or kURL, LocalRegistryNamespace returns the namespace of the built-in registry. -### New Features {#new-features-v2023-02-16-0} -* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-02-10T18-48-39Z. -* Warn the user if there is likely to be insufficient space to upgrade Rook multiple versions. +
    + What is the registry namespace? + + The registry namespace is the path between the registry and the image name. For example, `images.mycompany.com/namespace/image:tag`. +
    -### Bug Fixes {#bug-fixes-v2023-02-16-0} -* Fixes a misconfiguration in the kubelet that caused Kubernetes to garbage collect the pause image, which caused new containers to fail to start and get stuck in ContainerCreating. +### Task 1a: Rewrite Private Image Names -## v2023.02.14-0 +For any private images used by your application, configure the HelmChart custom resource so that image names are rewritten to either the Replicated proxy registry (for online installations) or to the local registry in the user's installation environment (for air gap installations or online installations where the user configured a local registry). -Released on February 14, 2023 +To rewrite image names to the location of the image in the proxy registry, use the format `/proxy//`, where: +* `` is `proxy.replicated.com` or your custom domain. For more information about configuring a custom domain for the proxy registry, see [Using Custom Domains](/vendor/custom-domains-using). +* `` is the unique application slug in the Vendor Portal +* `` is the path to the image in your registry -### New Features {#new-features-v2023-02-14-0} -* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.10.1. -* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.24.1. -* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.21.1. -* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-02-09T05-16-53Z. +For example, if the private image is `quay.io/my-org/nginx:v1.0.1` and `images.mycompany.com` is the custom proxy registry domain, then the image name should be rewritten to `images.mycompany.com/proxy/my-app-slug/quay.io/my-org/nginx:v1.0.1`. -### Bug Fixes {#bug-fixes-v2023-02-14-0} -* Fixes a broken link to the Rook zapping procedure in the output of the installation script. -* Changes the kubelet service file permissions to 600 to fix CIS benchmark failure 4.1.1: "Ensure that the kubelet service file permissions are set to 600 or more restrictive". -* Fixes an issue where containers were stuck in a ContainerCreating state after a Kubernetes upgrade. +For more information, see the example below. -## v2023.02.06-1 +#### Example -Released on February 6, 2023 +The following HelmChart custom resource uses the KOTS [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry), [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost), and [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace) template functions to conditionally rewrite an image registry and repository depending on if a local registry is used: -### Bug Fixes {#bug-fixes-v2023-02-06-1} -* Fixes an issue in [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.9.5 where restores fail to pull the `velero-restic-restore-helper` image in air gapped environments. +```yaml +# kots.io/v1beta2 HelmChart custom resource -## v2023.02.06-0 +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: samplechart +spec: + ... 
+  values:
+    image:
+      # If a registry is configured by the user or by Embedded Cluster/kURL, use that registry's hostname
+      # Else use proxy.replicated.com or your custom proxy registry domain
+      registry: '{{repl HasLocalRegistry | ternary LocalRegistryHost "images.mycompany.com" }}'
+      # If a registry is configured by the user or by Embedded Cluster/kURL, use that registry namespace
+      # Else use the image's namespace at the proxy registry domain
+      repository: '{{repl HasLocalRegistry | ternary LocalRegistryNamespace "proxy/my-app/quay.io/my-org" }}/nginx'
+      tag: v1.0.1
+```

-Released on February 6, 2023

+The `spec.values.image.registry` and `spec.values.image.repository` fields in the HelmChart custom resource above correspond to `image.registry` and `image.repository` fields in the Helm chart `values.yaml` file, as shown below:

-### New Features {#new-features-v2023-02-06-0}
-* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.21.0.

+```yaml
+# Helm chart values.yaml file

-### Improvements {#improvements-v2023-02-06-0}
-* If there are multiple network interfaces on a single host, the [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) prompts users to choose an interface or use the interface of the [private-address](https://kurl.sh/docs/install-with-kurl/advanced-options#reference) flag when specified, instead of using the default gateway interface.
-* Prompts users when preflight warnings occur, and allows users to cancel the installation and fix the root cause before resuming the installation.

+image:
+  registry: quay.io
+  repository: my-org/nginx
+  tag: v1.0.1
+```

-### Bug Fixes {#bug-fixes-v2023-02-06-0}
-* Fixes an issue where the Prometheus adapter was not able to install custom metrics due to an incorrect URL to the Prometheus service.
-* Fixes an issue where running kubectl commands with Kubernetes version 1.26 was generating the warning "Got empty response for: custom.metrics.k8s.io/v1beta1".

+During installation, KOTS renders the template functions and sets the `image.registry` and `image.repository` fields in the Helm chart `values.yaml` file based on the value of the corresponding fields in the HelmChart custom resource.

-## v2023.02.02-0

+Any templates in the Helm chart that access the `image.registry` and `image.repository` fields are updated to use the appropriate value, as shown in the example below:

-Released on February 2, 2023

+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: nginx
+spec:
+  containers:
+  - name: nginx
+    image: {{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}
+```

-### New Features {#new-features-v2023-02-02-0}
-* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.24.0.
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-01-31T02-24-19Z.
-* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.26.3.
-* Flannel CNI is no longer supported with the Docker container runtime. Containerd is required.

+### Task 1b: Rewrite Public Image Names

-### Improvements {#improvements-v2023-02-02-0}
-* When upgrading multiple versions of Rook, users can download a single air gap bundle containing all versions of the Rook air gap packages, instead of downloading each version package separately.
+For any public images used by your application, configure the HelmChart custom resource so that image names are rewritten to either the location of the image in the public registry (for online installations) or the local registry (for air gap installations or online installations where the user configured a local registry).

-## v2023.01.31-0
+For more information, see the example below.

-Released on January 31, 2023
+#### Example

-### New Features {#new-features-v2023-01-31-0}
-* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) version(s) 1.26.1 1.25.6 1.24.10 1.23.16.
-* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.16.
-* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.9.5.
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2023-01-25T00-19-54Z.
-* Adds [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.56.15.
-* Adds a `serverFlags` configuration option to the [Velero add-on](https://kurl.sh/docs/add-ons/velero) to allow users to pass additional flags to the `velero server` command in the Velero pod. This can also be set using the [velero-server-flags](https://kurl.sh/docs/install-with-kurl/advanced-options#reference) cli flag when running the install script.
+The following HelmChart custom resource uses the KOTS [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry), [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost), and [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace) template functions to conditionally rewrite an image registry and repository depending on whether a local registry is used:

-### Improvements {#improvements-v2023-01-31-0}
-* Adds TCP connection host preflight checks for ports 2379 and 6443.
-* Adds [Weave add-on](https://kurl.sh/docs/add-ons/weave) versions 2.8.1-20230130 to address the following high severity CVE: [CVE-2022-43551](https://avd.aquasec.com/nvd/cve-2022-43551).
-* Adds a warning message when Flannel is the cluster CNI suggesting the user check that UDP port 8472 is open when joining a node or migrating from Weave to Flannel.
-* Adds Flannel UDP port 8472 status preflight check.
+```yaml
+# kots.io/v1beta2 HelmChart custom resource

-### Bug Fixes {#bug-fixes-v2023-01-31-0}
-* Fixes an error due to missing images from registry.k8s.io when updating Kubernetes from 1.21 to 1.23.{0-14} and 1.22 to 1.24.{0-8} in airgapped environments.
-* Fixes an issue that could cause Flannel pods on remote airgapped nodes to fail with ImagePullBackoff errors.
-* Fixes an issue that could cause single node upgrades to Rook add-on version 1.6.11 with Ceph filesystem enabled to fail with error "filesystem-singlenode.yaml: No such file or directory".
-
-## v2023.01.23-0
+apiVersion: kots.io/v1beta2
+kind: HelmChart
+metadata:
+  name: samplechart
+spec:
+  ...
+  values:
+    image:
+      # If a local registry is used, use that registry's hostname
+      # Else, use the public registry host (ghcr.io)
+      registry: '{{repl HasLocalRegistry | ternary LocalRegistryHost "ghcr.io" }}'
+      # If a local registry is used, use the registry namespace provided
+      # Else, use the image's namespace in the public registry
+      repository: '{{repl HasLocalRegistry | ternary LocalRegistryNamespace "cloudnative-pg" }}/cloudnative-pg'
+      tag: catalog-1.24.0
+```

-Released on January 23, 2023
+The `spec.values.image.registry` and `spec.values.image.repository` fields in the HelmChart custom resource correspond to the `image.registry` and `image.repository` fields in the Helm chart `values.yaml` file, as shown in the example below:

-### New Features {#new-features-v2023-01-23-0}
-* Allows migrating multi-node [Weave](https://kurl.sh/docs/add-ons/weave) installations to [Flannel](https://kurl.sh/docs/add-ons/flannel).
-* The [Rook add-on](https://kurl.sh/docs/add-ons/rook) can now be upgraded from version 1.0.x to 1.10.8, latest supported Rook version, as part of the installation script.
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) versions RELEASE.2023-01-12T02-06-16Z, RELEASE.2023-01-18T04-36-38Z and RELEASE.2023-01-20T02-05-44Z.
-* Adds [metrics-server add-on](https://kurl.sh/docs/add-ons/metrics-server) version 0.6.2.
+```yaml
+# Helm chart values.yaml file

-### Bug Fixes {#bug-fixes-v2023-01-23-0}
-* Creates .kube/config for installations where .kube/config was not created.
+image:
+  registry: ghcr.io
+  repository: cloudnative-pg/cloudnative-pg
+  tag: catalog-1.24.0
+```

-## v2023.01.13-1
+During installation, KOTS renders the template functions and sets the `image.registry` and `image.repository` fields in your Helm chart `values.yaml` file based on the value of the corresponding fields in the HelmChart custom resource. Any templates in the Helm chart that access the `image.registry` and `image.repository` fields are updated to use the appropriate value, as shown in the example below:

-Released on January 13, 2023
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: cloudnative-pg
+spec:
+  containers:
+  - name: cloudnative-pg
+    image: {{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}
+```

-### Bug Fixes {#bug-fixes-v2023-01-13-1}
-* Reverts a bug fix made in v2023.01.03-0 which caused `.kube/config` to not be created. For more information, see [Known Issue](#known-issues-v2023-01-13-0) below.
+## Task 2: Inject Image Pull Secrets {#inject-image-pull-secrets}

-## v2023.01.13-0
+Kubernetes requires a Secret of type `kubernetes.io/dockerconfigjson` to authenticate with a registry and pull a private image. When you reference a private image in a Pod definition, you also provide the name of the Secret in an `imagePullSecrets` key in the Pod definition. For more information, see [Specifying imagePullSecrets on a Pod](https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod) in the Kubernetes documentation.

-:::important
-The Kubernetes installer v2023.01.13-0 has a known issue that affects the creation of .kube/config in the home directory. See [Known Issue](#known-issues-v2023-01-13-0) below. This issue is resolved in v2023.01.13-1.
-:::
+During installation, KOTS creates a `kubernetes.io/dockerconfigjson` type Secret that is based on the customer license. This pull secret grants access to the private image through the Replicated proxy registry or the Replicated registry.
Additionally, if the user configured a local image registry, then the pull secret contains the credentials for the local registry. You must provide the name of this KOTS-generated pull secret in any Pod definitions that reference the private image.

-Released on January 13, 2023
+You can inject the name of this pull secret into a field in the HelmChart custom resource using the Replicated [ImagePullSecretName](/reference/template-functions-config-context#imagepullsecretname) template function. During installation, KOTS sets the value of the corresponding field in your Helm chart `values.yaml` file with the rendered value of the ImagePullSecretName template function.

-### New Features {#new-features-v2023-01-13-0}
-* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.9.5.
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) versions RELEASE.2023-01-02T09-40-09Z and RELEASE.2023-01-06T18-11-18Z.
-* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) verison 1.10.8.
-* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.15.
-* Adds automatic data migration from Longhorn to OpenEBS.
-* Adds a migration path for Weave to Flannel on single-node Kubernetes clusters. This migration requires downtime.
-* Adds logs for kURL execution which can be found under `/var/log/kurl/`.
+#### Example

-### Bug Fixes {#bug-fixes-v2023-01-13-0}
-* Fixes an issue where the process get stuck in failures scenarios by adding timeouts and improving log info when upgrading from the Rook `1.0.4` to `1.4.9`.
-* Fixes upgrading Rook from `1.0.4-14.2.21` to `1.4.9`.
-* Fixes a bug on Ubuntu where the installer would sometimes remove packages when attempting to install Kubernetes.
-* Fixes a timeout waiting for new versions of Rook and Ceph to roll out on upgrades by increase wait timeouts from 10 to 20 minutes.
+The following example shows a `spec.values.image.pullSecrets` array in the HelmChart custom resource that uses the ImagePullSecretName template function to inject the name of the KOTS-generated pull secret:

-### Known Issue {#known-issues-v2023-01-13-0}
+```yaml
+# kots.io/v1beta2 HelmChart custom resource

-This issue is resolved in v2023.01.13-1.
+apiVersion: kots.io/v1beta2
+kind: HelmChart
+metadata:
+  name: samplechart
+spec:
+  values:
+    image:
+      # Note: Use proxy.replicated.com or your custom domain
+      registry: '{{repl HasLocalRegistry | ternary LocalRegistryHost "proxy.replicated.com" }}'
+      repository: '{{repl HasLocalRegistry | ternary LocalRegistryNamespace "proxy/my-app/ecr.us-east-1.amazonaws.com/my-org" }}/api'
+      pullSecrets:
+        - name: '{{repl ImagePullSecretName }}'
+```

-v2023.01.13-0 has a known issue where the .kube/config might not be created in the home directory. This does not affect the ability to access the cluster when you run bash -l with kubectl.
+The `spec.values.image.pullSecrets` array in the HelmChart custom resource corresponds to an `image.pullSecrets` array in the Helm chart `values.yaml` file, as shown in the example below:

-If you cannot connect to the cluster with kubectl or did not find the .kube/config file, Replicated recommends that you copy .kube/config to your home directory:
+```yaml
+# Helm chart values.yaml file

-```
-cp /etc/kubernetes/admin.conf $HOME/.kube/config
+image:
+  registry: ecr.us-east-1.amazonaws.com
+  repository: my-org/api
+  pullSecrets:
+    - name: my-org-secret
 ```
-Then, grant permissions to the $HOME/.kube/config file.
+During installation, KOTS renders the ImagePullSecretName template function and adds the rendered pull secret name to the `image.pullSecrets` array in the Helm chart `values.yaml` file.
+Any templates in the Helm chart that access the `image.pullSecrets` field are updated to use the name of the KOTS-generated pull secret, as shown in the example below:

-## v2023.01.03-0
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: nginx
+spec:
+  containers:
+  - name: nginx
+    image: {{ .Values.image.registry }}/{{ .Values.image.repository }}
+  {{- with .Values.image.pullSecrets }}
+  imagePullSecrets:
+    {{- toYaml . | nindent 2 }}
+  {{- end }}
+```

-Released on January 3, 2023
+## Task 3: Add Pull Secret for Rate-Limited Docker Hub Images {#docker-secret}

-:::important
-v2023.01.03-0 has a known issue that can cause critical system packages to be removed from Ubuntu machines. This known issue is resolved in v2023.01.13-1. To avoid this known issue, do not upgrade to v2023.01.03-0, and instead upgrade directly to v2023.01.13-1.
-:::
+Docker Hub enforces rate limits for Anonymous and Free users. To avoid errors caused by reaching the rate limit, your users can run the `kots docker ensure-secret` command, which creates an `APP_SLUG-kotsadm-dockerhub` secret for pulling Docker Hub images and applies the secret to Kubernetes manifests that have images. For more information, see [Avoiding Docker Hub Rate Limits](/enterprise/image-registry-rate-limits).

-### New Features {#new-features-v2023-01-03-0}
-* [Rook add-on](https://kurl.sh/docs/add-ons/rook) can now be upgraded and migrated from version 1.4.3 up to version 1.7.x as part of the installation script.
-* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.26.2.
-* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.23.2.
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2022-12-12T19-27-27Z.
-* Adds [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.56.13.
-* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.26.0, 1.25.5, 1.24.9, 1.23.15, and 1.22.17.
-* Adds [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.56.14.
-* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) versions 1.6.13 and 1.6.14.
+If you are deploying a Helm chart with Docker Hub images that could be rate limited, then to support the use of the `kots docker ensure-secret` command, any Pod definitions in your Helm chart templates that reference the rate-limited image must be updated to access the `APP_SLUG-kotsadm-dockerhub` pull secret, where `APP_SLUG` is your application slug. For more information, see [Get the Application Slug](/vendor/vendor-portal-manage-app#slug).

-### Improvements {#improvements-v2023-01-03-0}
+You can do this by adding the `APP_SLUG-kotsadm-dockerhub` pull secret to a field in the `values` key of the HelmChart custom resource, along with a matching field in your Helm chart `values.yaml` file. During installation, KOTS sets the value of the matching field in the `values.yaml` file with the `APP_SLUG-kotsadm-dockerhub` pull secret, and any Helm chart templates that access the value are updated.

-* Disk and volume validation checks now run prior to migrating from Rook to OpenEBS. A failed validation check aborts the upgrade.
+For more information about Docker Hub rate limiting, see [Understanding Docker Hub rate limiting](https://www.docker.com/increase-rate-limits) on the Docker website.
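+For reference, the following sketch shows how a user runs this command with the KOTS CLI, where the username, password, and namespace values are placeholders. See [Avoiding Docker Hub Rate Limits](/enterprise/image-registry-rate-limits) for complete usage:
+
+```
+kubectl kots docker ensure-secret \
+  --dockerhub-username DOCKERHUB_USERNAME \
+  --dockerhub-password DOCKERHUB_PASSWORD \
+  --namespace APP_NAMESPACE
+```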
-### Bug Fixes {#bug-fixes-v2023-01-03-0}
-* Fixes installation conflicts when installing the containerd add-on and Docker is already installed on the host. Now the installation checks to see if Docker is installed and provides users with the option to automatically remove Docker.
-* Fixes an issue where EKCO's provisioned HAProxy load balancer pod crashed when it did not have access to the Config file.
-* Fixes an issue that causes air gapped upgrades to Rook add-on version 1.7.11 to fail with ImagePullBackoff errors.
-* Fixes an issue with the Docker preflight check not failing on some unsupported operating systems.
-* Fixes an issue that could cause Rook upgrades to fail if EKCO is scaled down, due to failures to recreate the Rook OSD deployments when the rook-priority.kurl.sh MutatingAdmissionWebhook is unreachable.
+#### Example

-## v2022.12.12-0
+The following Helm chart `values.yaml` file includes `image.registry`, `image.repository`, and `image.pullSecrets` for a rate-limited Docker Hub image:

-Released on December 12, 2022
+```yaml
+# Helm chart values.yaml file

-### New Features {#new-features-v2022-12-12-0}
-* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) versions 1.25.5, 1.24.9, 1.23.15, and 1.22.17.
-* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) versions 1.6.11 and 1.6.12.
-* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) versions 0.26.0 and 0.26.1.
-* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.9.4.
-* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.20.2.
-* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.23.1.
+image:
+  registry: docker.io
+  repository: my-org/example-docker-hub-image
+  pullSecrets: []
+```

-### Bug Fixes {#bug-fixes-v2022-12-12-0}
-* Fixes an issue that prevented upgrading from Rook 1.0.4 to 1.4.9 due to error "pool(s) have non-power-of-two pg_num".
-* Fixes an issue that caused Rook add-on upgrades from 1.0.4 to 1.4.9 to hang indefinitely with 50% pgs degraded when EKCO add-on is included in the upgrade spec.
-* Fixes an issue that prevented containerd.io to be installed or upgraded when the host has docker.io package installed on Ubuntu.
-* Fixes preflight checks to only recommend Docker Enterprise Edition to RHEL installs when containerd is not selected.
-* Fixes an issue where a deprecated version of Docker was being installed when Docker or containerd add-on versions were not explicitly set.
+The following HelmChart custom resource includes `spec.values.image.registry`, `spec.values.image.repository`, and `spec.values.image.pullSecrets`, which correspond to those in the Helm chart `values.yaml` file above.

-## v2022.11.29-0
+The `spec.values.image.pullSecrets` array lists the `APP_SLUG-kotsadm-dockerhub` pull secret, where the slug for the application is `example-app-slug`:

-Released on November 29, 2022
+```yaml
+# kots.io/v1beta2 HelmChart custom resource

-### New Features {#new-features-v2022-11-29-0}
-* Adds [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.56.12.
-* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.10.
-* Adds [Weave add-on](https://kurl.sh/docs/add-ons/weave) versions 2.8.1-20221122 to address the following high and critical severity CVEs: CVE-2022-42915, CVE-2022-42915, CVE-2022-42916, CVE-2022-42916.
-* Adds [Weave add-on](https://kurl.sh/docs/add-ons/weave) version 2.6.5-20221122 to address the following high and critical severity CVEs: CVE-2022-42915, CVE-2022-42915, CVE-2022-42916, CVE-2022-42916.
+apiVersion: kots.io/v1beta2
+kind: HelmChart
+metadata:
+  name: samplechart
+spec:
+  values:
+    image:
+      registry: docker.io
+      repository: my-org/example-docker-hub-image
+      pullSecrets:
+        - name: example-app-slug-kotsadm-dockerhub
+```

-### Improvements {#improvements-v2022-11-29-0}
-* Binaries installed by kURL into /use/local/bin are now owned by root.
-* Containerd add-on versions are now shipped with the respective supported runc version. Containerd addon versions 1.6.4 and later are built with runc version `v1.1.4` instead of `v1.0.0-rc95`.
+During installation, KOTS adds the `example-app-slug-kotsadm-dockerhub` secret to the `image.pullSecrets` array in the Helm chart `values.yaml` file. Any templates in the Helm chart that access `image.pullSecrets` are updated to use `example-app-slug-kotsadm-dockerhub`:

-### Bug Fixes {#bug-fixes-v2022-11-29-0}
-* Fixes an issue that causes Rook add-on version 1.0.4-14.2.21 to fail to install on Oracle Linux 7 with host dependency resolution errors.
-* Fixes an issue that causes Rook upgrades to unnecessarily pause for an extended period of time, with the message "failed to wait for Rook", before proceeding with the upgrade.
-* Fixes an issue that leaves the EKCO operator scaled down to 0 replicas when upgrading a cluster with Rook add-on versions 1.8.10 and 1.9.12.
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: example
+spec:
+  containers:
+  - name: example
+    image: {{ .Values.image.registry }}/{{ .Values.image.repository }}
+  {{- with .Values.image.pullSecrets }}
+  imagePullSecrets:
+    {{- toYaml . | nindent 2 }}
+  {{- end }}
+```

-## v2022.11.16-1
+## Task 4: Support the Use of Local Image Registries {#local-registries}

-Released on November 16, 2022
+Local image registries are required for KOTS installations in air-gapped environments with no outbound internet connection. Also, users in online environments can optionally use a local registry. For more information about how users configure a local image registry with KOTS, see [Configuring Local Image Registries](/enterprise/image-registry-settings).

-### Bug Fixes {#bug-fixes-v2022-11-16-1}
-* Fixes a bug that blocked installations.
+To support the use of local registries, configure the `builder` key. For more information, see [`builder`](/reference/custom-resource-helmchart-v2#builder) in _HelmChart v2_.

-## v2022.11.16-0
+## Task 5: Add Backup Labels for Snapshots (KOTS Existing Cluster and kURL Installations Only) {#add-backup-labels-for-snapshots}

-Released on November 16, 2022
+:::note
+The Replicated [snapshots](snapshots-overview) feature for backup and restore is supported only for existing cluster installations with KOTS. Snapshots are not supported for installations with Embedded Cluster. For more information about disaster recovery for installations with Embedded Cluster, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery).
+:::

-### New Features {#new-features-v2022-11-16-0}
-* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) version(s) 1.25.4 1.24.8 1.23.14 1.22.16.
-* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.9.3.
-* PVMigrate now checks for available disk space before starting to migrate volumes.
-* RHEL 8.7 and Oracle Linux 8.7 are now supported.
+The snapshots feature requires the following labels on all resources in your Helm chart that you want to be included in the backup: +* `kots.io/backup: velero` +* `kots.io/app-slug: APP_SLUG`, where `APP_SLUG` is the slug of your Replicated application. -## v2022.11.10-1 +For more information about snapshots, see [Understanding Backup and Restore](snapshots-overview). -Released on November 10, 2022 +To support backup and restore with snapshots, add the `kots.io/backup: velero` and `kots.io/app-slug: APP_SLUG` labels to fields under the HelmChart custom resource `optionalValues` key. Add a `when` statement that evaluates to true only when the customer license has the `isSnapshotSupported` entitlement. -### Bug Fixes {#bug-fixes-v2022-11-10-1} -* Fixes an issue where [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.89.0+ fails to install when a proxy is configured. +The fields that you create under the `optionalValues` key must map to fields in your Helm chart `values.yaml` file. For more information about working with the `optionalValues` key, see [optionalValues](/reference/custom-resource-helmchart-v2#optionalvalues) in _HelmChart v2_. -## v2022.11.10-0 +#### Example -Released on November 10, 2022 +The following example shows how to add backup labels for snapshots in the `optionalValues` key of the HelmChart custom resource: -### Improvements {#improvements-v2022-11-10-0} -* OpenEBS Local PV Storage Class will now be the default if no other Storage Class is specified for OpenEBS add-on versions 3.3.0 and above. Previously, OpenEBS was only the default if `openebs.localPVStorageClassName` was set to `"default"`. +```yaml +# kots.io/v1beta2 HelmChart custom resource -### Bug Fixes {#bug-fixes-v2022-11-10-0} -* Fixes an issue that could cause installations or upgrades to fail with error "syntax error: operand expected (error token is ""0" + "1"")" on RHEL 7 based distributions. -* Fixes an issue that causes installations to fail with no default Storage Class for specs with `openebs.localPVStorageClassName` set to anything other than `"default"` and no other CSI add-on specified. +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: samplechart +spec: + ... + optionalValues: + # add backup labels only if the license supports snapshots + - when: "repl{{ LicenseFieldValue `isSnapshotSupported` }}" + recursiveMerge: true + values: + mariadb: + commonLabels: + kots.io/backup: velero + kots.io/app-slug: repl{{ LicenseFieldValue "appSlug" }} + podLabels: + kots.io/backup: velero + kots.io/app-slug: repl{{ LicenseFieldValue "appSlug" }} +``` -## v2022.11.09-0 +## Additional Information -Released on November 9, 2022 +### About the HelmChart Custom Resource -### New Features {#new-features-v2022-11-09-0} -* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.25.0. This version adds management of the rqlite StatefulSet deployed by the app manager. If a Kubernetes installer cluster has at least three healthy nodes and the OpenEBS localpv storage class is available, rqlite is scaled up to three replicas for data replication and high availability. -## v2022.11.07-0 + -Released on November 7, 2022 +For more information about the HelmChart custom resource, including the unique requirements and limitations for the keys described in this topic, see [HelmChart v2](/reference/custom-resource-helmchart-v2). -### New Features {#new-features-v2022-11-07-0} -* Removes support for the BETA K3s add-on and the BETA RKE2 add-on. 
It is recommended to use the [OpenEBS add-on](https://kurl.sh/docs/add-ons/openEBS#localpv) for the single-node LocalPV use case with kURL. For more information about this decision, see the [ADR](https://github.com/replicatedhq/kURL/blob/main/docs/arch/adr-007-deprecate-k3s-and-rke2.md).
-* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.24.1.
-* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.20.1.
-* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.60.1-41.7.3.
+### HelmChart v1 and v2 Differences

-### Bug Fixes {#bug-fixes-v2022-11-07-0}
-* Fixes CRD errors when updating from Prometheus 0.49.0-17.1.3 on Kubernetes versions that do not support Server-Side Apply.
+To support the use of local registries with version `kots.io/v1beta2` of the HelmChart custom resource, provide the necessary values in the `builder` field. KOTS uses these values to render the Helm chart with all of the images that the chart requires, so that KOTS knows which images to pull and push into the local registry.

-## v2022.11.02-0
+For more information about how to configure the `builder` key, see [Packaging Air Gap Bundles for Helm Charts](/vendor/helm-packaging-airgap-bundles) and [`builder`](/reference/custom-resource-helmchart-v2#builder) in _HelmChart v2_.

-Released on November 2, 2022
+The `kots.io/v1beta2` HelmChart custom resource has the following differences from `kots.io/v1beta1`:

-### New Features {#new-features-v2022-11-02-0}
-* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.9.
-* Adds [Goldpinger add-on](https://kurl.sh/docs/add-ons/goldpinger) version 3.7.0-5.5.0.
-* Adds [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.56.11.
+
+| HelmChart v1beta2 | HelmChart v1beta1 | Description |
+| --- | --- | --- |
+| `apiVersion: kots.io/v1beta2` | `apiVersion: kots.io/v1beta1` | `apiVersion` is updated to `kots.io/v1beta2` |
+| `releaseName` | `chart.releaseName` | `releaseName` is a top-level field under `spec` |
+| N/A | `helmVersion` | `helmVersion` field is removed |
+| N/A | `useHelmInstall` | `useHelmInstall` field is removed |
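+
+The following minimal sketch shows the same chart reference in both API versions, reusing the `samplechart` naming from the examples in this topic:
+
+```yaml
+# kots.io/v1beta1 (before)
+apiVersion: kots.io/v1beta1
+kind: HelmChart
+metadata:
+  name: samplechart
+spec:
+  chart:
+    name: samplechart
+    chartVersion: 3.1.7
+    releaseName: samplechart-release-1
+  helmVersion: v3      # removed in v1beta2
+  useHelmInstall: true # removed in v1beta2
+---
+# kots.io/v1beta2 (after)
+apiVersion: kots.io/v1beta2
+kind: HelmChart
+metadata:
+  name: samplechart
+spec:
+  chart:
+    name: samplechart
+    chartVersion: 3.1.7
+  releaseName: samplechart-release-1
+```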
    -### Improvements {#improvements-v2022-11-02-0} -* Prompts and warns users of downtime before migrating from Rook-backed PersistentVolumeClaims to OpenEBS Local PV when OpenEBS is included in the specification and Rook is removed. For migration information, see [Migrating to Change kURL CSI Add-ons](https://kurl.sh/docs/install-with-kurl/migrating-csi). -* Updates the kurlsh/s3cmd image to tag 20221029-37473ee for [Registry add-on](https://kurl.sh/docs/add-ons/registry) version 2.8.1 and [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.9.2, to address the high severity CVE: CVE-2022-43680. +### Migrate Existing KOTS Installations to HelmChart v2 -### Bug Fixes {#bug-fixes-v2022-11-02-0} -* Fixes an issue that could cause the migration from Rook-backed PersistentVolumeClaims to unnecessarily hang for 5 minutes. For migration information, see [Migrating to Change kURL CSI Add-ons](https://kurl.sh/docs/install-with-kurl/migrating-csi). -* Fixes an issue that could cause kURL to attempt to migrate CRI from Docker to Containerd when the CRI is already Containerd. -* Fixes an issue with [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs) versions 1.12.0 and 2.6.0 that could cause installations to fail with the error `failed calling webhook "admission-webhook.openebs.io"`. -* Fixes an issue that could cause the kURL installer to disable EKCO management of the internal load balancer during an upgrade. See [Internal Load Balancer](https://kurl.sh/docs/add-ons/ekco#internal-load-balancer). -* Fixes an issue where [Weave add-on](https://kurl.sh/docs/add-ons/weave) "latest" version resolves to 2.6.5-20221006 instead of 2.6.5-20221025. -* Fixes an issue where kURL will migrate to both OpenEBS Local PV and Longhorn from Rook-backed PersistentVolumeClaims when both add-ons are included in the specification and Rook is removed. kURL now prefers to migrate to OpenEBS. +Existing KOTS installations can be migrated to use the KOTS HelmChart v2 method, without having to reinstall the application. -## v2022.10.28-1 +There are different steps for migrating to HelmChart v2 depending on the application deployment method used previously. For more information, see [Migrating Existing Installations to HelmChart v2](helm-v2-migrate). -Released on October 28, 2022 +================ +File: docs/vendor/helm-optional-charts.md +================ +# Example: Including Optional Helm Charts -### Bug Fixes {#bug-fixes-v2022-10-28-1} -* Fixes an issue that causes kURL to erroneously prompt the end-user for a Rook to OpenEBS Local PV migration when upgrading and the OpenEBS version 3.3.0 is included in the spec. +This topic describes using optional Helm charts in your application. It also provides an example of how to configure the Replicated HelmChart custom resource to exclude optional Helm charts from your application when a given condition is met. -## v2022.10.28-0 +## About Optional Helm Charts -Released on October 28, 2022 +By default, KOTS creates an instance of a Helm chart for every HelmChart custom resource manifest file in the upstream application manifests. However, you can configure your application so that KOTS excludes certain Helm charts based on a conditional statement. -### New Features {#new-features-v2022-10-28-0} -* When Rook is installed on the cluster but not included in the kURL spec, the OpenEBS add-on version 3.3.0 and later automatically migrates any Rook-backed PersistentVolumeClaims (PVCs) to OpenEBS Local PV. 
+To create this conditional statement, you add a Replicated KOTS template function to an `exclude` field in the HelmChart custom resource file. For example, you can add a template function that evaluates to `true` or `false` depending on the user's selection for a configuration field on the KOTS Admin Console Config page. +KOTS renders the template function in the `exclude` field, and excludes the chart if the template function evaluates to `true`. -### Improvements {#improvements-v2022-10-28-0} -* The replicatedhq/local-volume-provider image has been updated to v0.4.0 for [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.9.2 which addresses the following high and critical CVEs: CVE-2021-33574, CVE-2021-35942, CVE-2022-23218, CVE-2022-23219, CVE-2020-1752, CVE-2020-6096, CVE-2021-3326, CVE-2021-3999. +For all optional components, Replicated recommends that you add a configuration option to allow the user to optionally enable or disable the component. +This lets you support enterprises that want everything to run in the cluster and those that want to bring their own services for stateful components. -## v2022.10.26-0 +For more information about template functions, see [About Template Functions](/reference/template-functions-about). -Released on October 26, 2022 +## Example -### New Features {#new-features-v2022-10-26-0} -* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.24.0. -* MinIO deploys a highly-available StatefulSet with EKCO when the OpenEBS localpv storage class is enabled and at least three nodes are available. For more information, see [Manage MinIO with EKCO](https://kurl.sh/docs/add-ons/ekco#manage-minio-with-ekco) in _EKCO Add-on_ in the kURL documentation. +This example uses an application that has a Postgres database. +The community-supported Postgres Helm chart is available at https://github.com/bitnami/charts/tree/main/bitnami/postgresql. -### Improvements {#improvements-v2022-10-26-0} -* Adds [Weave add-on](https://kurl.sh/docs/add-ons/weave) versions 2.6.5-20221025 and 2.8.1-20221025 to address the following high CVEs: CVE-2022-40303, CVE-2022-40304. +In this example, you create a configuration field on the Admin Console Config page that lets the user provide their own Postgres instance or use a Postgres service that is embedded with the application. Then, you configure the HelmChart custom resource in a release for an application in the Replicated Vendor Portal to conditionally exclude the optional Postgres component. -## v2022.10.24-0 +### Step 1: Create the Configuration Fields -Released on October 24, 2022 +To start, define the Admin Console Config page that gives the user a choice of "Embedded Postgres" or "External Postgres", where "External Postgres" is user-supplied. -### New Features {#new-features-v2022-10-24-0} -* Adds [Flannel add-on](https://kurl.sh/docs/add-ons/flannel) version 0.20.0. -* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2022-10-20T00-55-09Z. +1. Log in to the [Vendor Portal](https://vendor.replicated.com). Create a new application for this example, or open an existing application. Then, click **Releases > Create release** to create a new release for the application. -## v2022.10.21-0 +1. 
In the Config custom resource manifest file in the release, add the following YAML to create the "Embedded Postgres" or "External Postgres" configuration options: -Released on October 21, 2022 + ```yaml + apiVersion: kots.io/v1beta1 + kind: Config + metadata: + name: example-application + spec: + groups: + - name: database + title: Database + description: Database Options + items: + - name: postgres_type + type: radio + title: Postgres + default: embedded_postgres + items: + - name: embedded_postgres + title: Embedded Postgres + - name: external_postgres + title: External Postgres + - name: embedded_postgres_password + type: password + value: "{{repl RandomString 32}}" + hidden: true + - name: external_postgres_uri + type: text + title: External Postgres Connection String + help_text: Connection string for a Postgres 10.x server + when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}' + ``` -### New Features {#new-features-v2022-10-21-0} -* [Rook add-on](https://kurl.sh/docs/add-ons/rook) versions 1.9.12 and later are now supported on Kubernetes 1.25. -* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) version(s) 1.25.3 1.24.7 1.23.13. -* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2022-10-15T19-57-03Z. -* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.23.0. -* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.23.2 which addresses the following high and critical CVEs: CVE-2021-33574, CVE-2021-35942, CVE-2022-23218, CVE-2022-23219, CVE-2020-1752, CVE-2020-6096, CVE-2021-3326, CVE-2021-3999. -* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.23.1. -* For [Rook add-on](https://kurl.sh/docs/add-ons/rook) versions 1.9.12 and later, [Ceph metrics collection and a Ceph Grafana dashboard](https://kurl.sh/docs/add-ons/rook#monitor-rook-ceph) are now enabled when the Prometheus add-on is installed. -* The replicatedhq/local-volume-provider image has been updated to v0.3.10 for [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.9.2 to address CVE-2022-37434 with critical severity. + The YAML above does the following: + * Creates a field with "Embedded Postgres" or "External Postgres" radio buttons + * Uses the Replicated RandomString template function to generate a unique default password for the embedded Postgres instance at installation time + * Creates fields for the Postgres password and connection string, if the user selects the External Postgres option -### Bug Fixes {#bug-fixes-v2022-10-21-0} -* Fixes an issue that causes the .kube/config to get removed on a Kubernetes upgrade. -* With the release of [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.23.1, fixes an issue that could cause EKCO to fail to perform operations dependent on Rook version on Rook upgrades, including maintaining CSI Pod resources and scaling the ceph-mgr Pod replica count. -* With the release of [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.23.2, fixes an issue that causes upgrades of Kubernetes to fail on secondary nodes when EKCO [Internal Load Balancer](https://kurl.sh/docs/add-ons/ekco#internal-load-balancer) is enabled. -* Fixes an issue that causes EKCO to log RBAC errors when the Rook add-on is not installed. 
+ The following shows how this Config custom resource manifest file displays on the Admin Console Config page: -## v2022.10.13-0 + ![Postgres Config Screen](/images/postgres-config-screen.gif) -Released on October 13, 2022 +### Step 2: Create a Secret for Postgres -### New Features {#new-features-v2022-10-13-0} -* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.9.12. -* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.23.0 with support for Rook 1.9. +The application has a few components that use Postgres, and they all mount the Postgres connection string from a single Secret. -### Bug Fixes {#bug-fixes-v2022-10-13-0} -* Fixes an issue that could prevent the EKCO deployment from scaling back up from zero replicas after running the Kubernetes installer script. +Define a Secret for Postgres that renders differently if the user selects the Embedded Postgres or External Postgres option: -## v2022.10.10-0 +1. In the release, create a Secret file and add the following YAML: -Released on October 10, 2022 + ```yaml + apiVersion: v1 + kind: Secret + metadata: + name: postgresql-secret + stringData: + uri: postgres://username:password@postgresql:5432/database?sslmode=disable + ``` -### New Features {#new-features-v2022-10-10-0} -* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2022-10-08T20-11-00Z. -* Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.8.10. +1. Edit the `uri` field in the Secret to add a conditional statement that renders either a connection string to the embedded Postgres chart or to the user supplied instance: -### Bug Fixes {#bug-fixes-v2022-10-10-0} -* Fixes an issue that could cause installations to fail with error "yaml: did not find expected node content" when installing behind an HTTP_PROXY. + ```yaml + apiVersion: v1 + kind: Secret + metadata: + name: postgresql-secret + stringData: + uri: repl{{ if ConfigOptionEquals "postgres_type" "embedded_postgres" }}postgres://myapplication:repl{{ ConfigOption "embedded_postgres_password" }}@postgres:5432/mydatabase?sslmode=disablerepl{{ else }}repl{{ ConfigOption "external_postgres_uri" }}repl{{ end }} + ``` -## v2022.10.07-0 + As shown above, you must use a single line for the conditional statement. Optionally, you can use the Replicated Base64Encode function to pipe a string through. See [Base64Encode](/reference/template-functions-static-context#base64encode) in _Static Context_. -Released on October 7, 2022 +### Step 3: Add the Helm Chart -### New Features {#new-features-v2022-10-07-0} +Next, package the Helm chart and add it to the release in the Vendor Portal: -* New KOTS add-on versions are now automatically added to the Kubernetes installer upon a new release of KOTS. +1. Run the following commands to generate a `.tgz` package of the Helm chart: - This means that the Kubernetes installer no longer needs to release to make a new version of KOTS available. So, the addition of new KOTS add-on versions will not be stated in the Kubernetes installer release notes. -For information about the features, improvements, and bug fixes included in each new version of KOTS, see the [App Manager Release Notes](https://docs.replicated.com/release-notes/rn-app-manager). -* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) versions RELEASE.2022-10-05T14-58-27Z and RELEASE.2022-10-02T19-29-29Z. -* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.22.0. 
+ ``` + helm repo add bitnami https://charts.bitnami.com/bitnami + helm fetch bitnami/postgresql + ``` -### Improvements {#improvements-v2022-10-07-0} -* Adds [Weave add-on](https://kurl.sh/docs/add-ons/weave) versions 2.6.5-20221006 and 2.8.1-20221006 to address the following critical CVEs: CVE-2022-2795, CVE-2022-2881, CVE-2022-2906, CVE-2022-3080, CVE-2022-38177, CVE-2022-38178. -* Updates kurlsh/s3cmd image to tag 20221006-27d5371 for latest [Registry](https://kurl.sh/docs/add-ons/registry) and [Velero](https://kurl.sh/docs/add-ons/velero) add-on versions to address the following critical CVE: CVE-2022-40674. +1. Drag and drop the `.tgz` file into the file tree of the release. The Vendor Portal automatically creates a new HelmChart custom resource named `postgresql.yaml`, which references the `.tgz` file you uploaded. -## v2022.09.30-0 + For more information about adding Helm charts to a release in the Vendor Portal, see [Managing Releases with the Vendor Portal](releases-creating-releases). -Released on September 30, 2022 +### Step 4: Edit the HelmChart Custom Resource -### New Features {#new-features-v2022-09-30-0} -* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.21.1. -* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2022-09-25T15-44-53Z. -* Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.9.2. -* Adds [Goldpinger add-on](https://kurl.sh/docs/add-ons/goldpinger) version 3.6.1-5.4.2. -* Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.86.1. +Finally, edit the HelmChart custom resource: -## v2022.09.28-0 +1. In the HelmChart custom resource, add a mapping to the `values` key so that it uses the password you created. Also, add an `exclude` field to specify that the Postgres Helm chart must only be included when the user selects the embedded Postgres option on the Config page: -Released on September 28, 2022 + ```yaml + apiVersion: kots.io/v1beta2 + kind: HelmChart + metadata: + name: postgresql + spec: + exclude: 'repl{{ ConfigOptionEquals `postgres_type` `external_postgres` }}' + chart: + name: postgresql + chartVersion: 12.1.7 -### New Features {#new-features-v2022-09-28-0} -* Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.86.0. + releaseName: samplechart-release-1 + + # values are used in the customer environment, as a pre-render step + # these values will be supplied to helm template + values: + auth: + username: username + password: "repl{{ ConfigOption `embedded_postgres_password` }}" + database: mydatabase + ``` -## v2022.09.23-0 +1. Save and promote the release. Then, install the release in a development environment to test the embedded and external Postgres options. For more information, see [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster). 
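+
+When testing, it can help to sanity-check the conditional Secret from Step 2. As an illustration, if the user selects the "Embedded Postgres" option, the `uri` field renders to a connection string of the following form, where `GENERATED_PASSWORD` stands in for the random password created at installation time:
+
+```yaml
+# Illustrative rendered Secret (Embedded Postgres selected)
+apiVersion: v1
+kind: Secret
+metadata:
+  name: postgresql-secret
+stringData:
+  uri: postgres://myapplication:GENERATED_PASSWORD@postgres:5432/mydatabase?sslmode=disable
+```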
-Released on September 23, 2022 +================ +File: docs/vendor/helm-optional-value-keys.md +================ +import Values from "../partials/helm/_helm-cr-values.mdx" +import OptionalValues from "../partials/helm/_helm-cr-optional-values.mdx" +import OptionalValuesWhen from "../partials/helm/_helm-cr-optional-values-when.mdx" +import OptionalValuesRecursiveMerge from "../partials/helm/_helm-cr-optional-values-recursive-merge.mdx" +import ConfigExample from "../partials/helm/_set-values-config-example.mdx" +import LicenseExample from "../partials/helm/_set-values-license-example.mdx" +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; -### New Features {#new-features-v2022-09-23-0} -* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.59.1-40.1.0. -* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2022-09-17T00-09-45Z. -* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) version(s) 1.25.2 1.24.6 1.23.12 1.22.15. +# Setting Helm Values with KOTS -### Improvements {#improvements-v2022-09-23-0} -* Messaging while upgrading Rook-Ceph add-on to newer versions has been improved. -* When run on an unsupported operating system, kURL now links to the [list of supported systems](https://kurl.sh/docs/install-with-kurl/system-requirements#supported-operating-systems). -* Online installations now downloads files from kurl.sh instead of Amazon S3. +This topic describes how to use the Replicated KOTS HelmChart custom resource to set and delete values in `values.yaml` files for Helm charts deployed with Replicated KOTS. -## v2022.09.19-0 +For a tutorial that demonstrates how to set Helm values in a sample Helm chart using the KOTS HelmChart custom resource, see [Tutorial: Set Helm Chart Values with KOTS](/vendor/tutorial-config-setup). -Released on September 19, 2022 +## Overview -### New Features {#new-features-v2022-09-19-0} -* Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.85.0. +The KOTS HelmChart custom resource [`values`](/reference/custom-resource-helmchart-v2#values) and [`optionalValues`](/reference/custom-resource-helmchart-v2#optionalvalues) keys create a mapping between KOTS and the `values.yaml` file for the corresponding Helm chart. This allows you to set or delete Helm values during installation or upgrade with KOTS, without having to make any changes to the Helm chart itself. -### Bug Fixes {#bug-fixes-v2022-09-19-0} -* Fixes an issue that could cause air gapped Kubernetes upgrades to fail Sonobuoy tests with a missing image. +You can create this mapping by adding a value under `values` or `optionalValues` that uses the exact same key name as a value in the corresponding Helm chart `values.yaml` file. During installation or upgrade, KOTS sets the Helm chart `values.yaml` file with any matching values from the `values` or `optionalValues` keys. -## v2022.09.16-0 +The `values` and `optionalValues` keys also support the use of Replicated KOTS template functions. When you use KOTS template functions in the `values` and `optionalValues` keys, KOTS renders the template functions and then sets any matching values in the corresponding Helm chart `values.yaml` with the rendered values. For more information, see [About Template Functions](/reference/template-functions-about). 
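+
+As a minimal sketch of this mapping, assume a chart whose `values.yaml` file defines a hypothetical `replicaCount: 1` default. An entry in the HelmChart custom resource that uses the exact same key name overrides that default at install time:
+
+```yaml
+# KOTS HelmChart custom resource (sketch)
+apiVersion: kots.io/v1beta2
+kind: HelmChart
+metadata:
+  name: samplechart
+spec:
+  values:
+    # Must match the key name in the chart's values.yaml file
+    replicaCount: 3
+```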
-Released on September 16, 2022 +Common use cases for the HelmChart custom resource `values` and `optionalValues` keys include: +* Setting Helm values based on user-supplied values from the KOTS Admin Console configuration page +* Setting values based on the user's unique license entitlements +* Conditionally setting values when a given condition is met +* Deleting a default value key from the `values.yaml` file that should not be included for KOTS installations -### New Features {#new-features-v2022-09-16-0} -* Adds [Kubernetes](https://kurl.sh/docs/add-ons/kubernetes) version(s) 1.25.1 1.25.0 1.24.5 1.23.11 1.22.14. -* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.58.0-39.12.1. -* Improved output when waiting for rook-ceph to become healthy. +For more information about the syntax for these fields, see [`values`](/reference/custom-resource-helmchart-v2#values) and [`optionalValues`](/reference/custom-resource-helmchart-v2#optionalvalues) in _HelmChart v2_. -### Improvements {#improvements-v2022-09-16-0} -* Updates the replicatedhq/local-volume-provider image to v0.3.8 for [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.9.1 to address CVE-2022-2509 with high severity. +## Set Values -### Bug Fixes {#bug-fixes-v2022-09-16-0} -* Fixes an issue that prevents upgrading Kubernetes to 1.24.x if the CRI has previously been migrated from Docker to Containerd. -* Fixes an issue that causes stateful pods mounting Persistent Volumes to get stuck in a `Terminating` state when upgrading single node Kubernetes clusters and using the [Longhorn add-on](https://kurl.sh/docs/add-ons/longhorn). +This section describes how to use KOTS template functions or static values in the HelmChart custom resource `values` key to set existing Helm values. -## v2022.09.12-0 +### Using a Static Value -Released on September 12, 2022 +You can use static values in the HelmChart custom resource `values` key when a given Helm value must be set the same for all KOTS installations. This allows you to set values for KOTS installations only, without affecting values for any installations that use the Helm CLI. -### New Features {#new-features-v2022-09-12-0} -* Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.84.0. -* Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.21.0. -* [Rook 1.0.x to 1.4.9 upgrades](https://kurl.sh/docs/add-ons/rook#upgrades) can now be completed in airgapped clusters. +For example, the following Helm chart `values.yaml` file contains `kotsOnlyValue.enabled`, which is set to `false` by default: -### Bug Fixes {#bug-fixes-v2022-09-12-0} -* [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) versions 0.21.0 or greater will now forcefully delete Envoy pods that change from a ready state to one where the Envoy container is not ready and have been in that state for at least 5 minutes. This has been added as a work around to a [known issue](https://github.com/projectcontour/contour/issues/3192) that may be caused by resource contention. +```yaml +# Helm chart values.yaml +kotsOnlyValue: + enabled: false +``` -## Release v2022.09.08-1 +The following HelmChart custom resource contains a mapping to `kotsOnlyValue.enabled` in its `values` key, which is set to `true`: -Released on September 8, 2022 +```yaml +# KOTS HelmChart custom resource -### New Features {#new-features-v2022-09-08-1} -* Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.22.1. 
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2022-09-07T22-25-02Z.
+apiVersion: kots.io/v1beta2
+kind: HelmChart
+metadata:
+  name: samplechart
+spec:
+  chart:
+    name: samplechart
+    chartVersion: 3.1.7
+
+  releaseName: samplechart-release-1

-### Improvements {#improvements-v2022-09-08-1}
-* The [Cert Manager add-on](https://kurl.sh/docs/add-ons/cert-manager) now supports upgrading from 1.0.3 to 1.9.1.
-* The Rook 1.0 to 1.4 migration will now prompt the user to load images used by the migration on other nodes before starting.
+  values:
+    kotsOnlyValue:
+      enabled: true
+```

-## Release v2022.09.08-0
+During installation or upgrade with KOTS, KOTS sets `kotsOnlyValue.enabled` in the Helm chart `values.yaml` file to `true` so that the KOTS-only value is enabled for the installation. For installations that use the Helm CLI instead of KOTS, `kotsOnlyValue.enabled` remains `false`.

-Released on September 8, 2022
+### Using KOTS Template Functions

-### New Features {#new-features-v2022-09-08-0}
-* Adds support for [Docker add-on](https://kurl.sh/docs/add-ons/docker) on Ubuntu version 22.04.
-* Adds [Cert Manager add-on](https://kurl.sh/docs/add-ons/cert-manager) version 1.9.1.
-* Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.8.
-* Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version 2022-09-01T23-53-36Z.
-* Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.58.0-39.11.0.
+You can use KOTS template functions in the HelmChart custom resource `values` key to set Helm values with the rendered template functions. For more information, see [About Template Functions](/reference/template-functions-about).

-## Release v2022.09.01-1
+<Tabs>
+<TabItem value="config-context" label="Config Context Example" default>
+<ConfigExample/>
+</TabItem>
+<TabItem value="license-context" label="License Context Example">
+<LicenseExample/>
+</TabItem>
+</Tabs>

-Released on September 1, 2022
+## Conditionally Set Values
+
-### New Features {#new-features-v2022-09-01-1}
+<OptionalValuesWhen/>
-* Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.83.0.
+For example, the following HelmChart custom resource uses the `optionalValues` key and the [ConfigOptionEquals](/reference/template-functions-config-context#configoptionequals) template function to set user-supplied values for an external MariaDB database:

-## Release v2022.09.01-0
+```yaml
+# KOTS HelmChart custom resource

-Released on September 1, 2022
+apiVersion: kots.io/v1beta2
+kind: HelmChart
+metadata:
+  name: wordpress
+spec:
+  chart:
+    name: wordpress
+    chartVersion: 15.3.2

-### New Features {#new-features-v2022-09-01-0}
-* The [Rook add-on](https://kurl.sh/docs/add-ons/rook) can now be upgraded from version 1.0.x to 1.4.x or 1.5.x as part of the installation script for internet-connected installations only.
-  Upgrading from version 1.0.x to 1.4.x or 1.5.x migrates data off of any hostpath-based OSDs in favor of block device-based OSDs, and performs a rolling upgrade through Rook versions 1.1.9, 1.2.7 and 1.3.11 before installing 1.4.9 (and 1.5.12 if applicable).
-  The upstream Rook project introduced a requirement for block storage in versions 1.3.x and later.
-* Adds [Docker add-on](https://kurl.sh/docs/add-ons/docker) version 20.10.17.
-  Note that Ubuntu version 22.04 only supports Docker version 20.10.17 and later.
+  releaseName: sample-release-1

-### Bug Fixes {#bug-fixes-v2022-09-01-0}
-* Fixes an issue that causes migrations to fail from Docker to containerd due to uninstalled `docker-scan-plugin` package.
-* Fixes an issue that causes migrations to fail from Rook to Longhorn 1.3.1 with 2 conflicting default storage classes.
+  optionalValues:
+  - when: "repl{{ ConfigOptionEquals `mariadb_type` `external`}}"
+    recursiveMerge: false
+    values:
+      externalDatabase:
+        host: "repl{{ ConfigOption `external_db_host`}}"
+        user: "repl{{ ConfigOption `external_db_user`}}"
+        password: "repl{{ ConfigOption `external_db_password`}}"
+        database: "repl{{ ConfigOption `external_db_database`}}"
+        port: "repl{{ ConfigOption `external_db_port`}}"
+```

-## Release v2022.08.25-0
+During installation, KOTS renders the template functions and sets the `externalDatabase` values in the HelmChart `values.yaml` file only when the user selects the `external` option for `mariadb_type` on the Admin Console configuration page.

-Released on August 25, 2022
+### About Recursive Merge for optionalValues {#recursive-merge}

-### New Features {#new-features-v2022-08-25-0}
+<OptionalValuesRecursiveMerge/>

-- Adds [Kubernetes add-on](https://kurl.sh/docs/add-ons/kubernetes) versions 1.24.4, 1.23.10, 1.22.13 and 1.21.14.
-- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kots) version 1.82.0
-- Adds [Minio add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2022-08-22T23-53-06Z.
-- Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.58.0-39.9.0.
-- Adds [Weave add-on](https://kurl.sh/docs/add-ons/weave) versions 2.6.5-20220825 and 2.8.1-20220825 to address the following critical severity CVE: CVE-2022-37434.
+For example, the following HelmChart custom resource has both `values` and `optionalValues`:

-### Improvements {#improvements-v2022-08-25-0}
+```yaml
+values:
+  favorite:
+    drink:
+      hot: tea
+      cold: soda
+    dessert: ice cream
+    day: saturday

-- Removes support for the BETA Local Path Provisioner Add-On. It is recommended to use the [OpenEBS](https://kurl.sh/docs/add-ons/openEBS#localpv) add-on for the LocalPV use case.
-- The Rook [1.0 to 1.4 task](https://kurl.sh/docs/add-ons/rook#upgrades) will now print new lines when waiting for pods to be rolled out, OSDs to be added, or certain migrations to complete. Previously, one line was printed and then overwritten with updates.
-- Updates kurlsh/s3cmd image to tag 20220825-237c19d for latest [Registry](https://kurl.sh/docs/add-ons/registry) and [Velero](https://kurl.sh/docs/add-ons/velero) add-on versions to address the following critical and high severity CVEs: CVE-2022-37434
+optionalValues:
+  - when: '{{repl ConfigOptionEquals "example_config_option" "1" }}'
+    recursiveMerge: false
+    values:
+      example_config_option:
+        enabled: true
+      favorite:
+        drink:
+          cold: lemonade
+```

-### Bug Fixes {#bug-fixes-v2022-08-25-0}
+The `values.yaml` file for the associated Helm chart defines the following key value pairs:

-- Fixes the [reset task](https://kurl.sh/docs/install-with-kurl/managing-nodes#reset-a-node) which fails when unable to find the kurlsh/weaveexec image.
-- Fixes the [Rook 1.0 to 1.4 task](https://kurl.sh/docs/add-ons/rook#upgrades) which would wait for health indefinitely after upgrading to 1.4.9 on single-node installations.
+```yaml +favorite: + drink: + hot: coffee + cold: soda + dessert: pie +``` +The `templates/configmap.yaml` file for the Helm chart maps these values to the following fields: -## Release v2022.08.23-0 +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-configmap +data: + favorite_day: {{ .Values.favorite.day }} + favorite_dessert: {{ .Values.favorite.dessert }} + favorite_drink_cold: {{ .Values.favorite.drink.cold }} + favorite_drink_hot: {{ .Values.favorite.drink.hot }} +``` -Released on August 23, 2022 +When `recursiveMerge` is set to `false`, the ConfigMap for the deployed application includes the following key value pairs: -### New Features {#new-features-v2022-08-23-0} +```yaml +favorite_day: null +favorite_dessert: pie +favorite_drink_cold: lemonade +favorite_drink_hot: coffee +``` -- Adds new [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs) version 3.3.0. +In this case, the top level keys in `optionalValues` override the top level keys in `values`. -### Bug Fixes {#bug-fixes-v2022-08-23-0} +KOTS then uses the values from the Helm chart `values.yaml` to populate the remaining fields in the ConfigMap: `favorite_day`, `favorite_dessert`, and `favorite_drink_hot`. -- Fixes an issue that causes Weave 2.6.x and 2.8.x versions of Weave to resolve to the incorrect versions without the latest CVE fixes. -- Updates the replicatedhq/local-volume-provider image to v0.3.7 for [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.9.1 to address CVE-2021-44716, CVE-2021-33194, and CVE-2022-21221 with high severity. +When `recursiveMerge` is set to `true`, the ConfigMap for the deployed application includes the following key value pairs: -## Release v2022.08.22-0 +```yaml +favorite_day: saturday +favorite_dessert: ice cream +favorite_drink_cold: lemonade +favorite_drink_hot: tea +``` -Released on August 22, 2022 +In this case, all keys from `values` and `optionalValues` are merged. Because both include `favorite.drink.cold`, KOTS uses `lemonade` from `optionalValues`. -### New Features {#new-features-v2022-08-22-0} +## Delete a Default Key -- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kots) version 1.81.1 -- Adds [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.56.10. +If the Helm chart `values.yaml` contains a static value that must be deleted when deploying with KOTS, you can set the value to `"null"` (including the quotation marks) in the `values` key of the HelmChart custom resource. -## Release v2022.08.19-0 +A common use case for deleting default value keys is when you include a community Helm chart as a dependency. Because you cannot control how the community chart is built and structured, you might want to change some of the default behavior. -Released on August 19, 2022 +For example, the following HelmChart custom resource sets an `exampleKey` value to `"null"` when the chart is deployed with KOTS: -### New Features {#new-features-v2022-08-19-0} +```yaml +# KOTS HelmChart custom resource -- Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.7.11. - - Upgrades Ceph cluster from Octopus to [Pacific](https://docs.ceph.com/en/quincy/releases/pacific/). -- Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.20.0 with support for [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.7.11. -- Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.9.1. -- Adds a new tasks.sh command, [`rook-10-to-14`](https://kurl.sh/docs/add-ons/rook#upgrades), that upgrades Rook 1.0 installations to Rook 1.4.9. 
This command only works for online installations. +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: samplechart +spec: + chart: + name: samplechart + chartVersion: 3.1.7 + + releaseName: samplechart-release-1 -### Improvements {#improvements-v2022-08-19-0} + values: + exampleKey: "null" +``` -- The [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) auto-upgrades experimental feature is no longer supported as of EKCO version 0.20.0. +For more information about using a `null` value to delete a key, see [Deleting a Default Key](https://helm.sh/docs/chart_template_guide/values_files/#deleting-a-default-key) in the Helm documentation. -### Bug Fixes {#bug-fixes-v2022-08-19-0} +================ +File: docs/vendor/helm-packaging-airgap-bundles.mdx +================ +import HelmBuilderRequirements from "../partials/helm/_helm-builder-requirements.mdx" +import BuilderAirgapIntro from "../partials/helm/_helm-cr-builder-airgap-intro.mdx" +import BuilderExample from "../partials/helm/_helm-cr-builder-example.mdx" +import AirGapBundle from "../partials/airgap/_airgap-bundle.mdx" -- Fixes an issue that causes Rook upgrades to fail on single node installations because of Rook MDS pod anti-affinity rules. -- Fixes an issue that can cause a migration from Docker to Containerd to fail due to listing nodes using the incorrect Kubernetes api resource group. +# Packaging Air Gap Bundles for Helm Charts -## Release v2022.08.16-0 +This topic describes how to package and build air gap bundles for releases that contain one or more Helm charts. This topic applies to applications deployed with Replicated KOTS. -Released on August 16, 2022 +## Overview -### New Features {#new-features-v2022-08-16-0} + -- Adds [Longhorn add-on](https://kurl.sh/docs/add-ons/longhorn) version 1.3.1. -- Adds [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.7. +When building the `.airgap` bundle for a release that contains one or more Helm charts, the Vendor Portal renders the Helm chart templates in the release using values supplied in the KOTS HelmChart custom resource [`builder`](/reference/custom-resource-helmchart-v2#builder) key. -## Release v2022.08.12-1 +## Configure the `builder` Key -Released on August 12, 2022 +You should configure the `builder` key if you need to change any default values in your Helm chart so that the `.airgap` bundle for the release includes all images needed to successfully deploy the chart. For example, you can change the default Helm values so that images for any conditionally-deployed components are always included in the air gap bundle. Additionally, you can use the `builder` key to set any `required` values in your Helm chart that must be set for the chart to render. -### New Features {#new-features-v2022-08-08-0} +The values in the `builder` key map to values in the given Helm chart's `values.yaml` file. For example, `spec.builder.postgres.enabled` in the example HelmChart custom resource below would map to a `postgres.enabled` field in the `values.yaml` file for the `samplechart` chart: -- Adds KOTS add-on version 1.81.0. See [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm). +```yaml +# KOTS HelmChart custom resource -## Release v2022.08.12-0 +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: samplechart +spec: + chart: + name: samplechart + chartVersion: 3.1.7 + builder: + postgres: + enabled: true +``` -Released on August 12, 2022 +For requirements, recommendations, and examples of common use cases for the `builder` key, see the sections below. 
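
+For reference, the `builder` example above assumes a `values.yaml` in the `samplechart` chart along the lines of the following minimal sketch. The fields other than `postgres.enabled` are hypothetical:

+```yaml
+# Hypothetical values.yaml for the samplechart chart
+postgres:
+  # Defaults to false, so the postgres components (and their images)
+  # render only when enabled. The builder key sets this to true at
+  # bundle build time so the image is included in the .airgap bundle.
+  enabled: false
+```
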
-### Bug Fixes {#bug-fixes-v2022-08-12-0} +### Requirements and Recommendations -- Fixes an issue that causes snapshots to fail after Rook to MinIO migration. + -## Release v2022.08.11-0 +### Example: Set the Image Registry for Air Gap Installations -Released on August 11, 2022 +For air gap installations, if the [Replicated proxy registry](/vendor/private-images-about) domain `proxy.replicated.com` is used as the default image name for any images, you need to rewrite the image to the upstream image name so that it can be processed and included in the air gap bundle. You can use the `builder` key to do this by hardcoding the upstream location of the image (image registry, repository, and tag), as shown in the example below: -### Improvements {#improvements-v2022-08-11-0} +```yaml +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: samplechart +spec: + chart: + name: samplechart + chartVersion: 3.1.7 + builder: + my-service: + image: + registry: 12345.dkr.ecr.us-west-1.amazonaws.com + repository: my-app + tag: "1.0.2" +``` +When building the `.airgap` bundle for the release, the Vendor Portal uses the registry, repository, and tag values supplied in the `builder` key to template the Helm chart, rather than the default values defined in the Helm `values.yaml` file. This ensures that the image is pulled from the upstream registry using the credentials supplied in the Vendor Portal, without requiring any changes to the Helm chart directly. -- Add Collectd Ubuntu 22.04 compatibility to host preflight checks -- Add `additional noproxy` addresses to the join command +### Example: Include Conditional Images -## Release v2022.08.08-0 +Many applications have images that are included or excluded based on a given condition. For example, enterprise users might have the option to deploy an embedded database with the application or bring their own database. To support this use case for air gap installations, the images for any conditionally-deployed components must always be included in the air gap bundle. -Released on August 8, 2022 + -### New Features {#new-features-v2022-08-08-0} +## Related Topics -- Adds Ubuntu 22.04 support. -- Adds KOTS add-on version 1.80.0. See [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm). +* [builder](/reference/custom-resource-helmchart-v2#builder) +* [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap) +* [Air Gap Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster-airgapped) -### Improvements {#improvements-v2022-08-08-0} +================ +File: docs/vendor/helm-v2-migrate.md +================ +# Migrating Existing Installations to HelmChart v2 -- Adds a new preflight check to disallow the Docker add-on installation on Ubuntu 22.04. +This topic describes how to migrate existing Replicated KOTS installations to the KOTS HelmChart `kots.io/v1beta2` (HelmChart v2) installation method, without having to reinstall the application. It also includes information about how to support both HelmChart v1 and HelmChart v2 installations from a single release, and lists frequently-asked questions (FAQs) related to migrating to HelmChart v2. -### Bug Fixes {#bug-fixes-v2022-08-08-0} +## Migrate to HelmChart v2 -- Fixes an issue that could cause downloading add-on packages to fail with a TAR error. +### Requirements -## Release v2022.08.04-0 +* The HelmChart v2 custom resource is supported with KOTS v1.99.0 and later. 
If any of your customers are running a version of KOTS earlier than v1.99.0, see [Support Customers on KOTS Versions Earlier Than v1.99.0](#support-both-v1-v2) below for more information about how to support both HelmChart v1 and HelmChart v2 installations from the same release. -Released on August 4, 2022 +* The Helm `--take-ownership` flag is supported with KOTS v1.124.0 and later. -### New Features {#new-features-v2022-08-04-0} +* The `kots.io/keep` annotation is supported with KOTS v1.122.0 and later. -- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.79.0. -- Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.22.0. -- Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.58.0-39.4.0. -- Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2022-08-02T23-59-16Z. +### Migrate From HelmChart v1 with `useHelmInstall: true` -### Improvements {#improvements-v2022-08-04-0} +To migrate existing installations from HelmChart v1 with `useHelmInstall: true` to HelmChart v2: -- The install script will now retry add-on package downloads for some failure scenarios. +1. In a development environment, install an application release using the KOTS HelmChart v1 with `useHelmInstall: true` method. You will use this installation to test the migration to HelmChart v2. -### Bug Fixes {#bug-fixes-v2022-08-04-0} +1. Create a new release containing your application files. -- Fixes an issue as of kURL version v2022.08.03-0 that improperly sets auth_allow_insecure_global_id_reclaim to true for new installations. +1. For each Helm chart in the release, find the corresponding HelmChart custom resource and update `apiVersion` to `kots.io/v1beta2`. Then update it to rewrite images, inject image pull secrets, and add backup labels. See [Configuring the HelmChart Custom Resource v2](helm-native-v2-using). -## Release v2022.08.03-0 +1. Promote the release to an internal-only channel that your team uses for testing. -Released on August 3, 2022 +1. In your development environment, log in to the Admin Console and confirm that you can upgrade to the new HelmChart v2 release. -### New Features {#new-features-v2022-08-03-0} +1. When you are done testing, promote the release to one or more of your customer-facing channels. Customers can follow the standard upgrade process in the Admin Console to update their instance. -- Adds [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.6.11. -- Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.19.9. +### Migrate From HelmChart v1 with `useHelmInstall: false` -### Bug Fixes {#bug-fixes-v2022-08-03-0} +This section describes how to migrate existing HelmChart v1 installations with `useHelmInstall: false`. -- Fixes an issue in [Rook add-on](https://kurl.sh/docs/add-ons/rook) versions 1.5.11 and 1.5.12 that could cause Rook upgrades to fail from versions prior to 1.5.11 due to `auth_allow_insecure_global_id_reclaim` improperly set to `false` for [unpatched Ceph versions](https://docs.ceph.com/en/quincy/security/CVE-2021-20288/). -- Fixes an issue in [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) versions prior to 0.19.9 that could cause Ceph to remain in `HEALTH_WARN` state for as long as an hour on a new installation. +:::note +When the `useHelmInstall` field is _not_ set in the HelmChart custom resource, `false` is the default value. 
+:::

-## Release v2022.07.29-0

+These migration steps ensure that KOTS does not uninstall any resources that were previously deployed without Helm, and that Helm takes ownership of these existing resources.

-Released on July 29, 2022

+To migrate existing installations from HelmChart v1 and `useHelmInstall: false` to HelmChart v2:

-### New Features {#new-features-v2022-07-29-0}

+1. Create a new release containing your application files:

-- Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.19.6.

+    1. In the release, for any resources defined in Kubernetes manifests or in your Helm `templates` that were previously installed with HelmChart v1 and `useHelmInstall: false`, add the `kots.io/keep` annotation. The `kots.io/keep` annotation prevents KOTS from uninstalling these resources when upgrading using the HelmChart v2 method.

-### Improvements {#improvements-v2022-07-29-0}

+    **Example:**
+    
+    ```yaml
+    apiVersion: apps/v1
+    kind: StatefulSet
+    metadata:
+      name: postgresql
+      # Add the kots.io/keep annotation
+      annotations:
+        kots.io/keep: "true"
+    ```
+    
+    1. Save the release.
+    
+1. Create another new release:

-- kURL is now [CIS Kubernetes Benchmark](https://www.cisecurity.org/benchmark/kubernetes) compliant using the latest [github.com/aquasecurity/kube-bench](https://github.com/aquasecurity/kube-bench) version v0.6.8 when property `kubernetes.cisCompliance` is set to `true`.

+    1. For each Helm chart in the release, find the corresponding HelmChart custom resource and update `apiVersion` to `kots.io/v1beta2`. Then update it to rewrite images, inject image pull secrets, and add backup labels. See [Configuring the HelmChart Custom Resource v2](helm-native-v2-using).

-### Bug Fixes {#bug-fixes-v2022-07-29-0}

+    1. In the HelmChart custom resource, under the `helmUpgradeFlags` field, add the `--take-ownership` flag:

-- Fixes an issue in [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) versions prior to 0.19.6 that causes unnecessary downtime when adding additional primary nodes and using the EKCO [internal load balancer](https://kurl.sh/docs/add-ons/ekco#internal-load-balancer).
-- Fixes an issue in [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) versions prior to 0.19.6 that causes long running kubectl commands such as `kubectl logs` or `kubectl exec` to timeout after 20 seconds of inactivity when using the EKCO [internal load balancer](https://kurl.sh/docs/add-ons/ekco#internal-load-balancer).

+    ```yaml
+    # HelmChart v2
+    apiVersion: kots.io/v1beta2
+    kind: HelmChart
+    metadata:
+      name: samplechart
+    spec:
+      helmUpgradeFlags:
+        - --take-ownership
+    ```

-## Release v2022.07.28-0

+    When the `--take-ownership` upgrade flag is enabled, Helm automatically takes ownership of resources that were previously deployed without Helm.

-Released on July 28, 2022

+    1. Save the release.

-### New Features {#new-features-v2022-07-28-0}

+1. Test the migration process:

-- Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.19.3.
-- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.78.0.

+    1. Promote the first release to an internal-only channel that your team uses for testing.

-### Improvements {#improvements-v2022-07-28-0}

+    1. In a development environment, install the first release.

-- Updates the haproxy image to tag 2.6.2-alpine3.16 for [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.19.3 to address the following critical severity CVEs: CVE-2022-1586, CVE-2022-1587.
-- The property `kubernetes.loadBalancerUseFirstPrimary`, and equivalent flag `kubernetes-load-balancer-use-first-primary`, has been added to automatically use the first primary address as the cluster control plane endpoint. This settings is not recommended. Enable the [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) and use the property `ekco.enableInternalLoadBalancer` instead.

+    1. Promote the second release to the same channel.
+    
+    1. In your development environment, access the Admin Console to upgrade to the second release.

-### Bug Fixes {#bug-fixes-v2022-07-28-0}

+1. When you are done testing, promote the first release to one or more of your customer-facing channels. Replicated recommends that you mark the release as required by enabling **Prevent this release from being skipped during upgrades**. For more information about required releases, see [Properties](/vendor/releases-about#properties) in _About Channels and Releases_.

-- Fixes an issue with [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) versions prior to 0.19.3 which causes registry certificates generated to be expired upon renewal.

+1. Promote the second release to the same customer-facing channel(s). Replicated recommends that you mark the release as required by enabling **Prevent this release from being skipped during upgrades**.

-## Release v2022.07.22-0

+1. Instruct customers to migrate by first upgrading to the release where the `kots.io/keep` annotation is applied to your resources, then upgrading to the release with HelmChart v2.

-Released on July 22, 2022

+1. In subsequent releases, remove the `--take-ownership` flag from the `helmUpgradeFlags` field and remove the `kots.io/keep` annotation from resources in your Helm templates.

-### New Features {#new-features-v2022-07-22-0}

+### Migrate From Standard Kubernetes Manifests

-- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.77.0.

+This section describes how to migrate existing KOTS installations of applications that were previously packaged as standard Kubernetes manifests and are now packaged as one or more Helm charts. This migration path involves performing two upgrades to ensure that KOTS does not uninstall any resources that were adopted into Helm charts, and that Helm can take ownership of resources that were previously deployed without Helm.

-### Improvements {#improvements-v2022-07-22-0}

+To migrate applications that were previously packaged as standard Kubernetes manifests:

-- Updates the kurlsh/s3cmd image to tag 20220722-4585dda for the latest [Registry](https://kurl.sh/docs/add-ons/registry) and [Velero](https://kurl.sh/docs/add-ons/velero) add-on versions, to address the following high severity CVEs: CVE-2022-30065, CVE-2022-2097, CVE-2022-30065.

+1. Create a new release containing the Kubernetes manifests for your application:

-## Release v2022.07.20-0

+    1. For each of the application manifests in the release, add the `kots.io/keep` annotation. The `kots.io/keep` annotation prevents KOTS from uninstalling resources that were previously installed without Helm when upgrading using the HelmChart v2 method.

-Released on July 20, 2022

+    **Example:**
+    
+    ```yaml
+    apiVersion: apps/v1
+    kind: StatefulSet
+    metadata:
+      name: postgresql
+      annotations:
+        kots.io/keep: "true"
+    ```

-### New Features {#new-features-v2022-07-20-0}

+    1. Save the release.

-- Adds [Weave add-on](https://kurl.sh/docs/add-ons/weave) versions 2.6.5-20220720 and 2.8.1-20220720 with a fix for broken iptables command on RHEL 8 based distributions.
-- Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version RELEASE.2022-07-17T15-43-14Z.

+1. Create another new release:

-### Bug Fixes {#bug-fixes-v2022-07-20-0}

+    1. In the release, add your application Helm chart(s). Remove the application manifests for resources that were adopted into the Helm chart(s).

-- Fixes an issue on RHEL 8 based distributions that causes the iptables command to report error `table "filter" is incompatible, use 'nft' tool` when using [Weave add-on](https://kurl.sh/docs/add-ons/weave) versions 2.6.5-20220616 and 2.8.1-20220616.

+    1. For each Helm chart in the release, add a corresponding KOTS HelmChart custom resource with `apiVersion` set to `kots.io/v1beta2`. Configure the resource to rewrite images, inject image pull secrets, and add backup labels. See [Configuring the HelmChart Custom Resource v2](helm-native-v2-using).

-## Release v2022.07.15-2

+    1. In the HelmChart custom resource, under the `helmUpgradeFlags` field, add the `--take-ownership` flag:

-Released on July 15, 2022

+    ```yaml
+    # HelmChart v2
+    apiVersion: kots.io/v1beta2
+    kind: HelmChart
+    metadata:
+      name: samplechart
+    spec:
+      helmUpgradeFlags:
+        - --take-ownership
+    ```

-### Improvements {#improvements-v2022-07-15-2}

+    When the `--take-ownership` upgrade flag is enabled, Helm automatically takes ownership of resources that were previously deployed without Helm.

-- Updates the local-volume-provider image to v0.3.6 for the [Velero add-on](https://kurl.sh/docs/add-ons/velero) to address CVE-2021-38561 with high severity.

+    1. Save the release.

-## Release v2022.07.15-1

+1. Test the migration process:

-Released on July 15, 2022

+    1. Promote the first release to an internal-only channel that your team uses for testing.

-### New Features {#new-features-v2022-07-15-1}

+    1. In a development environment, install the first release.

-- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.76.1.
-- Adds [Kubernetes add-on](https://kurl.sh/docs/add-ons/kubernetes) versions 1.24.3, 1.23.9, 1.22.12 and 1.21.14.
-- Adds [Weave add-on](https://kurl.sh/docs/add-ons/weave) versions 2.6.5-20220616 and 2.8.1-20220616 with Replicated-created security patches.

+    1. Promote the second release to the same channel.
+    
+    1. In your development environment, access the Admin Console to upgrade to the second release. Upgrading to the second release migrates the installation to HelmChart v2.

-### Improvements {#improvements-v2022-07-15-1}

+1. After you are done testing the migration process, promote the first release containing your application manifests with the `kots.io/keep` annotation to one or more customer-facing channels. Replicated recommends that you mark the release as required by enabling **Prevent this release from being skipped during upgrades**. For more information about required releases, see [Properties](/vendor/releases-about#properties) in _About Channels and Releases_.

-- Changes Weave version 2.6.5 and 2.8.1 to once again use upstream weave images.

+1. Promote the second release containing your Helm chart(s) to the same channels. Replicated recommends that you mark the release as required by enabling **Prevent this release from being skipped during upgrades**.

-### Bug Fixes {#bug-fixes-v2022-07-15-1}

+1. Instruct customers to migrate by first upgrading to the release containing the standard manifests, then upgrading to the release packaged with Helm.

-- Fixes an issue that caused Rook to Longhorn migration failures due to Ceph claiming Longhorn devices.

+1.
In subsequent releases, remove the `--take-ownership` flag from the `helmUpgradeFlags` field and remove the `kots.io/keep` annotation from resources in your Helm templates. -## Release v2022.07.15-0 +## Support Customers on KOTS Versions Earlier Than v1.99.0 {#support-both-v1-v2} -Released on July 15, 2022 +The HelmChart v2 installation method requires KOTS v1.99.0 or later. If you have existing customers that have not yet upgraded to KOTS v1.99.0 or later, Replicated recommends that you support both the HelmChart v2 and v1 installation methods from the same release until all installations are running KOTS v1.99.0 or later. -### Improvements {#improvements-v2022-07-15-0} +To support both installation methods from the same release, include both versions of the HelmChart custom resource for each Helm chart in your application releases (HelmChart `kots.io/v1beta2` and HelmChart `kots.io/v1beta1` with `useHelmInstall: true`). -- Improved health checks for [MinIO](https://kurl.sh/docs/add-ons/minio), [OpenEBS](https://kurl.sh/docs/add-ons/openebs), and [GoldPinger](https://kurl.sh/docs/add-ons/goldpinger) add-ons. +When you include both versions of the HelmChart custom resource for a Helm chart, installations with KOTS v1.98.0 or earlier use the v1 method, while installations with KOTS v1.99.0 or later use v2. -## Release v2022.07.12-0 +After all customers are using KOTS v1.99.0 or later, you can remove the HelmChart v1 custom resources so that all customers are using the HelmChart v2 method. -Released on July 12, 2022 +## HelmChart v2 Migration FAQs -### New Features {#new-features-v2022-07-12-0} +This section includes FAQs related to migrating existing installations to the KOTS HelmChart v2 method. -- Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version 2022-07-06T20-29-49Z to address the following high severity CVE: CVE-2022-1271. -- Adds [Docker Registry add-on](https://kurl.sh/docs/add-ons/registry) version 2.8.1. -- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.76.0. +### Which migration scenarios require the `kots.io/keep` annotation? -### Improvements {#improvements-v2022-07-12-0} -- Updates kurlsh/s3cmd image to tag 20220711-9578884 for latest [Registry](https://kurl.sh/docs/add-ons/registry) and [Velero](https://kurl.sh/docs/add-ons/velero) add-on versions to address the following critical and high severity CVEs: CVE-2018-25032, CVE-2021-30139, CVE-2021-36159, CVE-2021-3711, CVE-2021-3712, CVE-2021-42378, CVE-2021-42379, CVE-2021-42380, CVE-2021-42381, CVE-2021-42382, CVE-2021-42383, CVE-2021-42384, CVE-2021-42385, CVE-2021-42386, CVE-2021-45960, CVE-2021-46143, CVE-2022-0778, CVE-2022-1271, CVE-2022-22822, CVE-2022-22823, CVE-2022-22824, CVE-2022-22825, CVE-2022-22826, CVE-2022-22827, CVE-2022-23852, CVE-2022-23990, CVE-2022-25235, CVE-2022-25236, CVE-2022-25314, CVE-2022-25315, CVE-2022-28391. +When applied to a resource in a release, the `kots.io/keep` annotation prevents the given resource from being uninstalled. The `kots.io/keep` annotation can be used to prevent KOTS from deleting resources that were adopted into Helm charts or otherwise previously deployed without Helm. 
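
+As a minimal sketch, the annotation is set in the metadata of each resource to keep. The Deployment shown here is hypothetical:

+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: example-web
+  annotations:
+    # Tells KOTS not to uninstall this resource when the
+    # installation is upgraded using the HelmChart v2 method
+    kots.io/keep: "true"
+```
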
-## Release v2022.07.07-0 +To prevent existing resources from being uninstalled during upgrade, the `kots.io/keep` annotation is required for the following types of migrations: + * Applications previously packaged as Kubernetes manifests migrating to HelmChart v2 + * HelmChart v1 with `useHelmInstall: false` migrating to HelmChart v2 -Released on July 7, 2022 +`kots.io/keep` is _not_ needed when migrating from HelmChart v1 with `useHelmInstall: true` to HelmChart v2. -### Improvements {#improvements-v2022-07-07-0} +### Which migration scenarios require the `--take-ownership` flag? -- Adds [containerd add-on](https://kurl.sh/docs/add-ons/containerd) version 1.6.6. -- Adds [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.56.8. +When the `--take-ownership` flag is enabled, Helm automatically takes ownership of resources that were previously deployed to the cluster without Helm. -## Release v2022.07.05-0 +The `--take-ownership` flag is required for the following types of migrations: + * Applications previously packaged as Kubernetes manifests migrating to HelmChart v2 + * HelmChart v1 with `useHelmInstall: false` migrating to HelmChart v2 -Released on July 5, 2022 +`--take-ownership` is _not_ needed when migrating from HelmChart v1 with `useHelmInstall: true` to HelmChart v2. -### New Features {#new-features-v2022-07-05-0} +### What is the difference between HelmChart v1 with `useHelmInstall: false` and `useHelmInstall: true`? -- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.75.0. +With HelmChart v1 and `useHelmInstall: false`, KOTS renders the Helm templates and deploys them as standard Kubernetes manifests using `kubectl apply`. This differs from both the HelmChart v1 with `useHelmInstall: true` and HelmChart v2 methods, where KOTS installs the application using Helm. -## Release v2022.07.01-1 +Because the HelmChart v1 with `useHelmInstall: false` method does not deploy resources with Helm, it is necessary to use the `kots.io/keep` annotation and the Helm `--take-ownership` flag when migrating to the HelmChart v2 installation method. These ensure that Helm can take ownership of existing resources and that the resources are not uninstalled during upgrade. -Released on July 1, 2022 +For more information about how KOTS deploys Helm charts, including information about the deprecated HelmChart v1 installation methods, see [About Distributing Helm Charts with KOTS](helm-native-about). -### New Features {#new-features-v2022-07-01-1} +================ +File: docs/vendor/identity-service-configuring.md +================ +:::important +This topic is deleted from the product documentation because this Beta feature is deprecated. +::: -- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.74.0. +# Enabling and Configuring Identity Service (Beta) -## Release v2022.07.01-0 +This topic describes how to enable the identity service (Beta) feature, and how to regulate access to application resources using role based access control (RBAC). -Released on July 01, 2022 +## About Identity Service -### New Features {#new-features-v2022-07-01-0} +When you enable the identity service for an application, the Replicated app manager deploys [Dex](https://dexidp.io/) as an intermediary that can be configured to control access to the application. Dex implements an array of protocols for querying other user-management systems, known as connectors. For more information about connectors, see [Connectors](https://dexidp.io/docs/connectors/) in the Dex documentation. 
-- Adds [Goldpinger add-on](https://kurl.sh/docs/add-ons/goldpinger) version 3.5.1.
-- Adds support for RHEL and Oracle Linux 8.6.
-- Adds support for upgrading OpenEBS 1.x directly to 2.12+ or 3.2+.
-- The default [RKE2](https://kurl.sh/docs/add-ons/rke2) spec now includes the latest version of the [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs)

+## Limitations and Requirements

-## Release v2022.06.29-0

+Identity service has the following limitations and requirements:

-Released on June 29, 2022

+* Requires that the identity service option is enabled in customer licenses.
+* Is available only for embedded cluster installations with the kURL installer.
+* Is available only through the Replicated Admin Console.

-### New Features {#new-features-v2022-06-29-0}

+## Enable and Configure Identity Service

-- Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.9.0.
-- Adds [OpenEBS add-on](https://kurl.sh/docs/add-ons/openebs) versions 2.12.9 and 3.2.0. Only localpv volumes are supported.

+Use the Identity custom resource to enable and configure the identity service for your application. For an example application that demonstrates how to configure the identity service, see the [`kots-idp-example-app`](https://github.com/replicatedhq/kots-idp-example-app) on GitHub.

-## Release v2022.06.24-0

+To begin, create a new release in the [Vendor Portal](https://vendor.replicated.com). Add an Identity custom resource file and customize the file for your application. For more information about the Identity custom resource, see [Identity (Beta)](/reference/custom-resource-identity) in _Reference_.

-Released on June 24, 2022

+**Example:**
+
+```YAML
+apiVersion: kots.io/v1beta1
+kind: Identity
+metadata:
+  name: identity
+spec:
+  requireIdentityProvider: true
+  identityIssuerURL: https://{{repl ConfigOption "ingress_hostname"}}/oidcserver
+  oidcRedirectUris:
+    - https://{{repl ConfigOption "ingress_hostname"}}/callback
+  roles:
+    - id: access
+      name: Access
+      description: Restrict access to IDP Example App
+```

-### New Features {#new-features-v2022-06-24-0}

+Make the identity service accessible from the browser by configuring the service name and port. The app manager provides the service name and port to the application through the identity template functions so that the application can configure ingress for the identity service. For more information about the identity template functions, see [Identity Context](/reference/template-functions-identity-context) in _Reference_.

-- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.73.0.
-- Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.57.0-36.2.0 to address the following critical and high severity CVEs: CVE-2022-28391, CVE-2022-0778, CVE-2022-28391, CVE-2022-1271, CVE-2018-25032.
+**Example:** -## Release v2022.06.22-0 +```YAML +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: idp-app + annotations: + kubernetes.io/ingress.allow-http: 'false' + ingress.kubernetes.io/force-ssl-redirect: 'true' + kots.io/placeholder: repl{{ printf "'true'" }}repl{{ ConfigOption "annotations" | nindent 4 }} + labels: + app: idp-app +spec: + tls: + - hosts: + - repl{{ ConfigOption "ingress_hostname" }} + secretName: idp-ingress-tls + rules: + - host: repl{{ or (ConfigOption "ingress_hostname") "~" }} + http: + paths: + - path: / + backend: + serviceName: idp-app + servicePort: 80 + - path: /oidcserver + backend: + serviceName: repl{{ IdentityServiceName }} + servicePort: repl{{ IdentityServicePort }} +``` +In your Deployment manifest file, add environment variables to configure all of the information that your application needs to communicate and integrate with the identity service. -Released on June 22, 2022 +**Example:** -### Improvements {#improvements-v2022-06-22-0} +```YAML +apiVersion: apps/v1 +kind: Deployment +metadata: + name: idp-app + labels: + app: idp-app +spec: + replicas: 1 + selector: + matchLabels: + app: idp-app + template: + metadata: + labels: + app: idp-app + spec: + containers: + - name: idp-app + image: replicated/kots-idp-example-app:latest + imagePullPolicy: Always + ports: + - containerPort: 5555 + volumeMounts: + - name: tls-ca-volume + mountPath: /idp-example + readOnly: true + args: ["--issuer-root-ca=/idp-example/tls.ca"] + env: + - name: CERT_SHA + value: repl{{ sha256sum (ConfigOption "tls_cert") }} + - name: LISTEN + value: http://0.0.0.0:5555 + - name: ISSUER + value: https://{{repl ConfigOption "ingress_hostname"}}/oidcserver + - name: CLIENT_ID + value: repl{{ IdentityServiceClientID }} + - name: CLIENT_SECRET + value: repl{{ IdentityServiceClientSecret }} # TODO: secret + - name: REDIRECT_URI + value: https://{{repl ConfigOption "ingress_hostname"}}/callback + - name: EXTRA_SCOPES + value: groups + - name: RESTRICTED_GROUPS + value: | + {{repl IdentityServiceRoles | keys | toJson }} + hostAliases: + - ip: 172.17.0.1 + hostnames: + - myapp.kotsadmdevenv.com + volumes: + - name: tls-ca-volume + secret: + secretName: idp-app-ca +``` -- Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.21.1. -- Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.57.0-36.0.3. -- Adds [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.56.7. +## Configuring Access with RBAC -### Bug Fixes {#bug-fixes-v2022-06-22-0} +You can also regulate access to your application resources using role based access control (RBAC). -- Fixes CVEs for [Weave add-on](https://kurl.sh/docs/add-ons/weave) version 2.8.1. CVEs addressed: CVE-2021-36159, CVE-2021-25216, CVE-2021-30139, CVE-2020-8620, CVE-2020-8621, CVE-2020-8623, CVE-2020-8625, CVE-2021-25215, CVE-2021-28831, CVE-2020-8169, CVE-2020-8177, CVE-2020-8231, CVE-2020-8285, CVE-2020-8286, CVE-2020-28196, CVE-2021-23840, CVE-2021-3450, CVE-2021-3517, CVE-2021-3518. -- Updates the local-volume-provider image to v0.3.5 for the [Velero add-on](https://kurl.sh/docs/add-ons/velero) to address CVE-2022-1664 with critical severity. +In the Identity custom resource, provide a list of the available roles within your application in the `roles` section. For more information, see [`roles`](/reference/custom-resource-identity#roles) in _Reference_. 
-## Release v2022.06.17-0 +**Example:** -Released on June 17, 2022 +```YAML +apiVersion: kots.io/v1beta1 +kind: Identity +metadata: + name: identity +spec: + requireIdentityProvider: true + identityIssuerURL: https://{{repl ConfigOption "ingress_hostname"}}/oidcserver + oidcRedirectUris: + - https://{{repl ConfigOption "ingress_hostname"}}/callback + roles: + - id: access + name: Access + description: Restrict access to IDP Example App +``` -### Improvements {#improvements-v2022-06-17-0} +Then, using the Admin Console, your customer has the ability to create groups and assign specific roles to each group. +This mapping of roles to groups is returned to your application through the `IdentityServiceRoles` template function that you configure in your Deployment manifest file under the environment variable `RESTRICTED_GROUPS`. For more information, see [`IdentityServiceRoles`](/reference/template-functions-identity-context#identityserviceroles) in _Reference_. -- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.72.1. -- Adds [MinIO add-on](https://kurl.sh/docs/add-ons/minio) version 2022-06-11T19-55-32Z to address the following critical and high severity CVEs: CVE-2020-14040, CVE-2021-42836, CVE-2020-36067, CVE-2020-36066, CVE-2020-35380, CVE-2020-26521, CVE-2020-26892, CVE-2021-3121, CVE-2020-26160, CVE-2021-28831, CVE-2020-11080, CVE-2021-3450, CVE-2021-23840, CVE-2020-1967, CVE-2020-8286, CVE-2020-8285, CVE-2020-8231, CVE-2020-8177, CVE-2020-8169, CVE-2021-30139, CVE-2021-36159. -- Adds details to the documentation for the [AWS add-on](https://kurl.sh/docs/add-ons/aws) to include details on applying the appropriate [AWS IAM](https://aws.amazon.com/iam/) roles required for the add-on to function properly and additional specific requirements necessary for integrating with [AWS ELB](https://aws.amazon.com/elasticloadbalancing/) service. +**Example:** -### Bug Fixes {#bug-fixes-v2022-06-17-0} +```YAML +apiVersion: apps/v1 +kind: Deployment +metadata: + name: idp-app + labels: + app: idp-app +spec: + replicas: 1 + selector: + matchLabels: + app: idp-app + template: + metadata: + labels: + app: idp-app + spec: + containers: + - name: idp-app + image: replicated/kots-idp-example-app:latest + imagePullPolicy: Always + ports: + - containerPort: 5555 + volumeMounts: + - name: tls-ca-volume + mountPath: /idp-example + readOnly: true + args: ["--issuer-root-ca=/idp-example/tls.ca"] + env: + - name: CERT_SHA + value: repl{{ sha256sum (ConfigOption "tls_cert") }} + - name: LISTEN + value: http://0.0.0.0:5555 + - name: ISSUER + value: https://{{repl ConfigOption "ingress_hostname"}}/oidcserver + - name: CLIENT_ID + value: repl{{ IdentityServiceClientID }} + - name: CLIENT_SECRET + value: repl{{ IdentityServiceClientSecret }} # TODO: secret + - name: REDIRECT_URI + value: https://{{repl ConfigOption "ingress_hostname"}}/callback + - name: EXTRA_SCOPES + value: groups + - name: RESTRICTED_GROUPS + value: | + {{repl IdentityServiceRoles | keys | toJson }} + hostAliases: + - ip: 172.17.0.1 + hostnames: + - myapp.kotsadmdevenv.com + volumes: + - name: tls-ca-volume + secret: + secretName: idp-app-ca +``` -- Fixes a bug where the [AWS add-on](https://kurl.sh/docs/add-ons/aws) would fail if `latest` or `0.1.x` was used. -- Fixes a bug when `excludeStorageClass` is set to `true` would cause the [AWS add-on](https://kurl.sh/docs/add-ons/aws) to fail. 
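
+For reference, with the single `access` role defined in the example Identity custom resource above, `IdentityServiceRoles | keys | toJson` renders to a JSON array of the role IDs, so the rendered environment variable would look roughly like the following sketch:

+```yaml
+# Sketch of the rendered env entry; the exact IDs depend on the
+# roles defined in your Identity custom resource
+env:
+  - name: RESTRICTED_GROUPS
+    value: |
+      ["access"]
+```
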
+================ +File: docs/vendor/insights-app-status.md +================ +import StatusesTable from "../partials/status-informers/_statusesTable.mdx" +import AggregateStatus from "../partials/status-informers/_aggregateStatus.mdx" +import AggregateStatusIntro from "../partials/status-informers/_aggregate-status-intro.mdx" +import SupportedResources from "../partials/instance-insights/_supported-resources-status.mdx" -## Release v2022.06.14-0 +# Enabling and Understanding Application Status -Released on June 14, 2022 +This topic describes how to configure your application so that you can view the status of application instances in the Replicated Vendor Portal. It also describes the meaning of the different application statuses. -### New Features {#new-features-v2022-06-14-0} +## Overview -- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.72.0. -- Adds [Local Path Provisioner add-on](https://kurl.sh/docs/add-ons/local-path-provisioner) (Beta) as an additional storage provisioner. +The Vendor Portal displays data on the status of instances of your application that are running in customer environments, including the current state (such as Ready or Degraded), the instance uptime, and the average amount of time it takes your application to reach a Ready state during installation. For more information about viewing instance data, see [Instance Details](instance-insights-details). -### Bug Fixes {#bug-fixes-v2022-06-14-0} +To compute and display these insights, the Vendor Portal interprets and aggregates the state of one or more of the supported Kubernetes resources that are deployed to the cluster as part of your application. -- Fixes an issue where the `HTTPS_PROXY` variable was not set properly for the [containerd add-on](https://kurl.sh/docs/add-ons/containerd) service. + -## Release v2022.06.01-0 +For more information about how instance data is sent to the Vendor Portal, see [About Instance and Event Data](instance-insights-event-data). -Released on June 1, 2022 +## Enable Application Status Insights -### Improvements +To display insights on application status, the Vendor Portal requires that your application has one or more _status informers_. Status informers indicate the Kubernetes resources deployed as part of your application that are monitored for changes in state. -- Adds support for Kubernetes versions for 1.21.12, 1.22.9, 1.23.6 and 1.24.0. -- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.71.0. -- Adds [containerd add-on](https://kurl.sh/docs/add-ons/containerd) versions 1.5.10, 1.5.11, and 1.6.4. -- Adds [Longhorn add-on](https://kurl.sh/docs/add-ons/longhorn) version 1.2.4. -- Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) versions 0.19.1 and 0.19.2. -- In addition to the `ekco.enableInternalLoadBalancer` parameter in the installer specification, the `ekco-enable-internal-load-balancer` flag can now be specified at install time to enable the EKCO [internal load balancer](https://kurl.sh/docs/add-ons/ekco#internal-load-balancer). -- Upgraded the replicated/local-volume-provider image to v0.3.4 for [Velero add-on](https://kurl.sh/docs/add-ons/velero) v1.8.1. +To enable status informers for your application, do one of the following, depending on the installation method: +* [Helm Installations](#helm-installations) +* [KOTS Installations](#kots-installations) -### Bug Fixes +### Helm Installations -- Fixes an issue that caused the `less` command to break after installing on Amazon Linux 2. 
-- Fixes an issue that caused installations with Velero and the `kotsadm.disableS3` flag set to `true` to fail on RHEL-based distributions.
-- Fixes an issue that caused `bash: _get_comp_words_by_ref: command not found` to be printed after pressing tab when `bash-completion` is not installed.
-- Fixes an issue where migrating the object store from Rook to MinIO would fail due to undefined metrics functions.

+To get instance status data for applications installed with Helm, the Replicated SDK must be installed alongside the application. For information about how to distribute and install the SDK with your application, see [Installing the Replicated SDK](/vendor/replicated-sdk-installing).

-## Release v2022.05.19-0

+After you include the SDK as a dependency, the requirements for enabling status informers vary depending on how your application is installed:

-Released on May 19, 2022

+* For applications installed by running `helm install` or `helm upgrade`, the Replicated SDK automatically detects and reports the status of the resources that are part of the Helm release. No additional configuration is required to get instance status data.

-### Improvements

+* For applications installed by running `helm template` then `kubectl apply`, the SDK cannot automatically detect and report the status of resources. You must configure custom status informers by overriding the `statusInformers` value in the Replicated SDK chart. For example:

-- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.70.1.
-- Does not install Helm unless the experimental Helm charts feature is in use.

+    ```yaml
+    # Helm chart values.yaml file

-## Release v2022.05.16-0

+    replicated:
+      statusInformers:
+        - deployment/nginx
+        - statefulset/mysql
+    ```

-Released on May 16, 2022

+    :::note
+    Applications installed by running `helm install` or `helm upgrade` can also use custom status informers. When the `replicated.statusInformers` field is set, the SDK detects and reports the status of only the resources included in the `replicated.statusInformers` field.
+    :::

-### Improvements

+### KOTS Installations

-- Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.21.0.
-- Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.56.2-35.2.0.
-- Adds [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.8.1.

+For applications installed with Replicated KOTS, configure one or more status informers in the KOTS Application custom resource. For more information, see [Adding Resource Status Informers](admin-console-display-app-status).

-## Release v2022.05.11-0

+When Helm-based applications that include the Replicated SDK are deployed by KOTS, the SDK inherits the status informers configured in the KOTS Application custom resource. In this case, the SDK does _not_ automatically report the status of the resources that are part of the Helm release. This prevents discrepancies in the instance data in the vendor platform.

-Released on May 11, 2022

-### Improvements

+## View Resource Status Insights {#resource-status}

-- The storage class created by the AWS add-on is now named `aws-ebs` instead of `default`.

+For applications that include the Replicated SDK, the Vendor Portal also displays granular resource status insights in addition to the aggregate application status.
For example, you can hover over the **App status** field on the **Instance details** page to view the statuses of the individual resources deployed by the application, as shown below:

-## Release v2022.05.10-0

+![resource status pop up](/images/resource-status-hover-current-state.png)

-Released on May 10, 2022

+[View a larger version of this image](/images/resource-status-hover-current-state.png)

-### New Features

+Viewing these resource status details is helpful for understanding which resources are contributing to the aggregate application status. For example, when an application has an Unavailable status, that means that one or more resources are Unavailable. By viewing the resource status insights on the **Instance details** page, you can quickly understand which resource or resources are Unavailable for the purpose of troubleshooting.

-- Introduces the AWS add-on, which sets up the AWS cloud provider in a Kubernetes installer-created cluster. For more information, see [AWS Add-On](https://kurl.sh/docs/add-ons/aws) in the kURL open source documentation.

+Granular resource status details are automatically available when the Replicated SDK is installed alongside the application. For information about how to distribute and install the SDK with your application, see [Installing the Replicated SDK](/vendor/replicated-sdk-installing).

-### Improvements

+## Understanding Application Status

-- OpenEBS is now marked as incompatible with Kubernetes 1.22+.

+This section provides information about how Replicated interprets and aggregates the status of Kubernetes resources for your application to report an application status.

-## Release v2022.05.06-0

+### About Resource Statuses {#resource-statuses}

-Released on May 6, 2022

+Possible resource statuses are Ready, Updating, Degraded, Unavailable, and Missing.

-### New Features

+The following table lists the supported Kubernetes resources and the conditions that contribute to each status:

-- Adds a `resticTimeout` configuration option to the [Velero add-on](https://kurl.sh/docs/add-ons/velero) to allow users to configure the value that gets passed to the `--restic-timeout` flag in the Velero pod. This can also be set using the [`velero-restic-timeout` flag](https://kurl.sh/docs/install-with-kurl/advanced-options#reference) when running the install script.

+ 

-### Improvements

+### Aggregate Application Status

-- The “latest” version for the [containerd add-on](https://kurl.sh/docs/add-ons/containerd) is no longer pinned to 1.4.6. The “latest” version was pinned to 1.4.6 because later versions of containerd are not supported on Ubuntu 16. kURL removed support for Ubuntu 16 in [v2022.04.29-0](#release-v20220429-0).
-- Adds the `NoExecute` effect to the toleration for the Weave-Net DaemonSet for versions 2.5.2, 2.6.4, and 2.6.5.
-- Ensures that OpenEBS pods run with critical priority so that they are not evicted before other pods that depend on them.

+ 

-### Bug Fixes

+ 

-- Fixes an issue that could cause a migration from Docker to containerd to fail from a miscalculation of available disk space.
-- Fixes an issue that caused an upgrade of Kubernetes to fail when enabling the [EKCO internal load balancer](https://kurl.sh/docs/add-ons/ekco#internal-load-balancer).
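
+For the KOTS installation case described above, status informers are defined in the KOTS Application custom resource. The following is a minimal sketch with illustrative informer values:

+```yaml
+apiVersion: kots.io/v1beta1
+kind: Application
+metadata:
+  name: my-application
+spec:
+  statusInformers:
+    # Each entry identifies a resource to monitor as [NAMESPACE/]KIND/NAME
+    - deployment/nginx
+    - statefulset/mysql
+```
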
+================ +File: docs/vendor/install-with-helm.mdx +================ +import Prerequisites from "../partials/helm/_helm-install-prereqs.mdx" +import FirewallOpeningsIntro from "../partials/install/_firewall-openings-intro.mdx" -## Release v2022.05.02-0 +# Installing with Helm -Released on May 2, 2022 +This topic describes how to use Helm to install releases that contain one or more Helm charts. For more information about the `helm install` command, including how to override values in a chart during installation, see [Helm Install](https://helm.sh/docs/helm/helm_install/) in the Helm documentation. -### Improvements +## Prerequisites -- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.70.0. +Before you install, complete the following prerequisites: -## Release v2022.04.29-0 + -Released on April 29, 2022 +## Firewall Openings for Online Installations with Helm {#firewall} -### Improvements + -- Installs an NFS client package as part of the [Velero add-on](https://kurl.sh/docs/add-ons/velero). -- Removes support for Ubuntu 16.04 (end of life April 29, 2021). -- The [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) recommends that the user change the password after installation. + + + + + + + + + + + + + + + + + +
+<table>
+  <tr>
+    <th>Domain</th>
+    <th>Description</th>
+  </tr>
+  <tr>
+    <td>`replicated.app` *</td>
+    <td>
+      <p>Upstream application YAML and metadata is pulled from `replicated.app`. The current running version of the application (if any), as well as a license ID and application ID to authenticate, are all sent to `replicated.app`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p>
+      <p>For the range of IP addresses for `replicated.app`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L60-L65) in GitHub.</p>
+    </td>
+  </tr>
+  <tr>
+    <td>`registry.replicated.com`</td>
+    <td>
+      <p>Some applications host private images in the Replicated registry at this domain. The on-prem docker client uses a license ID to authenticate to `registry.replicated.com`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p>
+      <p>For the range of IP addresses for `registry.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L20-L25) in GitHub.</p>
+    </td>
+  </tr>
+  <tr>
+    <td>`proxy.replicated.com`</td>
+    <td>
+      <p>Private Docker images are proxied through `proxy.replicated.com`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p>
+      <p>For the range of IP addresses for `proxy.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L52-L57) in GitHub.</p>
+    </td>
+  </tr>
+</table>
    -### Bug Fixes +* Required only if the [Replicated SDK](/vendor/replicated-sdk-overview) is included as a dependency of the application Helm chart. -- Fixes an issue that caused upgrades of two versions of Kubernetes on remote masters to fail with error "docker: executable file not found in $PATH". -- Fixes an issue that caused a migration from Containerd to Docker to fail on air gapped instances with image pull errors. +## Install -## Release v2022.04.19-0 +To install a Helm chart: -Released on April 19, 2022 +1. In the Vendor Portal, go to **Customers** and click on the target customer. -### Improvements +1. Click **Helm install instructions**. -- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.69.1. -- Adds [Goldpinger add-on](https://kurl.sh/docs/add-ons/goldpinger) version 3.3.0-5.1.0. + Helm install button -### Bug Fixes + [View a larger image](/images/helm-install-button.png) -- Fixes a bug where the `installerVersion` in the kURL manifest was not fully applied. +1. In the **Helm install instructions** dialog, run the first command to log in to the Replicated registry: -## Release v2022.04.08-1 + ```bash + helm registry login registry.replicated.com --username EMAIL_ADDRESS --password LICENSE_ID + ``` + Where: + * `EMAIL_ADDRESS` is the customer's email address + * `LICENSE_ID` is the ID of the customer's license -Released on April 8, 2022 + :::note + You can safely ignore the following warning message: `WARNING: Using --password via the CLI is insecure.` This message is displayed because using the `--password` flag stores the password in bash history. This login method is not insecure. -### Improvements + Alternatively, to avoid the warning message, you can click **(show advanced)** in the **Helm install instructions** dialog to display a login command that excludes the `--password` flag. With the advanced login command, you are prompted for the password after running the command. + ::: -- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.69.0. +1. (Optional) Run the second and third commands to install the preflight plugin and run preflight checks. If no preflight checks are defined, these commands are not displayed. For more information about defining and running preflight checks, see [About Preflight Checks and Support Bundles](preflight-support-bundle-about). -## Release v2022.04.08-0 +1. Run the fourth command to install using Helm: -Released on April 8, 2022 + ```bash + helm install RELEASE_NAME oci://registry.replicated.com/APP_SLUG/CHANNEL/CHART_NAME + ``` + Where: + * `RELEASE_NAME` is the name of the Helm release. + * `APP_SLUG` is the slug for the application. For information about how to find the application slug, see [Get the Application Slug](/vendor/vendor-portal-manage-app#slug). + * `CHANNEL` is the lowercased name of the channel where the release was promoted, such as `beta` or `unstable`. Channel is not required for releases promoted to the Stable channel. + * `CHART_NAME` is the name of the Helm chart. -### Improvements + :::note + To install the SDK with custom RBAC permissions, include the `--set` flag with the `helm install` command to override the value of the `replicated.serviceAccountName` field with a custom service account. For more information, see [Customizing RBAC for the SDK](/vendor/replicated-sdk-customizing#customize-rbac-for-the-sdk). + ::: -- Adds support for Kubernetes versions 1.21.11, 1.22.8, and 1.23.5. -- Adds support for containerd version 1.4.13. +1. 
(Optional) In the Vendor Portal, click **Customers**. You can see that the customer you used to install is marked as **Active** and the details about the application instance are listed under the customer name. -### Bug Fixes + **Example**: -- Fixes a bug that caused cross-cluster restores to fail in some situations. -- Fixes an issue where Contour and Envoy requested too much CPU, causing other pods to not get scheduled in 4 CPU single node installations. -- Fixes a bug where persistent volume migrations sometimes failed due to a nil pointer dereference. -- Fixes a bug where the migration from Rook's object store to MinIO would fail after failing to get the logs of the sync-object-store pod. -- Increases the timeout while waiting for the kotsadm deployment to start, in order to improve the success rate when migrating from Rook to Longhorn. -- Fixes a bug that caused migrating from Docker to containerd to fail when also upgrading Kubernetes by more than one minor version in multi-node clusters. + ![example customer in the Vendor Portal with an active instance](/images/sdk-customer-active-example.png) + [View a larger version of this image](/images/sdk-customer-active-example.png) -## Release v2022.04.04-0 +================ +File: docs/vendor/installer-history.mdx +================ +import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" -Released on April 4, 2022 +# Installer History -### New Features + -- Adds the `kubeReserved` and `systemReservedResources` options to the [Kubernetes add-on](https://kurl.sh/docs/add-ons/kubernetes) to reserve resources for Kubernetes and OS system daemons. For more information, see [Kube Reserved](https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#kube-reserved) and [System Reserved](https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#system-reserved). -- Adds the `evictionThresholdResources` option to the [Kubernetes add-on](https://kurl.sh/docs/add-ons/kubernetes) to set [Kubernetes eviction thresholds](https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds). +This topic describes how to access the installation commands for all active and inactive kURL installers promoted to a channel. -### Improvements +## About Using Inactive Installers -- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.68.0. +Each release channel in the Replicated Vendor Portal saves the history of kURL installers that were promoted to the channel. You can view the list of historical installers on the **kURL Installer History** page for each channel. For more information, see [About the Installer History Page](#about) below. -## Release v2022.03.23-0 +It can be useful to access the installation commands for inactive installers to reproduce an issue that a user is experiencing for troubleshooting purposes. For example, if the user's cluster is running the inactive installer version 1.0.0, then you can install with version 1.0.0 in a test environment to troubleshoot. -Released on March 23, 2022 +You can also send the installation commands for inactive installers to your users as needed. For example, a user might have unique requirements for specific versions of Kubernetes or add-ons. 
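
+For context, each installer listed on the page corresponds to a kURL Installer spec. A minimal sketch of the installer YAML content, with hypothetical versions, looks like the following:

+```yaml
+apiVersion: cluster.kurl.sh/v1beta1
+kind: Installer
+metadata:
+  name: my-app
+spec:
+  kubernetes:
+    version: 1.24.x
+  containerd:
+    version: 1.6.x
+  kotsadm:
+    version: latest
+```
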
-### Improvements +## About the Installer History Page {#about} -- Adds an optional [CIS Compliance](https://kurl.sh/docs/install-with-kurl/cis-compliance) flag to the Kubernetes installer specification that configures the instance to meet the [Center for Internet Security (CIS)](https://www.cisecurity.org/cis-benchmarks/) compliance benchmark. -- Fixes a bug that could cause an unbound variable error when restoring from a backup. +The **kURL Installer History** page for each channel includes a list of all the kURL installers that have been promoted to the channel, including the active installer and any inactive installers. -## Release v2022.03.22-0 +To access the **kURL Installer History** page, go to **Channels** and click the **Installer history** button on the target channel. -Released on March 22, 2022 +The following image shows an example **kURL Installer History** page with three installers listed: -### Bug Fixes +![Installer History page in the Vendor Portal](/images/installer-history-page.png) -- Fixes a bug that caused installations to fail with the error “incorrect binary usage” for all installers that include KOTS add-on version 1.67.0. +[View a larger version of this image](/images/installer-history-page.png) -## Release v2022.03.21-0 +The installers are listed in the order in which they were promoted to the channel. The installer at the top of the list is the active installer for the channel. -Released on March 21, 2022 +The **kURL Installer History** page includes the following information for each installer listed: -### Improvements +* Version label, if provided when the installer was promoted +* Sequence number +* Installation command +* Installer YAML content -- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.67.0. -- Adds the [`app-version-label` flag](https://kurl.sh/docs/install-with-kurl/advanced-options#reference), which takes a version label as an argument and tells KOTS to install that particular version of an application. If this flag is not passed, the latest version of the application is installed. See [Online Installation with the Kubernetes Installer​](/enterprise/installing-embedded-cluster). +================ +File: docs/vendor/instance-data-export.md +================ +import Download from "../partials/customers/_download.mdx" -## Release v2022.03.11-0 +# Export Customer and Instance Data -Released on March 11, 2022 +This topic describes how to download and export customer and instance data from the Replicated Vendor Portal. -### New Features -* Adds the [labels flag](https://kurl.sh/docs/install-with-kurl/advanced-options), which applies the given labels to the node. +## Overview -### Bug Fixes -* Fixes false validation errors when creating a new installer that includes one or more of the following fields: `excludeBuiltinHostPreflights`, `hostPreflightIgnore`, `hostPreflightEnforceWarnings`, and `storageOverProvisioningPercentage`. 
+While you can always consume customer and instance insight data directly in the Replicated Vendor Portal, the data is also available in a CSV format so that it can be imported into any other system, such as:
+* Customer Relationship Management (CRM) systems like Salesforce or Gainsight
+* Data warehouses like Redshift, Snowflake, or BigQuery
+* Business intelligence (BI) tools like Looker, Tableau, or PowerBI

-## Release v2022.03.08-0

+By collecting and organizing this data wherever it is most visible and valuable, you can enable your team to make better decisions about where to focus efforts across product, sales, engineering, and customer success.

+## Bulk Export Instance Event Timeseries Data

-Released on March 8, 2022

+You can use the Vendor API v3 `/app/{app_id}/events` endpoint to programmatically access historical timeseries data containing instance level events, including any custom metrics that you have defined. For more information about the endpoint, see [Get instance events in either JSON or CSV format](https://replicated-vendor-api.readme.io/reference/listappinstanceevents) in the Vendor API v3 documentation.

-### Improvements

+The `/app/{app_id}/events` endpoint returns data scoped to a given application identifier. It also allows filtering based on time periods, instance identifiers, customer identifiers, and event types. You must provide at least **one** query parameter to scope the query in order to receive a response.

-- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.66.0.

-### Bug Fixes
-
-- Fixes a bug where the `installerVersion` field for the [kURL add-on](https://kurl.sh/docs/add-ons/kurl) was stripped when creating or promoting the installer.
-
-## Release v2022.03.04-1
-
-Released on March 4, 2022
-
-### Improvements
-
-- Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.18.0.
-
-## Release v2022.03.04-0
-
-Released on March 4, 2022
-
-### Improvements
-
-- Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.17.0.
-- Adds CPU resource requests and limits to the [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.20.1+ to prevent Envoy from becoming unresponsive.
-
-## Release v2022.03.01-0
-
-Released on March 1, 2022
-
-### Improvements
-
-- Adds [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.20.1.
-
-## Release v2022.02.28-0
-
-Released on February 28, 2022
-
-### Improvements
-
-- Adds the [storage over-provisioning percentage](https://longhorn.io/docs/1.2.3/references/settings/#storage-over-provisioning-percentage) option to the [Longhorn add-on](https://kurl.sh/docs/add-ons/longhorn).
-
-### Bug Fixes

+By bulk exporting this instance event data with the `/app/{app_id}/events` endpoint, you can:
+* Identify trends and potential problem areas
+* Demonstrate the impact, adoption, and usage of recent product features

-- Fixes the KOTS `uiBindPort` for the beta K3s and RKE2 installers so that they won't error on deploy. This port now defaults to 30880, and the allowable range is ports 30000-32767.

+### Filter Bulk Data Exports

-## Release v2022.02.25-0

+You can use the following types of filters to filter timeseries data for bulk export:

-Released on February 25, 2022

+- **Filter by date**:
+  - Get instance events recorded _at or before_ the query date. For example:
+    ```bash
+    curl -H "Authorization: $REPLICATED_API_TOKEN" \
+    "https://api.replicated.com/vendor/v3/app/:appID/events?before=2023-10-15"
+    ```
+  - Get instance events recorded _at or after_ the query date.
For example:
+    ```bash
+    curl -H "Authorization: $REPLICATED_API_TOKEN" \
+    "https://api.replicated.com/vendor/v3/app/:appID/events?after=2023-10-15"
+    ```
+  - Get instance events recorded within a range of dates [after, before]. For example:
+    ```bash
+    curl -H "Authorization: $REPLICATED_API_TOKEN" \
+    "https://api.replicated.com/vendor/v3/app/:appID/events?after=2023-05-02&before=2023-10-15"
+    ```
+- **Filter by customer**: Get instance events from one or more customers using a comma-separated list of customer IDs. For example:
+  ```bash
+  curl -H "Authorization: $REPLICATED_API_TOKEN" \
+  "https://api.replicated.com/vendor/v3/app/:appID/events?customerIDs=1b13241,2Rjk2923481"
+  ```
+- **Filter by event type**: Get instance events by event type using a comma-separated list of event types. For example:
+  ```bash
+  curl -H "Authorization: $REPLICATED_API_TOKEN" \
+  "https://api.replicated.com/vendor/v3/app/:appID/events?eventTypes=numUsers,numProjects"
+  ```

+:::note
+If any filter is passed for an object that does not exist, no warning is given. For example, if a `customerIDs` filter is passed for an ID that does not exist, or for an ID that the user does not have access to, then an empty array is returned.
+:::
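The date, customer, and event type filters shown above can also be combined in a single request. The following is a minimal sketch of a bulk export call that combines all three filter types, assuming the documented Vendor API v3 endpoint and a `REPLICATED_API_TOKEN` environment variable; the application ID, customer IDs, and event types are placeholder values:

```python
import os
import requests

# Minimal sketch: combine date-range, customer, and event type filters in a
# single query to the Vendor API v3 events endpoint. The app ID, customer IDs,
# and event types below are placeholder values.
API_TOKEN = os.environ["REPLICATED_API_TOKEN"]
APP_ID = "your-app-id"  # placeholder

resp = requests.get(
    f"https://api.replicated.com/vendor/v3/app/{APP_ID}/events",
    headers={"Authorization": API_TOKEN},
    params={
        "after": "2023-05-02",
        "before": "2023-10-15",
        "customerIDs": "1b13241,2Rjk2923481",
        "eventTypes": "numUsers,numProjects",
    },
)
resp.raise_for_status()
print(resp.json())
```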
-### Improvements
-- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.65.0.
-
-## Release v2022.02.23-0
-
-Released on February 23, 2022
-
-### Bug Fixes
-
-- Fixes a race condition when migrating from Rook-Ceph to Longhorn with both Prometheus and [EKCO v0.13+](https://kurl.sh/docs/add-ons/ekco#auto-resource-scaling) installed.
-- Fixes a bug that caused RHEL 8 installations utilizing the [containerd add-on](https://kurl.sh/docs/add-ons/containerd) to fail because of conflicting dependency package versions.
-- Fixes a bug that caused RHEL 7 installations to fail because of conflicting openssl-lib package versions.

+## Download Customer Instance Data CSVs

+<Download/>

+### Data Dictionary

+The following table lists the data fields that can be included in the customers and instances CSV downloads, including the label, data type, and description.

<table>
  <tr><th>Label</th><th>Type</th><th>Description</th></tr>
  <tr><td>customer_id</td><td>string</td><td>Customer identifier</td></tr>
  <tr><td>customer_name</td><td>string</td><td>The customer name</td></tr>
  <tr><td>customer_created_date</td><td>timestamptz</td><td>The date the license was created</td></tr>
  <tr><td>customer_license_expiration_date</td><td>timestamptz</td><td>The expiration date of the license</td></tr>
  <tr><td>customer_channel_id</td><td>string</td><td>The channel id the customer is assigned</td></tr>
  <tr><td>customer_channel_name</td><td>string</td><td>The channel name the customer is assigned</td></tr>
  <tr><td>customer_app_id</td><td>string</td><td>App identifier</td></tr>
  <tr><td>customer_last_active</td><td>timestamptz</td><td>The date the customer was last active</td></tr>
  <tr><td>customer_type</td><td>string</td><td>One of prod, trial, dev, or community</td></tr>
  <tr><td>customer_status</td><td>string</td><td>The current status of the customer</td></tr>
  <tr><td>customer_is_airgap_enabled</td><td>boolean</td><td>The feature the customer has enabled - Airgap</td></tr>
  <tr><td>customer_is_geoaxis_supported</td><td>boolean</td><td>The feature the customer has enabled - GeoAxis</td></tr>
  <tr><td>customer_is_gitops_supported</td><td>boolean</td><td>The feature the customer has enabled - KOTS Auto-GitOps</td></tr>
  <tr><td>customer_is_embedded_cluster_download_enabled</td><td>boolean</td><td>The feature the customer has enabled - Embedded Cluster</td></tr>
  <tr><td>customer_is_identity_service_supported</td><td>boolean</td><td>The feature the customer has enabled - Identity</td></tr>
  <tr><td>customer_is_snapshot_supported</td><td>boolean</td><td>The feature the customer has enabled - Snapshot</td></tr>
  <tr><td>customer_has_entitlements</td><td>boolean</td><td>Indicates the presence or absence of entitlements and entitlement_* columns</td></tr>
  <tr><td>customer_entitlement__*</td><td>string/integer/boolean</td><td>The values of any custom license fields configured for the customer. For example, customer_entitlement__active-users.</td></tr>
  <tr><td>customer_created_by_id</td><td>string</td><td>The ID of the actor that created this customer: user ID or a hashed value of a token.</td></tr>
  <tr><td>customer_created_by_type</td><td>string</td><td>The type of the actor that created this customer: user or service-account.</td></tr>
  <tr><td>customer_created_by_description</td><td>string</td><td>The description of the actor that created this customer. Includes username or token name depending on actor type.</td></tr>
  <tr><td>customer_created_by_link</td><td>string</td><td>The link to the actor that created this customer.</td></tr>
  <tr><td>customer_created_by_timestamp</td><td>timestamptz</td><td>The date the customer was created by this actor. When available, matches the value in the customer_created_date column.</td></tr>
  <tr><td>customer_updated_by_id</td><td>string</td><td>The ID of the actor that updated this customer: user ID or a hashed value of a token.</td></tr>
  <tr><td>customer_updated_by_type</td><td>string</td><td>The type of the actor that updated this customer: user or service-account.</td></tr>
  <tr><td>customer_updated_by_description</td><td>string</td><td>The description of the actor that updated this customer. Includes username or token name depending on actor type.</td></tr>
  <tr><td>customer_updated_by_link</td><td>string</td><td>The link to the actor that updated this customer.</td></tr>
  <tr><td>customer_updated_by_timestamp</td><td>timestamptz</td><td>The date the customer was updated by this actor.</td></tr>
  <tr><td>instance_id</td><td>string</td><td>Instance identifier</td></tr>
  <tr><td>instance_is_active</td><td>boolean</td><td>The instance has pinged within the last 24 hours</td></tr>
  <tr><td>instance_first_reported_at</td><td>timestamptz</td><td>The timestamp of the first recorded check-in for the instance.</td></tr>
  <tr><td>instance_last_reported_at</td><td>timestamptz</td><td>The timestamp of the last recorded check-in for the instance.</td></tr>
  <tr><td>instance_first_ready_at</td><td>timestamptz</td><td>The timestamp of when the cluster was considered ready</td></tr>
  <tr><td>instance_kots_version</td><td>string</td><td>The version of KOTS or the Replicated SDK that the instance is running. The version is displayed as a Semantic Versioning compliant string.</td></tr>
  <tr><td>instance_k8s_version</td><td>string</td><td>The version of Kubernetes running in the cluster.</td></tr>
  <tr><td>instance_is_airgap</td><td>boolean</td><td>The cluster is air gapped</td></tr>
  <tr><td>instance_is_kurl</td><td>boolean</td><td>The instance is installed in a Replicated kURL cluster (embedded cluster)</td></tr>
  <tr><td>instance_last_app_status</td><td>string</td><td>The instance's last reported app status</td></tr>
  <tr><td>instance_client</td><td>string</td><td>Indicates whether this instance is managed by KOTS or if it's a Helm CLI deployed instance using the SDK.</td></tr>
  <tr><td>instance_kurl_node_count_total</td><td>integer</td><td>Total number of nodes in the cluster. Applies only to kURL clusters.</td></tr>
  <tr><td>instance_kurl_node_count_ready</td><td>integer</td><td>Number of nodes in the cluster that are in a healthy state and ready to run Pods. Applies only to kURL clusters.</td></tr>
  <tr><td>instance_cloud_provider</td><td>string</td><td>The cloud provider where the instance is running. Cloud provider is determined by the IP address that makes the request.</td></tr>
  <tr><td>instance_cloud_provider_region</td><td>string</td><td>The cloud provider region where the instance is running. For example, us-central1-b.</td></tr>
  <tr><td>instance_app_version</td><td>string</td><td>The current application version</td></tr>
  <tr><td>instance_version_age</td><td>string</td><td>The age (in days) of the currently deployed release. This is relative to the latest available release on the channel.</td></tr>
  <tr><td>instance_is_gitops_enabled</td><td>boolean</td><td>Reflects whether the end user has enabled KOTS Auto-GitOps for deployments in their environment</td></tr>
  <tr><td>instance_gitops_provider</td><td>string</td><td>If KOTS Auto-GitOps is enabled, reflects the GitOps provider in use. For example, GitHub Enterprise.</td></tr>
  <tr><td>instance_is_skip_preflights</td><td>boolean</td><td>Indicates whether an end user elected to skip preflight check warnings or errors</td></tr>
  <tr><td>instance_preflight_status</td><td>string</td><td>The last reported preflight check status for the instance</td></tr>
  <tr><td>instance_k8s_distribution</td><td>string</td><td>The Kubernetes distribution of the cluster.</td></tr>
  <tr><td>instance_has_custom_metrics</td><td>boolean</td><td>Indicates the presence or absence of custom metrics and custom_metric__* columns</td></tr>
  <tr><td>instance_custom_metrics_reported_at</td><td>timestamptz</td><td>Timestamp of latest custom_metric</td></tr>
  <tr><td>custom_metric__*</td><td>string/integer/boolean</td><td>The values of any custom metrics that have been sent by the instance. For example, custom_metric__active_users</td></tr>
  <tr><td>instance_has_tags</td><td>boolean</td><td>Indicates the presence or absence of instance tags and instance_tag__* columns</td></tr>
  <tr><td>instance_tag__*</td><td>string/integer/boolean</td><td>The values of any instance tag that have been set by the vendor. For example, instance_tag__name</td></tr>
</table>
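Note that the wildcard fields in the table above (`customer_entitlement__*`, `custom_metric__*`, and `instance_tag__*`) expand into one CSV column per custom field. As a minimal sketch of how a downloaded CSV could be consumed programmatically, the following assumes a customers CSV saved locally under the hypothetical filename `customers.csv`:

```python
import csv

# Minimal sketch: read a downloaded customers CSV and collect the dynamic
# customer_entitlement__* columns. "customers.csv" is a hypothetical local
# filename, and the "true" string comparison is an assumption about how
# boolean fields are serialized in the export.
with open("customers.csv", newline="") as f:
    for row in csv.DictReader(f):
        if row.get("customer_has_entitlements") == "true":
            entitlements = {
                key: value
                for key, value in row.items()
                if key.startswith("customer_entitlement__")
            }
            print(row["customer_name"], entitlements)
```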
    -## Release v2022.02.18-0 +================ +File: docs/vendor/instance-insights-details.md +================ +# Instance Details -### Improvements +This topic describes using the Replicated Vendor Portal to quickly understand the recent events and performance of application instances installed in your customers' environments. +## About the Instance Details Page {#about-page} -- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.64.0. +The Vendor Portal provides insights about the health, status, and performance of the active application instances associated with each customer license on the **Instance details** page. You can use the insights on the **Instance details** page to more quickly troubleshoot issues with your customers' active instances, helping to reduce support burden. -## Release v2022.02.17-0 +For example, you can use the **Instance details** page to track the following events for each instance: -Released on February 17, 2022 +* Recent performance degradation or downtime +* Length of instance downtime +* Recent changes to the cluster or infrastructure +* Changes in the number of nodes, such as nodes lost or added +* Changes in the cluster's Kubernetes version +* Changes in the application version that the instance is running -### New Features -- (Beta) Introduces support for the [K3s](https://kurl.sh/docs/add-ons/k3s) and [RKE2](https://kurl.sh/docs/add-ons/rke2) add-ons. -- (Beta) Introduces support for a [single-node optimized installer specification](https://kurl.sh/docs/create-installer/single-node-optimized), using either [K3s](https://kurl.sh/docs/add-ons/k3s) or [RKE2](https://kurl.sh/docs/add-ons/rke2). -- The [KOTS](https://kurl.sh/docs/add-ons/kostadm) add-on no longer includes the MinIO image. +To access the **Instance details** page, go to **Customers** and click the **Customer reporting** button for the customer that you want to view: -### Improvements -- Automatic detection of the host's private IP on subsequent runs of the installation script. +![Customer reporting button on the Customers page](/images/customer-reporting-button.png) -### Bug Fixes -- Fixes an erroneous host preflight failure when using EKCO's [internal load balancer](https://kurl.sh/docs/add-ons/ekco#internal-load-balancer). -- Fixes a bug that caused containerd to fail with x509 errors when pulling images from the local kURL registry. -- Fixes a bug that resulted in the `kurl-config` ConfigMap to be missing when using [K3s](https://kurl.sh/docs/add-ons/k3s) and [RKE2](https://kurl.sh/docs/add-ons/rke2) distributions. +From the **Reporting** page for the selected customer, click the **View details** button for the desired application instance. -## Release v2022.02.11-1 +The following shows an example of the **Instance details** page: -Released on February 11, 2022 +![Instance details full page](/images/instance-details.png) -### Improvements -- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.63.0. +[View a larger version of this image](/images/instance-details.png) -## Release v2022.02.11-0 +As shown in the image above, the **Instance details** page includes the following sections: -Released on February 11, 2022 +* **Current State**: Information about the state of the instance, such as the current application version. See [Current State](#current-state) below. +* **Instance Insights**: Key performance indicators (KPIs) related to health, performance, and adoption. See [Insights](#insights) below. 
+* **Instance Information**: Information about the cluster where the instance is installed, such as the version of Kubernetes running on the cluster. See [Instance Information](#instance-information) below.
+* **Custom Metrics**: The values for any custom metrics that are configured for the application, from the most recent check-in. For more information about configuring custom metrics, see [Configuring Custom Metrics](/vendor/custom-metrics).
+* **Instance Uptime**: Details about instance uptime over time. See [Instance Uptime](#instance-uptime) below.
+* **Instance Activity**: Event data stream. See [Instance Activity](#instance-activity) below.

-### Bug Fixes
-- Fixes a failing preflight for the TCP load balancer check when EKCO's internal load balancer is enabled.

+### Current State

-## Release v2022.02.09-0

+The **Current State** section displays the following event data about the status and version of the instance:

-### Improvements
-- Adds support for Kubernetes versions 1.22.6, 1.21.9, and 1.20.15.
-- Adds support for Contour version 1.20.0.
-- Adds support for K3s version 1.23.3+k3s1. This feature is experimental and is only available to vendors who have requested access.
-- Adds support for RKE2 version 1.22.6+rke2r1. This feature is experimental and is only available to vendors who have requested access.
-- Updates the latest installer specification (https://kurl.sh/latest) to Kubernetes 1.23.x.

+* **App status**: The status of the application. Possible statuses are Ready, Updating, Degraded, Unavailable, and Missing. For more information about enabling application status insights and how to interpret the different statuses, see [Enabling and Understanding Application Status](insights-app-status).

-## Release v2022.02.04-0

+   Additionally, for applications that include the [Replicated SDK](/vendor/replicated-sdk-overview), you can hover over the **App status** field to view the statuses of the individual resources deployed by the application, as shown in the example below:

-Released on February 4, 2022

+   ![resource status pop up](/images/resource-status-hover-current-state.png)

-### Improvements
-- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.62.0.

+   [View a larger version of this image](/images/resource-status-hover-current-state.png)

-### Bug Fixes
-- Fixes an installer failure in scenarios where custom host preflights are enabled with other installer flags.
-- Fixes a bug that allowed for weak ciphers in etcd, kubelet, and kube apiserver.

+* **App version**: The version label of the currently running release. You define the version label in the release properties when you promote the release. For more information about defining release properties, see [Properties](releases-about#properties) in _About Channels and Releases_.

-## Release v2022.02.01-0

+   If there is no version label for the release, then the Vendor Portal displays the release sequence in the **App version** field. You can find the sequence number associated with a release by running the `replicated release ls` command. See [release ls](/reference/replicated-cli-release-ls) in the _Replicated CLI_ documentation.

-Released on February 1, 2022

+* **Version age**: The absolute and relative ages of the instance:

-### New Features
-- Adds support for RHEL 8.5.

+    * **Absolute age**: `now - current_release.promoted_date`
+
+      The number of days since the currently running application version was promoted to the channel.
For example, if the instance is currently running version 1.0.0, and version 1.0.0 was promoted to the channel 30 days ago, then the absolute age is 30. -### Improvements -- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.61.0. + * **Relative age (Days Behind Latest)**: `channel.latest_release.promoted_date - current_release.promoted_date` + + The number of days between when the currently running application version was promoted to the channel and when the latest available version on the channel was promoted. + + For example, the instance is currently running version 1.0.0, which was promoted to the Stable channel. The latest version available on the Stable channel is 1.5.0. If 1.0.0 was promoted 30 days ago and 1.5.0 was promoted 10 days ago, then the relative age of the application instance is 20 days. -### Bug Fixes -- Fixes Velero backup labels not being added to registry secrets when the secrets were already present. -- Fixes restoration of snapshots of the registry from pre-IPV6 support on new clusters. -- Fixes using the `skip-system-package-install` flag with the containerd add-on. +* **Versions behind**: The number of versions between the currently running version and the latest version available on the channel where the instance is assigned. -## Release v2022.01.28-2 + For example, the instance is currently running version 1.0.0, which was promoted to the Stable channel. If the later versions 1.1.0, 1.2.0, 1.3.0, 1.4.0, and 1.5.0 were also promoted to the Stable channel, then the instance is five versions behind. -Released on January 28, 2022 +* **Last check-in**: The timestamp when the instance most recently sent data to the Vendor Portal. -### Bug Fixes +### Instance Insights {#insights} -- Changes the [filesystem write latency host preflight for etcd](https://kurl.sh/docs/install-with-kurl/host-preflights#primary-nodes) to warn when greater than or equal to 10ms. +The **Insights** section includes the following metrics computed by the Vendor Portal: -## Release v2022.01.28-1 +* [Uptime](#uptime) +* [Time to Install](#time-to-install) -Released on January 28, 2022 +#### Uptime -### New Features -- Registry backup and restore scripts include more user-friendly logging within the container. +The Vendor Portal computes the total uptime for the instance as the fraction of time that the instance spends with a Ready, Updating, or Degraded status. The Vendor Portal also provides more granular details about uptime in the **Instance Uptime** graph. See [Instance Uptime](#instance-uptime) below. -### Bug Fixes -- Fixes airgap Postgres images in [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.60.0. +High uptime indicates that the application is reliable and able to handle the demands of the customer environment. Low uptime might indicate that the application is prone to errors or failures. By measuring the total uptime, you can better understand the performance of your application. -## Release v2022.01.28-0 +The following table lists the application statuses that are associated with an Up or Down state in the total uptime calculation: -Released on January 28, 2022 + + + + + + + + + + + + + +
<table>
  <tr><th>Uptime State</th><th>Application Statuses</th></tr>
  <tr><td>Up</td><td>Ready, Updating, or Degraded</td></tr>
  <tr><td>Down</td><td>Missing or Unavailable</td></tr>
</table>
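As a concrete illustration of this Up/Down mapping, the following sketch derives a total uptime fraction from a sequence of timestamped status changes. It is illustrative only, not the Vendor Portal's implementation, and the status history shown is hypothetical:

```python
from datetime import datetime

# Statuses that count as "Up" in the total uptime calculation; Missing and
# Unavailable count as "Down".
UP_STATUSES = {"Ready", "Updating", "Degraded"}

def total_uptime(events, now):
    """events: (timestamp, status) tuples, oldest first. Returns the fraction
    of elapsed time spent in an Up state."""
    up_seconds = 0.0
    for (start, status), (end, _) in zip(events, events[1:] + [(now, None)]):
        if status in UP_STATUSES:
            up_seconds += (end - start).total_seconds()
    return up_seconds / (now - events[0][0]).total_seconds()

# Hypothetical status history for one instance over a 24-hour window.
history = [
    (datetime(2023, 10, 1, 0, 0), "Ready"),         # 12h Up
    (datetime(2023, 10, 1, 12, 0), "Unavailable"),  # 6h Down
    (datetime(2023, 10, 1, 18, 0), "Degraded"),     # 6h Up (Degraded counts as Up)
]
print(f"{total_uptime(history, datetime(2023, 10, 2, 0, 0)):.0%}")  # 75%
```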
    -### New Features -- Adds support for Kubernetes version 1.23.x. +:::note +The Vendor Portal includes time spent in a Degraded status in the total uptime for an instance because an app may still be capable of serving traffic when some subset of desired replicas are available. Further, it is possible that a Degraded state is expected during upgrade. +::: -### Bug Fixes -- Fixes a bug that caused the installer to exit when installing Antrea version 1.4.0+ with encryption and without the requisite WireGuard kernel module. +#### Time to Install -## Release v2022.01.25-0 +The Vendor Portal computes both _License time to install_ and _Instance time to install_ metrics to represent how quickly the customer was able to deploy the application to a Ready state in their environment. -Released on January 25, 2022 +Replicated recommends that you use Time to Install as an indicator of the quality of the packaging, configuration, and documentation of your application. -### New Features -- [Host preflight](https://kurl.sh/docs/install-with-kurl/host-preflights/) failures are now blocking, and the installer will exit with error. Warnings do not cause the installer to exit. Warnings can be enforced and errors can be ignored with [`host-preflight-enforce-warnings` and `host-preflight-ignore`](https://kurl.sh/docs/install-with-kurl/advanced-options). +If the installation process for your application is challenging, poorly documented, lacks appropriate preflight checks, or relies heavily on manual steps, then it can take days or weeks to deploy the application in customer environments. A longer Time to Install generally represents a significantly increased support burden and a degraded customer installation experience. -### Improvements -- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.60.0. -- Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.16.0, which does a rollout restart of the envoy pods after generating the new certificates instead of restarting all pods at once. It will also validate and renew certificates on startup. +The following describes the _License time to install_ and _Instance time to install_ metrics: -### Bug Fixes -- Fix legacy `apiregistration.k8s.io/v1beta1` resource for Prometheus 0.53.1-30.1.0. +* **License time to install**: The time between when you create the customer license in the Vendor Portal, and when the application instance reaches a Ready status in the customer environment. -## Release v2022.01.24-0 + License time to install represents the time that it takes for a customer to successfully deploy your application after you intend to distribute the application to the customer. Replicated uses the timestamp of when you create the customer license in the Vendor Portal to represent your intent to distribute the application because creating the license file is generally the final step before you share the installation materials with the customer. -Released on January 24, 2022 + License time to install includes several activities that are involved in deploying the application, including the customer receiving the necessary materials and documentation, downloading the assets, provisioning the required hardware, networking, external systems, completing the preflight checks, and finally installing, configuring, and deploying the application. -### Bug Fixes -- Reverts an update to React-DOM that was causing the TestGrid UI to fail. 
+* **Instance time to install**: The time between when the Vendor Portal records the first event for the application instance in the customer environment, and when the instance reaches a Ready status. -## Release v2022.01.22-0 + Instance time to install is the length of time that it takes for the application to reach a Ready state after the customer starts a deployment attempt in their environment. Replicated considers a deployment attempt started when the Vendor Portal first records an event for the instance. + + For more information about how the Vendor Portal generates events, see [About Events](instance-insights-event-data#about-events) in _Event Data_. + + :::note + Instance time to install does _not_ include any deployment attempts that a customer might have made that did not generate an event. For example, time spent by the customer discarding the server used in a failed attempt before attempting to deploy the instance again on a new server. + ::: -Released on January 22, 2022 +### Instance Information -### Bug Fixes -- Changes the default Kubernetes version from 1.22.x to 1.21.x to mitigate an incompatibility with the default Prometheus version. +The **Instance Information** section displays the following details about the cluster infrastructure where the application is installed as well as vendor-defined metadata about the instance: -## Release v2022.01.21-0 +* The Kubernetes distribution for the cluster. For example, GKE or EKS. +* The version of Kubernetes running in the cluster. +* The version of KOTS or the Replicated SDK installed in the cluster. +* For **First Seen**, the timestamp of the first event that the Vendor Portal generated for the instance. For more information about how the Vendor Portal generates events, see [About Events](instance-insights-event-data#about-events) in _Event Data_. +* If detected, the cloud provider and region where the cluster is running. For example, `GCP: us-central1`. +* An optional vendor-defined name for the instance. +* Optional vendor-defined instance tags in the form of key-value pairs. Each instance can have a maximum of 10 tags. -Released on January 21, 2022 +In addition to the details listed above, the **Instance Information** section also displays the following for embedded clusters provisioned by Replicated kURL: +* Node operating systems +* Node operating systems versions +* Total number of cluster nodes +* Number of cluster nodes in a Ready state +* ID of the kURL installer specification -### Improvements -- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version v1.59.3. -- Adds [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.53.1-30.1.0. -- Adds [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.15.0, which supports auto-renewal of Contour and Envoy certs. -- Moves the [`latest`](https://kurl.sh/latest) installer on kurl.sh to Kubernetes 1.22.5. +### Instance Uptime -### Bug Fixes -- Fixes a bug that caused the **Internal Storage** snapshot option to be missing when an object store is available. -- Fixes random Alert Manager and Grafana Nodeports in the Prometheus add-on for versions 0.53.1-30.1.0+. +The **Instance Uptime** graph shows the percentage of a given time period that the instance was in an Up, Degraded, or Down state. +To determine if the instance is Up, Degraded, or Down, the Vendor Portal uses the application status. Possible application statuses are Ready, Updating, Degraded, Unavailable, and Missing. 
The following table lists the application statuses that are associated with each state in the **Instance Uptime** graph: -## Release v2022.01.18-0 + + + + + + + + + + + + + + + + + +
<table>
  <tr><th>Uptime State</th><th>Application Statuses</th></tr>
  <tr><td>Up</td><td>Ready or Updating</td></tr>
  <tr><td>Degraded</td><td>Degraded</td></tr>
  <tr><td>Down</td><td>Missing or Unavailable</td></tr>
</table>
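In other words, the graph buckets the same application statuses into three states rather than the two used for total uptime. A minimal sketch of that mapping (illustrative only, not the Vendor Portal's implementation):

```python
# Three-state bucketing used by the Instance Uptime graph, in contrast to the
# two-state Up/Down mapping used for the total uptime metric.
GRAPH_STATE = {
    "Ready": "Up",
    "Updating": "Up",
    "Degraded": "Degraded",
    "Missing": "Down",
    "Unavailable": "Down",
}

print(GRAPH_STATE["Degraded"])  # Degraded
```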
    -Released on January 18, 2022 +The following shows an example of an **Instance Uptime** graph: -### New Features -- Adds the ability to exclude the built-in host preflights during installation. +![Uptime Graph on the Instance details page](/images/instance-uptime-graph.png) -### Improvements -- Adds support for Kubernetes v1.22.5. -- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version v1.59.2. -- Adds version 0.14.0 of the EKCO add-on, which adds Kubernetes v1.22+ support. +You can hover over the bars in the **Instance Uptime** graph to view more detail about the percent of time that the instance was in each state during the given time period. -### Bug Fixes -- Fixes a race condition with Storage Class migration. -- Fixes a bug related to long Persistent Volume Claim (PVC) names when migrating Storage Classes. -- Fixes some host preflight error messages. +![Uptime Graph with event markers on the Instance details page](/images/instance-uptime-graph-event-markers.png) -## Release v2022.01.05-0 +You can hover over the event markers in the **Instance Uptime** graph to view more detail about the events that occurred during that given interval on the graph. If more than two events occurred in that period, the event marker displays the number of events that occurred during that period. If you click the event marker or the event in the tooltip, the **Instance Activity** section highlights the event or the first event in the group. -Released on January 5, 2022 +### Instance Activity -### Improvements -- Adds support for Kubernetes 1.19.16, 1.20.14, 1.21.8. +The **Instance Activity** section displays recent events for the instance. The data stream is updated each time an instance _check-in_ occurs. For more information about what triggers an instance check-in, see [How the Vendor Portal Collects Instance Data](instance-insights-event-data#about-reporting) in _About Instance and Event Data_. -### Bug Fixes -- Resolves an error when installing the Velero add-on with Kubernetes 1.21 and `disableS3=true` set for KOTS. -- Fixes an issue with the KOTS URL not printing correctly when performing a re-install or upgrade. +The timestamp of events displayed in the **Instance Activity** stream is the timestamp when the Replicated Vendor API received data from the instance. The timestamp of events does not necessarily reflect the timestamp of when the event occurred. -## Release v2022.01.04-0 +The following shows an example of the **Instance Activity** data stream: -Released on January 4, 2022 +![Instance Activity section of Instance details page](/images/instance-activity.png) -### Bug Fixes -- Reverts `latest` version of Kubernetes to v1.19.x. +You can filter the **Instance Activity** stream by the following categories: -## Release v2021.12.29-0 +* [App install/upgrade](#app-install-upgrade) +* [App status](#app-status) +* [Cluster status](#cluster) +* [Custom metrics](#custom-metrics) +* [Infrastructure status](#infrastructure) +* [KOTS version](#kots) +* [Replicated SDK version](#sdk) +* [Upstream update](#upstream) -Released on December 29, 2021 +The following tables describe the events that can be displayed in the **Instance Activity** stream for each of the categories above: +#### App install/upgrade {#app-install-upgrade} -### New Features -- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.59.1. + + + + + + + + + + + + + +
<table>
  <tr><th>Label</th><th>Description</th></tr>
  <tr><td>App Channel</td><td>The ID of the channel the application instance is assigned.</td></tr>
  <tr><td>App Version</td><td>The version label of the release that the instance is currently running. The version label is the version that you assigned to the release when promoting it to a channel.</td></tr>
</table>
    +#### App status {#app-status} -## Release v2021.12.23-0 + + + + + + + + + +
<table>
  <tr><th>Label</th><th>Description</th></tr>
  <tr><td>App Status</td><td><p>A string that represents the status of the application. Possible values: Ready, Updating, Degraded, Unavailable, Missing. For applications that include the Replicated SDK, hover over the application status to view the statuses of the individual resources deployed by the application.</p><p>For more information, see <a href="insights-app-status">Enabling and Understanding Application Status</a>.</p></td></tr>
</table>
    -Released on December 23, 2021 +#### Cluster status {#cluster} -### New Features -- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.59.0. -- Adds support for [cluster migration away from object storage](https://kurl.sh/docs/install-with-kurl/removing-object-storage). KOTS can now be deployed without an object store with [no loss of snapshot or registry functionality](https://kurl.sh/docs/add-ons/kotsadm). + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
<table>
  <tr><th>Label</th><th>Description</th></tr>
  <tr><td>Cluster Type</td><td><p>Indicates if the cluster was provisioned by kURL.</p><p>Possible values:</p><ul><li><strong>kURL</strong>: The cluster is provisioned by kURL.</li><li><strong>Existing</strong>: The cluster is not provisioned by kURL.</li></ul><p>For more information about kURL clusters, see Creating a kURL installer.</p></td></tr>
  <tr><td>Kubernetes Version</td><td>The version of Kubernetes running in the cluster.</td></tr>
  <tr><td>Kubernetes Distribution</td><td><p>The Kubernetes distribution of the cluster.</p><p>Possible values:</p><ul><li>EKS</li><li>GKE</li><li>K3S</li><li>RKE2</li></ul></td></tr>
  <tr><td>kURL Nodes Total</td><td><p>Total number of nodes in the cluster.</p><p><strong>Note</strong>: Applies only to kURL clusters.</p></td></tr>
  <tr><td>kURL Nodes Ready</td><td><p>Number of nodes in the cluster that are in a healthy state and ready to run Pods.</p><p><strong>Note</strong>: Applies only to kURL clusters.</p></td></tr>
  <tr><td>New kURL Installer</td><td><p>The ID of the kURL installer specification that kURL used to provision the cluster. Indicates that a new Installer specification was added. An installer specification is a manifest file that has <code>apiVersion: cluster.kurl.sh/v1beta1</code> and <code>kind: Installer</code>.</p><p>For more information about installer specifications for kURL, see Creating a kURL installer.</p><p><strong>Note</strong>: Applies only to kURL clusters.</p></td></tr>
</table>
    -## Release v2021.12.21-0 +#### Custom metrics {#custom-metrics} -Released on December 21, 2021 +You can filter the activity feed by any custom metrics that are configured for the application. The labels for the custom metrics vary depending on the custom key value pairs included in the data set that is sent to the Vendor Portal. For example, the key value pair `"num_projects": 5` is displayed as **Num Projects: 5** in the activity feed. -### Improvements -- Updates front-end dependencies to latest available versions. +For more information about configuring custom metrics, see [Configuring Custom Metrics](/vendor/custom-metrics). +#### Infrastructure status {#infrastructure} -## Release v2021.12.17-0 + + + + + + + + + + + + + +
<table>
  <tr><th>Label</th><th>Description</th></tr>
  <tr><td>Cloud Provider</td><td><p>The cloud provider where the instance is running. Cloud provider is determined by the IP address that makes the request.</p><p>Possible values:</p><ul><li>AWS</li><li>GCP</li><li>DigitalOcean</li></ul></td></tr>
  <tr><td>Cloud Region</td><td>The cloud provider region where the instance is running. For example, us-central1-b.</td></tr>
</table>
    -Released on December 17, 2021 +#### KOTS version {#kots} -### Bug Fixes -- Improves experimental [IPv6](https://kurl.sh/docs/install-with-kurl/ipv6) support. + + + + + + + + + +
<table>
  <tr><th>Label</th><th>Description</th></tr>
  <tr><td>KOTS Version</td><td>The version of KOTS that the instance is running. KOTS version is displayed as a Semantic Versioning compliant string.</td></tr>
</table>
    -## Release v2021.12.14-0 +#### Replicated SDK version {#sdk} -Released on December 14, 2021 + + + + + + + + + +
<table>
  <tr><th>Label</th><th>Description</th></tr>
  <tr><td>Replicated SDK Version</td><td>The version of the Replicated SDK that the instance is running. SDK version is displayed as a Semantic Versioning compliant string.</td></tr>
</table>
    -### New Features -- Adds [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.58.2. +#### Upstream update {#upstream} -### Bug Fixes -- Adds Contour 1.19.1 images that were missing from airgap bundles. + + + + + + + + + +
<table>
  <tr><th>Label</th><th>Description</th></tr>
  <tr><td>Versions Behind</td><td><p>The number of versions between the version that the instance is currently running and the latest version available on the channel.</p><p>Computed by the Vendor Portal each time it receives instance data.</p></td></tr>
</table>
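As a concrete illustration of how such a value could be derived (not the Vendor Portal's implementation), given the ordered release history of a channel and the instance's current version:

```python
def versions_behind(channel_releases, current_version):
    """channel_releases: version labels promoted to the channel, oldest first.
    Returns how many versions the instance trails the latest release."""
    return len(channel_releases) - 1 - channel_releases.index(current_version)

# Hypothetical channel history: an instance on 1.0.0 is five versions behind.
releases = ["1.0.0", "1.1.0", "1.2.0", "1.3.0", "1.4.0", "1.5.0"]
print(versions_behind(releases, "1.0.0"))  # 5
```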
    +================ +File: docs/vendor/instance-insights-event-data.mdx +================ +import AirGapTelemetry from "../partials/instance-insights/_airgap-telemetry.mdx" -## Release v2021.12.10-0 +# About Instance and Event Data -Released on December 10, 2021 +This topic provides an overview of the customer and instance insights that you can view in the Replicated Vendor Portal. It includes information about how the Vendor Portal accesses data as well as requirements and limitations. -### New Features -- Adds the ability to skip the installation of system packages by passing the `skip-system-package-install` flag. Using this flag will automatically enable a preflight check that will detect if the necessary system packages for the included addons are already installed. +## How the Vendor Portal Collects Instance Data {#about-reporting} -### Improvements -- kURL `latest` installer spec is now pinned to Kubernetes version 1.21.x -- kURL `latest` installer spec will now pin to addon minor versions rather than `latest`. +This section describes how the Vendor Portal collects instance data from online and air gap environments. -## Release v2021.12.09-0 +### Online Instances -Released on December 9, 2021 +For instances running in online (internet-connected) environments, either Replicated KOTS or the Replicated SDK periodically sends a small amount of data to the Vendor Portal, depending on which is installed in the cluster alongside the application. If both KOTS and the SDK are installed in the cluster (such as when a Helm chart that includes the SDK is installed by KOTS), then both send instance data. -### Improvements -- Adds support for Oracle Linux 8.5. +The data sent to the Vendor Portal includes properties such as the current version and status of the instance. For a full overview of what data might be included, see the [Replicated Data Transmission Policy](https://docs.replicated.com/vendor/policies-data-transmission). -### Bug Fixes -- Temporarily removes the Prometheus add-on version 0.52.0-22.0.0 due to an [upstream bug](https://github.com/prometheus-community/helm-charts/issues/1500). +The following diagram shows the flow of different types of data from customer environments to the Vendor Portal: -## Release v2021.12.08-0 +![Telemetry sent from instances to vendor platform](/images/telemetry-diagram.png) -Released on December 8, 2021 +[View a larger version of this image](/images/telemetry-diagram.png) -### New Features -- Adds [EKCO](https://kurl.sh/docs/add-ons/ekco) version 0.13.0. -- Adds Velero version 1.7.1. -- Adds Longhorn version 1.2.2. -- Adds Sonobuoy version 0.55.1. -- Adds Antrea version 1.4.0. -- Adds Prometheus version 0.52.0-22.0.0. -- Updates pvmigrate to 0.4.1. +As shown in the diagram above, application instance data, application status data, and details about the KOTS and the SDK instances running in the cluster are all sent to the Vendor Portal through the Replicated app service: +* When both KOTS and the SDK are installed in the cluster, they both send application instance data, including information about the cluster where the instance is running. +* KOTS and the SDK both send information about themselves, including the version of KOTS or the SDK running in the cluster. +* Any custom metrics configured by the software vendor are sent to the Vendor Portal through the Replicated SDK API. For more information, see [Configuring Custom Metrics](/vendor/custom-metrics). +* Application status data, such as if the instance is ready or degraded, is sent by KOTS. 
If KOTS is not installed in the cluster, then the SDK sends the application status data. For more information, see [Enabling and Understanding Application Status](/vendor/insights-app-status).

-### Bug Fixes
-- Prevents EKCO from trying to manage Rook when Rook is not installed.
-- Fixes missing packages in some Longhorn migration scenarios.

+### Air Gap Instances

-## Release v2021.12.02-0

+<AirGapTelemetry/>

-Released on December 2, 2021

+For more information, see [Collecting Telemetry for Air Gap Instances](/vendor/telemetry-air-gap).

-### New Features
-- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.58.1.

+## Frequency of Data Sent to the Vendor Portal

-## Release v2021.12.01-0

+This section describes how frequently data is sent to the Vendor Portal for online and air gap instances.

-Released on December 1, 2021

+### From the Replicated SDK (Online Instances Only)

-### New Features
-- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.58.0.

+When installed alongside the application in an online environment, the SDK automatically sends instance data to the Vendor Portal when any of the following occur:

-### Bug Fixes
-- Host packages installed as DNF modules are now reset after installation to allow for running yum update without dependency errors.

+* The SDK sends data every four hours.
+* The instance checks for updates. An update check occurs when the instance makes a request to the `/api/v1/app/updates` SDK API endpoint. See [app](/reference/replicated-sdk-apis#app) in _Replicated SDK API (Alpha)_.

-## Release v2021.11.22-0

+* The instance completes a Helm update to a new application version. After the update completes, the SDK sends data when it restarts.

-Released on November 22, 2021

+* The status of an instance changes. For example, an instance can change from a Ready to Degraded status. For more information, see [Enabling and Understanding Application Status](insights-app-status).

-### New Features
-- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.57.0.

+### From KOTS (Online Instances Only)
+When installed alongside the application in an online environment, KOTS automatically sends instance data to the Vendor Portal when any of the following occur:

-## Release v2021.11.09-0

+* The instance checks for updates. By default, KOTS checks for updates every four hours. Additionally, an update check can occur when a user clicks the **Check for updates** button in the Replicated Admin Console.

-Released on November 09, 2021

+  :::note
+  KOTS users can modify or disable automatic update checks from the Admin Console. For more information, see [Configuring Automatic Updates](/enterprise/updating-apps).
+  :::

-### Improvements
-- kURL will now report when migrations occur between the Rook Ceph and MiniO object stores.
-- kURL will now report when migrations occur between the Rook Ceph and Longhorn storage classes.

+* The status of an instance changes. For example, an instance can change from a Ready to Degraded status. For more information, see [Enabling and Understanding Application Status](insights-app-status).

-### Bug Fixes
-- Fixed an issue that prevented the versions of Longhorn and MinIO from resolving in kurl.sh/latest.

+* (KOTS v1.92 and later only) The instance deploys a new application version.
-## Release v2021.11.08-0

+### From Air Gap Instances

-Released on November 08, 2021

+For air gap instances, the frequency of data sent to the Vendor Portal depends on how frequently support bundles are collected in the customer environment and uploaded to the Vendor Portal.

-### Improvements
-- The default configuration for https://kurl.sh/latest was updated to replace Rook with Longhorn and MinIO.

+For more information, see [Collecting Telemetry for Air Gap Instances](/vendor/telemetry-air-gap).

-## Release v2021.11.05-0

+## How the Vendor Portal Generates Events and Insights {#about-events}

-Released on November 05, 2021

+When the Vendor Portal receives instance data, it evaluates each data field to determine if there was a change in its value. For each field that changes in value, the Vendor Portal creates an _event_ to record the change. For example, a change from Ready to Degraded in the application status generates an event.

-### New Features
-- Added mechanism to migrate registry contents from s3 to a persistent volume. Note that this cannot be triggered yet, but will later be used once all object storage-related migrations are available.
-- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.56.0.

+In addition to creating events for changes in data sent by the instance, the Vendor Portal also generates events for changes in values of computed metrics. The Vendor Portal updates the values of computed metrics each time it receives instance data. For example, the Vendor Portal computes a _Versions behind_ metric that tracks the number of versions behind the latest available version for the instance. When the instance checks for updates and a new update is available, the value of this metric changes and the Vendor Portal generates an event.

-### Bug Fixes
-- Reverted changes to https://kurl.sh/latest that were introduced in [v2021.11.04-0](https://kurl.sh/release-notes/v2021.11.04-0). As a result, Rook and Kubernetes 1.19 are once again in the default configuration.

+The Vendor Portal uses events to display insights for each active instance in an **Instance details** dashboard. For more information about using the Vendor Portal **Instance details** page to monitor active instances of your application, see [Instance Details](instance-insights-details).

-## Release v2021.11.04-0

+## Requirements

-Released on November 04, 2021

+The following requirements apply to collecting instance telemetry:

-### Improvements
-- The default configuration for https://kurl.sh/latest was updated to include Kubernetes 1.21 instead of 1.19, and Rook was replaced with Longhorn and MinIO. Note that using `rook: latest` with `kubernetes: latest` no longer works as Rook 1.0.4 is not compatible with Kubernetes 1.20+. To avoid this, pin a specific version instead of using `latest`.

+* Replicated KOTS or the Replicated SDK must be installed in the cluster where the application instance is running.

-## Release v2021.11.02-0

+* For KOTS installations and for Helm CLI installations that use `helm template` then `kubectl apply`, additional configuration is required to get application status data. For more information, see [Enabling and Understanding Application Status](/vendor/insights-app-status).

-Released on November 02, 2021

+* To view resource status details for an instance on the **Instance details** page, the Replicated SDK must be installed in the cluster alongside the application.
For more information, see [View Resource Status Insights](insights-app-status#resource-status) in _Enabling and Understanding Application Status_.

-### Improvements
-- Rook Ceph versions 1.4+ will now display an info-level message when trying to mount an external disk, along with some troubleshooting tips.

+* There are additional requirements for collecting telemetry from air gap instances. For more information, see [Collecting Telemetry for Air Gap Instances](/vendor/telemetry-air-gap).

-### Bug Fixes
-- kURL [yaml patches](https://kurl.sh/docs/install-with-kurl/#modifying-an-install-using-a-yaml-patch-file-at-runtime) that include non-breaking spaces will now cause the installer to fail with a helpful error.
-- Null or empty kURL [yaml patches](https://kurl.sh/docs/install-with-kurl/#modifying-an-install-using-a-yaml-patch-file-at-runtime) will not remove the configuration provided by the kURL spec.

+## Limitations

-## Release v2021.10.22-0

+The Vendor Portal has the following limitations for reporting instance data and generating events:

-Released on October 22, 2021

+* **Active instances**: Instance data is available for _active_ instances. An instance is considered inactive when its most recent check-in was more than 24 hours ago. An instance can become inactive if it is decommissioned, stops checking for updates, or otherwise stops reporting.

-### New Features
-- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.55.0.

+  The Vendor Portal continues to display data for an inactive instance from its most-recently seen state. This means that data for an inactive instance might continue to show a Ready status after the instance becomes inactive. Replicated recommends that you use the timestamp in the **Last Check-in** field to understand if an instance might have become inactive, causing its data to be out-of-date.
+* **Air gap instances**: There are additional limitations for air gap telemetry. For more information, see [Collecting Telemetry for Air Gap Instances](/vendor/telemetry-air-gap).
+* **Instance data freshness**: The rate at which data is updated in the Vendor Portal varies depending on how often the Vendor Portal receives instance data.
+* **Event timestamps**: The timestamp of events displayed on the **Instance details** page is the timestamp when the Replicated Vendor API received the data from the instance. The timestamp of events does not necessarily reflect the timestamp of when the event occurred.
+* **Caching for kURL cluster data**: For clusters created with Replicated kURL (embedded clusters), KOTS stores the counts of total nodes and ready nodes in a cache for five minutes. If KOTS sends instance data to the Vendor Portal within the five minute window, then the reported data for total nodes and ready nodes reflects the data in the cache. This means that events displayed on the **Instance details** page for the total nodes and ready nodes can show values that differ from the current values of these fields.

-## Release v2021.10.20-0

+================
+File: docs/vendor/instance-notifications-config.mdx
+================
+import NotificationsAbout from "../partials/instance-insights/_notifications-about.mdx"

-### New Features
-- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.54.0.

+# Configuring Instance Notifications (Beta)

-### Bug Fixes
-- Fixed a bug caused when Ceph update versions are not applied to all Ceph components.
-- Reverted the ability for the registry add-on to run with two replicas and a RWX volume when used with Longhorn. This was originally released in [v2021.10.01-0](https://kurl.sh/release-notes/v2021.10.01-0).

+<NotificationsAbout/>

-## Release v2021.10.08-0

+This topic describes how to configure Slack or email notifications in the Replicated Vendor Portal for instances of your application.

-Released on October 08, 2021

+For information about creating and managing instance notifications with the Vendor API v3, see the [notifications](https://replicated-vendor-api.readme.io/reference/subscribeinstanceevents) section in the Vendor API v3 documentation.

-### New Features
-- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.53.0.

+## Overview

-## Release v2021.10.04-0

+Teams can receive notifications about customer instances through a Slack channel. Individual users can also receive email notifications.

-Released on October 04, 2021

+Instance notifications can be disabled when they are no longer needed. For example, a team member can turn off their email notifications for a customer instance when they are no longer responsible for supporting that customer.

-### New Features
-- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.52.1.

+## Prerequisite

-## Release v2021.10.01-0

+For Slack notifications, you must configure a Slack webhook in the Vendor Portal at the Team level before you can turn on instance notifications. For more information, see [Configuring a Slack Webhook (Beta)](team-management-slack-config).

-Released on October 01, 2021

+For email notifications, no prior configuration is required. The email address listed in your Vendor Portal account settings is used.

-### New Features
-- Containerd is now the default container runtime, replacing the previous default container runtime, Docker.
-- Log rotation will now be configured by default for the [Docker add-on](https://kurl.sh/docs/add-ons/docker), where the [max-size](https://docs.docker.com/config/containers/logging/json-file/#options) parameter for the log file is set to `10m`.
-- Added the ability to configure log rotation through kubelet, which helps when using containerd instead of docker.
-- Re-enabled the ability to declare custom host preflight checks in the kURL installer spec.

+## Configure Notifications

-### Improvements
-- When Longhorn is specified in an installer spec but an object store (e.g., MinIO) is not, the [Registry add-on](https://kurl.sh/docs/add-ons/registry) will be deployed with two replicas and a ReadWriteMany (RWX) volume for greater availability.

+Follow this procedure to configure Slack or email notifications for application instances. You can enable notifications for application status changes, system events such as Kubernetes upgrades, or changes in the values of any custom metrics configured for the application.

-### Bug Fixes
-- Fixed a bug that didn't allow User and Service Account tokens to authenticate to the kURL API.

+To configure notifications:

-## Release v2021.09.30-0

+1. Go to **Applications > Customers**, and click an active customer instance that you want to receive notifications for.

-Released on September 30, 2021

+   Customer instances list in the Vendor Portal

-### Bug Fixes
-- Fixed a bug to allow User and Service Account token authenticate to the API
-- Fixed a bug that could cause upgrades from Rook 1.0.4 to 1.0.4-14.2.21 to fail
-- Fixed a bug that would cause snapshots not to restore after a Rook to Longhorn migration

+1. 
On the Instance Details page, click **Notifications**.

-### Improvements
-- Sysctl parameters required for pod networking are now enabled for all operating systems in /etc/sysctl.conf

-## Release v2021.09.27-4

+1. From the **Configure Instance Notifications** dialog, select the types of notifications to enable.

+   ![Configure Instance Notifications dialog](/images/instance-notifications-dialog.png)

-Released on September 27, 2021

+   [View a larger version of this image](/images/instance-notifications-dialog.png)

-### Bug Fixes
-- Due to a bug, removed the ability to add custom host preflights in the kURL installer spec. This was initially released in [v2021.09.24-0](https://kurl.sh/release-notes/v2021.09.24-0).

+1. Click **Save**.

-## Release v2021.09.24-0

+1. Repeat these steps to configure notifications for other application instances.

-Released on September 24, 2021

-### New Features
-- Custom host preflight checks can be declared in the kURL installer spec.
-- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.52.0.

+## Test Notifications

-### Bug Fixes
-- Fixed an issue that prevented Rook add-on preflight checks from executing.

+After you enable notifications for a running development instance, test that your notifications are working as expected.

-## Release v2021.09.20-0

+Do this by forcing your application into a non-ready state. For example, you can delete one or more application Pods and wait for a ReplicationController to recreate them.

-Released on September 20, 2021

+Then, look for notifications in the assigned Slack channel. You also receive an email if you enabled email notifications.

-### Bug Fixes
-- Fixed a bug that could cause the EKCO addon to fail when mistakenly trying to deploy the `PodImageOverrides` mutating webhook configuration.

+:::note
+There is a 30-second buffer between event detection and notifications being sent. This buffer provides better roll-ups and reduces noise.
+:::

-## Release v2021.09.17-0

+================
+File: docs/vendor/kots-faq.mdx
+================
+import SDKOverview from "../partials/replicated-sdk/_overview.mdx"
+import EmbeddedKubernetes from "../partials/kots/_embedded-kubernetes-definition.mdx"
+import Helm from "../partials/helm/_helm-definition.mdx"
+import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"

-Released on September 17, 2021

+# Replicated FAQs

-### New Features
-- Added Kubernetes versions 1.21.5, 1.21.4, 1.21.3, 1.20.11, 1.20.10, and 1.19.15.

+This topic lists frequently asked questions (FAQs) for different components of the Replicated Platform.

-## Release v2021.09.16-0

+## Getting Started FAQs

-Released on September 16, 2021

+### What are the supported application packaging options?

-### New Features
-- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kots) nightly version.

+Replicated strongly recommends that all applications are packaged using Helm.

-## Release v2021.09.15-0

+ 

-Released on September 15, 2021

+Many enterprise customers expect to be able to install an application with Helm in their own cluster. Packaging with Helm allows you to support installation with the Helm CLI and with the Replicated installers (Replicated Embedded Cluster and Replicated KOTS) from a single release in the Replicated Platform.

-### New Features
-- Added [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.12.0.
-- Host preflights check disk space in /opt/replicated/rook with Rook 1.0.4. 
+For vendors that do not want to use Helm, applications distributed with Replicated can be packaged as Kubernetes manifest files. -### Improvements -- Host preflight block device checks run for all versions of Rook 1.4+. +### How do I get started with Replicated? -## Release v2021.09.09-0 +Replicated recommends that new users start by completing one or more labs or tutorials to get familiar with the processes of creating, installing, and iterating on releases for an application with the Replicated Platform. -Released on September 9, 2021 +Then, when you are ready to begin onboarding your own application to the Replicated Platform, see [Replicated Onboarding](replicated-onboarding) for a list of Replicated features to begin integrating. -### New Features -- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kots) version 1.50.2. +#### Labs -## Release v2021.08.27-0 +The following labs in Instruqt provide a hands-on introduction to working with Replicated features, without needing your own sample application or development environment: -Released on August 27, 2021 +* [Distributing Your Application with Replicated](https://play.instruqt.com/embed/replicated/tracks/distributing-with-replicated?token=em_VHOEfNnBgU3auAnN): Learn how to quickly get value from the Replicated Platform for your application. +* [Delivering Your Application as a Kubernetes Appliance](https://play.instruqt.com/embed/replicated/tracks/delivering-as-an-appliance?token=em_lUZdcv0LrF6alIa3): Use Embedded Cluster to distribute Kubernetes and an application together as a single appliance. +* [Avoiding Installation Pitfalls](https://play.instruqt.com/embed/replicated/tracks/avoiding-installation-pitfalls?token=em_gJjtIzzTTtdd5RFG): Learn how to use preflight checks to avoid common installation issues and assure your customer is installing into a supported environment. +* [Closing the Support Information Gap](https://play.instruqt.com/embed/replicated/tracks/closing-information-gap?token=em_MO2XXCz3bAgwtEca): Learn how to use support bundles to close the information gap between your customers and your support team. +* [Protecting Your Assets](https://play.instruqt.com/embed/replicated/tracks/protecting-your-assets?token=em_7QjY34G_UHKoREBd): Assure your customers have the right access to your application artifacts and features using Replicated licensing. -### New Features -- Clusters with containerd enabled will be automatically migrated from docker when docker is detected. Previously containerd would not be installed when docker was detected. +#### Tutorials -### Bug Fixes -- Fixed an issue that prevented the [internal load balancer](https://kurl.sh/docs/add-ons/ekco#internal-load-balancer) from being started on remote nodes when not explicitly enabled. -- Fixed an issue that could cause the [minio add-on](https://kurl.sh/docs/add-ons/minio) to wait forever when creating a PVC. +The following getting started tutorials demonstrate how to integrate key Replicated features with a sample Helm chart application: +* [Install a Helm Chart on a VM with Embedded Cluster](/vendor/tutorial-embedded-cluster-setup): Create a release that can be installed on a VM with the Embedded Cluster installer. +* [Install a Helm Chart with KOTS and the Helm CLI](/vendor/tutorial-kots-helm-setup): Create a release that can be installed with both the KOTS installer and the Helm CLI. +* [Set Helm Chart Values with KOTS](/vendor/tutorial-config-setup): Configure the Admin Console Config screen to collect user-supplied values. 
+* [Add Preflight Checks to a Helm Chart](/vendor/tutorial-preflight-helm-setup): Create preflight checks for your application by adding a spec for preflight checks to a Secret in the Helm templates.
-## Release v2021.08.20-0
+### What are air gap installations?
-Released on August 20, 2021
+_Air gap_ refers to a computer or network that does not have outbound internet access. Air-gapped environments are common for enterprises that require high security, such as government agencies or financial institutions.
-### New Features
-- Added a new parameter to the [MinIO addon](https://kurl.sh/docs/add-ons/minio), `claimSize`. This defaults to `10Gi` and allows setting the size of the MinIO PVC.
-- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kots) version 1.50.1.
+Traditionally, air-gapped systems are physically isolated from the network. For example, an air-gapped server might be stored in a separate location away from network-connected servers. Physical access to air-gapped servers is often restricted as well.
-## Release v2021.08.16-0
+It is also possible to use _virtual_ or _logical_ air gaps, in which security controls such as firewalls, role-based access control (RBAC), and encryption are used to logically isolate a device from a network. In this way, network access is still restricted, but there is not a physical air gap that disconnects the device from the network.
-Released on August 16, 2021
+Replicated supports installations into air-gapped environments. In an air gap installation, users first download the images and other assets required for installation on an internet-connected device. These installation assets are usually provided in an _air gap bundle_ that ISVs can build in the Replicated Vendor Portal. Then, users transfer the installation assets to their air-gapped machine where they can push the images to an internal private registry and install.
-### New Features
-- New feature flag [licenseURL](https://kurl.sh/docs/install-with-kurl/#vendor-licensing-agreement-beta) for kURL allows vendors to include a URL to a licensing agreement for non-airgap installs.
-- Added [Antrea add-on](https://kurl.sh/docs/add-ons/antrea) version 1.2.1.
-- Added [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.49.0-17.1.3.
-- Added [local-volume-provider](https://github.com/replicatedhq/local-volume-provider) plugin to Velero addon versions 1.5.1 through 1.6.2.
-- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.50.0.
+For more information, see:
+* [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap)
+* [Installing and Updating with Helm in Air Gap Environments](/vendor/helm-install-airgap)
-### Bug Fixes
-- Docker preflights will no longer run when docker is not configured within kURL.
+### What is the Commercial Software Distribution Lifecycle?
-## Release v2021.08.09-0
+Commercial software distribution is the business process that independent software vendors (ISVs) use to enable enterprise customers to self-host a fully private instance of the vendor's application in an environment controlled by the customer.
-Released on August 9, 2021
+Replicated has developed the Commercial Software Distribution Lifecycle to represent the stages that are essential for every company that wants to deliver their software securely and reliably to customer-controlled environments.
-### New Features
-- Added [Sonobuoy add-on](https://kurl.sh/docs/add-ons/sonobuoy) version 0.53.0.
-- Added [Goldpinger add-on](https://kurl.sh/docs/add-ons/goldpinger) version 3.2.0-4.2.1.
-- Added [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.49.0-17.1.1.
+This lifecycle was inspired by the DevOps lifecycle and the Software Development Lifecycle (SDLC), but it focuses on the unique requirements for successfully distributing commercial software to tens, hundreds, or thousands of enterprise customers.
-### Bug Fixes
-- The [Rook add-on block storage](https://kurl.sh/docs/add-ons/rook#block-storage) flag is no longer required to be set for version 1.4.3+. Instead, it is assumed to be set to true for these versions.
+The phases are:
+* Develop
+* Test
+* Release
+* License
+* Install
+* Report
+* Support
-## Release v2021.08.06-0
+For more information about the Replicated features that enhance each phase of the lifecycle, see [Introduction to Replicated](../intro-replicated).
-Released on August 6, 2021
+## Compatibility Matrix FAQs
-### New Features
-- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.49.0.
+### What types of clusters can I create with Compatibility Matrix?
-## Release v2021.08.04-0
+You can use Compatibility Matrix to get kubectl access to running clusters within minutes or less. Compatibility Matrix supports a variety of VM and cloud distributions, including Red Hat OpenShift, Replicated Embedded Cluster, and Oracle Container Engine for Kubernetes (OKE). For a complete list, see [Supported Compatibility Matrix Cluster Types](/vendor/testing-supported-clusters).
-Released on August 4, 2021
+### How does billing work?
-### New Features
-- The kURL installer can now differentiate between installs and upgrades.
-- Added [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.11.0 with support for [internal load balancing with HAProxy on HA installs](https://kurl.sh/docs/install-with-kurl/#highly-available-k8s-ha).
+Clusters created with Compatibility Matrix are billed by the minute. Per-minute billing begins when the cluster reaches a running status and ends when the cluster is deleted. For more information, see [Billing and Credits](/vendor/testing-about#billing-and-credits).
-## Release v2021.08.03-0
+### How do I buy credits?
-Released on August 3, 2021
+To create clusters with Compatibility Matrix, you must have credits in your Vendor Portal account. If you have a contract, you can purchase credits by logging in to the Vendor Portal and going to **[Compatibility Matrix > Buy additional credits](https://vendor.replicated.com/compatibility-matrix)**. Otherwise, to request credits, log in to the Vendor Portal and go to **[Compatibility Matrix > Request more credits](https://vendor.replicated.com/compatibility-matrix)**.
-### New Features
-- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.48.1.
+### How do I add Compatibility Matrix to my CI/CD pipelines?
-### Bug Fixes
-- Fixed an issue where the kotsadm config would be overriden when updating kURL.
+You can use Replicated CLI commands to integrate Compatibility Matrix into your CI/CD development and production workflows. This allows you to programmatically create multiple different types of clusters where you can deploy and test your application before releasing.
-## Release v2021.07.30-1
+For more information, see [About Integrating with CI/CD](/vendor/ci-overview).
-Released on July 30, 2021
+## KOTS and Embedded Cluster FAQs
-### New Features
-- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.48.0.
+### What is the Admin Console?
-## Release v2021.07.30-0
+The Admin Console is the user interface deployed by the Replicated KOTS installer. Users log in to the Admin Console to configure and install the application. Users also access the Admin Console after installation to complete application management tasks such as performing updates, syncing their license, and generating support bundles. For installations with Embedded Cluster, the Admin Console also includes a **Cluster Management** tab where users can manage the nodes in the cluster.
-Released on July 30, 2021
+The Admin Console is available in installations with Replicated Embedded Cluster and Replicated KOTS.
-### New Features
-- Added [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.18.0.
-- Added [Antrea add-on](https://kurl.sh/docs/add-ons/antrea) version 1.2.0.
-- Longhorn 1.1.2+ will automatically migrate Rook-backed PVCs to Longhorn-backed if Rook is installed but no longer included in the kURL spec.
-- MinIO will automatically import Rook-backed object store data if Rook is installed but no longer included in the kURL spec.
-- Rook will automatically be uninstalled if all data is migrated successfully to both Longhorn and MinIO.
+The following shows an example of the Admin Console dashboard for an Embedded Cluster installation of an application named "Gitea":
+admin console dashboard
-## Release v2021.07.23-1
+[View a larger version of this image](/images/gitea-ec-ready.png)
-Released on July 23, 2021
+### How do Embedded Cluster installations work?
-### New Features
-- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.47.3.
-- Added [Velero add-on](https://kurl.sh/docs/add-ons/velero) version 1.6.2.
-- Added [Longhorn add-on](https://kurl.sh/docs/add-ons/longhorn) version 1.1.2.
-- Added [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.49.0-17.0.0.
-- Added Kubernetes versions 1.21.3, 1.20.9, and 1.19.13.
+To install with Embedded Cluster, users first download and extract the Embedded Cluster installation assets for the target application release on their VM or bare metal server. Then, they run an Embedded Cluster installation command to provision the cluster. During installation, Embedded Cluster also installs Replicated KOTS in the cluster, which deploys the Admin Console.
-## Release v2021.07.23-0
+After the installation command finishes, users log in to the Admin Console to provide application configuration values, optionally join more nodes to the cluster, run preflight checks, and deploy the application.
-Released on July 23, 2021
+Customer-specific Embedded Cluster installation instructions are provided in the Replicated Vendor Portal. For more information, see [Installing with Embedded Cluster](/enterprise/installing-embedded).
-### New Features
-- Host preflight results are now tracked in the directory `/var/lib/kurl/host-preflights`.
+### Does Replicated support installations into air gap environments?
-### Improvements
-- Host preflights can now be run with an installer spec from STDIN, for example `kubectl get installer 6abe39c -oyaml | /var/lib/kurl/bin/kurl host preflight -`.
-- Host preflight added to check disk usage in /var/lib/docker.
+Yes. The Embedded Cluster and KOTS installers support installation in _air gap_ environments with no outbound internet access.
-### Bug Fixes
-- Fixed an issue that would cause [.x versions](https://kurl.sh/docs/create-installer/#x-patch-versions) to fail for the kotsadm addon.
-- Fixed an issue where warning messages would be displayed for passing preflight checks.
-- Fixed an issue where terminal control characters were erroneously displayed in noninteractive preflight check output.
-- Fixed an issue where invalid configurations for Rook version 1.4 or greater would pass validation checks.
+To support air gap installations, vendors can build air gap bundles for their application in the Vendor Portal that contain all the required assets for a specific release of the application. Additionally, Replicated provides bundles that contain the assets for the Replicated installers.
-## Release v2021.07.20-0
+For more information about how to install with Embedded Cluster and KOTS in air gap environments, see [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap) and [Air Gap Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster-airgapped).
-Released on July 20, 2021
+### Can I deploy Helm charts with KOTS?
-### Bug Fixes
-- Fixed an issue that would cause the installer to panic when `spec.selinuxConfig` is not empty or the `preserve-selinux-config` flag is specified and `spec.firewalldConfig` is empty.
+Yes. An application deployed with KOTS can use one or more Helm charts, can include Helm charts as components, and can use more than a single instance of any Helm chart. Each Helm chart requires a unique HelmChart custom resource (`apiVersion: kots.io/v1beta2`) in the release.
-## Release v2021.07.19-0
+For more information, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about).
-Released on July 19, 2021
+### What's the difference between Embedded Cluster and kURL?
-### New Features
-- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.47.2
-- The [Rook add-on's](https://kurl.sh/docs/add-ons/rook) object store can be migrated to [MinIO](https://kurl.sh/docs/add-ons/minio) with the `migrate-rgw-to-minio` task.
+Replicated Embedded Cluster is a successor to Replicated kURL. Compared to kURL, Embedded Cluster offers significantly faster installation, updates, and node joins, a redesigned Admin Console UI, improved support for multi-node clusters, one-click updates that update the application and the cluster at the same time, and more.
-### Improvements
-- Weave add-on host preflight check will not fail on connection timeout on metrics ports 6781 and 6782.
-- The preflight check for ftype on XFS filesystems has been added to all versions of containerd 1.3.7+.
+
-### Bug Fixes
-- The [EKCO add-on's](https://kurl.sh/docs/add-ons/ekco) reboot service no longer depends on docker when using containerd.
+For more information, see [Embedded Cluster Overview](/vendor/embedded-overview).
-## Release v2021.07.16-0
+### How do I enable Embedded Cluster and KOTS installations for my application?
-Released on July 16, 2021
+Releases that support installation with KOTS include the manifests required by KOTS to define the Admin Console experience and install the application.
-### New Features
-- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.47.1.
+In addition to the KOTS manifests, releases that support installation with Embedded Cluster also include the Embedded Cluster Config. The Embedded Cluster Config defines aspects of the cluster that will be provisioned and also sets the version of KOTS that will be installed.
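+As a minimal sketch, an Embedded Cluster Config can be as small as the following. The `version` value shown here is a placeholder; it pins the Embedded Cluster release that the installer provisions, and with it the versions of Kubernetes and KOTS:
+
+```yaml
+# Minimal sketch only. The version value is a placeholder; a real
+# release would pin an actual Embedded Cluster release version.
+apiVersion: embeddedcluster.replicated.com/v1beta1
+kind: Config
+spec:
+  version: 2.1.3+k8s-1.30
+```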
-### Improvements -- The [containerd add-on](https://kurl.sh/docs/add-ons/containerd) will check XFS filesystems have ftype enabled before attempting to install. -- The load balancer address preflight check will now validate that a valid address is provided before validating the network. +For more information, see [Embedded Cluster Overview](/vendor/embedded-overview). -### Bug Fixes -- The default preflight check for memory pass value has been changed from 8Gi to 8G. +### Can I use my own branding? -## Release v2021.07.13-0 +The KOTS Admin Console and the Replicated Download Portal support the use of a custom logo. Additionally, software vendors can use custom domains to alias the endpoints for Replicated services. -Released on July 13, 2021 +For more information, see [Customizing the Admin Console and Download Portal](/vendor/admin-console-customize-app-icon) and [About Custom Domains](custom-domains). -### New Features -- Preflight results will now be stored on the host in the directory /var/lib/kurl/host-preflights. -- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.47.0. +## Replicated SDK FAQs -### Improvements -- When downloading a bundle from the kURL server, the bundle creation process will fail early in the situation where one of the layers is unavailable, instead of returning a partial bundle. -- Added better messaging to the user when the kurlnet-client pod fails. +### What is the SDK? -## Release v2021.07.09-0 + -Released on July 9, 2021 +### Is the SDK supported in air gap environments? -### New Features -- All add-ons with versions that conform to semver now support the notation `Major.Minor.x`. When specified using this notation, the version will resolve to the greatest patch version for the specified major and minor version. -- Added [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.48.1-16.12.1. -- Added Sonobuoy add-on version 0.52.0. +Yes. The Replicated SDK has an _air gap mode_ that allows it to run in environments with no outbound internet access. When installed in air gap mode, the SDK does not attempt to connect to the internet. This avoids any failures that would occur when the SDK is unable to make outbound requests in air gap environments. -### Bug Fixes -- The [reset task](https://kurl.sh/docs/install-with-kurl/managing-nodes#reset-a-node) will now properly remove Kubernetes host packages. +For more information, see [Installing the SDK in Air Gap Environments](/vendor/replicated-sdk-airgap). -## Release v2021.07.02-0 +### How do I develop against the SDK API? -Released on July 2, 2021 +You can use the Replicated SDK in _integration mode_ to develop locally against the SDK API without needing to make real changes in the Replicated Vendor Portal or in your environment. -### New Features -- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.46.0. +For more information, see [Developing Against the SDK API](/vendor/replicated-sdk-development). -### Bug Fixes -- Fixed CVE-2021-20288 Rook 1.5.11 and 1.0.4-14.2.21. +### How does the Replicated SDK work with KOTS? -## Release v2021.06.30-1 +The Replicated SDK is a Helm chart that can be installed as a small service alongside an application, or as a standalone component. The SDK can be installed using the Helm CLI or KOTS. -Released on June 30, 2021 +Replicated recommends that all applications include the SDK because it provides access to key functionality not available through KOTS, such as support for sending custom metrics from application instances. 
When both the SDK and KOTS are installed in a cluster alongside an application, both send instance telemetry to the Vendor Portal. -### Bug Fixes +For more information about the SDK installation options, see [Installing the Replicated SDK](/vendor/replicated-sdk-installing). -- Fixed an issue which caused newer versions of kURL to have outdated scripts. This issue affects kURL versions v2021.06.24-0, v2021.06.24-1, v2021.06.25-0, and v2021.06.30-0. +## Vendor Portal FAQs -## Release v2021.06.30-0 +### How do I add and remove team members? -Released on June 30, 2021 +Admins can add, remove, and manage team members from the Vendor Portal. For more information, see [Managing Team Members](/vendor/team-management). -### New Features -- Added the ability to configure the Kubernetes service type used by the [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) to expose Prometheus, Grafana and Alertmanager. The currently accepted options are "NodePort" as the default, and "ClusterIP". -- [Migrations](https://kurl.sh/docs/install-with-kurl/migrating) are a supported way to change CSI, CRI, and CNI providers. +### How do I manage RBAC policies for my team members? -### Bug Fixes -- Fixed an issue that would cause Kubernetes upgrades to fail when the hostname of a node contains uppercase characters. -- Fixed an issue that prevented containerd from trusting the registry certificate except on the first primary. +By default, every team has two policies created automatically: Admin and Read Only. If you have an Enterprise plan, you will also have the Sales and Support policies created automatically. These default policies are not configurable. -## Release v2021.06.25-0 +You can also configure custom RBAC policies if you are on the Enterprise pricing plan. Creating custom RBAC policies lets you limit which areas of the Vendor Portal are accessible to team members, and control read and read/write privileges to groups based on their role. -Released on June 25, 2021 +For more information, see [Configuring RBAC Policies](/vendor/team-management-rbac-configuring). -### New Features -- Added support for Kubernetes versions 1.21.2, 1.20.8, 1.19.12 and 1.18.20. -- Added [KOTS](https://kurl.sh/docs/add-ons/kotsadm) add-on version 1.45.0. -- Added [Containerd](https://kurl.sh/docs/add-ons/containerd) add-on version 1.4.6. -- Added [Contour](https://kurl.sh/docs/add-ons/contour) add-on version 1.16.0. -- Added [EKCO](https://kurl.sh/docs/add-ons/ekco) add-on version 0.10.3. -- Added [Rook](https://kurl.sh/docs/add-ons/rook) add-on version 1.5.12. -- Added [Velero](https://kurl.sh/docs/add-ons/velero) add-on version 1.6.1. -- Added [Antrea](https://kurl.sh/docs/add-ons/antrea) add-on version 1.1.0. +### Can I alias Replicated endpoints? -### Bug Fixes -- Fixed an issue that would cause an upgrade of Prometheus from version 0.44.1 to any later version to cause the Contour Pods to crash. -- Fixed an issue in earlier versions of the Prometheus add-on which prevented the Grafana Dashboard from connecting to the Prometheus data store. -- Fixed an issue that could cause a kURL upgrade to fail if new add-ons had been added to kURL (even if they were not used in that installer). +Yes. Replicated supports the use of custom domains to alias the endpoints for Replicated services, such as the Replicated app service and the Replicated proxy registry. -## Release v2021.06.24-1 +Replicated domains are external to your domain and can require additional security reviews by your customer. 
Using custom domains as aliases can bring the domains inside an existing security review and reduce your exposure. -Released on June 24, 2021 +For more information, see [Using Custom Domains](/vendor/custom-domains-using). -### Bug Fixes -- Fixed a bug in which the [Rook](https://kurl.sh/docs/add-ons/rook) add-on (version 1.0.4-14.2.21) was referencing the incorrect ceph image. +### How does Replicated collect telemetry from instances of my application? -## Release v2021.06.24-0 +For instances running in online (internet-connected) customer environments, either Replicated KOTS or the Replicated SDK periodically sends a small amount of data to the Vendor Portal, depending on which is installed in the cluster alongside the application. If both KOTS and the SDK are installed in the cluster, then both send instance data. -Released on June 24, 2021 +For air gap instances, Replicated KOTS and the Replicated SDK collect and store instance telemetry in a Kubernetes Secret in the customer environment. The telemetry stored in the Secret is collected when a support bundle is generated in the environment. When the support bundle is uploaded to the Vendor Portal, the telemetry is associated with the correct customer and instance ID, and the Vendor Portal updates the instance insights and event data accordingly. -### New Features -- The [Goldpinger](https://kurl.sh/docs/add-ons/goldpinger) add-on has been added to monitor network connectivity. +For more information, see [About Instance and Event Data](/vendor/instance-insights-event-data). -### Improvements -- Host packages installed on CentOS, RHEL and Oracle Linux will now be installed using yum rather than rpm and no longer force overwrite previously installed versions. -- The Prometheus add-on (Version 0.48.1-16.10.0+) will now pass the flag [--storage.tsdb.retention.size=9GB](https://prometheus.io/docs/prometheus/latest/storage/#operational-aspects) to avoid filling the PVC completely. +================ +File: docs/vendor/kurl-about.mdx +================ +import KurlDefinition from "../partials/kurl/_kurl-definition.mdx" +import Installers from "../partials/kurl/_installers.mdx" +import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" -### Bug Fixes -- Fixed a bug with the `kurl-registry-ip` flag that caused errors when restoring airgap clusters while using the Containerd add-on. +# Introduction to kURL -## Release v2021.06.22-0 + -Released on June 22, 2021 +This topic provides an introduction to the Replicated kURL installer, including information about kURL specifications and installations. -### Bug Fixes -- Fixed an issue that caused Rook-Ceph to have insecure connection claims. See [CVE-2021-20288](https://docs.ceph.com/en/latest/security/CVE-2021-20288/) for details. -- A new [Rook](https://kurl.sh/docs/add-ons/rook) add-on version 1.0.4-14.2.21 has been added with an upgraded Ceph version 14.2.21. +:::note +The Replicated KOTS entitlement is required to install applications with KOTS and kURL. For more information, see [Pricing](https://www.replicated.com/pricing) on the Replicated website. +::: -## Release v2021.06.17-0 +## Overview -Released on June 17, 2021 + -### New Features -- Added support for RHEL 8.4 and CentOS 8.4. +### kURL Installers -### Improvements -- Added support for [versioned kurl installers](https://kurl.sh/docs/install-with-kurl/#versioned-releases) to the installation spec validator (if an add-on version was not present in the version of kurl specified, an error will be returned). 
+
-## Release v2021.06.15-0
+To distribute a kURL installer alongside your application, you can promote the installer to a channel or include the installer as a manifest file within a given release. For more information about creating kURL installers, see [Creating a kURL Installer](/vendor/packaging-embedded-kubernetes).
-Released on June 15, 2021
+### kURL Installations
-### New Features
-- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.44.1.
-- Added a new field, kurl.InstallerVersion, that allows [pinning the kURL installer version](https://kurl.sh/docs/install-with-kurl/#versioned-releases).
+To install with kURL, users run a kURL installation script on their VM or bare metal server to provision a cluster.
-### Improvements
-- Containerd configuration will be regenerated when rerunning the installer. New settings have been added to the [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) to allow you to preserve the existing config or to add additional fields.
+When the KOTS add-on is included in the kURL installer spec, the kURL installation script installs the KOTS CLI and KOTS Admin Console in the cluster. After the installation script completes, users can access the Admin Console at the URL provided in the output of the command to configure and deploy the application with KOTS.
-## Release v2021.06.11-0
+The following shows an example of the output of the kURL installation script:
-Released on June 11, 2021
+```bash
+    Installation
+    Complete ✔
-### New Features
-- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kots) version 1.44.0.
+Kotsadm: http://10.128.0.35:8800
+Login with password (will not be shown again): 3Hy8WYYid
-## Release v2021.06.08-0
+This password has been set for you by default. It is recommended that you change
+this password; this can be done with the following command:
+kubectl kots reset-password default
+```
-Released on June 8, 2021
+kURL installations are supported in online (internet-connected) and air gapped environments.
-### New Features
-- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.43.2.
+For information about how to install applications with kURL, see [Online Installation with kURL](/enterprise/installing-kurl).
-## Release v2021.06.07-0
+## About the Open Source kURL Documentation
-Released on June 7, 2021
+The open source documentation for the kURL project is available at [kurl.sh](https://kurl.sh/docs/introduction/).
-### Improvements
--Added HTTPS proxy configuration to KOTS (>= v1.43.1).
+The open source kURL documentation contains additional information including kURL installation options, kURL add-ons, and procedural content such as how to add and manage nodes in kURL clusters. Software vendors can use the open source kURL documentation to find detailed reference information when creating kURL installer specs or testing installation.
-## Release v2021.06.04-0
+================
+File: docs/vendor/kurl-nodeport-services.mdx
+================
+import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"
-Released on June 4, 2021
+# Exposing Services Using NodePorts
-### New Features
-- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.43.1.
-- Added [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) version 0.10.2 with support for Longhorn PVCs in the node shutdown script.
-- Added [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.48.0-16.1.2.
+
-### Improvements
-- Added HTTPS proxy configuration to Velero.
-- Installing the Docker add-on will no longer install additional recommended packages on Ubuntu.
-- Added a preinstallation check to the [Longhorn add-on](https://kurl.sh/docs/add-ons/longhorn) that validates that nodes support bidirectional mount propagation.
-- The replicated/kurl-util image now includes the Linux command line utilities curl, ipvsadm, netcat, openssl, strace, sysstat, tcpdump and telnet for debugging purposes.
+This topic describes how to expose NodePort services in [Replicated Embedded Cluster](/vendor/embedded-overview) or [Replicated kURL](/vendor/kurl-about) installations on VMs or bare metal servers.
-## Release v2021.05.28-01
+## Overview
-Released on May 28, 2021
+For installations into existing clusters, KOTS automatically creates a port forward tunnel to expose the Admin Console. Unlike installations into existing clusters, KOTS does _not_ automatically open the port forward tunnel for installations in embedded clusters provisioned on virtual machines (VMs) or bare metal servers. This is because it cannot be verified that the ports are secure and authenticated. For more information about the KOTS port forward tunnel, see [Port Forwarding Services with KOTS](/vendor/admin-console-port-forward).
-### New Features
-- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.43.0.
+Instead, to expose the Admin Console in installations with [Embedded Cluster](/vendor/embedded-overview) or [kURL](/vendor/kurl-about), KOTS creates the Admin Console as a NodePort service so it can be accessed at the node's IP address on a node port (port 8800 for kURL installations and port 30000 for Embedded Cluster installations). Additionally, for kURL installations, the UIs of Prometheus, Grafana, and Alertmanager are also exposed using NodePorts.
-### Improvements
-- A host preflight check for the [Longhorn add-on](https://kurl.sh/docs/add-ons/longhorn) will ensure sufficient disk space is available in /var/lib/longhorn.
-- A priority class is now set on the [Longhorn add-on](https://kurl.sh/docs/add-ons/longhorn) to delay its eviction.
+For installations on VMs or bare metal servers where your application must be accessible from the user's local machine rather than from inside the cluster, you can expose application services as NodePorts to provide access to the application after installation.
-## Release v2021.05.28-0
+## Add a NodePort Service
-Released on May 28, 2021
+Services with `type: NodePort` can be contacted from outside the cluster by connecting to any node using the appropriate protocol and port. For more information about working with the NodePort service type, see [type: NodePort](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport) in the Kubernetes documentation.
-### Improvements
-- The [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) will include a ServiceMonitor for Longhorn when the [Longhorn add-on](https://kurl.sh/docs/add-ons/longhorn) is installed.
-- The [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) will no longer hardcode `storageClassName: default` for better compatibility with PVC Provisioner add-ons.
+The following shows an example of a NodePort type service:
-### Bug Fixes
-- Fixed an issue that caused the [Versioned](https://kurl.sh/docs/install-with-kurl/#versioned-releases) airgap installer to download incomplete packages for previous versions.
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: sentry
+  labels:
+    app: sentry
+spec:
+  type: NodePort
+  ports:
+  - port: 9000
+    targetPort: 9000
+    nodePort: 9000
+    protocol: TCP
+    name: sentry
+  selector:
+    app: sentry
+    role: web
+```
-## Release v2021.05.26-2
+After configuring a NodePort service for your application, you can add a link to the service on the Admin Console dashboard where it can be accessed by users after the application is installed. For more information, see [About Accessing NodePort Services](#about-accessing-nodeport-services) below.
-Released on May 26, 2021
+### Use KOTS Annotations to Conditionally Deploy NodePort Services
-### Bug Fixes
-- Fixed an issue that caused installations on Oracle Linux 8.4 to fail.
+You can use the KOTS [`kots.io/when`](/vendor/packaging-include-resources#kotsiowhen) annotation to conditionally deploy a service. This is useful when you want to deploy a ClusterIP or LoadBalancer service for existing cluster installations, and deploy a NodePort service for Embedded Cluster or kURL installations.
-## Release v2021.05.26-1
+To conditionally deploy a service based on the installation method, you can use the following KOTS template functions in the `kots.io/when` annotation:
+* [IsKurl](/reference/template-functions-static-context#iskurl): Detects kURL installations. For example, `repl{{ IsKurl }}` returns true for kURL installations, and `repl{{ not IsKurl }}` returns true for non-kURL installations.
+* [Distribution](/reference/template-functions-static-context#distribution): Returns the distribution of the cluster where KOTS is running. For example, `repl{{ eq Distribution "embedded-cluster" }}` returns true for Embedded Cluster installations and `repl{{ ne Distribution "embedded-cluster" }}` returns true for non-Embedded Cluster installations.
-Released on May 26, 2021
+For example, the following `sentry` service with `type: NodePort` includes the annotation `kots.io/when: repl{{ eq Distribution "embedded-cluster" }}`. This creates a NodePort service _only_ when installing with Embedded Cluster:
-### Bug Fixes
-- Fixed release generator.
+  ```yaml
+  apiVersion: v1
+  kind: Service
+  metadata:
+    name: sentry
+    labels:
+      app: sentry
+    annotations:
+      # This annotation ensures that the NodePort service
+      # is only created in Embedded Cluster installations
+      kots.io/when: repl{{ eq Distribution "embedded-cluster" }}
+  spec:
+    type: NodePort
+    ports:
+    - port: 9000
+      targetPort: 9000
+      nodePort: 9000
+      protocol: TCP
+      name: sentry
+    selector:
+      app: sentry
+      role: web
+  ```
-## Release v2021.05.26-0
+Similarly, to ensure that a `sentry` service with `type: ClusterIP` is only created in existing cluster installations, add the annotation `kots.io/when: repl{{ ne Distribution "embedded-cluster" }}` to the ClusterIP specification:
-Released on May 26, 2021
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: sentry
+  labels:
+    app: sentry
+  annotations:
+    # This annotation ensures that the ClusterIP service
+    # is only created in existing cluster installations
+    kots.io/when: repl{{ ne Distribution "embedded-cluster" }}
+spec:
+  type: ClusterIP
+  ports:
+  - port: 9000
+    targetPort: 9000
+    protocol: TCP
+    name: sentry
+  selector:
+    app: sentry
+    role: web
+```
-### New Features
-- Added Kubernetes versions 1.21.1, 1.20.7, 1.19.11 and 1.18.19.
-- Added [Rook add-on](https://kurl.sh/docs/add-ons/rook) version 1.5.11.
-- Added [Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) version 0.47.1-16.0.1.
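+The same pattern works with the `IsKurl` template function described above. As a sketch, reusing the example `sentry` service, the following would create the NodePort service only in kURL installations:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: sentry
+  labels:
+    app: sentry
+  annotations:
+    # Sketch: this NodePort service is only created in kURL installations
+    kots.io/when: repl{{ IsKurl }}
+spec:
+  type: NodePort
+  ports:
+  - port: 9000
+    targetPort: 9000
+    nodePort: 9000
+    protocol: TCP
+    name: sentry
+  selector:
+    app: sentry
+    role: web
+```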
+## About Accessing NodePort Services
-### Improvements
-- The [Containerd add-on](https://kurl.sh/docs/add-ons/containerd) will now be upgraded to conform to the latest kURL spec installed.
-- The version of runC included with Docker and Containerd has been upgraded to [v1.0.0-rc95](https://github.com/opencontainers/runc/releases/tag/v1.0.0-rc95).
+This section describes providing access to NodePort services after installation.
-### Bug Fixes
-- Fixed an issue that caused the Grafana dashboard to fail to show graphs due to a misconfigured Prometheus service URL.
+### VM Firewall Requirements
+To be able to access the Admin Console and any NodePort services for your application, the firewall for the VM where the user installs must allow HTTP traffic and allow inbound traffic to the port where the service is exposed from their workstation. Users can consult their cloud provider's documentation for more information about updating firewall rules.
-## Release v2021.05.24-0
+### Add a Link on the Admin Console Dashboard {#add-link}
-Released on May 24, 2021
+You can provide a link to a NodePort service on the Admin Console dashboard by configuring the `links` array in the Kubernetes SIG Application custom resource. This provides users with an easy way to access the application after installation. For more information, see [Adding Links to the Dashboard](admin-console-adding-buttons-links).
-### New Features
-- Added the ability to configure proxies for Velero backups.
-- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.42.1.
+For example:
-## Release v2021.05.21-1
+Admin Console dashboard with Open App link
-Released on May 21, 2021
+[View a larger version of this image](/images/gitea-open-app.png)
-### Improvements
-- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.42.0.
+================
+File: docs/vendor/kurl-reset.mdx
+================
+import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"
-## Release v2021.05.21-0
+# Resetting a kURL Cluster
-Released on May 21, 2021
+
-### Improvements
-- The [longhorn](https://kurl.sh/docs/add-ons/longhorn) data directory permissions are now restricted to the root user.
+This topic describes how to use the kURL `reset` command to reset a kURL cluster.
-### Bug Fixes
-- Fixed an issue that prevented Rook 1.4.9+ from installing on Kubernetes 1.21.
+## Overview
-## Release v2021.05.17-0
+If you need to reset a kURL installation, such as when you are testing releases with kURL, you can use the kURL `tasks.sh` `reset` command to remove Kubernetes from the system.
-Released on May 17, 2021
+Alternatively, you can discard your current VM (if you are using one) and recreate the VM with a new OS to reinstall with kURL.
-### Improvements
-- The following improvements have been made to prompts requiring user feedback:
-  - For interactive terminal sessions, all prompts will no longer timeout.
-  - For non-interactive terminal sessions, all prompts that require user input will now fail.
-  - For non-interactive terminal sessions, confirmation prompts will now automatically confirm or deny based on the default.
-  - Preflight failures and warnings will no longer prompt to confirm or deny, and instead will continue.
- - Properties [`spec.kurl.ignoreRemoteLoadImagesPrompt`](https://kurl.sh/docs/install-with-kurl/advanced-options) and [`spec.kurl.ignoreRemoteUpgradePrompt`](https://staging.kurl.sh/docs/install-with-kurl/advanced-options) have been added to the `kurl.sh/v1beta1.Installer` spec to bypass prompts for automation purposes. +For more information about the `reset` command, see [Resetting a Node](https://kurl.sh/docs/install-with-kurl/managing-nodes#reset-a-node) in the kURL documentation. -### Bug Fixes -- Fixed an issue that could cause the node ready check to falsely report as successful causing unforseen issues with an installation. +To reset a kURL installation: -## Release v2021.05.14-1 +1. Access the machine where you installed with kURL. -Released on May 14, 2021 +1. Run the following command to remove Kubernetes from the system: -### New Features -- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.41.1. + ``` + curl -sSL https://k8s.kurl.sh/latest/tasks.sh | sudo bash -s reset + ``` -## Release v2021.05.14-0 +1. Follow the instructions in the output of the command to manually remove any files that the `reset` command does not remove. -Released on May 14, 2021 +If the `reset` command is unsuccessful, discard your current VM, and recreate the VM with a new OS to reinstall the Admin Console and an application. -### New Features -- Kurl clusters can be configured to use [dedicated primary nodes](https://kurl.sh/docs/install-with-kurl/dedicated-primary) reserved for control-plane components. -- Added [Antrea add-on](https://kurl.sh/docs/add-ons/antrea) version 1.0.1. -- Added [Contour add-on](https://kurl.sh/docs/add-ons/contour) version 1.15.1. +================ +File: docs/vendor/licenses-about-types.md +================ +# About Community Licenses -### Improvements -- RPM install command will now suppress signature verification errors. +This topic describes community licenses. For more information about other types of licenses, see [Customer Types](licenses-about#customer-types) in _About Customers_. -## Release v2021.05.07-1 +## Overview -Released on May 7, 2021 +Community licenses are intended for use with a free or low cost version of your application. For example, you could use community licenses for an open source version of your application. -### New Features -- Added [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) version 1.41.0. +After installing an application with a community license, users can replace their community license with a new license of a different type without having to completely reinstall the application. This means that, if you have several community users who install with the same license, then you can upgrade a single community user without editing the license for all community users. -### Improvements -- Allow the `WEAVE_TAG` environment variable to be specified to pin the Weave version when running the [reset task](https://kurl.sh/docs/install-with-kurl/managing-nodes#reset-a-node). +Community licenses are supported for applications that are installed with Replicated KOTS or with the Helm CLI. -### Bug Fixes -- Fixed Weave iptables reset when running the [reset task](https://kurl.sh/docs/install-with-kurl/managing-nodes#reset-a-node). -- Added the ability to specicify a [release version](https://kurl.sh/docs/install-with-kurl/#versioned-releases) when running the kURL installer. -- Added [Longhorn add-on](https://kurl.sh/docs/add-ons/longhorn) version 1.1.1. 
+For applications installed with KOTS, community license users can upload a new license file of a different type in the Replicated admin console. For more information, see [Upgrade from a Community License](/enterprise/updating-licenses#upgrade-from-a-community-license) in _Updating Licenses in the Admin Console_. -## Release v2021.05.07-0 +## Limitations -Released on May 7, 2021 +Community licenses function in the same way as the other types of licenses, with the following +exceptions: -### New Features -- Added the ability to specify a [release version](https://kurl.sh/docs/install-with-kurl/#versioned-releases) when running the kURL installer. -- Added [Longhorn add-on](https://kurl.sh/docs/add-ons/longhorn) version 1.1.1. +* Updating a community license to another type of license cannot be reverted. +* Community license users are not supported by the Replicated Support team. +* Community licenses cannot support air gapped installations. +* Community licenses cannot include an expiration date. -### Bug Fixes -- Fixed an issue with the [EKCO add-on](https://kurl.sh/docs/add-ons/ekco) that would cause a node to hang on shutdown if there were any unmounted rbd devices. +## Community License Admin Console Branding -================ -File: docs/release-notes/rn-replicated-sdk.md -================ ---- -toc_max_heading_level: 2 -pagination_next: null -pagination_prev: null ---- +For applications installed with KOTS, the branding in the admin console for community users differs in the following ways: -# Replicated SDK Release Notes +* The license tile on the admin console **Dashboard** page is highlighted in yellow and with the words **Community Edition**. -This topic contains release notes for the [Replicated SDK](/vendor/replicated-sdk-overview). The release notes list new features, improvements, bug fixes, known issues, and breaking changes. + ![Community License Dashboard](/images/community-license-dashboard.png) + + [View a larger version of this image](/images/community-license-dashboard.png) -## 1.1.1 +* All support bundles and analysis in the admin console are clearly marked as **Community Edition**. -Released on February 19, 2025 - -### Improvements {#improvements-1-1-1} -* Addresses CVE-2025-0665, CVE-2025-0725, and CVE-2024-12797 + ![Community License Support Bundle](/images/community-license-bundle.png) + + [View a larger version of this image](/images/community-license-bundle.png) -## 1.1.0 +================ +File: docs/vendor/licenses-about.mdx +================ +import ChangeChannel from "../partials/customers/_change-channel.mdx" -Released on February 4, 2025 +# About Customers and Licensing -### New Features {#new-features-1-1-0} -* Adds the ability to pass custom labels to the Replicated SDK Helm Chart via the `commonLabels` and `podLabels` Helm values. For more information, see [Add Custom Labels](/vendor/replicated-sdk-customizing#add-custom-labels) in _Customizing the Replicated SDK_. +This topic provides an overview of customers and licenses in the Replicated Platform. -## 1.0.0 +## Overview -Released on December 23, 2024 +The licensing features of the Replicated Platform allow vendors to securely grant access to software, making license agreements available to the application in end customer environments at startup and runtime. -This release removes the pre-release from the version number. +The Replicated Vendor Portal also allows vendors to create and manage customer records. 
Each customer record includes several fields that uniquely identify the customer and the application, specify the customer's assigned release channel, and define the customer's entitlements. -## 1.0.0-beta.33 +Vendors can use these licensing features to enforce entitlements such as license expiration dates, and to track and report on software usage for the purpose of surfacing insights to both internal teams and customers. -Released on December 23, 2024 +The following diagram provides an overview of licensing with the Replicated Platform: -### New Features {#new-features-1-0-0-beta-33} -* Adds support for setting `affinity` for the Replicated SDK deployment -* Adds `/app/status` [API](/reference/replicated-sdk-apis) that returns detailed application status information. -* Adds support for mocking channelID, channelName, channelSequence, releaseSequence in current release info returned by /app/info API. +![App instance communicates with the Replicated licensing server](/images/licensing-overview.png) -### Bug Fixes {#bug-fixes-1-0-0-beta-33} -* Fixes a bug that could result in an instance being reported as unavailable if the application includes an Ingress resource. +[View a larger version of this image](/images/licensing-overview.png) -## 1.0.0-beta.32 +As shown in the diagram above, the Replicated license and update server manages and distributes customer license information. The license server retrieves this license information from customer records managed by vendors in the Vendor Portal. -Released on December 9, 2024 +During installation or upgrade, the customer's license ID is used to authenticate with the license server. The license ID also provides authentication for the Replicated proxy registry, securely granting proxy access to images in the vendor's external registry. -### Bug Fixes {#bug-fixes-1-0-0-beta-32} -* Fixes an issue that caused [custom metrics](/vendor/custom-metrics#configure-custom-metrics) to not be collected. +The license server is identified with a CNAME record where it can be accessed from end customer environments. When running alongside an application in a customer environment, the Replicated SDK retrieves up-to-date customer license information from the license server during runtime. The in-cluster SDK API `/license/` endpoints can be used to get customer license information on-demand, allowing vendors to programmatically enforce and report on license agreements. -## 1.0.0-beta.31 +Vendors can also integrate internal Customer Relationship Management (CRM) tools such as Salesforce with the Replicated Platform so that any changes to a customer's entitlements are automatically reflected in the Vendor Portal. This ensures that updates to license agreements are reflected in the customer environment in real time. -Released on October 17, 2024 +## About Customers -### New Features {#new-features-1-0-0-beta-31} -* Adds support for specifying ClusterRole using the [clusterRole](/vendor/replicated-sdk-customizing#custom-clusterrole) key. +Each customer that you create in the Replicated Vendor Portal has a unique license ID. Your customers use their license when they install or update your application. -## 1.0.0-beta.30 +You assign customers to channels in the Vendor Portal to control their access to your application releases. Customers can install or upgrade to releases that are promoted to the channel they are assigned. For example, assigning a customer to your Beta channel allows that customer to install or upgrade to only releases promoted to the Beta channel. 
-Released on October 16, 2024
+Each customer license includes several fields that uniquely identify the customer and the application, specify the customer's assigned release channel, and define the customer's entitlements, such as if the license has an expiration date or what application functionality the customer can access. Replicated securely delivers these entitlements to the application and makes them available at installation or at runtime.
-### New Features {#new-features-1-0-0-beta-30}
-* Adds support for custom Certificate Authorities using the [privateCASecret](/vendor/replicated-sdk-customizing#custom-certificate-authority) key.
+For more information about how to create and manage customers, see [Creating and Managing Customers](releases-creating-customer).
-### Improvements {#improvements-1-0-0-beta-30}
-* This release addresses CVE-2024-41110.
+### Customer Channel Assignment {#channel-assignment}
-## 1.0.0-beta.29
+
-Released on October 9, 2024
+For example, if the latest release promoted to the Beta channel is version 1.25.0 and version 1.10.0 is marked as required, when you edit an existing customer to assign them to the Beta channel, then the KOTS Admin Console always fetches 1.25.0, even though 1.10.0 is marked as required. The required release 1.10.0 is ignored and is not available to the customer for upgrade.
-### New Features {#new-features-1-0-0-beta-23}
-* Adds support for setting individual image name component values instead of the entire image: registry, repository, and tag.
+For more information about how to mark a release as required, see [Properties](releases-about#properties) in _About Channels and Releases_. For more information about how to synchronize licenses in the Admin Console, see [Updating Licenses in the Admin Console](/enterprise/updating-licenses).
-## 1.0.0-beta.28
+### Customer Types
-Released on September 20, 2024
+Each customer is assigned one of the following types:
-### New Features {#new-features-1-0-0-beta-23}
-* Adds support for custom Certificate Authorities using the [privateCAConfigmap](/vendor/replicated-sdk-customizing#custom-certificate-authority) key.
+* **Development**: The Development type can be used internally by the development
+team for testing and integration.
+* **Trial**: The Trial type can be used for customers who are on 2-4 week trials
+of your software.
+* **Paid**: The Paid type identifies the customer as a paying customer for which
+additional information can be provided.
+* **Community**: The Community type is designed for a free or low cost version of your application. For more details about this type, see [Community Licenses](licenses-about-types).
+* (Beta) **Single Tenant Vendor Managed**: The Single Tenant Vendor Managed type is for customers for whom your team is operating the application in infrastructure you fully control and operate. Single Tenant Vendor Managed licenses are free to use, but come with limited support. The Single Tenant Vendor Managed type is a Beta feature. Reach out to your Replicated account representative to get access.
-## 1.0.0-beta.27
+Except for Community licenses, the license type is used solely for reporting purposes, and a customer's access to your application is not affected by the type that you assign.
-Released on August 16, 2024
+You can change the type of a license at any time in the Vendor Portal. For example, if a customer upgraded from a trial to a paid account, then you could change their license type from Trial to Paid for reporting purposes.
-### Bug Fixes {#bug-fixes-1-0-0-beta-27} -* Fixes an issue that caused k0s to be reported as the distribution for Embedded Clusters. +### About Managing Customers -## 1.0.0-beta.26 +Each customer record in the Vendor Portal has built-in fields and also supports custom fields: +* The built-in fields include values such as the customer name, customer email, and the license expiration date. You can optionally set initial values for the built-in fields so that each new customer created in the Vendor Portal starts with the same set of values. +* You can also create custom fields to define entitlements for your application. For example, you can create a custom field to set the number of active users permitted. -Released on July 31, 2024 +For more information, see [Managing Customer License Fields](/vendor/licenses-adding-custom-fields). -### Bug Fixes {#bug-fixes-1-0-0-beta-26} -* Fixes an issue that caused k8s minor version parsing errors to be logged repeatedly. +You can make changes to a customer record in the Vendor Portal at any time. The license ID, which is the unique identifier for the customer, never changes. For more information about managing customers in the Vendor Portal, see [Creating and Managing Customers](releases-creating-customer). -## 1.0.0-beta.25 +### About the Customers Page -Released on July 3, 2024 +The following shows an example of the **Customers** page: -### Bug Fixes {#bug-fixes-1-0-0-beta-25} -* Various bug fixes and refactoring of tests. +![Customers page](/images/customers-page.png) -## 1.0.0-beta.24 +[View a larger version of this image](/images/customers-page.png) -Released on July 2, 2024 +From the **Customers** page, you can do the following: -### Improvements {#improvements-1-0-0-beta-24} -* Adds caching and rate-limiting to the `/api/v1/app/custom-metrics` and `/api/v1/app/instance-tags` endpoints -* Adds a ten-second default timeout to the SDK's HTTP client +* Create new customers. -## 1.0.0-beta.23 +* Download CSVs with customer and instance data. -Released on June 21, 2024 +* Search and filter customers. -### New Features {#new-features-1-0-0-beta-23} -* Adds support for `PATCH` and `DELETE` methods on the [custom application metrics](/vendor/custom-metrics) endpoint: `/api/v1/app/custom-metrics`. +* Click the **Manage customer** button to edit details such as the customer name and email, the custom license fields assigned to the customer, and the license expiration policy. For more information, see [Creating and Managing Customers](releases-creating-customer). -## 1.0.0-beta.22 +* Download the license file for each customer. -Released on June 12, 2024 +* Click the **Customer reporting** button to view data about the active application instances associated with each customer. For more information, see [Customer Reporting](customer-reporting). -### Improvements {#improvements-1-0-0-beta-22} -* The `/app/info` and `/license/info` endpoints now return additional app and license info, respectively. -* Updates the SDK's support bundle spec to extract license, app, history, and release information with an exec collector. +* View instance details for each customer, including the version of the application that this instance is running, the Kubernetes distribution of the cluster, the last check-in time, and more: -## 1.0.0-beta.21 + + + [View a larger version of this image](/images/customer-reporting-details.png) -Released on June 6, 2024 +* Archive customers. For more information, see [Creating and Managing Customers](releases-creating-customer). 
-### Bug Fixes {#bug-fixes-1-0-0-beta-21}
-* Fixes an issue where the replicated pod logs collector could fail in environments with namespace-restricted RBAC.
+* Click on a customer on the **Customers** page to access the following customer-specific pages:
+  * [Reporting](#about-the-customer-reporting-page)
+  * [Manage customer](#about-the-manage-customer-page)
+  * [Support bundles](#about-the-customer-support-bundles-page)

-## 1.0.0-beta.20
+### About the Customer Reporting Page

-Released on May 14, 2024
+The **Reporting** page for a customer displays data about the active application instances associated with that customer. The following shows an example of the **Reporting** page for a customer that has two active application instances:

-### Bug Fixes {#bug-fixes-1-0-0-beta-20}
-* Fixes an issue where the namespace fields in the support bundle spec were not quoted, which caused the linter to show schema warnings.
+![Customer reporting page in the Vendor Portal](/images/customer-reporting-page.png)
+[View a larger version of this image](/images/customer-reporting-page.png)

-## 1.0.0-beta.19
+For more information about interpreting the data on the **Reporting** page, see [Customer Reporting](customer-reporting).

-Released on April 26, 2024
+### About the Manage Customer Page

-### New Features {#new-features-1-0-0-beta-19}
-* Adds Supply-chain Levels for Software Artifacts (SLSA) generation for the Replicated SDK image.
+The **Manage customer** page for a customer displays details about the customer license, including the customer name and email, the license expiration policy, custom license fields, and more.

-  For example, you can run the following to validate the attestation for the SDK image:
-  ```bash
-  cosign download attestation replicated/replicated-sdk:VERSION | jq -r .payload | base64 -d | jq
-  ```
-  Where `VERSION` is the target version of the SDK.
+The following shows an example of the **Manage customer** page:

-  You can also search Sigstor using Rekor at https://search.sigstore.dev/
+![Manage customer page in the Vendor Portal](/images/customer-details.png)
+[View a larger version of this image](/images/customer-details.png)

-## 1.0.0-beta.18
+From the **Manage customer** page, you can view and edit the customer's license fields or archive the customer. For more information, see [Creating and Managing Customers](releases-creating-customer).

-Released on April 26, 2024
+### About the Customer Support Bundles Page

-### Improvements {#improvements-1-0-0-beta-18}
-* Updates the Replicated SDK image to resolve CVE-2024-2961 with high severity, and CVE-2023-6237, CVE-2024-24557, and CVE-2023-45288 with medium severity.
+The **Support bundles** page for a customer displays details about the support bundles collected from the customer. Customers with the **Support Bundle Upload Enabled** entitlement can provide support bundles through the KOTS Admin Console, or you can upload support bundles manually in the Vendor Portal by going to **Troubleshoot > Upload a support bundle**. For more information about uploading and analyzing support bundles, see [Inspecting Support Bundles](support-inspecting-support-bundles).
-## 1.0.0-beta.17
+The following shows an example of the **Support bundles** page:

-Released on April 8, 2024
+![Support bundles page in the Vendor Portal](/images/customer-support-bundles.png)
+[View a larger version of this image](/images/customer-support-bundles.png)

-### New Features {#new-features-1-0-0-beta-17}
-* Adds a new [`POST /app/instance-tags`](/reference/replicated-sdk-apis#post-appinstance-tags) endpoint that allows an application to programmatically send instance tags to the vendor portal.
+As shown in the screenshot above, the **Support bundles** page lists details about the collected support bundles, such as the date the support bundle was collected and the debugging insights found. You can click on a support bundle to view it in the **Support bundle analysis** page. You can also click **Delete** to delete the support bundle, or click **Customer Reporting** to view the **Reporting** page for the customer.

-## 1.0.0-beta.16
+## About Licensing with Replicated

-Released on February 19, 2024
+### About Syncing Licenses

-### New Features {#new-features-1-0-0-beta-16}
-* Adds support for running the SDK on ARM64 nodes.
+When you edit customer licenses for an application installed with a Replicated installer (Embedded Cluster, KOTS, kURL), your customers can use the KOTS Admin Console to get the latest license details from the Vendor Portal, then deploy a new version that includes the license changes. Deploying a new version with the license changes ensures that any license fields that you have templated in your release using [KOTS template functions](/reference/template-functions-about) are rendered with the latest license details.

-## 1.0.0-beta.15
+For online instances, KOTS pulls license details from the Vendor Portal when:
+* A customer clicks **Sync license** in the Admin Console.
+* An automatic or manual update check is performed by KOTS.
+* An update is performed with Replicated Embedded Cluster. See [Performing Updates with Embedded Cluster](/enterprise/updating-embedded).
+* An application status changes. See [Current State](instance-insights-details#current-state) in _Instance Details_.

-Released on February 15, 2024
+For more information, see [Updating Licenses in the Admin Console](/enterprise/updating-licenses).

-### Improvements {#improvements-1-0-0-beta-15}
-* Upgrades the helm.sh/helm/v3 go module to 3.14.0 to resolve GHSA-7ww5-4wqc-m92c and GHSA-45x7-px36-x8w8 with medium severity.
-* Upgrades the go version used to build the Replicated SDK to 1.21.7 to resolve CVE-2023-45285, CVE-2023-44487, CVE-2023-39325, and CVE-2023-39323 with high severity, and CVE-2023-39326, CVE-2023-39319, and CVE-2023-39318 with medium severity.
+### About Syncing Licenses in Air-Gapped Environments

-## 1.0.0-beta.14
+To update licenses in air gap installations, customers need to upload the updated license file to the Admin Console.

-Released on February 5, 2024
+After you update the license fields in the Vendor Portal, you can notify customers by either sending them a new license file or instructing them to log into their Download Portal to download the new license.

-### Improvements {#improvements-1-0-0-beta-14}
-* Adds `fsGroup` and `supplementalGroups` to the default PodSecurityContext for the Replicated SDK deployment.
+For more information, see [Updating Licenses in the Admin Console](/enterprise/updating-licenses).
-## 1.0.0-beta.13
+### Retrieving License Details with the SDK API

-Released on January 2, 2024
+The [Replicated SDK](replicated-sdk-overview) includes an in-cluster API that can be used to retrieve up-to-date customer license information from the Vendor Portal during runtime through the [`license`](/reference/replicated-sdk-apis#license) endpoints. This means that you can add logic to your application to get the latest license information without the customer needing to perform a license update. The SDK API polls the Vendor Portal for updated data every four hours.

-### Improvements {#improvements-1-0-0-beta-13}
-* Upgrades the helm.sh/helm/v3 go module to v3.13.3 to resolve CVE-2023-39325 and GHSA-m425-mq94-257g with high severity and CVE-2023-44487 and GHSA-jq35-85cj-fj4p with medium severity.
+In KOTS installations that include the SDK, users need to update their licenses from the Admin Console as described in [About Syncing Licenses](#about-syncing-licenses) above. However, any logic in your application that uses the SDK API will update the user's license information without the customer needing to deploy a license update in the Admin Console.

-## 1.0.0-beta.12
+For information about how to use the SDK API to query license entitlements at runtime, see [Querying Entitlements with the Replicated SDK API](/vendor/licenses-reference-sdk).

-Released on November 6, 2023
+### License Expiration Handling {#expiration}

-### New Features {#new-features-1-0-0-beta-12}
-* Adds support for custom metrics in air gapped installs.
+The built-in `expires_at` license field defines the expiration date for a customer license. When you set an expiration date in the Vendor Portal, the `expires_at` field is encoded in ISO 8601 format (`2026-01-23T00:00:00Z`) and is set to midnight UTC at the beginning of the calendar day (`00:00:00`) on the date selected.

-## 1.0.0-beta.11
+Replicated enforces the following logic when a license expires:
+* By default, instances with expired licenses continue to run.
+  To change the behavior of your application when a license expires, you can add custom logic in your application that queries the `expires_at` field using the Replicated SDK in-cluster API. For more information, see [Querying Entitlements with the Replicated SDK API](/vendor/licenses-reference-sdk).
+* Customers with expired licenses cannot log in to the Replicated registry to pull a Helm chart for installation or upgrade.
+* Customers with expired licenses cannot pull application images through the Replicated proxy registry or from the Replicated registry.
+* In Replicated KOTS installations, KOTS prevents instances with expired licenses from receiving updates.

-Released on October 30, 2023
+### Replacing Licenses for Existing Installations

-### New Features {#new-features-1-0-0-beta-11}
-* Adds support for running in air gapped mode.
-* Renames the `images.replicated` Helm value to `images.replicated-sdk`.
+Community licenses are the only license type that can be replaced with a new license without needing to reinstall the application. For more information, see [Community Licenses](licenses-about-types).

-## 1.0.0-beta.10
+Unless the existing customer is using a community license, it is not possible to replace one license with another license without reinstalling the application. When you need to make changes to a customer's entitlements, Replicated recommends that you edit the customer's license details in the Vendor Portal, rather than issuing a new license.
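+For reference, the following is a minimal sketch of the kind of runtime expiration check described above, assuming the SDK in-cluster API is reachable at `replicated:3000` (the service address used in the SDK examples elsewhere in this documentation) and that `curl` and `jq` are available. Because `expires_at` is ISO 8601 in UTC, comparing the timestamp strings lexicographically is sufficient:
+
+```bash
+#!/bin/bash
+# Sketch: query the SDK in-cluster API for the built-in expires_at field.
+expires_at=$(curl -s replicated:3000/api/v1/license/fields/expires_at | jq -r '.value')
+
+# An empty value means the license has no expiration date set.
+now=$(date -u +%Y-%m-%dT%H:%M:%SZ)
+if [[ -n "$expires_at" && "$now" > "$expires_at" ]]; then
+  echo "License expired on $expires_at" >&2
+  exit 1
+fi
+echo "License is valid (expires: ${expires_at:-never})"
+```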
-Released on October 13, 2023 +================ +File: docs/vendor/licenses-adding-custom-fields.md +================ +# Managing Customer License Fields -### Improvements {#improvements-1-0-0-beta-10} -* Adds support for adding custom tolerations to the SDK deployment via the `tolerations` value. -* Status informers will no longer be automatically generated if the user explicitly passes an empty array for the `statusInformers` value. +This topic describes how to manage customer license fields in the Replicated Vendor Portal, including how to add custom fields and set initial values for the built-in fields. -### Bug Fixes {#bug-fixes-1-0-0-beta-10} -* Fixes a bug that caused no status code to be returned from the custom metrics API requests. +## Set Initial Values for Built-In License Fields (Beta) -## 1.0.0-beta.9 +You can set initial values to populate the **Create Customer** form in the Vendor Portal when a new customer is created. This ensures that each new customer created from the Vendor Portal UI starts with the same set of built-in license field values. -Released on October 6, 2023 +:::note +Initial values are not applied to new customers created through the Vendor API v3. For more information, see [Create a customer](https://replicated-vendor-api.readme.io/reference/createcustomer-1) in the Vendor API v3 documentation. +::: -### Improvements {#improvements-1-0-0-beta-9} -* Adds support for setting additional environment variables in the replicated deployment via the `extraEnv` value. -* Updates the helm.sh/helm/v3 go module to v3.13.0 to resolve GHSA-6xv5-86q9-7xr8 with medium severity. +These _initial_ values differ from _default_ values in that setting initial values does not update the license field values for any existing customers. -### Bug Fixes {#bug-fixes-1-0-0-beta-9} -* Fixes an issue where data returned from API endpoints and instance reporting was outdated after a chart was upgraded. +To set initial values for built-in license fields: -## 1.0.0-beta.8 +1. In the Vendor Portal, go to **License Fields**. -Released on September 19, 2023 +1. Under **Built-in license options**, click **Edit** next to each license field where you want to set an initial value. -### Bug Fixes {#bug-fixes-1-0-0-beta-8} -* Fixes an issue where the `replicated` Pod/API failed to come up due to the inability to generate status informers if the application contains empty YAML documents, or documents that only have comments. + ![Edit Initial Value](/images/edit-initial-value.png) -## 1.0.0-beta.7 + [View a larger version of this image](/images/edit-initial-value.png) -Released on September 15, 2023 +## Manage Custom License Fields -### Improvements {#improvements-1-0-0-beta-7} -* The [custom metrics](/vendor/custom-metrics#configure-custom-metrics) API no longer requires authorization header. +You can create custom license fields in the Vendor Portal. For example, you can create a custom license field to set the number of active users permitted. Or, you can create a field that sets the number of nodes a customer is permitted on their cluster. -## 1.0.0-beta.6 +The custom license fields that you create are displayed in the Vendor Portal for all new and existing customers. If the custom field is not hidden, it is also displayed to customers under the **Licenses** tab in the Replicated Admin Console. -Released on September 7, 2023 +### Limitation -### New Features {#new-features-1-0-0-beta-6} +The maximum size for a license field value is 64KB. 
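+For reference, custom fields appear alongside the built-in `expires_at` field under `entitlements` in the downloaded license YAML. The following sketch shows roughly how a hypothetical `num_seats` integer field might be represented, mirroring the format of the built-in `expires_at` entry in the license example later in this documentation (the field name, title, and value here are illustrative only):
+
+```yaml
+entitlements:
+  num_seats:                 # hypothetical custom field
+    title: Number of Seats
+    description: Maximum number of active users permitted
+    value: 10
+    valueType: Integer
+    signature: {}
+```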
-Renames the SDK's Kubernetes resources and the library SDK chart from `replicated-sdk` to `replicated` to better align with standard SDK naming conventions.
+### Create Custom License Fields

-The `replicated-sdk` naming convention is still supported and existing integrations can continue to use `replicated-sdk` as the name of the SDK Kubernetes resources and SDK chart name. However, Replicated recommends that new integrations use the `replicated` naming convention.
+To create a custom license field:

-To update the naming convention of an existing integration from `replicated-sdk` to `replicated`, do the following before you upgrade to 1.0.0-beta.6 to avoid breaking changes:
+1. Log in to the Vendor Portal and select the application.

-* Update the dependencies entry for the SDK in the parent chart:
+1. On the **License Fields** page, click **Create license field**.

-  ```yaml
-  dependencies:
-  - name: replicated
-    repository: oci://registry.replicated.com/library
-    version: 1.0.0-beta.6
-  ```
+   ![Create a new license field dialog](/images/license-add-custom-field.png)

-* Update any requests to the SDK service in the cluster to use `replicated:3000` instead of `replicated-sdk:3000`.
+   [View a larger version of this image](/images/license-add-custom-field.png)

-* Update any automation that references the installation command for integration mode to `helm install replicated oci://registry.replicated.com/library/replicated --version 1.0.0-beta.6`.
+1. Complete the following fields:

-* If the SDK's values are modified in the `values.yaml` file of the parent chart, change the field name for the SDK subchart in the `values.yaml` file from `replicated-sdk` to `replicated`.
+   | Field | Description |
+   |-----------------------|------------------------|
+   | Field | The name used to reference the field. This value cannot be changed. |
+   | Title | The display name for the field. This is how the field appears in the Vendor Portal and the Admin Console. You can change the title in the Vendor Portal. |
+   | Type | The field type. Supported formats include integer, string, text (multi-line string), and boolean values. This value cannot be changed. |
+   | Default | The default value for the field for both existing and new customers. It is a best practice to provide a default value when possible. The maximum size for a license field value is 64KB. |
+   | Required | If checked, this prevents the creation of customers unless this field is explicitly defined with a value. |
+   | Hidden | If checked, the field is not visible to your customer in the Replicated Admin Console. The field is still visible to you in the Vendor Portal. **Note**: The Hidden field is displayed only for vendors with access to the Replicated installers (KOTS, kURL, Embedded Cluster). |

-* Change the field name of any values that are provided at runtime to the SDK from `replicated-sdk` to `replicated`. For example, `--set replicated.integration.enabled=false`.
+### Update Custom License Fields

-For more information, see [About the Replicated SDK](/vendor/replicated-sdk-overview).
+To update a custom license field:

-## 1.0.0-beta.5
+1. Log in to the Vendor Portal and select the application.
+1. On the **License Fields** page, click **Edit Field** on the right side of the target row. Changing the default value for a field updates the value for each existing customer record that has not overridden the default value.

-Released on September 1, 2023
+   :::important
+   Enabling **Is this field required?** updates the license field to be required on all new and existing customers.
If you enable **Is this field required?**, you must either set a default value for the field or manually update each existing customer to provide a value for the field.
+   :::
+
+### Set Customer-Specific Values for Custom License Fields

-### New Features {#new-features-1-0-0-beta-5}
-* Adds support for sending [custom application metrics](/vendor/custom-metrics) via the `/api/v1/app/custom-metrics` endpoint.
-* Adds support for installing the Helm chart via `helm template` then `kubectl apply` the generated manifests. Limitations to installing with this approach include:
-  - The [app history endpoint](/reference/replicated-sdk-apis#get-apphistory) will always return an empty array because there is no Helm history in the cluster.
-  - Status informers will not be automatically generated and would have to be provided via the [replicated-sdk.statusInformers](/vendor/insights-app-status#helm-installations) Helm value.
+To set a customer-specific value for a custom license field:

-## 0.0.1-beta.4
+1. Log in to the Vendor Portal and select the application.
+1. Click **Customers**.
+1. For the target customer, click the **Manage customer** button.
+1. Under **Custom fields**, enter values for the target custom license fields for the customer.

-Released on August 17, 2023
+   :::note
+   The maximum size for a license field value is 64KB.
+   :::

-### New Features {#new-features-0-0-1-beta-4}
-* Adds support for OpenShift clusters.
+   ![Custom license fields section in the manage customer page](/images/customer-license-custom-fields.png)

-### Improvements {#improvements-0-0-1-beta-4}
-* Application updates returned by the `/api/v1/app/updates` endpoint show in order from newest to oldest.
+   [View a larger version of this image](/images/customer-license-custom-fields.png)

-## 0.0.1-beta.3
+### Delete Custom License Fields

-Released on August 11, 2023
+Deleted license fields and their values do not appear in the customer's license in any location, including your view in the Vendor Portal, the downloaded YAML version of the license, and the Admin Console **License** screen.

-### Bug Fixes {#bug-fixes-0-0-1-beta-3}
-* Fixes an issue where generating a support bundle failed when using the Replicated SDK support bundle Secret in the Helm chart. The failure occurred due to a syntax issue where the `selector` field expected an array of strings instead of a map.
+By default, deleting a custom license field also deletes all of the values associated with the field in each customer record.

-## 0.0.1-beta.2
+Only administrators can delete license fields.

-Released on August 4, 2023
+:::important
+Replicated recommends that you take care when deleting license fields.

-### New Features {#new-features-0-0-1-beta-2}
-* Includes the application status as part of the [/app/info](/reference/replicated-sdk-apis#get-appinfo) endpoint response.
+Outages can occur for existing deployments if your application or the Admin Console **Config** page expect a license file to provide a required value.
+:::

-### Improvements {#improvements-0-0-1-beta-2}
-* The replicated-sdk image is now built using a distroless base image from Chainguard, which significantly reduces the overall size and attack surface.
+To delete a custom license field:

-## 0.0.1-beta.1
+1. Log in to the Vendor Portal and select the application.
+1. On the **License Fields** page, click **Edit Field** on the right side of the target row.
+1. Click **Delete** on the bottom left of the dialog.
+1. 
(Optional) Enable **Preserve License Values** to save values for the license field that were not set by the default in each customer record. Preserved license values are not visible to you or the customer. -Released on July 28, 2023 + :::note + If you enable **Preserve License Values**, you can create a new field with the same name and `type` as the deleted field to reinstate the preserved values. + ::: -### Improvements {#improvements-0-0-1-beta-1} -* Renames the SDK's Kubernetes resources and the library SDK chart from `replicated` to `replicated-sdk` to distinguish them from other replicated components. +1. Follow the instructions in the dialog and click **Delete**. ================ -File: docs/release-notes/rn-vendor-platform.md +File: docs/vendor/licenses-download.md ================ ---- -toc_max_heading_level: 2 -pagination_next: null -pagination_prev: null ---- +import AirGapLicenseDownload from "../partials/install/_airgap-license-download.mdx" -# Vendor Platform Release Notes +# Downloading Customer Licenses -This topic contains release notes for the Replicated Vendor Platform, which includes the [Vendor Portal](/vendor/vendor-portal-creating-account), the [Replicated CLI](/reference/replicated-cli-installing), and [Compatibility Matrix](/vendor/testing-about). The release notes list new features, improvements, bug fixes, known issues, and breaking changes. +This topic describes how to download a license file from the Replicated Vendor Portal. - +For information about how to download customer licenses with the Vendor API v3, see [Download a customer license file as YAML](https://replicated-vendor-api.readme.io/reference/downloadlicense) in the Vendor API v3 documentation. -## v2025.03.08-0 +## Download Licenses -Released on March 8, 2025 +You can download license files for your customers from the **Customer** page in the Vendor Portal. -### Bug Fixes {#bug-fixes-v2025-03-08-0} -* Fixes an issue on the Compatibility Matrix **Usage History** page that caused the `pageSize` parameter to be set to an incorrect value. +To download a license: -## v2025.03.06-1 +1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Customers** page. +1. In the row for the target customer, click the **Download License** button. -Released on March 6, 2025 + ![Download license button](/images/download-license-button.png) -### Bug Fixes {#bug-fixes-v2025-03-06-1} -* Updates the Download Portal to no longer show KOTS pre-releases. + [View a larger version of this image](/images/download-license-button.png) -## v2025.02.07-1 +## Enable and Download Air Gap Licenses {#air-gap-license} -Released on February 7, 2025 +The **Airgap Download Enabled** license option allows KOTS to install an application without outbound internet access using the `.airgap` bundle. -### Bug Fixes {#bug-fixes-v2025-02-07-1} -* Fixes a bug that caused the behavior of check boxes for instance events filters to be reversed. +To enable the air gap entitlement and download the license: -## v2025.02.06-2 + -Released on February 6, 2025 +================ +File: docs/vendor/licenses-install-types.mdx +================ +import InstallerOnlyAnnotation from "../partials/helm/_installer-only-annotation.mdx" -### Bug Fixes {#bug-fixes-v2025-02-06-2} -* Fixes a bug when viewing a release that caused the **Help** sidebar to be unopenable after it was closed. +# Managing Install Types for a License -## v2025.02.04-2 +This topic describes how to manage which installation types and options are enabled for a license. 
-Released on February 4, 2025 +## Overview -### Bug Fixes {#bug-fixes-v2025-02-04-2} -* Fixes an issue on the Compatibility Matrix Usage History page which caused items to appear outside the range of the selected date time. +You can control which installation methods are available to each of your customers by enabling or disabling **Install types** fields in the customer's license. -## v2025.02.03-4 +The following shows an example of the **Install types** field in a license: -Released on February 3, 2025 +![Install types license fields](/images/license-install-types.png) -### Bug Fixes {#bug-fixes-v2025-02-03-4} -* Fixes a bug that could cause private application images hosted in Docker Hub to be shown using anonymous commands in the [Download Portal](https://docs.replicated.com/vendor/helm-install-airgap). +[View a larger version of this image](/images/license-install-types.png) -## v2025.01.31-2 +The installation types that are enabled or disabled for a license determine the following: +* The Replicated installers ([Replicated KOTS](../intro-kots), [Replicated Embedded Cluster](/vendor/embedded-overview), [Replicated kURL](/vendor/kurl-about)) that the customer's license entitles them to use +* The installation assets and/or instructions provided in the Replicated Download Portal for the customer +* The customer's KOTS Admin Console experience -Released on January 31, 2025 +Setting the supported installation types on a per-customer basis gives you greater control over the installation method used by each customer. It also allows you to provide a more curated Download Portal experience, in that customers will only see the installation assets and instructions that are relevant to them. -### Bug Fixes {#bug-fixes-v2025-01-31-2} -* Updates the Helm instructions in the Download Portal to use the correct file name for `values.yaml` depending on if there is more than one Helm chart in the given release. For releases with multiple Helm charts, the values file for each Helm chart is named according to the name of the chart. This avoids file name conflicts for users when downloading and editing each values file. +## Understanding Install Types {#install-types} -## v2025.01.31-1 +In the customer license, under **Install types**, the **Available install types** field allows you to enable and disable different installation methods for the customer. -Released on January 31, 2025 +You can enable one or more installation types for a license. -### New Features {#new-features-v2025-01-31-1} -* Adds a new `instance_kurl_install_started_at` column to the customer instance exports. `instance_kurl_install_started_at` is the date and time when the install for the given kURL instance was reported to start. +The following describes each installation type available, as well as the requirements for enabling each type: -## v2025.01.30-0 + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| Install Type | Description | Requirements |
+|--------------|-------------|--------------|
+| Existing Cluster (Helm CLI) | Allows the customer to install with Helm in an existing cluster. The customer does not have access to the Replicated installers (Embedded Cluster, KOTS, and kURL).<br/><br/>When the Helm CLI Air Gap Instructions (Helm CLI only) install option is also enabled, the Download Portal displays instructions on how to pull Helm installable images into a local repository. See Understanding Additional Install Options below. | The latest release promoted to the channel where the customer is assigned must contain one or more Helm charts. It can also include Replicated custom resources, such as the Embedded Cluster Config custom resource, the KOTS HelmChart, Config, and Application custom resources, or the Troubleshoot Preflight and SupportBundle custom resources. |
+| Existing Cluster (KOTS install) | Allows the customer to install with Replicated KOTS in an existing cluster. | • Your Vendor Portal team must have the KOTS entitlement<br/>• The latest release promoted to the channel where the customer is assigned must contain KOTS custom resources, such as the KOTS HelmChart, Config, and Application custom resources. For more information, see [About Custom Resources](/reference/custom-resource-about). |
+| kURL Embedded Cluster (first generation product) | Allows the customer to install with Replicated kURL on a VM or bare metal server.<br/><br/>**Note**: For new installations, enable Replicated Embedded Cluster (current generation product) instead of Replicated kURL (first generation product). | • Your Vendor Portal team must have the kURL entitlement<br/>• A kURL installer spec must be promoted to the channel where the customer is assigned. For more information, see Creating a kURL Installer. |
+| Embedded Cluster (current generation product) | Allows the customer to install with Replicated Embedded Cluster on a VM or bare metal server. | • Your Vendor Portal team must have the Embedded Cluster entitlement<br/>• The latest release promoted to the channel where the customer is assigned must contain an Embedded Cluster Config custom resource. For more information, see [Embedded Cluster Config](/reference/embedded-config). |
    -Released on January 30, 2025 +## Understanding Additional Install Options {#install-options} -### New Features {#new-features-v2025-01-30-0} -* Adds a link to download Embedded Cluster installation assets in the Download Portal. -* Adds a button to log out of the Download Portal. +After enabling installation types in the **Available install types** field, you can also enable the following options in the **Additional install options** field: -### Bug Fixes {#bug-fixes-v2025-01-30-0} -* Fixes a bug that would prevent demoting a channel release when it was the only release on the channel. -* Fixes a bug that could have marked the wrong release as active if the semantic version for a demoted release was reused by multiple releases on the given channel. + + + + + + + + + + + + + + + + +
+| Install Type | Description | Requirements |
+|--------------|-------------|--------------|
+| Helm CLI Air Gap Instructions (Helm CLI only) | When enabled, a customer will see instructions on the Download Portal on how to pull Helm installable images into their local repository.<br/><br/>Helm CLI Air Gap Instructions is enabled by default when you select the Existing Cluster (Helm CLI) install type. For more information, see [Installing with Helm in Air Gap Environments](/vendor/helm-install-airgap). | The Existing Cluster (Helm CLI) install type must be enabled |
+| Air Gap Installation Option (Replicated Installers only) | When enabled, new installations with this license have an option in their Download Portal to install from an air gap package or do a traditional online installation. | At least one of the following Replicated install types must be enabled:<br/>• Existing Cluster (KOTS install)<br/>• kURL Embedded Cluster (first generation product)<br/>• Embedded Cluster (current generation product) |
    -## v2025.01.29-4 +## About Migrating Existing Licenses to Use Install Types -Released on January 29, 2025 +By default, when an existing customer license is migrated to include the Beta **Install types** field, the Vendor Portal automatically enables certain install types so that the customer does not experience any interruptions or errors in their deployment. -### Bug Fixes {#bug-fixes-v2025-01-29-4} -* Removes a duplicated section from the Download Portal. -* Fixes a bug where app name would be missing from the app bundle header in the Download Portal. +The Vendor Portal uses the following logic to enable install types for migrated licenses: -## v2025.01.29-1 +If the existing license has the **KOTS Install Enabled** field enabled, then the Vendor Portal enables the following install types in the migrated license by default: +* Existing Cluster (Helm CLI) +* Existing Cluster (KOTS install) +* kURL Embedded Cluster (first generation product) +* Embedded Cluster (current generation product) -Released on January 29, 2025 +Additionally, if the existing **KOTS Install Enabled** license also has the **Airgap Download Enabled** option enabled, then the Vendor Portal enables both of the air gap install options in the migrated license (**Helm CLI Air Gap Instructions (Helm CLI only)** and **Air Gap Installation Option (Replicated Installers only)**). -### Bug Fixes {#bug-fixes-v2025-01-29-1} -* Fixes bug that would result in the "Next" button being hidden from the support form. +Otherwise, if the **KOTS Install Enabled** field is disabled for the existing license, then the Vendor Portal enables only the **Existing Cluster (Helm CLI)** install type by default. All other install types will be disabled by default. -## v2025.01.28-1 +================ +File: docs/vendor/licenses-reference-helm.md +================ +# Checking Entitlements in Helm Charts Before Deployment -Released on January 28, 2025 +This topic describes how to check license entitlements before a Helm chart is installed or upgraded. The information in this topic applies to Helm charts installed with Replicated KOTS or Helm. -### New Features {#new-features-v2025-01-28-1} -* Adds `(demoted)` text label to any demoted channel releases in the Embedded Cluster install instructions accessed from the Vendor Portal customer manage page. +The Replicated SDK API can be used to check entitlements at runtime. For more information, see [Querying Entitlements with the Replicated SDK API](licenses-reference-sdk). -## v2025.01.27-0 +## Overview -Released on January 27, 2025 +The Replicated registry automatically injects customer entitlement information in the `global.replicated.licenseFields` field of your Helm chart values. For example: -### New Features {#new-features-v2025-01-27-0} -* Adds support for demoting and un-demoting releases from the **Release History** page in the Vendor Portal. +```yaml +# Helm chart values.yaml +global: + replicated: + licenseFields: + expires_at: + description: License Expiration + name: expires_at + signature: + v1: iZBpESXx7fpdtnbMKingYHiJH42rP8fPs0x8izy1mODckGBwVoA... + title: Expiration + value: "2023-05-30T00:00:00Z" + valueType: String +``` -## v2025.01.23-1 +You can access the values in the `global.replicated.licenseFields` field from your Helm templates to check customer entitlements before installation. -Released on January 23, 2025 +## Prerequisite -### New Features {#new-features-v2025-01-23-1} -* Adds pagination and search to the **Channels** page in Vendor Portal. 
+Add the Replicated SDK to your application:
+* For Helm-based applications, see [Install the SDK as a Subchart](/vendor/replicated-sdk-installing#install-the-sdk-as-a-subchart) in _Installing the Replicated SDK_
+* For applications that use standard Kubernetes manifests, see [Install the SDK Alongside a Kubernetes Manifest-Based Application](/vendor/replicated-sdk-installing#manifest-app) in _Installing the Replicated SDK_

-## v2025.01.17-3
+## Check Entitlements Before Installation or Upgrade

-Released on January 17, 2025
+To check entitlements before installation:

-### New Features {#new-features-v2025-01-17-3}
-* Compatibility Matrix: Adds `/v3/cmx/stats` to query historical Compatibility Matrix usage data. See [Get CMX usage stats](https://replicated-vendor-api.readme.io/reference/getcmxstats) in the Vendor API v3 documentation.
+1. Create or edit a customer to use for testing:

-## v2025.01.15-4
+   1. In the Vendor Portal, click **Customers**. Select a customer and click the **Manage customer** tab. Alternatively, click **+ Create customer** to create a new customer. For more information, see [Creating and Managing Customers](/vendor/releases-creating-customer).

-Released on January 15, 2025
+   1. Edit the built-in license fields or add custom fields for the customer. For example, you can set a license expiration date in the **Expiration policy** field. Or, you can create a custom field that limits the number of nodes a user is permitted in their cluster. For more information, see [Managing Customer License Fields](/vendor/licenses-adding-custom-fields).

-### New Features {#new-features-v2025-01-15-4}
-* Show the vendor's GitHub Collab repository in the Vendor Portal.
+1. In your Helm chart, update the Helm templates with one or more directives to access the license field. For example, you can access the built-in `expires_at` field with `{{ .Values.global.replicated.licenseFields.expires_at }}`. Add the desired logic to control application behavior based on the values of license fields.

-## v2025.01.06-5
+   For more information about accessing values files from Helm templates, see [Values Files](https://helm.sh/docs/chart_template_guide/values_files/) in the _Chart Template Guide_ section of the Helm documentation.

-Released on January 6, 2025
+1. Test your changes by promoting a new release and installing in a development environment:
+
+   1. Package your Helm chart and its dependencies into a `.tgz` chart archive. See [Packaging a Helm Chart for a Release](helm-install-release).
+
+   1. Add the `.tgz` archive to a release and promote to a development channel, such as Unstable. See [Managing Releases with the Vendor Portal](/vendor/releases-creating-releases).
+
+   1. Install in a development environment using the license ID for the test customer that you created. See [Installing with Helm](install-with-helm).

-### New Features {#new-features-v2025-01-06-5}
-* Adds the Vendor API v3 [/cmx/history](https://replicated-vendor-api.readme.io/reference/listcmxhistory) endpoint, which can be used to get historical data on Compatibility Matrix usage.
+1. Repeat these steps to add and test new license fields.

-## v2025.01.06-2
+================
+File: docs/vendor/licenses-reference-kots-runtime.mdx
+================
+# Querying Entitlements with the KOTS License API

-Released on January 6, 2025
+This topic describes how to use the Replicated KOTS License API to query license fields during runtime. The information in this topic applies to applications installed with KOTS.
-### Bug Fixes {#bug-fixes-v2025-01-06-2}
-* Fixes a bug that could cause instances to not receive updates on [semver](/vendor/releases-about#semantic-versioning) enabled channels when [--app-version-label](/reference/kots-cli-install#usage) flag is used during the install.
+:::important
+Using the KOTS License API to check entitlements during runtime is _not_ recommended for new applications distributed with Replicated. Instead, Replicated recommends that you include the Replicated SDK with your application and query entitlements during runtime using the SDK in-cluster API. See [Checking Entitlements with the Replicated SDK](licenses-reference-sdk).
+:::

-## v2025.01.02-1
+## Overview

-Released on January 2, 2025
+KOTS includes default logic to control access to features in the KOTS Admin Console and KOTS CLI based on the values for the built-in fields in the customer's license. For example, by default, KOTS uses the built-in `expires_at` field to prevent an instance from receiving updates when the customer license expires. You can add custom logic to your application to control the behavior of your application based on the built-in fields or any of the custom fields that you create.

-### Bug Fixes {#bug-fixes-v2025-01-02-1}
-* Fixes a bug that caused the Download Portal to display a blank screen.
+For information about creating custom license fields, see [Managing Customer License Fields](licenses-adding-custom-fields). For the list of built-in fields in customer licenses, see [Built-In License Fields](/vendor/licenses-using-builtin-fields).

-## v2024.12.31-2
+The KOTS Admin Console runs on the customer's cluster and provides entitlement information during application runtime. You can query the admin console `/license/v1/license` endpoint to enforce entitlements at runtime.

-Released on December 31, 2024
+## Query Fields

-### New Features {#new-features-v2024-12-31-2}
-* Adds ability to enable and disable [Development Mode](/vendor/replicated-sdk-development) per customer in the Replicated SDK.
+To reference license fields at runtime, send an HTTP request to the admin console `/license/v1/license` endpoint at the following location:

-## v2024.12.27-1
+```
+http://kotsadm:3000/license/v1/license
+```

-Released on December 27, 2024
+The query returns a response in JSON format. For example:

-### Bug Fixes {#bug-fixes-v2024-12-27-1}
-* Fixes a bug that would cause the configured GitHub username to not show up on the Account Settings page when logging in with Google.
+```json
+{"license_id":"WicPRaoCv1pJ57ZMf-iYRxTj25eZalw3",
+"installation_id":"a4r1s31mj48qw03b5vwbxvm5x0fqtdl6",
+"assignee":"FirstCustomer",
+"release_channel":"Unstable",
+"license_type":"trial",
+"expiration_time":"2026-01-23T00:00:00Z",
+"fields":[
+  {"field":"Customer ID","title":"Customer ID (Internal)","type":"Integer","value":121,"hide_from_customer":true},
+  {"field":"Modules","title":"Enabled Modules","type":"String","value":"Analytics, Integration"}]}
+```
+
+## Parse the API Response

-## v2024.12.17-1
+To return a license field value, parse the response using the name of the license
+field.

-Released on December 17, 2024
+For example, the following Javascript parses the response for the value of a
+`seat_count` custom field:

-### New Features {#new-features-v2024-12-17-1}
-* Compatibility Matrix: View your remaining credit balance using the `v3/cluster/status` endpoint via 'credit_balance'. The value is in cents.
+```javascript +import * as rp from "request-promise"; -## v2024.12.11-5 +rp({ + uri: "http://kotsadm:3000/license/v1/license", + json: true +}).then(license => { + const seatCount = license.fields.find((field) => { + return field.field === "seat_count"; + }); + console.log(seatCount.value); +}).catch(err => { + // Handle error response from `kotsadm` +}); +``` -Released on December 11, 2024 +================ +File: docs/vendor/licenses-reference-sdk.mdx +================ +# Querying Entitlements with the Replicated SDK API -### Bug Fixes {#bug-fixes-v2024-12-11-5} -* Fixes a bug that would hide air gap instances on the **Customer Reporting** page even if they existed. +This topic describes how to query license entitlements at runtime using the Replicated SDK in-cluster API. The information in this topic applies to applications installed with Replicated KOTS or Helm. -## v2024.12.11-1 +## Overview -Released on December 11, 2024 +The Replicated SDK retrieves up-to-date customer license information from the Vendor Portal during runtime. This means that any changes to customer licenses are reflected in real time in the customer environment. For example, you can revoke access to your application when a license expires, expose additional product functionality dynamically based on entitlements, and more. For more information about distributing the SDK with your application, see [About the Replicated SDK](replicated-sdk-overview). -### New Features {#new-features-v2024-12-11-1} -* Downloaded support bundle file names will now include customer name and instance name or ID if available. +After the Replicated SDK is initialized and running in a customer environment, you can use the following SDK API endpoints to get information about the license: +* `/api/v1/license/info`: List license details, including the license ID, the channel the customer is assigned, and the license type. +* `/api/v1/license/fields`: List all the fields in the license. +* `/api/v1/license/fields/{field_name}`: List details about a specific license field, including the field name, description, type, and the value. -## v2024.12.10-0 +For more information about these endpoints, see [license](/reference/replicated-sdk-apis#license) in _Replicated SDK API_. -Released on December 10, 2024 +## Prerequisite -### Bug Fixes {#bug-fixes-v2024-12-10-0} -* Compatibility Matrix: Fix `update ttl` for VM-based clusters (including k3s, OpenShift, rke2, and so on). +Add the Replicated SDK to your application: +* For Helm-based applications, see [Install the SDK as a Subchart](/vendor/replicated-sdk-installing#install-the-sdk-as-a-subchart) in _Installing the Replicated SDK_ +* For applications that use standard Kubernetes manifests, see [Install the SDK Alongside a Standard Manifest-Based Application](/vendor/replicated-sdk-installing#manifest-app) in _Installing the Replicated SDK_ -## v2024.12.06-4 +## Query License Entitlements at Runtime {#runtime} -Released on December 6, 2024 +To use the SDK API to query entitlements at runtime: -### Bug Fixes {#bug-fixes-v2024-12-06-4} -* Compatiblity Matrix: Fix for `cluster ls` not taking into account end-time when including terminated clusters. +1. Create or edit a customer to use for testing: -## v2024.12.06-2 + 1. In the Vendor Portal, click **Customers**. Select a customer and click the **Manage customer** tab. Alternatively, click **+ Create customer** to create a new customer. For more information, see [Creating and Managing Customers](/vendor/releases-creating-customer). 
-Released on December 6, 2024 + 1. Edit the built-in fields and add custom fields for the customer. For example, you can set a license expiration date in the **Expiration policy** field. Or, you can create a custom field that limits the number of nodes a user is permitted in their cluster. For more information, see [Managing Customer License Fields](/vendor/licenses-adding-custom-fields). -### Bug Fixes {#bug-fixes-v2024-12-06-2} -* Fixes a bug that could cause the Replicated CLI to fail to promote a new release to a channel. +1. (Recommended) Develop against the SDK API `license` endpoints locally: -## v2024.12.05-5 + 1. Install the Replicated SDK as a standalone component in your cluster. This is called _integration mode_. Installing in integration mode allows you to develop locally against the SDK API without needing to create releases for your application in the Vendor Portal. See [Developing Against the SDK API](/vendor/replicated-sdk-development). -Released on December 5, 2024 + 1. In your application, add logic to control application behavior based on the customer license information returned by the SDK API service running in your cluster. See [license](/reference/replicated-sdk-apis#license) in _Replicated SDK API (Beta)_. -### Bug Fixes {#bug-fixes-v2024-12-05-5} -* Compatibility Matrix: Display time in local timezone on the **Cluster History** page. + **Example:** -## v2024.12.04-2 + ```bash + curl replicated:3000/api/v1/license/fields/expires_at + ``` -Released on December 4, 2024 + ```json + { + "name": "expires_at", + "title": "Expiration", + "description": "License Expiration", + "value": "2023-05-30T00:00:00Z", + "valueType": "String", + "signature": { + "v1": "c6rsImpilJhW0eK+Kk37jeRQvBpvWgJeXK2M..." + } + } + ``` -### Bug Fixes {#bug-fixes-v2024-12-04-2} -* Fixes a bug that could cause the "email is required for customers with helm install enabled" error when creating or updating customers. +1. When you are ready to test your changes outside of integration mode, do the following: -## v2024.12.04-1 + 1. Package your Helm chart and its dependencies (including the Replicated SDK) into a `.tgz` chart archive. See [Packaging a Helm Chart for a Release](helm-install-release). -Released on December 4, 2024 + 1. Add the `.tgz` archive to a release and promote to a development channel, such as Unstable. See [Managing Releases with the Vendor Portal](/vendor/releases-creating-releases). -### Bug Fixes {#bug-fixes-v2024-12-04-1} -* Compatibility Matrix: Fix cluster assignment for EKS, AKS, GKE and OKE in case no warm clusters are available. + 1. Install in a development environment using the license ID for the test customer that you created. See [Installing with Helm](install-with-helm). -## v2024.12.02-2 + 1. (Optional) As needed, verify the license information returned by the SDK API in your development environment using port forwarding to access the SDK service locally: -Released on December 2, 2024 + 1. Use port forwarding to access the `replicated` service from the local development environment on port 3000: -### Bug Fixes {#bug-fixes-v2024-12-02-2} -* Fixes a bug that could cause the [kURL Embedded Cluster](https://docs.replicated.com/vendor/licenses-install-types#install-types) option to be disabled for customers even though there is a kURL Installer spec available. 
+ ```bash + kubectl port-forward service/replicated 3000 + ``` -## v2024.12.02-0 + The output looks similar to the following: -Released on December 2, 2024 + ```bash + Forwarding from 127.0.0.1:3000 -> 3000 + ``` -### New Features {#new-features-v2024-12-02-0} -* Adds support for `kots.io/installer-only` annotation on Kuberntes specs. For more information, see [kots.io/installer-only Annotation](/vendor/licenses-install-types#installer-only-annotation) in _Managing Install Types for a License (Beta)_. + For more information about `kubectl port-forward`, see [port-forward](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#port-forward) in the kubectl reference documentation. -## v2024.11.27-1 + 1. With the port forward running, in another terminal, use the SDK API to return information about the license. -Released on November 27, 2024 + **Example:** -### Bug Fixes {#bug-fixes-v2024-11-27-1} -* Fixes an issue where a KOTS release was incorrectly identified as a [Helm CLI-only](/vendor/licenses-install-types#install-types) release, preventing it from being promoted. -## v2024.11.27-0 + ``` + curl localhost:3000/api/v1/license/fields/expires_at + ``` -Released on November 27, 2024 +1. Repeat these steps to add and test new license fields. -### Bug Fixes {#bug-fixes-v2024-11-27-0} -* Fixes a bug where Helm install instructions in the [Download Portal](/vendor/helm-install-airgap) didn't use custom domains. +1. (Recommended) Use signature verification in your application to ensure the integrity of the license field. See [Verifying License Field Signatures with the Replicated SDK API](/vendor/licenses-verify-fields-sdk-api). -## v2024.11.26-6 +================ +File: docs/vendor/licenses-referencing-fields.md +================ +# Checking Entitlements in Preflights with KOTS Template Functions -Released on November 26, 2024 +This topic describes how to check custom entitlements before installation or upgrade using preflight checks and KOTS template functions in the License context. The information in this topic applies to applications installed with KOTS. -### Bug Fixes {#bug-fixes-v2024-11-26-6} -* Fixes a bug where it causes the Customer Portal to show a blank screen when missing data from an endpoint. +## Overview -## v2024.11.26-2 +KOTS includes default logic to control access to features in the Replicated Admin Console and KOTS CLI based on the values for the built-in fields in the customer's license. For example, by default, KOTS uses the built-in `expires_at` field to prevent an instance from receiving updates when the customer license expires. You can add custom logic to your application to control the behavior of your application based on the built-in fields or any of the custom fields that you create. -Released on November 26, 2024 +For more information, see [Managing Customer License Fields](licenses-adding-custom-fields). For the list of built-in fields in customer licenses, see [Built-In License Fields](/vendor/licenses-using-builtin-fields). -### Bug Fixes {#bug-fixes-v2024-11-26-2} -* Fixes a bug that caused images to be excluded from the Helm air gap install instructions. +## Add Preflights to Check Entitlements Before Installation or Upgrade {#install} -## v2024.11.20-5 +To enforce entitlements when your customer installs or updates your application, +you can use the Replicated LicenseFieldValue template function in your application to read the value of license fields. 
The LicenseFieldValue template function accepts the built-in license fields and any custom fields that you configure. For more information, see [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue) in _License Context_.

-Released on November 20, 2024
+For example, a license might limit how many nodes are permitted in a customer's
+cluster. You could define this limit by creating a `node_count` custom license field:

-### New Features {#new-features-v2024-11-20-5}
-* Allows the user to edit cluster tags from the **Edit Cluster** page.
+| Name | Key | Type | Description |
+|------|-----|------|-------------|
+| Node Count | node_count | Integer | The maximum number of nodes permitted |

-## v2024.11.20-2
+To enforce the node count when a customer installs or updates your application,
+you can use LicenseFieldValue to create a preflight check that references the custom `node_count` field:

-Released on November 20, 2024
+```yaml
+apiVersion: troubleshoot.sh/v1beta2
+kind: Preflight
+metadata:
+  name: example-preflight-checks
+spec:
+  analyzers:
+    - nodeResources:
+        checkName: Node Count Check
+        outcomes:
+          - fail:
+              when: 'count() > {{repl LicenseFieldValue "node_count"}}'
+              message: The cluster has more nodes than the {{repl LicenseFieldValue "node_count"}} you are licensed for.
+          - pass:
+              message: The number of nodes matches your license ({{repl LicenseFieldValue "node_count"}})
+```

-### Bug Fixes {#bug-fixes-v2024-11-20-2}
-* Fixes a bug that could cause the [Channel installation command](/vendor/releases-about#channels-page) to use a kURL Installer other than the latest.
+In the example above, the preflight check uses the `nodeResources` analyzer and the value of the custom `node_count` field to determine if the customer has exceeded the maximum number of nodes permitted by their license. If the preflight check fails, a failure message is displayed to the user and KOTS prevents the installation or upgrade from continuing.

-## v2024.11.18-0
+For more information about this example, see [How Can I Use License Custom Fields Value in a Pre-Flight Check?](https://help.replicated.com/community/t/how-can-i-use-license-custom-fields-value-in-a-pre-flight-check/624) in Replicated Community.

-Released on November 18, 2024
+For more information about defining preflight checks, see [Defining Preflight Checks](preflight-defining).

-### Bug Fixes {#bug-fixes-v2024-11-18-0}
-* Fixes a bug where the Helm install instructions would not appear on the **Customer** pages if the KOTS install license option was not enabled.
+================
+File: docs/vendor/licenses-using-builtin-fields.mdx
+================
+import GitOpsNotRecommended from "../partials/gitops/_gitops-not-recommended.mdx"

-## v2024.11.13-0
+# Built-In License Fields

-Released on November 13, 2024
+This topic describes the built-in license fields that appear in customer licenses for applications distributed with Replicated.

-### Bug Fixes {#bug-fixes-v2024-11-13-0}
-* Fixes a bug that could cause an error message similar to the following to display when attempting to update an existing customer: "This team cannot create customers with kurl install enabled".
+## Overview

-## v2024.11.12-4
+The license associated with each customer record in the Replicated Vendor Portal includes several built-in fields.
These built-in fields include customer properties (such as the customer name, customer email, and the Vendor Portal channel where the customer is assigned), the license expiration date, as well as the Replicated features that are enabled for the customer (such as the supported install types or Admin Console features). -Released on November 12, 2024 +When a customer installs an application distributed with Replicated, the values for each built-in and custom field in their license can be accessed using the [Replicated SDK](/vendor/replicated-sdk-overview) in-cluster API [license](/reference/replicated-sdk-apis#license) endpoints. Applications installed with a Replicated installer (KOTS, kURL, Embedded Cluster) can also access license fields using the Replicated KOTS [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue) template function. -### Bug Fixes {#bug-fixes-v2024-11-12-4} -* Fixes a bug on the **Customer** page that caused the **Embedded Cluster Install Instructions** to be hidden when the Embedded Cluster install type was enabled for the license. +The following shows an example of a customer license: -## v2024.11.12-4 +```yaml +apiVersion: kots.io/v1beta1 +kind: License +metadata: + name: customertest +spec: + appSlug: gitea + channelID: 2iy68JBTkvUqamgD... + channelName: Beta + channels: + - channelID: 2iy68JBTkvUqamgD... + channelName: Beta + channelSlug: beta + endpoint: https://replicated.app + isDefault: true + isSemverRequired: true + replicatedProxyDomain: proxy.replicated.com + customerEmail: example@replicated.com + customerName: Customer Test + endpoint: https://replicated.app + entitlements: + expires_at: + description: License Expiration + signature: {} + title: Expiration + value: "" + valueType: String + isAirgapSupported: true + isEmbeddedClusterDownloadEnabled: true + isKotsInstallEnabled: true + isSemverRequired: true + isSupportBundleUploadSupported: true + licenseID: 2sY6ZC2J9sO2... + licenseSequence: 4 + licenseType: prod + replicatedProxyDomain: proxy.replicated.com + signature: eyJsaWNlbnNlRGF... +``` -Released on November 12, 2024 +## License Field Names -### Bug Fixes {#bug-fixes-v2024-11-12-4} -* Fixes a bug on the **Customer** page that caused the **Embedded Cluster Install Instructions** to be hidden when the Embedded Cluster install type was enabled for the license. +This section lists the built-in fields that are included in customer licenses for applications distributed with Replicated. -## v2024.11.12-2 +:::note +The built-in license fields are reserved field names. +::: -Released on November 12, 2024 +### General License Fields -### Improvements {#improvements-v2024-11-12-2} -* Updates the styles and removes irrelevant content for errored clusters on the Compatibility Matrix Clusters page. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| Field Name | Description |
+|------------|-------------|
+| `appSlug` | The unique application slug that the customer is associated with. This value never changes. |
+| `channelID` | The ID of the channel where the customer is assigned. When the customer's assigned channel changes, the latest release from that channel will be downloaded on the next update check. |
+| `channelName` | The name of the channel where the customer is assigned. When the customer's assigned channel changes, the latest release from that channel will be downloaded on the next update check. |
+| `licenseID`, `licenseId` | Unique ID for the installed license. This value never changes. |
+| `customerEmail` | The customer email address. |
+| `endpoint` | For applications installed with a Replicated installer (KOTS, kURL, Embedded Cluster), this is the endpoint that the KOTS Admin Console uses to synchronize the licenses and download updates. This is typically `https://replicated.app`. |
+| `entitlementValues` | Includes both the built-in `expires_at` field and any custom license fields. For more information about adding custom license fields, see [Managing Customer License Fields](licenses-adding-custom-fields). |
+| `expires_at` | Defines the expiration date for the license. The date is encoded in ISO 8601 format (`2026-01-23T00:00:00Z`) and is set to midnight UTC at the beginning of the calendar day (`00:00:00`) on the date selected. If a license does not expire, this field is missing. For information about the default behavior when a license expires, see [License Expiration Handling](licenses-about#expiration) in _About Customers_. |
+| `licenseSequence` | Every time a license is updated, its sequence number is incremented. This value represents the license sequence that the client currently has. |
+| `customerName` | The name of the customer. |
+| `signature` | The base64-encoded license signature. This value will change when the license is updated. |
+| `licenseType` | A string value that describes the type of the license, which is one of the following: `paid`, `trial`, `dev`, `single-tenant-vendor-managed`, or `community`. For more information about license types, see [Managing License Type](licenses-about-types). |
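+
+For example, for applications installed with a Replicated installer, a built-in field can be read in a deployed resource with the KOTS LicenseFieldValue template function. The following is a minimal sketch (the ConfigMap name and data keys are illustrative, not part of the built-in field list):
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  # illustrative name
+  name: license-info
+data:
+  customerName: 'repl{{ LicenseFieldValue "customerName" }}'
+  # may be empty if the license does not expire
+  expiresAt: 'repl{{ LicenseFieldValue "expires_at" }}'
+```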
-## v2024.11.11-0
+
+### Install Types
-Released on November 11, 2024
+
+The table below describes the built-in license fields related to the supported install types. For more information, see [Managing Install Types for a License](/vendor/licenses-install-types).
-### Improvements {#improvements-v2024-11-11-0}
-* Compatibility Matrix: Clusters in error will remain visible for about 5 minutes before they will be transferred to the cluster history.
+
+| Field Name | Description |
+|------------|-------------|
+| `isEmbeddedClusterDownloadEnabled` | If a license supports installation with Replicated Embedded Cluster, this field is set to `true` or missing. If Embedded Cluster installations are not supported, this field is `false`. This field requires that the vendor has the Embedded Cluster entitlement and that the release at the head of the channel includes an [Embedded Cluster Config](/reference/embedded-config) custom resource. This field also requires that the "Install Types" feature is enabled for your Vendor Portal team. Reach out to your Replicated account representative to get access. |
+| `isHelmInstallEnabled` | If a license supports Helm installations, this field is set to `true` or missing. If Helm installations are not supported, this field is set to `false`. This field requires that the vendor packages the application as Helm charts and, optionally, Replicated custom resources. This field also requires that the "Install Types" feature is enabled for your Vendor Portal team. Reach out to your Replicated account representative to get access. |
+| `isKotsInstallEnabled` | If a license supports installation with Replicated KOTS, this field is set to `true`. If KOTS installations are not supported, this field is either `false` or missing. This field requires that the vendor has the KOTS entitlement. |
+| `isKurlInstallEnabled` | If a license supports installation with Replicated kURL, this field is set to `true` or missing. If kURL installations are not supported, this field is `false`. This field requires that the vendor has the kURL entitlement and a promoted kURL installer spec. This field also requires that the "Install Types" feature is enabled for your Vendor Portal team. Reach out to your Replicated account representative to get access. |
-### Bug Fixes {#bug-fixes-v2024-11-11-0}
-* Fixes exception raised when submitting a support case without a GitHub username.
-* When downloading an Embedded Cluster installation asset, a 400 status code and message will now be returned when an air gap bundle does not exist and `airgap=true` is set in the URL.
+
+### Install Options
-## v2024.11.06-1
+
+The table below describes the built-in license fields related to install options.
-Released on November 6, 2024
+
+| Field Name | Description |
+|------------|-------------|
+| `isAirgapSupported` | If a license supports air gap installations with the Replicated installers (KOTS, kURL, Embedded Cluster), then this field is set to `true`. If Replicated installer air gap installations are not supported, this field is missing. When you enable this field for a license, the `license.yaml` file will have license metadata embedded in it and must be re-downloaded. |
+| `isHelmAirgapEnabled` | If a license supports Helm air gap installations, then this field is set to `true`. If Helm air gap is not supported, this field is missing. When you enable this feature, the `license.yaml` file will have license metadata embedded in it and must be re-downloaded. This field requires that the "Install Types" feature is enabled for your Vendor Portal team. Reach out to your Replicated account representative to get access. |
-### Bug Fixes {#bug-fixes-v2024-11-06-1}
-* Fixes a bug in the Helm Install Instructions modal when entering an email address.
+
+### Admin Console Feature Options
-## v2024.11.01-1
+
+The table below describes the built-in license fields related to the Admin Console feature options. The Admin Console feature options apply only to licenses that support installation with the Replicated installers (KOTS, kURL, Embedded Cluster).
-Released on November 1, 2024
+
+| Field Name | Description |
+|------------|-------------|
+| `isDisasterRecoverySupported` | If a license supports the Embedded Cluster disaster recovery feature, this field is set to `true`. If a license does not support disaster recovery for Embedded Cluster, this field is either missing or `false`. **Note**: Embedded Cluster Disaster Recovery is in Alpha. To get access to this feature, reach out to Alex Parker at [alexp@replicated.com](mailto:alexp@replicated.com). For more information, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery). |
+| `isGeoaxisSupported` | (kURL Only) If a license supports integration with GeoAxis, this field is set to `true`. If GeoAxis is not supported, this field is either `false` or missing. **Note**: This field requires that the vendor has the GeoAxis entitlement. It also requires that the vendor has access to the Identity Service entitlement. |
+| `isGitOpsSupported` | If a license supports the KOTS AutoGitOps workflow in the Admin Console, this field is set to `true`. If Auto-GitOps is not supported, this field is either `false` or missing. See [KOTS Auto-GitOps Workflow](/enterprise/gitops-workflow). |
+| `isIdentityServiceSupported` | If a license supports identity-service enablement for the Admin Console, this field is set to `true`. If identity service is not supported, this field is either `false` or missing. **Note**: This field requires that the vendor have access to the Identity Service entitlement. |
+| `isSemverRequired` | If set to `true`, this field requires that the Admin Console orders releases according to Semantic Versioning. This field is controlled at the channel level. For more information about enabling Semantic Versioning on a channel, see [Semantic Versioning](releases-about#semantic-versioning) in _About Releases_. |
+| `isSnapshotSupported` | If a license supports the snapshots backup and restore feature, this field is set to `true`. If a license does not support snapshots, this field is either missing or `false`. **Note**: This field requires that the vendor have access to the Snapshots entitlement. |
+| `isSupportBundleUploadSupported` | If a license supports uploading a support bundle in the Admin Console, this field is set to `true`. If a license does not support uploading a support bundle, this field is either missing or `false`. |
    -### Bug Fixes {#bug-fixes-v2024-11-01-1} -* Fix default license install options when creating new license. +================ +File: docs/vendor/licenses-verify-fields-sdk-api.md +================ +# Verifying License Field Signatures with the Replicated SDK API -## v2024.10.28-3 +This topic describes how to verify the signatures of license fields when checking customer license entitlements with the Replicated SDK. -Released on October 28, 2024 +## Overview -### Bug Fixes {#bug-fixes-v2024-10-28-3} -* Fixes a bug that could cause the **Customer Email** field to be required. +To prevent man-in-the-middle attacks or spoofing by your customers, license fields are cryptographically signed with a probabilistic signature scheme (PSS) signature to ensure their integrity. The PSS signature for a license field is included in the response from the Replicated SDK API `/license/fields` and `/license/fields/{field-name}` endpoints as a Base64 encoded string. -## v2024.10.25-8 +The following shows an example of a Base64 encoded PSS signature for an `expires_at` field returned by the SDK API: -Released on October 25, 2024 +```json +{ + "name": "expires_at", + "title": "Expiration", + "description": "License Expiration", + "value": "2023-05-30T00:00:00Z", + "valueType": "String", + "signature": { + "v1": "c6rsImpilJhW0eK+Kk37jeRQvBpvWgJeXK2MD0YBlIAZEs1zXpmvwLdfcoTsZMOj0lZbxkPN5dPhEPIVcQgrzfzwU5HIwQbwc2jwDrLBQS4hGOKdxOWXnBUNbztsHXMqlAYQsmAhspRLDhBiEoYpFV/8oaaAuNBrmRu/IVAW6ahB4KtP/ytruVdBup3gn1U/uPAl5lhzuBifaW+NDFfJxAX..." + } +} +``` -### Bug Fixes {#bug-fixes-v2024-10-25-8} -* Fixes a bug where users could not create a new customer when there are required license fields. +Replicated recommends that you use signature verification to ensure the integrity of each license field you use in your application. For more information about how to check entitlements in your application for Helm CLI installations, see [Checking Entitlements in Helm Charts Before Deployment](licenses-reference-helm). -## v2024.10.25-3 +## Requirement -Released on October 25, 2024 +Include the Replicated SDK as a dependency of your application Helm chart. For more information, see [Install the SDK as a Subchart](replicated-sdk-installing#install-the-sdk-as-a-subchart) in _Installing the Replicated SDK_. -### Improvements {#improvements-v2024-10-25-3} -* Add GitHub issue URL to feature request confirmation modal. +## Use Your Public Key to Verify License Field Signatures -## v2024.10.24-2 +In your application, you can use your public key (available in the Vendor Portal) and the MD5 hash of a license field value to verify the PSS signature of the license field. -Released on October 24, 2024 +To use your public key to verify license field signatures: -### Improvements {#improvements-v2024-10-24-2} -* Renames "Embedded cluster" to "Embedded Kubernetes" and "Bring my own cluster" to "Bring my own Kubernetes" in the Download Portal side bar. +1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Settings** page. -## v2024.10.23-6 +1. Click the **Replicated SDK Signature Verification** tab. -Released on October 23, 2024 + ![signature verification page](/images/signature-verification.png) + [View a larger version of this image](/images/signature-verification.png) -### Bug Fixes {#bug-fixes-v2024-10-23-6} -* Compatibility Matrix: Fixes a bug where the `--min-nodes` count could be specified without specifying `--max-nodes` count. +1. Under **Your public key**, copy the key and save it in a secure location. 
-## v2024.10.23-3 +1. (Optional) Under **Verification**, select the tab for the necessary programming language, and copy the code sample provided. -Released on October 23, 2024 +1. In your application, add logic that uses the public key to verify the integrity of license field signatures. If you copied one of the code samples from the Vendor Portal in the previous step, paste it into your application and make any additional edits as required. -### New Features {#new-features-v2024-10-23-3} -* Compatibility Matrix: Oracle Kubernetes Engine (OKE) now available in Beta. + If you are not using one of the code samples provided, consider the following requirements for verifying license field values: + * License field signatures included in the response from the SDK API `/license/fields` and `/license/fields/{field-name}` endpoints are Base64 encoded and must be decoded before they are verified. + * The MD5 hash of the license field value is required to verify the signature of the license field. The raw license field value is included in the response from the SDK API `/license/fields` and `/license/fields/{field-name}` endpoints. The MD5 hash of the value must be calculated and used for signature verification. -## v2024.10.17-1 +================ +File: docs/vendor/namespaces.md +================ +# Application Namespaces -Released on October 17, 2024 +Replicated strongly recommends that applications are architected to deploy a single application into a single namespace when possible. -### Improvements {#improvements-v2024-10-17-1} -* Makes the wording and styling consistent on the Helm and Embedded Cluster install instructions modals. -* Presents users a new step to update their GitHub username when creating support cases or feature requests. +If you are distributing your application with Replicated KOTS, you can implement an architecture in which a single application is deployed into a single namespace. -## v2024.10.16-0 +To do this, omit any namespace in the application manifests `metadata.namespace`. Do not use the Config custom resource object to make the namespace user-configurable. -Released on October 16, 2024 +When you do not specify a namespace in application manifests, KOTS deploys to whatever namespace it is already running in. This gives the most flexibility when deploying to end user environments, as users already select the namespace where KOTS runs. Scoping to a single namespace also allows the app to run with minimal Kubernetes permissions, which can reduce friction when an application runs as a tenant in a large cluster. Overall, letting the end user manage namespaces is the easiest way to reduce friction. -### Improvements {#improvements-v2024-10-16-0} -* Capitalize "Embedded Cluster" in the support workflow. +The following examples demonstrate the recommended approach of excluding the namespace from the application manifests, as well as the incorrect approaches of hardcoding the namespace or injecting the namespace as a user-supplied value: -## v2024.10.10-5 +**Recommended** -Released on October 10, 2024 +```yaml +# good, namespace absent +apiVersion: apps/v1 +kind: Deployment +metadata: + name: spline-reticulator +spec: +``` -### Bug Fixes {#bug-fixes-v2024-10-10-5} -* Adds the "Copy URL" button for the Download Portal link back into the Download Portal section of the **Customer Reporting** page. 
+**Not Recommended** -## v2024.10.01-0 +```yaml +# bad, hardcoded +apiVersion: apps/v1 +kind: Deployment +metadata: + name: spline-reticulator + namespace: graphviz-pro +spec: +``` -Released on October 1, 2024 +```yaml +# bad, configurable +apiVersion: apps/v1 +kind: Deployment +metadata: + name: spline-reticulator + namespace: repl{{ ConfigOption "gv_namespace" }} +spec: +``` -### New Features {#new-features-v2024-10-01-0} -* Compatibility Matrix: Adds API support for VM create, versions, update ttl, ls. +================ +File: docs/vendor/offsite-backup.md +================ +# Offsite Data Backup -## v2024.09.30-2 +Replicated stores customer data in multiple databases across Amazon Web +Services (AWS) S3 buckets. Clustering and network redundancies help to avoid a +single point of failure. -Released on September 30, 2024 +The offsite data backup add-on provides additional redundancy by copying data to +an offsite Google Cloud Provider (GCP) storage location. This helps to mitigate +any potential data loss caused by an outage to AWS. -### Bug Fixes {#bug-fixes-v2024-09-30-2} -* Fixes a bug that could cause release notes to not be shown on the [Channel History](/vendor/releases-about#properties) page. +:::note +The offsite data backup add-on is available only to [Replicated Enterprise](https://www.replicated.com/pricing/) customers at an additional cost. Please [open a product request](https://vendor.replicated.com/support?requestType=feature&productArea=vendor) if you are interested in this feature. +::: -## v2024.09.27-4 +## Overview -Released on September 27, 2024 +When the offsite data backup add-on is enabled, data is migrated from Replicated's existing AWS S3 buckets to a dedicated second set of AWS S3 buckets. These buckets are only used for vendors with this add-on enabled, and all vendor data remains logically isolated by vendor Team. After data is migrated from existing S3 buckets to the secondary buckets, +all data is deleted from the original S3 buckets. -### Bug Fixes {#bug-fixes-v2024-09-27-4} -* Fixes a bug where you could not disable the Embedded Cluster license entitlement once it was enabled. -* Fixes a bug that would show an error when estimating the cost of a Compatibility Matrix cluster, even when the request was successful. +To ensure customer data in the offsite GCP storage remains up-to-date, the GCP +account uses the Google Storage Transfer service to synchronize at least daily with the +secondary dedicated S3 buckets. -## v2024.09.27-1 +The offsite GCP data backup functions only as secondary data storage and does not serve customer +data. Customer data continues to be served from the AWS S3 buckets. In the case of an AWS outage, Replicated can use a manual +process to restore customer data from the GCP backups into a production-grade database. -Released on September 27, 2024 +For more information, see [Architecture](#architecture) below. -### Bug Fixes {#bug-fixes-v2024-09-27-1} -* Fixes a bug in Customer Portal that would result in "Unauthorized" message when downloading Embedded Cluster installer. +## Architecture -## v2024.09.26-4 +The following diagram shows the flow of air gap build data and registry image data +when the offsite data backup add-on is enabled. The flow of data that is backed +up offsite in GCP is depicted with green arrows. 
-Released on September 26, 2024 +![architecture of offsite data storage with the offsite data backup add-on](../../static/images/offsite-backup.png) -### Improvements {#improvements-v2024-09-26-4} -* Improves styles in order to make it more obvious that license ID is required when creating a cluster using Embedded Cluster on the Compatibility Matrix Create Cluster page and modal. +[View a larger version of this image](../../static/images/offsite-backup.png) -## v2024.09.26-2 +As shown in the diagram above, when the offsite data backup add-on is enabled, +registry and air gap data are stored in dedicated S3 buckets. Both of +these dedicated S3 buckets back up data to offsite storage in GCP. -Released on September 26, 2024 +The diagram also shows how customer installations continue to pull data from the +vendor registry and the customer portal when offsite data backup is enabled. -### New Features {#new-features-v2024-09-26-2} -* Compatibility Matrix: Alpha support for creating clusters of ubuntu servers (20.04). +================ +File: docs/vendor/operator-defining-additional-images.mdx +================ +import AirGapBundle from "../partials/airgap/_airgap-bundle.mdx" -## v2024.09.25-2 +# Defining Additional Images -Released on September 25, 2024 +This topic describes how to define additional images to be included in the `.airgap` bundle for a release. -### New Features {#new-features-v2024-09-25-2} -* Adds ability to remove a node group in the Compatibility Matrix Create Cluster page and modal. +## Overview -## v2024.09.25-1 + -Released on September 25, 2024 +When building the `.airgap` bundle for a release, the Replicated Vendor Portal finds and includes all images defined in the Pod specs for the release. During installation or upgrade, KOTS retags images from the `.airgap` bundle and pushes them to the registry configured in KOTS. -### New Features {#new-features-v2024-09-25-1} -* Adds persistent sessions in the Customer Portal. +Any required images that are _not_ defined in your application manifests must be listed in the `additionalImages` attribute of the KOTS Application custom resource. This ensures that the images are included in the `.airgap` bundle for the release. -### Bug Fixes {#bug-fixes-v2024-09-25-1} -* Fixes an issue that caused registry proxy authentication requests to be redirected to proxy.replicated.com instead of the custom hostname when one is configured. +## Define Additional Images for Air Gap Bundles -## v2024.09.18-3 +KOTS supports including the following types of images in the `additionalImages` field: -Released on September 18, 2024 +* Public images referenced by the docker pullable image name. +* Images pushed to a private registry that was configured in the Vendor Portal, referenced by the docker-pullable, upstream image name. For more information about configuring private registries, see [Connecting to an External Registry](/vendor/packaging-private-images). + :::note + If you use the [Replicated proxy registry](/vendor/private-images-about) for online (internet-connected) installations, be sure to use the _upstream_ image name in the `additionalImages` field, rather than referencing the location of the image at `proxy.replicated.com`. + ::: +* Images pushed to the Replicated registry referenced by the `registry.replicated.com` name. -### New Features {#new-features-v2024-09-18-3} -* Removes the "Helm-only" release option from the releases page. 
+The following example demonstrates adding multiple images to `additionalImages`:
-## v2024.09.18-2
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Application
+metadata:
+  name: my-app
+spec:
+  additionalImages:
+    - elasticsearch:7.6.0
+    - quay.io/orgname/private-image:v1.2.3
+    - registry.replicated.com/my-operator/my-private-image:abd123f
+```
-Released on September 18, 2024
+
+================
+File: docs/vendor/operator-defining-additional-namespaces.md
+================
+# Defining Additional Namespaces
-### Improvements {#improvements-v2024-09-18-2}
-* Compatibility Matrix: Improved error handling when creating clusters for Embedded Cluster.
+
+Operators often need to be able to manage resources in multiple namespaces in the cluster.
+When deploying an application to an existing cluster, Replicated KOTS creates a Kubernetes Role and RoleBinding that are limited to only accessing the namespace that the application is being installed into.
-## v2024.09.17-1
+
+In addition to RBAC policies, clusters running in air gap environments or clusters that are configured to use a local registry also need to ensure that image pull secrets exist in all namespaces that the operator will manage resources in.
-Released on September 17, 2024
+
+## Creating additional namespaces
-### Bug Fixes {#bug-fixes-v2024-09-17-1}
-* Adds a PATCH method for updating channels.
+
+An application can identify additional namespaces to create at installation time.
+You can define these additional namespaces in the Application custom resource by adding an `additionalNamespaces` attribute to the Application custom resource manifest file. For more information, see [Application](../reference/custom-resource-application) in the _Custom Resources_ section.
-## v2024.09.17-0
+
+When these are defined, `kots install` will create the namespaces and ensure that the KOTS Admin Console has full access to manage resources in these namespaces.
+This is accomplished by creating a Role and RoleBinding per namespace, and setting the Subject to the Admin Console service account.
+If the current user account does not have access to create these additional namespaces, the installer will show an error and fail.
-Released on September 17, 2024
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Application
+metadata:
+  name: my-operator
+spec:
+  additionalNamespaces:
+    - namespace1
+    - namespace2
+```
-### Bug Fixes {#bug-fixes-v2024-09-17-0}
-* Fixes updating the custom domain override on channels in the Vendor Portal.
+
+In addition to creating these namespaces, the Admin Console will ensure that the application pull secret exists in them, and that this secret has access to pull the application images. This includes both images that are used and additional images defined in the Application custom resource manifest. For more information, see [Defining Additional Images](operator-defining-additional-images).
-## v2024.09.13-0
+
+The pull secret name can be obtained using the [ImagePullSecretName](../reference/template-functions-config-context/#imagepullsecretname) template function.
+An operator can reliably depend on this secret existing in all installs (online and air gapped), and can use this secret name in any created `podspec` to pull private images.
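+
+For example, a minimal sketch of a Pod template that references this secret (the container name and image here are illustrative):
+
+```yaml
+spec:
+  imagePullSecrets:
+    # resolves to the name of the pull secret that KOTS creates
+    - name: 'repl{{ ImagePullSecretName }}'
+  containers:
+    - name: my-operator
+      image: 'repl{{ LocalImageName "quay.io/orgname/private-image:v1.2.3" }}'
+```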
-Released on September 13, 2024
+
+## Dynamic namespaces
-### Bug Fixes {#bug-fixes-v2024-09-13-0}
-* The correct error status code is now returned when downloading an Embedded Cluster release fails.
+
+Some applications need access to dynamically created namespaces or even all namespaces.
+In this case, an application spec can list `"*"` as one of its `additionalNamespaces` in the Application manifest file.
+When KOTS encounters the wildcard, it will not create any namespaces, but it will ensure that the application image pull secret is copied to all namespaces.
+The Admin Console will run an informer internally to watch namespaces in the cluster, and when a new namespace is created, the secret will automatically be copied to it.
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Application
+metadata:
+  name: my-operator
+spec:
+  additionalNamespaces:
+    - "*"
+```
-## v2024.09.12-3
+
+When the wildcard (`"*"`) is listed in `additionalNamespaces`, KOTS will use a ClusterRole and ClusterRoleBinding for the Admin Console.
+This will ensure that the Admin Console will continue to have permissions to all newly created namespaces, even after the install has finished.
-Released on September 12, 2024
+
+================
+File: docs/vendor/operator-packaging-about.md
+================
+# About Packaging a Kubernetes Operator Application
-### New Features {#new-features-v2024-09-12-3}
-* Compatibility Matrix: Adds new instance shapes for OKE (Oracle) distribution.
+
+Kubernetes Operators can be packaged and delivered as an application using the same methods as other Kubernetes applications.
-## v2024.09.13-1
+
+Operators are good for [specific use cases](https://blog.replicated.com/operators-in-kots/). In general, we recommend thinking deeply about the problem space an application solves before going down the Operator path because, although powerful, Operators take a lot of time to build and maintain.
-Released on September 13, 2024
+
+Operators are generally defined using one or more `CustomResourceDefinition` manifests, and the controller is often a `StatefulSet`, along with other additional objects.
+These Kubernetes manifests can be included in an application by adding them to a release and promoting the release to a channel.
-### New Features {#new-features-v2024-09-13-1}
-* Compatibility Matrix: Adds Alpha support for Embedded Cluster multinode.
+
+Kubernetes Operators differ from traditional applications because they interact with the Kubernetes API to create and manage other objects at runtime.
+When a `CustomResource` is deployed to the cluster that has the operator running, the Operator may need to create new Kubernetes objects to fulfill the request.
+When an Operator creates an object that includes a `PodSpec`, the Operator should use locally-available images in order to remain compatible with air gapped environments and customers who have configured a local registry to push all images to.
+Even environments that aren't air gapped may need access to private images that are included as part of the application at runtime.
-## v2024.09.11-2
+
+An application can include a definition listing the additional images that are required, and KOTS exposes the local registry details (endpoint, namespace, and secrets) to the application so that they can be referenced when creating a `PodSpec` at runtime.
-Released on September 11, 2024
+
+================
+File: docs/vendor/operator-referencing-images.md
+================
+# Referencing Images
-### Bug Fixes {#bug-fixes-v2024-09-11-2}
-* Compatibility Matrix: Fixes the "Update TTL" section of the Edit Cluster page and disallows setting a lower TTL than one that was previously configured.
-* Compatibility Matrix: Fixes an issue where you could not purchase more usage credits if you had a zero credit balance.
+
+This topic explains how to support the use of private image registries for applications that are packaged with Kubernetes Operators.
+
+## Overview
-## v2024.09.09-0
+
+To support the use of private images in all environments, the Kubernetes Operator code must use KOTS functionality to determine the image name and image pull secrets for all pods when they are created.
-Released on September 9, 2024
+
+There are several template functions available to assist with this.
+This might require adding two new environment variables to the Manager so that it can read these values.
-### New Features {#new-features-v2024-09-09-0}
-* Adds support for setting custom passwords for the customer's Download Portal. See [Sharing Assets Through the Download Portal](/vendor/releases-share-download-portal).
+
+The steps to ensure that an Operator is using the correct image names and has the correct image pull secrets in dynamically created pods are:
-## v2024.09.05-3
+
+1. Add a new environment variable to the Manager Pod so that the Manager knows the location of the private registry, if one is set.
+2. Add a new environment variable to the Manager Pod so that the Manager also knows the `imagePullSecret` that's needed to pull the local image.
-Released on September 5, 2024
+
+## Step 1: Add a reference to the local registry
-### New Features {#new-features-v2024-09-05-3}
-* Compatibility Matrix: Adds support for EKS EC2 instance types m5 and c5.
+
+The manager of an Operator is often a `StatefulSet`, but could be a `Deployment` or another kind.
+Regardless of where the spec is defined, the location of the private images can be read using the Replicated KOTS template functions. For more information about using template functions, see [About Template Functions](/reference/template-functions-about).
-## v2024.09.04-0
+
+#### Option 1: Define each image
+If an Operator only requires one additional image, the easiest way to determine this location is to use the `LocalImageName` function.
+This will always return the image name to use, whether the customer's environment is configured to use a local registry or not.
-Released on September 4, 2024
+
+**Example:**
-### New Features {#new-features-v2024-09-04-0}
-* Comaptibility Matrix: Added capability to create ws and wss tunnels from the web UI.
+
+```yaml
+env:
+  - name: IMAGE_NAME_ONE
+    value: 'repl{{ LocalImageName "elasticsearch:7.6.0" }}'
+```
-## v2024.08.30-0
+
+For online installations (no local registry), this will be written with no changes -- the variable will contain `elasticsearch:7.6.0`.
+For installations that are air gapped or have a locally-configured registry, this will be rewritten as the locally referenceable image name. For example, `registry.somebigbank.com/my-app/elasticsearch:7.6.0`.
-Released on August 30, 2024
+
+**Example:**
-### New Features {#new-features-v2024-08-30-0}
-* After uploading a support bundle, if instance insights detects a Kubernetes distribution/version combination, the distribution and version will be preloaded when creating a cluster with Compatibility Matrix.
+
+```yaml
+env:
+  - name: IMAGE_NAME_TWO
+    value: 'repl{{ LocalImageName "quay.io/orgname/private-image:v1.2.3" }}'
+```
-## v2024.08.28-0
+
+In the above example, this is a private image, and will always be rewritten.
For online installations, this will return `proxy.replicated.com/proxy/app-name/quay.io/orgname/private-image:v1.2.3` and for installations with a locally-configured registry it will return `registry.somebigbank.com/org/my-app-private-image:v1.2.3`.
-Released on August 28, 2024
+
+#### Option 2: Build image names manually
-### Bug Fixes {#bug-fixes-v2024-08-28-0}
-* Click docs link will open a new window to related documentation in the Compatbility Matrix versions available modal.
+
+For applications that have multiple images or dynamically construct the image name at runtime, the KOTS template functions can also return the elements that make up the local registry endpoint and secrets, and let the application developer construct the locally-referenceable image name.
-## v2024.08.23-2
+
+**Example:**
-Released on August 23, 2024
+
+```yaml
+env:
+  - name: REGISTRY_HOST
+    value: 'repl{{ LocalRegistryHost }}'
+  - name: REGISTRY_NAMESPACE
+    value: 'repl{{ LocalRegistryNamespace }}'
+```
-### New Features {#new-features-v2024-08-23-2}
-* Adds new `channels` column to customers csv export containing basic channels json blob.
-* Adds new `customer_channels` object to customer instances csv exports containing basic channels json blob.
-* Adds `channels` object to customer instances json export.
+
+## Step 2: Determine the imagePullSecret
-## v2024.08.20-5
+
+Private, local images will need to reference an image pull secret to be pulled.
+The value of the secret's `.dockerconfigjson` is provided in a template function, and the application can write this pull secret as a new secret to the namespace.
+If the application is deploying the pod to the same namespace as the Operator, the pull secret will already exist in the namespace, and the secret name can be obtained using the [ImagePullSecretName](../reference/template-functions-config-context/#imagepullsecretname) template function.
+KOTS will create this secret automatically, but only in the namespace that the Operator is running in.
+It's the responsibility of the application developer (the Operator code) to ensure that this secret is present in any namespace that new pods will be deployed to.
-Released on August 20, 2024
+
+The `LocalRegistryImagePullSecret` template function returns the base64-encoded Docker auth config that can be written directly to a secret, and referenced in the `imagePullSecrets` attribute of the PodSpec.
-### New Features {#new-features-v2024-08-20-5}
-* Adds support for the [`dropdown`](/reference/custom-resource-config#dropdown) and [`radio`](/reference/custom-resource-config#radio) Config item types in the Config preview.
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: myregistrykey
+  namespace: awesomeapps
+data:
+  .dockerconfigjson: '{{repl LocalRegistryImagePullSecret }}'
+type: kubernetes.io/dockerconfigjson
+```
-## v2024.08.20-4
+
+This will return an image pull secret for the locally configured registry.
-Released on August 20, 2024
+
+If your application has both public and private images, it is recommended that the image name is passed to the image pull secret for the locally configured registry. This will ensure that installs without a local registry can differentiate between private, proxied and public images.
-### Bug Fixes {#bug-fixes-v2024-08-20-4}
-* Fixes a bug that caused Embedded Cluster installation artifacts to not be shown in the Download Portal.
+**Example:**
-## v2024.08.19-1
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: my-pull-secret
+  namespace: awesomeapps
+data:
+  .dockerconfigjson: '{{repl LocalRegistryImagePullSecret }}'
+type: kubernetes.io/dockerconfigjson
+```
-Released on August 19, 2024
+
+In the above example, the `LocalRegistryImagePullSecret` function will return an empty auth array if the installation is not air gapped, does not have a local registry configured, and the `elasticsearch:7.6.0` image is public.
+If the image is private, the function will return the license-key derived pull secret.
+And finally, if the installation is using a local registry, the image pull secret will contain the credentials needed to pull from the local registry.
-### New Features {#new-features-v2024-08-19-1}
-* Update Embedded Cluster install instructions to use custom domain when applicable.
+
+**Example:**
-## v2024.08.15-2
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: my-pull-secret
+  namespace: awesomeapps
+data:
+  .dockerconfigjson: '{{repl LocalRegistryImagePullSecret }}'
+type: kubernetes.io/dockerconfigjson
+```
-Released on August 15, 2024
+
+The above example will always return an image pull secret.
+For installations without a local registry, it will be the Replicated license secret, and for installations with a local registry, it will be the credentials for the local registry.
-### New Features {#new-features-v2024-08-15-2}
-* Adds a PATCH method for `/v3/customer/:customerId` path that allows updating a customer without having to resend the entire customer object.
+
+## Using the local registry at runtime
-## v2024.08.13-1
+
+The developer of the Operator should use these environment variables to change the `image.name` in any deployed PodSpec to ensure that it will work in air gapped environments.
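+
+For example, a minimal sketch of a Pod that the Operator might create at runtime, assuming the environment variables and pull secret from the examples above (all names here are illustrative):
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: worker
+  namespace: awesomeapps
+spec:
+  imagePullSecrets:
+    # secret written by the Operator from LocalRegistryImagePullSecret
+    - name: my-pull-secret
+  containers:
+    - name: worker
+      # the Operator reads this value from the IMAGE_NAME_TWO environment variable
+      image: quay.io/orgname/private-image:v1.2.3
+```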
-Released on August 13, 2024
+
+================
+File: docs/vendor/orchestrating-resource-deployment.md
+================
+import WeightLimitation from "../partials/helm/_helm-cr-weight-limitation.mdx"
+import HooksLimitation from "../partials/helm/_hooks-limitation.mdx"
+import HookWeightsLimitation from "../partials/helm/_hook-weights-limitation.mdx"
-### Bug Fixes {#bug-fixes-v2024-08-13-1}
-* The Proxy Registry now includes scope in the WWW-Authenticate auth challenge response header. This fixes support for the Registry Proxy as a Sonatype Nexus Docker proxy.
+
+# Orchestrating Resource Deployment
-## v2024.08.12-0
+
+This topic describes how to orchestrate the deployment order of resources deployed as part of your application. The information in this topic applies to Helm chart- and standard manifest-based applications deployed with Replicated KOTS.
-Released on August 12, 2024
+
+## Overview
-### Improvements {#improvements-v2024-08-12-0}
-* Streamlines design of the rows on the **Customers** page hybrid view, as well as the customer info header on the **Manage Customer** and **Customer Reporting** pages.
+
+Many applications require that certain resources are deployed and in a ready state before other resources can be deployed.
-## v2024.08.09-5
+
+When installing an application that includes one or more Helm charts, KOTS always deploys standard Kubernetes manifests to the cluster _before_ deploying any Helm charts. For example, if your release contains a Helm chart, a CRD, and a ConfigMap, then the CRD and ConfigMap resources are deployed before the Helm chart.
-Released on August 9, 2024
+
+For applications deployed with KOTS, you can manage the order in which resources are deployed using the following methods:
-### Bug Fixes {#bug-fixes-v2024-08-09-5}
-* Fixes an issue that could cause anonymous image pulls from proxy.replicated.com to fail to resume when interrupted.
+
+* For Helm charts, set the `weight` property in the corresponding HelmChart custom resource. See [HelmChart `weight`](#weight).
-## v2024.08.09-0
+* For standard manifests, add KOTS annotations to the resources. See [Standard Manifest Deployment Order with KOTS Annotations](#manifests).
-Released on August 9, 2024
+
+## Helm Chart Deployment Order with `weight` {#weight}
-### New Features {#new-features-v2024-08-09-0}
-* The Compatibility Matrix cluster usage endpoint now also includes channel_id, channel_sequence and version_label.
+
+You can configure the [`weight`](/reference/custom-resource-helmchart-v2#weight) property of the Replicated HelmChart custom resource to define the order in which the Helm charts in your release are installed.
-## v2024.08.06-0
+
+KOTS directs Helm to install the Helm charts based on the value of `weight` in ascending order, deploying the chart with the lowest weight first. Any dependencies are installed along with the parent chart. For example, a chart with a `weight` of `-1` deploys before a chart with a `weight` of `0`.
-Released on August 6, 2024
+
+The value for the `weight` property can be any negative or positive integer or `0`. By default, when you do not provide a `weight` for a Helm chart, the `weight` is `0`.
-### Bug Fixes {#bug-fixes-v2024-08-06-0}
-* Fixes a bug that caused /require-2fa page to render blank.
+
+For example:
-## v2024.08.01-0
+
+```yaml
+apiVersion: kots.io/v1beta2
+kind: HelmChart
+metadata:
+  name: samplechart
+spec:
+  chart:
+    name: samplechart
+    chartVersion: 3.1.7
+  releaseName: samplechart-release-1
+  # weight determines the order that charts are applied, with lower weights first.
+  weight: 4
+```
-Released on August 1, 2024
+
+#### Limitations
-### Improvements {#improvements-v2024-08-01-0}
-* Updates the Embedded Cluster install instructions to include relevant flags for showing server errors if the release download fails.
+
+The `weight` field in the HelmChart custom resource has the following limitations:
-## v2024.07.24-0
+
+* <WeightLimitation/>
-Released on July 24, 2024
+
+* When installing a Helm chart-based application, KOTS always deploys standard Kubernetes manifests to the cluster _before_ deploying Helm charts. For example, if your release contains a Helm chart, a CRD, and a ConfigMap, then the CRD and ConfigMap resources are deployed before the Helm chart. The `weight` property does not allow Helm charts to be deployed before standard manifests.
+
+## Standard Manifest Deployment Order with KOTS Annotations {#manifests}
-### New Features {#new-features-v2024-07-24-0}
-* Adds an "Estimate cluster cost" button on the **Create a cluster** page for Compatibility Matrix.
+
+You can use the KOTS annotations described in this section to control the order in which standard manifests are deployed.
-### Bug Fixes {#bug-fixes-v2024-07-24-0}
-* Fixes inconsistent lint results when editing KOTS releases.
+
+### Requirement
-## v2024.07.23-1
+
+You must quote the boolean or integer values in annotations because Kubernetes annotations must be strings. For more information about working with annotations in Kubernetes resources, see [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) in the Kubernetes documentation.
-Released on July 23, 2024
+
+### `kots.io/creation-phase`
-### New Features {#new-features-v2024-07-23-1}
-* Adds the `--dry-run` flag for Compatibility Matrix, which shows the estimated cost of a cluster before you create the cluster.
+
+When the `kots.io/creation-phase: '<integer>'` annotation is present on a resource, KOTS groups the resource into the specified creation phase. KOTS deploys each phase in order from lowest to highest. Phases can be any positive or negative integer ranging from `'-9999'` to `'9999'`.
-## v2024.07.22-2
+
+Resources in the same phase are deployed in the same order that Helm installs resources. To view the order in which KOTS deploys resources of the same phase, see [Helm installs resources in the following order](https://helm.sh/docs/intro/using_helm/#:~:text=Helm%20installs%20resources%20in%20the,order) in the Helm documentation.
-Released on July 22, 2024
+
+The following example deploys the `CustomResourceDefinition` before the default creation phase:
-### Bug Fixes {#bug-fixes-v2024-07-22-2}
-* Fixes a bug where customer channels were not being updated when using the Replicated CLI.
+
+```yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: myresources.example.com
+  annotations:
+    kots.io/creation-phase: "-1"
+...
+```
-## v2024.07.22-0
+
+### `kots.io/deletion-phase`
-Released on July 22, 2024
+
+When the `kots.io/deletion-phase: '<integer>'` annotation is present on a resource, KOTS groups the resource into the specified deletion phase. KOTS deletes each phase in order from lowest to highest. Resources within the same phase are deleted in the reverse order from which they were created. Phases can be any positive or negative integer ranging from `'-9999'` to `'9999'`.
-### Improvements {#improvements-v2024-07-22-0}
-* Improves responsive styles on the Compatibility Matrix **Create Cluster** page and on the **Troubleshoot** page.
+
+The following example deploys the `CustomResourceDefinition` before the default creation phase and deletes the resource after the default deletion phase:
-## v2024.07.19-4
+
+```yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: myresources.example.com
+  annotations:
+    kots.io/creation-phase: "-1"
+    kots.io/deletion-phase: "1"
+...
+```
+
+### `kots.io/wait-for-ready`
-Released on July 19, 2024
+
+When the `kots.io/wait-for-ready: '<bool>'` annotation is present on a resource and evaluates to `'true'`, KOTS waits for the resource to be in a ready state before deploying any other resources. For most resource types, KOTS has existing logic to determine if a resource is ready. If there is no existing logic for the given resource type, then KOTS waits until the resource exists and is queryable from the Kubernetes API server.
-### New Features {#new-features-v2024-07-19-4}
-* Adds Compatibility Matrix support for port expose using websockets.
+
+In the following example, KOTS waits for the Postgres `StatefulSet` to be ready before continuing to deploy other resources:
-## v2024.07.19-3
+
+```yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: postgresql
+  annotations:
+    kots.io/wait-for-ready: 'true'
+  labels:
+    app: postgresql
+spec:
+  selector:
+    matchLabels:
+      app: postgresql
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      labels:
+        app: postgresql
+    spec:
+      containers:
+        - name: postgresql
+          image: "postgres:9.6"
+          imagePullPolicy: ""
+...
+```
-Released on July 19, 2024
+
+### `kots.io/wait-for-properties`
-### New Features {#new-features-v2024-07-19-3}
-* Enables the "Buy $500 additional credits" button on the **Compatibility Matrix** page for any vendor with a valid contract.
+
+When the `kots.io/wait-for-properties: '<jsonpath>=<value>,<jsonpath>=<value>'` annotation is present on a resource, KOTS waits for one or more specified resource properties to match the desired values before deploying other resources. This annotation is useful when the `kots.io/wait-for-ready` annotation, which waits for a resource to exist, is not sufficient.
-## v2024.07.19-0
+
+The value for this annotation is a comma-separated list of key-value pairs, where the key is a JSONPath specifying the path to the property and the value is the desired value for the property. In the following example, KOTS waits for a resource to reach a desired state before deploying other resources. In this case, KOTS waits until each of the three status properties have the target values:
-Released on July 19, 2024
+
+```yaml
+kind: MyResource
+metadata:
+  name: my-resource
+  annotations:
+    kots.io/wait-for-properties: '.status.tasks.extract=true,.status.tasks.transform=true,.status.tasks.load=true'
+...
+status:
+  tasks:
+    extract: false
+    transform: false
+    load: false
+```
-### New Features {#new-features-v2024-07-19-0}
-* Adds Compatibility Matrix support for ARM based nodes in Oracle OKE.
+
+================
+File: docs/vendor/packaging-air-gap-excluding-minio.md
+================
+# Excluding MinIO from Air Gap Bundles (Beta)
-## v2024.07.15-0
+
+The Replicated KOTS Admin Console requires an S3-compatible object store to store application archives and support bundles. By default, KOTS deploys MinIO to satisfy the object storage requirement. For more information about the options for installing without MinIO in existing clusters, see [Installing KOTS in Existing Clusters Without Object Storage](/enterprise/installing-stateful-component-requirements).
-Released on July 15, 2024
+
+As a software vendor, you can exclude MinIO images from all Admin Console air gap distributions (`kotsadm.tar.gz`) in the download portal. Excluding MinIO from the `kotsadm.tar.gz` air gap bundle is useful if you want to prevent MinIO images from appearing in the air gap distribution that your end users download. It also reduces the file size of `kotsadm.tar.gz`.
-### New Features {#new-features-v2024-07-15-0}
-* Adds a dropdown to select a specific release in the Embedded Cluster installation instructions dialog.
+
+:::note
+You can still retrieve a bundle with MinIO images from the KOTS release page in GitHub when this feature is enabled. See [replicatedhq/kots](https://github.com/replicatedhq/kots/releases/) in GitHub.
+:::
-## v2024.07.09-0
+
+To exclude MinIO from the `kotsadm.tar.gz` Admin Console air gap bundle:
-Released on July 9, 2024
+
+1. Log in to your Vendor Portal account. Select **Support** > **Request a feature**, and submit a feature request for "Exclude MinIO image from air gap bundle". After this feature is enabled, all `kotsadm.tar.gz` files in the download portal will not include MinIO.
-### Improvements {#improvements-v2024-07-09-0}
-* UI improvements for Embedded Cluster installation instructions.
+
+1. Instruct your end users to set the flag `--with-minio=false` with the `kots install` command during an air gap installation. For more information about setting this runtime flag, see [Installing KOTS in Existing Clusters Without Object Storage](/enterprise/installing-stateful-component-requirements).
-## v2024.07.08-0 + :::important + If you have this feature enabled in your Team account and the end user does not include `--with-minio=false` with the `kots install` command, then the installation fails. + ::: -Released on July 8, 2024 +================ +File: docs/vendor/packaging-cleaning-up-jobs.md +================ +# Cleaning Up Kubernetes Jobs -### Bug Fixes {#bug-fixes-v2024-07-08-0} -* Fixed Oracle Compatibility Matrix Pricing. Pricing is now following Oracle's cost estimator. +This topic describes how to use the Replicated KOTS `kots.io/hook-delete-policy` annotation to remove Kubernetes job objects from the cluster after they complete. -## v2024.06.26-4 +## About Kubernetes Jobs -Released on June 26, 2024 +Kubernetes Jobs are designed to run and then terminate. But, they remain in the namespace after completion. Because Job objects are immutable, this can cause conflicts and errors when attempting to update the Job later. -### New Features {#new-features-v2024-06-26-4} -* Adds a new "Upcoming license expiration" section to the Dashboard page. +A common workaround is to use a content SHA from the Job object in the name. However, a user can update their application instance through various events (upstream update, license sync, config update, CLI upload). If the Job is already completed, it is an error to reapply the same job to the cluster again. -## v2024.06.25-1 +The built-in Replicated KOTS operator/controller can help by deleting Jobs upon completion. +This allows the same Job to be deployed again without polluting the namespace with completed Jobs. -Released on June 25, 2024 +For more information about Job objects, see [Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) in the Kubernetes documentation. -### Bug Fixes {#bug-fixes-v2024-06-25-1} -* Use the correct Embedded Cluster icon on the customer page. -* Release API now returns a 400 with a more descriptive error message when a release includes duplicate chart names. +## KOTS `hook-delete-policy` Annotation -## v2024.06.24-1 +To enable the built-in KOTS operator/controller to automatically delete Jobs when they complete, specify a delete hook policy as an annotation on the Job object. -Released on June 24, 2024 +The KOTS annotation key is `kots.io/hook-delete-policy` and there are two possible values (you can use both simultaneously): `hook-succeeded` and `hook-failed`. -### Bug Fixes {#bug-fixes-v2024-06-24-1} -* Replicated proxy registry no longer requires access to proxy-auth.replicated.com. +When this annotation is present and includes `hook-succeeded`, the job is deleted when it completes successfully. +If this annotation is present and includes `hook-failed`, the job is deleted on failure. -## v2024.06.24-0 +For Helm charts deployed with KOTS, KOTS automatically adds this `kots.io/hook-delete-policy` annotation to any Job objects in the Helm chart that include a `helm.sh/hook-delete-policy` annotation. This means that there is nothing extra to configure when deploying a Helm chart with Helm delete hooks. -Released on June 24, 2024 +The following example shows a Job object with the `kots.io/hook-delete-policy` annotation: -### Improvements {#improvements-v2024-06-24-0} -* Support form product list renames **Troubleshoot** to **Support bundles and preflights**. 
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: pi
+  annotations:
+    "kots.io/hook-delete-policy": "hook-succeeded, hook-failed"
+spec:
+  template:
+    spec:
+      containers:
+        - name: pi
+          image: perl
+          command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
+      restartPolicy: Never
+  backoffLimit: 4
+```
-## v2024.06.21-2
+
+================
+File: docs/vendor/packaging-embedded-kubernetes.mdx
+================
+import Installers from "../partials/kurl/_installers.mdx"
+import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"
-Released on June 21, 2024
+
+# Creating a kURL Installer
-### New Features {#new-features-v2024-06-21-2}
-* Adds the ability to pull public images through the proxy registry without credentials using the prefix `proxy.replicated.com/anon`. For example `docker pull proxy.replicated.com/anon/docker.io/library/mysql:latest`.
+
-## v2024.06.17-1
+
+This topic describes how to create a kURL installer spec in the Replicated Vendor Portal to support installations with Replicated kURL.
-Released on June 17, 2024
+
+For information about creating kURL installers with the Replicated CLI, see [installer create](/reference/replicated-cli-installer-create).
-### New Features {#new-features-v2024-06-17-1}
-* Replicated SDK support bundles details are now visible in Troubleshoot.
+
+## Overview
-## v2024.06.13-0
+
+
-Released on June 13, 2024
+
+For more information about kURL, see [Introduction to kURL](kurl-about).
-### New Features {#new-features-v2024-06-13-0}
-* Adds a direct link to the **License Fields** page from the **Manage Customer** and **Create New Customer** pages if the user has no custom license fields configured under the "Custom fields" section.
+
+## Create an Installer
-## v2024.06.12-0
+
+To distribute a kURL installer alongside your application, you can promote the installer to a channel or include the installer as a manifest file within a given release:
-Released on June 12, 2024
+
+| Method | Description |
+|--------|-------------|
+| Promote the installer to a channel | The installer is promoted to one or more channels. All releases on the channel use the kURL installer that is currently promoted to that channel. There can be only one active kURL installer on each channel at a time. The benefit of promoting an installer to one or more channels is that you can create a single installer without needing to add a separate installer for each release. However, because all the releases on the channel will use the same installer, problems can occur if all releases are not tested with the given installer. |
+| Include the installer in a release (Beta) | The installer is included as a manifest file in a release. This makes it easier to test the installer and release together. It also makes it easier to know which installer spec customers are using based on the application version that they have installed. |
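+
+With either method, the installer spec itself is the same kind of YAML document. The following is a minimal sketch of an installer spec (the add-on selection and versions shown are illustrative; see [kURL Add-on Requirements and Recommendations](#requirements-and-recommendations) below for guidance):
+
+```yaml
+apiVersion: cluster.kurl.sh/v1beta1
+kind: Installer
+metadata:
+  name: my-installer
+spec:
+  kubernetes:
+    version: 1.27.x
+  containerd:
+    version: 1.6.x
+  flannel:
+    version: 0.25.x
+  openebs:
+    version: 3.10.x
+    isLocalPVEnabled: true
+    localPVStorageClassName: local
+  # the KOTS add-on (kotsadm) provisions the Admin Console
+  kotsadm:
+    version: latest
+```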
-### Improvements {#improvements-v2024-06-12-0}
-* Improves mobile styles on the table views on the **Customers** and **Channels** pages, as well as some mobile styles on the **Releases** page.
+
+### Promote the Installer to a Channel {#channel}
-## v2024.05.30-7
+
+To promote a kURL installer to a channel:
-Released on May 30, 2024
+
+1. In the [Vendor Portal](https://vendor.replicated.com), click **kURL Installers**.
-### Bug Fixes {#bug-fixes-v2024-05-30-7}
-* Fixes incorrectly displayed "No records to display" message, which appeared on the **Cluster History** page while loading data.
+
+1. On the **kURL Installers** page, click **Create kURL installer**.
-## v2024.05.30-5
+
+   ![vendor portal kurl installers page](/images/kurl-installers-page.png)
-Released on May 30, 2024
+
+   [View a larger version of this image](/images/kurl-installers-page.png)
-### New Features {#new-features-v2024-05-30-5}
-* Adds Sonatype Nexus Repository to the list of providers on the **Images** page.
-* Adds support for linking and proxying images from anonymous registries.
+
+1. Edit the file to customize the installer. For guidance on which add-ons to choose, see [Requirements and Recommendations](#requirements-and-recommendations) below.
-## v2024.05.28-3
+
+   You can also go to the landing page at [kurl.sh](https://kurl.sh/) to build an installer then copy the provided YAML:
-Released on May 28, 2024
+
+   ![kurl.sh landing page](/images/kurl-build-an-installer.png)
-### New Features {#new-features-v2024-05-28-3}
-* Add support for Oracle OKE 1.29.
+
+   [View a larger version of this image](/images/kurl-build-an-installer.png)
-### Bug Fixes {#bug-fixes-v2024-05-28-3}
-* Fix Compatibility Matrix available credits rounding.
+
+1. Click **Save installer**. You can continue to edit your file until it is promoted.
-## v2024.05.28-0
+
+1. Click **Promote**. In the **Promote Installer** dialog that opens, edit the fields:
-Released on May 28, 2024
+
+   ![promote installer dialog](/images/promote-installer.png)
+
+   [View a larger version of this image](/images/promote-installer.png)
+
+    <table>
+      <tr>
+        <th>Field</th>
+        <th>Description</th>
+      </tr>
+      <tr>
+        <td>Channel</td>
+        <td>Select the channel or channels where you want to promote the installer.</td>
+      </tr>
+      <tr>
+        <td>Version label</td>
+        <td>Enter a version label for the installer.</td>
+      </tr>
+    </table>
    -Released on May 24, 2024 +1. Click **Promote** again. The installer appears on the **kURL Installers** page. -### New Features {#new-features-v2024-05-24-6} -* Added support for Sonatype Nexus registry. + To make changes after promoting, create and promote a new installer. -## v2024.05.24-2 +### Include an Installer in a Release (Beta) {#release} -Released on May 24, 2024 +To include the kURL installer in a release: -### Bug Fixes {#bug-fixes-v2024-05-24-2} -* Fixes a bug that caused version string for Replicated SDK chart have an invalid "v" prefix. +1. In the [Vendor Portal](https://vendor.replicated.com), click **Releases**. Then, either click **Create Release** to create a new release, or click **Edit YAML** to edit an existing release. -## v2024.05.23-2 + The YAML editor opens. -Released on May 23, 2024 +1. Create a new file in the release with `apiVersion: cluster.kurl.sh/v1beta1` and `kind: Installer`: -### Bug Fixes {#bug-fixes-v2024-05-23-2} -* Adds validation to compatibility matrix object-store add-on bucket prefix input. + ```yaml + apiVersion: cluster.kurl.sh/v1beta1 + kind: Installer + metadata: + name: "latest" + spec: + + ``` -## v2024.05.21-1 +1. Edit the file to customize the installer. For guidance on which add-ons to choose, see [ kURL Add-on Requirements and Recommendations](#requirements-and-recommendations) below. -Released on May 21, 2024 + You can also go to the landing page at [kurl.sh](https://kurl.sh/) to build an installer then copy the provided YAML: -### New Features {#new-features-v2024-05-21-1} -* Adds API support for Oracle Cloud Infrastructure Container Engine for Kubernetes (OKE) to compatibility matrix. + kurl.sh landing page -### Bug Fixes {#bug-fixes-v2024-05-21-1} -* Fixes a bug where users could not restore password policies to default. -* Disables the edit and archive channel options and displays helpful hover text on the **Channels** page table view when the user does not have permission to edit channels. -* Fixes a bug that caused "airgap:true" or "airgap:false" customer searches to fail with error 500. + [View a larger version of this image](/images/kurl-build-an-installer.png) -## v2024.05.21-0 +1. Click **Save**. This saves a draft that you can continue to edit until you promote it. -Released on May 21, 2024 +1. Click **Promote**. -### New Features {#new-features-v2024-05-21-0} -* Compatibility matrix automatically sends an email notification to team admins when a team is low on credits. + To make changes after promoting, create a new release. -## v2024.05.20-1 +## kURL Add-on Requirements and Recommendations {#requirements-and-recommendations} -Released on May 20, 2024 +KURL includes several add-ons for networking, storage, ingress, and more. The add-ons that you choose depend on the requirements for KOTS and the unique requirements for your application. For more information about each add-on, see the open source [kURL documentation](https://kurl.sh/docs/introduction/). -### New Features {#new-features-v2024-05-20-1} -* Adds support for IP dual-stack Kind clusters to compatibility matrix. +When creating a kURL installer, consider the following requirements and guidelines for kURL add-ons: -## v2024.05.16-3 +- You must include the KOTS add-on to support installation with KOTS and provision the KOTS Admin Console. See [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) in the kURL documentation. -Released on May 16, 2024 +- To support the use of KOTS snapshots, Velero must be installed in the cluster. 
Replicated recommends that you include the Velero add-on in your kURL installer so that your customers do not have to manually install Velero. -### Bug Fixes {#bug-fixes-v2024-05-16-3} -* Fixes an issue that would cause embedded cluster installs to fail with error 404 when downloading public files. + :::note + During installation, the Velero add-on automatically deploys internal storage for backups. The Velero add-on requires the MinIO or Rook add-on to deploy this internal storage. If you include the Velero add-on without either the MinIO add-on or the Rook add-on, installation fails with the following error message: `Only Rook and Longhorn are supported for Velero Internal backup storage`. + ::: -## v2024.05.14-2 +- You must select storage add-ons based on the KOTS requirements and the unique requirements for your application. For more information, see [About Selecting Storage Add-ons](packaging-installer-storage). -Released on May 14, 2024 +- kURL installers that are included in releases must pin specific add-on versions and cannot pin `latest` versions or x-ranges (such as 1.2.x). Pinning specific versions ensures the most testable and reproducible installations. For example, pin `Kubernetes 1.23.0` in your manifest to ensure that version 1.23.0 of Kubernetes is installed. For more information about pinning Kubernetes versions, see [Versions](https://kurl.sh/docs/create-installer/#versions) and [Versioned Releases](https://kurl.sh/docs/install-with-kurl/#versioned-releases) in the kURL open source documentation. -### New Features {#new-features-v2024-05-14-2} -* Adds Beta support for collecting telemetry from instances running in air gap environments with no outbound internet access. For more information, see [Collecting Telemetry for Air Gap Instances](/vendor/telemetry-air-gap). + :::note + For kURL installers that are _not_ included in a release, pinning specific versions of Kubernetes and Kubernetes add-ons in the kURL installer manifest is not required, though is highly recommended. + ::: -### Improvements {#improvements-v2024-05-14-2} -* Allows installations with the Helm CLI to upload a support bundle on the **Customer Reporting** page. -* Improves mobile responsiveness of the sign up and login flow in the vendor portal. +- After you configure a kURL installer, Replicated recommends that you customize host preflight checks to support the installation experience with kURL. Host preflight checks help ensure successful installation and the ongoing health of the cluster. For more information about customizing host preflight checks, see [Customizing Host Preflight Checks for Kubernetes Installers](preflight-host-preflights). -## v2024.05.14-1 +- For installers included in a release, Replicated recommends that you define a preflight check in the release to ensure that the target kURL installer is deployed before the release is installed. For more information about how to define preflight checks, see [Defining Preflight Checks](preflight-defining). + + For example, the following preflight check uses the `yamlCompare` analyzer with the `kots.io/installer: "true"` annotation to compare the target kURL installer that is included in the release against the kURL installer that is currently deployed in the customer's environment. For more information about the `yamlCompare` analyzer, see [`yamlCompare`](https://troubleshoot.sh/docs/analyze/yaml-compare/) in the open source Troubleshoot documentation. 
-Released on May 14, 2024 + ```yaml + apiVersion: troubleshoot.sh/v1beta2 + kind: Preflight + metadata: + name: installer-preflight-example + spec: + analyzers: + - yamlCompare: + annotations: + kots.io/installer: "true" + checkName: Kubernetes Installer + outcomes: + - fail: + message: The kURL installer for this version differs from what you have installed. It is recommended that you run the updated kURL installer before deploying this version. + uri: https://kurl.sh/my-application + - pass: + message: The kURL installer for this version matches what is currently installed. + ``` -### Bug Fixes {#bug-fixes-v2024-05-14-1} -* Fixes a bug that would cause downloaded licenses to not include custom hostname in the `endpoint` field. +================ +File: docs/vendor/packaging-include-resources.md +================ +# Conditionally Including or Excluding Resources -## v2024.05.10-1 +This topic describes how to include or exclude optional application resources based on one or more conditional statements. The information in this topic applies to Helm chart- and standard manifest-based applications. -Released on May 10, 2024 +## Overview -### New Features {#new-features-v2024-05-10-1} -* Adds support for creating compatibility matrix ports with wildcard domains and TLS certificates. +Software vendors often need a way to conditionally deploy resources for an application depending on users' configuration choices. For example, a common use case is giving the user the choice to use an external database or an embedded database. In this scenario, when a user chooses to use their own external database, it is not desirable to deploy the embedded database resources. -## v2024.05.10-0 +There are different options for creating conditional statements to include or exclude resources based on the application type (Helm chart- or standard manifest-based) and the installation method (Replicated KOTS or Helm CLI). -Released on May 10, 2024 +### About Replicated Template Functions -### Improvements {#improvements-v2024-05-10-0} -* Moves release information for the bundle under "Versions Behind" on the **Support Bundle Analysis** page. +For applications deployed with KOTS, Replicated template functions are available for creating the conditional statements that control which optional resources are deployed for a given user. Replicated template functions can be used in standard manifest files such as Replicated custom resources or Kubernetes resources like StatefulSets, Secrets, and Services. -### Bug Fixes {#bug-fixes-v2024-05-10-0} -* Fixes a bug where product options are not updated correctly when changing installation type in the create a support issue modal. +For example, the Replicated ConfigOptionEquals template functions returns true if the specified configuration option value is equal to a supplied value. This is useful for creating conditional statements that include or exclude a resource based on a user's application configuration choices. -## v2024.05.08-0 +For more information about the available Replicated template functions, see [About Template Functions](/reference/template-functions-about). -Released on May 8, 2024 +## Include or Exclude Helm Charts -### New Features {#new-features-v2024-05-08-0} -* Adds "Not Recommended" label to the "GitOps Enabled" option on the **Manage Customer** and **Create New Customer** pages. -* Improves Airgap Bundle Contents modal size for long image names. -* Shows the Replicated domain next to the headers on the **Custom Domains** page. 
+This section describes methods for including or excluding Helm charts from your application deployment.

-### Bug Fixes {#bug-fixes-v2024-05-08-0}
-* Removes native sorting on the Customers and Instances tables.

+### Helm Optional Dependencies

-## v2024.05.06-2

+Helm supports adding a `condition` field to dependencies in the Helm chart `Chart.yaml` file to include subcharts based on one or more boolean values evaluating to true.

-Released on May 6, 2024

+For more information about working with dependencies and defining optional dependencies for Helm charts, see [Dependencies](https://helm.sh/docs/chart_best_practices/dependencies/) in the Helm documentation.

-### Bug Fixes {#bug-fixes-v2024-05-06-2}
-* Adds validation when creating and deleting license fields.

+### HelmChart `exclude` Field

-## v2024.05.06-1

+For Helm chart-based applications installed with KOTS, you can configure KOTS to exclude certain Helm charts from deployment using the HelmChart custom resource [`exclude`](/reference/custom-resource-helmchart#exclude) field. When the `exclude` field is set to a conditional statement, KOTS excludes the chart if the condition evaluates to `true`.

-Released on May 6, 2024

+The following example uses the `exclude` field and the ConfigOptionEquals template function to exclude a postgresql Helm chart when the `external_postgres` option is selected on the Replicated Admin Console **Config** page:

-### New Features {#new-features-v2024-05-06-1}
-* Adds additional bundle validation when uploading a support bundle for air gap telemetry (alpha feature).

+```yaml
+apiVersion: kots.io/v1beta2
+kind: HelmChart
+metadata:
+  name: postgresql
+spec:
+  exclude: 'repl{{ ConfigOptionEquals `postgres_type` `external_postgres` }}'
+  chart:
+    name: postgresql
+    chartVersion: 12.1.7
+    releaseName: samplechart-release-1
+```

-## v2024.05.03-1

+## Include or Exclude Standard Manifests

-Released on May 3, 2024

+For standard manifest-based applications installed with KOTS, you can use the `kots.io/exclude` or `kots.io/when` annotations to include or exclude resources based on a conditional statement.

-### Bug Fixes {#bug-fixes-v2024-05-03-1}
-* Fixes an issue that caused compatibility matrix addons to stay in a pending state for multi-node clusters.

+By default, if neither `kots.io/exclude` nor `kots.io/when` is present on a resource, the resource is included.

-## v2024.05.01-2

+### Requirements

-Released on May 1, 2024

+The `kots.io/exclude` and `kots.io/when` annotations have the following requirements:

-### New Features {#new-features-v2024-05-01-2}
-* Adds support for creating RKE2 clusters with compatibility matrix using the vendor portal UI.

+* Only one of the `kots.io/exclude` or `kots.io/when` annotations can be present on a single resource. If both are present, the `kots.io/exclude` annotation is applied, and the `kots.io/when` annotation is ignored.

-## v2024.04.29-0

+* The values of the `kots.io/exclude` and `kots.io/when` annotations must be wrapped in quotes. This is because Kubernetes annotations must be strings. For more information about working with Kubernetes annotations, see [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) in the Kubernetes documentation.

-Released on April 29, 2024

+### `kots.io/exclude`

-### New Features {#new-features-v2024-04-29-0}
-* Adds support for creating RKE2 clusters with compatibility matrix using the Vendor API v3.
+When the `kots.io/exclude: '<bool>'` annotation is present on a resource and evaluates to true, the resource is excluded from the deployment.

-## v2024.04.26-5

+The following example uses the `kots.io/exclude` annotation and the ConfigOptionEquals template function to exclude the postgresql `StatefulSet` when an `install_postgres` checkbox on the Admin Console **Config** page is disabled:

-Released on April 26, 2024

+```yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: postgresql
+  annotations:
+    kots.io/exclude: '{{repl ConfigOptionEquals "install_postgres" "0" }}'
+  labels:
+    app: postgresql
+spec:
+  selector:
+    matchLabels:
+      app: postgresql
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      labels:
+        app: postgresql
+    spec:
+      containers:
+      - name: postgresql
+        image: "postgres:9.6"
+        imagePullPolicy: ""
+...
+```

-### Bug Fixes {#bug-fixes-v2024-04-26-5}
-* Fixes Embedded Cluster support on the compatibility matrix create cluster page.

+### `kots.io/when`

-## v2024.04.26-3

+When the `kots.io/when: '<bool>'` annotation is present on a resource and evaluates to true, the resource is included in the deployment.

-Released on April 26, 2024

+The following example uses the `kots.io/when` annotation and the ConfigOptionEquals template function to include the postgresql `StatefulSet` resource when the `install_postgres` checkbox on the Admin Console **Config** page is enabled:

-### Bug Fixes {#bug-fixes-v2024-04-26-3}
-* Displays error when creating an embedded cluster with the compatibility matrix and the `--version` flag is a non-numeric string.

+```yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: postgresql
+  annotations:
+    kots.io/when: '{{repl ConfigOptionEquals "install_postgres" "1" }}'
+  labels:
+    app: postgresql
+spec:
+  selector:
+    matchLabels:
+      app: postgresql
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      labels:
+        app: postgresql
+    spec:
+      containers:
+      - name: postgresql
+        image: "postgres:9.6"
+        imagePullPolicy: ""
+...
+```

-## v2024.04.26-1

+================
+File: docs/vendor/packaging-ingress.md
+================
+# Adding Cluster Ingress Options

-Released on April 26, 2024

+When delivering a configurable application, ingress can be challenging as it is very cluster-specific.
+Below is an example of a flexible `ingress.yaml` file designed to work in most Kubernetes clusters, including embedded clusters created with Replicated kURL.

-### Bug Fixes {#bug-fixes-v2024-04-26-1}
-* Only users with the `team/support-issues/write` RBAC policy can submit support tickets on the **Support Bundle Analysis** page.

+## Example

-## v2024.04.25-0

+The following example includes an Ingress resource with a single host-based routing rule.
+The resource works in both existing clusters and kURL clusters.

-Released on April 25, 2024

+### Config

-### Bug Fixes {#bug-fixes-v2024-04-25-0}
-* Users can sort customers by the date they were created on the **Customers** page.

+A config option `enable_ingress` has been provided to allow the end-user to choose whether or not to enable the Ingress resource.
+In some clusters a custom Ingress resource may not be desired. For example, when an ingress controller is not available, other means of exposing services may be preferred.

-## v2024.04.23-1

+An `annotations` text area has been made available for the end-user to add additional annotations to the ingress.
+Here, cluster-specific annotations can be added to support a variety of ingress controllers.
+For example, when using the [ALB ingress controller](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html) in AWS, it is necessary to include the `kubernetes.io/ingress.class: alb` annotation on your Ingress resource. -Released on April 23, 2024 +```yaml +apiVersion: kots.io/v1beta1 +kind: Config +metadata: + name: example-application +spec: + groups: + - name: ingress + title: Ingress + items: + - name: enable_ingress + type: bool + title: Enable Kubernetes Ingress + help_text: | + When checked, deploy the provided Kubernetes Ingress resource. + default: "1" + - name: hostname + type: text + title: Hostname + help_text: | + Use this field to provide a hostname for your Example Application installation. + required: true + when: repl{{ ConfigOptionEquals "enable_ingress" "1" }} + - name: allow_http + type: bool + title: Allow Unsecured Access through HTTP + help_text: | + Uncheck this box to disable HTTP traffic between the client and the load balancer. + default: "1" + when: repl{{ ConfigOptionEquals "enable_ingress" "1" }} + - name: annotations + type: textarea + title: Annotations + help_text: | + Use this textarea to provide annotations specific to your ingress controller. + For example, `kubernetes.io/ingress.class: alb` when using the ALB ingress controller. + when: repl{{ ConfigOptionEquals "enable_ingress" "1" }} +``` -### Bug Fixes {#bug-fixes-v2024-04-23-1} -* When a user selects a customer-supplied Kubernetes cluster in the support form, the end of life (EOL) alert about the deprecated Docker and Weave kURL add-ons will not apply for the latest channel kURL installer. +### Ingress -## v2024.04.22-1 +For ingress, you must create two separate resources. +The first of which will be deployed to existing cluster installations, while the second will only be deployed to an embedded cluster. +Both of these resources are selectively excluded with the [`exclude` annotation](packaging-include-resources). -Released on April 22, 2024 +```yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: example-application-ingress + annotations: + kots.io/exclude: '{{repl or (ConfigOptionEquals "enable_ingress" "1" | not) IsKurl }}' + kubernetes.io/ingress.allow-http: '{{repl ConfigOptionEquals "allow_http" "1" }}' + nginx.ingress.kubernetes.io/force-ssl-redirect: '{{repl ConfigOptionEquals "allow_http" "1" | not }}' + kots.io/placeholder: repl{{ printf "'true'" }}repl{{ ConfigOption "annotations" | nindent 4 }} +spec: + rules: + - host: repl{{ or (ConfigOption "hostname") "~" }} + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + service: + name: nginx + port: + number: 80 +``` -### Bug Fixes {#bug-fixes-v2024-04-22-1} -* Fixes a bug with the 'Reset' filters button on the **Customers** page. 
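+The second Ingress resource below is the embedded cluster variant: its `kots.io/exclude` annotation uses `not IsKurl` so that it is deployed only to kURL clusters, and it adds a TLS configuration that references the `kotsadm-tls` secret: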
+```yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: example-application-ingress-embedded + annotations: + kots.io/exclude: '{{repl or (ConfigOptionEquals "enable_ingress" "1" | not) (not IsKurl) }}' + kubernetes.io/ingress.allow-http: '{{repl ConfigOptionEquals "allow_http" "1" }}' + nginx.ingress.kubernetes.io/force-ssl-redirect: '{{repl ConfigOptionEquals "allow_http" "1" | not }}' + kots.io/placeholder: repl{{ printf "'true'" }}repl{{ ConfigOption "annotations" | nindent 4 }} +spec: + tls: + - hosts: + - repl{{ ConfigOption "hostname" }} + secretName: kotsadm-tls + rules: + - host: repl{{ ConfigOption "hostname" }} + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + service: + name: nginx + port: + number: 80 +``` -## v2024.04.18-2 +================ +File: docs/vendor/packaging-installer-storage.mdx +================ +import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" -Released on April 18, 2024 +# About Selecting Storage Add-ons -### Bug Fixes {#bug-fixes-v2024-04-18-2} -* Fixes styling on the 'Reset password' modal and 'Trial expired' modal on the **Login** page. -* Fixes a stray '0' rendering under the "Latest Release" sections on the **Channels** page for Builders Plan users. + +This topic provides guidance for selecting Replicated kURL add-ons to provide highly available data storage in kURL clusters. For additional guidance, see [Choosing a PV Provisioner](https://kurl.sh/docs/create-installer/choosing-a-pv-provisioner) in the open source kURL documentation. -## v2024.04.16-1 +## Overview -Released on April 16, 2024 +kURL includes add-ons for object storage and for dynamic provisioning of PersistentVolumes (PVs) in clusters. You configure these add-ons in your kURL installer to define how data for your application and data for Replicated KOTS is managed in the cluster. -### New Features {#new-features-v2024-04-16-1} -* Adds support for Postgres as an addon for EKS clusters in compatibility matrix. +The following lists the kURL add-ons for data storage: +* **MinIO**: MinIO is an open source, S3-compatible object store. See [MinIO Add-on](https://kurl.sh/docs/add-ons/minio) in the kURL documentation. +* **Rook**: Rook provides dynamic PV provisioning of distributed Ceph storage. Ceph is a distributed storage system that provides S3-compatible object storage. See [Rook Add-on](https://kurl.sh/docs/add-ons/rook) in the kURL documentation. +* **OpenEBS**: OpenEBS Local PV creates a StorageClass to dynamically provision local PersistentVolumes (PVs) in a cluster. See [OpenEBS Add-on](https://kurl.sh/docs/add-ons/openebs) in the kURL documentation. +* **Longhorn**: Longhorn is an open source distributed block storage system for Kubernetes. See [Longhorn Add-on](https://kurl.sh/docs/add-ons/longhorn) in the kURL documentation. -## v2024.04.12-5 + :::important + The Longhorn add-on is deprecated and not supported in production clusters. If you are currently using Longhorn, you must migrate data from Longhorn to either OpenEBS or Rook. For more information about migrating from Longhorn, see [Migrating to Change CSI Add-On](https://kurl.sh/docs/install-with-kurl/migrating-csi) in the kURL documentation. + ::: -Released on April 12, 2024 +## About Persistent Storage for KOTS -### New Features {#new-features-v2024-04-12-5} -* Adds the ability to expose NodePorts on VM clusters in compatibility matrix. -* Adds the ability to attach new S3 buckets to EKS clusters in compatibility matrix. 
+This section describes the default storage requirements for KOTS. Each of the [Supported Storage Configurations](#supported-storage-configurations) described below satisfy these storage requirements for KOTS. -## v2024.04.11-2 +### rqlite StatefulSet -Released on April 11, 2024 +KOTS deploys a rqlite StatefulSet to store the version history, application metadata and other small amounts of data needed to manage the application(s). No configuration is required to deploy rqlite. -### Bug Fixes {#bug-fixes-v2024-04-11-2} -* Eliminates excessive page reloads on the **Support Bundle Analysis** page that would cause users to lose their place. +Rqlite is a distributed relational database that uses SQLite as its storage engine. For more information, see the [rqlite](https://rqlite.io/) website. -## v2024.04.11-1 +### Object Storage or Local PV -Released on April 11, 2024 +By default, KOTS requires an S3-compatible object store to store the following: +* Support bundles +* Application archives +* Backups taken with Replicated snapshots that are configured to NFS or host path storage destinations -### Bug Fixes {#bug-fixes-v2024-04-11-1} -* Fix selected default instance type on Compatibility Matrix. +Both the Rook add-on and the MinIO add-on satisfy this object store requirement. -## v2024.04.11-0 +Alternatively, you can configure KOTS to be deployed without object storage. This installs KOTS as a StatefulSet using a persistent volume (PV) for storage. When there is no object storage available, KOTS stores support bundles, application archives, and snapshots that have an NFS or host path storage destination in the local PV. In this case, the OpenEBS add-on can be included to provide the local PV storage. For more information, see [Installing Without Object Storage](/enterprise/installing-stateful-component-requirements). -Released on April 11, 2024 +### Distributed Storage in KOTS v1.88 and Earlier -### Bug Fixes {#bug-fixes-v2024-04-11-0} -* Fixes an issue that prevented add-ons from multi-node compatibility matrix clusters from working properly. +KOTS v1.88 and earlier requires distributed storage. To support multi-node clusters, kURL installers that use a KOTS version earlier than v1.88 in the KOTS add-on must use the Rook add-on for distributed storage. For more information, see [Rook Ceph](#rook-ceph) below. -## v2024.04.10-0 +## Factors to Consider When Choosing a Storage Configuration -Released on April 10, 2024 +The object store and/or PV provisioner add-ons that you choose to include in your kURL installer depend on the following factors: +* **KOTS storage requirements**: The storage requirements for the version of the KOTS add-on that you include in the spec. For example, KOTS v1.88 and earlier requires distributed storage. +* **Other add-on storage requirements**: The storage requirements for the other add-ons that you include in the spec. For example, the Velero add-on requires object storage to deploy the default internal storage for snapshots during installation. +* **Application storage requirements**: The storage requirements for your application. For example, you might include different add-ons depending on if your application requires a single or multi-node cluster, or if your application requires distributed storage. -### New Features {#new-features-v2024-04-10-0} -* Allows sev 1 and 2 support issues to be submitted for the Replicated host service, including the compatibility matrix and vendor portal. 
+## Supported Storage Configurations -### Improvements {#improvements-v2024-04-10-0} -* Highlights required fields that are not filled on the support issue form on the **Support** page. +This section describes the supported storage configurations for embedded clusters provisioned by kURL. -## v2024.04.09-2 +### OpenEBS Without Object Storage (Single Node) {#single-node} -Released on April 9, 2024 +If your application can be deployed to a single node cluster and does not require object storage, then you can choose to exclude object storage and instead use the OpenEBS add-on only to provide local storage on the single node in the cluster. -### New Features {#new-features-v2024-04-09-2} -* Adds advanced cluster creation form for compatibility matrix. +When configured to use local PV storage instead of object storage, KOTS stores support bundles, application archives, and snapshots that have an NFS or host path storage destination in a PV on the single node in the cluster. -## v2024.04.04-0 +#### Requirements -Released on April 4, 2024 +To use the OpenEBS add-on without object storage, your kURL installer must meet the following requirements: -### New Features {#new-features-v2024-04-04-0} -* Adds channel sequence and updates row styles on the **Release History** page. +* When neither the MinIO nor the Rook add-on are included in the kURL installer, you must set the `disableS3` field to `true` in the KOTS add-on. Setting `disableS3: true` in the KOTS add-on allows KOTS to use the local PV storage provided by OpenEBS instead of using object storage. For more information, see [Effects of the disableS3 Flag](https://kurl.sh/docs/add-ons/kotsadm#effects-of-the-disables3-flag) in _KOTS Add-on_ in the kURL documentation. -## v2024.04.02-2 +* When neither the MinIO nor the Rook add-on are included in the kURL installer, the Velero add-on cannot be included. This is because, during installation, the Velero add-on automatically deploys internal storage for backups taken with the Replicated snapshots feature. The Velero add-on requires object storage to deploy this internal storage. If you include the Velero add-on without either the MinIO add-on or the Rook add-on, installation fails with the following error message: `Only Rook and Longhorn are supported for Velero Internal backup storage`. -Released on April 2, 2024 + When the Velero add-on is not included, your users must install and configure Velero on the cluster after installation in order to use Replicated snapshots for backup and restore. See [About Backup and Restore with Snapshots](/vendor/snapshots-overview). -### Bug Fixes {#bug-fixes-v2024-04-02-2} -* Fixes an issue that caused collisions in kubeconfig context naming when using the `replicated cluster kubeconfig` command resulting in contexts being overwritten. + For a storage configuration for single node clusters that supports the use of the Velero add-on, see [OpenEBS with MinIO (Single or Multi-Node)](#openebs-minio) below. -## v2024.04.01-3 +#### Example -Released on April 1, 2024 +The following is an example installer that uses OpenEBS v3.3.x with Local PV for local storage and disables object storage for KOTS: -### New Features {#new-features-v2024-04-01-3} -* Makes the granular resource status view generally available (GA). For more information, see [Instance Details](/vendor/instance-insights-details#current-state). +```yaml +apiVersion: "cluster.kurl.sh/v1beta1" +kind: "Installer" +metadata: + name: "local" +spec: + ... 
+  openebs:
+    version: "3.3.x"
+    isLocalPVEnabled: true
+    localPVStorageClassName: "default"
+  kotsadm:
+    disableS3: true
+```

-## v2024.03.27-3

+For more information about properties for the OpenEBS add-on, see [OpenEBS](https://kurl.sh/docs/add-ons/openebs) in the kURL documentation.

-Released on March 27, 2024

+### OpenEBS with MinIO (Single or Multi-Node) {#openebs-minio}

-### Improvements {#improvements-v2024-03-27-3}
-* Moves the **Audit Log** page to be nested under the **Team** section. Shows a message to the user if they visit the **Audit Log** from the account dropdown in the top right, and informs them that the **Audit Log** will be permanently moving to the **Team** section in the near future.

+Using the OpenEBS add-on with the MinIO add-on provides a highly available data storage solution for multi-node clusters that is lighter-weight compared to using Rook Ceph. Replicated recommends that you use OpenEBS Local PV with MinIO for multi-node clusters if your application does _not_ require distributed storage. If your application requires distributed storage, see [Rook Ceph](#rook-ceph) below.

-## v2024.03.27-1

+When both the MinIO and OpenEBS add-ons are included, KOTS stores support bundles, application archives, and snapshots that have an NFS or host path storage destination in MinIO object storage. Additionally, KOTS uses OpenEBS Local PV to provision the PVs on each node that MinIO uses for local storage.

-Released on March 27, 2024

+#### Requirement

-### New Features {#new-features-v2024-03-27-1}
-* Allows user to attach both existing support bundles and upload new bundles on the support request form on the **Support** page.
-* Displays the latest release in the channel at time of bundle collection and the release sequence that was installed at time of bundle collection on **Support Bundle Analysis** pages.

+To use both the OpenEBS add-on and the MinIO add-on, the KOTS add-on must use KOTS v1.89 or later.

-## v2024.03.27-0

+KOTS v1.88 and earlier requires distributed storage, which is not provided by OpenEBS Local PV. To support multi-node clusters, kURL installers that use a KOTS version earlier than v1.88 in the KOTS add-on must use the Rook add-on for distributed storage. See [Rook Ceph](#rook-ceph) below.

-Released on March 27, 2024

+#### Example

-### Bug Fixes {#bug-fixes-v2024-03-27-0}
-* Shows certificate errors on the **Custom Domains** page if certificates cannot be renewed.

+The following is an example installer that uses both the OpenEBS add-on version 3.3.x and MinIO add-on version `2022-09-07T22-25-02Z`:

-## v2024.03.26-5

+```yaml
+apiVersion: "cluster.kurl.sh/v1beta1"
+kind: "Installer"
+metadata:
+  name: "openebs-with-minio"
+spec:
+  ...
+  openebs:
+    version: "3.3.x"
+    isLocalPVEnabled: true
+    localPVStorageClassName: "default"
+  minio:
+    version: "2022-09-07T22-25-02Z"
+```

-Released on March 26, 2024

+For more information about properties for the OpenEBS and MinIO add-ons, see [OpenEBS](https://kurl.sh/docs/add-ons/openebs) and [MinIO](https://kurl.sh/docs/add-ons/minio) in the kURL documentation.

-### New Features {#new-features-v2024-03-26-5}
-* Compatibility matrix supports Standard_DS and GPU based instance types for AKS clusters.

+### Rook Ceph (Multi-Node) {#rook-ceph}

-### Improvements {#improvements-v2024-03-26-5}
-* Removes the "Download license" and "Install Instructions" buttons from the **Instance Details** page, as they are not relevant on that page.
- -## v2024.03.26-1 - -Released on March 26, 2024 - -### Improvements {#improvements-v2024-03-26-1} -* Changes the **Instances** option in the **Download CSV** dropdown on the **Customers** page to **Customers + Instances** to better communicate that it is a superset that contains both customers *and* instances. +If your application requires multiple nodes and distributed storage, Replicated recommends that you use the Rook add-on for storage. The Rook add-on creates an S3-compatible, distributed object store with Ceph and also creates a StorageClass for dynamically provisioning PVs. -## v2024.03.25-0 +#### Requirement -Released on March 25, 2024 +Rook versions 1.4.3 and later require a dedicated block device attached to each node in the cluster. The block device must be unformatted and dedicated for use by Rook only. The device cannot be used for other purposes, such as being part of a Raid configuration. If the device is used for purposes other than Rook, then the installer fails, indicating that it cannot find an available block device for Rook. -### New Features {#new-features-v2024-03-25-0} -* Adds a **View bundle contents** link on airgap bundles that have a warning status on the vendor portal **Release History** page. +For Rook Ceph versions earlier than 1.4.3, a dedicated block device is recommended in production clusters. Running distributed storage such as Rook on block devices is recommended for improved data stability and performance. -## v2024.03.22-1 +#### Example -Released on March 22, 2024 +The following is an example installer that uses the Rook add-on version 1.7.x: -### Improvements {#improvements-v2024-03-22-1} -* Hides the "View bundle contents" link on the **Release History** page if an airgap bundle contains no images. To view image lists, rebuild your bundle. +```yaml +apiVersion: "cluster.kurl.sh/v1beta1" +kind: "Installer" +metadata: + name: "distributed" +spec: + ... + rook: + version: "1.7.x" + storageClassName: "distributed" + isSharedFilesystemDisabled: true +``` -## v2024.03.21-8 +For more information about properties for the Rook add-on, see [Rook](https://kurl.sh/docs/add-ons/rook) in the kURL documentation. -Released on March 21, 2024 +================ +File: docs/vendor/packaging-kots-versions.md +================ +# Setting Minimum and Target Versions for KOTS -### Bug Fixes {#bug-fixes-v2024-03-21-8} -* Fixes an issue where online embedded cluster downloads failed if airgap download was not enabled for the customer / license. +This topic describes how to set minimum and target version for Replicated KOTS in the KOTS [Application](/reference/custom-resource-application) custom resource. -## v2024.03.21-5 +## Limitation -Released on March 21, 2024 +Setting minimum and target versions for KOTS is not supported for installations with [Replicated Embedded Cluster](/vendor/embedded-overview). -### New Features {#new-features-v2024-03-21-5} -* Adds the ability to view more granular app status updates in the Instance Activity section on the **Instance Details** page via a tooltip. To get access to this feature, log in to your vendor portal account, select Support > Request a feature, and submit a feature request for "granular app status view". -* Adds a **View bundle contents** link on the **Release History** page to view a list of images in a given airgap bundle. +This is because each version of Embedded Cluster includes a particular version of KOTS. 
Setting `targetKotsVersion` or `minKotsVersion` to a version of KOTS that does not coincide with the version that is included in the specified version of Embedded Cluster will cause Embedded Cluster installations to fail with an error message like: `Error: This version of App Name requires a different version of KOTS from what you currently have installed.`. - :::note - This link appears only for releases built or rebuilt after this implementation. - ::: +To avoid installation failures, do not use `targetKotsVersion` or `minKotsVersion` in releases that support installation with Embedded Cluster. -## v2024.03.21-3 +## Using Minimum KOTS Versions (Beta) -Released on March 21, 2024 +The `minKotsVersion` attribute in the Application custom resource defines the minimum version of Replicated KOTS that is required by the application release. This can be useful when you want to get users who are lagging behind to update to a more recent KOTS version, or if your application requires functionality that was introduced in a particular KOTS version. -### Bug Fixes {#bug-fixes-v2024-03-21-3} -* Fixes pagination on the compatibility matrix **Cluster History** page. +Including this attribute enforces compatibility checks for both new installations and application updates. An installation or update is blocked if the currently deployed KOTS version is earlier than the specified minimum KOTS version. Users must upgrade to at least the specified minimum version of KOTS before they can install or update the application. -## v2024.03.21-1 +### How the Admin Console Handles minKotsVersion -Released on March 21, 2024 +When you promote a new release specifying a minimum KOTS version that is later than what a user currently has deployed, and that user checks for updates, that application version appears in the version history of the Admin Console. However, it is not downloaded. -### Bug Fixes {#bug-fixes-v2024-03-21-1} -* Fixes a bug that could cause the **Channels** page table view to fail to load. +The Admin Console temporarily displays an error message that informs the user that they must update KOTS before downloading the application version. This error also displays when the user checks for updates with the [`kots upstream upgrade`](/reference/kots-cli-upstream-upgrade) command. -## v2024.03.21-0 +KOTS cannot update itself automatically, and users cannot update KOTS from the Admin Console. For more information on how to update KOTS in existing clusters or in kURL clusters, see [Performing Updates in Existing Clusters](/enterprise/updating-app-manager) and [Performing Updates in kURL Clusters](/enterprise/updating-kurl). -Released on March 21, 2024 +After updating KOTS to the minimum version or later, users can use the Admin Console or the [`kots upstream download`](/reference/kots-cli-upstream-download) command to download the release and subsequently deploy it. -### Bug Fixes {#bug-fixes-v2024-03-21-0} -* Fixes a bug that could cause the compatibility matrix **Cluster History** page to fail to load. -## v2024.03.20-0 +## Using Target KOTS Versions -Released on March 20, 2024 +Including `targetKotsVersion` in the Application custom resource enforces compatibility checks for new installations. It blocks the installation if a user tries to install a version of KOTS that is later than the target version. For example, this can prevent users from installing a version of KOTS that you have not tested yet. -### New Features {#new-features-v2024-03-20-0} -* Adds new cluster addon API. 
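+For reference, the following sketch shows where both fields are set in the Application custom resource. The version values are illustrative only:
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Application
+metadata:
+  name: my-application
+spec:
+  # Installs and updates are blocked if the deployed KOTS version is earlier than this (Beta)
+  minKotsVersion: "1.71.0"
+  # New installations are blocked if they use a KOTS version later than this
+  targetKotsVersion: "1.85.0"
+```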
+If the latest release in a channel includes `targetKotsVersion`, the install command for existing clusters is modified to install that specific version of KOTS. The install command for existing clusters is on the channel card in the [Vendor Portal](https://vendor.replicated.com). -### Bug Fixes {#bug-fixes-v2024-03-20-0} -* Fixes a bug where users with a "proton.me" email domain could enable auto-join for their team. +### How the Admin Console Handles targetKotsVersion -## v2024.03.18-1 +Specifying a `targetKotsVersion` does not prevent an end user from upgrading to a later version of KOTS after the initial installation. -Released on March 18, 2024 +If a new version of the application specifies a later target KOTS version than what is currently installed, users are not prevented from deploying that version of the application. -### Bug Fixes {#bug-fixes-v2024-03-18-1} -* Adds a **Helm CLI** option to the **Install Commands** modal on the **Release History** page. -* Fixes an issue that could cause a draft KOTS release to not contain KOTS specs by default. +If a user's Admin Console is running a version of KOTS that is earlier than the target version specified in a new version of the application, the Admin Console displays a notification in the footer, indicating that a newer supported version of KOTS is available. -## v2024.03.15-2 +### Using Target Versions with kURL -Released on March 15, 2024 +For installations in a cluster created by Replicated kURL, the version of the KOTS add-on must not be later than the target KOTS version specified in the Application custom resource. If the KOTS add-on version is later than the version specified for `targetKotsVersion`, the initial installation fails. -### Bug Fixes {#bug-fixes-v2024-03-15-2} -* Fixes a styling bug in the granular app status tooltip. +For more information about the KOTS add-on, see [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) in the open source kURL documentation. -## v2024.03.14-2 +================ +File: docs/vendor/packaging-private-images.md +================ +# Connecting to an External Registry -Released on March 14, 2024 +This topic describes how to add credentials for an external private registry using the Replicated Vendor Portal or Replicated CLI. Adding an external registry allows you to grant proxy access to private images using the Replicated proxy registry. For more information, see [About the Replicated Proxy Registry](private-images-about). -### Bug Fixes {#bug-fixes-v2024-03-14-2} -* Corrects the `helm package` command provided in the **Add the Replicated SDK to your Helm Chart** dialog. +For information about adding a registry with the Vendor API v3, see [Create an external registry with the specified parameters](https://replicated-vendor-api.readme.io/reference/createexternalregistry) in the Vendor API v3 documentation. -## v2024.03.14-1 +## Supported Registries -Released on March 14, 2024 +Replicated recommends that application vendors use one the following external private registries: -### New Features {#new-features-v2024-03-14-1} -* Adds the ability to view a more granular app status via a tooltip on the **Instance Details** page. To get access to this feature, log in to your vendor portal account, select **Support > Request a feature**, and submit a feature request for "granular app status view". 
+* Amazon Elastic Container Registry (ECR) +* DockerHub +* GitHub Container Registry +* Google Artifact Registry +* Google Container Registry (Deprecated) +* Sonatype Nexus +* Quay.io - :::note - Due to a backend API fix, if the application's status informers are templatized, there might be formatting issues until another app release is promoted. - ::: +These registries have been tested for compatibility with KOTS. -## v2024.03.14-0 +You can also configure access to most other external registries if the registry conforms to the Open Container Initiative (OCI) standard. -Released on March 14, 2024 +## Add Credentials for an External Registry -### Improvements {#improvements-v2024-03-14-0} -* Returns a friendly error message when attempting to download an embedded cluster release with an unknown version. +All applications in your team have access to the external registry that you add. This means that you can use the images in the external registry across multiple apps in the same team. -## v2024.03.13-0 +### Using the Vendor Portal -Released on March 13, 2024 +To add an external registry using the Vendor Portal: -### New Features {#new-features-v2024-03-13-0} -* Adds the ability to search customers by their email address. For more information, see [Filter and Search Customers](/vendor/releases-creating-customer#filter-and-search-customers) in _Creating and Managing Customers_. +1. Log in to the [Vendor Portal](https://vendor.replicated.com) and go to the **Images** page. +1. Click **Add External Registry**. -## v2024.03.12-1 + /images/add-external-registry.png -Released on March 12, 2024 + [View a larger version of this image](/images/add-external-registry.png) -### Improvements {#improvements-v2024-03-12-1} -* Makes the **Gitops Enabled** entitlement false by default when creating a customer. Also updates the description of the **Gitops Enabled** entitlement. +1. In the **Provider** drop-down, select your registry provider. -## v2024.03.11-0 +1. Complete the fields in the dialog, depending on the provider that you chose: -Released on March 11, 2024 + :::note + Replicated stores your credentials encrypted and securely. Your credentials and the encryption key do not leave Replicated servers. + ::: -### Bug Fixes {#bug-fixes-v2024-03-11-0} -* Fixes a bug that could result in a bad URL when downloading an airgap bundle for Replicated kURL from the download portal. + * **Amazon ECR** + + + + + + + + + + + + + + + + + +
+    <table>
+      <tr>
+        <th>Field</th>
+        <th>Instructions</th>
+      </tr>
+      <tr>
+        <td>Hostname</td>
+        <td>Enter the host name for the registry, such as <code>123456689.dkr.ecr.us-east-1.amazonaws.com</code>.</td>
+      </tr>
+      <tr>
+        <td>Access Key ID</td>
+        <td>Enter the Access Key ID for a Service Account User that has pull access to the registry. See <em>Setting up the Service Account User</em>.</td>
+      </tr>
+      <tr>
+        <td>Secret Access Key</td>
+        <td>Enter the Secret Access Key for the Service Account User.</td>
+      </tr>
+    </table>
    -## v2024.03.08-3 + * **DockerHub** -Released on March 8, 2024 + + + + + + + + + + + + + + + + + + + + + +
+    <table>
+      <tr>
+        <th>Field</th>
+        <th>Instructions</th>
+      </tr>
+      <tr>
+        <td>Hostname</td>
+        <td>Enter the host name for the registry, such as <code>index.docker.io</code>.</td>
+      </tr>
+      <tr>
+        <td>Auth Type</td>
+        <td>Select the authentication type for a DockerHub account that has pull access to the registry.</td>
+      </tr>
+      <tr>
+        <td>Username</td>
+        <td>Enter the username for the account.</td>
+      </tr>
+      <tr>
+        <td>Password or Token</td>
+        <td>Enter the password or token for the account, depending on the authentication type you selected.</td>
+      </tr>
+    </table>
    -### Bug Fixes {#bug-fixes-v2024-03-08-3} -* Fixes a bug in the vendor portal UI related to allowing license download when a channel does not have a release. + * **GitHub Container Registry** -## v2024.03.08-2 + + + + + + + + + + + + + + + + + +
+    <table>
+      <tr>
+        <th>Field</th>
+        <th>Instructions</th>
+      </tr>
+      <tr>
+        <td>Hostname</td>
+        <td>Enter the host name for the registry.</td>
+      </tr>
+      <tr>
+        <td>Username</td>
+        <td>Enter the username for an account that has pull access to the registry.</td>
+      </tr>
+      <tr>
+        <td>GitHub Token</td>
+        <td>Enter the token for the account.</td>
+      </tr>
+    </table>
    -Released on March 8, 2024 + * **Google Artifact Registry** + + + + + + + + + + + + + + + + + +
+    <table>
+      <tr>
+        <th>Field</th>
+        <th>Instructions</th>
+      </tr>
+      <tr>
+        <td>Hostname</td>
+        <td>Enter the host name for the registry, such as <code>us-east1-docker.pkg.dev</code>.</td>
+      </tr>
+      <tr>
+        <td>Auth Type</td>
+        <td>Select the authentication type for a Google Cloud Platform account that has pull access to the registry.</td>
+      </tr>
+      <tr>
+        <td>Service Account JSON Key or Token</td>
+        <td>
+          <p>Enter the JSON Key from Google Cloud Platform assigned with the Artifact Registry Reader role, or token for the account, depending on the authentication type you selected.</p>
+          <p>For more information about creating a Service Account, see <em>Access Control with IAM</em> in the Google Cloud documentation.</p>
+        </td>
+      </tr>
+    </table>
    + * **Google Container Registry** + :::important + Google Container Registry is deprecated. For more information, see Container Registry deprecation in the Google documentation. + ::: + + + + + + + + + + + + + +
+    <table>
+      <tr>
+        <th>Field</th>
+        <th>Instructions</th>
+      </tr>
+      <tr>
+        <td>Hostname</td>
+        <td>Enter the host name for the registry, such as <code>gcr.io</code>.</td>
+      </tr>
+      <tr>
+        <td>Service Account JSON Key</td>
+        <td>
+          <p>Enter the JSON Key for a Service Account in Google Cloud Platform that is assigned the Storage Object Viewer role.</p>
+          <p>For more information about creating a Service Account, see <em>Access Control with IAM</em> in the Google Cloud documentation.</p>
+        </td>
+      </tr>
+    </table>
    -### New Features {#new-features-v2024-03-08-2} -* Adds support for E2 family and GPU Tesla T4 on GKE clusters created with the compatibility matrix. + * **Quay.io** -## v2024.03.07-5 + + + + + + + + + + + + + +
+    <table>
+      <tr>
+        <th>Field</th>
+        <th>Instructions</th>
+      </tr>
+      <tr>
+        <td>Hostname</td>
+        <td>Enter the host name for the registry, such as <code>quay.io</code>.</td>
+      </tr>
+      <tr>
+        <td>Username and Password</td>
+        <td>Enter the username and password for an account that has pull access to the registry.</td>
+      </tr>
+    </table>
    -Released on March 7, 2024 + * **Sonatype Nexus** -### Bug Fixes {#bug-fixes-v2024-03-07-5} -* Fixes a bug that caused "An unknown actor performed the action" message to be shown in the Audit Log. + + + + + + + + + + + + + +
+    <table>
+      <tr>
+        <th>Field</th>
+        <th>Instructions</th>
+      </tr>
+      <tr>
+        <td>Hostname</td>
+        <td>Enter the host name for the registry, such as <code>nexus.example.net</code>.</td>
+      </tr>
+      <tr>
+        <td>Username and Password</td>
+        <td>Enter the username and password for an account that has pull access to the registry.</td>
+      </tr>
+    </table>
    -## v2024.03.07-0 + * **Other** -Released on March 7, 2024 + + + + + + + + + + + + + +
+    <table>
+      <tr>
+        <th>Field</th>
+        <th>Instructions</th>
+      </tr>
+      <tr>
+        <td>Hostname</td>
+        <td>Enter the host name for the registry, such as <code>example.registry.com</code>.</td>
+      </tr>
+      <tr>
+        <td>Username and Password</td>
+        <td>Enter the username and password for an account that has pull access to the registry.</td>
+      </tr>
+    </table>
    -### New Features {#new-features-v2024-03-07-0} -* Adds the Replicated embedded cluster (Beta) distribution to the compatibility matrix. For more information, see [Using Embedded Cluster](/vendor/embedded-overview). +1. For **Image name & tag**, enter the image name and image tag and click **Test** to confirm that the Vendor Portal can access the image. For example, `api:v1.0.1` or `my-app/api:v1.01`. -## v2024.03.06-3 +1. Click **Link registry**. -Released on March 6, 2024 +### Using the CLI -### New Features {#new-features-v2024-03-06-3} -* Adds node autoscaling for EKS, GKE and AKS clusters created with the compatibility matrix. +To configure access to private images in an external registry using the Replicated CLI: -## v2024.02.29-3 +1. Install and configure the Replicated CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing). -Released on February 29, 2024 +1. Run the `registry add` command for your external private registry. For more information about the `registry add` command, see [registry add](/reference/replicated-cli-registry-add) in _Replicated CLI_. -### New Features {#new-features-v2024-02-29-3} -* Adds support for nodegroups to compatibility matrix clusters that use VM-based Kubernetes distributions and support multinode. + For example, to add a DockerHub registry: -## v2024.02.29-0 + ```bash + replicated registry add dockerhub --username USERNAME \ + --password PASSWORD + ``` + + Where: + * `USERNAME` is the username for DockerHub credentials with access to the registry. + * `PASSWORD` is the password for DockerHub credentials with access to the registry. -Released on February 29, 2024 + :::note + To prevent the password from being saved in your shell history, Replicated recommends that you use the `--password-stdin` flag and entering the password when prompted. + ::: -### New Features {#new-features-v2024-02-29-0} -* Enables the Embedded Cluster option on the customer license page. For more information, see [Using Embedded Cluster](/vendor/embedded-overview). +## Test External Registry Credentials +Replicated recommends that you test external registry credentials to ensure that the saved credentials on Replicated servers can pull the specified image. -## v2024.02.27-1 +To validate that the configured registry can pull specific images: -Released on February 27, 2024 +```bash +replicated registry test HOSTNAME \ + --image IMAGE_NAME +``` -### New Features {#new-features-v2024-02-27-1} -* Adds ARM support for Compatibility Matrix GKE clusters. +Where: +* `HOSTNAME` is the name of the host, such as `index.docker.io`. +* `IMAGE_NAME` is the name of the target image in the registry. -## v2024.02.26-0 +For example: -Released on February 26, 2024 +```bash +replicated registry test index.docker.io --image my-company/my-image:v1.2.3 +``` -### New Features {#new-features-v2024-02-26-0} -* v3 API for `/customer_instances` endpoint supports filtering with the `customerIds=".."` query parameter. +## Related Topic -## v2024.02.23-2 +[Tutorial: Using ECR for Private Images](tutorial-ecr-private-images) -Released on February 23, 2024 +================ +File: docs/vendor/packaging-private-registry-security.md +================ +# Replicated Registry Security -### New Features {#new-features-v2024.02.23-2} -* Adds the ability to pin a license to a specific release sequence. To get access to this feature, log in to your vendor portal account. Select Support > Request a feature, and submit a feature request for "license release pinning". 
+This document lists the security measures and processes in place to ensure that images pushed to the Replicated registry remain private. For more information about pushing images to the Replicated registry, see [Using the Replicated Registry for KOTS Installations](private-images-replicated). -## v2024.02.21-1 -Released on February 21, 2024 +## Single Tenant Isolation -### New Features {#new-features-v2024-02-21-1} -* Adds the EKS g4dn instance types to Compatibility Matrix. -* Adds the AKS Standard_D2ps_v5 and higher instance types to Compatibility Matrix. -* Labels and comments on support cases with End of Life (EOL) addons in kURL installer specs embedded in application releases. +The registry is deployed and managed as a multi-tenant application, but each tenant is completely isolated from data that is created and pulled by other tenants. Docker images have shared base layers, but the private registry does not share these between tenants. For example, if a tenant creates an image `FROM postgres:10.3` and pushes the image to Replicated, all of the layers are uploaded, even if other tenants have this base layer uploaded. -## v2024.02.21-0 +A tenant in the private registry is a team on the Replicated [Vendor Portal](https://vendor.replicated.com). Licenses and customers created by the team are also granted some permissions to the registry data, as specified in the following sections. Cross-tenant access is never allowed in the private registry. -Released on February 21, 2024 -### New Features {#new-features-v2024-02-21-0} -* Adds release info to the **Support bundle analysis** page. +## Authentication and Authorization -## v2024.02.19-0 +The private registry supports several methods of authentication. Public access is never allowed because the registry only accepts authenticated requests. -Released on February 19, 2024 -### New Features {#new-features-v2024-02-19-0} -* Adds support for Node Groups on the **Cluster History** page. +### Vendor Authentication -## v2024.02.14-0 +All accounts with read/write access on the Vendor Portal have full access to all images pushed by the tenant to the registry. These users can push and pull images to and from the registry. -Released on February 14, 2024 -### New Features {#new-features-v2024-02-14-0} -* Adds ability to add a Custom ID to a Customer through the vendor portal. -* Shows Custom ID and License ID on the Customers and Instances table views on the **Customers** page. +### End Customer Authentication -## v2024.02.13-3 +A valid (unexpired) license file has an embedded `registry_token` value. Replicated components shipped to customers use this value to authenticate to the registry. Only pull access is enabled when authenticating using a `registry_token`. A `registry_token` has pull access to all images in the tenant's account. All requests to pull images are denied when a license expires or the expiration date is changed to a past date. -Released on February 13, 2024 -### New Features {#new-features-v2024-02-13-3} -* Adds support for creating multiple nodegroups in compatibility matrix EKS clusters. +## Networking and Infrastructure -## v2024.02.09-3 +A dedicated cluster is used to run the private registry and is not used for any services. -Released on February 9, 2024 +The registry metadata is stored in a shared database instance. This database contains information about each layer in an image, but not the image data itself. -### New Features {#new-features-v2024-02-09-3} -* Adds support for Google Artifact Registry. 
+The registry image data is securely stored in an encrypted S3 bucket. Each layer is encrypted at rest, using a shared key stored in [Amazon Key Management Service](https://aws.amazon.com/kms/). Each tenant has a unique directory in the shared bucket and access is limited to the team or license making the request.

-### Improvements {#improvements-v2024-02-09-3} -* Adds pagination to the list of customer instances on the customer details page.

+The registry cluster runs on a hardened operating system image (CentOS-based), and all instances are on a private virtual private cloud (VPC). Public IP addresses are not assigned to the instances running the cluster and the registry images. Instead, only port 443 traffic is allowed from a layer 7 load balancer to these servers.

-### Bug Fixes {#bug-fixes-v2024-02-09-3} -* The `pageSize` and `offset` properties are no longer required for the `/v3/customers/search` Vendor API endpoint. API consumers must provide at least one inclusion criterion for a valid customer search.

+There are no SSH public keys on these servers, and password-based SSH login is disallowed. The servers are not configured to have any remote access. All deployments to these servers are automated using tools such as Terraform and a custom-built CI/CD process. Only verified images are pulled and run.

-## v2024.02.08-2

-Released on February 8, 2024 +## Runtime Monitoring

-### Bug Fixes {#bug-fixes-v2024-02-08-2} -* Replaces GMT timezone value with UTC label.

+Replicated uses a Web Application Firewall (WAF) on the cluster that monitors and blocks any unusual activity. When unusual activity is detected, access from that endpoint is automatically blocked for a period of time, and a Replicated site reliability engineer (SRE) is alerted.

-## v2024.02.08-1

-Released on February 8, 2024 +## Penetration Testing

-### New Features {#new-features-v2024-02-08-1} -* Updates the pricing for compatibility matrix clusters that use Amazon Elastic Kubernetes Service (EKS) versions with extended support. For more information, see [Compatibility Matrix Platform Pricing](https://www.replicated.com/matrix/pricing) on the Replicated website.

+Replicated completed a formal pen test that included the private registry in the scope of the test. Replicated also runs a bug bounty program and encourages responsible disclosure on any vulnerabilities that are found.

-## v2024.02.07-7

+================
+File: docs/vendor/packaging-public-images.mdx
+================
+# Connecting to a Public Registry through the Proxy Registry

-Released on February 7, 2024 +This topic describes how to pull images from public registries using the Replicated proxy registry.

-### Bug Fixes {#bug-fixes-v2024-02-07-7} -* Custom Metrics chart tooltip displays two digits for the minutes field. Also adds GMT TZ for clarity.

-## v2024.02.05-1 +For more information about the Replicated proxy registry, see [About the Replicated Proxy Registry](private-images-about).

-Released on February 5, 2024 +## Pull Public Images Through the Replicated Proxy Registry

+You can use the Replicated proxy registry to pull both public and private images. Using the Replicated proxy registry for public images can simplify network access requirements for your customers, as they only need to whitelist a single domain (either `proxy.replicated.com` or your custom domain) instead of multiple registry domains.

-### New Features {#new-features-v2024-02-05-1} -* Adds status indicator to Customer rows on the **Customers** page Hybrid view.
-* Adds entitlement badges to Customer rows on the **Customers** page Hybrid view. +For public images, you need to first configure registry credentials.

-## v2024.02.05-0 +To pull public images through the Replicated proxy registry, use the following `docker` command:

-Released on February 5, 2024

+```bash
+docker pull REPLICATED_PROXY_DOMAIN/proxy/APPSLUG/UPSTREAM_REGISTRY_HOSTNAME/IMAGE:TAG
+```
+Where:
+* `APPSLUG` is your Replicated app slug found on the [app settings page](https://vendor.replicated.com/settings).
+* `REPLICATED_PROXY_DOMAIN` is `proxy.replicated.com` or your custom domain. For information about how to set a custom domain for the proxy registry, see [Using Custom Domains](/vendor/custom-domains-using).
+* `UPSTREAM_REGISTRY_HOSTNAME` is the hostname for the public registry where the image is located. If the image is located in a namespace within the registry, include the namespace after the hostname. For example, `quay.io/namespace`.
+* `IMAGE` is the image name.
+* `TAG` is the image tag.

-### New Features {#new-features-v2024-02-05-0} -* Labels and comments on support cases with End of Life (EOL) addons in installer specs pinned to channels. +## Examples

-## v2024.02.01-4 +This section includes examples of pulling public images through the Replicated proxy registry.

-Released on February 1, 2024 +### Pull Images from DockerHub

-### Improvements {#improvements-v2024-02-01-4} -* Improves the display of large quantities of Custom Metrics on the **Instance Reporting** page. +The following examples show how to pull public images from DockerHub:

-## v2024.01.29-0

+```bash
+# DockerHub is the default when no hostname is specified
+docker pull proxy.replicated.com/proxy/APPSLUG/busybox
+docker pull proxy.replicated.com/proxy/APPSLUG/nginx:1.16.0
+```
+```bash
+# You can also optionally specify docker.io
+docker pull proxy.replicated.com/proxy/APPSLUG/docker.io/replicated/replicated-sdk:1.0.0
+```

-Released on January 29, 2024 +### Pull Images from Other Registries

+The following example shows how to pull images from the Amazon ECR Public Gallery:

-### Improvements {#improvements-v2024-01-29-0} -* Adds link to documentation for updating team member email addresses.

+```bash
+docker pull proxy.replicated.com/proxy/APPSLUG/public.ecr.aws/nginx/nginx:latest
+```

-## v2024.01.26-3 +### Pull Images Using a Custom Domain for the Proxy Registry

-Released on January 26, 2024 +The following example shows how to pull a public image when a custom domain is configured for the proxy registry:

-### Bug Fixes {#bug-fixes-v2024-01-26-3} -* Displays an accurate active instance count on the **Customers** page.

-## v2024.01.25-4

+```bash
+docker pull my.customdomain.io/proxy/APPSLUG/public.ecr.aws/nginx/nginx:latest
+```
+For information about how to set a custom domain for the proxy registry, see [Using Custom Domains](/vendor/custom-domains-using).

-Released on January 25, 2024 +## Related Topic

-### New Features {#new-features-v2024-01-25-4} -* Adds ability to filter customers by channel version on the **Customers** page. -* Adds links to filter customers by adopted version from the **Channels** page.

+[Connecting to an External Registry](packaging-private-images)

-## v2024.01.25-0 +================
+File: docs/vendor/packaging-rbac.md
+================
+# Configuring KOTS RBAC

-Released on January 25, 2024 +This topic describes role-based access control (RBAC) for Replicated KOTS in existing cluster installations.
It includes information about how to change the default cluster-scoped RBAC permissions granted to KOTS. -### Improvements {#improvements-v2024-01-25-0} -* Adds more helpful messaging on the **Support Bundle Analysis** page if your bundle does not contain an instance ID. +## Cluster-scoped RBAC -## v2024.01.23-1 +When a user installs your application with KOTS in an existing cluster, Kubernetes RBAC resources are created to allow KOTS to install and manage the application. -Released on January 23, 2024 +By default, the following ClusterRole and ClusterRoleBinding resources are created that grant KOTS access to all resources across all namespaces in the cluster: -### Improvements {#improvements-v2024-01-23-1} -* Application release information is extracted from an attached support bundle and displayed in the Github support case for better reference. +```yaml +apiVersion: "rbac.authorization.k8s.io/v1" +kind: "ClusterRole" +metadata: + name: "kotsadm-role" +rules: + - apiGroups: ["*"] + resources: ["*"] + verbs: ["*"] +``` -## v2024.01.19-1 +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kotsadm-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kotsadm-role +subjects: +- kind: ServiceAccount + name: kotsadm + namespace: appnamespace +``` -Released on January 19, 2024 +Alternatively, if your application does not require access to resources across all namespaces in the cluster, then you can enable namespace-scoped RBAC for KOTS. For information, see [About Namespace-scoped RBAC](#min-rbac) below. -### Bug Fixes {#bug-fixes-v2024-01-19-1} -* Adds the ability to scroll on the **License Fields** page. +## Namespace-scoped RBAC {#min-rbac} +Rather than use the default cluster-scoped RBAC, you can configure your application so that the RBAC permissions granted to KOTS are limited to a target namespace or namespaces. By default, for namespace-scoped installations, the following Role and RoleBinding resources are created that grant KOTS permissions to all resources in a target namespace: -## v2024.01.18-3 +```yaml +apiVersion: "rbac.authorization.k8s.io/v1" +kind: "Role" +metadata: + name: "kotsadm-role" +rules: + - apiGroups: ["*"] + resources: ["*"] + verbs: ["*"] +``` -Released on January 18, 2024 +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: kotsadm-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kotsadm-role +subjects: +- kind: ServiceAccount + name: kotsadm + namespace: appnamespace +``` -### Improvements {#improvements-v2024-01-18-3} -* Displays air gap build status on the **Channels** page. +Namespace-scoped RBAC is supported for applications that use Kubernetes Operators or multiple namespaces. During application installation, if there are `additionalNamespaces` specified in the Application custom resource manifest file, then Roles and RoleBindings are created to grant KOTS access to resources in all specified namespaces. -## v2024.01.18-2 +### Enable Namespace-scoped RBAC {#enable} -Released on January 18, 2024 +To enable namespace-scoped RBAC permissions for KOTS, specify one of the following options in the Application custom resource manifest file: -### Bug Fixes {#bug-fixes-v2024-01-18-2} -* Instances CSV export shows relevant `.airgap` bundle downloaded timestamp, channel_id, and channel_sequence data. +* `supportMinimalRBACPrivileges`: Set to `true` to make namespace-scoped RBAC optional for existing cluster installations. 
When `supportMinimalRBACPrivileges` is `true`, cluster-scoped RBAC is used by default and users must pass the `--use-minimal-rbac` flag with the installation or upgrade command to use namespace-scoped RBAC.

-## v2024.01.17-1 +* `requireMinimalRBACPrivileges`: Set to `true` to require that all installations to existing clusters use namespace-scoped access. When `requireMinimalRBACPrivileges` is `true`, all installations use namespace-scoped RBAC automatically and users do not pass the `--use-minimal-rbac` flag.

-Released on January 17, 2024 +For more information about these options, see [requireMinimalRBACPrivileges](/reference/custom-resource-application#requireminimalrbacprivileges) and [supportMinimalRBACPrivileges](/reference/custom-resource-application#supportminimalrbacprivileges) in _Application_.

-### New Features {#new-features-v2024-01-17-1} -* Adds support to the compatibility matrix for running OpenShift clusters with multiple nodes. +### About Installing with Minimal RBAC

-## v2024.01.11-1 +In some cases, it is not possible to grant the user `* * *` permissions in the target namespace. For example, an organization might have security policies that prevent this level of permissions.

-Released on January 11, 2024 +If the user installing or upgrading KOTS cannot be granted `* * *` permissions in the namespace, then they can instead request the following:
+* The minimum RBAC permissions required by KOTS
+* RBAC permissions for any CustomResourceDefinitions (CRDs) that your application includes

-### Bug Fixes {#bug-fixes-v2024-01-11-1} -* Fixes bug in the **Customers** page search feature, where it would not display the ‘not found’ state if no results were found.

+Installing with the minimum KOTS RBAC permissions also requires that the user manually create a ServiceAccount, Role, and RoleBinding for KOTS, rather than allowing KOTS to automatically create a Role with `* * *` permissions.

-## v2024.01.10-2 +For more information about how users can install KOTS with minimal RBAC when namespace-scoped RBAC is enabled, see [Namespace-scoped RBAC Requirements](/enterprise/installing-general-requirements#namespace-scoped) in _Installation Requirements_.

-Released on January 10, 2024 +### Limitations

-### Bug Fixes {#bug-fixes-v2024-01-10-2} -* Adds an error state for the **Support Bundle Analysis** page if there is an invalid bundle slug in the URL. +The following limitations apply when using the `requireMinimalRBACPrivileges` or `supportMinimalRBACPrivileges` options to enable namespace-scoped RBAC for KOTS:

-## v2024.01.10-1 +* **Existing clusters only**: The `requireMinimalRBACPrivileges` and `supportMinimalRBACPrivileges` options apply only to installations in existing clusters.

-Released on January 10, 2024 +* **Preflight checks**: When namespace-scoped access is enabled, preflight checks cannot read resources outside the namespace where KOTS is installed. The preflight checks continue to function, but return less data. For more information, see [Defining Preflight Checks](/vendor/preflight-defining).

-### Improvements {#improvements-v2024-01-10-1} -* Adds pagination to the **Kubernetes Installers** page. +* **Velero namespace access for KOTS snapshots**: Velero is required for enabling backup and restore with the KOTS snapshots feature. Namespace-scoped RBAC does not grant access to the namespace where Velero is installed in the cluster.

-## v2024.01.10-0 +    To set up snapshots when KOTS has namespace-scoped access, users can run the `kubectl kots velero ensure-permissions` command.
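For example, a sketch of running this command; `APP_NAMESPACE` is a placeholder for the namespace where KOTS is installed, and the `--velero-namespace` flag (shown with Velero's conventional `velero` namespace) is covered in the CLI reference linked below:

```bash
# Create the Roles and RoleBindings that KOTS needs for snapshots
kubectl kots velero ensure-permissions \
  --namespace APP_NAMESPACE \
  --velero-namespace velero
```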
This command creates additional Roles and RoleBindings to allow the necessary cross-namespace access. For more information, see [`velero ensure-permissions`](/reference/kots-cli-velero-ensure-permissions/) in the KOTS CLI documentation.

-Released on January 10, 2024 +    For more information about snapshots, see [About Backup and Restore with Snapshots](/vendor/snapshots-overview).

-### Improvements {#improvements-v2024-01-10-0} -* Improves refetching on the **Customers** page. +* **Air Gap Installations**: For air gap installations, the `requireMinimalRBACPrivileges` and `supportMinimalRBACPrivileges` flags are supported only in automated, or _headless_, installations. In headless installations, the user passes all the required information to install both KOTS and the application with the `kots install` command. In non-headless installations, the user provides information to install the application through the Admin Console UI after KOTS is installed.

-## v2024.01.09-4 +    In non-headless installations in air gap environments, KOTS does not have access to the application's `.airgap` package during installation. This means that KOTS does not have the information required to determine whether namespace-scoped access is needed, so it defaults to the more permissive, default cluster-scoped RBAC policy.

-Released on January 9, 2024 +    For more information about how to do headless installations in air gap environments, see [Air Gap Installation](/enterprise/installing-existing-cluster-automation#air-gap) in _Installing with the KOTS CLI_.

-### Bug Fixes {#bug-fixes-v2024-01-09-4} -* Fixes the install links on the **Channels** page for Native applications. +* **Changing RBAC permissions for installed instances**: The RBAC permissions for KOTS are set during its initial installation. KOTS runs using the assumed identity and cannot change its own authorization. When you update your application to add or remove the `requireMinimalRBACPrivileges` and `supportMinimalRBACPrivileges` flags in the Application custom resource, the RBAC permissions for KOTS are affected only for new installations. Existing KOTS installations continue to run with their current RBAC permissions.

-## v2024.01.09-3 +    To expand the scope of RBAC for KOTS from namespace-scoped to cluster-scoped in new installations, Replicated recommends that you include a preflight check to ensure the permission is available in the cluster.

-Released on January 9, 2024 +================
+File: docs/vendor/packaging-using-tls-certs.mdx
+================
+import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"

-### Improvements {#improvements-v2024-01-09-3} -* Adds pagination for the **Customers** page table view. +# Using TLS Certificates

-## v2024.01.08-6 +<KurlAvailability/>

-Released on January 8, 2024 +Replicated KOTS provides default self-signed certificates that renew automatically. For embedded clusters created with Replicated kURL, the self-signed certificate renews 30 days before expiration when you enable the kURL EKCO add-on version 0.7.0 and later.

-### Bug Fixes {#bug-fixes-v2024-01-08-6} -* Fixes back button behavior when navigating to the **Customers** page from a link on the **Channels** page. +Custom TLS options are supported:

-## v2024.01.08-5 +- **Existing clusters:** The expectation is for the end customer to bring their own Ingress Controller such as Contour or Istio and upload their own `kubernetes.io/tls` secret.
For an example, see [Ingress with TLS](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) in the Kubernetes documentation.

-Released on January 8, 2024 +- **Embedded kURL clusters:** End customers can upload a custom TLS certificate. Replicated kURL creates a TLS secret that can be reused by other Kubernetes resources, such as Deployment or Ingress, which can be easier than providing and maintaining multiple certificates. As a vendor, you can enable the use of custom TLS certificates with these additional resources.

-### Improvements {#improvements-v2024-01-08-5} -* Adds an 'Add support bundle' button to the **Customer Support Bundles** page. -* Adds an error state when a user visits an invalid release. -* Simplifies the search design on the **Troubleshoot** pages. -* Adds an empty state when there are no search results on the **Troubleshoot** pages. -* Persists the search query and shows correct results when switching between the application-level **Troubleshoot** page and the top-level **Troubleshoot** page.

+For example, if your application does TLS termination, your deployment would need the TLS secret. Or if the application is connecting to another deployment that is also secured using the same SSL certificate (which may not be a trusted certificate), the custom TLS certificate can be used to do validation without relying on the trust chain.

-### Bug Fixes {#bug-fixes-v2024-01-08-5} -* Fixes bug where the search box would disappear on the top-level **Troubleshoot** page if the query returned no results. +### Get the TLS Secret

-## v2024.01.08-1 +kURL sets up a Kubernetes secret called `kotsadm-tls`. The secret stores the TLS certificate, key, and hostname. Initially, the secret has an annotation set called `acceptAnonymousUploads`. This indicates that a new TLS certificate can be uploaded by the end customer during the installation process. For more information about installing with kURL, see [Online Installation with kURL](/enterprise/installing-kurl).

-Released on January 8, 2024 +Before you can reference the TLS certificate in other resources, you must get the `kotsadm-tls` secret output.

-### New Features {#new-features-v2024-01-08-1} -* Adds both TTL and Duration to the **Cluster History** page. -* Fixes sort by TTL and sort by duration to work with paginated results. -* Adds filter by Kubernetes distribution to the **Cluster History** page. -* Adds filter by Cost to the **Cluster History** page. -* Adds filter by Node Count to the **Cluster History** page.

+To get the `kotsadm-tls` secret, run:

-## v2024.01.08-0

+```shell
+kubectl get secret kotsadm-tls -o yaml
+```

-Released on January 8, 2024 +In the output, the `tls.crt` and `tls.key` fields hold the certificate and key that can be referenced in other Kubernetes resources.

-### Bug Fixes {#bug-fixes-v2024-01-08-0} -* Fixes a bug where the support bundle and customer name would not be prefilled on the support request form if you navigated there from one of the "Submit support ticket" links on the **Troubleshoot** or **Dashboard** pages.

+**Example Output:**

-## v2024.01.04-2

+```yaml
+apiVersion: v1
+kind: Secret
+type: kubernetes.io/tls
+metadata:
+  name: kotsadm-tls
+data:
+  tls.crt:
+  tls.key:
+```

-Released on January 4, 2024 +### Use TLS in a Deployment Resource

-### Improvements {#improvements-v2024-01-04-2} -* Adds ability to edit instance name on the **Customers** page. +This procedure shows how to reference the `kotsadm-tls` secret using an example nginx Deployment resource (`kind: Deployment`).
-### Bug Fixes {#bug-fixes-v2024-01-04-2} -* Shows an error state when you visit a customer page with an invalid app slug or customer ID in the URL. +To use the `kotsadm-tls` secret in a Deployment resource:

-## v2024.01.03-3 +1. In the Deployment YAML file, configure SSL for volumeMounts and volumes, and add the `kotsadm-tls` secret to volumes:

-Released on January 3, 2024 +    **Example:**

-### Improvements {#improvements-v2024-01-03-3} -* Improves the wording and styling of the Adoption Rate section of the channels on the **Channels** page.

+    ```yaml
+    apiVersion: apps/v1
+    kind: Deployment
+    metadata:
+      name: nginx
+    spec:
+      template:
+        spec:
+          containers:
+          - name: nginx   # example container for this Deployment
+            image: nginx
+            volumeMounts:
+            - mountPath: "/etc/nginx/ssl"
+              name: nginx-ssl
+              readOnly: true
+          volumes:
+          - name: nginx-ssl
+            secret:
+              secretName: kotsadm-tls
+    ```

-### Bug Fixes {#bug-fixes-v2024-01-03-3} -* Fixes the filtering for the active/inactive customer links on the **Channels** page.

-## v2024.01.03-2 +1. Deploy the release, and then verify the pod deployment using the `kubectl exec` command:

-Released on January 3, 2024 +    **Example:**

+    ```shell
+    export POD_NAME=nginx-
+    kubectl exec -it ${POD_NAME} bash
+    ```

-### Improvements {#improvements-v2024-01-03-2} -* Includes instance name on the **Support Bundle Analysis** page. +1. Run the `ls` and `cat` commands to verify that the certificate and key were deployed to the specified volumeMount:

-## v2024.01.03-0 +    **Example:**

-Released on January 3, 2024

+    ```shell
+    $ ls /etc/nginx/ssl
+    tls.crt tls.key

+    $ cat /etc/nginx/ssl/tls.crt
+    -----BEGIN CERTIFICATE-----
+    MIID8zCCAtugAwIBAgIUZF+NWHnpJCt2R1rDUhYjwgVv72UwDQYJKoZIhvcNAQEL

-### Improvements {#improvements-v2024-01-03-0} -* Displays instance tags in Instance table view.

+    $ cat /etc/nginx/ssl/tls.key
+    -----BEGIN PRIVATE KEY-----
+    MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCyiGNuHw2LY3Rv
+    ```

-## v2024.01.02-0

-Released on January 2, 2024 +### Use TLS in an Ingress Resource

-### Improvements {#improvements-v2024-01-02-0} -* Displays instance name on the **Customers** page hybrid view. +You can add the `kotsadm-tls` secret to the Ingress resource to terminate TLS at the Contour layer. The following example shows how to configure `secretName: kotsadm-tls` in the `tls` section of an Ingress resource (`kind: Ingress`):

-## v2023.12.30-0 +**Example:**

-Released on December 30, 2023

+```yaml
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: nginx
+spec:
+  tls:
+  - hosts:
+    - 'tls.foo.com'
+    secretName: kotsadm-tls
+  rules:
+  - host: tls.foo.com
+    http:
+      paths:
+      - path: /
+        backend:
+          serviceName: nginx
+          servicePort: 80
+```
+:::note
+`tls.foo.com` must resolve to a valid IP, and also must match the Common Name (CN) or Subject Alternative Name (SAN) of the TLS certificate.
+:::

-### Bug Fixes {#bug-fixes-v2023-12-30-0} -* Fixes an issue where the instance name failed to render after creating an instance tag with the key "name.".

-## v2023.12.29-5 +================
+File: docs/vendor/planning-questionnaire.md
+================
+# Customer Application Deployment Questionnaire

-Released on December 29, 2023 +Before you package and distribute an application, Replicated recommends that you
+understand several key characteristics about the environments where your customers
+will deploy your application.

+To gather this information about your customers' environments:
+1. Copy and customize the [$APP Deployment Questionnaire](#app-deployment-questionnaire) below.
+1.
Replace $APP with the name of your application.
+1. Send the questionnaire to your users.

-### New Features {#new-features-v2023-12-29-5} -* Adds the ability to add a custom name to a given instance along with other vendor-defined instance tags. +## $APP Deployment Questionnaire

-## v2023.12.28-0 +### Infrastructure

-Released on December 28, 2023 +This section includes questions about your infrastructure and how you deploy software.
+This includes both internally written and Commercial Off The Shelf (COTS) applications.

-### Bug Fixes {#bug-fixes-v2023-12-28-0} -* Removes references to the deprecated support@replicated.com email address. +If it’s more convenient, limit answers to the scope of the target infrastructure for deploying $APP.

-## v2023.12.27-1 +- Do you use any IaaS like AWS, GCP, or Azure?

-Released on December 27, 2023 +- If you deploy to a physical datacenter, do you use a hypervisor like vSphere?

-### New Features {#new-features-v2023-12-27-1} -* Adds additional bundle and instance metadata to the **Support Bundle Analysis** page. +- Do you ever install on bare metal?

-## v2023.12.21-3 +- Do you have any restrictions on what operating systems are used?

-Released on December 21, 2023 +- Does the target infrastructure have a direct outbound internet connection? Can it connect out via a proxy?

-### Bug Fixes {#bug-fixes-v2023-12-21-3} -* Fixes incorrect link for releases and customers created by Service Accounts. +- If the environment has no outbound network, do machines in a DMZ have direct network access to the air gapped infrastructure, or do release artifacts need to be copied to physical media for installation?

-## v2023.12.20-1 +- If there is an issue causing downtime in the on-prem application, would you be willing to give the $APP team direct SSH access to the instance(s)?

-Released on December 20, 2023 +### Development and Deployment Processes

-### Bug Fixes {#bug-fixes-v2023-12-20-1} -* Improves error messaging for the **Instance Details** page when there is an invalid app slug, customer ID, or instance ID in the URL. -* Fixes installation failures for applications with Helm charts that contain empty files.

+- Do you require that applications be deployed by a configuration management framework like Chef, Ansible, or Puppet?

-## v2023.12.19-3 +- Do you run any container-based workloads today?

-Released on December 19, 2023 +- If you run container workloads, do you run any kind of orchestration like Kubernetes, Mesos, or Docker Swarm?

-### Bug Fixes {#bug-fixes-v2023-12-19-3} -* Allows user to press 'Enter' to submit when logging in to the download portal. +- If you run container workloads, what tools do you use to host and serve container images?

-## v2023.12.19-2 +- If you run container workloads, what tools do you use to scan and secure container images?

-Released on December 19, 2023 +- If you are deploying $APP to your existing Kubernetes cluster, can your cluster nodes pull images from the public internet, or do you require images to be stored in an internal registry?

-### Bug Fixes {#bug-fixes-v2023-12-19-2} -* Fixes scrolling on **Kubernetes Installers** teaser page. +### Change Management

-## v2023.12.19-1 +- How do you test new releases of COTS software? Do you have a UAT or Staging environment? Are there other change management requirements?

-Released on December 19, 2023 +- How often do you like to receive planned (non-critical) software updates? Quarterly? Monthly? As often as possible?
-### Improvements {#improvements-v2023-12-19-1} -* Redesigns the **Customers** page search to make it more streamlined. +- For critical updates, what is your target deployment time for new patches? Do you have a requirement for how quickly patches are made available after a vulnerability is announced?

-## v2023.12.19-0 +- Do you drive production deploys automatically from version control (“gitops”)?

-Released on December 19, 2023

-### New Features {#new-features-v2023-12-19-0} -* Releases Embedded Cluster v1.28.4+ec.5, replacing v1.28.4+ec.4. -* Shows max disk size on create cluster form (CMX) based on entitlement value. +### Application Usage and Policy Requirements

-### Bug Fixes {#bug-fixes-v2023-12-19-0} -* Disables create cluster button when loading team entitlement. +- For applications that expose a web UI, how will you be connecting to the instance? As much as possible, include details about your workstation, any tunneling/VPN/proxy infrastructure, and what browsers you intend to use.

-## v2023.12.18-0 +- Do you require a disaster recovery strategy for deployed applications? If so, where are backups stored today? (SFTP? NAS? S3-compliant object store? Something else?)

-Released on December 18, 2023 +- Do you require deployed COTS applications to support logins with an internal identity provider like OpenLDAP, Windows AD, or SAML?

-### New Features {#new-features-v2023-12-18-0} -* Adds ability to extend cluster Time to Live (TTL) after creation with the compatibility matrix. +- Do you require an audit log of all user activity performed in $APP? What are your needs around exporting/aggregating audit log data?

-### Improvements {#improvements-v2023-12-18-0} -* Adds Embedded Cluster `v1.28.4+ec.4` as the default version. -* Removes the 'NEW' badge from the Instances CSV download. +- Do you anticipate the need to scale the capacity of $APP up and down during its lifetime?

-## v2023.12.14-4 +- What are your requirements around log aggregation? What downstream systems do you need system logs to be piped to?

-Released on December 14, 2023 +================
+File: docs/vendor/policies-data-transmission.md
+================
+# Data Transmission Policy

-### Improvements {#improvements-v2023-12-14-4} -* Persists inputs on the **Compatibility Matrix > Create Cluster** dialog when there is an error. +A Replicated installation connects to a Replicated-hosted endpoint periodically to perform various tasks including checking for updates and synchronizing the installed license properties. During this time, some data is transmitted from an installed instance to the Replicated API. This data is limited to:

-## v2023.12.14-3

+- The IP address of the primary Replicated instance.
+- The ID of the installation.
+- [Resource statuses](/enterprise/status-viewing-details#resource-statuses).
+- Information about the installation including data needed for [instance details](/vendor/instance-insights-details).
+- [Custom metrics](/vendor/custom-metrics) which the vendor may configure as part of the installation.
+- Date and timestamps of the data transmission.

-Released on December 14, 2023 +This data is required to provide the expected update and license services. The data is also used to provide telemetry and other reporting features.

-### Improvements {#improvements-v2023-12-14-3} -* Displays maintenance notifications per distro in create cluster form. -* Adds ability to select the date-time range filter on the **Cluster History** page. Cluster stats can be filtered by `start-time` and `end-time`.
+By default, no additional data is collected and transmitted from the instance to external servers.

-## v2023.12.14-0 +All data is encrypted in transit according to industry best practices. For more information about Replicated's security practices, see [Security at Replicated](https://www.replicated.com/security/) on the Replicated website.

-Released on December 14, 2023 +For more information about application instance data fields that the Replicated Vendor Portal uses to generate events for instances, see [About Instance and Event Data](/vendor/instance-insights-event-data).

-### Bug Fixes {#bug-fixes-v2023-12-14-0} -* Fixes the default product options on the support request form. These will be generated based on enabled entitlements. +Last modified December 31, 2023

-## v2023.12.13-1 +================
+File: docs/vendor/policies-infrastructure-and-subprocessors.md
+================
+# Infrastructure and Subprocessor Providers

-Released on December 13, 2023 +This list describes the infrastructure environment, subprocessors and other entities material to the Replicated products and services.

-### Improvements {#improvement-v2023-12-13-1} -* Uses `sortColumn=tag` and `tag-sort-key` to sort clusters on the values for a tag key. +Prior to engaging any third party, Replicated performs diligence to evaluate their privacy, security and confidentiality practices. Whenever possible, Replicated uses encryption for data at rest and in motion so that this information is not available to these third parties.

-### Bug Fixes {#bug-fixes-v2023-12-13-1} -* Shows error message when updating Compatibility Matrix quotas to the same value or less than the current value. +Replicated does not engage in the business of selling or trading personal information. Any personally identifiable information Replicated might possibly hold is data that a customer has provided to us.

-## v2023.12.13-0 +The fields that Replicated may possess as identifiable to a physical person may include:
+- Name
+- Email
+- Phone Number
+- Job Title
+- Business Address
+- Github Username

-Released on December 13, 2023 +Note: This does not imply that all these fields are collected for each person. It also does not mean all these datapoints are used with each declared provider.

-### Improvements {#improvements-v2023-12-13-0} -* Adds "Created By" and "Updated By" columns to the Customers and Instances table views.

-## v2023.12.11-3 +## Replicated Infrastructure Providers

-Released on December 11, 2023 +Replicated might use the following entities to provide infrastructure that helps with delivery of our products:

-### Improvements {#improvements-v2023-12-11-3} -* Adds "Last Airgap Download Version" and "Last Airgap Download Date" columns to the Customers and Instances table views.

-### Bug Fixes {#bug-fixes-v2023-12-11-3} -* Fixes issues with customer instances CSV row repetition.

+| Entity Name | Purpose | Country where Infrastructure Resides | Notes |
+|---------------------|----------------------------|-------|----|
+| Amazon Web Services | Various IaaS | United States | Vendor Portal, registry, API, and supporting infrastructure services. |
+| Cloudflare | Network security, DDoS mitigation, DNS | United States | |
+| Datadog | Performance monitoring | United States | |
+| DBT Labs | Data transformation or migration | United States | |
+| FiveTran | Data transformation or migration | United States | |
+| Github | Customer support | United States | Replicated's customers may engage with our customer support team using Github issues in a private repo. |
+| Google Looker | Product usage metrics | United States | |
+| Hex | Data transformation or migration | United States | |
+| Knock Labs, Inc. | Event notifications | United States | |
+| Postmark / Active Campaign | Transactional emails from Vendor Portal. Marketing related communications. | United States | Active Campaign and Postmark businesses merged. |
+| Salesforce | Customer and sales relationship management | United States | |
+| Snowflake | Usage data analysis and transformation | United States | |
+| Timescale | Time-series data of instance metrics | United States | See our [Data Transmission Policy](/vendor/policies-data-transmission) |

-## v2023.12.11-2 +Last modified January 4, 2024

-Released on December 11, 2023 +================
+File: docs/vendor/policies-support-lifecycle.md
+================
+# Support Lifecycle Policy

-### Improvements {#improvements-v2023-12-11-2} -* Improves usability of the Download Portal by providing descriptions, better button names, and improved styles. -* Improves messaging when RBAC prevents requesting more credits in CMX.

+Replicated will provide support for products per our terms and services until that product is noted as End of Life (EOL).

-### Bug Fixes {#bug-fixes-v2023-12-11-2} -* Fixes version label on customer instances table.

<table>
  <tr>
    <th>Product Phase</th>
    <th>Definition</th>
  </tr>
  <tr>
    <td>Alpha</td>
    <td>A product or feature that is exploratory or experimental. Typically, access to alpha features and their documentation is limited to customers providing early feedback. While most alpha features progress to beta and general availability (GA), some are deprecated based on assessment learnings.</td>
  </tr>
  <tr>
    <td>Beta</td>
    <td>
      <p>A product or feature that is typically production-ready, but has not met Replicated’s definition of GA for one or more of the following reasons:</p>
      <ul>
        <li>Remaining gaps in intended functionality</li>
        <li>Outstanding needs around testing</li>
        <li>Gaps in documentation or sales enablement</li>
        <li>In-progress customer value validation efforts</li>
      </ul>
      <p>Documentation for beta products and features is published on the Replicated Documentation site with a "(Beta)" label. Beta products or features follow the same build and test processes required for GA.</p>
      <p>Please contact your Replicated account representative if you have questions about why a product or feature is beta.</p>
    </td>
  </tr>
  <tr>
    <td>“GA” - General Availability</td>
    <td>A product or feature that has been validated as both production-ready and value-additive by a percentage of Replicated customers. Products in the GA phase are typically those that are available for purchase from Replicated.</td>
  </tr>
  <tr>
    <td>“LA” - Limited Availability</td>
    <td>A product has reached the Limited Availability phase when it is no longer available for new purchases from Replicated. Updates will be primarily limited to security patches, critical bugs and features that enable migration to GA products.</td>
  </tr>
  <tr>
    <td>“EOA” - End of Availability</td>
    <td>
      <p>A product has reached the End of Availability phase when it is no longer available for renewal purchase by existing customers. This date may coincide with the Limited Availability phase.</p>
      <p>This product is considered deprecated, and will move to End of Life after a determined support window. Product maintenance is limited to critical security issues only.</p>
    </td>
  </tr>
  <tr>
    <td>“EOL” - End of Life</td>
    <td>
      <p>A product has reached its End of Life, and will no longer be supported, patched, or fixed by Replicated. Associated product documentation may no longer be available.</p>
      <p>The Replicated team will continue to engage to migrate end customers to GA product based deployments of your application.</p>
    </td>
  </tr>
</table>

-## v2023.12.11-1
<table>
  <tr>
    <th>Replicated Product</th>
    <th>Product Phase</th>
    <th>End of Availability</th>
    <th>End of Life</th>
  </tr>
  <tr>
    <td>Compatibility Matrix</td><td>GA</td><td>N/A</td><td>N/A</td>
  </tr>
  <tr>
    <td>Replicated SDK</td><td>Beta</td><td>N/A</td><td>N/A</td>
  </tr>
  <tr>
    <td>Replicated KOTS Installer</td><td>GA</td><td>N/A</td><td>N/A</td>
  </tr>
  <tr>
    <td>Replicated kURL Installer</td><td>GA</td><td>N/A</td><td>N/A</td>
  </tr>
  <tr>
    <td>Replicated Embedded Cluster Installer</td><td>GA</td><td>N/A</td><td>N/A</td>
  </tr>
  <tr>
    <td>Replicated Classic Native Installer</td><td>EOL</td><td>2023-12-31*</td><td>2024-12-31*</td>
  </tr>
</table>

-Released on December 11, 2023 +*Except for customers who have specifically contracted different dates for the End of Availability and End of Life timelines.

-### Improvements {#improvements-v2023-12-11-1} -* Shows the release version that was most recently downloaded from the Download Portal on the **Customer Reporting** page. +## Supported Replicated Installer Versions

-## v2023.12.11-0 +The following table lists the versions of Replicated Embedded Cluster, Replicated KOTS, and Replicated kURL that are supported on each Kubernetes version.

-Released on December 11, 2023 +The End of Replicated Support date is the End of Life (EOL) date for the Kubernetes version. The EOL date for each Kubernetes version is published on the [Releases](https://kubernetes.io/releases/) page in the Kubernetes documentation.

-### Improvements {#improvements-v2023-12-11-0} -* Re-orders the support request form to ensure that the customer (or "no customer") is selected prior to the selection of the product area, and auto-fills the form accordingly.

<table>
  <tr>
    <th>Kubernetes Version</th>
    <th>Embedded Cluster Versions</th>
    <th>KOTS Versions</th>
    <th>kURL Versions</th>
    <th>End of Replicated Support</th>
  </tr>
  <tr>
    <td>1.32</td><td>N/A</td><td>N/A</td><td>N/A</td><td>2026-02-28</td>
  </tr>
  <tr>
    <td>1.31</td><td>N/A</td><td>1.117.0 and later</td><td>v2024.08.26-0 and later</td><td>2025-10-28</td>
  </tr>
  <tr>
    <td>1.30</td><td>1.16.0 and later</td><td>1.109.1 and later</td><td>v2024.05.03-0 and later</td><td>2025-06-28</td>
  </tr>
  <tr>
    <td>1.29</td><td>1.0.0 and later</td><td>1.105.2 and later</td><td>v2024.01.02-0 and later</td><td>2025-02-28</td>
  </tr>
</table>

-## v2023.12.09-0 +Replicated support for end-customer installations is limited to those installations using a Replicated-provided installer product, such as KOTS, kURL, or Embedded Cluster, available with the [Business or Enterprise plans](https://www.replicated.com/pricing). Replicated support for direct Helm CLI installs or other vendor-provided installers is limited to the successful distribution of the software to the end-customer, as well as any issues with the Replicated SDK if included with the installation.

-Released on December 9, 2023 -### Improvements {#improvements-v2023-12-09-0} -* Adds ability to upload multiple support bundles when opening a support issue on the **Troubleshoot** or **Support** page.

+The information contained herein is believed to be accurate as of the date of publication, but updates and revisions may be posted periodically and without notice.

-## v2023.12.08-4 +Last modified January 2, 2025.

-Released on December 8, 2023 +================
+File: docs/vendor/policies-vulnerability-patch.md
+================
+# Vulnerability Patch Policy

-### Bug Fixes {#bug-fixes-v2023-12-08-4} -* Persists column visibility on Compatibility Matrix cluster history.

+While it’s our goal to distribute vulnerability-free versions of all components, this isn’t always possible.
+Kubernetes and KOTS are made from many components, each authored by different vendors.

-## v2023.12.08-1 +The best way to stay ahead of vulnerabilities is to run the latest version and have a strategy to quickly update when a patch is available.

-Released on December 8, 2023 +## How We Scan

-### Bug Fixes {#bug-fixes-v2023-12-08-1} -* Fixes bug where the selected file in the editor would be reset after saving changes to a KOTS release.

+Our build pipeline uses [Trivy](https://www.aquasec.com/products/trivy/) to scan for and detect known, published vulnerabilities in our images.
+It’s possible that other security scanners will detect a different set of results.
+We commit to patching vulnerabilities according to the timeline below based on the results of our internal scans.

-## v2023.12.08-0 +If you or your customer detects a different vulnerability using a different scanner, we encourage you to report it to us in a GitHub issue, a Slack message, or a support issue opened from the Replicated Vendor Portal.
+Our team will evaluate the vulnerability and determine the best course of action.

-Released on December 8, 2023 +## Base Images

-### Improvements {#improvements-v2023-12-08-0} -* Adds ability to upload multiple support bundles when opening a support issue on the **Troubleshoot** or **Support** pages.

+KOTS images are built on top of Chainguard's open source [Wolfi](https://edu.chainguard.dev/open-source/wolfi/overview/) base image. Wolfi is a Linux undistro that is focused on supply chain security.

-## v2023.12.07-2 +KOTS has automation that uses the Chainguard [melange](https://edu.chainguard.dev/open-source/melange/overview/) and [apko](https://edu.chainguard.dev/open-source/apko/overview/) projects to build packages and assemble images on Wolfi. Building and assembling images in this way helps to ensure that any CVEs can be resolved quickly and efficiently.

-Released on December 7, 2023 +## Upstream CVE Disclosure

-### Improvements {#improvements-v2023-12-07-2} -* Adds ability to specify tags at cluster creation with the compatibility matrix. +Replicated KOTS, kURL, and Embedded Cluster deliver many upstream Kubernetes and ecosystem components.
+We do not build these packages ourselves and rely on the upstream software vendors to distribute patches.
+Our intent is to make any patches available as soon as possible, but we guarantee the following timeline to make upstream patches available after we learn about the vulnerability and a patch is available to us:

-## v2023.12.07-1

+| CVE Level | Time to release |
+|-----------|-----------------|
+| Critical | Within 2 weeks |
+| High | Within 60 days |
+| Medium | Within 90 days |
+| Low | Best effort unless risk accepted |

-Released on December 7, 2023 +## Notable Upstream CVEs

-### Bug Fixes {#bug-fixes-v2023-12-07-1} -* Fixes a bug that prompts the user about unsaved changes when clicking "Create release" on the Draft Release page.

+This section lists CVEs that have yet to be resolved by the upstream maintainers and therefore are not patched in Replicated. This is not an exhaustive list of unpatched upstream CVEs; instead, these are noteworthy CVEs that we have evaluated and on which we offer our opinion to help with your own security reviews. When available, we will apply upstream patches in accordance with our policy described in [Upstream CVE Disclosure](#upstream-cve-disclosure) above. We will update this list after applying any upstream patches.

-## v2023.12.06-2

+| CVE ID | Explanation |
+|--------|------------|
+| None | N/A |

-Released on December 6, 2023 +## Vulnerability Management Exception Policy
+There might be instances where policy exceptions are required to continue using third-party software with known vulnerabilities in our on premises products. Some reasons for an exception include:

-### Improvements {#improvements-v2023-12-06-2} -* Shows 'Created by' and 'Last modified by' information on the **Customers**, **Reporting**, and **Customer details** pages.

+- Feature breakage or bugs in patched versions
+- Performance issues in patched versions
+- Patched version contains higher severity vulnerabilities

-## v2023.12.06-0 +Regardless of the reason, an exception is vetted from a business impact and security standpoint. The business review assesses the overall impact to the product created by the patched, but otherwise problematic, piece of software. The security portion determines if the CVE is applicable to this specific context and if that CVE's impact to the product’s overall security posture is acceptable.

-Released on December 6, 2023 +In the event of a vulnerability management exception, a notice is posted containing:

-### Bug Fixes {#bug-fixes-v2023-12-06-0} -* Fixes a bug that could occur when generating the embedded cluster binary for channels where semantic versioning was not enabled. -* Fixes bug in the **Channel Settings** modal where the user could not return custom domains to the Replicated default.

+- The impacted product(s)
+- The rationale for the exception
+- The relevant CVE(s)
+- A risk assessment in the product context for each CVE

-## v2023.12.05-1 +As subsequent versions of the vulnerable software are released, Replicated continues to research to find a solution that satisfies the business and security requirements of the original exception.

-Released on December 5, 2023 +## Known Disclosed Vulnerabilities in our On Premises Products

-### Improvements {#improvements-v2023-12-05-1} -* Shows 'Created by' and 'Last modified by' on the **Releases**, **View Release**, **Edit Release**, and **Release History** pages.
+| CVE | CVE Summary | Rationale | Additional Reading | +|-----|-------------|-----------|--------------------| +| None | N/A | N/A | N/A | -## v2023.12.04-4 +Last modified January 29, 2025. -Released on December 4, 2023 +================ +File: docs/vendor/preflight-defining.mdx +================ +# Defining Preflight Checks -### Bug Fixes {#bug-fixes-v2023-12-04-4} -* Fixes the **Copy download URL** button for airgap builds on the **Release History** page in Safari. +This topic describes how to define preflight checks in Helm and Kubernetes manifest-based applications. For more information about preflight checks, see [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about). -## v2023.12.04-3 +The information in this topic applies to applications that are installed with Helm or with Replicated KOTS. -Released on December 4, 2023 +## Step 1: Create the Manifest File -### Improvements {#improvements-v2023-12-04-3} -* Adds the ability to update a test cluster TTL. +You can define preflight checks in a Kubernetes Secret or in a Preflight custom resource. The type of manifest file that you use depends on your application type (Helm or Kubernetes manifest-based) and the installation methods that your application supports (Helm, KOTS v1.101.0 or later, or KOTS v1.100.3 or earlier). -## v2023.12.04-1 +* **Helm Applications**: For Helm applications, see the following guidance: -Released on December 4, 2023 + * **(Recommended) Helm or KOTS v1.101.0 or Later**: For Helm applications installed with Helm or KOTS v1.101.0 or later, define the preflight checks in a Kubernetes Secret in your Helm chart `templates`. See [Kubernetes Secret](#secret). -### New Features {#new-features-v2023-12-04-1} -* Adds the "installer support enabled" license option to the customer create and manage pages. This option is only visibile to vendors with the associated entitlement enabled. + * **KOTS v1.100.3 or Earlier**: For Helm applications installed with KOTS v1.100.3 or earlier, define the preflight checks in a Preflight custom resource. See [Preflight Custom Resource](#preflight-cr). -## v2023.12.01-4 +* **Kubernetes Manifest-Based Applications**: For Kubernetes manifest-based applications, define the preflight checks in a Preflight custom resource. See [Preflight Custom Resource](#preflight-cr). -Released on December 1, 2023 +### Kubernetes Secret {#secret} -### Improvements {#improvements-v2023-12-01-4} -* Unifies the Customers page search, sort, and filter results across all tabs. +For Helm applications installed with Helm or KOTS v1.101.0 or later, define preflight checks in a Kubernetes Secret in your Helm chart `templates`. This allows you to define the preflights spec only one time to support running preflight checks in both Helm and KOTS installations. -## v2023.11.29-3 +For a tutorial that demonstrates how to define preflight checks in a Secret in chart `templates` and then run the preflight checks in both Helm and KOTS installations, see [Tutorial: Add Preflight Checks to a Helm Chart](/vendor/tutorial-preflight-helm-setup). -Released on November 29, 2023 +Add the following YAML to a Kubernetes Secret in your Helm chart `templates` directory: -### Improvements {#improvements-v2023-11-29-3} -* Adds the ability to subscribe to custom metrics notifications. -* Splits notifications for "All" events into "App Status" and "System Events". 
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  labels:
+    troubleshoot.sh/kind: preflight
+  name: "{{ .Release.Name }}-preflight-config"
+stringData:
+  preflight.yaml: |
+    apiVersion: troubleshoot.sh/v1beta2
+    kind: Preflight
+    metadata:
+      name: preflight-sample
+    spec:
+      collectors: []
+      analyzers: []
+```

-## v2023.11.29-2 +As shown above, the Secret must include the following:

-Released on November 29, 2023

+* The label `troubleshoot.sh/kind: preflight`
+* A `stringData` field with a key named `preflight.yaml` so that the preflight binary can use this Secret when it runs from the CLI.

-### New Features {#new-features-v2023-11-29-2} -* Adds Custom Metrics timeseries graphing on the Instance Details page. +### Preflight Custom Resource {#preflight-cr}

-## v2023.11.29-0 +Define preflight checks in a Preflight custom resource for the following installation types:
+* Kubernetes manifest-based applications installed with any version of KOTS
+* Helm applications installed with KOTS v1.100.3 and earlier

+    :::note
+    For Helm charts installed with KOTS v1.101.0 and later, Replicated recommends that you define preflight checks in a Secret in the Helm chart `templates` instead of using the Preflight custom resource. See [Kubernetes Secret](#secret) above.

-Released on November 29, 2023

+    In KOTS v1.101.0 and later, preflights defined in the Helm chart override the Preflight custom resource used by KOTS. During installation, if KOTS v1.101.0 and later cannot find preflights specified in the Helm chart archive, then KOTS searches for `kind: Preflight` in the root of the release.
+    :::

-### Improvements {#improvements-v2023-11-29-0} -* Adds support for opening a new tab on right-click in the Application drop-down. +Add the following YAML to a new file in a release:

-### Bug Fixes {#bug-fixes-v2023-11-29-0} -* Fixes an issue that could prevent users from uploading support bundles on the Instance Insights page.

+```yaml
+apiVersion: troubleshoot.sh/v1beta2
+kind: Preflight
+metadata:
+  name: preflights
+spec:
+  collectors: []
+  analyzers: []
+```

-## v2023.11.28-1 +For more information about the Preflight custom resource, see [Preflight and Support Bundle](/reference/custom-resource-preflight).

-Released on November 28, 2023 +## Step 2: Define Collectors and Analyzers

-### Bug Fixes {#bug-fixes-v2023-11-28-1} -* Aligns the Helm icon with the Helm chart in the release editor. +This section describes how to define collectors and analyzers for preflight checks based on your application needs. You add the collectors and analyzers that you want to use in the `spec.collectors` and `spec.analyzers` keys in the manifest file that you created.

-## v2023.11.28-0 +### Collectors

-Released on November 28, 2023 +Collectors gather information from the cluster, the environment, the application, or other sources. Collectors generate output that is then used by the analyzers that you define to generate results for the preflight checks.

-### Bug Fixes {#bug-fixes-v2023-11-28-0} -* Fixes an issue that caused linter results to not be displayed when opening a KOTS release for editing. -* Fixes loading state on the Customers table view.
+The following default collectors are included automatically to gather information about the cluster and cluster resources:
+* [clusterInfo](https://troubleshoot.sh/docs/collect/cluster-info/)
+* [clusterResources](https://troubleshoot.sh/docs/collect/cluster-resources/)

-## v2023.11.27-1

-Released on November 27, 2023 +You do not need to manually include the `clusterInfo` or `clusterResources` collectors in the specification. To use only the `clusterInfo` and `clusterResources` collectors, delete the `spec.collectors` key from the preflight specification.

-### Bug Fixes {#bug-fixes-v2023-11-27-1} -* Fixes a bug with automatic air gap builds in the Channel Settings modal, where it would show false even if automatic air gap builds were enabled.

+The Troubleshoot open source project includes several additional collectors that you can include in the specification to gather more information from the installation environment. To view all the available collectors, see [All Collectors](https://troubleshoot.sh/docs/collect/all/) in the Troubleshoot documentation.

-## v2023.11.23-0 +### Analyzers

-Released on November 23, 2023 +Analyzers use the output from the collectors to generate results for the preflight checks, including the criteria for pass, fail, and warn outcomes and custom messages for each outcome.

-### New Features {#new-features-v2023-11-23-0} -* Supports multi-node kURL clusters up to 10 nodes with the compatibility matrix. +For example, in a preflight check that checks the version of Kubernetes running in the target cluster, the analyzer can define a fail outcome for when the cluster is running a Kubernetes version earlier than 1.25, with a custom message for the user such as: `The application requires Kubernetes 1.25.0 or later, and recommends 1.27.0`.

-## v2023.11.22-1

-Released on November 22, 2023 +The Troubleshoot open source project includes several analyzers that you can include in your preflight check specification. The following are some of the analyzers in the Troubleshoot project that use the default `clusterInfo` or `clusterResources` collectors:

-### Bug Fixes {#bug-fixes-v2023-11-22-1} -* Fixes an issue where the compatibility matrix kURL version displayed in the `create cluster` command was incorrect.

+* [clusterPodStatuses](https://troubleshoot.sh/docs/analyze/cluster-pod-statuses/)
+* [clusterVersion](https://troubleshoot.sh/docs/analyze/cluster-version/)
+* [deploymentStatus](https://troubleshoot.sh/docs/analyze/deployment-status/)
+* [distribution](https://troubleshoot.sh/docs/analyze/distribution/)
+* [nodeResources](https://troubleshoot.sh/docs/analyze/node-resources/)
+* [statefulsetStatus](https://troubleshoot.sh/docs/analyze/stateful-set-status/)
+* [storageClass](https://troubleshoot.sh/docs/analyze/storage-class/)

-## v2023.11.20-2 +To view all the available analyzers, see the [Analyze](https://troubleshoot.sh/docs/analyze/) section of the Troubleshoot documentation.

-Released on November 20, 2023 +### Block Installation with Required (Strict) Preflights {#strict}

-### Improvements {#improvements-v2023-11-20-2} -* Hides inactive instances from the Instances table view by default. Adds a checkbox to show inactive instances in the table. +For applications installed with KOTS, you can set any preflight analyzer to `strict: true`. When `strict: true` is set, any `fail` outcomes for the analyzer block the deployment of the release.

-## v2023.11.17-2

-Released on November 17, 2023 +:::note

-### Improvements {#improvements-v2023-11-17-2} -* Hides the 'NEW' badge on the Instances CSV download after it has been clicked.

+Strict preflight analyzers are ignored if the `exclude` property is also included and evaluates to `true`. See [exclude](https://troubleshoot.sh/docs/analyze/#exclude) in the Troubleshoot documentation.
+:::

-Released on November 20, 2023 +### Examples

-### Improvements {#improvements-v2023-11-20-2}
-* Hides inactive instances from the Instances table view by default. Adds a checkbox to show inactive instances in the table. +For common examples of collectors and analyzers used in preflight checks, see [Examples of Preflight Specs](/vendor/preflight-examples).

-## v2023.11.17-2 +================
+File: docs/vendor/preflight-examples.mdx
+================
+import HttpSecret from "../partials/preflights/_http-requests-secret.mdx"
+import HttpCr from "../partials/preflights/_http-requests-cr.mdx"
+import MySqlSecret from "../partials/preflights/_mysql-secret.mdx"
+import MySqlCr from "../partials/preflights/_mysql-cr.mdx"
+import K8sVersionSecret from "../partials/preflights/_k8s-version-secret.mdx"
+import K8sVersionCr from "../partials/preflights/_k8s-version-cr.mdx"
+import K8sDistroSecret from "../partials/preflights/_k8s-distro-secret.mdx"
+import K8sDistroCr from "../partials/preflights/_k8s-distro-cr.mdx"
+import NodeReqSecret from "../partials/preflights/_node-req-secret.mdx"
+import NodeReqCr from "../partials/preflights/_node-req-cr.mdx"
+import NodeCountSecret from "../partials/preflights/_node-count-secret.mdx"
+import NodeCountCr from "../partials/preflights/_node-count-cr.mdx"
+import NodeMemSecret from "../partials/preflights/_node-mem-secret.mdx"
+import NodeMemCr from "../partials/preflights/_node-mem-cr.mdx"
+import NodeStorageClassSecret from "../partials/preflights/_node-storage-secret.mdx"
+import NodeStorageClassCr from "../partials/preflights/_node-storage-cr.mdx"
+import NodeEphemStorageSecret from "../partials/preflights/_node-ephem-storage-secret.mdx"
+import NodeEphemStorageCr from "../partials/preflights/_node-ephem-storage-cr.mdx"
+import NodeCpuSecret from "../partials/preflights/_node-cpu-secret.mdx"
+import NodeCpuCr from "../partials/preflights/_node-cpu-cr.mdx"
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';

-Released on November 17, 2023 +# Example Preflight Specs

-### Improvements {#improvements-v2023-11-17-2}
-* Hides the 'NEW' badge on the Instances CSV download after it has been clicked. +This section includes common examples of preflight check specifications. For more examples, see the [Troubleshoot example repository](https://github.com/replicatedhq/troubleshoot/tree/main/examples/preflight) in GitHub.

-## v2023.11.15-0 +## Check HTTP or HTTPS Requests from the Cluster

-Released on November 15, 2023 +The examples below use the `http` collector and the `textAnalyze` analyzer to check that an HTTP request to the Slack API at `https://api.slack.com/methods/api.test` made from the cluster returns a successful response of `"status": 200,`.

-### Improvements {#improvements-v2023-11-15-0}
-* Saves Channels ordering, sorting, and hidden columns table settings when the user updates them.
-* Standardizes tooltips on Dashboard, Customers, and Channels pages.
-* Disallows adding a .zip file when uploading a bundle in the support request form. +For more information, see [HTTP](https://troubleshoot.sh/docs/collect/http/) and [Regular Expression](https://troubleshoot.sh/docs/analyze/regex/) in the Troubleshoot documentation.

-### Bug Fixes {#bug-fixes-v2023-11-15-0}
-* Fixes button alignment in empty state on the **Releases** page when the KOTS installer is not enabled.
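+A minimal sketch of the spec for this check is shown below. The `fileName` assumes that the output of the `http` collector named `slack` is stored as `slack.json`, and the same `spec` can be embedded in either a Secret or a Preflight custom resource, as described in [Defining Preflight Checks](preflight-defining):

+```yaml
+spec:
+  collectors:
+    - http:
+        collectorName: slack
+        get:
+          url: https://api.slack.com/methods/api.test
+  analyzers:
+    - textAnalyze:
+        checkName: Slack Accessible
+        fileName: slack.json
+        regex: '"status": 200,'
+        outcomes:
+          - pass:
+              when: "true"
+              message: Can access the Slack API
+          - fail:
+              when: "false"
+              message: Cannot access the Slack API
+```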

    The following shows how the pass outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:

+ [Image: Preflight checks in Admin Console showing pass message]
-## v2023.11.13-0 +## Check Kubernetes Version

-Released on November 13, 2023 +The examples below use the `clusterVersion` analyzer to check the version of Kubernetes running in the cluster. The `clusterVersion` analyzer uses data from the default `clusterInfo` collector. The `clusterInfo` collector is automatically included.

-### Improvements {#improvements-v2023-11-13-0}
-* Standardizes button styles on the Compatibility Matrix pages. +For more information, see [Cluster Version](https://troubleshoot.sh/docs/analyze/cluster-version/) and [Cluster Info](https://troubleshoot.sh/docs/collect/cluster-info/) in the Troubleshoot documentation.

-## v2023.11.10-1
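+A minimal sketch of this analyzer, using illustrative version thresholds (the same outcomes also appear in the Helm template example later in this document):

+```yaml
+spec:
+  analyzers:
+    - clusterVersion:
+        outcomes:
+          - fail:
+              when: "< 1.22.0"
+              message: The application requires at least Kubernetes 1.22.0, and recommends 1.23.0.
+              uri: https://kubernetes.io
+          - warn:
+              when: "< 1.23.0"
+              message: Your cluster meets the minimum version of Kubernetes, but we recommend you update to 1.23.0 or later.
+              uri: https://kubernetes.io
+          - pass:
+              message: Your cluster meets the recommended and required versions of Kubernetes.
+```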

    The following shows how the warn outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:

+ [Image: Preflight checks in Admin Console showing warning message]
-Released on November 10, 2023 +## Check Kubernetes Distribution

-### Improvements {#improvements-v2023-11-10-1}
-* Updates button styles on Troubleshoot, License Fields, Images, Kubernetes Installers, and Custom Domains.
-* Standardizes button styles on Team and Account Settings pages. +The examples below use the `distribution` analyzer to check the Kubernetes distribution of the cluster. The `distribution` analyzer uses data from the default `clusterInfo` collector. The `clusterInfo` collector is automatically included.

-## v2023.11.10-0 +For more information, see [Cluster Info](https://troubleshoot.sh/docs/collect/cluster-info/) and [Distribution](https://troubleshoot.sh/docs/analyze/distribution/) in the Troubleshoot documentation.

-Released on November 10, 2023
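+A minimal sketch of this analyzer; the distributions shown are illustrative:

+```yaml
+spec:
+  analyzers:
+    - distribution:
+        outcomes:
+          - fail:
+              when: "== docker-desktop"
+              message: The application does not support Docker Desktop clusters.
+          - pass:
+              when: "== eks"
+              message: EKS is a supported distribution.
+          - pass:
+              when: "== gke"
+              message: GKE is a supported distribution.
+          - warn:
+              message: Unable to determine the distribution of Kubernetes.
+```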

    The following shows how the pass outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:

+ [Image: Preflight checks in Admin Console showing pass message]
-### Improvements {#improvements-v2023-11-10-0}
-* Adds the ability to save table settings (column order, column visibility, sort by, page size) on Customer and Instances table. +## Check MySQL Version Using Template Functions
-* Standardizes button styles on Releases, Channels, and Customers pages.

-### Bug Fixes {#bug-fixes-v2023-11-10-0} +The examples below use the `mysql` collector and the `mysql` analyzer to check the version of MySQL running in the cluster.
-* Shows promoted channel(s) when viewing a KOTS release.

-## v2023.11.06-1 +For more information, see [Collect > MySQL](https://troubleshoot.sh/docs/collect/mysql/) and [Analyze > MySQL](https://troubleshoot.sh/docs/analyze/mysql/) in the Troubleshoot documentation.

-Released on November 6, 2023

    This example uses Helm template functions to render the credentials and connection details for the MySQL server that were supplied by the user. Additionally, it uses Helm template functions to create a conditional statement so that the MySQL collector and analyzer are included in the preflight checks only when MySQL is deployed, as indicated by a `.Values.global.mysql.enabled` field evaluating to true.

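    A minimal sketch of the kind of spec this example describes is shown below. The `global.mysql.*` value names are illustrative; adapt them to the values file for your chart:

    ```yaml
    {{- if .Values.global.mysql.enabled }}
    collectors:
      - mysql:
          collectorName: mysql
          uri: '{{ .Values.global.mysql.user }}:{{ .Values.global.mysql.password }}@tcp({{ .Values.global.mysql.host }}:{{ .Values.global.mysql.port }})/{{ .Values.global.mysql.database }}?tls=false'
    analyzers:
      - mysql:
          checkName: Must be MySQL 8.x or later
          collectorName: mysql
          outcomes:
            - fail:
                when: connected == false
                message: Cannot connect to MySQL server
            - fail:
                when: version < 8.x
                message: The MySQL server must be at least version 8
            - pass:
                message: The MySQL server is ready
    {{- end }}
    ```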

    For more information about using Helm template functions to access values from the values file, see [Values Files](https://helm.sh/docs/chart_template_guide/values_files/) in the Helm documentation.


    This example uses KOTS template functions in the Config context to render the credentials and connection details for the MySQL server that were supplied by the user on the Replicated Admin Console **Config** page. Replicated recommends using a template function for the URI, as shown in the example below, to avoid exposing sensitive information. For more information about template functions, see [About Template Functions](/reference/template-functions-about).

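    A minimal sketch of the kind of spec this example describes is shown below. The `db_uri` config option name is hypothetical, and `strict: true` is set at the analyzer level:

    ```yaml
    apiVersion: troubleshoot.sh/v1beta2
    kind: Preflight
    metadata:
      name: preflights
    spec:
      collectors:
        - mysql:
            collectorName: mysql
            uri: repl{{ ConfigOption "db_uri" }}
      analyzers:
        - mysql:
            checkName: Must be MySQL 8.x or later
            collectorName: mysql
            strict: true
            outcomes:
              - fail:
                  when: connected == false
                  message: Cannot connect to MySQL server
              - fail:
                  when: version < 8.x
                  message: The MySQL server must be at least version 8
              - pass:
                  message: The MySQL server is ready
    ```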

    This example also uses an analyzer with `strict: true`, which prevents installation from continuing if the preflight check fails.


    The following shows how a fail outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade when `strict: true` is set for the analyzer:

+ [Image: Strict preflight checks in Admin Console showing fail message]
-### Improvements {#improvements-v2023-11-06-1} +## Check Node Memory
-* Improves the way large amounts of custom metrics display on the Instance Details page, in both the Filters dropdown and the Custom Metrics section on the left.

-## v2023.11.03-1 +The examples below use the `nodeResources` analyzer to check the amount of memory available on the nodes in the cluster. The `nodeResources` analyzer uses data from the default `clusterResources` collector. The `clusterResources` collector is automatically included.

-Released on November 3, 2023 +For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) in the Troubleshoot documentation.

-### Bug Fixes {#bug-fixes-v2023-11-03-1}
-* Filters out "read" events in the audit log initial search query.
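+A minimal sketch of this check, using illustrative thresholds of 8 GB required and 32 GB recommended:

+```yaml
+spec:
+  analyzers:
+    - nodeResources:
+        checkName: Amount of memory on each node
+        outcomes:
+          - fail:
+              when: "min(memoryCapacity) < 8Gi"
+              message: All nodes must have at least 8 GB of memory.
+          - warn:
+              when: "min(memoryCapacity) < 32Gi"
+              message: All nodes are recommended to have at least 32 GB of memory.
+          - pass:
+              message: All nodes have at least 32 GB of memory.
+```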

    The following shows how a warn outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:

+ [Image: Preflight checks in Admin Console showing warn message]
-## v2023.11.03-2 +## Check Node Storage Class Availability

-Released on November 3, 2023 +The examples below use the `storageClass` analyzer to check that a required storage class is available in the cluster. The `storageClass` analyzer uses data from the default `clusterResources` collector. The `clusterResources` collector is automatically included.

-### Improvements {#improvements-v2023-11-03-2} +For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Storage Class](https://troubleshoot.sh/docs/analyze/storage-class/) in the Troubleshoot documentation.
-* Redirects user to the most recently managed application upon login.

-## v2023.10.30-3
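+A minimal sketch of this analyzer, assuming a storage class named `default`:

+```yaml
+spec:
+  analyzers:
+    - storageClass:
+        checkName: Required storage classes
+        storageClassName: default
+        outcomes:
+          - fail:
+              message: Could not find a storage class called "default".
+          - pass:
+              message: A storage class called "default" is present.
+```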

    The following shows how a fail outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:

+ [Image: Preflight checks in Admin Console showing fail message]
-Released on October 30, 2023 +## Check Node Ephemeral Storage

-### Bug Fixes {#bug-fixes-v2023-10-30-3} +The examples below use the `nodeResources` analyzer to check the ephemeral storage available in the nodes in the cluster. The `nodeResources` analyzer uses data from the default `clusterResources` collector. The `clusterResources` collector is automatically included.
-* Fixes style bug on the Audit Log page where the search input border is partially hidden.

-## v2023.10.30-2 +For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) in the Troubleshoot documentation.

-Released on October 30, 2023
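+A minimal sketch of this check, using illustrative thresholds of 40 GB required and 100 GB recommended:

+```yaml
+spec:
+  analyzers:
+    - nodeResources:
+        checkName: Ephemeral storage on each node
+        outcomes:
+          - fail:
+              when: "min(ephemeralStorageCapacity) < 40Gi"
+              message: All nodes must have at least 40 GB of ephemeral storage.
+          - warn:
+              when: "min(ephemeralStorageCapacity) < 100Gi"
+              message: All nodes are recommended to have at least 100 GB of ephemeral storage.
+          - pass:
+              message: All nodes have at least 100 GB of ephemeral storage.
+```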

    The following shows how a pass outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:

+ [Image: Preflight checks in Admin Console showing pass message]
-### Improvements {#improvements-v2023-10-30-2} +## Check Requirements Are Met By At Least One Node
-* Makes some columns hidden by default in the Instances view on the Customers page and updates column names.

-## v2023.10.30-1 +The examples below use the `nodeResources` analyzer with filters to check that the requirements for memory, CPU cores, and architecture are met by at least one node in the cluster. The `nodeResources` analyzer uses data from the default `clusterResources` collector. The `clusterResources` collector is automatically included.

-Released on October 30, 2023 +For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) in the Troubleshoot documentation.

-### Improvements {#improvements-v2023-10-30-1}
-* Updates styles on **Instance Details** page.
-* Updates tab styles throughout the vendor portal.
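+A minimal sketch of this check, using illustrative memory and CPU filter values (the `filters` field restricts the count to nodes that meet all of the listed requirements):

+```yaml
+spec:
+  analyzers:
+    - nodeResources:
+        checkName: At least one node must have 16 GB of memory and 5 CPU cores
+        filters:
+          allocatableMemory: 16Gi
+          cpuCapacity: "5"
+        outcomes:
+          - fail:
+              when: "count() < 1"
+              message: Cannot find a node with sufficient memory and CPU.
+          - pass:
+              message: Sufficient CPU and memory is available.
+```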

    The following shows how the fail outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:

+ [Image: Preflight checks in Admin Console showing fail message]
-## v2023.10.27-2 +## Check Total CPU Cores Across Nodes

-Released on October 27, 2023 +The examples below use the `nodeResources` analyzer to check the total number of CPU cores across all of the nodes in the cluster. The `nodeResources` analyzer uses data from the default `clusterResources` collector. The `clusterResources` collector is automatically included.

-### Improvements {#improvements-v2023-10-27-2} +For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) in the Troubleshoot documentation.
-* Standardizes breadcrumbs across the site.

-## v2023.10.27-1
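+A minimal sketch of this check, using an illustrative minimum of 4 total cores:

+```yaml
+spec:
+  analyzers:
+    - nodeResources:
+        checkName: Total CPU cores in the cluster is 4 or greater
+        outcomes:
+          - fail:
+              when: "sum(cpuCapacity) < 4"
+              message: The cluster must contain at least 4 cores.
+          - pass:
+              message: There are at least 4 cores in the cluster.
+```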

    The following shows how the pass outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:

+ [Image: Preflight checks in Admin Console showing pass message]
    -Released on October 27, 2023 +================ +File: docs/vendor/preflight-host-preflights.md +================ +# Customizing Host Preflight Checks for kURL -### Improvements {#improvements-v2023-10-27-1} -* Various style improvements to the **Images**, **Kubernetes Installer**, **Custom Domains**, and **App Settings** pages. +This topic provides information about how to customize host preflight checks for installations with Replicated kURL. For information about the default host preflight checks that run for installations with Replicated Embedded Cluster, see [About Host Preflight Checks](/vendor/embedded-using#about-host-preflight-checks) in _Using Embedded Cluster_. -## v2023.10.26-3 +## About Host Preflight Checks +You can include host preflight checks with kURL to verify that infrastructure requirements are met for: -Released on October 26, 2023 +- Kubernetes +- kURL add-ons +- Your application -### Improvements {#improvements-v2023-10-26-3} -* Various style improvements to the compatibility matrix **Cluster History**, **Customers**, **Troubleshoot**, and **License Fields** pages. +This helps to ensure successful installation and the ongoing health of the cluster. -## v2023.10.26-2 +While host preflights are intended to ensure requirements are met for running the cluster, you can also use them to codify some of your application requirements so that users get feedback even earlier in the installation process, rather than waiting to run preflights after the cluster is already installed. For more information about application checks, collectors, and analyzers, see [About Preflight Checks and Support Bundles](preflight-support-bundle-about). -Released on October 26, 2023 +Default host preflight checks verify conditions such as operating system and disk usage. Default host preflight failures block the installation from continuing and exit with a non-zero return code. Users can then update their environment and run the kURL installation script again to re-run the host preflight checks. -### Bug Fixes {#bug-fixes-v2023-10-26-2} -* Fixes query timeout issues with the `/events` API endpoint. +Host preflight checks run automatically. The default host preflight checks that run can vary, depending on whether the installation is new, an upgrade, joining a node, or an air gap installation. Additionally, some checks only run when certain add-ons are enabled in the installer. For a complete list of default host preflight checks, see [Default Host Preflights](https://kurl.sh/docs/install-with-kurl/host-preflights#default-host-preflights) in the kURL documentation. -## v2023.10.26-0 +There are general kURL host preflight checks that run with all installers. There are also host preflight checks included with certain add-ons. Customizations include the ability to: -Released on October 26, 2023 + - Bypass failures + - Block an installation for warnings + - Exclude certain preflights under specific conditions, such as when a particular license entitlement is enabled + - Skip the default host preflight checks and run only custom checks + - Add custom checks to the default host preflight checks -### Improvements {#improvements-v2023-10-26-0} -* Allows editing tags in the Cluster History table. -* Allows adding tags as separate columns in the Cluster History table. -* Shows some statistics at the top of the Cluster History table. +For more information about customizing host preflights, see [Customize Host Preflight Checks](#customize-host-preflight-checks). 
-## v2023.10.24-0 +## Customize Host Preflight Checks -Released on October 24, 2023 +The default host preflights run automatically as part of your kURL installation. You can customize the host preflight checks by disabling them entirely, adding customizations to the default checks to make them more restrictive, or completely customizing them. You can also customize the outcomes to enforce warnings or ignore failures. -### Improvements {#improvements-v2023-10-24-0} -* Adds links to release notes in the vendor portal. +### Add Custom Preflight Checks to the Defaults -## v2023.10.23-0 +To run customized host preflight checks in addition to the default host preflight checks, add a `hostPreflights` field to the `kurl` field in your Installer manifest. Under the `hostPreflights` field, add a host preflight specification (`kind: HostPreflight`) with your customizations. You only need to specify your customizations because the default host preflights run automatically. -Released on October 23, 2023 +Customized host preflight checks run in addition to default host preflight checks, if the default host preflight checks are enabled. -### Bug Fixes {#bug-fixes-v2023-10-23-0} -* Shows multiple instances for a single customer in the customer instance table view. +If you only want to make the default host preflight checks more restrictive, add your more restrictive host preflight checks to `kurl.hostPreflights`, and do not set `excludeBuiltinHostPreflights`. For example, if your application requires 6 CPUs but the default host preflight check requires 4 CPUs, you can simply add a custom host preflight check for 6 CPUs, since the default host preflight must pass if the more restrictive custom check passes. -## v2023.10.18-1 +The following example shows customized `kurl` host preflight checks for: -Released on October 18, 2023 + - An application that requires more CPUs than the default + - Accessing a website that is critical to the application -### New Features {#new-features-v2023-10-18-1} -* Compatibility matrix retries on an error provisioning a cluster up to 2 times for a total of 3 attempts before returning an error. +```yaml +apiVersion: "cluster.kurl.sh/v1beta1" +kind: "Installer" +metadata: + name: "latest" +spec: + kurl: + hostPreflights: + apiVersion: troubleshoot.sh/v1beta2 + kind: HostPreflight + spec: + collectors: + - cpu: {} + - http: + collectorName: Can Access A Website + get: + url: https://myFavoriteWebsite.com + analyzers: + - cpu: + checkName: Number of CPU check + outcomes: + - fail: + when: "count < 4" + message: This server has less than 4 CPU cores + - warn: + when: "count < 6" + message: This server has less than 6 CPU cores + - pass: + message: This server has at least 6 CPU cores + - http: + checkName: Can Access A Website + collectorName: Can Access A Website + outcomes: + - warn: + when: "error" + message: Error connecting to https://myFavoriteWebsite.com + - pass: + when: "statusCode == 200" + message: Connected to https://myFavoriteWebsite.com +``` -## v2023.10.18-0 +### Customize the Default Preflight Checks -Released on October 18, 2023 +To customize the default host preflights: -### Improvements {#improvements-v2023-10-18-0} -* Shows tags on the cluster and cluster history table. +1. Disable the default host preflight checks using `excludeBuiltinHostPreflights: true`. +1. 
Copy the default `host-preflights.yaml` specification for kURL from [host-preflights.yaml](https://github.com/replicatedhq/kURL/blob/main/pkg/preflight/assets/host-preflights.yaml) in the kURL repository.
+1. Copy the default `host-preflight.yaml` specification for any and all add-ons that are included in your specification and have default host preflights. For links to the add-on YAML files, see [Finding the Add-on Host Preflight Checks](https://kurl.sh/docs/create-installer/host-preflights/#finding-the-add-on-host-preflight-checks) in the kURL documentation.
+1. Merge the copied host preflight specifications into one host preflight specification, and paste it into the `kurl.hostPreflights` field in the Installer YAML in the Vendor Portal.
+1. Edit the defaults as needed.

-### Bug Fixes {#bug-fixes-v2023-10-18-0} +### Ignore or Enforce Warnings and Failures
-* Limits release size to 16MiB when compressed using the [Vendor API v3 to create a release](https://replicated-vendor-api.readme.io/reference/createrelease).
-* Shows error message if user encounters an error during application creation.
-* Fixes a bug that would allow creating accounts using an email address with trailing or leading white spaces.

-## v2023.10.16-0 +Set either of the following flags to customize the outcome of your host preflight checks:

-Released on October 16, 2023
| Flag: Value | Description |
|---|---|
| `hostPreflightIgnore: true` | Ignores host preflight failures and warnings. The installation proceeds regardless of host preflight outcomes. |
| `hostPreflightEnforceWarnings: true` | Blocks an installation if the results include a warning. |
    -### Improvements {#improvements-v2023-10-16-0} -* Adds table views for customers and instances on Customers page. +### Disable Host Preflight Checks -### Bug Fixes {#bug-fixes-v2023-10-16-0} -* Fixes a bug in the copy create cluster command. -* Fixes the "by" in cluster history to not show "web ui" most of the time. -* Fixes the display of cost in cluster history table. +To disable the default host preflight checks for Kubernetes and all included add-ons, add the `kurl` field to your Installer manifest and add `kurl.excludeBuiltinHostPreflights: true`. In this case, no host preflight checks are run. -## v2023.10.13-0 +`excludeBuiltinHostPreflights` is an aggregate flag, so setting it to `true` disables the default host preflights for Kubernetes and all included add-ons. -Released on October 13, 2023 +**Example:** -### Improvements {#improvements-v2023-10-13-0} -* Adds the name of the entity that created the cluster to the cluster page. -* Various design imporvements to the **Cluster History** page to improve the user experience. + ```yaml + apiVersion: "cluster.kurl.sh/v1beta1" + kind: "Installer" + metadata: + name: "latest" + spec: + kurl: + excludeBuiltinHostPreflights: true + ``` -## v2023.10.11-1 +## Example of Customized Host Preflight Checks -Released on October 11, 2023 +The following example shows: -### New Features {#new-features-v2023-10-11-1} -* Adds **Settings** page for the Compatibility Matrix, granting users the ability to access quota and capacity information and submit requests for increasing their quotas. +- Default host preflights checks are disabled +- Customized host preflight checks run +- The installation is blocked if there is a warning -### Improvements {#improvements-v2023-10-11-1} -* Adds updated table view for the **Channels** page. +```yaml +apiVersion: "cluster.kurl.sh/v1beta1" +kind: "Installer" +metadata: + name: "latest" +spec: + kurl: + excludeBuiltinHostPreflights: true + hostPreflightEnforceWarnings: true + hostPreflights: + apiVersion: troubleshoot.sh/v1beta2 + kind: HostPreflight + spec: + collectors: + - cpu: {} + - http: + collectorName: Can Access A Website + get: + url: https://myFavoriteWebsite.com + analyzers: + - cpu: + checkName: Number of CPU check + outcomes: + - fail: + when: "count < 4" + message: This server has less than 4 CPU cores + - warn: + when: "count < 6" + message: This server has less than 6 CPU cores + - pass: + message: This server has at least 6 CPU cores + - http: + checkName: Can Access A Website + collectorName: Can Access A Website + outcomes: + - warn: + when: "error" + message: Error connecting to https://myFavoriteWebsite.com + - pass: + when: "statuscode == 200" + message: Connected to https://myFavoriteWebsite.com + ``` -### Bug Fixes {#bug-fixes-v2023-10-11-1} -* Fixes an issue that could prevent users from logging in because they do not have an RBAC role assigned. -* Fixes bug on Dashboard where user was unable to delete a support bundle. -* Fixes bug on the Kubernetes Installer History page where breadcrumbs were not displaying correctly. +================ +File: docs/vendor/preflight-running.md +================ +# Running Preflight Checks for Helm Installations -## v2023.10.10-0 +This topic describes how to use the preflight kubectl plugin to run preflight checks for applications installed with the Helm CLI. 
-Released on October 10, 2023 +## Overview

-### Improvements {#improvements-v2023-10-10-0}
-* Adds a verification stage when provisioning bare metal clusters of type Kind, K3s, kURL, and HelmVM to check that the cluster is running and healthy. +For applications installed with the Helm CLI, your users can optionally run preflight checks using the open source preflight kubectl plugin before they run `helm install`.

-## v2023.10.09-1 +The preflight plugin requires a preflight check specification as input. For Helm chart-based applications, the specification is defined in a Secret in the Helm chart `templates` directory. For information about how to configure preflight checks for your application, see [Defining Preflight Checks](preflight-defining).

-Released on October 9, 2023 +To run preflight checks that are defined in your application Helm chart templates, your users run `helm template` to render the Helm chart templates and then provide the result to the preflight plugin as stdin. The preflight plugin automatically filters the stream of stdout from the `helm template` command to find and run any preflight specifications.

-### Bug Fixes {#bug-fixes-v2023.10.09-1} +## Prerequisite
-* Updates the icon for custom metrics events on the Instance Details pages.

-## v2023.10.09-0 +The preflight kubectl plugin is required to run preflight checks for Helm CLI installations. The preflight plugin is a client-side utility that adds a single binary to the path.

-Released on October 9, 2023 +To install the preflight plugin using krew, run the following command:

-### Improvements {#improvements-v2023.10.09-0} +```
-* Sets `false` as the default value for any new boolean license fields. +curl https://krew.sh/preflight | bash
-* Changes boolean license field options to a "True"/"False" dropdown on the **Customer Manage** and **Create Customer** pages. +```
+For information about the preflight plugin, including additional installation options, see [Getting Started](https://troubleshoot.sh/docs/) in the open source Troubleshoot documentation.

-================ +## Command
-File: docs/release-notes/rn-whats-new.md
-================ +```
--- +helm template HELM_CHART | kubectl preflight -
-pagination_next: null +```
-pagination_prev: null
--- +Where `HELM_CHART` is the Helm chart that contains the preflight specification.

-# Release Notes Overview +For all available options with this command, see [Run Preflight Checks using the CLI](https://troubleshoot.sh/docs/preflight/cli-usage/#options) in the open source Troubleshoot documentation.

-New features and improvements that have been added to Replicated are documented on a per component basis in the corresponding release notes section. Component updates may be released at any time following a continuous delivery model. +**Examples:**

-To view the component release notes, see the following:
-* [Embedded Cluster Release Notes](rn-embedded-cluster)
-* [KOTS Release Notes](rn-app-manager)
-* [kURL Release Notes](rn-kubernetes-installer)
-* [Replicated SDK Release Notes](rn-replicated-sdk)
-* [Vendor Platform Release Notes](rn-vendor-platform)

-For an overview of recent updates to the Replicated platform, see the monthly [Replicated Release Highlights blog](https://www.replicated.com/blog-tags/replicated-release-highlights).

-================
-File: docs/templates/procedure.md
-================
-# Page Title (Use a gerund. 
For example, "Creating...") +```
+helm template gitea-1.0.6.tgz | kubectl preflight -
+```
+```
+helm template gitea | kubectl preflight -
+```
+```
+helm template oci://myregistry.io/org/examplechart | kubectl preflight -
+```

- +## Run Preflight Checks from a Release

-[Introductory paragraph stating the business reason - what and why - a user would want to do this procedure.] +When you promote a release that contains one or more Helm charts, the Helm charts are automatically pushed to the Replicated registry. To run preflight checks before installing a release, your users must first log in to the Replicated registry where they can access your application Helm chart containing the preflight specification.

-**Prerequisites** +To run preflight checks from a release before installation:

-Complete the following items before you perform this task:
-* First item
-* Second item +1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Customers** page. Click on the name of the target customer.

-To [do this task]: [For example, "To create a customer license:"] +1. On the landing page for the customer, click **Helm install instructions**.

-1. Log in to the [vendor portal](https://vendor.replicated.com), and click **Customer > Create Customer**. + The **Helm install instructions** dialog opens:

- [Optional: Include a results step. Use only when a result is not obvious, such as in a complex UX flow like GitOps. For example, "A list of your applications displays and shows the status of GitOps integration for each application."] + ![Helm install instructions dialog with preflight checks](/images/helm-install-preflights.png)

-1. Edit the fields: + [View a larger version of this image](/images/helm-install-preflights.png)
| Name | Description |
|---|---|
| Field Name | Example description: The type of customer is used solely for reporting purposes. Their access to your app is not affected by the type you assign to them. Options: Development, Trial, Paid, Community. Default: Trial. For more information, see Creating a Customer. |
+1. Run the commands provided in the dialog:

 + 1. Run the first command to log in to the Replicated registry, where:
 + - `USERNAME` is the customer's email address.
 + - `PASSWORD` is the customer's license ID.

-1. Run the following command to export the blah blah blah: + **Example:**
 + ```
 + helm registry login registry.replicated.com --username example@companyname.com --password 1234abcd
 + ```

- ```
- kubectl kots pull UPSTREAM_URI --cluster CLUSTER_NAME
- ``` + 1. Run the second command to install the kubectl plugin with krew:

 + ```
 + curl https://krew.sh/preflight | bash
 + ```

- Replace: + 1. Run the third command to run preflight checks:

- - UPSTREAM_URI: With the URI for the application. + ```
- - CLUSTER_NAME: With the name of the kubeconfig cluster. + helm template oci://registry.replicated.com/APP_SLUG/CHANNEL/CHART | kubectl preflight -
 + ```
 + Where:
 + - `APP_SLUG` is the name of the application.
 + - `CHANNEL` is the lowercased name of the release channel.
 + - `CHART` is the name of the Helm chart.

- [Use a bulleted list for the placeholder text definitions unless you feel the list is too long and that a table would be cleaner. If you need to use a table, use the following table format: + **Examples:**
| Replace | With |
|---|---|
| UPSTREAM_URI | The URI for the application. |
| CLUSTER_NAME | The name of the kubeconfig cluster. |
    + ``` + helm template oci://registry.replicated.com/gitea-app/unstable/gitea | kubectl preflight - + ``` + ``` + helm template oci://registry.replicated.com/gitea-app/unstable/gitea --values values.yaml | kubectl preflight - + ``` -1. Click **Save Changes**. + For all available options with this command, see [Run Preflight Checks using the CLI](https://troubleshoot.sh/docs/preflight/cli-usage/#options) in the open source Troubleshoot documentation. -## Next Step + 1. (Optional) Run the fourth command to install the application. For more information, see [Installing with Helm](install-with-helm). -[Describe and link to the next task.] +## (Optional) Save Preflight Check Results -## Related Topics +The output of the preflight plugin shows the success, warning, or fail message for each preflight, depending on how they were configured. You can ask your users to send you the results of the preflight checks if needed. - +![Save output dialog](/images/helm-preflight-save-output.png) -[Example Related Topic Title](https://docs.replicated.com) +[View a larger version of this image](/images/helm-preflight-save-output.png) ================ -File: docs/templates/process-multiple-procedures.md +File: docs/vendor/preflight-sb-helm-templates-about.md ================ -# Page Title (Use a gerund. For example, "Creating...") +# Using Helm Templates in Specifications - +You can use Helm templates to configure collectors and analyzers for preflight checks and support bundles for Helm installations. -[Introductory paragraph stating the business reason why a user would want to do this process/workflow or tutorial.] +Helm templates can be useful when you need to: -## Prerequisites +- Run preflight checks based on certain conditions being true or false, such as the customer wants to use an external database. +- Pull in user-specific information from the values.yaml file, such as the version a customer is using for an external database. -These actions or items must be complete before you perform this task: -* First item -* Second item +You can also use Helm templating with the Troubleshoot template functions for the `clusterPodStatuses` analyzer. For more information, see [Helm and Troubleshoot Template Example](#troubleshoot). -## Task Heading (Start with verb. For example, "Create a Customer License") +## Helm Template Example -[Introductory sentence or two to explain the “what“ and “why“ of the task.] +In the following example, the `mysql` collector is included in a preflight check if the customer does not want to use the default MariaDB. This is indicated by the template `{{- if eq .Values.global.mariadb.enabled false -}}`. -To [do this task]: [For example, "To create a customer license:"] +This specification also takes the MySQL connection string information from the `values.yaml` file, indicated by the template `'{{ .Values.global.externalDatabase.user }}:{{ .Values.global.externalDatabase.password }}@tcp({{ .Values.global.externalDatabase.host }}:{{ .Values.global.externalDatabase.port }})/{{ .Values.global.externalDatabase.database }}?tls=false'` in the `uri` field. -1. Log in to the [vendor portal](https://vendor.replicated.com), and click **Customer > Create Customer**. +Additionally, the specification verifies the maximum number of nodes in the `values.yaml` file is not exceeded by including the template `'count() > {{ .Values.global.maxNodeCount }}'` for the `nodeResources` analyzer. - [Optional: include a results step. Use only when a result is not obvious. 
For example, "The Create a new customer page opens."] +```yaml +{{- define "preflight.spec" }} +apiVersion: troubleshoot.sh/v1beta2 +kind: Preflight +metadata: + name: preflight-sample +spec: + {{ if eq .Values.global.mariadb.enabled false }} + collectors: + - mysql: + collectorName: mysql + uri: '{{ .Values.global.externalDatabase.user }}:{{ .Values.global.externalDatabase.password }}@tcp({{ .Values.global.externalDatabase.host }}:{{ .Values.global.externalDatabase.port }})/{{ .Values.global.externalDatabase.database }}?tls=false' + {{ end }} + analyzers: + - nodeResources: + checkName: Node Count Check + outcomes: + - fail: + when: 'count() > {{ .Values.global.maxNodeCount }}' + message: "The cluster has more than {{ .Values.global.maxNodeCount }} nodes." + - pass: + message: You have the correct number of nodes. + - clusterVersion: + outcomes: + - fail: + when: "< 1.22.0" + message: The application requires at least Kubernetes 1.22.0, and recommends 1.23.0. + uri: https://kubernetes.io + - warn: + when: "< 1.23.0" + message: Your cluster meets the minimum version of Kubernetes, but we recommend you update to 1.23.0 or later. + uri: https://kubernetes.io + - pass: + message: Your cluster meets the recommended and required versions of Kubernetes. + {{ if eq .Values.global.mariadb.enabled false }} + - mysql: + checkName: Must be MySQL 8.x or later + collectorName: mysql + outcomes: + - fail: + when: connected == false + message: Cannot connect to MySQL server + - fail: + when: version < 8.x + message: The MySQL server must be at least version 8 + - pass: + message: The MySQL server is ready + {{ end }} +{{- end }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + app.kubernetes.io/managed-by: {{ .Release.Service | quote }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + troubleshoot.sh/kind: preflight + name: "{{ .Release.Name }}-preflight-config" +stringData: + preflight.yaml: | +{{- include "preflight.spec" . | indent 4 }} +``` -1. Edit the fields: +## Helm and Troubleshoot Template Example {#troubleshoot} - - - - - - - - - - - - - -
| Name | Description |
|---|---|
| Field Name | [Example description: The type of customer is used solely for reporting purposes. Their access to your app is not affected by the type you assign to them. Options: Development, Trial, Paid, Community. Default: Trial. For more information, see LINK.] |
| Field Name | Specifies the... |
    +You can also use Helm templates with the Troubleshoot template functions to automatically add the Pod name and namespace to a message when a `clusterPodStatuses` analyzer fails. For more information about the Troubleshoot template function, see [Cluster Pod Statuses](https://troubleshoot.sh/docs/analyze/cluster-pod-statuses/) in the Troubleshoot documentation. -1. Run the following command to export the blah blah blah: +When you add the `clusterPodStatuses` analyzer template function values (such as `{{ .Name }}`) to your Helm template, you must encapsulate the Helm template using \{\{ ` ` \}\} so that Helm does not expand it. - ``` - kubectl kots pull UPSTREAM_URI --cluster CLUSTER_NAME - ``` - Replace: +The following example shows an analyzer that uses Troubleshoot templates and the override for Helm: - - UPSTREAM_URI: With the URI for the application. - - CLUSTER_NAME: With the name of the kubeconfig cluster. +```yaml +# This is the support bundle config secret that will be used to generate the support bundle +apiVersion: v1 +kind: Secret +metadata: + labels: + troubleshoot.sh/kind: support-bundle + name: {{ .Release.Name }}-support-bundle + namespace: {{ .Release.Namespace }} +type: Opaque +stringData: + # This is the support bundle spec that will be used to generate the support bundle + # Notes: we use {{ .Release.Namespace }} to ensure that the support bundle is scoped to the release namespace + # We can use any of Helm's templating features here, including {{ .Values.someValue }} + support-bundle-spec: | + apiVersion: troubleshoot.sh/v1beta2 + kind: SupportBundle + metadata: + name: support-bundle + spec: + collectors: + - clusterInfo: {} + - clusterResources: {} + - logs: + selector: + - app=someapp + namespace: {{ .Release.Namespace }} + analyzers: + - clusterPodStatuses: + name: unhealthy + namespaces: + - default + - myapp-namespace + outcomes: + - fail: + when: "== CrashLoopBackOff" + message: {{ `Pod {{ .Namespace }}/{{ .Name }} is in a CrashLoopBackOff state.` }} + - fail: + when: "== ImagePullBackOff" + message: {{ `Pod {{ .Namespace }}/{{ .Name }} is in a ImagePullBackOff state.` }} + - fail: + when: "== Pending" + message: {{ `Pod {{ .Namespace }}/{{ .Name }} is in a Pending state.` }} + - fail: + when: "== Evicted" + message: {{ `Pod {{ .Namespace }}/{{ .Name }} is in a Evicted state.` }} + - fail: + when: "== Terminating" + message: {{ `Pod {{ .Namespace }}/{{ .Name }} is in a Terminating state.` }} + - fail: + when: "== Init:Error" + message: {{ `Pod {{ .Namespace }}/{{ .Name }} is in an Init:Error state.` }} + - fail: + when: "== Init:CrashLoopBackOff" + message: {{ `Pod {{ .Namespace }}/{{ .Name }} is in an Init:CrashLoopBackOff state.` }} + - fail: + when: "!= Healthy" # Catch all unhealthy pods. A pod is considered healthy if it has a status of Completed, or Running and all of its containers are ready. + message: {{ `Pod {{ .Namespace }}/{{ .Name }} is unhealthy with a status of {{ .Status.Reason }}.` }} +``` +================ +File: docs/vendor/preflight-support-bundle-about.mdx +================ +import Overview from "../partials/preflights/_preflights-sb-about.mdx" - [Use a bulleted list for the placeholder text definitions unless you feel the list is too long and that a table would be cleaner. If you need to use a table, use the following table format: +# About Preflight Checks and Support Bundles - - - - - - - - - - - - - -
| Replace | With |
|---|---|
| UPSTREAM_URI | The URI for the application. |
| CLUSTER_NAME | The name of the kubeconfig cluster. |
    +This topic provides an introduction to preflight checks and support bundles, which are provided by the [Troubleshoot](https://troubleshoot.sh/) open source project. -1. Click **Save Changes**. +## Overview -## (Optional) Task Heading 2 + - +Preflight checks and support bundles consist of _collectors_, _redactors_, and _analyzers_ that are defined in a YAML specification. When preflight checks or support bundles are executed, data is collected, redacted, then analyzed to provide insights to users, as illustrated in the following diagram: -[Introductory sentence or two to explain the “what“ and “why“ of the task. You can tell the user that the previous task must be completed first. For example: "After you create a blah blah, you can configure the... This helps you..."] +![Troubleshoot Workflow Diagram](/images/troubleshoot-workflow-diagram.png) -To [do this task]: +[View a larger version of this image](/images/troubleshoot-workflow-diagram.png) -1. Step -1. Step +For more information about each step in this workflow, see the sections below. +### Collect -## Task Heading 3 +During the collection phase, _collectors_ gather information from the cluster, the environment, the application, and other sources. -[Introductory sentence or two to explain the “what“ and “why“ of the task. You can tell the user that the previous task must be completed first. For example: "After you create a blah blah, you can configure the... This helps you..."] +The data collected depends on the types of collectors that are included in the preflight or support bundle specification. For example, the Troubleshoot project provides collectors that can gather information about the Kubernetes version that is running in the cluster, information about database servers, logs from pods, and more. -To [do this task]: +For more information, see the [Collect](https://troubleshoot.sh/docs/collect/) section in the Troubleshoot documentation. -1. Step -1. Step +### Redact -## Related Topics +During the redact phase, _redactors_ censor sensitive customer information from the data before analysis. By default, the following information is automatically redacted: - +For Replicated KOTS installations, it is also possible to add custom redactors to redact additional data. For more information, see the [Redact](https://troubleshoot.sh/docs/redact/) section in the Troubleshoot documentation. -[My Topic Title](https://docs.replicated.com) +### Analyze -================ -File: docs/templates/release-notes.md -================ -# Product Name Release Notes +During the analyze phase, _analyzers_ use the redacted data to provide insights to users. -## vX.X.X +For preflight checks, analyzers define the pass, fail, and warning outcomes, and can also display custom messages to the user. For example, you can define a preflight check that fails if the cluster's Kubernetes version does not meet the minimum version that your application supports. -Release Date: Month Day, Year +For support bundles, analyzers can be used to identify potential problems and share relevant troubleshooting guidance with users. Additionally, when a support bundle is uploaded to the Vendor Portal, it is extracted and automatically analyzed. The goal of analyzers in support bundles is to surface known issues or hints of what might be a problem to make troubleshooting easier. -### Kubernetes Compatibility +For more information, see the [Analyze](https://troubleshoot.sh/docs/analyze/) section in the Troubleshoot documentation. 
-This release is compatible with Kubernetes vX.X, vX.Y, and vX.Z. +## Preflight Checks -### Security Fixes -* Fix 1 -* Fix 2 +This section provides an overview of preflight checks, including how preflights are defined and run. -### New Features +### Overview -* New feature 1 -* New feature 2 +Preflight checks let you define requirements for the cluster where your application is installed. When run, preflight checks provide clear feedback to your customer about any missing requirements or incompatibilities in the cluster before they install or upgrade your application. For KOTS installations, preflight checks can also be used to block the deployment of the application if one or more requirements are not met. -### Improvements +Thorough preflight checks provide increased confidence that an installation or upgrade will succeed and help prevent support escalations. -* Improvement 1 -* Improvement 2 +### About Host Preflights {#host-preflights} -### Bug Fixes +_Host preflight checks_ automatically run during [Replicated Embedded Cluster](/vendor/embedded-overview) and [Replicated kURL](/vendor/kurl-about) installations on a VM or bare metal server. The purpose of host preflight checks is to verify that the user's installation environment meets the requirements of the Embedded Cluster or kURL installer, such as checking the number of CPU cores in the system, available disk space, and memory usage. If any of the host preflight checks fail, installation is blocked and a message describing the failure is displayed. -* Bug 1 -* Bug 2 +Host preflight checks are separate from any application-specific preflight checks that are defined in the release, which run in the Admin Console before the application is deployed with KOTS. Both Embedded Cluster and kURL have default host preflight checks that are specific to the requirements of the given installer. For kURL installations, it is possible to customize the default host preflight checks. -### Known Issues +For more information about the default Embedded Cluster host preflight checks, see [Host Preflight Checks](/vendor/embedded-using#about-host-preflight-checks) in _Using Embedded Cluster_. -* Known issue 1 -* Known issue 2 +For more information about kURL host preflight checks, including information about how to customize the defaults, see [Customizing Host Preflight Checks for kURL](/vendor/preflight-host-preflights). -================ -File: docs/vendor/admin-console-adding-buttons-links.mdx -================ -# Adding Links to the Dashboard +### Defining Preflights -This topic describes how to use the Kubernetes SIG Application custom resource to add links to the Replicated KOTS Admin Console dashboard. +To add preflight checks for your application, create a Preflight YAML specification that defines the collectors and analyzers that you want to include. -## Overview +For information about how to add preflight checks to your application, including examples, see [Defining Preflight Checks](preflight-defining). -Replicated recommends that every application include a Kubernetes SIG Application custom resource. The Kubernetes SIG Application custom resource provides a standard API for creating, viewing, and managing applications. For more information, see [Kubernetes Applications](https://github.com/kubernetes-sigs/application#kubernetes-applications) in the kubernetes-sigs GitHub repository. 
+### Blocking Installation with Required (Strict) Preflights -You can include the Kubernetes SIG Application custom resource in your releases to add links to the Admin Console dashboard. Common use cases include adding links to documentation, dashboards, or a landing page for the application. +For applications installed with KOTS, it is possible to block the deployment of a release if a preflight check fails. This is helpful when it is necessary to prevent an installation or upgrade from continuing unless a given requirement is met. -For example, the following shows an **Open App** button on the dashboard of the Admin Console for an application named Gitea: +You can add required preflight checks for an application by including `strict: true` for the target analyzer in the preflight specification. For more information, see [Block Installation with Required Preflights](preflight-defining#strict) in _Defining Preflight Checks_. -Admin Console dashboard with Open App link +### Running Preflights -[View a larger version of this image](/images/gitea-open-app.png) +This section describes how users can run preflight checks for KOTS and Helm installations. -:::note -KOTS uses the Kubernetes SIG Application custom resource as metadata and does not require or use an in-cluster controller to handle this custom resource. An application that follows best practices does not require cluster admin privileges or any cluster-wide components to be installed. -::: +#### Replicated Installations -## Add a Link +For Replicated installations with Embedded Cluster, KOTS, or kURL, preflight checks run automatically as part of the installation process. The results of the preflight checks are displayed either in the KOTS Admin Console or in the KOTS CLI, depending on the installation method. -To add a link to the Admin Console dashboard, include a [Kubernetes SIG Application](https://github.com/kubernetes-sigs/application#kubernetes-applications) custom resource in the release with a `spec.descriptor.links` field. The `spec.descriptor.links` field is an array of links that are displayed on the Admin Console dashboard after the application is deployed. +Additionally, users can access preflight checks from the Admin Console after installation to view their results and optionally re-run the checks. -Each link in the `spec.descriptor.links` array contains two fields: -* `description`: The link text that will appear on the Admin Console dashboard. -* `url`: The target URL. +The following shows an example of the results of preflight checks displayed in the Admin Console during installation: -For example: +![Preflight results in Admin Console](/images/preflight-warning.png) -```yaml -# app.k8s.io/v1beta1 Application Custom resource +[View a larger version of this image](/images/preflight-warning.png) -apiVersion: app.k8s.io/v1beta1 -kind: Application -metadata: - name: "gitea" -spec: - descriptor: - links: - - description: About Wordpress - url: "https://wordpress.org/" -``` +#### Helm Installations -When the application is deployed, the "About Wordpress" link is displayed on the Admin Console dashboard as shown below: +For installations with Helm, the preflight kubectl plugin is required to run preflight checks. The preflight plugin is a client-side utility that adds a single binary to the path. For more information, see [Getting Started](https://troubleshoot.sh/docs/) in the Troubleshoot documentation. 
-About Wordpress link on the Admin Console dashboard +Users can optionally run preflight checks before they run `helm install`. The results of the preflight checks are then displayed through the CLI, as shown in the example below: -[View a larger version of this image](/images/dashboard-link-about-wordpress.png) - -For an additional example of a Kubernetes SIG Application custom resource, see [application.yaml](https://github.com/kubernetes-sigs/application/blob/master/docs/examples/wordpress/application.yaml) in the kubernetes-sigs GitHub repository. - -### Create URLs with User-Supplied Values Using KOTS Template Functions {#url-template} - -You can use KOTS template functions to template URLs in the Kubernetes SIG Application custom resource. This can be useful when all or some of the URL is a user-supplied value. For example, an application might allow users to provide their own ingress controller or load balancer. In this case, the URL can be templated to render the hostname that the user provides on the Admin Console Config screen. - -The following examples show how to use the KOTS [ConfigOption](/reference/template-functions-config-context#configoption) template function in the Kubernetes SIG Application custom resource `spec.descriptor.links.url` field to render one or more user-supplied values: - -* In the example below, the URL hostname is a user-supplied value for an ingress controller that the user configures during installation. - - ```yaml - apiVersion: app.k8s.io/v1beta1 - kind: Application - metadata: - name: "my-app" - spec: - descriptor: - links: - - description: Open App - url: 'http://{{repl ConfigOption "ingress_host" }}' - ``` -* In the example below, both the URL hostname and a node port are user-supplied values. It might be necessary to include a user-provided node port if you are exposing NodePort services for installations on VMs or bare metal servers with [Replicated Embedded Cluster](/vendor/embedded-overview) or [Replicated kURL](/vendor/kurl-about). +![Save output dialog](/images/helm-preflight-save-output.png) - ```yaml - apiVersion: app.k8s.io/v1beta1 - kind: Application - metadata: - name: "my-app" - spec: - descriptor: - links: - - description: Open App - url: 'http://{{repl ConfigOption "hostname" }}:{{repl ConfigOption "node_port"}}' - ``` +[View a larger version of this image](/images/helm-preflight-save-output.png) -For more information about working with KOTS template functions, see [About Template Functions](/reference/template-functions-about). +For more information, see [Running Preflight Checks for Helm Installations](preflight-running). -================ -File: docs/vendor/admin-console-customize-app-icon.md -================ -# Customizing the Application Icon +## Support Bundles -You can add a custom application icon that displays in the Replicated Admin Console and the download portal. Adding a custom icon helps ensure that your brand is reflected for your customers. +This section provides an overview of support bundles, including how support bundles are customized and generated. -:::note -You can also use a custom domain for the download portal. For more information, see [About Custom Domains](custom-domains). -::: +### Overview -## Add a Custom Icon +Support bundles collect and analyze troubleshooting data from customer environments, helping both users and support teams diagnose problems with application deployments. 
-For information about how to choose an image file for your custom application icon that displays well in the Admin Console, see [Icon Image File Recommendations](#icon-image-file-recommendations) below. +Support bundles can collect a variety of important cluster-level data from customer environments, such as: +* Pod logs +* Node resources and status +* The status of replicas in a Deployment +* Cluster information +* Resources deployed to the cluster +* The history of Helm releases installed in the cluster -To add a custom application icon: +Support bundles can also be used for more advanced use cases, such as checking that a command successfully executes in a pod in the cluster, or that an HTTP request returns a succesful response. -1. In the [Vendor Portal](https://vendor.replicated.com/apps), click **Releases**. Click **Create release** to create a new release, or click **Edit YAML** to edit an existing release. -1. Create or open the Application custom resource manifest file. An Application custom resource manifest file has `apiVersion: kots.io/v1beta1` and `kind: Application`. +Support bundles then use the data collected to provide insights to users on potential problems or suggested troubleshooting steps. The troubleshooting data collected and analyzed by support bundles not only helps users to self-resolve issues with their application deployment, but also helps reduce the amount of time required by support teams to resolve requests by ensuring they have access to all the information they need up front. -1. In the preview section of the Help pane: +### About Host Support Bundles - 1. If your Application manifest file is already populated with an `icon` key, the icon displays in the preview. Click **Preview a different icon** to access the preview options. +For installations on VMs or bare metal servers with [Replicated Embedded Cluster](/vendor/embedded-overview) or [Replicated kURL](/vendor/kurl-about), it is possible to generate a support bundle that includes host-level information to help troubleshoot failures related to host configuration like DNS, networking, or storage problems. - 1. Drag and drop an icon image file to the drop zone. Alternatively, paste a link or Base64 encoded data URL in the text box. Click **Preview**. +For Embedded Cluster installations, a default spec can be used to generate support bundles that include cluster- and host-level information. See [Generating Host Bundles for Embedded Cluster](/vendor/support-bundle-embedded). - ![Application icon preview](/images/app-icon-preview.png) +For kURL installations, vendors can customize a host support bundle spec for their application. See [Generating Host Bundles for kURL](/vendor/support-host-support-bundles). - 1. (Air gap only) If you paste a link to the image in the text box, click **Preview** and **Base64 encode icon** to convert the image to a Base64 encoded data URL. An encoded URL displays that you can copy and paste into the Application manifest. Base64 encoding is required for images used with air gap installations. +### Customizing Support Bundles - :::note - If you pasted a Base64 encoded data URL into the text box, the **Base64 encode icon** button does not display because the image is already encoded. If you drag and drop an icon, the icon is automatically encoded for you. - ::: +To enable support bundles for your application, add a support bundle YAML specification to a release. An empty support bundle specification automatically includes several default collectors and analzyers. 
You can also optionally customize the support bundle specification by adding, removing, or editing collectors and analyzers.
-        ![Base64 encode image button](/images/app-icon-preview-base64.png)
+For more information, see [Adding and Customizing Support Bundles](support-bundle-customizing).

-    1. Click **Preview a different icon** to preview a different icon if needed.
+### Generating Support Bundles

-1. In the Application manifest, under `spec`, add an `icon` key that includes a link or the Base64 encoded data URL to the desired image.
+Users generate support bundles as `tar.gz` files from the command line, using the support-bundle kubectl plugin. Your customers can share their support bundles with your team by sending you the resulting `tar.gz` file.

-    **Example**:
+KOTS users can also generate and share support bundles from the KOTS Admin Console.

-    ```yaml
-    apiVersion: kots.io/v1beta1
-    kind: Application
-    metadata:
-      name: my-application
-    spec:
-      title: My Application
-      icon: https://kots.io/images/kotsadm-logo-large@2x.png
-    ```
-1. Click **Save Release**.
+For more information, see [Generating Support Bundles](support-bundle-generating).

+================
+File: docs/vendor/private-images-about.md
+================
+# About the Replicated Proxy Registry

-## Icon Image File Recommendations
+This topic describes how the Replicated proxy registry can be used to grant proxy access to your application's private images or allow pull-through access of public images.

-For your custom application icon to look best in the Admin Console, consider the following recommendations:
+## Overview

-* Use a PNG or JPG file.
-* Use an image that is at least 250 by 250 pixels.
-* Export the image file at 2x.
+If your application images are available in a private image registry exposed to the internet such as Docker Hub or Amazon Elastic Container Registry (ECR), then the Replicated proxy registry can grant proxy, or _pull-through_, access to the images without exposing registry credentials to your customers. When you use the proxy registry, you do not have to modify the process that you already use to build and push images to deploy your application.

-================
-File: docs/vendor/admin-console-customize-config-screen.md
-================
-# Creating and Editing Configuration Fields
+To grant proxy access, the proxy registry uses the customer licenses that you create in the Replicated vendor portal. This allows you to revoke a customer’s ability to pull private images by editing their license, rather than having to manage image access through separate identity or authentication systems. For example, when a trial license expires, the customer's ability to pull private images is automatically revoked.

-This topic describes how to use the KOTS Config custom resource manifest file to add and edit fields in the KOTS Admin Console configuration screen.
+The following diagram demonstrates how the proxy registry pulls images from your external registry, and how deployed instances of your application pull images from the proxy registry:

-## About the Config Custom Resource
+![Proxy registry workflow diagram](/images/private-registry-diagram.png)

-Applications distributed with Replicated KOTS can include a configuration screen in the Admin Console to collect required or optional values from your users that are used to run your application. For more information about the configuration screen, see [About the Configuration Screen](config-screen-about).
+[View a larger version of this image](/images/private-registry-diagram-large.png)

-To include a configuration screen in the Admin Console for your application, you add a Config custom resource manifest file to a release for the application.
+## About Enabling the Proxy Registry

-You define the fields that appear on the configuration screen as an array of `groups` and `items` in the Config custom resource:
-  * `groups`: A set of `items`. Each group must have a `name`, `title`, `description`, and `items`. For example, you can create a group of several user input fields that are all related to configuring an SMTP mail server.
-  * `items`: An array of user input fields. Each array under `items` must have a `name`, `title`, and `type`. You can also include several optional properties. For example, in a group for configuring an SMTP mail server, you can have user input fields under `items` for the SMTP hostname, port, username, and password.
+The proxy registry requires read-only credentials to your private registry to access your application images. See [Connecting to an External Registry](/vendor/packaging-private-images).

-    There are several types of `items` supported in the Config manifest that allow you to collect different types of user inputs. For example, you can use the `password` input type to create a text field on the configuration screen that hides user input.
+After connecting your registry, the steps to enable the proxy registry vary depending on your application deployment method. For more information, see:
+* [Using the Proxy Registry with KOTS Installations](/vendor/private-images-kots)
+* [Using the Proxy Registry with Helm Installations](/vendor/helm-image-registry)

-For more information about the syntax of the Config custom resource manifest, see [Config](/reference/custom-resource-config).
+## About Allowing Pull-Through Access of Public Images

-## About Regular Expression Validation
+Using the Replicated proxy registry to grant pull-through access to public images can simplify network access requirements for your customers, as they only need to whitelist a single domain (either `proxy.replicated.com` or your custom domain) instead of multiple registry domains.

-You can use [RE2 regular expressions](https://github.com/google/re2/wiki/Syntax) (regex) to validate user input for config items, ensuring conformity to certain standards, such as valid email addresses, password complexity rules, IP addresses, and URLs. This prevents users from deploying an application with a verifiably invalid configuration.
+For more information about how to pull public images through the proxy registry, see [Connecting to a Public Registry through the Proxy Registry](/vendor/packaging-public-images).

-You add the `validation`, `regex`, `pattern` and `message` fields to items in the Config custom resource. Validation is supported for `text`, `textarea`, `password` and `file` config item types. For more information about regex validation fields, see [Item Validation](/reference/custom-resource-config#item-validation) in _Config_.
+================
+File: docs/vendor/private-images-kots.mdx
+================
+import Deprecated from "../partials/helm/_replicated-deprecated.mdx"
+import StepCreds from "../partials/proxy-service/_step-creds.mdx"
+import StepCustomDomain from "../partials/proxy-service/_step-custom-domain.mdx"

-The following example shows a common password complexity rule:
+# Using the Proxy Registry with KOTS Installations

-```
-- name: smtp-settings
-  title: SMTP Settings
-  items:
-  - name: smtp_password
-    title: SMTP Password
-    type: password
-    help_text: Set SMTP password
-    validation:
-      regex:
-        pattern: ^(?:[\w@#$%^&+=!*()_\-{}[\]:;"'<>,.?\/|]){8,16}$
-        message: The password must be between 8 and 16 characters long and can contain a combination of uppercase letter, lowercase letters, digits, and special characters.
-```
+This topic describes how to use the Replicated proxy registry with applications deployed with Replicated KOTS.

-## Add Fields to the Configuration Screen
+## Overview

-To add fields to the Admin Console configuration screen:
+Replicated KOTS automatically creates the required image pull secret for accessing the Replicated proxy registry during application deployment. When possible, KOTS also automatically rewrites image names in the application manifests to the location of the image at `proxy.replicated.com` or your custom domain.

-1. In the [Vendor Portal](https://vendor.replicated.com/apps), click **Releases**. Then, either click **Create release** to create a new release, or click **Edit YAML** to edit an existing release.
-1. Create or open the Config custom resource manifest file in the desired release. A Config custom resource manifest file has `kind: Config`.
-1. In the Config custom resource manifest file, define custom user-input fields in an array of `groups` and `items`.
+### Image Pull Secret

-    **Example**:
+During application deployment, KOTS automatically creates an `imagePullSecret` with `type: kubernetes.io/dockerconfigjson` that is based on the customer license. This secret is used to authenticate with the proxy registry and grant proxy access to private images.

-    ```yaml
-    apiVersion: kots.io/v1beta1
-    kind: Config
-    metadata:
-      name: my-application
-    spec:
-      groups:
-      - name: smtp_settings
-        title: SMTP Settings
-        description: Configure SMTP Settings
-        items:
-        - name: enable_smtp
-          title: Enable SMTP
-          help_text: Enable SMTP
-          type: bool
-          default: "0"
-        - name: smtp_host
-          title: SMTP Hostname
-          help_text: Set SMTP Hostname
-          type: text
-        - name: smtp_port
-          title: SMTP Port
-          help_text: Set SMTP Port
-          type: text
-        - name: smtp_user
-          title: SMTP User
-          help_text: Set SMTP User
-          type: text
-        - name: smtp_password
-          title: SMTP Password
-          type: password
-          default: 'password'
-    ```
+For information about how Kubernetes uses the `kubernetes.io/dockerconfigjson` Secret type to authenticate to a private image registry, see [Pull an Image from a Private Registry](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) in the Kubernetes documentation.

-    The example above includes a single group with the name `smtp_settings`.
+### Image Location Patching (Standard Manifests and HelmChart v1)

+For applications packaged with standard Kubernetes manifests (or Helm charts deployed with the [HelmChart v1](/reference/custom-resource-helmchart) custom resource), KOTS automatically patches image names to the location of the image at `proxy.replicated.com` or your custom domain during deployment. If KOTS receives a 401 response when attempting to load image manifests using the image reference from the PodSpec, it assumes that this is a private image that must be proxied through the proxy registry.

-    The `items` array for the `smtp_settings` group includes the following user-input fields: `enable_smtp`, `smtp_host`, `smtp_port`, `smtp_user`, and `smtp_password`. Additional item properties are available, such as `affix` to make items appear horizontally on the same line.
For more information about item properties, see [Item Properties](/reference/custom-resource-config#item-properties) in Config. +For applications packaged with standard Kubernetes manifests (or Helm charts deployed with the [HelmChart v1](/reference/custom-resource-helmchart) custom resource), KOTS automatically patches image names to the location of the image at at `proxy.replicated.com` or your custom domain during deployment. If KOTS receives a 401 response when attempting to load image manifests using the image reference from the PodSpec, it assumes that this is a private image that must be proxied through the proxy registry. - The following screenshot shows how the SMTP Settings group from the example YAML above displays in the Admin Console configuration screen during application installation: +KOTS uses Kustomize to patch the `midstream/kustomization.yaml` file to change the image name during deployment to reference the proxy registry. For example, a PodSpec for a Deployment references a private image hosted at `quay.io/my-org/api:v1.0.1`: - ![User input fields on the configuration screen for the SMTP settings](/images/config-screen-smtp-example-large.png) +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: example +spec: + template: + spec: + containers: + - name: api + image: quay.io/my-org/api:v1.0.1 +``` -1. (Optional) Add default values for the fields. You can add default values using one of the following properties: - * **With the `default` property**: When you include the `default` key, KOTS uses this value when rendering the manifest files for your application. The value then displays as a placeholder on the configuration screen in the Admin Console for your users. KOTS only uses the default value if the user does not provide a different value. +When this application is deployed, KOTS detects that it cannot access +the image at quay.io. So, it creates a patch in the `midstream/kustomization.yaml` +file that changes the image name in all manifest files for the application. This causes the container runtime in the cluster to use the proxy registry to pull the images, using the license information provided to KOTS for authentication. - :::note - If you change the `default` value in a later release of your application, installed instances of your application receive the updated value only if your users did not change the default from what it was when they initially installed the application. +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +bases: +- ../../base +images: +- name: quay.io/my-org/api:v1.0.1 + newName: proxy.replicated.com/proxy/my-kots-app/quay.io/my-org/api +``` - If a user did change a field from its default, the Admin Console does not overwrite the value they provided. - ::: +## Enable the Proxy Registry - * **With the `value` property**: When you include the `value` key, KOTS does not overwrite this value during an application update. The value that you provide for the `value` key is visually indistinguishable from other values that your user provides on the Admin Console configuration screen. KOTS treats user-supplied values and the value that you provide for the `value` key as the same. +This section describes how to enable the proxy registry for applications deployed with KOTS, including how to ensure that image names are rewritten and that the required image pull secret is provided. -2. (Optional) Add regular expressions to validate user input for `text`, `textarea`, `password` and `file` config item types. 
For more information, see [About Regular Expression Validation](#about-regular-expression-validation).
+To enable the proxy registry:

-    **Example**:
+1. 

-    ```yaml
-    - name: smtp_host
-      title: SMTP Hostname
-      help_text: Set SMTP Hostname
-      type: text
-      validation:
-        regex:
-          pattern: ^[a-zA-Z]([a-zA-Z0-9\-]+[\.]?)*[a-zA-Z0-9]$
-          message: Valid hostname starts with a letter (uppercase/lowercase), followed by zero or more groups of letters (uppercase/lowercase), digits, or hyphens, optionally followed by a period. Ends with a letter or digit.
-    ```
-3. (Optional) Mark fields as required by including `required: true`. When there are required fields, the user is prevented from proceeding with the installation until they provide a valid value for required fields.
+1. 

-    **Example**:
+1. Rewrite image names to the location of the image at `proxy.replicated.com` or your custom domain. Also, ensure that the correct image pull secret is provided for all private images. The steps required to configure image names and add the image pull secret vary depending on your application type:

-    ```yaml
-    - name: smtp_password
-      title: SMTP Password
-      type: password
-      required: true
-    ```
+   * **HelmChart v2**: For Helm charts deployed with the [HelmChart v2](/reference/custom-resource-helmchart-v2) custom resource, configure the HelmChart v2 custom resource to dynamically update image names in your Helm chart and to inject the image pull secret that is automatically created by KOTS. For instructions, see [Configuring the HelmChart Custom Resource v2](/vendor/helm-native-v2-using).

-4. Save and promote the release to a development environment to test your changes.
+   * **Standard Manifests or HelmChart v1**: For standard manifest-based applications or Helm charts deployed with the [HelmChart v1](/reference/custom-resource-helmchart) custom resource, no additional configuration is required. KOTS automatically rewrites image names and injects image pull secrets during deployment for these application types.

-## Next Steps
+   :::note
+   
+   :::

-After you add user input fields to the configuration screen, you use template functions to map the user-supplied values to manifest files in your release. If you use a Helm chart for your application, you map the values to the Helm chart `values.yaml` file using the HelmChart custom resource.
+   * **Kubernetes Operators**: For applications packaged with Kubernetes Operators, KOTS cannot modify pods that are created at runtime by the Operator. To support the use of private images in all environments, the Operator code should use KOTS functionality to determine the image name and image pull secrets for all pods when they are created. For instructions, see [Referencing Images](/vendor/operator-referencing-images) in the _Packaging Kubernetes Operators_ section.

-For more information, see [Mapping User-Supplied Values](config-screen-map-inputs).
+1. If you are deploying Pods to namespaces other than the application namespace, add the namespace to the `additionalNamespaces` attribute of the KOTS Application custom resource. This ensures that KOTS can provision the `imagePullSecret` in the namespace to allow the Pod to pull the image. For instructions, see [Defining Additional Namespaces](operator-defining-additional-namespaces).
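   As an illustration, the following sketch shows a KOTS Application custom resource with `additionalNamespaces` (the namespace name is a placeholder):

   ```yaml
   apiVersion: kots.io/v1beta1
   kind: Application
   metadata:
     name: my-application
   spec:
     # KOTS provisions the imagePullSecret in these namespaces
     # in addition to the application namespace
     additionalNamespaces:
       - my-other-namespace
   ```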
================
-File: docs/vendor/admin-console-display-app-status.md
+File: docs/vendor/private-images-replicated.mdx
================
-import StatusesTable from "../partials/status-informers/_statusesTable.mdx"
-import AggregateStatus from "../partials/status-informers/_aggregateStatus.mdx"
-import AggregateStatusIntro from "../partials/status-informers/_aggregate-status-intro.mdx"
-import SupportedResources from "../partials/instance-insights/_supported-resources-status.mdx"
-
-# Adding Resource Status Informers
-
-This topic describes how to add status informers for your application. Status informers apply only to applications installed with Replicated KOTS. For information about how to collect application status data for applications installed with Helm, see [Enabling and Understanding Application Status](insights-app-status).
+import TeamTokenNote from "../partials/vendor-api/_team-token-note.mdx"

-## About Status Informers
+# Using the Replicated Registry for KOTS Installations

-_Status informers_ are a feature of KOTS that report on the status of supported Kubernetes resources deployed as part of your application. You enable status informers by listing the target resources under the `statusInformers` property in the Replicated Application custom resource. KOTS watches all of the resources that you add to the `statusInformers` property for changes in state.
+This topic describes how to push images to the Replicated private registry.

-Possible resource statuses are Ready, Updating, Degraded, Unavailable, and Missing. For more information, see [Understanding Application Status](#understanding-application-status).
+## Overview

-When you add one or more status informers to your application, KOTS automatically does the following:
+For applications installed with KOTS, you can host private images on the Replicated registry. Hosting your images on the Replicated registry is useful if you do not already have your images in an existing private registry. It is also useful for testing purposes.

-* Displays application status for your users on the dashboard of the Admin Console. This can help users diagnose and troubleshoot problems with their instance. The following shows an example of how an Unavailable status displays on the Admin Console dashboard:
+Images pushed to the Replicated registry are displayed on the **Images** page in the Vendor Portal:

-    Unavailable status on the Admin Console dashboard
+![Replicated Private Registry section of the vendor portal Images page](/images/images-replicated-registry.png)

-* Sends application status data to the Vendor Portal. This is useful for viewing insights on instances of your application running in customer environments, such as the current status and the average uptime. For more information, see [Instance Details](instance-insights-details).
+[View a larger version of this image](/images/images-replicated-registry.png)

-    The following shows an example of the Vendor Portal **Instance details** page with data about the status of an instance over time:
+For information about security for the Replicated registry, see [Replicated Registry Security](packaging-private-registry-security).

-    Instance details full page
+## Limitations

-    [View a larger version of this image](/images/instance-details.png)
-## Add Status Informers

+The Replicated registry has the following limitations:

-To create status informers for your application, add one or more supported resource types to the `statusInformers` property in the Application custom resource.
See [`statusInformers`](/reference/custom-resource-application#statusinformers) in _Application_. +* You cannot delete images from the Replicated registry. As a workaround, you can push a new, empty image to the registry using the same tags as the target image. Replicated does not recommend removing tags from the registry because it could break older releases of your application. - +* When using Docker Build to build and push images to the Replicated registry, provenance attestations are not supported. To avoid a 400 error, include the `--provenance=false` flag to disable all provenance attestations. For more information, see [docker buildx build](https://docs.docker.com/engine/reference/commandline/buildx_build/#provenance) and [Provenance Attestations](https://docs.docker.com/build/attestations/slsa-provenance/) in the Docker documentation. -You can target resources of the supported types that are deployed in any of the following ways: +* You might encounter a timeout error when pushing images with layers close to or exceeding 2GB in size, such as: "received unexpected HTTP status: 524." To work around this, reduce the size of the image layers and push the image again. If the 524 error persists, continue decreasing the layer sizes until the push is successful. -* Deployed by KOTS. -* Deployed by a Kubernetes Operator that is deployed by KOTS. For more information, see [About Packaging a Kubernetes Operator Application](operator-packaging-about). -* Deployed by Helm. For more information, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about). +## Push Images to the Replicated Registry -### Examples +This procedure describes how to tag and push images to the Replicated registry. For more information about building, tagging, and pushing Docker images, see the +[Docker CLI documentation](https://docs.docker.com/engine/reference/commandline/cli/). -Status informers are in the format `[namespace/]type/name`, where namespace is optional and defaults to the current namespace. +To push images to the Replicated registry: -**Example**: +1. Do one of the following to connect with the `registry.replicated.com` container registry: + * **(Recommended) Log in with a user token**: Use `docker login registry.replicated.com` with your Vendor Portal email as the username and a Vendor Portal user token as the password. For more information, see [User API Tokens](replicated-api-tokens#user-api-tokens) in _Generating API Tokens_. + * **Log in with a service account token:** Use `docker login registry.replicated.com` with a Replicated Vendor Portal service account as the password. If you have an existing team token, you can use that instead. You can use any string as the username. For more information, see [Service Accounts](replicated-api-tokens#service-accounts) in _Generating API Tokens_. + + -```yaml -apiVersion: kots.io/v1beta1 -kind: Application -metadata: - name: my-application -spec: - statusInformers: - - deployment/my-web-svc - - deployment/my-worker -``` + * **Log in with your credentials**: Use `docker login registry.replicated.com` with your Vendor Portal email and password as the credentials. -The `statusInformers` property also supports template functions. Using template functions allows you to include or exclude a status informer based on a customer-provided configuration value: +1. 
Tag your private image with the Replicated registry hostname in the standard +Docker format: -**Example**: + ``` + docker tag IMAGE_NAME registry.replicated.com/APPLICATION_SLUG/TARGET_IMAGE_NAME:TAG + ``` -```yaml -statusInformers: - - deployment/my-web-svc - - '{{repl if ConfigOptionEquals "option" "value"}}deployment/my-worker{{repl else}}{{repl end}}' -``` + Where: + * `IMAGE_NAME` is the name of the existing private image for your application. + * `APPLICATION_SLUG` is the unique slug for the application. You can find the application slug on the **Application Settings** page in the Vendor Portal. For more information, see [Get the Application Slug](/vendor/vendor-portal-manage-app#slug) in _Managing Applications_. + * `TARGET_IMAGE_NAME` is a name for the image. Replicated recommends that the `TARGET_IMAGE_NAME` is the same as the `IMAGE_NAME`. + * `TAG` is a tag for the image. -In the example above, the `deployment/my-worker` status informer is excluded unless the statement in the `ConfigOptionEquals` template function evaluates to true. + For example: -For more information about using template functions in application manifest files, see [About Template Functions](/reference/template-functions-about). + ```bash + docker tag worker registry.replicated.com/myapp/worker:1.0.1 + ``` -## Understanding Application Status +1. Push your private image to the Replicated registry using the following format: -This section provides information about how Replicated interprets and aggregates the status of Kubernetes resources for your application to report an application status. + ``` + docker push registry.replicated.com/APPLICATION_SLUG/TARGET_IMAGE_NAME:TAG + ``` + Where: + * `APPLICATION_SLUG` is the unique slug for the application. + * `TARGET_IMAGE_NAME` is a name for the image. Use the same name that you used when tagging the image in the previous step. + * `TAG` is a tag for the image. Use the same tag that you used when tagging the image in the previous step. -### Resource Statuses + For example: -Possible resource statuses are Ready, Updating, Degraded, Unavailable, and Missing. + ```bash + docker push registry.replicated.com/myapp/worker:1.0.1 + ``` -The following table lists the supported Kubernetes resources and the conditions that contribute to each status: +1. In the [Vendor Portal](https://vendor.replicated.com/), go to **Images** and scroll down to the **Replicated Private Registry** section to confirm that the image was pushed. - +================ +File: docs/vendor/private-images-tags-digests.md +================ +# Using Image Tags and Digests -### Aggregate Application Status +This topic describes using image tags and digests with your application images. It includes information about when image tags and digests are supported, and how to enable support for image digests in air gap bundles. 
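For reference, the two forms of image reference look like the following (the image names are placeholders):

```
# Image referenced by tag
quay.io/my-org/api:v1.0.1

# Image referenced by digest
quay.io/my-org/api@sha256:<digest>
```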
- +## Support for Image Tags and Digests - +The following table describes the use cases in which image tags and digests are supported: -================ -File: docs/vendor/admin-console-port-forward.mdx -================ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import ServicePortNote from "../partials/custom-resource-application/_servicePort-note.mdx" -import GiteaKotsApp from "../partials/getting-started/_gitea-kots-app-cr.mdx" -import GiteaHelmChart from "../partials/getting-started/_gitea-helmchart-cr.mdx" -import GiteaK8sApp from "../partials/getting-started/_gitea-k8s-app-cr.mdx" -import PortsApplicationURL from "../partials/custom-resource-application/_ports-applicationURL.mdx" -import NginxKotsApp from "../partials/application-links/_nginx-kots-app.mdx" -import NginxK8sApp from "../partials/application-links/_nginx-k8s-app.mdx" -import NginxService from "../partials/application-links/_nginx-service.mdx" -import NginxDeployment from "../partials/application-links/_nginx-deployment.mdx" + + + + + + + + + + + + + + + + +
<table>
  <tr>
    <th>Installation</th>
    <th>Support for Image Tags</th>
    <th>Support for Image Digests</th>
  </tr>
  <tr>
    <td>Online</td>
    <td>Supported by default</td>
    <td>Supported by default</td>
  </tr>
  <tr>
    <td>Air Gap</td>
    <td>Supported by default for Replicated KOTS installations</td>
    <td>Supported for applications on KOTS v1.82.0 and later when the <strong>Enable new air gap bundle format</strong> toggle is enabled on the channel.<br/><br/>For more information, see <a href="#digests-air-gap">Using Image Digests in Air Gap Installations</a> below.</td>
  </tr>
</table>
    -# Port Forwarding Services with KOTS +:::note +You can use image tags and image digests together in any case where both are supported. +::: -This topic describes how to add one or more ports to the Replicated KOTS port forward tunnel by configuring the `ports` key in the KOTS Application custom resource. +## Using Image Digests in Air Gap Installations {#digests-air-gap} -The information in this topic applies to existing cluster installations. For information about exposing services for Replicated kURL or Replicated Embedded Cluster installations, see [Exposing Services Using NodePorts](kurl-nodeport-services). +For applications installed with KOTS v1.82.0 or later, you can enable a format for air gap bundles that supports the use of image digests. This air gap bundle format also ensures that identical image layers are not duplicated, resulting in a smaller air gap bundle size. -## Overview +You can enable or disable this air gap bundle format using the **Enable new air gap bundle format** toggle in the settings for any channel in the Vendor Portal. The **Enable new air gap bundle format** toggle is enabled by default. -For installations into existing clusters, KOTS automatically creates a port forward tunnel and exposes the Admin Console on port 8800 where it can be accessed by users. In addition to the 8800 Admin Console port, you can optionally add one or more extra ports to the port forward tunnel. +When you enable **Enable new air gap bundle format** on a channel, all air gap bundles that you build or rebuild on that channel use the updated air gap bundle format. -Adding ports to the port forward tunnel allows you to port forward application services without needing to manually run the `kubectl port-forward` command. You can also add a link to the Admin Console dashboard that points to port-forwarded services. +If users on a version of KOTS earlier than v1.82.0 attempt to install or upgrade an application with an air gap bundle that uses the **Enable new air gap bundle format** format, then the Admin Console displays an error message when they attempt to upload the bundle. -This can be particularly useful when developing and testing KOTS releases for your application, because it provides a quicker way to access an application after installation compared to setting up an ingress controller or adding a load balancer. +To enable the new air gap bundle format on a channel: -## Port Forward a Service with the KOTS Application `ports` Key +1. In the Replicated [Vendor Portal](https://vendor.replicated.com/channels), go to the Channels page and click the edit icon in the top right of the channel where you want to use the new air gap bundle format. +1. Enable the **Enable new air gap bundle format** toggle. +1. (Recommended) To prevent users on a version of KOTS earlier than v1.82.0 from attempting to upgrade with an air gap bundle that uses the new air gap bundle format, set `minKotsVersion` to "1.82.0" in the Application custom resource manifest file. -To port forward a service with KOTS for existing cluster installations: + `minKotsVersion` defines the minimum version of KOTS required by the application release. Including `minKotsVersion` displays a warning in the Admin Console when users attempt to install or upgrade the application if they are not on the specified minimum version or later. For more information, see [Setting Minimum and Target Versions for KOTS](packaging-kots-versions). -1. 
In a new release, configure the [`ports`](/reference/custom-resource-application#ports) key in the KOTS Application custom resource with details for the target service. For example: + **Example**: ```yaml apiVersion: kots.io/v1beta1 @@ -34262,20699 +35701,9103 @@ To port forward a service with KOTS for existing cluster installations: metadata: name: my-application spec: - ports: - - serviceName: my-service - servicePort: 3000 - localPort: 8888 + ... + minKotsVersion: "1.82.0" + ... ``` - 1. For `ports.serviceName`, add the name of the service. KOTS can create a port forward to ClusterIP, NodePort, or LoadBalancer services. For more information about Kubernetes service types, see [Service](https://kubernetes.io/docs/concepts/services-networking/service/) in the Kubernetes documentation. - - 1. For `ports.servicePort`, add the `containerPort` of the Pod where the service is running. This is the port where KOTS forwards traffic. +1. Test your changes: + 1. Save and promote the release to a development environment. + 1. On the channel where you enabled **Enable new air gap bundle format**, click **Release history**. On the Release History page, click **Build** next to the latest release to create an air gap bundle with the new format. - + ![Vendor portal release history page](../../static/images/airgap-download-bundle.png) - 1. For `ports.localPort`, add the port to map on the local workstation. + 1. Click **Download Airgap Bundle**. + 1. Install or upgrade the application with version 1.82.0 or later of the Admin Console or the KOTS CLI. Upload the new air gap bundle to confirm that the installation or upgrade completes successfully. -1. Promote the release to the channel that you use for internal testing, then install in a development environment to test your changes. +================ +File: docs/vendor/quick-start.mdx +================ +import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" +import HelmPackage from "../partials/helm/_helm-package.mdx" +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import HelmChartCr from "../partials/getting-started/_gitea-helmchart-cr-ec.mdx" +import KotsCr from "../partials/getting-started/_gitea-kots-app-cr-ec.mdx" +import K8sCr from "../partials/getting-started/_gitea-k8s-app-cr.mdx" +import EcCr from "../partials/embedded-cluster/_ec-config.mdx" +import Requirements from "../partials/embedded-cluster/_requirements.mdx" - When the application is in a Ready state and the KOTS port forward is running, you will see output similar to the following: +# Replicated Quick Start - ```bash - • Press Ctrl+C to exit - • Go to http://localhost:8800 to access the Admin Console - • Go to http://localhost:8888 to access the application - ``` - Confirm that you can access the service at the URL provided in the KOTS CLI output. +Welcome! This topic provides a quick start workflow to help new users learn about the Replicated Platform. Complete this quick start before you onboard your application to the platform. -1. (Optional) Add a link to the service on the Admin Console dashboard. See [Add a Link to a Port-Forwarded Service on the Admin Console Dashboard](#add-link) below. +## Introduction -## Add a Link to a Port-Forwarded Service on the Admin Console Dashboard {#add-link} +This quick start shows how to create, install, and update releases for a sample Helm chart in the Replicated Platform. 
You will repeat these same basic steps to create and test releases throughout the onboarding process to integrate Replicated features with your own application. -After you add a service to the KOTS port forward tunnel, you can also optionally add a link to the port-forwarded service on the Admin Console dashboard. +The goals of this quick start are to introduce new Replicated users to the following common tasks for the purpose of preparing to onboard to the Replicated Platform: -To add a link to a port-forwarded service, add the _same_ URL in the KOTS Application custom resource `ports.applicationURL` and Kubernetes SIG Application custom resource `spec.descriptor.links.url` fields. When the URLs in these fields match, KOTS adds a link on the Admin Console dashboard where the given service can be accessed. This process automatically links to the hostname in the browser (where the Admin Console is being accessed) and appends the specified `localPort`. +* Working with _applications_, _channels_, _releases_, and _customers_ in the Replicated Vendor Portal -To add a link to a port-forwarded service on the Admin Console dashboard: +* Working with the Replicated CLI -1. In a new release, open the KOTS Application custom resource and add a URL to the `ports.applicationURL` field. For example: +* Installing and updating applications on a VM with Replicated Embedded Cluster - ```yaml - apiVersion: kots.io/v1beta1 - kind: Application - metadata: - name: my-application - spec: - ports: - - serviceName: my-service - servicePort: 3000 - localPort: 8888 - applicationUrl: "http://my-service" - ``` +* Managing an installation with the Replicated KOTS Admin Console - Consider the following guidelines for this URL: - * Use HTTP instead of HTTPS unless TLS termination takes place in the application Pod. - * KOTS rewrites the URL with the hostname in the browser during deployment. So, you can use any hostname for the URL, such as the name of the service. For example, `http://my-service`. +## Set Up the Environment -1. Add a Kubernetes SIG Application custom resource in the release. For example: +Before you begin, ensure that you have access to a VM that meets the requirements for Embedded Cluster: - ```yaml - # app.k8s.io/v1beta1 Application Custom resource + - apiVersion: app.k8s.io/v1beta1 - kind: Application - metadata: - name: "my-application" - spec: - descriptor: - links: - - description: Open App - # url matches ports.applicationURL in the KOTS Application custom resource - url: "http://my-service" - ``` +## Quick Start - 1. For `spec.descriptor.links.description`, add the link text that will appear on the Admin Console dashboard. For example, `Open App`. +1. Create an account in the Vendor Portal. You can either create a new team or join an existing team. For more information, see [Creating a Vendor Account](vendor-portal-creating-account). - 1. For `spec.descriptor.links.url`, add the _same_ URL that you used in the `ports.applicationURL` in the KOTS Application custom resource. +1. Create an application using the Replicated CLI: -1. Promote the release to the channel that you use for internal testing, then install in a development environment to test your changes. + 1. On your local machine, install the Replicated CLI: - When the application is in a Ready state, confirm that you can access the service by clicking the link that appears on the dashboard. 
For example: + ```bash + brew install replicatedhq/replicated/cli + ``` + For more installation options, see [Installing the Replicated CLI](/reference/replicated-cli-installing). - Admin Console dashboard with Open App link + 1. Authorize the Replicated CLI: - [View a larger version of this image](/images/gitea-open-app.png) + ```bash + replicated login + ``` + In the browser window that opens, complete the prompts to log in to your Vendor Portal account and authorize the CLI. -## Access Port-Forwarded Services + 1. Create an application named `Gitea`: -This section describes how to access port-forwarded services. + ```bash + replicated app create Gitea + ``` -### Command Line + 1. Set the `REPLICATED_APP` environment variable to the application that you created: -Run [`kubectl kots admin-console`](/reference/kots-cli-admin-console-index) to open the KOTS port forward tunnel. + ```bash + export REPLICATED_APP=APP_SLUG + ``` + Where `APP_SLUG` is the unique application slug provided in the output of the `app create` command. For example, `export REPLICATED_APP=gitea-kite`. -The `kots admin-console` command runs the equivalent of `kubectl port-forward svc/myapplication-service :`, then prints a message with the URLs where the Admin Console and any port-forwarded services can be accessed. For more information about the `kubectl port-forward` command, see [port-forward](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#port-forward) in the Kubernetes documentation. + This allows you to interact with the application using the Replicated CLI without needing to use the `--app` flag with every command. -For example: +1. Get the sample Bitnami Gitea Helm chart and add the Replicated SDK as a dependency: -```bash -kubectl kots admin-console --namespace gitea -``` -```bash -• Press Ctrl+C to exit -• Go to http://localhost:8800 to access the Admin Console -• Go to http://localhost:8888 to access the application -``` + 1. Run the following command to pull and untar version 1.0.6 of the Bitnami Gitea Helm chart: -### Admin Console + ``` + helm pull --untar oci://registry-1.docker.io/bitnamicharts/gitea --version 1.0.6 + ``` + For more information about this chart, see the [bitnami/gitea](https://github.com/bitnami/charts/tree/main/bitnami/gitea) repository in GitHub. -You can optionally add a link to a port-forwarded service from the Admin Console dashboard. This requires additional configuration. For more information, see [Add a Link to a Port-Forwarded Service on the Admin Console Dashboard](#add-link). + 1. Change to the new `gitea` directory that was created: + + ```bash + cd gitea + ``` -The following example shows an **Open App** link on the dashboard of the Admin Console for an application named Gitea: + 1. In the Helm chart `Chart.yaml`, add the Replicated SDK as a dependency: -Admin Console dashboard with Open App link + -[View a larger version of this image](/images/gitea-open-app.png) + The Replicated SDK is a Helm chart that provides access to Replicated features and can be installed as a small service alongside your application. For more information, see [About the Replicated SDK](/vendor/replicated-sdk-overview). -## Examples + 1. Update dependencies and package the Helm chart to a `.tgz` chart archive: -This section provides examples of how to configure the `ports` key to port-forward a service in existing cluster installations and add links to services on the Admin Console dashboard. + ```bash + helm package -u . 
+ ``` + Where `-u` or `--dependency-update` is an option for the helm package command that updates chart dependencies before packaging. For more information, see [Helm Package](https://helm.sh/docs/helm/helm_package/) in the Helm documentation. -### Example: Bitnami Gitea Helm Chart with LoadBalancer Service +1. Add the chart archive to a release: -This example uses a KOTS Application custom resource and a Kubernetes SIG Application custom resource to configure port forwarding for the Bitnami Gitea Helm chart in existing cluster installations, and add a link to the port-forwarded service on the Admin Console dashboard. To view the Gitea Helm chart source, see [bitnami/gitea](https://github.com/bitnami/charts/blob/main/bitnami/gitea) in GitHub. + 1. In the `gitea` directory, create a subdirectory named `manifests`: -To test this example: + ``` + mkdir manifests + ``` -1. Pull version 1.0.6 of the Gitea Helm chart from Bitnami: + You will add the files required to support installation with Replicated KOTS and Replicated Embedded Cluster to this subdirectory. - ``` - helm pull oci://registry-1.docker.io/bitnamicharts/gitea --version 1.0.6 - ``` + 1. Move the Helm chart archive that you created to `manifests`: -1. Add the `gitea-1.0.6.tgz` chart archive to a new, empty release in the Vendor Portal along with the `kots-app.yaml`, `k8s-app.yaml`, and `gitea.yaml` files provided below. Promote to the channel that you use for internal testing. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases). + ``` + mv gitea-1.0.6.tgz manifests + ``` - - -
<Tabs>
<TabItem value="description" label="Description" default>

Based on the templates/svc.yaml and values.yaml files in the Gitea Helm chart, the following KOTS Application custom resource adds port 3000 to the port forward tunnel and maps local port 8888. Port 3000 is the container port of the Pod where the gitea service runs.

</TabItem>
<TabItem value="yaml" label="YAML">

<GiteaKotsApp/>

</TabItem>
</Tabs>

<Tabs>
<TabItem value="description" label="Description" default>

The Kubernetes Application custom resource lists the same URL as the `ports.applicationUrl` field in the KOTS Application custom resource (`"http://gitea"`). This adds a link to the port-forwarded service from the Admin Console dashboard. It also triggers KOTS to rewrite the URL to use the hostname in the browser and append the specified `localPort`. The label to be used for the link in the Admin Console is "Open App".

</TabItem>
<TabItem value="yaml" label="YAML">

<GiteaK8sApp/>

</TabItem>
</Tabs>

<Tabs>
<TabItem value="description" label="Description" default>

The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart. The name and chartVersion listed in the HelmChart custom resource must match the name and version of a Helm chart archive in the release. Each Helm chart archive in a release requires a unique HelmChart custom resource.

</TabItem>
<TabItem value="yaml" label="YAML">

<GiteaHelmChart/>

</TabItem>
</Tabs>
    + 1. In `manifests`, create the following YAML files: + ``` + cd manifests + ``` + ``` + touch gitea.yaml kots-app.yaml k8s-app.yaml embedded-cluster.yaml + ``` -1. Install the release to confirm that the service was port-forwarded successfully. To test the port forward, click **Open App** on the Admin Console dashboard after the application reaches a Ready state. For more information, see [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster). + 1. In each of the files that you created, paste the corresponding YAML provided in the tabs below: -### Example: NGINX Application with ClusterIP and NodePort Services + + +
<Tabs>
<TabItem value="description" label="Description" default>

The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart. The name and chartVersion listed in the HelmChart custom resource must match the name and version of a Helm chart archive in the release. The optionalValues field sets the specified Helm values when a given conditional statement evaluates to true. In this case, if the application is installed with Embedded Cluster, then the Gitea service type is set to `NodePort` and the node port is set to `"32000"`. This will allow Gitea to be accessed from the local machine after deployment for the purpose of this quick start.

</TabItem>
<TabItem value="yaml" label="YAML">

<HelmChartCr/>

</TabItem>
</Tabs>
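The `HelmChartCr` partial referenced above is not expanded in this packed file. As a rough sketch only (the field values below are assumptions based on the description, not the actual partial), the `optionalValues` logic might look like:

```yaml
apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: gitea
spec:
  chart:
    name: gitea
    chartVersion: 1.0.6
  optionalValues:
    # Applied only when installing with Embedded Cluster
    - when: 'repl{{ eq Distribution "embedded-cluster" }}'
      recursiveMerge: true
      values:
        service:
          type: NodePort
          nodePorts:
            http: "32000"
```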
<Tabs>
<TabItem value="description" label="Description" default>

The KOTS Application custom resource enables features in the Replicated Admin Console such as branding, release notes, application status indicators, and custom graphs.

The YAML below provides a name for the application to display in the Admin Console, adds a custom status informer that displays the status of the gitea Deployment resource in the Admin Console dashboard, adds a custom application icon, and adds the port where the Gitea service can be accessed so that the user can open the application after installation.

</TabItem>
<TabItem value="yaml" label="YAML">

<KotsCr/>

</TabItem>
</Tabs>

<Tabs>
<TabItem value="description" label="Description" default>

The Kubernetes SIG Application custom resource supports functionality such as including buttons and links on the Replicated Admin Console dashboard. The YAML below adds an Open App button to the Admin Console dashboard that opens the application using the service port defined in the KOTS Application custom resource.

</TabItem>
<TabItem value="yaml" label="YAML">

<K8sCr/>

</TabItem>
</Tabs>

<Tabs>
<TabItem value="description" label="Description" default>

To install your application with Embedded Cluster, an Embedded Cluster Config must be present in the release. At minimum, the Embedded Cluster Config sets the version of Embedded Cluster that will be installed. You can also define several characteristics about the cluster.

</TabItem>
<TabItem value="yaml" label="YAML">

<EcCr/>

</TabItem>
</Tabs>
    -The following example demonstrates how to link to a port-forwarded ClusterIP service for existing cluster installations. + 1. Lint the YAML files: -It also shows how to use the `ports` key to add a link to a NodePort service for kURL installations. Although the primary purpose of the `ports` key is to port forward services for existing cluster installations, it is also possible to use the `ports` key so that links to NodePort services for Embedded Cluster or kURL installations use the hostname in the browser. For information about exposing NodePort services for Embedded Cluster or kURL installations, see [Exposing Services Using NodePorts](kurl-nodeport-services). + ```bash + replicated release lint --yaml-dir . + ``` + **Example output:** + ```bash + RULE TYPE FILENAME LINE MESSAGE + config-spec warn Missing config spec + preflight-spec warn Missing preflight spec + troubleshoot-spec warn Missing troubleshoot spec + nonexistent-status-informer-object warn kots-app.yaml 8 Status informer points to a nonexistent kubernetes object. If this is a Helm resource, this warning can be ignored. + ``` + :::note + You can ignore any warning messages for the purpose of this quick start. + ::: -To test this example: + 1. Create the release and promote it to the Unstable channel: -1. Add the `example-service.yaml`, `example-deployment.yaml`, `kots-app.yaml`, and `k8s-app.yaml` files provided below to a new, empty release in the Vendor Portal. Promote to the channel that you use for internal testing. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases). + ```bash + replicated release create --yaml-dir . --promote Unstable + ``` + **Example output**: + ```bash + • Reading manifests from . ✓ + • Creating Release ✓ + • SEQUENCE: 1 + • Promoting ✓ + • Channel 2kvjwEj4uBaCMoTigW5xty1iiw6 successfully set to release 1 + ``` - - -
<Tabs>
<TabItem value="description" label="Description" default>

The YAML below contains ClusterIP and NodePort specifications for a service named nginx. Each specification uses the kots.io/when annotation with the Replicated IsKurl template function to conditionally include the service based on the installation type (existing cluster or kURL cluster). For more information, see Conditionally Including or Excluding Resources and IsKurl.

As shown below, both the ClusterIP and NodePort nginx services are exposed on port 80.

</TabItem>
<TabItem value="yaml" label="YAML">

<NginxService/>

</TabItem>
</Tabs>
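The `NginxService` partial referenced above is not expanded in this packed file. As a sketch only (resource names and ports are assumed from the description), the conditional pattern might look like:

```yaml
# Included only for existing cluster installations
apiVersion: v1
kind: Service
metadata:
  name: nginx
  annotations:
    kots.io/when: '{{repl not IsKurl }}'
spec:
  type: ClusterIP
  selector:
    app: nginx
  ports:
    - port: 80
---
# Included only for kURL installations
apiVersion: v1
kind: Service
metadata:
  name: nginx
  annotations:
    kots.io/when: '{{repl IsKurl }}'
spec:
  type: NodePort
  selector:
    app: nginx
  ports:
    - port: 80
```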
<Tabs>
<TabItem value="description" label="Description" default>

A basic Deployment specification for the NGINX application.

</TabItem>
<TabItem value="yaml" label="YAML">

<NginxDeployment/>

</TabItem>
</Tabs>

<Tabs>
<TabItem value="description" label="Description" default>

The KOTS Application custom resource below adds port 80 to the KOTS port forward tunnel and maps port 8888 on the local machine. The specification also includes applicationUrl: "http://nginx" so that a link to the service can be added to the Admin Console dashboard.

</TabItem>
<TabItem value="yaml" label="YAML">

<NginxKotsApp/>

</TabItem>
</Tabs>

<Tabs>
<TabItem value="description" label="Description" default>

The Kubernetes Application custom resource lists the same URL as the `ports.applicationUrl` field in the KOTS Application custom resource (`"http://nginx"`). This adds a link to the port-forwarded service on the Admin Console dashboard that uses the hostname in the browser and appends the specified `localPort`. The label to be used for the link in the Admin Console is "Open App".

</TabItem>
<TabItem value="yaml" label="YAML">

<NginxK8sApp/>

</TabItem>
</Tabs>
    +1. Create a customer so that you can install the release on your VM with Embedded Cluster: -1. Install the release into an existing cluster and confirm that the service was port-forwarded successfully by clicking **Open App** on the Admin Console dashboard. For more information, see [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster). + 1. In the [Vendor Portal](https://vendor.replicated.com), under the application drop down, select the Gitea application that you created. -1. If there is not already a kURL installer promoted to the channel, add a kURL installer to the release to support kURL installs. For more information, see [Creating a kURL Installer](/vendor/packaging-embedded-kubernetes). + App drop down -1. Install the release on a VM and confirm that the service was exposed successfully. To test the port forward, click **Open App** on the Admin Console dashboard after the application reaches a Ready state. For more information, see [Online Installation with kURL](/enterprise/installing-kurl). + [View a larger version of this image](/images/quick-start-select-gitea-app.png) + + 1. Click **Customers > Create customer**. - :::note - Ensure that the VM where you install allows HTTP traffic. - ::: + The **Create a new customer** page opens: -================ -File: docs/vendor/admin-console-prometheus-monitoring.mdx -================ -import OverviewProm from "../partials/monitoring/_overview-prom.mdx" -import LimitationEc from "../partials/monitoring/_limitation-ec.mdx" + ![Customer a new customer page in the Vendor Portal](/images/create-customer.png) -# Adding Custom Graphs + [View a larger version of this image](/images/create-customer.png) -This topic describes how to customize the graphs that are displayed on the Replicated Admin Console dashboard. + 1. For **Customer name**, enter a name for the customer. For example, `Example Customer`. -## Overview of Monitoring with Prometheus + 1. For **Channel**, select **Unstable**. This allows the customer to install releases promoted to the Unstable channel. - + 1. For **License type**, select **Development**. -## About Customizing Graphs + 1. For **License options**, enable the following entitlements: + * **KOTS Install Enabled** + * **Embedded Cluster Enabled** -If your application exposes Prometheus metrics, you can add custom graphs to the Admin Console dashboard to expose these metrics to your users. You can also modify or remove the default graphs. + 1. Click **Save Changes**. -To customize the graphs that are displayed on the Admin Console, edit the [`graphs`](/reference/custom-resource-application#graphs) property in the KOTS Application custom resource manifest file. At a minimum, each graph in the `graphs` property must include the following fields: -* `title`: Defines the graph title that is displayed on the Admin Console. -* `query`: A valid PromQL Prometheus query. You can also include a list of multiple queries by using the `queries` property. For more information about querying Prometheus with PromQL, see [Querying Prometheus](https://prometheus.io/docs/prometheus/latest/querying/basics/) in the Prometheus documentation. +1. Install the application with Embedded Cluster: + + 1. On the page for the customer that you created, click **Install instructions > Embedded Cluster**. -:::note -By default, a kURL cluster exposes the Prometheus expression browser at NodePort 30900. 
For more information, see [Expression Browser](https://prometheus.io/docs/visualization/browser/) in the Prometheus documentation. -::: + ![Customer install instructions dropdown](/images/customer-install-instructions-dropdown.png) -## Limitation + [View a larger image](/images/customer-install-instructions-dropdown.png) - + 1. On the command line, SSH onto your VM and run the commands in the **Embedded cluster install instructions** dialog to download the latest release, extract the installation assets, and install. -## Add and Modify Graphs + embedded cluster install instructions dialog -To customize graphs on the Admin Console dashboard: + [View a larger version of this image](/images/embedded-cluster-install-dialog-latest.png) -1. In the [Vendor Portal](https://vendor.replicated.com/), click **Releases**. Then, either click **Create release** to create a new release, or click **Edit YAML** to edit an existing release. + 1. When prompted, enter a password for accessing the Admin Console. -1. Create or open the [KOTS Application](/reference/custom-resource-application) custom resource manifest file. + The installation command takes a few minutes to complete. -1. In the Application manifest file, under `spec`, add a `graphs` property. Edit the `graphs` property to modify or remove existing graphs or add a new custom graph. For more information, see [graphs](/reference/custom-resource-application#graphs) in _Application_. + **Example output:** - **Example**: + ```bash + ? Enter an Admin Console password: ******** + ? Confirm password: ******** + ✔ Host files materialized! + ✔ Running host preflights + ✔ Node installation finished! + ✔ Storage is ready! + ✔ Embedded Cluster Operator is ready! + ✔ Admin Console is ready! + ✔ Additional components are ready! + Visit the Admin Console to configure and install gitea-kite: http://104.155.145.60:30000 + ``` - The following example shows the YAML for adding a custom graph that displays the total number of user signups for an application. + At this point, the cluster is provisioned and the Admin Console is deployed, but the application is not yet installed. - ```yaml - apiVersion: kots.io/v1beta1 - kind: Application - metadata: - name: my-application - spec: - graphs: - - title: User Signups - query: 'sum(user_signup_events_total)' - ``` + 1. Go to the URL provided in the output to access to the Admin Console. + + 1. On the Admin Console landing page, click **Start**. -1. (Optional) Under `graphs`, copy and paste the specs for the default Disk Usage, CPU Usage, and Memory Usage Admin Console graphs provided in the YAML below. + 1. On the **Secure the Admin Console** screen, review the instructions and click **Continue**. In your browser, follow the instructions that were provided on the **Secure the Admin Console** screen to bypass the warning. - Adding these default graphs to the Application custom resource manifest ensures that they are not overwritten when you add one or more custom graphs. When the default graphs are included in the Application custom resource, the Admin Console displays them in addition to any custom graphs. + 1. On the **Certificate type** screen, either select **Self-signed** to continue using the self-signed Admin Console certificate or click **Upload your own** to upload your own private key and certificacte. - Alternatively, you can exclude the YAML specs for the default graphs to remove them from the Admin Console dashboard. 
+ By default, a self-signed TLS certificate is used to secure communication between your browser and the Admin Console. You will see a warning in your browser every time you access the Admin Console unless you upload your own certificate. - ```yaml - apiVersion: kots.io/v1beta1 - kind: Application - metadata: - name: my-application - spec: - graphs: - - title: User Signups - query: 'sum(user_signup_events_total)' - # Disk Usage, CPU Usage, and Memory Usage below are the default graphs - - title: Disk Usage - queries: - - query: 'sum((node_filesystem_size_bytes{job="node-exporter",fstype!="",instance!=""} - node_filesystem_avail_bytes{job="node-exporter", fstype!=""})) by (instance)' - legend: 'Used: {{ instance }}' - - query: 'sum((node_filesystem_avail_bytes{job="node-exporter",fstype!="",instance!=""})) by (instance)' - legend: 'Available: {{ instance }}' - yAxisFormat: bytes - - title: CPU Usage - query: 'sum(rate(container_cpu_usage_seconds_total{namespace="{{repl Namespace}}",container!="POD",pod!=""}[5m])) by (pod)' - legend: '{{ pod }}' - - title: Memory Usage - query: 'sum(container_memory_usage_bytes{namespace="{{repl Namespace}}",container!="POD",pod!=""}) by (pod)' - legend: '{{ pod }}' - yAxisFormat: bytes - ``` -1. Save and promote the release to a development environment to test your changes. + 1. On the login page, enter the Admin Console password that you created during installation and click **Log in**. -================ -File: docs/vendor/ci-overview.md -================ -import TestRecs from "../partials/ci-cd/_test-recs.mdx" + 1. On the **Configure the cluster** screen, you can view details about the VM where you installed, including its node role, status, CPU, and memory. Users can also optionally add additional nodes on this page before deploying the application. Click **Continue**. -# About Integrating with CI/CD + The Admin Console dashboard opens. -This topic provides an introduction to integrating Replicated CLI commands in your continuous integration and continuous delivery (CI/CD) pipelines, including Replicated's best practices and recommendations. + 1. On the Admin Console dashboard, next to the version, click **Deploy** and then **Yes, Deploy**. -## Overview + The application status changes from Missing to Unavailable while the `gitea` Deployment is being created. -Using CI/CD workflows to automatically compile code and run tests improves the speed at which teams can test, iterate on, and deliver releases to customers. When you integrate Replicated CLI commands into your CI/CD workflows, you can automate the process of deploying your application to clusters for testing, rather than needing to manually create and then archive channels, customers, and environments for testing. + 1. After a few minutes when the application status is Ready, click **Open App** to view the Gitea application in a browser. + + For example: -You can also include continuous delivery workflows to automatically promote a release to a shared channel in your Replicated team. This allows you to more easily share releases with team members for internal testing and iteration, and then to promote releases when they are ready to be shared with customers. 
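As a minimal sketch of the continuous delivery step described above, a CI job can cut and promote a release in a single step with the same `replicated release create --promote` command used elsewhere in this guide. The `./manifests` directory and the version label below are placeholders for your own values:

```bash
# Hypothetical CI step: create a release from the local ./manifests directory
# and promote it to the team's shared Unstable channel in one command.
# Assumes REPLICATED_API_TOKEN and REPLICATED_APP are set in the CI environment.
replicated release create --yaml-dir ./manifests --promote Unstable --version 1.0.0-alpha.1
```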
+ ![Admin console dashboard showing ready status](/images/gitea-ec-ready.png) -## Best Practices and Recommendations + [View a larger version of this image](/images/gitea-ec-ready.png) -The following are Replicated's best practices and recommendations for CI/CD: + Gitea app landing page -* Include unique workflows for development and for releasing your application. This allows you to run tests on every commit, and then to promote releases to internal and customer-facing channels only when ready. For more information about the workflows that Replicated recommends, see [Recommended CI/CD Workflows](ci-workflows). + [View a larger version of this image](/images/gitea-app.png) -* Integrate Replicated Compatibility Matrix into your CI/CD workflows to quickly create multiple different types of clusters where you can deploy and test your application. Supported distributions include OpenShift, GKE, EKS, and more. For more information, see [About Compatibility Matrix](testing-about). +1. Return to the Vendor Portal and go to **Customers**. Under the name of the customer, confirm that you can see an active instance. -* If you use the GitHub Actions CI/CD platform, integrate the custom GitHub actions that Replicated maintains to replace repetitive tasks related to distributing application with Replicated or using Compatibility Matrix. For more information, see [Integrating Replicated GitHub Actions](/vendor/ci-workflows-github-actions). + This instance telemetry is automatically collected and sent back to the Vendor Portal by both KOTS and the Replicated SDK. For more information, see [About Instance and Event Data](/vendor/instance-insights-event-data). -* To help show you are conforming to a secure supply chain, sign all commits and container images. Additionally, provide a verification mechanism for container images. +1. Under **Instance ID**, click on the ID to view additional insights including the versions of Kubernetes and the Replicated SDK running in the cluster where you installed the application. For more information, see [Instance Details](/vendor/instance-insights-details). -* Use custom RBAC policies to control the actions that can be performed in your CI/CD workflows. For example, you can create a policy that blocks the ability to promote releases to your production channel. For more information about creating custom RBAC policies in the Vendor Portal, see [Configuring RBAC Policies](/vendor/team-management-rbac-configuring). For a full list of available RBAC resources, see [RBAC Resource Names](/vendor/team-management-rbac-resource-names). +1. Create a new release that adds preflight checks to the application: -* Incorporating code tests into your CI/CD workflows is important for ensuring that developers receive quick feedback and can make updates in small iterations. Replicated recommends that you create and run all of the following test types as part of your CI/CD workflows: - + 1. In your local filesystem, go to the `gitea` directory. -================ -File: docs/vendor/ci-workflows-github-actions.md -================ -# Integrating Replicated GitHub Actions + 1. Create a `gitea-preflights.yaml` file in the `templates` directory: -This topic describes how to integrate Replicated's custom GitHub actions into continuous integration and continuous delivery (CI/CD) workflows that use the GitHub Actions platform. + ``` + touch templates/gitea-preflights.yaml + ``` -## Overview + 1. 
In the `gitea-preflights.yaml` file, add the following YAML to create a Kubernetes Secret with a simple preflight spec: -Replicated maintains a set of custom GitHub actions that are designed to replace repetitive tasks related to distributing your application with Replicated and related to using the Compatibility Matrix, such as: - * Creating and removing customers, channels, and clusters - * Promoting releases - * Creating a matrix of clusters for testing based on the Kubernetes distributions and versions where your customers are running application instances - * Reporting the success or failure of tests + ```yaml + apiVersion: v1 + kind: Secret + metadata: + labels: + troubleshoot.sh/kind: preflight + name: "{{ .Release.Name }}-preflight-config" + stringData: + preflight.yaml: | + apiVersion: troubleshoot.sh/v1beta2 + kind: Preflight + metadata: + name: preflight-sample + spec: + collectors: + - http: + collectorName: slack + get: + url: https://api.slack.com/methods/api.test + analyzers: + - textAnalyze: + checkName: Slack Accessible + fileName: slack.json + regex: '"status": 200,' + outcomes: + - pass: + when: "true" + message: "Can access the Slack API" + - fail: + when: "false" + message: "Cannot access the Slack API. Check that the server can reach the internet and check [status.slack.com](https://status.slack.com)." + ``` + The YAML above defines a preflight check that confirms that an HTTP request to the Slack API at `https://api.slack.com/methods/api.test` made from the cluster returns a successful response of `"status": 200,`. -If you use GitHub Actions as your CI/CD platform, you can include these custom actions in your workflows rather than using Replicated CLI commands. Integrating the Replicated GitHub actions into your CI/CD pipeline helps you quickly build workflows with the required inputs and outputs, without needing to manually create the required CLI commands for each step. + 1. In the `Chart.yaml` file, increment the version to 1.0.7: -To view all the available GitHub actions that Replicated maintains, see the [replicatedhq/replicated-actions](https://github.com/replicatedhq/replicated-actions/) repository in GitHub. + ```yaml + # Chart.yaml + version: 1.0.7 + ``` -## GitHub Actions Workflow Examples + 1. Update dependencies and package the chart to a `.tgz` chart archive: -The [replicatedhq/replicated-actions](https://github.com/replicatedhq/replicated-actions#examples) repository in GitHub contains example workflows that use the Replicated GitHub actions. You can use these workflows as a template for your own GitHub Actions CI/CD workflows: + ```bash + helm package -u . + ``` -* For a simplified development workflow, see [development-helm-prepare-cluster.yaml](https://github.com/replicatedhq/replicated-actions/blob/main/example-workflows/development-helm-prepare-cluster.yaml). -* For a customizable development workflow for applications installed with the Helm CLI, see [development-helm.yaml](https://github.com/replicatedhq/replicated-actions/blob/main/example-workflows/development-helm.yaml). -* For a customizable development workflow for applications installed with KOTS, see [development-kots.yaml](https://github.com/replicatedhq/replicated-actions/blob/main/example-workflows/development-kots.yaml). -* For a release workflow, see [release.yaml](https://github.com/replicatedhq/replicated-actions/blob/main/example-workflows/release.yaml). + 1. 
Move the chart archive to the `manifests` directory: -## Integrate GitHub Actions + ```bash + mv gitea-1.0.7.tgz manifests + ``` -The following table lists GitHub actions that are maintained by Replicated that you can integrate into your CI/CD workflows. The table also describes when to use the action in a workflow and indicates the related Replicated CLI command where applicable. + 1. In the `manifests` directory, open the KOTS HelmChart custom resource (`gitea.yaml`) and update the `chartVersion`: -:::note -For an up-to-date list of the available custom GitHub actions, see the [replicatedhq/replicated-actions](https://github.com/replicatedhq/replicated-actions/) repository in GitHub. -::: + ```yaml + # gitea.yaml KOTS HelmChart + chartVersion: 1.0.7 + ```
| GitHub Action | When to Use | Related Replicated CLI Commands |
|---|---|---|
| `archive-channel` | In release workflows, a temporary channel is created to promote a release for testing. This action archives the temporary channel after tests complete. See [Archive the temporary channel and customer](ci-workflows#rel-cleanup) in _Recommended CI/CD Workflows_. | `channel delete` |
| `archive-customer` | In release workflows, a temporary customer is created so that a release can be installed for testing. This action archives the temporary customer after tests complete. See [Archive the temporary channel and customer](ci-workflows#rel-cleanup) in _Recommended CI/CD Workflows_. | N/A |
| `create-cluster` | In release workflows, use this action to create one or more clusters for testing. See [Create cluster matrix, deploy, and test](ci-workflows#rel-deploy) in _Recommended CI/CD Workflows_. | `cluster create` |
| `create-release` | In release workflows, use this action to create a release to be installed and tested, and optionally to be promoted to a shared channel after tests complete. See [Create a release and promote to a temporary channel](ci-workflows#rel-release) in _Recommended CI/CD Workflows_. | `release create` |
| `get-customer-instances` | In release workflows, use this action to create a matrix of clusters for running tests based on the Kubernetes distributions and versions of active instances of your application running in customer environments. See [Create cluster matrix, deploy, and test](ci-workflows#rel-deploy) in _Recommended CI/CD Workflows_. | N/A |
| `helm-install` | In development or release workflows, use this action to install a release using the Helm CLI in one or more clusters for testing. See [Create cluster matrix, deploy, and test](ci-workflows#rel-deploy) in _Recommended CI/CD Workflows_. | N/A |
| `kots-install` | In development or release workflows, use this action to install a release with Replicated KOTS in one or more clusters for testing. See [Create cluster matrix, deploy, and test](ci-workflows#rel-deploy) in _Recommended CI/CD Workflows_. | N/A |
| `prepare-cluster` | In development workflows, use this action to create a cluster, create a temporary customer of type test, and install an application in the cluster. See [Prepare clusters, deploy, and test](ci-workflows#dev-deploy) in _Recommended CI/CD Workflows_. | `cluster prepare` |
| `promote-release` | In release workflows, use this action to promote a release to an internal or customer-facing channel (such as Unstable, Beta, or Stable) after tests pass. See [Promote to a shared channel](ci-workflows#rel-promote) in _Recommended CI/CD Workflows_. | `release promote` |
| `remove-cluster` | In development or release workflows, use this action to remove a cluster after running tests if no `ttl` was set for the cluster. See [Prepare clusters, deploy, and test](ci-workflows#dev-deploy) and [Create cluster matrix, deploy, and test](ci-workflows#rel-deploy) in _Recommended CI/CD Workflows_. | `cluster rm` |
| `report-compatibility-result` | In development or release workflows, use this action to report the success or failure of tests that ran in clusters provisioned by the Compatibility Matrix. | `release compatibility` |
| `upgrade-cluster` | In release workflows, use this action to test your application's compatibility with Kubernetes API resource version migrations after upgrading. | `cluster upgrade` |
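The following is a minimal sketch of how one of these actions can be wired into a workflow job, in this case `prepare-cluster` in a development workflow. The input names shown are illustrative assumptions; check the [replicatedhq/replicated-actions](https://github.com/replicatedhq/replicated-actions/) repository for the exact interface of each action:

```yaml
# Hypothetical development-workflow job that uses the prepare-cluster action
# to create a cluster, create a temporary test customer, and install the chart.
prepare-and-test:
  runs-on: ubuntu-22.04
  steps:
    - uses: actions/checkout@v4
    - name: Prepare cluster and install release
      uses: replicatedhq/replicated-actions/prepare-cluster@v1
      with:
        app-slug: my-app                          # assumed input name
        api-token: ${{ secrets.REPLICATED_API_TOKEN }}
        kubernetes-distribution: kind             # assumed input name
        kubernetes-version: "1.27"
        chart: my-app-1.0.0.tgz                   # assumed input name
```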
    + 1. Remove the chart archive for version 1.0.6 of the Gitea chart from the `manifests` directory: -================ -File: docs/vendor/ci-workflows.mdx -================ -import Build from "../partials/ci-cd/_build-source-code.mdx" + ``` + rm gitea-1.0.6.tgz + ``` -# Recommended CI/CD Workflows + 1. From the `manifests` directory, create and promote a new release, setting the version label of the release to `0.0.2`: -This topic provides Replicated's recommended development and release workflows for your continuous integration and continuous delivery (CI/CD) pipelines. + ```bash + replicated release create --yaml-dir . --promote Unstable --version 0.0.2 + ``` + **Example output**: + ```bash + • Reading manifests from . ✓ + • Creating Release ✓ + • SEQUENCE: 2 + • Promoting ✓ + • Channel 2kvjwEj4uBaCMoTigW5xty1iiw6 successfully set to release 2 + ``` -## Overview +1. On your VM, update the application instance to the new version that you just promoted: -Replicated recommends that you maintain unique CI/CD workflows for development (continuous integration) and for releasing your software (continuous delivery). The development and release workflows in this topic describe the recommended steps and jobs to include in your own workflows, including how to integrate Replicated Compatibility Matrix into your workflows for testing. For more information about Compatibility Matrix, see [About Compatibility Matrix](testing-about). + 1. In the Admin Console, go to the **Version history** tab. -For each step, the corresponding Replicated CLI command is provided. Additionally, for users of the GitHub Actions platform, a corresponding custom GitHub action that is maintained by Replicated is also provided. For more information about using the Replicated CLI, see [Installing the Replicated CLI](/reference/replicated-cli-installing). For more information about the Replicated GitHub actions, see [Integrating Replicated GitHub Actions](ci-workflows-github-actions). + The new version is displayed automatically. -:::note -How you implement CI/CD workflows varies depending on the platform, such as GitHub, GitLab, CircleCI, TravisCI, or Jenkins. Refer to the documentation for your CI/CD platform for additional guidance on how to create jobs and workflows. -::: + 1. Click **Deploy** next to the new version. -## About Creating RBAC Policies for CI/CD + The Embedded Cluster upgrade wizard opens. -Replicated recommends using custom RBAC policies to control the actions that can be performed in your CI/CD workflows. For example, you can create a policy using the [`kots/app/[]/channel/[]/promote`](/vendor/team-management-rbac-resource-names#kotsappchannelpromote) resource that blocks the ability to promote releases to your production channel. This allows for using CI/CD for the purpose of testing, without accidentally releasing to customers. + 1. In the Embedded Cluster upgrade wizard, on the **Preflight checks** screen, note that the "Slack Accessible" preflight check that you added was successful. Click **Next: Confirm and deploy**. -For more information about creating custom RBAC policies in the Vendor Portal, including examples, see [Configuring RBAC Policies](/vendor/team-management-rbac-configuring). + ![preflight page of the embedded cluster upgrade wizard](/images/quick-start-ec-upgrade-wizard-preflight.png) -For a full list of available RBAC resources, see [RBAC Resource Names](/vendor/team-management-rbac-resource-names). 
+ [View a larger version of this image](/images/quick-start-ec-upgrade-wizard-preflight.png) -## Development Workflow + :::note + The **Config** screen in the upgrade wizard is bypassed because this release does not contain a KOTS Config custom resource. The KOTS Config custom resource is used to set up the Config screen in the KOTS Admin Console. + ::: -In a development workflow (which runs multiple times per day and is triggered by a commit to the application code repository), the source code is built and the application is deployed to clusters for testing. Additionally, for applications managed in the Replicated vendor portal, a release is created and promoted to a channel in the Replicated Vendor Portal where it can be shared with internal teams. + 1. On the **Confirm and Deploy** page, click **Deploy**. -The following diagram shows the recommended development workflow, where a commit to the application code repository triggers the source code to be built and the application to be deployed to clusters for testing: +1. Reset and reboot the VM to remove the installation: -![Development CI workflow](/images/ci-workflow-dev.png) + ```bash + sudo ./APP_SLUG reset + ``` + Where `APP_SLUG` is the unique slug for the application. + + :::note + You can find the application slug by running `replicated app ls` on your local machine. + ::: -[View a larger version of this image](/images/ci-workflow-dev.png) +## Next Steps -The following describes the recommended steps to include in release workflows, as shown in the diagram above: -1. [Define workflow triggers](#dev-triggers) -1. [Build source code](#dev-build) -1. [Prepare clusters, deploy, and test](#dev-deploy) +Congratulations! As part of this quick start, you: +* Added the Replicated SDK to a Helm chart +* Created a release with the Helm chart +* Installed the release on a VM with Embedded Cluster +* Viewed telemetry for the installed instance in the Vendor Portal +* Created a new release to add preflight checks to the application +* Updated the application from the Admin Console -### Define workflow triggers {#dev-triggers} +Now that you are familiar with the workflow of creating, installing, and updating releases, you can begin onboarding your own application to the Replicated Platform. -Run a development workflow on every commit to a branch in your code repository that is _not_ `main`. +To get started, see [Replicated Onboarding](replicated-onboarding). -The following example shows defining a workflow trigger in GitHub Actions that runs the workflow when a commit is pushed to any branch other than `main`: +## Related Topics -```yaml -name: development-workflow-example +For more information about the Replicated Platform features mentioned in this quick start, see: -on: - push: - branches: - - '*' # matches every branch that doesn't contain a '/' - - '*/*' # matches every branch containing a single '/' - - '**' # matches every branch - - '!main' # excludes main +* [About Distributing Helm Charts with KOTS](/vendor/helm-native-about) +* [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about) +* [About the Replicated SDK](/vendor/replicated-sdk-overview) +* [Introduction to KOTS](/intro-kots) +* [Managing Releases with the CLI](/vendor/releases-creating-cli) +* [Packaging a Helm Chart for a Release](/vendor/helm-install-release) +* [Using Embedded Cluster](/vendor/embedded-overview) -jobs: - ... 
-``` +## Related Tutorials -### Build source code {#dev-build} +For additional tutorials related to this quick start, see: - +* [Deploying a Helm Chart on a VM with Embedded Cluster](/vendor/tutorial-embedded-cluster-setup) +* [Adding Preflight Checks to a Helm Chart](/vendor/tutorial-preflight-helm-setup) +* [Deploying a Helm Chart with KOTS and the Helm CLI](/vendor/tutorial-kots-helm-setup) -### Prepare clusters, deploy, and test {#dev-deploy} +================ +File: docs/vendor/releases-about.mdx +================ +import ChangeChannel from "../partials/customers/_change-channel.mdx" +import RequiredReleasesLimitations from "../partials/releases/_required-releases-limitations.mdx" +import RequiredReleasesDescription from "../partials/releases/_required-releases-description.mdx" +import VersionLabelReqsHelm from "../partials/releases/_version-label-reqs-helm.mdx" -Add a job with the following steps to prepare clusters with Replicated Compatibility Matrix, deploy the application, and run tests: +# About Channels and Releases -1. Use Replicated Compatibility Matrix to prepare one or more clusters and deploy the application. Consider the following recommendations: +This topic describes channels and releases, including information about the **Releases** and **Channels** pages in the Replicated Vendor Portal. - * For development workflows, Replicated recommends that you use the `cluster prepare` command to provision one or more clusters with Compatibility Matrix. The `cluster prepare` command creates a cluster, creates a release, and installs the release in the cluster, without the need to promote the release to a channel or create a temporary customer. See the [`cluster prepare`](/reference/replicated-cli-cluster-prepare) Replicated CLI command. Or, for GitHub Actions workflows, see the [prepare-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/prepare-cluster) GitHub action. +## Overview - :::note - The `cluster prepare` command is Beta. It is recommended for development only and is not recommended for production releases. For production releases, Replicated recommends that you use the `cluster create` command instead. For more information, see [Create cluster matrix and deploy](#rel-deploy) in _Release Workflow_ below. - ::: +A _release_ represents a single version of your application. Each release is promoted to one or more _channels_. Channels provide a way to progress releases through the software development lifecycle: from internal testing, to sharing with early-adopters, and finally to making the release generally available. - * The type and number of clusters that you choose to provision as part of a development workflow depends on how frequently you intend the workflow to run. For example, for workflows that run multiple times a day, you might prefer to provision cluster distributions that can be created quickly, such as kind clusters. +Channels also control which customers are able to install a release. You assign each customer to a channel to define the releases that the customer can access. For example, a customer assigned to the Stable channel can only install releases that are promoted to the Stable channel, and cannot see any releases promoted to other channels. For more information about assigning customers to channels, see [Channel Assignment](licenses-about#channel-assignment) in _About Customers_. -1. Run tests, such as integration, smoke, and canary tests. 
For more information about recommended types of tests to run, see [Best Practices and Recommendations](/vendor/ci-overview#best-practices-and-recommendations) in _About Integrating with CI/CD_. +Using channels and releases helps you distribute versions of your application to the right customer segments, without needing to manage different release workflows. -1. After the tests complete, remove the cluster. Alternatively, if you used the `--ttl` flag with the `cluster prepare` command, the cluster is automatically removed when the time period provided is reached. See the [`cluster remove`](/reference/replicated-cli-cluster-prepare) Replicated CLI command. Or, for GitHub Actions workflows, see the [remove-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/remove-cluster) action. +You can manage channels and releases with the Vendor Portal, the Replicated CLI, or the Vendor API v3. For more information about creating and managing releases or channels, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Creating and Editing Channels](releases-creating-channels). -## Compatibility Matrix-Only Development Workflow +## About Channels -In a development workflow (which runs multiple times per day and is triggered by a commit to the application code repository), the source code is built and the application is deployed to clusters for testing. +This section provides additional information about channels, including details about the default channels in the Vendor Portal and channel settings. -This example development workflow does _not_ create releases or customers in the Replicated vendor platform. This workflow is useful for applications that are not distributed or managed in the Replicated platform. +### Unstable, Beta, and Stable Channels -The following describes the recommended steps to include in a development workflow using Compatibility Matrix: +Replicated includes the following channels by default: -1. [Define workflow triggers](#dev-triggers) -1. [Build source code](#dev-build) -1. [Create cluster matrix, deploy, and test](#dev-deploy) +* **Unstable**: The Unstable channel is designed for internal testing and development. You can create and assign an internal test customer to the Unstable channel to install in a development environment. Replicated recommends that you do not license any of your external users against the Unstable channel. +* **Beta**: The Beta channel is designed for release candidates and early-adopting customers. Replicated recommends that you promote a release to the Beta channel after it has passed automated testing in the Unstable channel. You can also choose to license early-adopting customers against this channel. +* **Stable**: The Stable channel is designed for releases that are generally available. Replicated recommends that you assign most of your customers to the Stable channel. Customers licensed against the Stable channel only receive application updates when you promote a new release to the Stable channel. -### Define workflow triggers {#dev-triggers} +You can archive or edit any of the default channels, and create new channels. For more information, see [Creating and Editing Channels](releases-creating-channels). -Run a development workflow on every commit to a branch in your code repository that is _not_ `main`. +### Settings -The following example shows defining a workflow trigger in GitHub Actions that runs the workflow when a commit is pushed to any branch other than `main`: +Each channel has settings. 
You can customize the settings for a channel to control some of the behavior of releases promoted to the channel. -```yaml -name: development-workflow-example +The following shows the **Channel Settings** dialog, accessed by clicking the settings icon on a channel: -on: - push: - branches: - - '*' # matches every branch that doesn't contain a '/' - - '*/*' # matches every branch containing a single '/' - - '**' # matches every branch - - '!main' # excludes main +Channel Settings dialog in the Vendor Portal -jobs: - ... -``` +[View a larger version of this image](/images/channel-settings.png) -### Build source code {#dev-build} +The following describes each of the channel settings: - +* **Channel name**: The name of the channel. You can change the channel name at any time. Each channel also has a unique ID listed below the channel name. +* **Description**: Optionally, add a description of the channel. +* **Set this channel to default**: When enabled, sets the channel as the default channel. The default channel cannot be archived. +* **Custom domains**: Select the customer-facing domains that releases promoted to this channel use for the Replicated registry, Replicated proxy registry, Replicated app service, or Replicated Download Portal endpoints. If a default custom domain exists for any of these endpoints, choosing a different domain in the channel settings overrides the default. If no custom domains are configured for an endpoint, the drop-down for the endpoint is disabled. + For more information about configuring custom domains and assigning default domains, see [Using Custom Domains](custom-domains-using). +* The following channel settings apply only to applications that support KOTS: + * **Automatically create airgap builds for newly promoted releases in this channel**: When enabled, the Vendor Portal automatically builds an air gap bundle when a new release is promoted to the channel. When disabled, you can generate an air gap bundle manually for a release on the **Release History** page for the channel. + * **Enable semantic versioning**: When enabled, the Vendor Portal verifies that the version label for any releases promoted to the channel uses a valid semantic version. For more information, see [Semantic Versioning](releases-about#semantic-versioning) in _About Releases_. + * **Enable new airgap bundle format**: When enabled, air gap bundles built for releases promoted to the channel use a format that supports image digests. This air gap bundle format also ensures that identical image layers are not duplicated, resulting in a smaller air gap bundle size. For more information, see [Using Image Digests in Air Gap Installations](private-images-tags-digests#digests-air-gap) in _Using Image Tags and Digests_. -### Create cluster matrix, deploy, and test {#dev-deploy} + :::note + The new air gap bundle format is supported for applications installed with KOTS v1.82.0 or later. + ::: + +## About Releases -Add a job with the following steps to provision clusters with Compatibility Matrix, deploy your application to the clusters, and run tests: +This section provides additional information about releases, including details about release promotion, properties, sequencing, and versioning. -1. Use Compatibility Matrix to create a matrix of different Kubernetes cluster distributions and versions to run tests against. See the [cluster create](/reference/replicated-cli-cluster-create) Replicated CLI command. 
Or, for GitHub Actions workflows, see the [create-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/create-cluster) action. +### Release Files - The following example shows creating a matrix of clusters of different distributions and versions using GitHub Actions: +A release contains your application files as well as the manifests required to install the application with the Replicated installers ([Replicated Embedded Cluster](/vendor/embedded-overview) and [Replicated KOTS](../intro-kots)). - ```yaml - # github actions cluster matrix example +The application files in releases can be Helm charts and/or Kubernetes manifests. Replicated strongly recommends that all applications are packaged as Helm charts because many enterprise customers will expect to be able to install with Helm. - compatibility-matrix-example: - runs-on: ubuntu-22.04 - strategy: - matrix: - cluster: - - {distribution: kind, version: "1.25"} - - {distribution: kind, version: "1.26"} - - {distribution: eks, version: "1.26"} - - {distribution: gke, version: "1.27"} - - {distribution: openshift, version: "4.13.0-okd"} - ``` +### Promotion -1. For each cluster created, use the cluster's kubeconfig to update Kubernetes context and then install the target application in the cluster. For more information about accessing the kubeconfig for clusters created with Compatibility Matrix, see [cluster kubeconfig](/reference/replicated-cli-cluster-kubeconfig). +Each release is promoted to one or more channels. While you are developing and testing releases, Replicated recommends promoting to a channel that does not have any real customers assigned, such as the default Unstable channel. When the release is ready to be shared externally with customers, you can then promote to a channel that has the target customers assigned, such as the Beta or Stable channel. -1. Run tests, such as integration, smoke, and canary tests. For more information about recommended types of tests to run, see [Best Practices and Recommendations](/vendor/ci-overview#best-practices-and-recommendations) in _About Integrating with CI/CD_. +A release cannot be edited after it is promoted to a channel. This means that you can test a release on an internal development channel, and know with confidence that the same release will be available to your customers when you promote it to a channel where real customers are assigned. -1. Delete the cluster when the tests complete. See the [cluster rm](/reference/replicated-cli-cluster-rm) Replicated CLI command. Or, for GitHub Actions workflows, see the [remove-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/remove-cluster) action. +### Properties -## Replicated Platform Release Workflow +Each release has properties. You define release properties when you promote a release to a channel. You can edit release properties at any time from the channel **Release History** page in the Vendor Portal. For more information, see [Edit Release Properties](releases-creating-releases#edit-release-properties) in _Managing Releases with the Vendor Portal_. -In a release workflow (which is triggered by an action such as a commit to `main` or a tag being pushed to the repository), the source code is built, the application is deployed to clusters for testing, and then the application is made available to customers. In this example release workflow, a release is created and promoted to a channel in the Replicated vendor platform so that it can be installed by internal teams or by customers. 
+The following shows an example of the release properties dialog: -The following diagram demonstrates a release workflow that promotes a release to the Beta channel when a tag with the format `"v*.*.*-beta.*"` is pushed: +release properties dialog for a release with version label 0.1.22 -![Workflow that promotes to Beta channel](/images/ci-workflow-beta.png) +[View a larger version of this image](/images/release-properties.png) -[View a larger version of this image](/images/ci-workflow-beta.png) +As shown in the screenshot above, the release has the following properties: -The following describes the recommended steps to include in release workflows, as shown in the diagram above: +* **Version label**: The version label for the release. Version labels have the following requirements: -1. [Define workflow triggers](#rel-triggers) -1. [Build source code](#rel-build) -1. [Create a release and promote to a temporary channel](#rel-release) -1. [Create cluster matrix, deploy, and test](#rel-deploy) -1. [Promote to a shared channel](#rel-promote) -1. [Archive the temporary channel and customer](#rel-cleanup) + * If semantic versioning is enabled for the channel, you must use a valid semantic version. For more information, see [Semantic Versioning](#semantic-versioning). -### Define workflow triggers {#rel-triggers} + -Create unique workflows for promoting releases to your team's internal-only, beta, and stable channels. Define unique event triggers for each of your release workflows so that releases are only promoted to a channel when a given condition is met: +* **Requirements**: Select **Prevent this release from being skipped during upgrades** to mark the release as required. -* On every commit to the `main` branch in your code repository, promote a release to the channel that your team uses for internal testing (such as the default Unstable channel). + - The following example shows a workflow trigger in GitHub Actions that runs the workflow on commits to `main`: + - ```yaml - name: unstable-release-example +* **Release notes (supports markdown)**: Detailed release notes for the release. The release notes support markdown and are shown to your customer. - on: - push: - branches: - - 'main' +### Sequencing - jobs: - ... - ``` +By default, Replicated uses release sequence numbers to organize and order releases, and uses instance sequence numbers in an instance's internal version history. -* On pushing a tag that contains a version label with the semantic versioning format `x.y.z-beta-n` (such as `1.0.0-beta.1` or `v1.0.0-beta.2`), promote a release to your team's Beta channel. +#### Release Sequences - The following example shows a workflow trigger in GitHub Actions that runs the workflow when a tag that matches the format `v*.*.*-beta.*` is pushed: +In the Vendor Portal, each release is automatically assigned a unique, monotonically-increasing sequence number. You can use this number as a fallback to identify a promoted or draft release, if you do not set the `Version label` field during promotion. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases). - ```yaml - name: beta-release-example +The following graphic shows release sequence numbers in the Vendor Portal: - on: - push: - tags: - - "v*.*.*-beta.*" +Release sequence numbers - jobs: - ... 
- ``` +[View a larger version of this image](/images/release-sequences.png) -* On pushing a tag that contains a version label with the semantic versioning format `x.y.z` (such as `1.0.0` or `v1.0.01`), promote a release to your team's Stable channel. +#### Instance Sequences - The following example shows a workflow trigger in GitHub Actions that runs the workflow when a tag that matches the format `v*.*.*` is pushed: +When a new version is available for upgrade, including when KOTS checks for upstream updates as well as when the user syncs their license or makes a config change, the KOTS Admin Console assigns a unique instance sequence number to that version. The instance sequence in the Admin Console starts at 0 and increments for each identifier that is returned when a new version is available. - ```yaml - name: stable-release-example +This instance sequence is unrelated to the release sequence dispalyed in the Vendor Portal, and it is likely that the instance sequence will differ from the release sequence. Instance sequences are only tracked by KOTS instances, and the Vendor Portal has no knowledge of these numbers. - on: - push: - tags: - - "v*.*.*" +The following graphic shows instance sequence numbers on the Admin Console dashboard: - jobs: - ... - ``` +Instance sequence numbers -### Build source code {#rel-build} +[View a larger version of this image](/images/instance-sequences.png) - +#### Channel Sequences -### Create a release and promote to a temporary channel {#rel-release} +When a release is promoted to a channel, a channel sequence number is assigned. This unique sequence number increments by one and tracks the order in which releases were promoted to a channel. You can view the channel sequence on the **Release History** page in the Vendor Portal, as shown in the image below: -Add a job that creates and promotes a release to a temporary channel. This allows the release to be installed for testing in the next step. See the [release create](/reference/replicated-cli-release-create) Replicated CLI command. Or, for GitHub Actions workflows, see [create-release](https://github.com/replicatedhq/replicated-actions/tree/main/create-release). +Channel sequence on Release History page -Consider the following requirements and recommendations: +[View a larger version of this image](/images/release-history-channel-sequence.png) -* Use a consistent naming pattern for the temporary channels. Additionally, configure the workflow so that a new temporary channel with a unique name is created each time that the release workflow runs. +The channel sequence is also used in certain URLs. For example, a release with a *release sequence* of `170` can have a *channel sequence* of `125`. The air gap download URL for that release can contain `125` in the URL, even though the release sequence is `170`. -* Use semantic versioning for the release version label. +Ordering is more complex if some or all of the releases in a channel have a semantic version label and semantic versioning is enabled for the channel. For more information, see [Semantic Versioning Sequence](#semantic-versioning-sequence). - :::note - If semantic versioning is enabled on the channel where you promote the release, then the release version label _must_ be a valid semantic version number. See [Semantic Versioning](releases-about#semantic-versioning) in _About Channels and Releases_. 
- ::: +#### Semantic Versioning Sequence -* For Helm chart-based applications, the release version label must match the version in the `version` field of the Helm chart `Chart.yaml` file. To automatically update the `version` field in the `Chart.yaml` file, you can define a step in this job that updates the version label before packaging the Helm chart into a `.tgz` archive. +For channels with semantic versioning enabled, the Admin Console sequences instance releases by their semantic versions instead of their promotion dates. -* For releases that will be promoted to a customer-facing channel such as Beta or Stable, Replicated recommends that the version label for the release matches the tag that triggered the release workflow. For example, if the tag `1.0.0-beta.1` was used to trigger the workflow, then the version label for the release is also `1.0.0-beta.1`. +If releases without a valid semantic version are already promoted to a channel, the Admin Console sorts the releases that do have semantic versions starting with the earliest version and proceeding to the latest. The releases with non-semantic versioning stay in the order of their promotion dates. For example, assume that you promote these releases in the following order to a channel: -### Create cluster matrix, deploy, and test {#rel-deploy} +- 1.0.0 +- abc +- 0.1.0 +- xyz +- 2.0.0 -Add a job with the following steps to provision clusters with Compatibility Matrix, deploy the release to the clusters, and run tests: +Then, you enable semantic versioning on that channel. The Admin Console sequences the version history for the channel as follows: -1. Create a temporary customer for installing the release. See the [customer create](/reference/replicated-cli-customer-create) Replicated CLI command. Or, for GitHub Actions workflows, see the [create-customer](https://github.com/replicatedhq/replicated-actions/tree/main/create-customer) action. +- 0.1.0 +- 1.0.0 +- abc +- xyz +- 2.0.0 -1. Use Compatibility Matrix to create a matrix of different Kubernetes cluster distributions and versions to run tests against. See the [cluster create](/reference/replicated-cli-cluster-create) Replicated CLI command. Or, for GitHub Actions workflows, see the [create-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/create-cluster) action. +### Semantic Versioning - Consider the following recommendations: +Semantic versioning is available with the Replicated KOTS v1.58.0 and later. Note the following: - * For release workflows, Replicated recommends that you run tests against multiple clusters of different Kubernetes distributions and versions. To help build the matrix, you can review the most common Kubernetes distributions and versions used by your customers on the **Customers > Reporting** page in the Replicated vendor portal. For more information, see [Customer Reporting](/vendor/customer-reporting). +- For applications created in the Vendor Portal on or after February 23, 2022, semantic versioning is enabled by default on the Stable and Beta channels. Semantic versioning is disabled on the Unstable channel by default. - * When using the Replicated CLI, a list of representative customer instances can be obtained using the `api get` command. For example, `replicated api get /v3/app/[APP_ID]/cluster-usage | jq .` You can further filter these results by `channel_id`, `channel_sequence`, and `version_label`. 
- - * GitHub Actions users can also use the `get-customer-instances` action to automate the creation of a cluster matrix based on the distributions of clusters where instances of your application are installed and running. For more information, see the [example workflow](https://github.com/replicatedhq/replicated-actions/blob/main/example-workflows/development-dynamic.yaml) that makes use of [get-customer-instances](https://github.com/replicatedhq/replicated-actions/tree/main/get-customer-instances) in GitHub. +- For existing applications created before February 23, 2022, semantic versioning is disabled by default on all channels. - The following example shows creating a matrix of clusters of different distributions and versions using GitHub Actions: +Semantic versioning is recommended because it makes versioning more predictable for users and lets you enforce versioning so that no one uses an incorrect version. - ```yaml - # github actions cluster matrix example +To use semantic versioning: - compatibility-matrix-example: - runs-on: ubuntu-22.04 - strategy: - matrix: - cluster: - - {distribution: kind, version: "1.25.3"} - - {distribution: kind, version: "1.26.3"} - - {distribution: eks, version: "1.26"} - - {distribution: gke, version: "1.27"} - - {distribution: openshift, version: "4.13.0-okd"} - ``` +1. Enable semantic versioning on a channel, if it is not enabled by default. Click the **Edit channel settings** icon, and turn on the **Enable semantic versioning** toggle. +1. Assign a semantic version number when you promote a release. -1. For each cluster created, use the cluster's kubeconfig to update Kubernetes context and then install the target application in the cluster. For more information about accessing the kubeconfig for clusters created with Compatibility Matrix, see [cluster kubeconfig](/reference/replicated-cli-cluster-kubeconfig). +Releases promoted to a channel with semantic versioning enabled are verified to ensure that the release version label is a valid semantic version. For more information about valid semantic versions, see [Semantic Versioning 2.0.0](https://semver.org). - For more information about installing in an existing cluster, see: - * [Installing with Helm](/vendor/install-with-helm) - * [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster) +If you enable semantic versioning for a channel and then promote releases to it, Replicated recommends that you do not later disable semantic versioning for that channel. -1. Run tests, such as integration, smoke, and canary tests. For more information about recommended types of tests to run, see [Best Practices and Recommendations](/vendor/ci-overview#best-practices-and-recommendations) in _About Integrating with CI/CD_. +You can enable semantic versioning on a channel that already has releases promoted to it without semantic versioning. Any subsequently promoted releases must use semantic versioning. In this case, the channel will have releases with and without semantic version numbers. For information about how Replicated organizes these release sequences, see [Semantic Versioning Sequences](#semantic-versioning-sequence). -1. Delete the cluster when the tests complete. See the [cluster rm](/reference/replicated-cli-cluster-rm) Replicated CLI command. Or, for GitHub Actions workflows, see the [remove-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/remove-cluster) action. 
+### Demotion -### Promote to a shared channel {#rel-promote} +A channel release can be demoted from a channel. When a channel release is demoted, the release is no longer available for download, but is not withdrawn from environments where it was already downloaded or installed. -Add a job that promotes the release to a shared internal-only or customer-facing channel, such as the default Unstable, Beta, or Stable channel. See the [release promote](/reference/replicated-cli-release-promote) Replicated CLI command. Or, for GitHub Actions workflows, see the [promote-release](https://github.com/replicatedhq/replicated-actions/tree/main/promote-release) action. +The demoted release's channel sequence and version are not reused. For customers, the release will appear to have been skipped. Un-demoting a release will restore its place in the channel sequence making it again available for download and installation. -Consider the following requirements and recommendations: +For information about how to demote a release, see [Demote a Release](/vendor/releases-creating-releases#demote-a-release) in _Managing Releases with the Vendor Portal_. -* Replicated recommends that you include the `--version` flag with the `release promote` command to explicitly declare the version label for the release. Use the same version label that was used when the release was created as part of [Create a release and promote to a temporary channel](#rel-release) above. Although the `--version` flag is not required, declaring the same release version label during promotion provides additional consistency that makes the releases easier to track. +## Vendor Portal Pages -* The channel to which the release is promoted depends on the event triggers that you defined for the workflow. For example, if the workflow runs on every commit to the `main` branch, then promote the release to an internal-only channel, such as Unstable. For more information, see [Define Workflow Triggers](#rel-triggers) above. +This section provides information about the channels and releases pages in the Vendor Portal. -* Use the `--release-notes` flag to include detailed release notes in markdown. +### Channels Page -### Archive the temporary channel and customer {#rel-cleanup} +The **Channels** page in the Vendor Portal includes information about each channel. From the **Channels** page, you can edit and archive your channels. You can also edit the properties of the releases promoted to each channel, and view and edit the customers assigned to each channel. -Finally, add a job to archive the temporary channel and customer that you created. This ensures that these artifacts are removed from your Replicated team and that they do not have to be manually archived after the release is promoted. +The following shows an example of a channel in the Vendor Portal **Channels** page: -See the [channel rm](/reference/replicated-cli-channel-rm) Replicated CLI command and the [customer/\{customer_id\}/archive](https://replicated-vendor-api.readme.io/reference/archivecustomer) endpoint in the Vendor API v3 documentation. Or, for GitHub Actions workflows, see the [archive-channel](https://github.com/replicatedhq/replicated-actions/tree/main/archive-channel) and [archive-customer](https://github.com/replicatedhq/replicated-actions/tree/main/archive-customer) actions. 
+Channel card in the Vendor Portal -================ -File: docs/vendor/compatibility-matrix-usage.md -================ -# Viewing Compatibility Matrix Usage History -This topic describes using the Replicated Vendor Portal to understand -Compatibility Matrix usage across your team. +[View a larger version of this image](/images/channel-card.png) -## View Historical Usage -The **Compatibility Matrix > History** page provides -historical information about both clusters and VMs, as shown below: +As shown in the image above, you can do the following from the **Channels** page: -![Compatibility Matrix History Page](/images/compatibility-matrix-history.png) -[View a larger version of this image](/images/compatibility-matrix-history.png) +* Edit the channel settings by clicking on the settings icon, or archive the channel by clicking on the trash can icon. For information about channel settings, see [Settings](#settings). -Only _terminated_ clusters and VMs that have been deleted or errored are displayed on the **History** page. +* In the **Adoption rate** section, view data on the adoption rate of releases promoted to the channel among customers assigned to the channel. -The top of the **History** page displays the total number of terminated clusters and VMs -in the selected time period as well as the total cost and usage time for -the terminated resources. +* In the **Customers** section, view the number of active and inactive customers assigned to the channel. Click **Details** to go to the **Customers** page, where you can view details about the customers assigned to the channel. -The table includes cluster and VM entries with the following columns: -- **Name:** The name of the cluster or VM. -- **By:** The actor that created the resource. -- **Cost:** The cost of the resource. This is calculated at termination and is - based on the time the resource was running. -- **Distribution:** The distribution and version of the resource. For example, - `kind 1.32.1`. -- **Type:** The distribution type of the resource. Kubernetes clusters - are listed as `kubernetes` and VMs are listed as `vm`. -- **Status:** The status of the resource. For example `terminated` or `error`. -- **Instance:** The instance type of the resource. For example `r1.small`. -- **Nodes:** The node count for "kubernetes" resources. VMs do not use this - field. -- **Node Groups:** The node group count for "kubernetes" resources. VMs do not - use this field. -- **Created At:** The time the resource was created. -- **Running At:** The time the resource started running. For billing purposes, - this is the time when Replicated began charging for the resource. -- **Terminated At:** The time the resource was terminated. For billing - purposes, this is the time when Replicated stopped charging for the resource. -- **TTL:** The time-to-live for the resource. This is the maximum amount of - time the resource can run before it is automatically terminated. -- **Duration:** The total time the resource was running. This is the time - between the `running` and `terminated` states. -- **Tag:** Any tags that were applied to the resource. +* In the **Latest release** section, view the properties of the latest release, and get information about any warnings or errors in the YAML files for the latest release. -## Filter and Sort Usage History + Click **Release history** to access the history of all releases promoted to the channel. 
From the **Release History** page, you can view the version labels and files in each release that has been promoted to the selected channel. + + You can also build and download air gap bundles to be used in air gap installations with Replicated installers (Embedded Cluster, KOTS, kURL), edit the release properties for each release promoted to the channel from the **Release History** page, and demote a release from the channel. -Each of the fields on the **History** page can be filtered and sorted. To sort by a specific field, click on the column header. + The following shows an example of the **Release History** page: -To filter by a specific field, click on the filter icon in the column header, then use each specific filter input to filter the results, as shown below: + Release history page in the Vendor Portal -![Compatibility Matrix History Page, filter input](/images/compatibility-matrix-column-filter-input.png) -[View a larger version of this image](/images/compatibility-matrix-column-filter-input.png) + [View a larger version of this image](/images/channel-card.png) -## Get Usage History with the Vendor API v3 +* For applications that support KOTS, you can also do the following from the **Channel** page: -For more information about using the Vendor API v3 to get Compatibility Matrix -usage history information, see the following API endpoints within the -Vendor API v3 documentation: + * In the **kURL installer** section, view the current kURL installer promoted to the channel. Click **Installer history** to view the history of kURL installers promoted to the channel. For more information about creating kURL installers, see [Creating a kURL Installer](packaging-embedded-kubernetes). -* [/v3/cmx/stats](https://replicated-vendor-api.readme.io/reference/getcmxstats) -* [/v3/vms](https://replicated-vendor-api.readme.io/reference/listvms) -* [/v3/clusters](https://replicated-vendor-api.readme.io/reference/listclusters) -* [/v3/cmx/history](https://replicated-vendor-api.readme.io/reference/listcmxhistory) + * In the **Install** section, view and copy the installation commands for the latest release on the channel. -For examples of using these endpoints, see the sections below. +### Draft Release Page -### Credit Balance and Summarized Usage -You can use the `/v3/cmx/stats` endpoint to get summarized usage information in addition to your Compatibility Matrix -credit balance. +For applications that support installation with KOTS, the **Draft** page provides a YAML editor to add, edit, and delete your application files and Replicated custom resources. You click **Releases > Create Release** in the Vendor Portal to open the **Draft** page. -This endpoint returns: +The following shows an example of the **Draft** page in the Vendor Portal: -- **`cluster_count`:** The total number of terminated clusters. -- **`vm_count`:** The total number of terminated VMs. -- **`usage_minutes`:** The total number of billed usage minutes. -- **`cost`:** The total cost of the terminated clusters and VMs in cents. -- **`credit_balance`:** The remaining credit balance in cents. 
+   Draft release page

-```shell
-curl --request GET \
-  --url https://api.replicated.com/vendor/v3/cmx/stats \
-  --header 'Accept: application/json' \
-  --header 'Authorization: $REPLICATED_API_TOKEN'
-{"cluster_count":2,"vm_count":4,"usage_minutes":152,"cost":276,"credit_balance":723}
-```

+   [View a larger version of this image](/images/guides/kots/default-yaml.png)

-The `/v3/cmx/stats` endpoint also supports filtering by `start-time` and
-`end-time`. For example, the following request gets usage information for January 2025:

+You can do the following tasks on the **Draft** page:

-```shell
-curl --request GET \
-  --url 'https://api.replicated.com/vendor/v3/cmx/stats?start-time=2025-01-01T00:00:00Z&end-time=2025-01-31T23:59:59Z' \
-  --header 'Authorization: $REPLICATED_API_TOKEN' \
-  --header 'accept: application/json'
-```

+- In the file directory, manage the file directory structure. Replicated custom resource files are grouped together above the white line of the file directory. Application files are grouped together underneath the white line in the file directory.

-### Currently Active Clusters
-To get a list of active clusters:

+  Delete files using the trash icon that displays when you hover over a file. Create a new file or folder using the corresponding icons at the bottom of the file directory pane. You can also drag and drop files in and out of the folders.

-```shell
-curl --request GET \
-  --url 'https://api.replicated.com/vendor/v3/clusters' \
-  --header 'Authorization: $REPLICATED_API_TOKEN' \
-  --header 'accept: application/json'
-```

+  ![Manage File Directory](/images/new-file-and-trash.png)

-You can also use a tool such as `jq` to filter and iterate over the output:

+- Edit the YAML files by selecting a file in the directory and making changes in the YAML editor.

-```shell
-curl --request GET \
-  --url 'https://api.replicated.com/vendor/v3/clusters' \
-  --header 'Authorization: $REPLICATED_API_TOKEN' \
-  --header 'accept: application/json' | \
-  jq '.clusters[] | {name: .name, ttl: .ttl, distribution: .distribution, version: .version}'

+- In the **Help** or **Config help** pane, view the linter for any errors. If there are no errors, you get an **Everything looks good!** message. If an error displays, you can click the **Learn how to configure** link. For more information, see [Linter Rules](/reference/linter).

-{
-  "name": "friendly_brown",
-  "ttl": "1h",
-  "distribution": "kind",
-  "version": "1.32.1"
-}
-```

+- Select the Config custom resource to preview how your application's Config page will look to your customers. The **Config preview** pane only appears when you select that file. For more information, see [About the Configuration Screen](config-screen-about).

-### Currently Active Virtual Machines
-To get a list of active VMs:

+- Select the Application custom resource to preview how your application icon will look in the Admin Console. The **Application icon preview** only appears when you select that file. For more information, see [Customizing the Application Icon](admin-console-customize-app-icon).

-```shell
-curl --request GET \
-  --url 'https://api.replicated.com/vendor/v3/vms' \
-  --header 'Authorization: $REPLICATED_API_TOKEN' \
-  --header 'accept: application/json'
-```

+================
+File: docs/vendor/releases-creating-channels.md
+================
+# Creating and Editing Channels

-### Historical Usage
-To fetch historical usage information:

+This topic describes how to create and edit channels using the Replicated Vendor Portal.
For more information about channels, see [About Channels and Releases](releases-about).

-```shell
-curl --request GET \
-  --url 'https://api.replicated.com/vendor/v3/cmx/history' \
-  --header 'Authorization: $REPLICATED_API_TOKEN' \
-  --header 'accept: application/json'
-```

+For information about creating channels with the Replicated CLI, see [channel create](/reference/replicated-cli-channel-create).

-You can also filter the response from the `/v3/cmx/history` endpoint by `distribution-type`, which
-allows you to get a list of either clusters or VMs:

+For information about creating and managing channels with the Vendor API v3, see the [channels](https://replicated-vendor-api.readme.io/reference/createchannel) section in the Vendor API v3 documentation.

-- **For clusters, use `distribution-type=kubernetes`:**
-  ```shell
-  curl --request GET \
-    --url 'https://api.replicated.com/vendor/v3/cmx/history?distribution-type=kubernetes' \
-    --header 'Authorization: $REPLICATED_API_TOKEN' \
-    --header 'accept: application/json'
-  ```

+## Create a Channel

-- **For VMs, use `distribution-type=vm`:**
-  ```shell
-  curl --request GET \
-    --url 'https://api.replicated.com/vendor/v3/cmx/history?distribution-type=vm' \
-    --header 'Authorization: $REPLICATED_API_TOKEN' \
-    --header 'accept: application/json'
-  ```

+To create a channel:

+1. From the Replicated [Vendor Portal](https://vendor.replicated.com), select **Channels** from the left menu.
+1. Click **Create Channel**.

-### Filtering Endpoint Results
-Each of these endpoints supports pagination and filtering. You can use the
-following query parameters to filter the results.

+   The Create a new channel dialog opens. For example:

+   Create channel dialog

-:::note
-Each of the examples below
-uses the `/v3/cmx/history` endpoint, but the same query parameters can be used
-with the other endpoints as well.
-:::

+1. Enter a name and description for the channel.
+1. (Recommended) Enable semantic versioning on the channel if it is not enabled by default by turning on **Enable semantic versioning**. For more information about semantic versioning and defaults, see [Semantic Versioning](releases-about#semantic-versioning).

-- **Pagination:** Use the `pageSize` and `currentPage` query parameters to
-  paginate through the results:

+1. (Recommended) Enable an air gap bundle format that supports image digests and deduplication of image layers by turning on **Enable new air gap bundle format**. For more information, see [Using Image Tags and Digests](private-images-tags-digests).

-  ```shell
-  curl --request GET \
-    --url 'https://api.replicated.com/vendor/v3/cmx/history?pageSize=10&currentPage=1' \
-    --header 'Authorization: $REPLICATED_API_TOKEN' \
-    --header 'accept: application/json'
-  ```

+1. Click **Create Channel**.

-- **Filter by date:** Use the `start-time` and `end-time` query parameters to
-  filter the results by a specific date range:

-  ```shell
-  curl --request GET \
-    --url 'https://api.replicated.com/vendor/v3/cmx/history?start-time=2025-01-01T00:00:00Z&end-time=2025-01-31T23:59:59Z' \
-    --header 'Authorization: $REPLICATED_API_TOKEN' \
-    --header 'accept: application/json'
-  ```

-- **Sort by:** Use the `tag-sort-key` query parameter to sort the results by a
-  specific field. The field can be any of the fields returned in the response.
-
-  By default, the results are sorted in ascending order. Use
-  `sortDesc=true` to sort in descending order:

+## Edit a Channel

-  ```shell
-  curl --request GET \
-    --url 'https://api.replicated.com/vendor/v3/cmx/history?tag-sort-key=created_at&sortDesc=true' \
-    --header 'Authorization: $REPLICATED_API_TOKEN' \
-    --header 'accept: application/json'
-  ```

+To edit the settings of an existing channel:

-- **Tag filters:** Use the `tag-filter` query parameter to filter the results by
-  a specific tag:

+1. In the Vendor Portal, select **Channels** from the left menu.
+1. Click the gear icon on the top right of the channel that you want to modify.

-  ```shell
-  curl --request GET \
-    --url 'https://api.replicated.com/vendor/v3/cmx/history?tag-filter=tag1' \
-    --header 'Authorization: $REPLICATED_API_TOKEN' \
-    --header 'accept: application/json'
-  ```

+   The Channel settings dialog opens. For example:

-- **Actor filters:** Use the `actor-filter` query parameter to filter by the actor
-  that created the resource, or by the type of actor, such as `Web UI` or
-  `Replicated CLI`:

+   Channel Settings dialog in the Vendor Portal

-  ```shell
-  curl --request GET \
-    --url 'https://api.replicated.com/vendor/v3/cmx/history?actor-filter=name' \
-    --header 'Authorization: $REPLICATED_API_TOKEN' \
-    --header 'accept: application/json'
-  ```

+1. Edit the fields and click **Save**.

-  :::note
-  If any filter is passed for an object that does not exist, no warning is given.
-  For example, if you filter by `actor-filter=name` and there are no results,
-  the response will be empty.
-  :::

+   For more information about channel settings, see [Settings](releases-about#settings) in _About Channels and Releases_.

-================
-File: docs/vendor/config-screen-about.md
-================
-# About the Configuration Screen

+## Archive a Channel

-This topic describes the configuration screen on the Config tab in the Replicated Admin Console.

+You can archive an existing channel to prevent any new releases from being promoted to the channel.

-## About Collecting Configuration Values

+:::note
+You cannot archive a channel if:
+* There are customers assigned to the channel.
+* The channel is set as the default channel.

-When you distribute your application with Replicated KOTS, you can include a configuration screen in the Admin Console. This configuration screen is used to collect required or optional values from your users that are used to run your application. You can use regular expressions to validate user input for some fields, such as passwords and email addresses. For more information about how to add custom fields to the configuration screen, see [Creating and Editing Configuration Fields](admin-console-customize-config-screen).

+Assign customers to a different channel and set a different channel as the default before archiving.
+:::

-If you use a Helm chart for your application, your users provide any values specific to their environment from the configuration screen, rather than in a Helm chart `values.yaml` file. This means that your users can provide configuration values through a user interface, rather than having to edit a YAML file or use `--set` CLI commands. The Admin Console configuration screen also allows you to control which options you expose to your users.

+To archive a channel with the Vendor Portal or the Replicated CLI:

-For example, you can use the configuration screen to provide database configuration options for your application.
Your users could connect your application to an external database by providing required values in the configuration screen, such as the host, port, and a username and password for the database. +* **Vendor portal**: In the Vendor Portal, go to the **Channels** page and click the trash can icon in the top right corner of the card for the channel that you want to archive. +* **Replicated CLI**: + 1. Run the following command to find the ID for the channel that you want to archive: + ``` + replicated channel ls + ``` + The output of this command includes the ID and name for each channel, as well as information about the latest release version on the channels. -Or, you can also use the configuration screen to provide a database option that runs in the cluster as part of your application. For an example of this use case, see [Example: Adding Database Configuration Options](tutorial-adding-db-config). + 1. Run the following command to archive the channel: + ``` + replicated channel rm CHANNEL_ID + ``` + Replace `CHANNEL_ID` with the channel ID that you retrieved in the previous step. -## Viewing the Configuration Screen + For more information, see [channel rm](/reference/replicated-cli-channel-rm) in the Replicated CLI documentation. -If you include a configuration screen with your application, users of your application can access the configuration screen from the Admin Console: -* During application installation. -* At any time after application installation on the Admin Console Config tab. +================ +File: docs/vendor/releases-creating-cli.mdx +================ +# Managing Releases with the CLI -### Application Installation +This topic describes how to use the Replicated CLI to create and promote releases. -The Admin Console displays the configuration screen when the user installs the application, after they upload their license file. +For information about creating and managing releases with the Vendor Portal, see [Managing Releases with the Vendor Portal](/vendor/releases-creating-releases). -The following shows an example of how the configuration screen displays during installation: +For information about creating and managing releases with the Vendor API v3, see the [releases](https://replicated-vendor-api.readme.io/reference/createrelease) section in the Vendor API v3 documentation. -![configuration screen that displays during application install](/images/config-screen-sentry-enterprise-app-install.png) +## Prerequisites -[View a larger version of this image](/images/config-screen-sentry-enterprise-app-install.png) +Before you create a release using the Replicated CLI, complete the following prerequisites: -### Admin Console Config Tab +* Install the Replicated CLI and then log in to authorize the CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing). + +* Create a new application using the `replicated app create APP_NAME` command. You only need to do this procedure one time for each application that you want to deploy. See [`app create`](/reference/replicated-cli-app-create) in _Reference_. -Users can access the configuration screen any time after they install the application by going to the Config tab in the Admin Console. +* Set the `REPLICATED_APP` environment variable to the slug of the target application. See [Set Environment Variables](/reference/replicated-cli-installing#env-var) in _Installing the Replicated CLI_. 
-The following shows an example of how the configuration screen displays in the Admin Console Config tab: + **Example**: -![configuration screen that displays in the Config tab](/images/config-screen-sentry-enterprise.png) + ```bash + export REPLICATED_APP=my-app-slug + ``` -[View a larger version of this image](/images/config-screen-sentry-enterprise.png) +## Create a Release From a Local Directory {#dir} -================ -File: docs/vendor/config-screen-conditional.mdx -================ -import IntegerComparison from "../partials/template-functions/_integer-comparison.mdx" -import PropertyWhen from "../partials/config/_property-when.mdx" -import DistroCheck from "../partials/template-functions/_string-comparison.mdx" -import NeComparison from "../partials/template-functions/_ne-comparison.mdx" +You can use the Replicated CLI to create a release from a local directory that contains the release files. -# Using Conditional Statements in Configuration Fields +To create and promote a release: -This topic describes how to use Replicated KOTS template functions in the Config custom resource to conditionally show or hide configuration fields for your application on the Replicated KOTS Admin Console **Config** page. +1. (Helm Charts Only) If your release contains any Helm charts: -## Overview + 1. Package each Helm chart as a `.tgz` file. See [Packaging a Helm Chart for a Release](/vendor/helm-install-release). -The `when` property in the Config custom resource denotes configuration groups or items that are displayed on the Admin Console **Config** page only when a condition evaluates to true. When the condition evaluates to false, the group or item is not displayed. + 1. Move the `.tgz` file or files to the local directory that contains the release files: - + ```bash + mv CHART_TGZ PATH_TO_RELEASE_DIR + ``` + Where: + * `CHART_TGZ` is the `.tgz` Helm chart archive. + * `PATH_TO_RELEASE_DIR` is path to the directory that contains the release files. -For more information about the Config custom resource `when` property, see [when](/reference/custom-resource-config#when) in _Config_. + **Example** -## Conditional Statement Examples + ```bash + mv wordpress-1.3.5.tgz manifests + ``` -This section includes examples of common types of conditional statements used in the `when` property of the Config custom resource. + 1. In the same directory that contains the release files, add a HelmChart custom resource for each Helm chart in the release. See [Configuring the HelmChart Custom Resource](helm-native-v2-using). -For additional examples of using conditional statements in the Config custom resource, see [Applications](https://github.com/replicatedhq/platform-examples/tree/main/applications) in the platform-examples repository in GitHub. +1. Lint the application manifest files and ensure that there are no errors in the YAML: + + ```bash + replicated release lint --yaml-dir=PATH_TO_RELEASE_DIR + ``` -### Cluster Distribution Check + Where `PATH_TO_RELEASE_DIR` is the path to the directory with the release files. -It can be useful to show or hide configuration fields depending on the distribution of the cluster because different distributions often have unique requirements. + For more information, see [release lint](/reference/replicated-cli-release-lint) and [Linter Rules](/reference/linter). 
-In the following example, the `when` properties use the [Distribution](/reference/template-functions-static-context#distribution) template function to return the Kubernetes distribution of the cluster where Replicated KOTS is running. If the distribution of the cluster matches the specified distribution, then the `when` property evaluates to true. +1. Do one of the following: - + * **Create and promote the release with one command**: -### Embedded Cluster Distribution Check + ```bash + replicated release create --yaml-dir PATH_TO_RELEASE_DIR --lint --promote CHANNEL + ``` + Where: + * `PATH_TO_RELEASE_DIR` is the path to the directory with the release files. + * `CHANNEL` is the channel ID or the case sensitive name of the channel. -It can be useful to show or hide configuration fields if the distribution of the cluster is [Replicated Embedded Cluster](/vendor/embedded-overview) because you can include extensions in embedded cluster distributions to manage functionality such as ingress and storage. This means that embedded clusters frequently have fewer configuration options for the user. + * **Create and edit the release before promoting**: - + 1. Create the release: -### kURL Distribution Check + ```bash + replicated release create --yaml-dir PATH_TO_RELEASE_DIR + ``` + Where `PATH_TO_RELEASE_DIR` is the path to the directory with the release files. -It can be useful to show or hide configuration fields if the cluster was provisioned by Replicated kURL because kURL distributions often include add-ons to manage functionality such as ingress and storage. This means that kURL clusters frequently have fewer configuration options for the user. + For more information, see [release create](/reference/replicated-cli-release-create). -In the following example, the `when` property of the `not_kurl` group uses the IsKurl template function to evaluate if the cluster was provisioned by kURL. For more information about the IsKurl template function, see [IsKurl](/reference/template-functions-static-context#iskurl) in _Static Context_. + 1. Edit and update the release as desired: -```yaml -# Config custom resource -apiVersion: kots.io/v1beta1 -kind: Config -metadata: - name: config-sample -spec: - groups: - - name: all_distributions - title: Example Group - description: This group always displays. - items: - - name: example_item - title: This item always displays. - type: text - - name: not_kurl - title: Non-kURL Cluster Group - description: This group displays only if the cluster is not provisioned by kURL. - when: 'repl{{ not IsKurl }}' - items: - - name: example_item_non_kurl - title: The cluster is not provisioned by kURL. - type: label -``` + ``` + replicated release update SEQUENCE --yaml-dir PATH_TO_RELEASE_DIR + ``` + Where: + + - `SEQUENCE` is the release sequence number. This identifies the existing release to be updated. + - `PATH_TO_RELEASE_DIR` is the path to the directory with the release files. -As shown in the image below, both the `all_distributions` and `non_kurl` groups are displayed on the **Config** page when KOTS is _not_ running in a kURL cluster: + For more information, see [release update](/reference/replicated-cli-release-update). -![Config page displays both groups from the example](/images/config-example-iskurl-false.png) + 1. Promote the release when you are ready to test it. Releases cannot be edited after they are promoted. To make changes after promotion, create a new release. 
-[View a larger version of this image](/images/config-example-iskurl-false.png) + ``` + replicated release promote SEQUENCE CHANNEL + ``` -However, when KOTS is running in a kURL cluster, only the `all_distributions` group is displayed, as shown below: + Where: + + - `SEQUENCE` is the release sequence number. + - `CHANNEL` is the channel ID or the case sensitive name of the channel. -![Config page displaying only the first group from the example](/images/config-example-iskurl-true.png) + For more information, see [release promote](/reference/replicated-cli-release-promote). -[View a larger version of this image](/images/config-example-iskurl-true.png) +1. Verify that the release was promoted to the target channel: -### License Field Value Equality Check + ``` + replicated release ls + ``` -You can show or hide configuration fields based on the values in a license to ensure that users only see configuration options for the features and entitlements granted by their license. +================ +File: docs/vendor/releases-creating-customer.mdx +================ +import ChangeChannel from "../partials/customers/_change-channel.mdx" +import Download from "../partials/customers/_download.mdx" +import GitOpsNotRecommended from "../partials/gitops/_gitops-not-recommended.mdx" -In the following example, the `when` property of the `new_feature_config` item uses the LicenseFieldValue template function to determine if the user's license contains a `newFeatureEntitlement` field that is set to `true`. For more information about the LicenseFieldValue template function, see [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue) in _License Context_. +# Creating and Managing Customers -```yaml -apiVersion: kots.io/v1beta1 -kind: Config -metadata: - name: config-sample -spec: - groups: - - name: example_settings - title: My Example Config - description: Example fields for using LicenseFieldValue template function - items: - - name: new_feature_config - type: label - title: "You have the new feature entitlement" - when: '{{repl (LicenseFieldValue "newFeatureEntitlement") }}' -``` +This topic describes how to create and manage customers in the Replicated Vendor Portal. For more information about customer licenses, see [About Customers](licenses-about). -As shown in the image below, the **Config** page displays the `new_feature_config` item when the user's license contains `newFeatureEntitlement: true`: +## Create a Customer -![Config page displaying the text "You have the new feature entitlement"](/images/config-example-newfeature.png) +This procedure describes how to create a new customer in the Vendor Portal. You can edit customer details at any time. -[View a larger version of this image](/images/config-example-newfeature.png) +For information about creating a customer with the Replicated CLI, see [customer create](/reference/replicated-cli-customer-create). -### License Field Value Integer Comparison +For information about creating and managing customers with the Vendor API v3, see the [customers](https://replicated-vendor-api.readme.io/reference/getcustomerentitlements) section in the Vendor API v3 documentation. -You can show or hide configuration fields based on the values in a license to ensure that users only see configuration options for the features and entitlements granted by their license. You can also compare integer values from license fields to control the configuration experience for your users. +To create a customer: - +1. 
In the [Vendor Portal](https://vendor.replicated.com), click **Customers > Create customer**.

   The **Create a new customer** page opens:

   ![Create a new customer page in the Vendor Portal](/images/create-customer.png)

   [View a larger version of this image](/images/create-customer.png)

1. For **Customer name**, enter a name for the customer.

1. For **Customer email**, enter the email address for the customer.

   :::note
   A customer email address is required for Helm installations. This email address is never used to send emails to customers.
   :::

1. For **Assigned channel**, assign the customer to one of your channels. You can select any channel that has at least one release. The channel a customer is assigned to determines the application releases that they can install. For more information, see [Channel Assignment](licenses-about#channel-assignment) in _About Customers_.

   :::note

   :::

1. For **Custom ID**, you can enter a custom ID for the customer. Setting a custom ID allows you to easily associate this Replicated customer record with your own internal customer data systems during data exports. Replicated recommends using an alphanumeric value such as your Salesforce ID or Hubspot ID.

   :::note
   Replicated does _not_ require that the custom ID is unique. The custom ID is for vendor data reconciliation purposes, and is not used by Replicated for any functional purposes.
+ ::: -The following example includes `when` properties that use both the ConfigOptionEquals and IsKurl template functions: +1. For **Expiration policy**, by default, **Customer's license does not expire** is enabled. To set an expiration date for the license, enable **Customer's license has an expiration date** and specify an expiration date in the **When does this customer expire?** calendar. -```yaml -apiVersion: kots.io/v1beta1 -kind: Config -metadata: - name: config-sample -spec: - groups: - - name: ingress_settings - title: Ingress Settings - description: Configure Ingress - items: - - name: ingress_type - title: Ingress Type - help_text: | - Select how traffic will ingress to the appliction. - type: radio - items: - - name: ingress_controller - title: Ingress Controller - - name: load_balancer - title: Load Balancer - default: "ingress_controller" - required: true - when: 'repl{{ not IsKurl }}' - - name: ingress_host - title: Hostname - help_text: Hostname used to access the application. - type: text - default: "hostname.example.com" - required: true - when: 'repl{{ and (not IsKurl) (ConfigOptionEquals "ingress_type" "ingress_controller") }}' - - name: ingress_annotations - type: textarea - title: Ingress Annotations - help_text: See your ingress controller’s documentation for the required annotations. - when: 'repl{{ and (not IsKurl) (ConfigOptionEquals "ingress_type" "ingress_controller") }}' - - name: ingress_tls_type - title: Ingress TLS Type - type: radio - items: - - name: self_signed - title: Self Signed (Generate Self Signed Certificate) - - name: user_provided - title: User Provided (Upload a TLS Certificate and Key Pair) - required: true - default: self_signed - when: 'repl{{ and (not IsKurl) (ConfigOptionEquals "ingress_type" "ingress_controller") }}' - - name: ingress_tls_cert - title: TLS Cert - type: file - when: '{{repl and (ConfigOptionEquals "ingress_type" "ingress_controller") (ConfigOptionEquals "ingress_tls_type" "user_provided") }}' - required: true - - name: ingress_tls_key - title: TLS Key - type: file - when: '{{repl and (ConfigOptionEquals "ingress_type" "ingress_controller") (ConfigOptionEquals "ingress_tls_type" "user_provided") }}' - required: true - - name: load_balancer_port - title: Load Balancer Port - help_text: Port used to access the application through the Load Balancer. - type: text - default: "443" - required: true - when: 'repl{{ and (not IsKurl) (ConfigOptionEquals "ingress_type" "load_balancer") }}' - - name: load_balancer_annotations - type: textarea - title: Load Balancer Annotations - help_text: See your cloud provider’s documentation for the required annotations. - when: 'repl{{ and (not IsKurl) (ConfigOptionEquals "ingress_type" "load_balancer") }}' -``` +1. For **Customer type**, set the customer type. Customer type is used only for reporting purposes. Customer access to your application is not affected by the type you assign to them. By default, **Trial** is selected. For more information, see [About Customer License Types](licenses-about-types). -As shown in the image below, the configuration fields that are specific to the ingress controller display only when the user selects the ingress controller option and KOTS is _not_ running in a kURL cluster: +1. Enable any of the available options for the customer. For more information about the license options, see [Built-in License Fields](/vendor/licenses-using-builtin-fields). 
For more information about enabling install types, see [Managing Install Types for a License (Beta)](/vendor/licenses-install-types). -![Config page displaying the ingress controller options](/images/config-example-ingress-controller.png) +1. For **Custom fields**, configure any custom fields that you have added for your application. For more information about how to create custom fields for your application, see [Managing Customer License Fields](licenses-adding-custom-fields). -[View a larger version of this image](/images/config-example-ingress-controller.png) +1. Click **Save Changes**. -Additionally, the options relevant to the load balancer display when the user selects the load balancer option and KOTS is _not_ running in a kURL cluster: +## Edit a Customer -![Config page displaying the load balancer options](/images/config-example-ingress-load-balancer.png) +You can edit the built-in and custom license fields for a customer at any time by going to the **Manage customer** for a customer. For more information, see [Manage Customer Page](licenses-about#about-the-manage-customer-page) in _About Customers and Licensing_. + +Replicated recommends that you test any licenses changes in a development environment. If needed, install the application using a developer license matching the current customer's entitlements before editing the developer license. Then validate the updated license. -[View a larger version of this image](/images/config-example-ingress-load-balancer.png) +:::important +For online environments, changing license entitlements can trigger changes to the customer's installed application instance during runtime. Replicated recommends that you verify the logic your application uses to query and enforce the target entitlement before making any changes. +::: -================ -File: docs/vendor/config-screen-map-inputs.md -================ -# Mapping User-Supplied Values +To edit license fields: -This topic describes how to map the values that your users provide in the Replicated Admin Console configuration screen to your application. +1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers**. -This topic assumes that you have already added custom fields to the Admin Console configuration screen by editing the Config custom resource. For more information, see [Creating and Editing Configuration Fields](admin-console-customize-config-screen). +1. Select the target customer and click the **Manage customer** tab. -## Overview of Mapping Values +1. On the **Manage customer** page, edit the desired fields and click **Save**. -You use the values that your users provide in the Admin Console configuration screen to render YAML in the manifest files for your application. + ![Full manage customer page for a customer named Prestige Financial](/images/customer-details.png) -For example, if you provide an embedded database with your application, you might add a field on the Admin Console configuration screen where users input a password for the embedded database. You can then map the password that your user supplies in this field to the Secret manifest file for the database in your application. +1. Test the changes by installing or updating in a development environment. Do one of the following, depending on the installation method for your application: + * For applications installed with Helm that use the Replicated SDK, you can add logic to your application to enforce entitlements before installation or during runtime using the Replicated SDK API license endpoints. 
See [Checking Entitlements in Helm Charts Before Deployment](licenses-reference-helm). + * For applications installed with Replicated KOTS, update the license in the admin console. See [Update Online Licenses](/enterprise/updating-licenses#update-online-licenses) and [Update Air Gap Licenses](/enterprise/updating-licenses#update-air-gap-licenses) in _Updating Licenses in the Admin Console_. -For an example of mapping database configuration options in a sample application, see [Example: Adding Database Configuration Options](tutorial-adding-db-config). +## Archive a Customer -You can also conditionally deploy custom resources depending on the user input for a given field. For example, if a customer chooses to use their own database with your application rather than an embedded database option, it is not desirable to deploy the optional database resources such as a StatefulSet and a Service. +When you archive a customer in the Vendor Portal, the customer is hidden from search by default and becomes read-only. Archival does not affect the utility of license files downloaded before the customer was archived. -For more information about including optional resources conditionally based on user-supplied values, see [Conditionally Including or Excluding Resources](packaging-include-resources). +To expire a license, set an expiration date and policy in the **Expiration policy** field before you archive the customer. -## About Mapping Values with Template Functions +To archive a customer: -To map user-supplied values, you use Replicated KOTS template functions. The template functions are based on the Go text/template libraries. To use template functions, you add them as strings in the custom resource manifest files in your application. +1. In the Vendor Portal, click **Customers**. Select the target customer then click the **Manage customer** tab. -For more information about template functions, including use cases and examples, see [About Template Functions](/reference/template-functions-about). +1. Click **Archive Customer**. In the confirmation dialog, click **Archive Customer** again. -For more information about the syntax of the template functions for mapping configuration values, see [Config Context](/reference/template-functions-config-context) in the _Template Functions_ section. +You can unarchive by clicking **Unarchive Customer** in the customer's **Manage customer** page. -## Map User-Supplied Values +## Export Customer and Instance Data {#export} -Follow one of these procedures to map user inputs from the configuration screen, depending on if you use a Helm chart for your application: + -* **Without Helm**: See [Map Values to Manifest Files](#map-values-to-manifest-files). -* **With Helm**: See [Map Values to a Helm Chart](#map-values-to-a-helm-chart). +For more information about the data fields in the CSV downloads, see [Data Dictionary](/vendor/instance-data-export#data-dictionary) in _Export Customers and Instance Data_. +## Filter and Search Customers -### Map Values to Manifest Files +The **Customers** page provides a search box and filters that help you find customers: -To map user-supplied values from the configuration screen to manifest files in your application: +search box and filters on the customers page -1. In the [Vendor Portal](https://vendor.replicated.com/apps), click **Releases**. Then, click **View YAML** next to the desired release. +[View a larger version of this image](/images/customers-filter.png) -1. 
Open the Config custom resource manifest file that you created in the [Add Fields to the Configuration Screen](admin-console-customize-config-screen#add-fields-to-the-configuration-screen) procedure. The Config custom resource manifest file has `kind: Config`. +You can filter customers based on whether they are active, by license type, and by channel name. You can filter using more than one criteria, such as Active, Paid, and Stable. However, you can select only one license type and one channel at a time. -1. In the Config manifest file, locate the name of the user-input field that you want to map. +If there is adoption rate data available for the channel that you are filtering by, you can also filter by current version, previous version, and older versions. - **Example**: +You can also filter customers by custom ID or email address. To filter customers by custom ID or email, use the search box and prepend your search term with "customId:" (ex: `customId:1234`) or "email:" (ex: `email:bob@replicated.com`). - ```yaml - apiVersion: kots.io/v1beta1 - kind: Config - metadata: - name: my-application - spec: - groups: - - name: smtp_settings - title: SMTP Settings - description: Configure SMTP Settings - items: - - name: smtp_host - title: SMTP Hostname - help_text: Set SMTP Hostname - type: text - ``` +If you want to filter information using multiple license types or channels, you can download a CSV file instead. For more information, see [Export Customer and Instance Data](#export) above. - In the example above, the field name to map is `smtp_host`. +================ +File: docs/vendor/releases-creating-releases.mdx +================ +import RequiredReleasesLimitations from "../partials/releases/_required-releases-limitations.mdx" +import RequiredReleasesDescription from "../partials/releases/_required-releases-description.mdx" -1. In the same release in the Vendor Portal, open the manifest file where you want to map the value for the field that you selected. +# Managing Releases with the Vendor Portal -1. In the manifest file, use the ConfigOption template function to map the user-supplied value in a key value pair. For example: +This topic describes how to use the Replicated Vendor Portal to create and promote releases, edit releases, edit release properties, and archive releases. - ```yaml - hostname: '{{repl ConfigOption "smtp_host"}}' - ``` +For information about creating and managing releases with the CLI, see [Managing Releases with the CLI](/vendor/releases-creating-cli). - For more information about the ConfigOption template function, see [Config Context](../reference/template-functions-config-context#configoption) in the _Template Functions_ section. +For information about creating and managing releases with the Vendor API v3, see the [releases](https://replicated-vendor-api.readme.io/reference/createrelease) and [channelReleases](https://replicated-vendor-api.readme.io/reference/channelreleaseairgapbundleurl) sections in the Vendor API v3 documentation. - **Example**: +## Create a Release - The following example shows mapping user-supplied TLS certificate and TLS private key files to the `tls.cert` and `tls.key` keys in a Secret custom resource manifest file. +To create and promote a release in the Vendor Portal: - For more information about working with TLS secrets, including a strategy for re-using the certificates uploaded for the Admin Console itself, see the [Configuring Cluster Ingress](packaging-ingress) example. +1. 
From the **Applications** dropdown list, select **Create an app** or select an existing application to update. - ```yaml - apiVersion: v1 - kind: Secret - metadata: - name: tls-secret - type: kubernetes.io/tls - data: - tls.crt: '{{repl ConfigOption "tls_certificate_file" }}' - tls.key: '{{repl ConfigOption "tls_private_key_file" }}' - ``` +1. Click **Releases > Create release**. -1. Save and promote the release to a development environment to test your changes. + ![Create Release](/images/release-create-new.png) -### Map Values to a Helm Chart + [View a larger version of this image](/images/release-create-new.png) -The `values.yaml` file in a Helm chart defines parameters that are specific to each environment in which the chart will be deployed. With Replicated KOTS, your users provide these values through the configuration screen in the Admin Console. You customize the configuration screen based on the required and optional configuration fields that you want to expose to your users. +1. Add your files to the release. You can do this by dragging and dropping files to the file directory in the YAML editor or clicking the plus icon to add a new, untitled YAML file. + +1. For any Helm charts that you add to the release, in the **Select Installation Method** dialog, select the version of the HelmChart custom resource that KOTS will use to install the chart. kots.io/v1beta2 is recommended. For more information about the HelmChart custom resource, see [Configuring the HelmChart Custom Resource](helm-native-v2-using). -To map the values that your users provide in the Admin Console configuration screen to your Helm chart `values.yaml` file, you create a HelmChart custom resource. + select installation method dialog -For a tutorial that shows how to set values in a sample Helm chart during installation with KOTS, see [Set Helm Chart Values with KOTS](/vendor/tutorial-config-setup). + [View a larger version of this image](/images/helm-select-install-method.png) -To map user inputs from the configuration screen to the `values.yaml` file: +1. Click **Save release**. This saves a draft that you can continue to edit until you promote it. + +1. Click **Promote**. In the **Promote Release** dialog, edit the fields: -1. In the [Vendor Portal](https://vendor.replicated.com/apps), click **Releases**. Then, click **View YAML** next to the desired release. + For more information about the requirements and limitations of each field, see Properties in _About Channels and Releases_. -1. Open the Config custom resource manifest file that you created in the [Add Fields to the Configuration Screen](admin-console-customize-config-screen#add-fields-to-the-configuration-screen) procedure. The Config custom resource manifest file has `kind: Config`. + + + + + + + + + + + + + + + + + + + + + +
    | Field | Description |
    |-------|-------------|
    | Channel | Select the channel where you want to promote the release. If you are not sure which channel to use, use the default Unstable channel. |
    | Version label | Enter a version label. If you have one or more Helm charts in your release, the Vendor Portal automatically populates this field. You can change the version label to any version specified in any of the `Chart.yaml` files included in the release. |
    | Requirements | Select **Prevent this release from being skipped during upgrades** to mark the release as required for KOTS installations. This option does not apply to installations with Helm. |
    | Release notes | Add release notes. The release notes support markdown and are shown to your customer. |
    -1. In the Config manifest file, locate the name of the user-input field that you want to map. +1. Click **Promote**. - **Example**: + The release appears in an **Active** state on the Releases page. - ```yaml - apiVersion: kots.io/v1beta1 - kind: Config - metadata: - name: my-application - spec: - groups: - - name: smtp_settings - title: SMTP Settings - description: Configure SMTP Settings - items: - - name: smtp_host - title: SMTP Hostname - help_text: Set SMTP Hostname - type: text - ``` +## Edit a Draft Release - In the example above, the field name to map is `smtp_host`. +To edit a draft release: -1. In the same release, create a HelmChart custom resource manifest file. A HelmChart custom resource manifest file has `kind: HelmChart`. +1. From the **Applications** dropdown list, select an existing application to update. +1. On the **Releases** page, find the draft release you want to edit and click **Edit YAML**. - For more information about the HelmChart custom resource, see [HelmChart](../reference/custom-resource-helmchart) in the _Custom Resources_ section. + Edit YAML button for a draft release in the Vendor Portal -1. In the HelmChart manifest file, copy and paste the name of the property from your `values.yaml` file that corresponds to the field that you selected from the Config manifest file under `values`: + [View a larger image](/images/releases-edit-draft.png) - ```yaml - values: - HELM_VALUE_KEY: - ``` - Replace `HELM_VALUE_KEY` with the property name from the `values.yaml` file. +1. Click **Save** to save your updated draft. +1. (Optional) Click **Promote**. + +## Edit Release Properties -1. Use the ConfigOption template function to set the property from the `values.yaml` file equal to the corresponding configuration screen field: +You can edit the properties of a release at any time. For more information about release properties, see [Properties](releases-about#properties) in _About Channels and Releases_. - ```yaml - values: - HELM_VALUE_KEY: '{{repl ConfigOption "CONFIG_SCREEN_FIELD_NAME" }}' - ``` - Replace `CONFIG_SCREEN_FIELD_NAME` with the name of the field that you created in the Config custom resource. +To edit release properties: - For more information about the KOTS ConfigOption template function, see [Config Context](../reference/template-functions-config-context#configoption) in the _Template Functions_ section. +1. Go to **Channels**. +1. In the channel where the release was promoted, click **Release History**. +1. For the release sequence that you want to edit, open the dot menu and click **Edit release**. +1. Edit the properties as needed. + Release Properties dialog in the Vendor Portal + + [View a larger image](/images/release-properties.png) +1. Click **Update Release**. - **Example:** +## Archive a Release - ```yaml - apiVersion: kots.io/v1beta1 - kind: HelmChart - metadata: - name: samplechart - spec: - chart: - name: samplechart - chartVersion: 3.1.7 - helmVersion: v3 - useHelmInstall: true - values: - hostname: '{{repl ConfigOption "smtp_host" }}' - ``` +You can archive releases to remove them from view on the **Releases** page. Archiving a release that has been promoted does _not_ remove the release from the channel's **Release History** page or prevent KOTS from downloading the archived release. -1. Save and promote the release to a development environment to test your changes. +To archive one or more releases: -================ -File: docs/vendor/custom-domains-using.md -================ -# Using Custom Domains +1. 
From the **Releases** page, click the trash can icon in the upper right corner. +1. Select one or more releases. +1. Click **Archive Releases**. +1. Confirm the archive action when prompted. -This topic describes how to use the Replicated Vendor Portal to add and manage custom domains to alias the Replicated registry, the Replicated proxy registry, the Replicated app service, and the download portal. +## Demote a Release -For information about adding and managing custom domains with the Vendor API v3, see the [customHostnames](https://replicated-vendor-api.readme.io/reference/createcustomhostname) section in the Vendor API v3 documentation. +A channel release can be demoted from a channel. When a channel release is demoted, the release is no longer available for download, but is not withdrawn from environments where it was already downloaded or installed. For more information, see [Demotion](/vendor/releases-about#demotion) in _About Channels and Releases_. -For an overview about custom domains and limitations, see [About Custom Domains](custom-domains). +For information about demoting and un-demoting releases with the Replicated CLI, see [channel demote](/reference/replicated-cli-channel-demote) and [channel un-demote](/reference/replicated-cli-channel-un-demote). -## Configure a Custom Domain +To demote a release in the Vendor Portal: -Before you assign a custom domain for a registry or the download portal, you must first configure and verify the ownership and TLS certificate. +1. Go to **Channels**. +1. In the channel where the release was promoted, click **Release History**. +1. For the release sequence that you want to demote, open the dot menu and select **Demote Release**. -To add and configure a custom domain: + ![Release history page](/images/channels-release-history.png) + [View a larger version of this image](/images/channels-release-history.png) -1. In the [Vendor Portal](https://vendor.replicated.com), go to **Custom Domains**. + After the release is demoted, the given release sequence is greyed out and a **Demoted** label is displayed next to the release on the **Release History** page. -1. In the **Add custom domain** dropdown, select the target Replicated endpoint. +================ +File: docs/vendor/releases-share-download-portal.md +================ +import DownloadPortal from "../partials/kots/_download-portal-about.mdx" - The **Configure a custom domain** wizard opens. +# Downloading Assets from the Download Portal - custom domain wizard +This topic describes how to download customer license files, air gap bundles, and other assets from the Replicated Download Portal. - [View a larger version of this image](/images/custom-domains-download-configure.png) +For information about downloading air gap bundles and licenses with the Vendor API v3, see the following pages in the Vendor API v3 documentation: +* [Download a customer license file as YAML](https://replicated-vendor-api.readme.io/reference/downloadlicense) +* [Trigger airgap build for a channel's release](https://replicated-vendor-api.readme.io/reference/channelreleaseairgapbuild) +* [Get airgap bundle download URL for the active release on the channel](https://replicated-vendor-api.readme.io/reference/channelreleaseairgapbundleurl) -1. For **Domain**, enter the custom domain. Click **Save & continue**. +## Overview -1. For **Create CNAME**, copy the text string and use it to create a CNAME record in your DNS account. Click **Continue**. + -1. 
For **Verify ownership**, copy the text string and use it to create a TXT record in your DNS account if displayed. If a TXT record is not displayed, ownership will be validated automatically using an HTTP token. Click **Validate & continue**. +The most common use case for the Download Portal is for customers installing into air gap environments who need to download both their license file as well as multiple air gap bundles. - Your changes can take up to 24 hours to propagate. +The following is an example of the Download Portal for an air gap customer installing in their own existing cluster: -1. For **TLS cert creation verification**, copy the text string and use it to create a TXT record in your DNS account if displayed. If a TXT record is not displayed, ownership will be validated automatically using an HTTP token. Click **Validate & continue**. +![Download Portal for existing cluster air gap installs](/images/download-portal-existing-cluster.png) - Your changes can take up to 24 hours to propagate. +[View a larger version of this image](/images/download-portal-existing-cluster.png) - :::note - If you set up a [CAA record](https://letsencrypt.org/docs/caa/) for this hostname, you must include all Certificate Authorities (CAs) that Cloudflare partners with. The following CAA records are required to ensure proper certificate issuance and renewal: +## Limitations - ```dns - @ IN CAA 0 issue "letsencrypt.org" - @ IN CAA 0 issue "pki.goog; cansignhttpexchanges=yes" - @ IN CAA 0 issue "ssl.com" - @ IN CAA 0 issue "amazon.com" - @ IN CAA 0 issue "cloudflare.com" - @ IN CAA 0 issue "google.com" - ``` +* Installation assets for [Replicated Embedded Cluster](/vendor/embedded-overview) are not available for download in the Download Portal. - Failing to include any of these CAs might prevent certificate issuance or renewal, which can result in downtime for your customers. For additional security, you can add an IODEF record to receive notifications about certificate requests: +* Sessions in the Download Portal are valid for 72 hours. After the session expires, your customer must log in again. The Download Portal session length is not configurable. - ```dns - @ IN CAA 0 iodef "mailto:your-security-team@example.com" - ``` - ::: +## Download Assets from the Download Portal -1. For **Use Domain**, to set the new domain as the default, click **Yes, set as default**. Otherwise, click **Not now**. +To log in to the Download Portal and download assets: - :::note - Replicated recommends that you do _not_ set a domain as the default until you are ready for it to be used by customers. - ::: +1. In the [Vendor Portal](https://vendor.replicated.com), on the **Customers** page, click on the name of the customer. -The Vendor Portal marks the domain as **Configured** after the verification checks for ownership and TLS certificate creation are complete. +1. (Optional) On the **Manage customer** tab, enable the **Airgap Download Enabled** option. This makes air gap bundles available in the Download Portal. -## Use Custom Domains + ![airgap download enabled license option](/images/airgap-download-enabled.png) -After you configure one or more custom domains in the Vendor Portal, you assign a custom domain by setting it as the default for all channels and customers or by assigning it to an individual release channel. + [View a larger version of this image](/images/airgap-download-enabled.png) -### Set a Default Domain +1. On the **Reporting** tab, in the **Download portal** section, click **Manage customer password**. 
-Setting a default domain is useful for ensuring that the same domain is used across channels for all your customers. + ![download portal section](/images/download-portal-link.png) -When you set a custom domain as the default, it is used by default for all new releases promoted to any channel, as long as the channel does not have a different domain assigned in its channel settings. + [View a larger version of this image](/images/download-portal-link.png) -Only releases that are promoted to a channel _after_ you set a default domain use the new default domain. Any existing releases that were promoted before you set the default continue to use the same domain that they used previously. +1. In the pop-up window, enter a password or click **Generate**. -To set a custom domain as the default: + download portal password pop-up -1. In the Vendor Portal, go to **Custom Domains**. + [View a larger version of this image](/images/download-portal-password-popup.png) -1. Next to the target domain, click **Set as default**. +1. Click **Copy** to copy the password to your clipboard. -1. In the confirmation dialog that opens, click **Yes, set as default**. + After the password is saved, it cannot be retrieved again. If you lose the password, you can generate a new one. -### Assign a Domain to a Channel {#channel-domain} +1. Click **Save** to set the password. -You can assign a domain to an individual channel by editing the channel settings. When you specify a domain in the channel settings, new releases promoted to the channel use the selected domain even if there is a different domain set as the default on the **Custom Domains** page. +1. Click **Visit download portal** to log in to the Download Portal +and preview your customer's experience. -Assigning a domain to a release channel is useful when you need to override either the default Replicated domain or a default custom domain for a specific channel. For example: -* You need to use a different domain for releases promoted to your Beta and Stable channels. -* You need to test a domain in a development environment before you set the domain as the default for all channels. + :::note + By default, the Download Portal uses the domain `get.replicated.com`. You can optionally use a custom domain for the Download Portal. For more information, see [Using Custom Domains](/vendor/custom-domains-using). + ::: -To assign a custom domain to a channel: +1. In the Download Portal, on the left side of the screen, select one of the following: + * **Bring my own Kubernetes**: View the downloadable assets for existing cluster installations with KOTS. + * **Embedded Kubernetes**: View the downloadable assets for Replicated kURL installations. -1. In the Vendor Portal, go to **Channels** and click the settings icon for the target channel. + :::note + Installation assets for [Replicated Embedded Cluster](/vendor/embedded-overview) are not available for download in the Download Portal. + ::: -1. Under **Custom domains**, in the drop-down for the target Replicated endpoint, select the domain to use for the channel. For more information about channel settings, see [Settings](releases-about#settings) in _About Channels and Releases_. 
+ The following is an example of the Download Portal for an air gap customer:

- channel settings dialog
+ ![download portal for existing cluster air gap installs](/images/download-portal-existing-cluster.png)

- [View a larger version of this image](/images/channel-settings.png)
+ [View a larger version of this image](/images/download-portal-existing-cluster.png)

-## Reuse a Custom Domain for Another Application
+1. Under **Select application version**, use the dropdown to select the target application release version. The Download Portal automatically makes the correct air gap bundles available for download based on the selected application version.

-If you have configured a custom domain for one application, you can reuse the custom domain for another application in the same team without going through the ownership and TLS certificate verification process again.
+1. Click the download button to download each asset.

-To reuse a custom domain for another application:
+1. To share installation files with a customer, send the customer their unique link and password for the Download Portal.

-1. In the Vendor Portal, select the application from the dropdown list.
+================
+File: docs/vendor/releases-sharing-license-install-script.mdx
+================
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';

-1. Click **Custom Domains**.
+# Finding Installation Commands for a Release

-1. In the section for the target endpoint, click **Add your first custom domain** for your first domain, or click **Add new domain** for additional domains.
+This topic describes where to find the installation commands and instructions for releases in the Replicated Vendor Portal.

- The **Configure a custom domain** wizard opens.
+For information about getting installation commands with the Replicated CLI, see [channel inspect](/reference/replicated-cli-channel-inspect). For information about getting installation commands with the Vendor API v3, see [Get install commands for a specific channel release](https://replicated-vendor-api.readme.io/reference/getchannelreleaseinstallcommands) in the Vendor API v3 documentation.

-1. In the text box, enter the custom domain name that you want to reuse. Click **Save & continue**.
-
- The last page of the wizard opens because the custom domain was verified previously.
+## Get Commands for the Latest Release

-1. Do one of the following:
+Every channel in the Vendor Portal has an **Install** section where you can find installation commands for the latest release on the channel.

- - Click **Set as default**. In the confirmation dialog that opens, click **Yes, set as default**.
-
- - Click **Not now**. You can come back later to set the domain as the default. The Vendor Portal shows that the domain has a Configured status because it was configured for a previous application, though it is not yet assigned as the default for this application.
+To get the installation commands for the latest release:

+1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Channels** page.

-## Remove a Custom Domain
+1. On the target channel card, under **Install**, click the tab for the type of installation command that you want to view:

-You can remove a custom domain at any time, but you should plan the transition so that you do not break any existing installations or documentation.
+
+

    View the command for installing with Replicated KOTS in existing clusters.
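    For orientation, the KOTS existing cluster command shown on the channel card generally takes the following shape. This is a sketch only — `APP_SLUG` and `CHANNEL` are placeholders for your application slug and channel name (the channel is typically omitted for the default Stable channel); use the exact command shown in the Vendor Portal:

    ```bash
    # Install the KOTS CLI, then deploy the latest release from the channel
    curl https://kots.io/install | bash
    kubectl kots install APP_SLUG/CHANNEL
    ```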

    -Removing a custom domain for the Replicated registry, proxy registry, or Replicated app service will break existing installations that use the custom domain. Existing installations need to be upgraded to a version that does not use the custom domain before it can be removed safely. + Install section of the channel card + [View a larger version of this image](/images/channel-card-install-kots.png) +
    + +

    View the commands for installing with Replicated Embedded Cluster or Replicated kURL on VMs or bare metal servers.

    + +

    In the dropdown, choose **kURL** or **Embedded Cluster** to view the command for the target installer:
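    For orientation, kURL installation commands generally follow this pattern. This is a sketch only — `APP_SLUG` and `CHANNEL` are placeholders (the channel suffix is typically omitted for the default Stable channel); use the exact command shown in the Vendor Portal:

    ```bash
    curl -sSL https://kurl.sh/APP_SLUG-CHANNEL | sudo bash
    ```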

    -If you remove a custom domain for the download portal, it is no longer accessible using the custom URL. You will need to point customers to an updated URL. + Install section of the channel card + [View a larger version of this image](/images/channel-card-install-kurl.png) -To remove a custom domain: + Install section of the channel card + [View a larger version of this image](/images/channel-card-install-ec.png) -1. Log in to the [Vendor Portal](https://vendor.replicated.com) and click **Custom Domains**. + :::note + The Embedded Cluster installation instructions are customer-specific. Click **View customer list** to navigate to the page for the target customer. For more information, see [Get Customer-Specific Installation Instructions for Helm or Embedded Cluster](#customer-specific) below. + ::: +
    + +

    View the command for installing with the Helm CLI in an existing cluster.
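    For orientation, the Helm command pairs a registry login with an OCI chart install. This is a sketch only — `EMAIL`, `LICENSE_ID`, `APP_SLUG`, `CHANNEL`, and `CHART_NAME` are placeholders for the customer-specific values shown in the Vendor Portal:

    ```bash
    # Authenticate to the Replicated registry with the customer's credentials
    helm registry login registry.replicated.com --username EMAIL --password LICENSE_ID
    # Install the chart from the channel
    helm install APP_SLUG oci://registry.replicated.com/APP_SLUG/CHANNEL/CHART_NAME
    ```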

    -1. Verify that the domain is not set as the default nor in use on any channels. You can edit the domains in use on a channel in the channel settings. For more information, see [Settings](releases-about#settings) in _About Channels and Releases_. + Install section of the channel card + [View a larger version of this image](/images/channel-card-install-helm.png) - :::important - When you remove a registry or Replicated app service custom domain, any installations that reference that custom domain will break. Ensure that the custom domain is no longer in use before you remove it from the Vendor Portal. + :::note + The Helm installation instructions are customer-specific. Click **View customer list** to navigate to the page for the target customer. For more information, see [Get Customer-Specific Installation Instructions for Helm or Embedded Cluster](#customer-specific) below. ::: +
    +
    + +## Get Commands for a Specific Release -1. Click **Remove** next to the unused domain in the list, and then click **Yes, remove domain**. +Every channel in the Vendor Portal has a **Release history** page where you can find the installation commands for specific release versions. -================ -File: docs/vendor/custom-domains.md -================ -# About Custom Domains +To get the command for a specific release version: -This topic provides an overview and the limitations of using custom domains to alias the Replicated private registry, Replicated proxy registry, Replicated app service, and the Download Portal. +1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Channels** page. -For information about configuring and managing custom domains, see [Using Custom Domains](custom-domains-using). +1. On the channel card, click **Release history**. -## Overview + Release history link on channel card -You can use custom domains to alias Replicated endpoints by creating Canonical Name (CNAME) records for your domains. + [View a larger version of this image](/images/release-history-link.png) -Replicated domains are external to your domain and can require additional security reviews by your customer. Using custom domains as aliases can bring the domains inside an existing security review and reduce your exposure. +1. For the target release version, open the dot menu and click **Install Commands**. -TXT records must be created to verify: + ![Release history page](/images/channels-release-history.png) -- Domain ownership: Domain ownership is verified when you initially add a record. -- TLS certificate creation: Each new domain must have a new TLS certificate to be verified. + [View a larger version of this image](/images/channels-release-history.png) -The TXT records can be removed after the verification is complete. +1. In the **Install Commands** dialog, click the tab for the type of installation command that you want to view: -You can configure custom domains for the following services, so that customer-facing URLs reflect your company's brand: + + +

    View the command for installing with Replicated KOTS in existing clusters.

    -- **Replicated registry:** Images and Helm charts can be pulled from the Replicated registry. By default, this registry uses the domain `registry.replicated.com`. We suggest using a CNAME such as `registry.{your app name}.com`. + Install section of the channel card + [View a larger version of this image](/images/release-history-install-kots.png) +
    + +

    View the commands for installing with Replicated Embedded Cluster or Replicated kURL on VMs or bare metal servers.

    + +

    In the dropdown, choose **kURL** or **Embedded Cluster** to view the command for the target installer:

    -- **Proxy registry:** Images can be proxied from external private registries using the Replicated proxy registry. By default, the proxy registry uses the domain `proxy.replicated.com`. We suggest using a CNAME such as `proxy.{your app name}.com`. + Install section of the channel card + [View a larger version of this image](/images/release-history-install-kurl.png) -- **Replicated app service:** Upstream application YAML and metadata, including a license ID, are pulled from replicated.app. By default, this service uses the domain `replicated.app`. We suggest using a CNAME such as `updates.{your app name}.com`. + Install section of the channel card + [View a larger version of this image](/images/release-history-install-embedded-cluster.png) -- **Download Portal:** The Download Portal can be used to share customer license files, air gap bundles, and so on. By default, the Download Portal uses the domain `get.replicated.com`. We suggest using a CNAME such as `portal.{your app name}.com` or `enterprise.{your app name}.com`. + :::note + The Embedded Cluster installation instructions are customer-specific. Click **View customer list** to navigate to the page for the target customer. For more information, see [Get Customer-Specific Installation Instructions for Helm or Embedded Cluster](#customer-specific) below. + ::: +
    + +

    View the command for installing with the Helm CLI in an existing cluster.

    -## Limitations + Install section of the channel card + [View a larger version of this image](/images/release-history-install-helm.png) -Using custom domains has the following limitations: + :::note + The Helm installation instructions are customer-specific. Click **View customer list** to navigate to the page for the target customer. For more information, see [Get Customer-Specific Installation Instructions for Helm or Embedded Cluster](#customer-specific) below. + ::: +
    +
    -- A single custom domain cannot be used for multiple endpoints. For example, a single domain can map to `registry.replicated.com` for any number of applications, but cannot map to both `registry.replicated.com` and `proxy.replicated.com`, even if the applications are different. +## Get Customer-Specific Installation Instructions for Helm or Embedded Cluster {#customer-specific} -- Custom domains cannot be used to alias api.replicated.com (legacy customer-facing APIs) or kURL. +Installation instructions for the Helm CLI and Replicated Embedded Cluster are customer-specific. You can find installation instructions on the page for the target customer. -- Multiple custom domains can be configured, but only one custom domain can be the default for each Replicated endpoint. All configured custom domains work whether or not they are the default. +To get customer-specific Helm or Embedded Cluster installation instructions: -- A particular custom domain can only be used by one team. +1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Customers** page and click on the target customer. -================ -File: docs/vendor/custom-metrics.md -================ -# Configuring Custom Metrics (Beta) +1. At the top of the page, click the **Install instructions** drop down, then click **Helm** or **Embedded cluster**. -This topic describes how to configure an application to send custom metrics to the Replicated Vendor Portal. + ![Install instructions button](/images/customer-install-instructions-dropdown.png) -## Overview + [View a larger version of this image](/images/customer-install-instructions-dropdown.png) -In addition to the built-in insights displayed in the Vendor Portal by default (such as uptime and time to install), you can also configure custom metrics to measure instances of your application running customer environments. Custom metrics can be collected for application instances running in online or air gap environments. +1. In the dialog that opens, follow the installation instructions to install. -Custom metrics can be used to generate insights on customer usage and adoption of new features, which can help your team to make more informed prioritization decisions. For example: -* Decreased or plateaued usage for a customer can indicate a potential churn risk -* Increased usage for a customer can indicate the opportunity to invest in growth, co-marketing, and upsell efforts -* Low feature usage and adoption overall can indicate the need to invest in usability, discoverability, documentation, education, or in-product onboarding -* High usage volume for a customer can indicate that the customer might need help in scaling their instance infrastructure to keep up with projected usage + + +

    View the customer-specific Helm CLI installation instructions. For more information about installing with the Helm CLI, see [Installing with Helm](/vendor/install-with-helm).

    + Helm install button + [View a larger version of this image](/images/helm-install-instructions-dialog.png) +
    + +

    View the customer-specific Embedded Cluster installation instructions. For more information about installing with Embedded Cluster, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded).
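    For orientation, the Embedded Cluster instructions generally download the customer-specific installation assets and then run the installer. This is a sketch only — `APP_SLUG`, `CHANNEL`, and `LICENSE_ID` are placeholders; use the exact, customer-specific commands shown in the dialog:

    ```bash
    # Download and extract the installation assets using the customer's license ID
    curl -f "https://replicated.app/embedded/APP_SLUG/CHANNEL" -H "Authorization: LICENSE_ID" -o APP_SLUG-CHANNEL.tgz
    tar -xvzf APP_SLUG-CHANNEL.tgz
    # Run the installer with the customer's license file
    sudo ./APP_SLUG install --license license.yaml
    ```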

    + Embedded cluster install instructions + [View a larger version of this image](/images/embedded-cluster-install-dialog-latest.png) +
    +
    -## How the Vendor Portal Collects Custom Metrics +================ +File: docs/vendor/replicated-api-tokens.md +================ +import TeamTokenNote from "../partials/vendor-api/_team-token-note.mdx" -The Vendor Portal collects custom metrics through the Replicated SDK that is installed in the cluster alongside the application. +# Generating API Tokens -The SDK exposes an in-cluster API where you can configure your application to POST metric payloads. When an application instance sends data to the API, the SDK sends the data (including any custom and built-in metrics) to the Replicated app service. The app service is located at `replicated.app` or at your custom domain. +This topic describes the available types of API tokens and how to generate them for use with the Replicated CLI and Replicated Vendor API v3. -If any values in the metric payload are different from the current values for the instance, then a new event is generated and displayed in the Vendor Portal. For more information about how the Vendor Portal generates events, see [How the Vendor Portal Generates Events and Insights](/vendor/instance-insights-event-data#about-events) in _About Instance and Event Data_. +## About API Tokens -The following diagram demonstrates how a custom `activeUsers` metric is sent to the in-cluster API and ultimately displayed in the Vendor Portal, as described above: +The Vendor API v3 is the API that manages applications in the Replicated Vendor Portal. The Replicated CLI is an implementation of the Vendor API v3. -Custom metrics flowing from customer environment to Vendor Portal - -[View a larger version of this image](/images/custom-metrics-flow.png) - -## Requirements - -To support the collection of custom metrics in online and air gap environments, the Replicated SDK version 1.0.0-beta.12 or later must be running in the cluster alongside the application instance. - -The `PATCH` and `DELETE` methods described below are available in the Replicated SDK version 1.0.0-beta.23 or later. - -For more information about the Replicated SDK, see [About the Replicated SDK](/vendor/replicated-sdk-overview). - -If you have any customers running earlier versions of the SDK, Replicated recommends that you add logic to your application to gracefully handle a 404 from the in-cluster APIs. - -## Limitations +Using the Replicated CLI and Vendor API V3 requires an API token for authorization. Tokens are primarily used for automated customer, channel, and release management. You create tokens in the Vendor Portal. -Custom metrics have the following limitations: +The following types of tokens are available: -* The label that is used to display metrics in the Vendor Portal cannot be customized. Metrics are sent to the Vendor Portal with the same name that is sent in the `POST` or `PATCH` payload. The Vendor Portal then converts camel case to title case: for example, `activeUsers` is displayed as **Active Users**. +- [Service Accounts](#service-accounts) +- [User API Tokens](#user-api-tokens) -* The in-cluster APIs accept only JSON scalar values for metrics. Any requests containing nested objects or arrays are rejected. + -* When using the `POST` method any existing keys that are not included in the payload will be deleted. To create new metrics or update existing ones without sending the entire dataset, simply use the `PATCH` method. +### Service Accounts -## Configure Custom Metrics +Service accounts are assigned a token and associated with an RBAC policy. 
Users with the proper permissions can create, retrieve, or revoke service account tokens. Admin users can assign any RBAC policy to a service account. Non-admin users can only assign their own RBAC policy when they create a service account.

-You can configure your application to `POST` or `PATCH` a set of metrics as key value pairs to the API that is running in the cluster alongside the application instance.
+Service accounts are useful for operations that are not tied to a particular user, such as CI/CD or integrations.

-To remove an existing custom metric, use the `DELETE` endpoint with the custom metric name.
+Updates to a service account's RBAC policy are automatically applied to its associated token. When a service account is removed, its tokens are also invalidated.

-The Replicated SDK provides an in-cluster API custom metrics endpoint at `http://replicated:3000/api/v1/app/custom-metrics`.
+### User API Tokens

-**Example:**
+User API tokens are private to the user creating the token. User tokens assume the user's account when used, including any RBAC permissions.

-```bash
-POST http://replicated:3000/api/v1/app/custom-metrics
-```
+Updates to a user's RBAC role are applied to all of the tokens belonging to that user.

-```json
-{
- "data": {
- "num_projects": 5,
- "weekly_active_users": 10
- }
-}
-```
+Revoking a user token immediately invalidates that token. When a user account is deleted, its user tokens are also deleted.

-```bash
-PATCH http://replicated:3000/api/v1/app/custom-metrics
-```
+## Generate Tokens

-```json
-{
- "data": {
- "num_projects": 54,
- "num_error": 2
- }
-}
-```
+To use the Replicated CLI or the Vendor API v3, you need a User API token or a Service Account token. Existing team API tokens also continue to work.

-```bash
-DELETE http://replicated:3000/api/v1/app/custom-metrics/num_projects
-```
+### Generate a Service Account

-### POST vs PATCH
+To generate a service account:

-The `POST` method will always replace the existing data with the most recent payload received. Any existing keys not included in the most recent payload will still be accessible in the instance events API, but they will no longer appear in the instance summary.
+1. Log in to the Vendor Portal, and select [**Team > Service Accounts**](https://vendor.replicated.com/team/serviceaccounts).
+1. Select **New Service Account**. If one or more service accounts already exist, you can add another by selecting **New Service Account**.

-The `PATCH` method will accept partial updates or add new custom metrics if a key:value pair that does not currently exist is passed.
+1. Edit the fields in the **New Service Account** dialog:

-In most cases, simply using the `PATCH` method is recommended.
+ New Service Accounts Dialog

-For example, if a component of your application sends the following via the `POST` method:
+ [View a larger version of this image](/images/service-accounts.png)

-```json
-{
- "numProjects": 5,
- "activeUsers": 10,
-}
-```
+ 1. For **Nickname**, enter a name for the token. Names for service accounts must be unique within a given team.

-Then, the component later sends the following also via the `POST` method:
+ 1. For **RBAC**, select the RBAC policy from the dropdown list. The token must have `Admin` access to create new releases.

-```json
-{
- "activeUsers": 10,
- "usingCustomReports": false
-}
-```
+ This list includes the Vendor Portal default policies `Admin` and `Read Only`. Any custom policies also display in this list.
For more information, see [Configuring RBAC Policies](team-management-rbac-configuring).

-The instance detail will show `Active Users: 10` and `Using Custom Reports: false`, which represents the most recent payload received. The previously-sent `numProjects` value is discarded from the instance summary and is available in the instance events payload. In order to preserve `numProjects` from the initial payload and upsert `usingCustomReports` and `activeUsers`, use the `PATCH` method instead of `POST` on subsequent calls to the endpoint.
+ Users with a non-admin RBAC role cannot select any other RBAC role when creating a token. They are restricted to creating a token with their same level of access to avoid permission elevation.

-For example, if a component of your application initially sends the following via the `POST` method:
+ 1. (Optional) For custom RBAC policies, select the **Limit to read-only version of above policy** check box if you want to use a policy that has Read/Write permissions but limit this service account to read-only. This option lets you maintain one version of a custom RBAC policy and use it two ways: as read/write and as read-only.

-```json
-{
- "numProjects": 5,
- "activeUsers": 10,
-}
-```
+1. Select **Create Service Account**.

-Then, the component later sends the following also via the `PATCH` method:
-```json
-{
- "usingCustomReports": false
-}
-```
+1. Copy the service account token and save it in a secure location. The token will not be available to view again.

-The instance detail will show `Num Projects: 5`, `Active Users: 10`, `Using Custom Reports: false`, which represents the merged and upserted payload.
+ :::note
+ To remove a service account, select **Remove** for the service account that you want to delete.
+ :::

-### NodeJS Example
+### Generate a User API Token

-The following example shows a NodeJS application that sends metrics on a weekly interval to the in-cluster API exposed by the SDK:
+To generate a user API token:

-```javascript
-async function sendMetrics(db) {
+1. Log in to the Vendor Portal and go to the [Account Settings](https://vendor.replicated.com/account-settings) page.
+1. Under **User API Tokens**, select **Create a user API token**. If one or more tokens already exist, you can add another by selecting **New user API token**.

- const projectsQuery = "SELECT COUNT(*) as num_projects from projects";
- const numProjects = (await db.getConnection().queryOne(projectsQuery)).num_projects;
+ User API Token Page

- const usersQuery =
- "SELECT COUNT(*) as active_users from users where DATEDIFF('day', last_active, CURRENT_TIMESTAMP) < 7";
- const activeUsers = (await db.getConnection().queryOne(usersQuery)).active_users;
+ [View a larger version of this image](/images/user-token-list.png)

- const metrics = { data: { numProjects, activeUsers }};
-
- const res = await fetch('https://replicated:3000/api/v1/app/custom-metrics', {
- method: 'POST',
- headers: {
- "Content-Type": "application/json",
- },
- body: JSON.stringify(metrics),
- });
- if (res.status !== 200) {
- throw new Error(`Failed to send metrics: ${res.statusText}`);
- }
-}
+1. In the **New user API token** dialog, enter a name for the token in the **Nickname** field. Names for user API tokens must be unique per user.
-async function startMetricsLoop(db) { + Create New User Token Dialog - const ONE_DAY_IN_MS = 1000 * 60 * 60 * 24 + [View a larger version of this image](/images/user-token-create.png) - // send metrics once on startup - await sendMetrics(db) - .catch((e) => { console.log("error sending metrics: ", e) }); +1. Select the required permissions or use the default **Read and Write** permissions. Then select **Create token**. - // schedule weekly metrics payload + :::note + The token must have `Read and Write` access to create new releases. + ::: - setInterval( () => { - sendMetrics(db, licenseId) - .catch((e) => { console.log("error sending metrics: ", e) }); - }, ONE_DAY_IN_MS); -} +1. Copy the user API token that displays and save it in a secure location. The token will not be available to view again. -startMetricsLoop(getDatabase()); -``` + :::note + To revoke a token, select **Revoke token** for the token that you want to delete. + ::: -## View Custom Metrics +================ +File: docs/vendor/replicated-onboarding.mdx +================ +import CreateRelease from "../partials/getting-started/_create-promote-release.mdx" +import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" +import EcCr from "../partials/embedded-cluster/_ec-config.mdx" +import HelmPackage from "../partials/helm/_helm-package.mdx" +import Requirements from "../partials/embedded-cluster/_requirements.mdx" +import SDKOverview from "../partials/replicated-sdk/_overview.mdx" +import TestYourChanges from "../partials/getting-started/_test-your-changes.mdx" +import UnauthorizedError from "../partials/replicated-sdk/_401-unauthorized.mdx" -You can view the custom metrics that you configure for each active instance of your application on the **Instance Details** page in the Vendor Portal. +# Replicated Onboarding -The following shows an example of an instance with custom metrics: +This topic describes how to onboard applications to the Replicated Platform. -Custom Metrics section of Instance details page +## Before You Begin -[View a larger version of this image](/images/instance-custom-metrics.png) +This section includes guidance and prerequisites to review before you begin onboarding your application. -As shown in the image above, the **Custom Metrics** section of the **Instance Details** page includes the following information: -* The timestamp when the custom metric data was last updated. -* Each custom metric that you configured, along with the most recent value for the metric. -* A time-series graph depicting the historical data trends for the selected metric. +### Best Practices and Recommendations -Custom metrics are also included in the **Instance activity** stream of the **Instance Details** page. For more information, see [Instance Activity](/vendor/instance-insights-details#instance-activity) in _Instance Details_. +The following are some best practices and recommendations for successfully onboarding with Replicated: -## Export Custom Metrics +* When integrating new Replicated features with an application, make changes in small iterations and test frequently by installing or upgrading the application in a development environment. This will help you to more easily identify issues and troubleshoot. This onboarding workflow will guide you through the process of integrating features in small iterations. 
-You can use the Vendor API v3 `/app/{app_id}/events` endpoint to programmatically access historical time series data containing instance-level events, including any custom metrics that you have defined. For more information about the endpoint, see [Export Customer and Instance Data](/vendor/instance-data-export).
+* Use the Replicated CLI to create and manage your application and releases. Getting familiar with the Replicated CLI will also help later on when integrating Replicated workflows into your CI/CD pipelines. For more information, see [Installing the Replicated CLI](/reference/replicated-cli-installing).

-================
-File: docs/vendor/customer-adoption.md
-================
-# Adoption Report
+* These onboarding tasks assume that you will test the installation of each release on a VM with the Replicated Embedded Cluster installer _and_ in a cluster with the Replicated KOTS installer. If you do not intend to offer existing cluster installations with KOTS (for example, if you intend to support only Embedded Cluster and Helm installations for your users), then you can choose to test with Embedded Cluster only.

-This topic describes the insights in the **Adoption** section on the Replicated Vendor Portal **Dashboard** page.
+* Ask for help from the Replicated community. For more information, see [Getting Help from the Community](#community) below.

-## About Adoption Rate
+### Getting Help from the Community {#community}

-The **Adoption** section on the **Dashboard** provides insights about the rate at which your customers upgrade their instances and adopt the latest versions of your application. As an application vendor, you can use these adoption rate metrics to learn if your customers are completing upgrades regularly, which is a key indicator of the discoverability and ease of application upgrades.
+The [Replicated community site](https://community.replicated.com/) is a forum where Replicated team members and users can post questions and answers related to working with the Replicated Platform. It is designed to help Replicated users troubleshoot and learn more about common tasks involved with distributing, installing, observing, and supporting their application.

-The Vendor Portal generates adoption rate data from all your customers' application instances that have checked in during the selected time period. For more information about instance check-ins, see [How the Vendor Portal Collects Instance Data](instance-insights-event-data#about-reporting) in _About Instance and Event Data_.
+Before posting in the community site, use the search to find existing knowledge base articles related to your question. If you are not able to find an existing article that addresses your question, create a new topic or add a reply to an existing topic so that a member of the Replicated community or team can respond.

-The following screenshot shows an example of the **Adoption** section on the **Dashboard**:
+To search and participate in the Replicated community, see https://community.replicated.com/.

-![Adoption report section on dashboard](/images/customer_adoption_rates.png)
+### Prerequisites

-[View a larger version of this image](/images/customer_adoption_rates.png)
+* Create an account in the Vendor Portal. You can either create a new team or join an existing team. For more information, see [Creating a Vendor Account](vendor-portal-creating-account).

As shown in the screenshot above, the **Adoption** report includes a graph and key adoption rate metrics.
For more information about how to interpret this data, see [Adoption Graph](#graph) and [Adoption Metrics](#metrics) below. +* Install the Replicated CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing). -The **Adoption** report also displays the number of customers assigned to the selected channel and a link to the report that you can share with other members of your team. +* Complete a basic quick start workflow to create an application with a sample Helm chart and then promote and install releases in a development environment. This helps you get familiar with the process of creating, installing, and updating releases in the Replicated Platform. See [Replicated Quick Start](/vendor/quick-start). -You can filter the graph and metrics in the **Adoption** report by: -* License type (Paid, Trial, Dev, or Community) -* Time period (the previous month, three months, six months, or twelve months) -* Release channel to which instance licenses are assigned, such as Stable or Beta +* Ensure that you have access to a VM that meets the requirements for the Replicated Embedded Cluster installer. You will use this VM to test installation with Embedded Cluster. -## Adoption Graph {#graph} + Embedded Cluster has the following requirements: -The **Adoption** report includes a graph that shows the percent of active instances that are running different versions of your application within the selected time period. + -The following shows an example of an adoption rate graph with three months of data: +* (Optional) Ensure that you have kubectl access to a Kubernetes cluster. You will use this cluster to test installation with KOTS. If you do not intend to offer existing cluster installations with KOTS (for example, if you intend to support only Embedded Cluster and Helm installations for your users), then you do not need access to a cluster for the main onboarding tasks. -![Adoption report graph showing three months of data](/images/adoption_rate_graph.png) + You can use any cloud provider or tool that you prefer to create a cluster, such as [Replicated Compatibility Matrix](/vendor/testing-how-to), Google Kubernetes Engine (GKE), or minikube. -[View a larger version of this image](/images/adoption_rate_graph.png) +## Onboard -As shown in the image above, the graph plots the number of active instances in each week in the selected time period, grouped by the version each instance is running. The key to the left of the graph shows the unique color that is assigned to each application version. You can use this color-coding to see at a glance the percent of active instances that were running different versions of your application across the selected time period. +Complete the tasks in this section to onboard your application. When you are done, you can continue to [Next Steps](#next-steps) to integrate other Replicated features with your application. -Newer versions will enter at the bottom of the area chart, with older versions shown higher up. +### Task 1: Create An Application -You can also hover over a color-coded section in the graph to view the number and percentage of active instances that were running the version in a given period. +To get started with onboarding, first create a new application. This will be the official Vendor Portal application used by your team to create and promote both internal and customer-facing releases. -If there are no active instances of your application, then the adoption rate graph displays a "No Instances" message. 
+To create an application: -## Adoption Metrics {#metrics} +1. Create a new application using the Replicated CLI or the Vendor Portal. Use an official name for your application. See [Create an Application](/vendor/vendor-portal-manage-app#create-an-application). + +
    + Can I change the application name in the future? -The **Adoption** section includes metrics that show how frequently your customers discover and complete upgrades to new versions of your application. It is important that your users adopt new versions of your application so that they have access to the latest features and bug fixes. Additionally, when most of your users are on the latest versions, you can also reduce the number of versions for which you provide support and maintain documentation. + You can change the application name, but you cannot change the application _slug_. -The following shows an example of the metrics in the **Adoption** section: + The Vendor Portal automatically generates and assigns a unique slug for each application based on the application's name. For example, the slug for "Example App" would be `example-app`. + + Application slugs are unique across all of Replicated. This means that, if necessary, the Vendor Portal will append a random word to the end of slug to ensure uniqueness. For example, `example-app-flowers`. +
    -![Adoption rate metrics showing](/images/adoption_rate_metrics.png) +1. Set the `REPLICATED_APP` environment variable to the unique slug of the application that you created. This will allow you to interact with the application from the Replicated CLI throughout onboarding. See [Set Environment Variables](/reference/replicated-cli-installing#replicated_app) in _Installing the Replicated CLI_. -[View a larger version of this image](/images/adoption_rate_metrics.png) + For example: -As shown in the image above, the **Adoption** section displays the following metrics: -* Instances on last three versions -* Unique versions -* Median relative age -* Upgrades completed + ```bash + export REPLICATED_APP=my-app + ``` -Based on the time period selected, each metric includes an arrow that shows the change in value compared to the previous period. For example, if the median relative age today is 68 days, the selected time period is three months, and three months ago the median relative age was 55 days, then the metric would show an upward-facing arrow with an increase of 13 days. +### Task 2: Connect Your Image Registry -The following table describes each metric in the **Adoption** section, including the formula used to calculate its value and the recommended trend for the metric over time: +Add credentials for your image registry to the Vendor Portal. This will allow you to use the Replicated proxy registry in a later step so that you can grant proxy access to application images without exposing registry credentials to your customers. - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Metric | Description | Target Trend |
|---|---|---|
| Instances on last three versions | Percent of active instances that are running one of the latest three versions of your application.<br /><br />Formula: `count(instances on last 3 versions) / count(instances)` | Increase towards 100% |
| Unique versions | Number of unique versions of your application running in active instances.<br /><br />Formula: `count(distinct instance_version)` | Decrease towards less than or equal to three |
| Median relative age | The relative age of a single instance is the number of days between the date that the instance's version was promoted to the channel and the date when the latest available application version was promoted to the channel.<br /><br />Median relative age is the median value across all active instances for the selected time period and channel.<br /><br />Formula: `median(relative_age(instance_version))` | Depends on release cadence. For vendors who ship every four to eight weeks, decrease the median relative age towards 60 days or fewer. |
| Upgrades completed | Total number of completed upgrades across active instances for the selected time period and channel.<br /><br />An upgrade is a single version change for an instance. An upgrade is considered complete when the instance deploys the new application version.<br /><br />The instance does not need to become available (as indicated by reaching a Ready state) after deploying the new version for the upgrade to be counted as complete.<br /><br />Formula: `sum(instance.upgrade_count)` across all instances | Increase compared to any previous period, unless you reduce your total number of live instances. |
    +For more information, see [Connecting to an External Registry](/vendor/packaging-private-images). -================ -File: docs/vendor/customer-reporting.md -================ -# Customer Reporting +### Task 3: Add the Replicated SDK and Package your Chart -This topic describes the customer and instance data displayed in the **Customers > Reporting** page of the Replicated Vendor Portal. +Next, add the Replicated SDK as a dependency of your Helm chart and package the chart as a `.tgz` archive. -## About the Customer Reporting Page {#reporting-page} +The Replicated SDK is a Helm chart that can be installed as a small service alongside your application. The SDK provides access to key Replicated functionality, including an in-cluster API and automatic access to insights and operational telemetry for instances running in customer environments. For more information, see [About the Replicated SDK](/vendor/replicated-sdk-overview). -The **Customers > Reporting** page displays data about the active application instances associated with each customer. The following shows an example of the **Reporting** page: +To package your Helm chart with the Replicated SDK: -![Customer reporting page showing two active instances](/images/customer-reporting-page.png) +1. Go to the local directory where your Helm chart is. -[View a larger version of this image](/images/customer-reporting-page.png) +1. In your application Helm chart `Chart.yaml` file, add the YAML below to declare the SDK as a dependency. + + If your application is installed as multiple charts, declare the SDK as a dependency of the chart that customers install first. Do not declare the SDK in more than one chart. For more information, see [Packaging a Helm Chart for a Release](helm-install-release). -As shown in the image above, the **Reporting** page has the following main sections: -* [Manage Customer](#manage-customer) -* [Time to Install](#time-to-install) -* [Download Portal](#download-portal) -* [Instances](#instances) + -### Manage Customer +1. Update dependencies and package the chart as a `.tgz` file: -The manage customer section displays the following information about the customer: + -* The customer name -* The channel the customer is assigned -* Details about the customer license: - * The license type - * The date the license was created - * The expiration date of the license -* The features the customer has enabled, including: - * GitOps - * Air gap - * Identity - * Snapshots - -In this section, you can also view the Helm CLI installation instructions for the customer and download the customer license. + -### Time to Install +1. If your application is deployed as multiple Helm charts, package each chart as a separate `.tgz` archive using the `helm package -u PATH_TO_CHART` command. Do not declare the SDK in more than one chart. -If the customer has one or more application instances that have reached a Ready status at least one time, then the **Time to install** section displays _License time to install_ and _Instance time to install_ metrics: +### Task 4: Create the Initial Release with KOTS HelmChart and Embedded Cluster Config {#first-release} -* **License time to install**: The time between when you create the customer license in the Vendor Portal, and when the application instance reaches a Ready status in the customer environment. 
-* **Instance time to install**: The time between when the Vendor Portal records the first event for the application instance in the customer environment, and when the instance reaches a Ready status. +After packaging your Helm chart, you can create a release. The initial release for your application will include the minimum files required to install a Helm chart with the Embedded Cluster installer: +* The Helm chart `.tgz` archive +* [KOTS HelmChart custom resource](/reference/custom-resource-helmchart-v2) +* [Embedded Cluster Config](/reference/embedded-config) -A _Ready_ status indicates that all Kubernetes resources for the application are Ready. For example, a Deployment resource is considered Ready when the number of Ready replicas equals the total desired number of replicas. For more information, see [Enabling and Understanding Application Status](insights-app-status). +If you have multiple charts, you will add each chart archive to the release, plus a corresponding KOTS HelmChart custom resource for each archive. -If the customer has no application instances that have ever reported a Ready status, or if you have not configured your application to deliver status data to the Vendor Portal, then the **Time to install** section displays a **No Ready Instances** message. +:::note +Configuring the KOTS HelmChart custom resource includes several tasks, and involves the use of KOTS template functions. Depending on how many Helm charts your application uses, Replicated recommends that you allow about two to three hours for configuring the HelmChart custom resource and creating and testing your initial release. +::: -If the customer has more than one application instance that has previously reported a Ready status, then the **Time to install** section displays metrics for the instance that most recently reported a Ready status for the first time. +To create the first release for your application: -For example, Instance A reported its first Ready status at 9:00 AM today. Instance B reported its first Ready status at 8:00 AM today, moved to a Degraded status, then reported a Ready status again at 10:00 AM today. In this case, the Vendor Portal displays the time to install metrics for Instance A, which reported its _first_ Ready status most recently. +1. In the local directory for your Helm chart, create a subdirectory named `manifests` where you will add the files for the release. -For more information about how to interpret the time to install metrics, see [Time to Install](instance-insights-details#time-to-install) in _Instance Details_. +1. In the `manifests` directory: -### Download Portal + 1. Move the `.tgz` chart archive that you packaged. If your application is deployed as multiple Helm charts, move each `.tgz` archive to `manifests`. -From the **Download portal** section, you can: -* Manage the password for the Download Portal -* Access the unique Download Portal URL for the customer + 1. Create an `embedded-cluster.yaml` file with the following default Embedded Cluster Config: -You can use the Download Portal to give your customers access to the files they need to install your application, such as their license file or air gap bundles. For more information, see [Downloading Assets from the Download Portal](releases-share-download-portal). + -### Instances +
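    As a minimal sketch (not the full default file), an Embedded Cluster Config has the following shape. The `version` value below is an assumption for illustration — pin it to the Embedded Cluster release you are targeting:

    ```yaml
    # embedded-cluster.yaml
    apiVersion: embeddedcluster.replicated.com/v1beta1
    kind: Config
    spec:
      # Embedded Cluster version to install; placeholder shown here —
      # check the Embedded Cluster release notes for a current version.
      version: 2.1.3+k8s-1.30
    ```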
    + What is the Embedded Cluster Config? + + The Embedded Cluster Config is required to install with Embedded Cluster. +
    + + For more information, see [Using Embedded Cluster](/vendor/embedded-overview). -The **Instances** section displays details about the active application instances associated with the customer. + 1. Create a new YAML file. In this file, configure the KOTS HelmChart custom resource by completing the workflow in [Configuring the HelmChart Custom Resource](helm-native-v2-using). + +
    + What is the KOTS HelmChart custom resource? + + The KOTS HelmChart custom resource is required to install Helm charts with KOTS and Embedded Cluster. As part of configuring the KOTS HelmChart custom resource, you will rewrite image names and add image pull secrets to allow your application images to be accessed through the Replicated proxy registry. +
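    As a minimal sketch, a HelmChart custom resource ties the chart name and version from your `.tgz` archive to the values that are applied at deploy time — the names and version below are placeholders:

    ```yaml
    apiVersion: kots.io/v1beta2
    kind: HelmChart
    metadata:
      name: samplechart
    spec:
      chart:
        # Must match the name and version in the chart's Chart.yaml
        name: samplechart
        chartVersion: 1.0.0
      # Helm values merged into the chart's default values at install time
      values: {}
    ```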
    -You can click any of the rows in the **Instances** section to open the **Instance details** page. The **Instance details** page displays additional event data and computed metrics to help you understand the performance and status of each active application instance. For more information, see [Instance Details](instance-insights-details). + 1. If your application is deployed as multiple Helm charts, repeat the step above to add a separate HelmChart custom resource for each Helm chart archive in the release. -The following shows an example of a row for an active instance in the **Instances** section: + 1. If there are values in any of your Helm charts that need to be set for the installation to succeed, you can set those values using the `values` key in the corresponding HelmChart custom resource. See [Setting Helm Values with KOTS](/vendor/helm-optional-value-keys). + + This is a temporary measure to ensure the values get passed to the Helm chart during installation until you configure the Admin Console Config screen in a later onboarding task. If your default Helm values are sufficient for installation, you can skip this step. -![Row in the Instances section](/images/instance-row.png) -[View a larger version of this image](/images/instance-row.png) + 1. If your application requires that certain components are deployed before the application and as part of the Embedded Cluster itself, then update the Embedded Cluster Config to add [extensions](/reference/embedded-config#extensions). Extensions allow you to provide Helm charts that are deployed before your application. For example, one situation where this is useful is if you want to ship an ingress controller because Embedded Cluster does not include one. -The **Instances** section displays the following details about each active instance: -* The first seven characters of the instance ID. -* The status of the instance. Possible statuses are Missing, Unavailable, Degraded, Ready, and Updating. For more information, see [Enabling and Understanding Application Status](insights-app-status). -* The application version. -* Details about the cluster where the instance is installed, including: - * The Kubernetes distribution for the cluster, if applicable. - * The Kubernetes version running in the cluster. - * Whether the instance is installed in a Replicated kURL cluster. - * (kURL Clusters Only) The number of nodes ready in the cluster. - * (KOTS Only) The KOTS version running in the cluster. - * The Replicated SDK version running in the cluster. - * The cloud provider and region, if applicable. -* Instance uptime data, including: - * The timestamp of the last recorded check-in for the instance. For more information about what triggers an instance check-in, see [How the Vendor Portal Collects Instance Data](instance-insights-event-data#about-reporting) in _About Instance and Event Data_. - * An uptime graph of the previous two weeks. For more information about how the Vendor Portal determines uptime, see [Instance Uptime](instance-insights-details#instance-uptime) in _Instance Details_. - * The uptime ratio in the previous two weeks. + For more information, see [extensions](/reference/embedded-config#extensions) in _Embedded Cluster Config_. -================ -File: docs/vendor/data-availability.md -================ -# Data Availability and Continuity +1. From the `manifests` directory, create a release and promote it to the Unstable channel. 
For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Managing Releases with the CLI](releases-creating-cli). -Replicated uses redundancy and a cloud-native architecture in support of availability and continuity of vendor data. + ```bash + replicated release create --yaml-dir . --promote Unstable + ``` -## Data Storage Architecture +1. Install the release in your development environment to test: -To ensure availability and continuity of necessary vendor data, Replicated uses a cloud-native architecture. This cloud-native architecture includes clustering and network redundancies to eliminate single point of failure. + 1. Install with Embedded Cluster on a VM. See [Online Installation with Embedded Cluster](/enterprise/installing-embedded). + + 1. (Optional) Install in an existing cluster with KOTS. See [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster). -Replicated stores vendor data in various Amazon Web Services (AWS) S3 buckets and multiple databases. Data stored in the AWS S3 buckets includes registry images and air gap build data. +After successfully installing the initial release on a VM with Embedded Cluster (and optionally in an existing cluster with KOTS), go to the next task. You will continue to iterate throughout the rest of the onboarding process by creating and promoting new releases, then upgrading to the new version in your development environment. -The following diagram shows the flow of air gap build data and registry images from vendors to enterprise customers. +### Task 5: Customize the KOTS Admin Console {#admin-console} -![Architecture diagram of Replicated vendor data storage](/images/data-storage.png) +Configure the KOTS Application custom resource to add an application name, icon, and status informers. The name and icon will be displayed in the Admin Console and the Replicated Download Portal. The status informers will be used to display the application status on the Admin Console dashboard. -[View a larger version of this image](/images/data-storage.png) +To configure the KOTS Application custom resource: -As shown in the diagram above, vendors push application images to an image registry. Replicated stores this registry image data in AWS S3 buckets, which are logically isolated by vendor portal Team. Instances of the vendor's application that are installed by enterprise customers pull data from the image registry. +1. In your `manifests` directory, create a new `kots-app.yaml` file. -For more information about how Replicated secures images pushed to the Replicated registry, see [Replicated Registry Security](packaging-private-registry-security). +1. In the `kots-app.yaml` file, add the [KOTS Application](/reference/custom-resource-application) custom resource YAML and set the `title`, `icon`, and `statusInformers` fields. -The diagram also shows how enterprise customers access air gap build data from the customer download portal. Replicated stores this air gap build data in AWS S3 buckets. 
+ **Example:** -## Data Recovery + ```yaml + apiVersion: kots.io/v1beta1 + kind: Application + metadata: + name: gitea + spec: + title: Gitea + # Base64 encoded image string + icon: fyJINrigNkt5VsRiub9nXICdsYyVd2NcVvA3ScE5t2rb5JuEeyZnAhmLt9NK63vX1O + statusInformers: + - deployment/gitea + ``` + For more information, see: + * [Customizing the Application Icon](/vendor/admin-console-customize-app-icon) + * [Enabling and Understanding Application Status](/vendor/insights-app-status) + * [Application](/reference/custom-resource-application) +
    +
    + Can I preview the icon before installing the release? -Our service provider's platform automatically restores customer applications and databases in the case of an outage. The provider's platform is designed to dynamically deploy applications within its cloud, monitor for failures, and recover failed platform components including customer applications and databases. + Yes. The Vendor Portal includes a **Application icon preview** in the **Help** pane on the **Edit release** page. -For more information, see the [Replicated Security White Paper](https://www.replicated.com/downloads/Replicated-Security-Whitepaper.pdf). + ![Icon preview](/images/icon-preview.png) -## Data Availability + [View a larger version of this image](/images/icon-preview.png) -Replicated availability is continuously monitored. For availability reports, see https://status.replicated.com. +
    -## Offsite Data Backup Add-on +1. -For additional data redundancy, an offsite data backup add-on is available to copy customers data to a separate cloud provider. This add-on mitigates against potential data loss by our primary service provider. For more information, see [Offsite Data Backup](offsite-backup). +1. -================ -File: docs/vendor/database-config-adding-options.md -================ -# About Managing Stateful Services +### Task 6: Set Up the Admin Console Config Screen and Map to Helm Values -This topic provides recommendations for managing stateful services that you install into existing clusters. +The KOTS Admin Console Config screen is used to collect required and optional application configuration values from your users. User-supplied values provided on the Config screen can be mapped to your Helm values. -## Preflight Checks for Stateful Services +Before you begin this task, you can complete the [Set Helm Values with KOTS](/vendor/tutorial-config-setup) tutorial to learn how to map user-supplied values from the Admin Console Config screen to a Helm chart. -If you expect to also install stateful services into existing clusters, you will likely want to expose [preflight analyzers that check for the existence of a storage class](https://troubleshoot.sh/reference/analyzers/storage-class/). +:::note +Setting up the Admin Console config screen can include the use of various types of input fields, conditional statements, and KOTS template functions. Depending on your application's configuration options, Replicated recommends that you allow about two to three hours for configuring the Config custom resource and testing the Admin Console config screen. +::: -If you are allowing end users to provide connection details for external databases, you can often use a troubleshoot.sh built-in [collector](https://troubleshoot.sh/docs/collect/) and [analyzer](https://troubleshoot.sh/docs/analyze/) to validate the connection details for [Postgres](https://troubleshoot.sh/docs/analyze/postgresql/), [Redis](https://troubleshoot.sh/docs/collect/redis/), and many other common datastores. These can be included in both `Preflight` and `SupportBundle` specifications. +To set up the Admin Console Config screen for your application: -## About Adding Persistent Datastores +1. In your `manifests` directory, create a new file named `kots-config.yaml`. -You can integrate persistent stores, such as databases, queues, and caches. There are options to give an end user, such as embedding an instance alongside the application or connecting an application to an external instance that they will manage. +1. In `kots-config.yaml`, add the KOTS Config custom resource. Configure the KOTS Config custom resource based on the values that you need to collect from users. -For an example of integrating persistent datastores, see [Example: Adding Database Configuration Options](tutorial-adding-db-config). + **Example:** -================ -File: docs/vendor/embedded-disaster-recovery.mdx -================ -# Disaster Recovery for Embedded Cluster (Alpha) - -This topic describes the disaster recovery feature for Replicated Embedded Cluster, including how to enable disaster recovery for your application. It also describes how end users can configure disaster recovery in the Replicated KOTS Admin Console and restore from a backup. - -:::important -Embedded Cluster disaster recovery is an Alpha feature. This feature is subject to change, including breaking changes. 
To get access to this feature, reach out to Alex Parker at [alexp@replicated.com](mailto:alexp@replicated.com). -::: + ```yaml + apiVersion: kots.io/v1beta1 + kind: Config + metadata: + name: my-application + spec: + groups: + - name: example_group + title: Example Group + items: + - name: example_item + title: Example Item + type: text + default: "Hello World" + ``` -:::note -Embedded Cluster does not support backup and restore with the KOTS snapshots feature. For more information about using snapshots for existing cluster installations with KOTS, see [About Backup and Restore with Snapshots](/vendor/snapshots-overview). -::: + For more information, see: + * [Creating and Editing Configuration Fields](/vendor/admin-console-customize-config-screen) + * [Using Conditional Statements in Configuration Fields](/vendor/config-screen-conditional) + * [Config](/reference/custom-resource-config) -## Overview +
    -The Embedded Cluster disaster recovery feature allows your customers to take backups from the Admin Console and perform restores from the command line. Disaster recovery for Embedded Cluster is implemented with Velero. For more information about Velero, see the [Velero](https://velero.io/docs/latest/) documentation. +
    + Can I preview the Admin Console config screen before installing the release? -The backups that your customers take from the Admin Console will include both the Embedded Cluster infrastructure and the application resources that you specify. + Yes. The Vendor Portal includes a **Config preview** in the **Help** pane on the **Edit release** page. -The Embedded Cluster infrastructure that is backed up includes components such as the KOTS Admin Console and the built-in registry that is deployed for air gap installations. No configuration is required to include Embedded Cluster infrastructure in backups. Vendors specify the application resources to include in backups by configuring a Velero Backup resource in the application release. + For example: -## Requirements + ![Config preview](/images/config-preview.png) -Embedded Cluster disaster recovery has the following requirements: + [View a larger version of this image](/images/config-preview.png) +
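+For reference, the mapping that you configure in the next step typically uses the KOTS ConfigOption template function in the `values` key of the HelmChart custom resource. The following is a minimal sketch (the chart name and version are placeholders, and `example_item` is the example config field shown above):
+
+```yaml
+apiVersion: kots.io/v1beta2
+kind: HelmChart
+metadata:
+  name: samplechart
+spec:
+  chart:
+    name: samplechart
+    chartVersion: 3.1.7
+  values:
+    # Render the user-supplied value of the example_item config field
+    exampleItem: '{{repl ConfigOption "example_item" }}'
+```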
    -* The disaster recovery feature flag must be enabled for your account. To get access to disaster recovery, reach out to Alex Parker at [alexp@replicated.com](mailto:alexp@replicated.com). -* Embedded Cluster version 1.22.0 or later -* Backups must be stored in S3-compatible storage +1. -## Limitations and Known Issues +1. -Embedded Cluster disaster recovery has the following limitations and known issues: +1. In `manifests`, open the KOTS HelmChart custom resource that you configured in a previous step. Configure the `values` key of the HelmChart custom resource to map the fields in the KOTS Config custom resource to your Helm values. -* During a restore, the version of the Embedded Cluster installation assets must match the version of the application in the backup. So if version 0.1.97 of your application was backed up, the Embedded Cluster installation assets for 0.1.97 must be used to perform the restore. Use `./APP_SLUG version` to check the version of the installation assets, where `APP_SLUG` is the unique application slug. For example: + For more information, see: + * [Mapping User-Supplied Values](/vendor/config-screen-map-inputs) + * [Tutorial: Set Helm Chart Values with KOTS](/vendor/tutorial-config-setup) + * [Setting Helm Values with KOTS](/vendor/helm-optional-value-keys) + * [`values`](/reference/custom-resource-helmchart-v2#values) in _HelmChart v2_ - version command +1. - [View a larger version of this image](/images/ec-version-command.png) +1. -* Any Helm extensions included in the `extensions` field of the Embedded Cluster Config are _not_ included in backups. Helm extensions are reinstalled as part of the restore process. To include Helm extensions in backups, configure the Velero Backup resource to include the extensions using namespace-based or label-based selection. For more information, see [Configure the Velero Custom Resources](#config-velero-resources) below. +1. Continue to create and test new releases with new config fields until you are ready to move on to the next task. -* Users can only restore from the most recent backup. +### Task 7: Define Preflight Checks -* Velero is installed only during the initial installation process. Enabling the disaster recovery license field for customers after they have already installed will not do anything. +In the next two tasks, you will add specs for _preflight checks_ and _support bundles_. -* If the `--admin-console-port` flag was used during install to change the port for the Admin Console, note that during a restore the Admin Console port will be used from the backup and cannot be changed. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install). +Preflight checks and support bundles are provided by the Troubleshoot open source project, which is maintained by Replicated. Troubleshoot is a kubectl plugin that provides diagnostic tools for Kubernetes applications. For more information, see the open source [Troubleshoot](https://troubleshoot.sh/docs/) documentation. -## Configure Disaster Recovery +Preflight checks and support bundles analyze data from customer environments to provide insights that help users to avoid or troubleshoot common issues with an application: +* **Preflight checks** run before an application is installed to check that the customer environment meets the application requirements. +* **Support bundles** collect troubleshooting data from customer environments to help users diagnose problems with application deployments. 
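+As a concrete illustration before you begin, the following is a minimal preflight spec (a sketch only; the `clusterVersion` analyzer, version numbers, and messages are example values, not requirements for your application). In this task, you wrap a spec like this in a Kubernetes Secret in your chart's `templates` directory:
+
+```yaml
+apiVersion: troubleshoot.sh/v1beta2
+kind: Preflight
+metadata:
+  name: my-app-preflights
+spec:
+  analyzers:
+    # Example analyzer: verify that the cluster runs a supported Kubernetes version
+    - clusterVersion:
+        outcomes:
+          - fail:
+              when: "< 1.26.0"
+              message: This application requires Kubernetes 1.26.0 or later.
+          - pass:
+              message: The cluster is running a supported Kubernetes version.
+```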
-This section describes how to configure disaster recovery for Embedded Cluster installations. It also describes how to enable access to the disaster recovery feature on a per-customer basis. +:::note +Before you begin this task, you can complete the [Add Preflight Checks to a Helm Chart](/vendor/tutorial-preflight-helm-setup) tutorial to learn how to add a preflight spec to a Helm chart in a Kubernetes secret and run the preflight checks before installation. +::: -### Configure the Velero Custom Resources {#config-velero-resources} +To define preflight checks for your application: -This section describes how to set up Embedded Cluster disaster recovery for your application by configuring Velero [Backup](https://velero.io/docs/latest/api-types/backup/) and [Restore](https://velero.io/docs/latest/api-types/restore/) custom resources in a release. +1. In your Helm chart `templates` directory, add a Kubernetes Secret that includes a preflight spec. For more information, see [Defining Preflight Checks](/vendor/preflight-defining). For examples, see [Example Preflight Specs](/vendor/preflight-examples). + :::note + If your application is deployed as multiple Helm charts, add the Secret to the `templates` directory for the chart that is installed first. + ::: -To configure Velero Backup and Restore custom resources for Embedded Cluster disaster recovery: +1. Update dependencies and package the chart as a `.tgz` file: -1. In a new release containing your application files, add a Velero Backup resource. In the Backup resource, use namespace-based or label-based selection to indicate the application resources that you want to be included in the backup. For more information, see [Backup API Type](https://velero.io/docs/latest/api-types/backup/) in the Velero documentation. + - :::important - If you use namespace-based selection to include all of your application resources deployed in the `kotsadm` namespace, ensure that you exclude the Replicated resources that are also deployed in the `kotsadm` namespace. Because the Embedded Cluster infrastructure components are always included in backups automatically, this avoids duplication. - ::: +1. Move the `.tgz` file to the `manifests` directory. - **Example:** +1. - The following Backup resource uses namespace-based selection to include application resources deployed in the `kotsadm` namespace: +1. - ```yaml - apiVersion: velero.io/v1 - kind: Backup - metadata: - name: backup - spec: - # Back up the resources in the kotsadm namespace - includedNamespaces: - - kotsadm - orLabelSelectors: - - matchExpressions: - # Exclude Replicated resources from the backup - - { key: kots.io/kotsadm, operator: NotIn, values: ["true"] } - ``` + Preflight checks run automatically during installation. -1. In the same release, add a Velero Restore resource. In the `backupName` field of the Restore resource, include the name of the Backup resource that you created. For more information, see [Restore API Type](https://velero.io/docs/latest/api-types/restore/) in the Velero documentation. +1. Continue to create and test new releases with additional preflight checks until you are ready to move on to the next task. - **Example**: +### Task 8: Add a Support Bundle Spec - ```yaml - apiVersion: velero.io/v1 - kind: Restore - metadata: - name: restore - spec: - # the name of the Backup resource that you created - backupName: backup - includedNamespaces: - - '*' - ``` +To add the default support bundle spec to your application: -1. 
For any image names that you include in your Backup and Restore resources, rewrite the image name using the Replicated KOTS [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry), [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost), and [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace) template functions. This ensures that the image name is rendered correctly during deployment, allowing the image to be pulled from the user's local image registry (such as in air gap installations) or through the Replicated proxy registry. +1. In your Helm chart `templates` directory, add the following YAML to a Kubernetes Secret to enable the default support bundle spec for your application: - **Example:** + ```yaml + apiVersion: v1 + kind: Secret + metadata: + labels: + troubleshoot.sh/kind: support-bundle + name: example + stringData: + support-bundle-spec: | + apiVersion: troubleshoot.sh/v1beta2 + kind: SupportBundle + metadata: + name: support-bundle + spec: + collectors: [] + analyzers: [] + ``` + :::note + If your application is installed as multiple Helm charts, you can optionally create separate support bundle specs in each chart. The specs are automatically merged when a support bundle is generated. Alternatively, continue with a single support bundle spec and then optionally revisit how you organize your support bundle specs after you finish onboarding. + ::: + +1. (Recommended) At a minimum, Replicated recommends that all support bundle specs include the `logs` collector. This collects logs from running Pods in the cluster. - ```yaml - apiVersion: velero.io/v1 - kind: Restore - metadata: - name: restore - spec: - hooks: - resources: - - name: restore-hook-1 - includedNamespaces: - - kotsadm - labelSelector: - matchLabels: - app: example - postHooks: - - init: - initContainers: - - name: restore-hook-init1 - image: - # Use HasLocalRegistry, LocalRegistryHost, and LocalRegistryNamespace - # to template the image name - registry: '{{repl HasLocalRegistry | ternary LocalRegistryHost "proxy.replicated.com" }}' - repository: '{{repl HasLocalRegistry | ternary LocalRegistryNamespace "proxy/my-app/quay.io/my-org" }}/nginx' - tag: 1.24-alpine - ``` - For more information about how to rewrite image names using the KOTS [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry), [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost), and [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace) template functions, including additional examples, see [Task 1: Rewrite Image Names](helm-native-v2-using#rewrite-image-names) in _Configuring the HelmChart v2 Custom Resource_. + **Example:** -1. If you support air gap installations, add any images that are referenced in your Backup and Restore resources to the `additionalImages` field of the KOTS Application custom resource. This ensures that the images are included in the air gap bundle for the release so they can be used during the backup and restore process in environments with limited or no outbound internet access. For more information, see [additionalImages](/reference/custom-resource-application#additionalimages) in _Application_. 
+ ```yaml + apiVersion: v1 + kind: Secret + metadata: + name: example + labels: + troubleshoot.sh/kind: support-bundle + stringData: + support-bundle-spec: |- + apiVersion: troubleshoot.sh/v1beta2 + kind: SupportBundle + metadata: + name: example + spec: + collectors: + - logs: + selector: + - app.kubernetes.io/name=myapp + namespace: {{ .Release.Namespace }} + limits: + maxAge: 720h + maxLines: 10000 + ``` - **Example:** + For more information, see: + * [Adding and Customizing Support Bundles](/vendor/support-bundle-customizing) + * [Example Support Bundle Specs](/vendor/support-bundle-examples) + * [Pod Logs](https://troubleshoot.sh/docs/collect/logs/) in the Troubleshoot documentation. - ```yaml - apiVersion: kots.io/v1beta1 - kind: Application - metadata: - name: my-app - spec: - additionalImages: - - elasticsearch:7.6.0 - - quay.io/orgname/private-image:v1.2.3 - ``` +1. (Recommended) Ensure that any preflight checks that you added are also include in your support bundle spec. This ensures that support bundles collect at least the same information collected when running preflight checks. -1. (Optional) Use Velero functionality like [backup](https://velero.io/docs/main/backup-hooks/) and [restore](https://velero.io/docs/main/restore-hooks/) hooks to customize the backup and restore process as needed. +1. Update dependencies and package the chart as a `.tgz` file: - **Example:** + - For example, a Postgres database might be backed up using pg_dump to extract the database into a file as part of a backup hook. It can then be restored using the file in a restore hook: +1. Move the `.tgz` file to the `manifests` directory. - ```yaml - podAnnotations: - backup.velero.io/backup-volumes: backup - pre.hook.backup.velero.io/command: '["/bin/bash", "-c", "PGPASSWORD=$POSTGRES_PASSWORD pg_dump -U {{repl ConfigOption "postgresql_username" }} -d {{repl ConfigOption "postgresql_database" }} -h 127.0.0.1 > /scratch/backup.sql"]' - pre.hook.backup.velero.io/timeout: 3m - post.hook.restore.velero.io/command: '["/bin/bash", "-c", "[ -f \"/scratch/backup.sql\" ] && PGPASSWORD=$POSTGRES_PASSWORD psql -U {{repl ConfigOption "postgresql_username" }} -h 127.0.0.1 -d {{repl ConfigOption "postgresql_database" }} -f /scratch/backup.sql && rm -f /scratch/backup.sql;"]' - post.hook.restore.velero.io/wait-for-ready: 'true' # waits for the pod to be ready before running the post-restore hook - ``` +1. -1. Save and the promote the release to a development channel for testing. +1. -### Enable the Disaster Recovery Feature for Your Customers + For information about how to generate support bundles, see [Generating Support Bundles](/vendor/support-bundle-generating). -After configuring disaster recovery for your application, you can enable it on a per-customer basis with the **Allow Disaster Recovery (Alpha)** license field. +1. (Optional) Customize the support bundle spec by adding additional collectors and analyzers. -To enable disaster recovery for a customer: +### Task 9: Alias Replicated Endpoints with Your Own Domains -1. In the Vendor Portal, go to the [Customers](https://vendor.replicated.com/customers) page and select the target customer. +Your customers are exposed to several Replicated domains by default. Replicated recommends you use custom domains to unify the customer's experience with your brand and simplify security reviews. -1. On the **Manage customer** page, under **License options**, enable the **Allow Disaster Recovery (Alpha)** field. 
- - When your customer installs with Embedded Cluster, Velero will be deployed if the **Allow Disaster Recovery (Alpha)** license field is enabled. - -## Take Backups and Restore +For more information, see [Using Custom Domains](/vendor/custom-domains-using). -This section describes how your customers can configure backup storage, take backups, and restore from backups. +## Next Steps -### Configure Backup Storage and Take Backups in the Admin Console +After completing the main onboarding tasks, Replicated recommends that you also complete the following additional tasks to integrate other Replicated features with your application. You can complete these next recommended tasks in any order and at your own pace. -Customers with the **Allow Disaster Recovery (Alpha)** license field can configure their backup storage location and take backups from the Admin Console. +### Add Support for Helm Installations -To configure backup storage and take backups: +Existing KOTS releases that include one or more Helm charts can be installed with the Helm CLI; it is not necessary to create and manage separate releases or channels for each installation method. -1. After installing the application and logging in to the Admin Console, click the **Disaster Recovery** tab at the top of the Admin Console. +To enable Helm installations for Helm charts distributed with Replicated, the only extra step is to add a Secret to your chart to authenticate with the Replicated proxy registry. -1. For the desired S3-compatible backup storage location, enter the bucket, prefix (optional), access key ID, access key secret, endpoint, and region. Click **Update storage settings**. +This is the same secret that is passed to KOTS in the HelmChart custom resource using `'{{repl ImagePullSecretName }}'`, which you did as part of [Task 4: Create and Install the Initial Release](#first-release). So, whereas this Secret is created automatically for KOTS and Embedded Cluster installations, you need to create it and add it to your Helm chart for Helm installations. - backup storage settings +:::note +Before you test Helm installations for your application, you can complete the [Deploy a Helm Chart with KOTS and the Helm CLI](tutorial-kots-helm-setup) tutorial to learn how to install a single release with both KOTS and Helm. +::: - [View a larger version of this image](/images/dr-backup-storage-settings.png) +To support and test Helm installations: -1. (Optional) From this same page, configure scheduled backups and a retention policy for backups. +1. Follow the steps in [Using the Proxy Registry with Helm Installations](/vendor/helm-image-registry) to authenticate with the Replicated proxy registry by creating a Secret with `type: kubernetes.io/dockerconfigjson` in your Helm chart. - scheduled backups - - [View a larger version of this image](/images/dr-scheduled-backups.png) +1. Update dependencies and package the chart as a `.tgz` file: -1. In the **Disaster Recovery** submenu, click **Backups**. Backups can be taken from this screen. + - backups page +1. Add the `.tgz` file to a release. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Managing Releases with the CLI](releases-creating-cli). - [View a larger version of this image](/images/dr-backups.png) +1. Install the release in a cluster with the Helm CLI to test your changes. For more information, see [Installing with Helm](/vendor/install-with-helm). 
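+For reference, the authentication Secret from the first step above generally looks like the following in a chart's `templates` directory (a sketch based on [Using the Proxy Registry with Helm Installations](/vendor/helm-image-registry); the file and Secret names are illustrative, and the `global.replicated.dockerconfigjson` value is assumed to be injected when the chart is pulled from the Replicated registry):
+
+```yaml
+# templates/replicated-pull-secret.yaml (illustrative name)
+apiVersion: v1
+kind: Secret
+metadata:
+  name: replicated-pull-secret
+type: kubernetes.io/dockerconfigjson
+data:
+  # Assumed to be injected by the Replicated registry when the chart is pulled
+  .dockerconfigjson: {{ .Values.global.replicated.dockerconfigjson }}
+```
+
+Workloads that pull images through the proxy registry then reference this Secret from their Pod specs in `imagePullSecrets`.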
-### Restore from a Backup +### Add Support for Air Gap Installations -To restore from a backup: +Replicated Embedded Cluster and KOTS support installations in _air gap_ environments with no outbound internet access. Users can install with Embedded Cluster and KOTS in air gap environments by providing air gap bundles that contain the required images for the installers and for your application. -1. SSH onto a new machine where you want to restore from a backup. +:::note +Replicated also offers Alpha support for air gap installations with Helm. If you are interested in trying Helm air gap installations and providing feedback, please reach out to your account rep to enable this feature. +::: -1. Download the Embedded Cluster installation assets for the version of the application that was included in the backup. You can find the command for downloading Embedded Cluster installation assets in the **Embedded Cluster install instructions dialog** for the customer. For more information, [Online Installation with Embedded Cluster](/enterprise/installing-embedded). +To add support for air gap installations: - :::note - The version of the Embedded Cluster installation assets must match the version that is in the backup. For more information, see [Limitations and Known Issues](#limitations-and-known-issues). - ::: +1. If there are any images for your application that are not listed in your Helm chart, list these images in the `additionalImages` attribute of the KOTS Application custom resource. This ensures that the images are included in the air gap bundle for the release. One common use case for this is applications that use Kubernetes Operators. See [Define Additional Images](/vendor/operator-defining-additional-images). -1. Run the restore command: +1. In the KOTS HelmChart custom resource `builder` key, pass any values that are required in order for `helm template` to yield all the images needed to successfully install your application. See [Packaging Air Gap Bundles for Helm Charts](/vendor/helm-packaging-airgap-bundles). - ```bash - sudo ./APP_SLUG restore - ``` - Where `APP_SLUG` is the unique application slug. + :::note + If the default values in your Helm chart already enable all the images needed to successfully deploy, then you do not need to configure the `builder` key. + ::: - Note the following requirements and guidance for the `restore` command: +
    + How do I know if I need to configure the `builder` key? + + When building an air gap bundle, the Vendor Portal templates the Helm charts in a release with `helm template` in order to detect the images that need to be included in the bundle. Images yielded by `helm template` are included in the bundle for the release. - * If the installation is behind a proxy, the same proxy settings provided during install must be provided to the restore command using `--http-proxy`, `--https-proxy`, and `--no-proxy`. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install). + For many applications, running `helm template` with the default values would not yield all the images required to install. In these cases, vendors can pass the additional values in the `builder` key to ensure that the air gap bundle includes all the necessary images. +
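+   For reference, the `builder` key is configured in the KOTS HelmChart custom resource. The following is a minimal sketch (the chart name, version, and the `postgresql.enabled` value are placeholders for whatever values your chart needs so that `helm template` renders all required images):
+
+   ```yaml
+   apiVersion: kots.io/v1beta2
+   kind: HelmChart
+   metadata:
+     name: samplechart
+   spec:
+     chart:
+       name: samplechart
+       chartVersion: 3.1.7
+     # builder values are used when the air gap bundle is built, so that
+     # helm template yields every image the application needs
+     builder:
+       postgresql:
+         enabled: true
+   ```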
-    * If the `--cidr` flag was used during install to the set IP address ranges for Pods and Services, this flag must be provided with the same CIDR during the restore. If this flag is not provided or is provided with a different CIDR, the restore will fail with an error message telling you to rerun with the appropriate value. However, it will take some time before that error occurs. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install).
+1. If you have not done so already as part of [Task 4: Create and Install the Initial Release](#first-release), ensure that the `values` key in the KOTS HelmChart custom resource correctly rewrites image names for air gap installations. This is done using the KOTS HasLocalRegistry, LocalRegistryHost, and LocalRegistryNamespace template functions to render the location of the given image in the user's own local registry.
-    * If the `--local-artifact-mirror-port` flag was used during install to change the port for the Local Artifact Mirror (LAM), you can optionally use the `--local-artifact-mirror-port` flag to choose a different LAM port during restore. For example, `restore --local-artifact-mirror-port=50000`. If no LAM port is provided during restore, the LAM port that was supplied during installation will be used. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install).
+   For more information, see [Rewrite Image Names](/vendor/helm-native-v2-using#rewrite-image-names) in _Configuring the HelmChart Custom Resource v2_.
-   You will be guided through the process of restoring from a backup.
-1. When prompted, enter the information for the backup storage location.
+1. Create and promote a new release with your changes. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Managing Releases with the CLI](releases-creating-cli).
-   ![Restore prompts on the command line](/images/dr-restore.png)
-   [View a larger version of this image](/images/dr-restore.png)
+1. In the [Vendor Portal](https://vendor.replicated.com), go to the channel where the release was promoted to build the air gap bundle. Do one of the following:
+   * If the **Automatically create airgap builds for newly promoted releases in this channel** setting is enabled on the channel, watch for the build status to complete.
+   * If automatic air gap builds are not enabled, go to the **Release history** page for the channel and build the air gap bundle manually.
-1. When prompted, confirm that you want to restore from the detected backup.
+1. Create a customer with the **Airgap Download Enabled** entitlement enabled so that you can test air gap installations. See [Creating and Managing Customers](/vendor/releases-creating-customer).
-   ![Restore from detected backup prompt on the command line](/images/dr-restore-from-backup-confirmation.png)
-   [View a larger version of this image](/images/dr-restore-from-backup-confirmation.png)
+1. Download the Embedded Cluster air gap installation assets, then install with Embedded Cluster on an air gap VM to test. See [Installing in Air Gap Environments with Embedded Cluster](/enterprise/installing-embedded-air-gap).
-   After some time, the Admin console URL is displayed:
+1. (Optional) Download the `.airgap` bundle for the release and the air gap bundle for the KOTS Admin Console. You can also download both bundles from the Download Portal for the target customer. Then, install in an air gap existing cluster to test.
See [Air Gap Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster-airgapped). - ![Restore from detected backup prompt on the command line](/images/dr-restore-admin-console-url.png) - [View a larger version of this image](/images/dr-restore-admin-console-url.png) +1. (Optional) Follow the steps in [Installing and Updating with Helm in Air Gap Environments (Alpha)](/vendor/helm-install-airgap) to test air gap installation with Helm. -1. (Optional) If the cluster should have multiple nodes, go to the Admin Console to get a join command and join additional nodes to the cluster. For more information, see [Managing Multi-Node Clusters with Embedded Cluster](/enterprise/embedded-manage-nodes). + :::note + Air gap Helm installations are an Alpha feature. If you are interested in trying Helm air gap installations and providing feedback, please reach out to your account rep to enable this feature. + ::: -1. Type `continue` when you are ready to proceed with the restore process. +### Add Roles for Multi-Node Clusters in Embedded Cluster Installations - ![Type continue when you are done adding nodes](/images/dr-restore-continue.png) - [View a larger version of this image](/images/dr-restore-continue.png) +The Embedded Cluster Config supports roles for multi-node clusters. One or more roles can be selected and assigned to a node when it is joined to the cluster. Node roles can be used to determine which nodes run the Kubernetes control plane, and to assign application workloads to particular nodes. - After some time, the restore process completes. +For more information, see [roles](/reference/embedded-config#roles) in _Embedded Cluster Config_. - If the `restore` command is interrupted during the restore process, you can resume by rerunning the `restore` command and selecting to resume the previous restore. This is useful if your SSH session is interrupted during the restore. +### Add and Map License Entitlements -================ -File: docs/vendor/embedded-overview.mdx -================ -import EmbeddedCluster from "../partials/embedded-cluster/_definition.mdx" -import Requirements from "../partials/embedded-cluster/_requirements.mdx" -import EmbeddedClusterPortRequirements from "../partials/embedded-cluster/_port-reqs.mdx" -import HaArchitecture from "../partials/embedded-cluster/_multi-node-ha-arch.mdx" +You can add custom license entitlements for your application in the Vendor Portal. Custom license fields are useful when there is entitlement information that applies to a subset of customers. For example, you can use entitlements to: +* Limit the number of active users permitted +* Limit the number of nodes a customer is permitted on their cluster +* Identify a customer on a "Premium" plan that has access to additional features or functionality not available with your base plan -# Embedded Cluster Overview +For more information about how to create and assign custom entitlements in the Vendor Portal, see [Managing Customer License Fields](/vendor/licenses-adding-custom-fields) and [Creating and Managing Customers](/vendor/releases-creating-customer). -This topic provides an introduction to Replicated Embedded Cluster, including a description of the built-in extensions installed by Embedded Cluster, an overview of the Embedded Cluster single-node and multi-node architecture, and requirements and limitations. 
+#### Map Entitlements to Helm Values
-:::note
-If you are instead looking for information about creating Kubernetes Installers with Replicated kURL, see the [Replicated kURL](/vendor/packaging-embedded-kubernetes) section.
-:::
+You can map license entitlements to your Helm values using KOTS template functions. This can be useful when you need to set certain values based on the user's license information. For more information, see [Using KOTS Template Functions](/vendor/helm-optional-value-keys#using-kots-template-functions) in _Setting Helm Values with KOTS_.
-## Overview
+#### Query Entitlements Before Installation and at Runtime
-
+You can add logic to your application to query license entitlements both before deployment and at runtime. For example, you might want to add preflight checks that verify a user's entitlements before installing. Or, you can expose additional product functionality dynamically at runtime based on a customer's entitlements.
-## Architecture
+For more information, see:
-This section describes the Embedded Cluster architecture, including the built-in extensions deployed by Embedded Cluster.
+* [Querying Entitlements with the Replicated SDK API](/vendor/licenses-reference-sdk)
-### Single-Node Architecture
+* [Checking Entitlements in Preflights with KOTS Template Functions](/vendor/licenses-referencing-fields)
-The following diagram shows the architecture of a single-node Embedded Cluster installation for an application named Gitea:
+### Add Application Links to the Admin Console Dashboard
-![Embedded Cluster single-node architecture](/images/embedded-architecture-single-node.png)
+You can add the Kubernetes SIG Application custom resource to your release to add a link to your application from the Admin Console dashboard. This makes it easier for users to access your application after installation.
-[View a larger version of this image](/images/embedded-architecture-single-node.png)
+You can also configure the Kubernetes SIG Application resource to add links to other resources like documentation or dashboards.
-As shown in the diagram above, the user downloads the Embedded Cluster installation assets as a `.tgz` in their installation environment. These installation assets include the Embedded Cluster binary, the user's license file, and (for air gap installations) an air gap bundle containing the images needed to install and run the release in an environment with limited or no outbound internet access.
+For more information, see [Adding Application Links to the Dashboard](/vendor/admin-console-adding-buttons-links).
-When the user runs the Embedded Cluster install command, the Embedded Cluster binary first installs the k0s cluster as a systemd service.
+### Update the Preflight and Support Bundle Specs
-After all the Kubernetes components for the cluster are available, the Embedded Cluster binary then installs the Embedded Cluster built-in extensions. For more information about these extensions, see [Built-In Extensions](#built-in-extensions) below.
+After adding basic specs for preflights and support bundles, you can continue to add more collectors and analyzers as needed.
-Any Helm extensions that were included in the [`extensions`](/reference/embedded-config#extensions) field of the Embedded Cluster Config are also installed.
The namespace or namespaces where Helm extensions are installed is defined by the vendor in the Embedded Cluster Config. +* Your support bundles should include all of the same collectors and analyzers that are in your preflight checks. This ensures that support bundles include all the necessary troubleshooting information, including any failures in preflight checks. -Finally, Embedded Cluster also installs Local Artifact Mirror (LAM). In air gap installations, LAM is used to store and update images. +* Your support bundles will most likely need to include other collectors and analyzers that are not in your preflight checks. This is because some of the information used for troubleshooting (such as logs) is not necessary when running preflight checks before installation. -### Multi-Node Architecture +* If your application is installed as multiple Helm charts, you can optionally add separate support bundle specs in each chart. This can make it easier to keep the specs up-to-date and to avoid merge conflicts that can be caused when multiple team members contribute to a single, large support bundle spec. When an application has multiple support bundle specs, the specs are automatically merged when generating a support bundle so that only a single support bundle is provided to the user. -The following diagram shows the architecture of a multi-node Embedded Cluster installation: +The documentation for the open-source Troubleshoot project includes the full list of available collectors and analyzers that you can use. See [All Collectors](https://troubleshoot.sh/docs/collect/all/) and the [Analyze](https://troubleshoot.sh/docs/analyze/) section in the Troubleshoot documentation. -![Embedded Cluster multi-node architecture](/images/embedded-architecture-multi-node.png) +You can also view common examples of collectors and analyzers used in preflight checks and support bundles in [Preflight Spec Examples](preflight-examples) and [Support Bundle Spec Examples](support-bundle-examples). -[View a larger version of this image](/images/embedded-architecture-multi-node.png) +### Configure Backup and Restore -As shown in the diagram above, in multi-node installations, the Embedded Cluster Operator, KOTS, and the image registry for air gap installations are all installed on one controller node. +Enable backup and restore with Velero for your application so that users can back up and restore their KOTS Admin Console and application data. -For installations that include disaster recovery with Velero, the Velero Node Agent runs on each node in the cluster. The Node Agent is a Kubernetes DaemonSet that performs backup and restore tasks such as creating snapshots and transferring data during restores. +There are different steps to configure backup and restore for Embedded Cluster and for existing cluster installations with KOTS: +* To configure the disaster recovery feature for Embedded Cluster, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery) +* To configure the snapshots feature for existing cluster KOTS installations, see [Configuring Snapshots](snapshots-configuring-backups). -Additionally, any Helm [`extensions`](/reference/embedded-config#extensions) that you include in the Embedded Cluster Config are installed in the cluster depending on the given chart and how it is configured to be deployed. 
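+As a starting point for the Embedded Cluster disaster recovery configuration, a Velero Backup resource in a release often uses namespace-based selection with Replicated resources excluded, as in the following sketch (adjust the namespaces and label selectors to match the resources that your application deploys):
+
+```yaml
+apiVersion: velero.io/v1
+kind: Backup
+metadata:
+  name: backup
+spec:
+  # Back up the application resources deployed in the kotsadm namespace
+  includedNamespaces:
+    - kotsadm
+  orLabelSelectors:
+    - matchExpressions:
+      # Exclude Replicated resources, which are included in backups automatically
+      - { key: kots.io/kotsadm, operator: NotIn, values: ["true"] }
+```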
+### Add Custom Metrics
-### Multi-Node Architecture with High Availability
+In addition to the built-in insights displayed in the Vendor Portal by default (such as uptime and time to install), you can also configure custom metrics to measure instances of your application running in customer environments. Custom metrics can be collected for application instances running in online or air gap environments using the Replicated SDK.
-:::note
-High availability (HA) for multi-node installations with Embedded Cluster is Alpha and is not enabled by default. For more informaiton about enabling HA, see [Enable High Availability for Multi-Node Clusters (Alpha)](/enterprise/embedded-manage-nodes#ha).
-:::
+For more information, see [Configuring Custom Metrics](/vendor/custom-metrics).
-
+### Integrate with CI/CD
-## Built-In Extensions {#built-in-extensions}
+Replicated recommends that teams integrate the Replicated Platform into their existing development and production CI/CD workflows. This can be useful for automating the processes of creating new releases, promoting releases, and testing releases with the Replicated Compatibility Matrix.
-Embedded Cluster includes several built-in extensions. The built-in extensions provide capabilities such as application management and storage. Each built-in extension is installed in its own namespace.
+For more information, see:
-The built-in extensions installed by Embedded Cluster include:
+* [About Integrating with CI/CD](/vendor/ci-overview)
-* **Embedded Cluster Operator**: The Operator is used for reporting purposes as well as some clean up operations.
+* [About Compatibility Matrix](/vendor/testing-about)
+* [Recommended CI/CD Workflows](/vendor/ci-workflows)
-* **KOTS:** Embedded Cluster installs the KOTS Admin Console in the kotsadm namespace. End customers use the Admin Console to configure and install the application. Rqlite is also installed in the kotsadm namespace alongside KOTS. Rqlite is a distributed relational database that uses SQLite as its storage engine. KOTS uses rqlite to store information such as support bundles, version history, application metadata, and other small amounts of data needed to manage the application. For more information about rqlite, see the [rqlite](https://rqlite.io/) website.
+### Customize Release Channels
+By default, the Vendor Portal includes Unstable, Beta, and Stable channels. You can customize the channels in the Vendor Portal based on your application needs.
+Consider the following recommendations:
+* Use the Stable channel for your primary release cadence. Releases should be promoted to the Stable channel only as frequently as your average customer can consume new releases. Typically, this is no more than monthly. However, this cadence varies depending on the customer base.
+* If you have a SaaS product, you might want to create an "Edge" channel where you promote the latest SaaS releases.
+* You can consider a “Long Term Support” channel where you promote new releases less frequently and support those releases for longer.
+* It can be useful to create channels for each feature branch so that internal teams reviewing a PR can easily get the installation artifacts as well as review the code. You can automate channel creation as part of a pipeline or Makefile.
-* **OpenEBS:** Embedded Cluster uses OpenEBS to provide local PersistentVolume (PV) storage, including the PV storage for rqlite used by KOTS. For more information, see the [OpenEBS](https://openebs.io/docs/) documentation.
+For more information, see: +* [About Channels and Releases](/vendor/releases-about) +* [Creating and Editing Channels](/vendor/releases-creating-channels) -* **(Disaster Recovery Only) Velero:** If the installation uses the Embedded Cluster disaster recovery feature, Embedded Cluster installs Velero, which is an open-source tool that provides backup and restore functionality. For more information about Velero, see the [Velero](https://velero.io/docs/latest/) documentation. For more information about the disaster recovery feature, see [Disaster Recovery for Embedded Cluster (Alpha)](/vendor/embedded-disaster-recovery). +### Write Your Documentation -* **(Air Gap Only) Image registry:** For air gap installations in environments with limited or no outbound internet access, Embedded Cluster installs an image registry where the images required to install and run the application are pushed. For more information about installing in air-gapped environments, see [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap). +Before distributing your application to customers, ensure that your documentation is up-to-date. In particular, be sure to update the installation documentation to include the procedures and requirements for installing with Embedded Cluster, Helm, and any other installation methods that you support. -## Comparison to kURL +For guidance on how to get started with documentation for applications distributed with Replicated, including key considerations, examples, and templates, see [Writing Great Documentation for On-Prem Software Distributed with Replicated](https://www.replicated.com/blog/writing-great-documentation-for-on-prem-software-distributed-with-replicated) in the Replicated blog. -Embedded Cluster is a successor to Replicated kURL. Compared to kURL, Embedded Cluster offers several improvements such as: -* Significantly faster installation, updates, and node joins -* A redesigned Admin Console UI for managing the cluster -* Improved support for multi-node clusters -* One-click updates of both the application and the cluster at the same time +================ +File: docs/vendor/replicated-sdk-airgap.mdx +================ +# Installing the SDK in Air Gap Environments -Additionally, Embedded Cluster automatically deploys several built-in extensions like KOTS and OpenEBS to provide capabilities such as application management and storage. This represents an improvement over kURL because vendors distributing their application with Embedded Cluster no longer need choose and define various add-ons in the installer spec. For additional functionality that is not included in the built-in extensions, such as an ingress controller, vendors can provide their own [`extensions`](/reference/embedded-config#extensions) that will be deployed alongside the application. +This topic explains how to install the Replicated SDK in air gap environments by enabling air gap mode. -## Requirements +## Overview -### System Requirements +The Replicated SDK has an _air gap mode_ that allows it to run in environments with no outbound internet access. When installed in air gap mode, the SDK does not attempt to connect to the internet. This avoids any failures that would occur when the SDK is unable to make outbound requests in air gap environments. - +Air gap mode is enabled when `isAirgap: true` is set in the values for the SDK Helm chart. For more information, see [Install the SDK in Air Gap Mode](#install) below. 
Allowing air gap mode to be controlled with the `isAirgap` value means that vendors and enterprise customers do not need to rely on air gap environments being automatically detected, which is unreliable and error-prone. The `isAirgap` value also allows the SDK to be installed in air gap mode even if the instance can access the internet. -### Port Requirements +## Differences in Air Gap Mode - +Air gap mode differs from non-air gap installations of the SDK in the following ways: +* The SDK stores instance telemetry and custom metrics in a Kubernetes Secret in the customer environment, rather than attempting to send telemetry and custom metrics back to the Replicated Vendor Portal. The telemetry and custom metrics stored in the Secret are collected whenever a support bundle is generated in the environment, and are reported when the support bundle is uploaded to the Vendor Portal. For more information about telemetry for air gap instances, see [Collecting Telemetry for Air Gap Instances](/vendor/telemetry-air-gap). +* The SDK returns an empty array (`[]`) for any requests to check for updates using the [`/api/v1/app/updates`](/reference/replicated-sdk-apis#get-appupdates) SDK API endpoint. This is because the SDK is not able to receive updates from the Vendor Portal when running in air gap environments. +* Instance tags cannot be updated with the [`/app/instance-tags`](/reference/replicated-sdk-apis#post-appinstance-tags) SDK API endpoint. -## Limitations +In air gap mode, the SDK can still make requests to SDK API endpoints that do not require outbound internet access, such as the [`license`](/reference/replicated-sdk-apis#license) endpoints and the [`/app/info`](/reference/replicated-sdk-apis#get-appinfo) endpoint. However, these endpoints will return whatever values were injected into the SDK when the chart was most recently pulled. These values might not match the latest information available in the Vendor Portal because the SDK cannot receive updates when running in air gap environments. -Embedded Cluster has the following limitations: +## Install the SDK in Air Gap Mode {#install} -* **Reach out about migrating from kURL**: We are helping several customers migrate from kURL to Embedded Cluster. Reach out to Alex Parker at alexp@replicated.com for more information. +This section describes how to install the Replicated SDK in air gap mode with the Helm CLI and with Replicated KOTS. -* **Multi-node support is in beta**: Support for multi-node embedded clusters is in beta, and enabling high availability for multi-node clusters is in alpha. Only single-node embedded clusters are generally available. For more information, see [Managing Multi-Node Clusters with Embedded Cluster](/enterprise/embedded-manage-nodes). +### Helm CLI -* **Disaster recovery is in alpha**: Disaster Recovery for Embedded Cluster installations is in alpha. For more information, see [Disaster Recovery for Embedded Cluster (Alpha)](/vendor/embedded-disaster-recovery). +When the SDK is installed with the Helm CLI, air gap mode can be enabled by passing `--set replicated.isAirgap=true` with the Helm CLI installation command. -* **Partial rollback support**: In Embedded Cluster 1.17.0 and later, rollbacks are supported only when rolling back to a version where there is no change to the [Embedded Cluster Config](/reference/embedded-config) compared to the currently-installed version. 
For example, users can roll back to release version 1.0.0 after upgrading to 1.1.0 only if both 1.0.0 and 1.1.0 use the same Embedded Cluster Config. For more information about how to enable rollbacks for your application in the KOTS Application custom resource, see [allowRollback](/reference/custom-resource-application#allowrollback) in _Application_. +For example: -* **Changing node hostnames is not supported**: After a host is added to a Kubernetes cluster, Kubernetes assumes that the hostname and IP address of the host will not change. If you need to change the hostname or IP address of a node, you must first remove the node from the cluster. For more information about the requirements for naming nodes, see [Node name uniqueness](https://kubernetes.io/docs/concepts/architecture/nodes/#node-name-uniqueness) in the Kubernetes documentation. +``` +helm install gitea oci://registry.replicated.com/my-app/gitea --set replicated.isAirgap=true +``` -* **Automatic updates not supported**: Configuring automatic updates from the Admin Console so that new versions are automatically deployed is not supported for Embedded Cluster installations. For more information, see [Configuring Automatic Updates](/enterprise/updating-apps). +For more information about Helm CLI installations with Replicated, see [Installing with Helm](/vendor/install-with-helm). For more information about setting Helm values with the `helm install` command, see [Helm Install](https://helm.sh/docs/helm/helm_install/) in the Helm documentation. -* **Embedded Cluster installation assets not available through the Download Portal**: The assets required to install with Embedded Cluster cannot be shared with users through the Download Portal. Users can follow the Embedded Cluster installation instructions to download and extract the installation assets. For more information, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded). +:::note +Replicated does not provide air gap bundles for applications installed with the Helm CLI. Air gap bundles are a feature of KOTS. +::: -* **`minKotsVersion` and `targetKotsVersion` not supported**: The [`minKotsVersion`](/reference/custom-resource-application#minkotsversion-beta) and [`targetKotsVersion`](/reference/custom-resource-application#targetkotsversion) fields in the KOTS Application custom resource are not supported for Embedded Cluster installations. This is because each version of Embedded Cluster includes a particular version of KOTS. Setting `targetKotsVersion` or `minKotsVersion` to a version of KOTS that does not coincide with the version that is included in the specified version of Embedded Cluster will cause Embedded Cluster installations to fail with an error message like: `Error: This version of App Name requires a different version of KOTS from what you currently have installed`. To avoid installation failures, do not use targetKotsVersion or minKotsVersion in releases that support installation with Embedded Cluster. +### KOTS -* **Support bundles over 100MB in the Admin Console**: Support bundles are stored in rqlite. Bundles over 100MB could cause rqlite to crash, causing errors in the installation. You can still generate a support bundle from the command line. For more information, see [Generating Support Bundles for Embedded Cluster](/vendor/support-bundle-embedded). +When the SDK is installed by KOTS in an air gap environment, KOTS automatically sets `isAirGap: true` in the SDK Helm chart values to enable air gap mode. 
No additional configuration is required. -* **Kubernetes version template functions not supported**: The KOTS [KubernetesVersion](/reference/template-functions-static-context#kubernetesversion), [KubernetesMajorVersion](/reference/template-functions-static-context#kubernetesmajorversion), and [KubernetesMinorVersion](/reference/template-functions-static-context#kubernetesminorversion) template functions do not provide accurate Kubernetes version information for Embedded Cluster installations. This is because these template functions are rendered before the Kubernetes cluster has been updated to the intended version. However, `KubernetesVersion` is not necessary for Embedded Cluster because vendors specify the Embedded Cluster version, which includes a known Kubernetes version. +================ +File: docs/vendor/replicated-sdk-customizing.md +================ +# Customizing the Replicated SDK -* **Custom domains not supported**: Embedded Cluster does not support the use of custom domains, even if custom domains are configured. We intend to add support for custom domains. For more information about custom domains, see [About Custom Domains](/vendor/custom-domains). +This topic describes various ways to customize the Replicated SDK, including customizing RBAC, setting environment variables, adding tolerations, and more. -* **KOTS Auto-GitOps workflow not supported**: Embedded Cluster does not support the KOTS Auto-GitOps workflow. If an end-user is interested in GitOps, consider the Helm install method instead. For more information, see [Installing with Helm](/vendor/install-with-helm). +## Customize RBAC for the SDK -* **Downgrading Kubernetes not supported**: Embedded Cluster does not support downgrading Kubernetes. The admin console will not prevent end-users from attempting to downgrade Kubernetes if a more recent version of your application specifies a previous Embedded Cluster version. You must ensure that you do not promote new versions with previous Embedded Cluster versions. +This section describes role-based access control (RBAC) for the Replicated SDK, including the default RBAC, minimum RBAC requirements, and how to install the SDK with custom RBAC. -* **Templating not supported in Embedded Cluster Config**: The [Embedded Cluster Config](/reference/embedded-config) resource does not support the use of Go template functions, including [KOTS template functions](/reference/template-functions-about). This only applies to the Embedded Cluster Config. You can still use template functions in the rest of your release as usual. +### Default RBAC -* **Policy enforcement on Embedded Cluster workloads is not supported**: The Embedded Cluster runs workloads that require higher levels of privilege. If your application installs a policy enforcement engine such as Gatekeeper or Kyverno, ensure that its policies are not enforced in the namespaces used by Embedded Cluster. +The SDK creates default Role, RoleBinding, and ServiceAccount objects during installation. The default Role allows the SDK to get, list, and watch all resources in the namespace, to create Secrets, and to update the `replicated` and `replicated-instance-report` Secrets: -* **Installing on STIG- and CIS-hardened OS images is not supported**: Embedded Cluster isn't tested on these images, and issues have arisen when trying to install on them. +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + {{- include "replicated.labels" . 
| nindent 4 }} + name: replicated-role +rules: +- apiGroups: + - '*' + resources: + - '*' + verbs: + - 'get' + - 'list' + - 'watch' +- apiGroups: + - '' + resources: + - 'secrets' + verbs: + - 'create' +- apiGroups: + - '' + resources: + - 'secrets' + verbs: + - 'update' + resourceNames: + - replicated + - replicated-instance-report + - replicated-custom-app-metrics-report +``` -================ -File: docs/vendor/embedded-using.mdx -================ -import UpdateOverview from "../partials/embedded-cluster/_update-overview.mdx" -import SupportBundleIntro from "../partials/support-bundles/_ec-support-bundle-intro.mdx" -import EmbeddedClusterSupportBundle from "../partials/support-bundles/_generate-bundle-ec.mdx" -import EcConfig from "../partials/embedded-cluster/_ec-config.mdx" +### Minimum RBAC Requirements -# Using Embedded Cluster +The SDK requires the following minimum RBAC permissions: +* Create Secrets. +* Get and update Secrets named `replicated`, `replicated-instance-report`, and `replicated-custom-app-metrics-report`. +* The SDK requires the following minimum RBAC permissions for status informers: + * If you defined custom status informers, then the SDK must have permissions to get, list, and watch all the resources listed in the `replicated.statusInformers` array in your Helm chart `values.yaml` file. + * If you did _not_ define custom status informers, then the SDK must have permissions to get, list, and watch the following resources: + * Deployments + * Daemonsets + * Ingresses + * PersistentVolumeClaims + * Statefulsets + * Services + * For any Ingress resources used as status informers, the SDK requires `get` permissions for the Service resources listed in the `backend.Service.Name` field of the Ingress resource. + * For any Daemonset and Statefulset resources used as status informers, the SDK requires `list` permissions for pods in the namespace. + * For any Service resources used as status informers, the SDK requires `get` permissions for Endpoint resources with the same name as the service. -This topic provides information about using Replicated Embedded Cluster, including how to get started, configure Embedded Cluster, access the cluster using kubectl, and more. For an introduction to Embedded Cluster, see [Embedded Cluster Overview](embedded-overview). + The Replicated Vendor Portal uses status informers to provide application status data. For more information, see [Helm Installations](/vendor/insights-app-status#helm-installations) in _Enabling and Understanding Application Status_. +### Install the SDK with Custom RBAC -## Quick Start +#### Custom ServiceAccount -You can use the following steps to get started quickly with Embedded Cluster. More detailed documentation is available below. +To use the SDK with custom RBAC permissions, provide the name for a custom ServiceAccount object during installation. When a service account is provided, the SDK uses the RBAC permissions granted to the service account and does not create the default Role, RoleBinding, or ServiceAccount objects. -1. Create a new customer or edit an existing customer and select the **Embedded Cluster Enabled** license option. Save the customer. +To install the SDK with custom RBAC: -1. Create a new release that includes your application. In that release, create an Embedded Cluster Config that includes, at minimum, the Embedded Cluster version you want to use. See the Embedded Cluster [GitHub repo](https://github.com/replicatedhq/embedded-cluster/releases) to find the latest version. +1. 
Create custom Role, RoleBinding, and ServiceAccount objects. The Role must meet the minimum requirements described in [Minimum RBAC Requirements](#minimum-rbac-requirements) above. +1. During installation, provide the name of the service account that you created by including `--set replicated.serviceAccountName=CUSTOM_SERVICEACCOUNT_NAME`. - Example Embedded Cluster Config: + **Example**: - + ``` + helm install wordpress oci://registry.replicated.com/my-app/beta/wordpress --set replicated.serviceAccountName=mycustomserviceaccount + ``` -1. Save the release and promote it to the channel the customer is assigned to. + For more information about installing with Helm, see [Installing with Helm](/vendor/install-with-helm). -1. Return to the customer page where you enabled Embedded Cluster. At the top right, click **Install instructions** and choose **Embedded Cluster**. A dialog appears with instructions on how to download the Embedded Cluster installation assets and install your application. +#### Custom ClusterRole - ![Customer install instructions drop down button](/images/customer-install-instructions-dropdown.png) +To use the SDK with an existing ClusterRole, provide the name for a custom ClusterRole object during installation. When a cluster role is provided, the SDK uses the RBAC permissions granted to the cluster role and does not create the default RoleBinding. Instead, the SDK creates a ClusterRoleBinding as well as a ServiceAccount object. - [View a larger version of this image](/images/customer-install-instructions-dropdown.png) - -1. On your VM, run the commands in the **Embedded Cluster install instructions** dialog. +To install the SDK with a custom ClusterRole: - Embedded cluster install instruction dialog +1. Create a custom ClusterRole object. The ClusterRole must meet at least the minimum requirements described in [Minimum RBAC Requirements](#minimum-rbac-requirements) above. However, it can also provide additional permissions that can be used by the SDK, such as listing cluster Nodes. +1. During installation, provide the name of the cluster role that you created by including `--set replicated.clusterRole=CUSTOM_CLUSTERROLE_NAME`. - [View a larger version of this image](/images/embedded-cluster-install-dialog-latest.png) + **Example**: -1. Enter an Admin Console password when prompted. + ``` + helm install wordpress oci://registry.replicated.com/my-app/beta/wordpress --set replicated.clusterRole=mycustomclusterrole + ``` - The Admin Console URL is printed when the installation finishes. Access the Admin Console to begin installing your application. During the installation process in the Admin Console, you have the opportunity to add nodes if you want a multi-node cluster. Then you can provide application config, run preflights, and deploy your application. + For more information about installing with Helm, see [Installing with Helm](/vendor/install-with-helm). -## About Configuring Embedded Cluster +## Set Environment Variables {#env-var} -To install an application with Embedded Cluster, an Embedded Cluster Config must be present in the application release. The Embedded Cluster Config lets you define several characteristics about the cluster that will be created. +The Replicated SDK provides a `replicated.extraEnv` value that allows users to set additional environment variables for the deployment that are not exposed as Helm values. -For more information, see [Embedded Cluster Config](/reference/embedded-config). 
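-A minimal sketch of an Embedded Cluster Config that sets only the Embedded Cluster version is shown below. The version shown is a placeholder; use a current version from the Embedded Cluster releases page:

-```yaml
-# Embedded Cluster Config (minimal sketch; the version value is a placeholder)
-apiVersion: embeddedcluster.replicated.com/v1beta1
-kind: Config
-spec:
-  version: 2.1.3+k8s-1.30
-```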
+This ensures that users can set the environment variables that they require without the SDK Helm chart needing to be modified to expose the values. For example, if the SDK is running behind an HTTP proxy server, then the user could set `HTTP_PROXY` or `HTTPS_PROXY` environment variables to provide the hostname or IP address of their proxy server.

-## About Installing with Embedded Cluster

+To add environment variables to the Replicated SDK deployment, include the `replicated.extraEnv` array in your Helm chart `values.yaml` file. The `replicated.extraEnv` array accepts a list of environment variables in the following format:

-This section provides an overview of installing applications with Embedded Cluster.

+```yaml
+# Helm chart values.yaml

-### Installation Overview

+replicated:
+  extraEnv:
+  - name: ENV_VAR_NAME
+    value: ENV_VAR_VALUE
+```

-The following diagram demonstrates how Kubernetes and an application are installed into a customer environment using Embedded Cluster:

+:::note
+If the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` variables are configured with the [kots install](/reference/kots-cli-install) command, these variables will also be set automatically in the Replicated SDK.
+:::

-![Embedded Cluster installs an app in a customer environment](/images/embedded-cluster-install.png)

+**Example**:

-[View a larger version of this image](/images/embedded-cluster-install.png)

+```yaml
+# Helm chart values.yaml

-As shown in the diagram above, the Embedded Cluster Config is included in the application release in the Replicated Vendor Portal and is used to generate the Embedded Cluster installation assets. Users can download these installation assets from the Replicated app service (`replicated.app`) on the command line, then run the Embedded Cluster installation command to install Kubernetes and the KOTS Admin Console. Finally, users access the Admin Console to optionally add nodes to the cluster and to configure and install the application.

+replicated:
+  extraEnv:
+  - name: MY_ENV_VAR
+    value: my-value
+  - name: MY_ENV_VAR_2
+    value: my-value-2
+```

-### Installation Options

+## Custom Certificate Authority

-Embedded Cluster supports installations in online (internet-connected) environments and air gap environments with no outbound internet access.

+When installing the Replicated SDK behind a proxy server that terminates TLS and injects a custom certificate, you must provide the CA to the SDK. This can be done by storing the CA in a ConfigMap or a Secret prior to installation and providing appropriate values during installation.

-For online installations, Embedded Cluster also supports installing behind a proxy server.

+### Using a ConfigMap

-For more information about how to install with Embedded Cluster, see:
-* [Online Installation with Embedded Cluster](/enterprise/installing-embedded)
-* [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap)

+To use a CA stored in a ConfigMap:

-### Customer-Specific Installation Instructions

+1. Create a ConfigMap with the CA as the data value. Note that the name of the ConfigMap and the data key can be anything.
+   ```bash
+   kubectl -n NAMESPACE create configmap private-ca --from-file=ca.crt=./ca.crt
+   ```
+1. Add the name of the config map to the values file:
+   ```yaml
+   replicated:
+     privateCAConfigmap: private-ca
+   ```

-To install with Embedded Cluster, you can follow the customer-specific instructions provided on the **Customer** page in the Vendor Portal. 
For example:

+:::note
+If the `--private-ca-configmap` flag is used with the [kots install](/reference/kots-cli-install) command, this value will be populated in the Replicated SDK automatically.
+:::

-Embedded cluster install instruction dialog

+### Using a Secret

-[View a larger version of this image](/images/embedded-cluster-install-dialog.png)

+To use a CA stored in a Secret:

-### (Optional) Serve Installation Assets Using the Vendor API

+1. Create a Secret with the CA as a data value. Note that the name of the Secret and the key can be anything.
+   ```bash
+   kubectl -n NAMESPACE create secret generic private-ca --from-file=ca.crt=./ca.crt
+   ```
+1. Add the name of the secret and the key to the values file:
+   ```yaml
+   replicated:
+     privateCASecret:
+       name: private-ca
+       key: ca.crt
+   ```

-To install with Embedded Cluster, you need to download the Embedded Cluster installer binary and a license. Air gap installations also require an air gap bundle. Some vendors already have a portal where their customers can log in to access documentation or download artifacts. In cases like this, you can serve the Embedded Cluster installation assets yourself using the Replicated Vendor API, rather than having customers download the assets from the Replicated app service using a curl command during installation.

+## Add Tolerations

-To serve Embedded Cluster installation assets with the Vendor API:

+The Replicated SDK provides a `replicated.tolerations` value that allows users to add custom tolerations to the deployment. For more information about tolerations, see [Taints and Tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) in the Kubernetes documentation.

-1. If you have not done so already, create an API token for the Vendor API. See [Using the Vendor API v3](/reference/vendor-api-using#api-token-requirement).

+To add tolerations to the Replicated SDK deployment, include the `replicated.tolerations` array in your Helm chart `values.yaml` file. The `replicated.tolerations` array accepts a list of tolerations in the following format:

-1. Call the [Get an Embedded Cluster release](https://replicated-vendor-api.readme.io/reference/getembeddedclusterrelease) endpoint to download the assets needed to install your application with Embedded Cluster. Your customers must take this binary and their license and copy them to the machine where they will install your application.

+```yaml
+# Helm chart values.yaml

-   Note the following:

+replicated:
+  tolerations:
+  - key: "key"
+    operator: "Equal"
+    value: "value"
+    effect: "NoSchedule"
+```

-   * (Recommended) Provide the `customerId` query parameter so that the customer’s license is included in the downloaded tarball. This mirrors what is returned when a customer downloads the binary directly using the Replicated app service and is the most useful option. Excluding the `customerId` is useful if you plan to distribute the license separately.

+## Add Affinity

-   * If you do not provide any query parameters, this endpoint downloads the Embedded Cluster binary for the latest release on the specified channel. You can provide the `channelSequence` query parameter to download the binary for a particular release.

+The Replicated SDK provides a `replicated.affinity` value that allows users to add custom affinity to the deployment. For more information about affinity, see [Affinity and anti-affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) in the Kubernetes documentation. 
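-As a sketch only, a call to this endpoint with `curl` might look like the following, where `APP_ID`, `CHANNEL_ID`, `CUSTOMER_ID`, and the API token are placeholders, and the exact path and query parameters are as described in the endpoint reference linked above:

-```bash
-# Download the Embedded Cluster installation assets for a specific customer
-# (placeholder IDs; see the Get an Embedded Cluster release endpoint reference)
-curl -o embedded-cluster.tgz \
-  -H "Authorization: $REPLICATED_API_TOKEN" \
-  "https://api.replicated.com/vendor/v3/app/APP_ID/channel/CHANNEL_ID/embedded-cluster?customerId=CUSTOMER_ID"
-```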
-### About Host Preflight Checks

+To add affinity to the Replicated SDK deployment, include the `replicated.affinity` map in your Helm chart `values.yaml` file. The `replicated.affinity` map accepts a standard Kubernetes affinity object in the following format:

-During installation, Embedded Cluster automatically runs a default set of _host preflight checks_. The default host preflight checks are designed to verify that the installation environment meets the requirements for Embedded Cluster, such as:
-* The system has sufficient disk space
-* The system has at least 2 GB of memory and 2 CPU cores
-* The system clock is synchronized

+```yaml
+# Helm chart values.yaml

-For Embedded Cluster requirements, see [Embedded Cluster Installation Requirements](/enterprise/installing-embedded-requirements). For the full default host preflight spec for Embedded Cluster, see [`host-preflight.yaml`](https://github.com/replicatedhq/embedded-cluster/blob/main/pkg/preflights/host-preflight.yaml) in the `embedded-cluster` repository in GitHub.

+replicated:
+  affinity:
+    nodeAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        nodeSelectorTerms:
+        - matchExpressions:
+          - key: production/node-pool
+            operator: In
+            values:
+            - private-node-pool
+```
+## Add Custom Labels

-If any of the host preflight checks fail, installation is blocked and a message describing the failure is displayed. For more information about host preflight checks for installations on VMs or bare metal servers, see [About Host Preflights](preflight-support-bundle-about#host-preflights).

+With the Replicated SDK version 1.1.0 and later, you can pass custom labels to the Replicated SDK Helm chart by setting the `replicated.commonLabels` and `replicated.podLabels` Helm values in your Helm chart.

-#### Limitations

+### Requirement

-Embedded Cluster host preflight checks have the following limitations:

+The `replicated.commonLabels` and `replicated.podLabels` values are available with the Replicated SDK version 1.1.0 and later.

-* The default host preflight checks for Embedded Cluster cannot be modified, and vendors cannot provide their own custom host preflight spec for Embedded Cluster.
-* Host preflight checks do not check that any application-specific requirements are met. For more information about defining preflight checks for your application, see [Defining Preflight Checks](/vendor/preflight-defining).

+### commonLabels

-#### Skip Host Preflight Checks

+The `replicated.commonLabels` value allows you to add one or more labels to all resources created by the SDK chart.

-You can skip host preflight checks by passing the `--skip-host-preflights` flag with the Embedded Cluster `install` command. For example:

+For example:

-```bash
-sudo ./my-app install --license license.yaml --skip-host-preflights
+```yaml
+# Helm chart values.yaml
+
+replicated:
+  commonLabels:
+    environment: production
+    team: platform
 ```

-When you skip host preflight checks, the Admin Console still runs any application-specific preflight checks that are defined in the release before the application is deployed.

+### podLabels

-:::note
-Skipping host preflight checks is _not_ recommended for production installations.
-:::

+The `replicated.podLabels` value allows you to add pod-specific labels to the pod template.

-## About Managing Multi-Node Clusters with Embedded Cluster

+For example:

-This section describes managing nodes in multi-node clusters created with Embedded Cluster. 
+```yaml
+# Helm chart values.yaml

-### Defining Node Roles for Multi-Node Clusters

+replicated:
+  podLabels:
+    monitoring: enabled
+    custom.company.io/pod-label: value
+```

-You can optionally define node roles in the Embedded Cluster Config. For multi-node clusters, roles can be useful for the purpose of assigning specific application workloads to nodes. If node roles are defined, users access the Admin Console to assign one or more roles to a node when it is joined to the cluster.

+================
+File: docs/vendor/replicated-sdk-development.mdx
+================
+import IntegrationMode from "../partials/replicated-sdk/_integration-mode-install.mdx"

-For more information, see [roles](/reference/embedded-config#roles) in _Embedded Cluster Config_.

+# Developing Against the SDK API

-### Adding Nodes

+This topic describes how to develop against the SDK API to test changes locally. It includes information about installing the SDK in integration mode and port forwarding the SDK API service to your local machine. For more information about the SDK API, see [Replicated SDK API](/reference/replicated-sdk-apis).

-Users can add nodes to a cluster with Embedded Cluster from the Admin Console. The Admin Console provides the join command used to add nodes to the cluster.

+## Install the SDK in Integration Mode

-For more information, see [Managing Multi-Node Clusters with Embedded Cluster](/enterprise/embedded-manage-nodes).

+ 

-### High Availability for Multi-Node Clusters (Alpha)

+## Port Forwarding the SDK API Service {#port-forward}

-Multi-node clusters are not highly available by default. Enabling high availability (HA) requires that at least three controller nodes are present in the cluster. Users can enable HA when joining the third node.

+After the Replicated SDK is installed and initialized in a cluster, the Replicated SDK API is exposed at `replicated:3000`. You can access the SDK API for testing by forwarding port 3000 to your local machine.

-For more information about creating HA multi-node clusters with Embedded Cluster, see [Enable High Availability for Multi-Node Clusters (Alpha)](/enterprise/embedded-manage-nodes#ha) in _Managing Multi-Node Clusters with Embedded Cluster_.

+To port forward the SDK API service to your local machine:

-## About Performing Updates with Embedded Cluster

+1. Run the following command to port forward to the SDK API service:

- 

+   ```bash
+   kubectl port-forward service/replicated 3000
+   ```
+   ```
+   Forwarding from 127.0.0.1:3000 -> 3000
+   Forwarding from [::1]:3000 -> 3000
+   ```

-For more information about updating, see [Performing Updates with Embedded Cluster](/enterprise/updating-embedded).

+1. With the port forward running, test the SDK API endpoints as desired. For example:

-## Access the Cluster

+   ```bash
+   curl localhost:3000/api/v1/license/fields/expires_at
+   curl localhost:3000/api/v1/license/fields/{field}
+   ```
+   
+   For more information, see [Replicated SDK API](/reference/replicated-sdk-apis).

-With Embedded Cluster, end users should rarely need to use the CLI. Typical workflows, like updating the application and the cluster, are driven through the Admin Console.

+   :::note
+   When the SDK is installed in integration mode, requests to the `license` endpoints use your actual development license data, while requests to the `app` endpoints use the default mock data.
+   :::

-Nonetheless, there are times when vendors or their customers need to use the CLI for development or troubleshooting. 
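+For quick checks while the port forward is running, you can pipe the JSON responses through a tool such as `jq`. A minimal sketch, assuming `jq` is installed locally:

+```bash
+# Pretty-print the license expiration field returned by the SDK API
+curl -s localhost:3000/api/v1/license/fields/expires_at | jq .
+```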
+================ +File: docs/vendor/replicated-sdk-installing.mdx +================ +import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" +import KotsVerReq from "../partials/replicated-sdk/_kots-version-req.mdx" +import RegistryLogout from "../partials/replicated-sdk/_registry-logout.mdx" +import IntegrationMode from "../partials/replicated-sdk/_integration-mode-install.mdx" -To access the cluster and use other included binaries: +# Installing the Replicated SDK -1. SSH onto a controller node. +This topic describes the methods for distributing and installing the Replicated SDK. -1. Use the Embedded Cluster shell command to start a shell with access to the cluster: +It includes information about how to install the SDK alongside Helm charts or Kubernetes manifest-based applications using the Helm CLI or a Replicated installer (Replicated KOTS, kURL, Embedded Cluster). It also includes information about installing the SDK as a standalone component in integration mode. - ``` - sudo ./APP_SLUG shell - ``` +For information about installing the SDK in air gap mode, see [Installing the SDK in Air Gap Environments](replicated-sdk-airgap). - The output looks similar to the following: - ``` - __4___ - _ \ \ \ \ Welcome to APP_SLUG debug shell. - <'\ /_/_/_/ This terminal is now configured to access your cluster. - ((____!___/) Type 'exit' (or CTRL+d) to exit. - \0\0\0\0\/ Happy hacking. - ~~~~~~~~~~~ - root@alex-ec-2:/home/alex# export KUBECONFIG="/var/lib/embedded-cluster/k0s/pki/admin.conf" - root@alex-ec-2:/home/alex# export PATH="$PATH:/var/lib/embedded-cluster/bin" - root@alex-ec-2:/home/alex# source <(kubectl completion bash) - root@alex-ec-2:/home/alex# source /etc/bash_completion - ``` +## Requirement - The appropriate kubeconfig is exported, and the location of useful binaries like kubectl and Replicated’s preflight and support-bundle plugins is added to PATH. + - :::note - You cannot run the `shell` command on worker nodes. - ::: +## Install the SDK as a Subchart -1. Use the available binaries as needed. +When included as a dependency of your application Helm chart, the SDK is installed as a subchart alongside the application. - **Example**: +To install the SDK as a subchart: - ```bash - kubectl version - ``` - ``` - Client Version: v1.29.1 - Kustomize Version: v5.0.4-0.20230601165947-6ce0bf390ce3 - Server Version: v1.29.1+k0s - ``` +1. In your application Helm chart `Chart.yaml` file, add the YAML below to declare the SDK as a dependency. If your application is installed as multiple charts, declare the SDK as a dependency of the chart that customers install first. Do not declare the SDK in more than one chart. -1. Type `exit` or **Ctrl + D** to exit the shell. + - :::note - If you encounter a typical workflow where your customers have to use the Embedded Cluster shell, reach out to Alex Parker at alexp@replicated.com. These workflows might be candidates for additional Admin Console functionality. - ::: +1. Update the `charts/` directory: -## Reset a Node + ``` + helm dependency update + ``` + :::note + + ::: + +1. Package the Helm chart into a `.tgz` archive: -Resetting a node removes the cluster and your application from that node. This is useful for iteration, development, and when mistakes are made, so you can reset a machine and reuse it instead of having to procure another machine. + ``` + helm package . + ``` -If you want to completely remove a cluster, you need to reset each node individually. +1. Add the chart archive to a new release. 
For more information, see [Managing Releases with the CLI](/vendor/releases-creating-cli) or [Managing Releases with the Vendor Portal](/vendor/releases-creating-releases). -When resetting a node, OpenEBS PVCs on the node are deleted. Only PVCs created as part of a StatefulSet will be recreated automatically on another node. To recreate other PVCs, the application will need to be redeployed. +1. (Optional) Add a KOTS HelmChart custom resource to the release to support installation with Embedded Cluster, KOTS, or kURL. For more information, see [Configuring the HelmChart Custom Resource v2](/vendor/helm-native-v2-using). -To reset a node: +1. Save and promote the release to an internal-only channel used for testing, such as the default Unstable channel. -1. SSH onto the machine. Ensure that the Embedded Cluster binary is still available on that machine. +1. Install the release using Helm or a Replicated installer. For more information, see: + * [Online Installation with Embedded Cluster](/enterprise/installing-embedded) + * [Installing with Helm](/vendor/install-with-helm) + * [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster) + * [Online Installation with kURL](/enterprise/installing-kurl) -1. Run the following command to reset the node and automatically reboot the machine to ensure that transient configuration is also reset: +1. Confirm that the SDK was installed by seeing that the `replicated` Deployment was created: ``` - sudo ./APP_SLUG reset + kubectl get deploy --namespace NAMESPACE ``` - Where `APP_SLUG` is the unique slug for the application. + Where `NAMESPACE` is the namespace in the cluster where the application and the SDK are installed. - :::note - Pass the `--no-prompt` flag to disable interactive prompts. Pass the `--force` flag to ignore any errors encountered during the reset. - ::: + **Example output**: -## Additional Use Cases + ``` + NAME READY UP-TO-DATE AVAILABLE AGE + my-app 1/1 1 1 35s + replicated 1/1 1 1 35s + ``` -This section outlines some additional use cases for Embedded Cluster. These are not officially supported features from Replicated, but are ways of using Embedded Cluster that we or our customers have experimented with that might be useful to you. +## Install the SDK Alongside a Kubernetes Manifest-Based Application {#manifest-app} -### NVIDIA GPU Operator +For applications that use Kubernetes manifest files instead of Helm charts, the SDK Helm chart can be added to a release and then installed by KOTS alongside the application. -The NVIDIA GPU Operator uses the operator framework within Kubernetes to automate the management of all NVIDIA software components needed to provision GPUs. For more information about this operator, see the [NVIDIA GPU Operator](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/overview.html) documentation. + -You can include the NVIDIA GPU Operator in your release as an additional Helm chart, or using Embedded Cluster Helm extensions. For information about adding Helm extensions, see [extensions](/reference/embedded-config#extensions) in _Embedded Cluster Config_. +To add the SDK Helm chart to a release for a Kubernetes manifest-based application: -Using the NVIDIA GPU Operator with Embedded Cluster requires configuring the containerd options in the operator as follows: +1. 
Install the Helm CLI using Homebrew: -```yaml -# Embedded Cluster Config + ``` + brew install helm + ``` + For more information, including alternative installation options, see [Install Helm](https://helm.sh/docs/intro/install/) in the Helm documentation. - extensions: - helm: - repositories: - - name: nvidia - url: https://nvidia.github.io/gpu-operator - charts: - - name: gpu-operator - chartname: nvidia/gpu-operator - namespace: gpu-operator - version: "v24.9.1" - values: | - # configure the containerd options - toolkit: - env: - - name: CONTAINERD_CONFIG - value: /etc/k0s/containerd.d/nvidia.toml - - name: CONTAINERD_SOCKET - value: /run/k0s/containerd.sock -``` -When the containerd options are configured as shown above, the NVIDIA GPU Operator automatically creates the required configurations in the `/etc/k0s/containerd.d/nvidia.toml` file. It is not necessary to create this file manually, or modify any other configuration on the hosts. +1. Download the `.tgz` chart archive for the SDK Helm chart: -:::note -If you include the NVIDIA GPU Operator as a Helm extension, remove any existing containerd services that are running on the host (such as those deployed by Docker) before attempting to install the release with Embedded Cluster. If there are any containerd services on the host, the NVIDIA GPU Operator will generate an invalid containerd config, causing the installation to fail. -::: + ``` + helm pull oci://registry.replicated.com/library/replicated --version SDK_VERSION + ``` + Where `SDK_VERSION` is the version of the SDK to install. For a list of available SDK versions, see the [replicated-sdk repository](https://github.com/replicatedhq/replicated-sdk/tags) in GitHub. -## Troubleshoot with Support Bundles + The output of this command is a `.tgz` file with the naming convention `CHART_NAME-CHART_VERSION.tgz`. For example, `replicated-1.1.1.tgz`. - + For more information and additional options, see [Helm Pull](https://helm.sh/docs/helm/helm_pull/) in the Helm documentation. - +1. Add the SDK `.tgz` chart archive to a new release. For more information, see [Managing Releases with the CLI](/vendor/releases-creating-cli) or [Managing Releases with the Vendor Portal](/vendor/releases-creating-releases). -================ -File: docs/vendor/helm-image-registry.mdx -================ -import StepCreds from "../partials/proxy-service/_step-creds.mdx" -import StepCustomDomain from "../partials/proxy-service/_step-custom-domain.mdx" + The following shows an example of the SDK Helm chart added to a draft release for a standard manifest-based application: -# Using the Proxy Registry with Helm Installations + ![SDK Helm chart in a draft release](/images/sdk-kots-release.png) + + [View a larger version of this image](/images/sdk-kots-release.png) -This topic describes how to use the Replicated proxy registry to proxy images for installations with the Helm CLI. For more information about the proxy registry, see [About the Replicated Proxy Registry](private-images-about). +1. If one was not created automatically, add a KOTS HelmChart custom resource to the release. HelmChart custom resources have `apiVersion: kots.io/v1beta2` and `kind: HelmChart`. 
-## Overview + **Example:** + + ```yaml + apiVersion: kots.io/v1beta2 + kind: HelmChart + metadata: + name: replicated + spec: + # chart identifies a matching chart from a .tgz + chart: + # for name, enter replicated + name: replicated + # for chartversion, enter the version of the + # SDK Helm chart in the release + chartVersion: 1.1.1 + ``` -With the Replicated proxy registry, each customer's unique license can grant proxy access to images in an external private registry. To enable the proxy registry for Helm installations, you must create a Secret with `type: kubernetes.io/dockerconfigjson` to authenticate with the proxy registry. + As shown in the example above, the HelmChart custom resource requires the name and version of the SDK Helm chart that you added to the release: + * **`chart.name`**: The name of the SDK Helm chart is `replicated`. You can find the chart name in the `name` field of the SDK Helm chart `Chart.yaml` file. + * **`chart.chartVersion`**: The chart version varies depending on the version of the SDK that you pulled and added to the release. You can find the chart version in the `version` field of SDK Helm chart `Chart.yaml` file. -During Helm installations, after customers provide their license ID, a `global.replicated.dockerconfigjson` field that contains a base64 encoded Docker configuration file is automatically injected in the Helm chart values. You can use this `global.replicated.dockerconfigjson` field to create the required pull secret. + For more information about configuring the HelmChart custom resource to support KOTS installations, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about) and [HelmChart v2](/reference/custom-resource-helmchart-v2). -For information about how Kubernetes uses the `kubernetes.io/dockerconfigjson` Secret type to authenticate to a private image registry, see [Pull an Image from a Private Registry](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) in the Kubernetes documentation. +1. Save and promote the release to an internal-only channel used for testing, such as the default Unstable channel. -## Enable the Proxy Registry +1. Install the release using a Replicated installer. For more information, see: + * [Online Installation with Embedded Cluster](/enterprise/installing-embedded) + * [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster) + * [Online Installation with kURL](/enterprise/installing-kurl) -This section describes how to enable the proxy registry for applications deployed with Helm, including how to use the `global.replicated.dockerconfigjson` field that is injected during application deployment to create the required pull secret. +1. Confirm that the SDK was installed by seeing that the `replicated` Deployment was created: -To enable the proxy registry: + ``` + kubectl get deploy --namespace NAMESPACE + ``` + Where `NAMESPACE` is the namespace in the cluster where the application, the Admin Console, and the SDK are installed. -1. + **Example output**: -1. + ``` + NAME READY UP-TO-DATE AVAILABLE AGE + kotsadm 1/1 1 1 112s + my-app 1/1 1 1 28s + replicated 1/1 1 1 27s + ``` -1. 
In your Helm chart templates, create a Kubernetes Secret to evaluate if the `global.replicated.dockerconfigjson` value is set, and then write the rendered value into a Secret on the cluster: +## Install the SDK in Integration Mode - ```yaml - # /templates/replicated-pull-secret.yaml + - {{ if .Values.global.replicated.dockerconfigjson }} - apiVersion: v1 - kind: Secret - metadata: - name: replicated-pull-secret - type: kubernetes.io/dockerconfigjson - data: - .dockerconfigjson: {{ .Values.global.replicated.dockerconfigjson }} - {{ end }} - ``` +## Troubleshoot - :::note - If you use the Replicated SDK, do not use `replicated` for the name of the image pull secret because the SDK automatically creates a Secret named `replicated`. Using the same name causes an error. - ::: +### 401 Unauthorized Error When Updating Helm Dependencies {#401} -1. Ensure that you have a field in your Helm chart values file for your image repository URL, and that any references to the image in your Helm chart access the field from your values file. +#### Symptom - **Example**: +You see an error message similar to the following after adding the Replicated SDK as a dependency in your Helm chart then running `helm dependency update`: - ```yaml - # values.yaml - ... - dockerconfigjson: '{{ .Values.global.replicated.dockerconfigjson }}' - images: - myapp: - # Add image URL in the values file - apiImageRepository: quay.io/my-org/api - apiImageTag: v1.0.1 - ``` - ```yaml - # /templates/deployment.yaml +``` +Error: could not download oci://registry.replicated.com/library/replicated-sdk: failed to authorize: failed to fetch oauth token: unexpected status from GET request to https://registry.replicated.com/v2/token?scope=repository%3Alibrary%2Freplicated-sdk%3Apull&service=registry.replicated.com: 401 Unauthorized +``` - apiVersion: apps/v1 - kind: Deployment - metadata: - name: example - spec: - template: - spec: - containers: - - name: api - # Access the apiImageRepository field from the values file - image: {{ .Values.images.myapp.apiImageRepository }}:{{ .Values.images.myapp.apiImageTag }} - ``` +#### Cause -1. In your Helm chart templates, add the image pull secret that you created to any manifests that reference the private image: +When you run `helm dependency update`, Helm attempts to pull the Replicated SDK chart from the Replicated registry. An error can occur if you are already logged in to the Replicated registry with a license that has expired, such as when testing application releases. - ```yaml - # /templates/example.yaml - ... - {{ if .Values.global.replicated.dockerconfigjson }} - imagePullSecrets: - - name: replicated-pull-secret - {{ end }} - ``` +#### Solution - **Example:** +To solve this issue: - ```yaml - # /templates/deployment.yaml - ... - image: "{{ .Values.images.myapp.apiImageRepository }}:{{ .Values.images.myapp.apiImageTag }}" - {{ if .Values.global.replicated.dockerconfigjson }} - imagePullSecrets: - - name: replicated-pull-secret - {{ end }} - name: myapp - ports: - - containerPort: 3000 - name: http +1. Run the following command to remove login credentials for the Replicated registry: + + ``` + helm registry logout registry.replicated.com ``` -1. Package your Helm chart and add it to a release. Promote the release to a development channel. See [Managing Releases with Vendor Portal](releases-creating-releases). +1. Re-run `helm dependency update` for your Helm chart. -1. 
Install the chart in a development environment to test your changes: +================ +File: docs/vendor/replicated-sdk-overview.mdx +================ +import SDKOverview from "../partials/replicated-sdk/_overview.mdx" +import SdkValues from "../partials/replicated-sdk/_sdk-values.mdx" - 1. Create a local `values.yaml` file to override the default external registry image URL with the URL for the image on `proxy.replicated.com`. - - The proxy registry URL has the following format: `proxy.replicated.com/proxy/APP_SLUG/EXTERNAL_REGISTRY_IMAGE_URL` - - Where: - * `APP_SLUG` is the slug of your Replicated application. - * `EXTERNAL_REGISTRY_IMAGE_URL` is the path to the private image on your external registry. +# About the Replicated SDK - **Example** - ```yaml - # A local values.yaml file - ... - images: - myapp: - apiImageRepository: proxy.replicated.com/proxy/my-app/quay.io/my-org/api - apiImageTag: v1.0.1 +This topic provides an introduction to using the Replicated SDK with your application. - ``` +## Overview - :::note - If you configured a custom domain for the proxy registry, use the custom domain instead of `proxy.replicated.com`. For more information, see [Using Custom Domains](custom-domains-using). - ::: - - 1. Log in to the Replicated registry and install the chart, passing the local `values.yaml` file you created with the `--values` flag. See [Installing with Helm](install-with-helm). + -================ -File: docs/vendor/helm-install-airgap.mdx -================ -import Prerequisites from "../partials/helm/_helm-install-prereqs.mdx" +For more information about the Replicated SDK API, see [Replicated SDK API](/reference/replicated-sdk-apis). For information about developing against the SDK API locally, see [Developing Against the SDK API](replicated-sdk-development). -# Installing and Updating with Helm in Air Gap Environments +## Limitations -## Overview +The Replicated SDK has the following limitations: -Replicated supports installing and updating Helm charts in air gap environments with no outbound internet access. In air gap Helm installations, customers are guided through the process with instructions provided in the [Replicated Download Portal](/vendor/releases-share-download-portal). +* Some popular enterprise continuous delivery tools, such as ArgoCD and Pulumi, deploy Helm charts by running `helm template` then `kubectl apply` on the generated manifests, rather than running `helm install` or `helm upgrade`. The following limitations apply to applications installed by running `helm template` then `kubectl apply`: -When air gap Helm installations are enabled, an **Existing cluster with Helm** option is displayed in the Download Portal on the left nav. When selected, **Existing cluster with Helm** displays three tabs (**Install**, **Manual Update**, **Automate Updates**), as shown in the screenshot below: + * The `/api/v1/app/history` SDK API endpoint always returns an empty array because there is no Helm history in the cluster. See [GET /app/history](/reference/replicated-sdk-apis#get-apphistory) in _Replicated SDK API_. -![download helm option](/images/download-helm.png) + * The SDK does not automatically generate status informers to report status data for installed instances of the application. To get instance status data, you must enable custom status informers by overriding the `replicated.statusInformers` Helm value. 
See [Enable Application Status Insights](/vendor/insights-app-status#enable-application-status-insights) in _Enabling and Understanding Application Status_.

+## SDK Resiliency

-[View a larger version of this image](/images/download-helm.png)

+At startup and when serving requests, the SDK retrieves and caches the latest information from the upstream Replicated APIs, including customer license information.

-Each tab provides instructions for how to install, perform a manual update, or configure automatic updates, respectively.

+If the upstream APIs are not available at startup, the SDK does not accept connections or serve requests until it is able to communicate with the upstream APIs. If communication fails, the SDK retries every 10 seconds and the SDK pod is at `0/1` ready.

-These installing and updating instructions assume that your customer is accessing the Download Portal from a workstation that can access the internet and their internal private registry. Direct access to the target cluster is not required.

+When serving requests, if the upstream APIs become unavailable, the SDK serves from the memory cache and sets the `X-Replicated-Served-From-Cache` header to `true`. Additionally, rapid successive requests to the same SDK endpoint with the same request properties are rate-limited, returning the last cached payload and status code without reaching out to the upstream APIs. In this case, an `X-Replicated-Rate-Limited` header is set to `true`.

-Each method assumes that your customer is familiar with `curl`, `docker`, `helm`, `kubernetes`, and a bit of `bash`, particularly for automating updates.

+## Replicated SDK Helm Values

-## Prerequisites

+ 

-Before you install, complete the following prerequisites:

+================
+File: docs/vendor/replicated-sdk-slsa-validating.md
+================
+# SLSA Provenance Validation Process for the Replicated SDK

-* Reach out to your account rep to enable the Helm air gap installation feature.

+This topic describes the process to perform provenance validation on the Replicated SDK.

- 

+## About Supply Chain Levels for Software Artifacts (SLSA)

-## Install

+[Supply Chain Levels for Software Artifacts (SLSA)](https://slsa.dev/), pronounced “salsa,” is a security framework that comprises standards and controls designed to prevent tampering, enhance integrity, and secure software packages and infrastructure.

-The installation instructions provided in the Download Portal are designed to walk your customer through the first installation of your chart in an air gap environment.

-To install with Helm in an air gap environment:

+## Purpose of Attestations
+Attestations enable the inspection of an image to determine its origin, the identity of its creator, the creation process, and its contents. When building software using the Replicated SDK, the image’s Software Bill of Materials (SBOM) and SLSA-based provenance attestations empower your customers to make informed decisions regarding the impact of an image on the supply chain security of your application. This process ultimately enhances the security and assurances provided to both vendors and end customers.

-1. In the [Vendor Portal](https://vendor.replicated.com), go to **Customers > [Customer Name] > Reporting**.

+## Prerequisite
+Before you perform these tasks, you must install [slsa-verifier](https://github.com/slsa-framework/slsa-verifier) and [crane](https://github.com/google/go-containerregistry/blob/main/cmd/crane/doc/crane.md).

-1. 
In the **Download portal** section, click **Visit download portal** to log in to the Download Portal for the customer.

+## Validate the SDK SLSA Attestations

-1. In the Download Portal left nav, click **Existing cluster with Helm**.

+The Replicated SDK build process utilizes Wolfi-based images to minimize the number of CVEs. The build process automatically generates SBOMs and attestations, and then publishes the image along with these metadata components. For instance, you can find all the artifacts readily available on [DockerHub](https://hub.docker.com/r/replicated/replicated-sdk/tags). The following shell script is a tool to easily validate the SLSA attestations for a given Replicated SDK image.

-   ![download helm option](/images/download-helm.png)

+```
+#!/bin/bash
+
+# This script verifies the SLSA metadata of a container image
+#
+# Requires
+# - slsa-verifier (https://github.com/slsa-framework/slsa-verifier)
+# - crane (https://github.com/google/go-containerregistry/blob/main/cmd/crane/doc/crane.md)
+#

-   [View a larger version of this image](/images/download-helm.png)

-1. On the **Install** tab, in the **App version** dropdown, select the target application version to install.

+# Define the image and version to verify
+VERSION=v1.0.0-beta.20
+IMAGE=replicated/replicated-sdk:${VERSION}

-1. Run the first command to authenticate into the Replicated proxy registry with the customer's credentials (the `license_id`).

+# expected source repository that should have produced the artifact, e.g. github.com/some/repo
+SOURCE_REPO=github.com/replicatedhq/replicated-sdk

-1. Under **Get the list of images**, run the command provided to generate the list of images needed to install.

+# Use `crane` to retrieve the digest of the image without pulling the image
+IMAGE_WITH_DIGEST="${IMAGE}@"$(crane digest "${IMAGE}")

-1. For **(Optional) Specify registry URI**, provide the URI for an internal image registry where you want to push images. If a registry URI is provided, Replicated automatically updates the commands for tagging and pushing images with the URI.

+echo "Verifying artifact"
+echo "Image: ${IMAGE_WITH_DIGEST}"
+echo "Source Repo: ${SOURCE_REPO}"

-1. For **Pull, tag, and push each image to your private registry**, copy and paste the docker commands provided to pull, tag, and push each image to your internal registry.

+slsa-verifier verify-image "${IMAGE_WITH_DIGEST}" \
+  --source-uri ${SOURCE_REPO} \
+  --source-tag ${VERSION}

-   :::note
-   If you did not provide a URI in the previous step, ensure that you manually replace the image names in the `tag` and `push` commands with the target registry URI.
-   :::

+```

-1. Run the command to authenticate into the OCI registry that contains your Helm chart.

+================
+File: docs/vendor/resources-annotations-templating.md
+================
+# Templating Annotations

-1. Run the command to install the `preflight` plugin. This allows you to run preflight checks before installing to ensure that the installation environment meets the requirements for the application.

-1. For **Download a copy of the values.yaml file** and **Edit the values.yaml file**, run the `helm show values` command provided to download the values file for the Helm chart. Then, edit the values file as needed to customize the configuration of the given chart.

   If you are installing a release that contains multiple Helm charts, repeat these steps to download and edit each values file. 
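-As a sketch, downloading the values file for a chart pulled from the Replicated registry might look like the following, where the app, channel, and chart names are placeholders:

-```bash
-# Download the default values file for the chart (placeholder names)
-helm show values oci://registry.replicated.com/my-app/my-channel/my-chart > values.yaml
-```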
+This topic describes how to use Replicated KOTS template functions to template annotations for resources and objects based on user-supplied values.

-   :::note
-   For installations with multiple charts where two or more of the top-level charts in the release use the same name, ensure that each values file has a unique name to avoid installation errors. For more information, see [Installation Fails for Release With Multiple Helm Charts](helm-install-troubleshooting#air-gap-values-file-conflict) in _Troubleshooting Helm Installations_.
-   :::

+## Overview

-1. For **Determine install method**, select one of the options depending on your ability to access the internet and the cluster from your workstation.

+It is common for users to need to set custom annotations for a resource or object deployed by your application. For example, you might need to allow your users to provide annotations to apply to a Service or Ingress object in public cloud environments.

-1. Use the commands provided and the values file or files that you edited to run preflight checks and then install the release.

+For applications installed with Replicated KOTS, you can apply user-supplied annotations to resources or objects by first adding a field to the Replicated Admin Console **Config** page where users can enter one or more annotations. For information about how to add fields on the **Config** page, see [Creating and Editing Configuration Fields](/vendor/admin-console-customize-config-screen).

-## Perform Updates

+You can then map these user-supplied values from the **Config** page to resources and objects in your release using KOTS template functions. KOTS template functions are a set of custom template functions based on the Go text/template library that can be used to generate values specific to customer environments. The template functions in the Config context return user-supplied values on the **Config** page.

-This section describes the processes of performing manual and automatic updates with Helm in air gap environments using the instructions provided in the Download Portal.

+For more information about KOTS template functions in the Config context, see [Config Context](/reference/template-functions-config-context). For more information about the Go library, see [text/template](https://pkg.go.dev/text/template) in the Go documentation.

-### Manual Updates

+## About `kots.io/placeholder`

-The manual update instructions provided in the Download Portal are similar to the installation instructions.

+For applications installed with KOTS that use standard Kubernetes manifests, the `kots.io/placeholder` annotation allows you to template annotations in resources and objects without breaking the base YAML or needing to include the annotation key.

-However, the first step prompts the customer to select their current version and the target version to install. This step takes [required releases](/vendor/releases-about#properties) into consideration, thereby guiding the customer to the versions that are upgradable from their current version.

+The `kots.io/placeholder` annotation uses the format `kots.io/placeholder 'bool' 'string'`. For example:

-The additional steps are consistent with the installation process until the `preflight` and `install` commands where customers provide the existing values from the cluster with the `helm get values` command. Your customer will then need to edit the `values.yaml` to reference the new image tags. 
+```yaml +# Example manifest file -If the new version introduces new images or other values, Replicated recommends that you explain this at the top of your release notes so that customers know they will need to make additional edits to the `values.yaml` before installing. +annotations: + kots.io/placeholder: |- + repl{{ ConfigOption "additional_annotations" | nindent 4 }} +``` -### Automate Updates +:::note +For Helm chart-based applications installed with KOTS, Replicated recommends that you map user-supplied annotations to the Helm chart `values.yaml` file using the Replicated HelmChart custom resource, rather than using `kots.io/placeholder`. This allows you to access user-supplied values in your Helm chart without needing to include KOTS template functions directly in the Helm chart templates. -The instructions in the Download Portal for automating updates use API endpoints that your customers can automate against. +For an example, see [Map User-Supplied Annotations to Helm Chart Values](#map-user-supplied-annotations-to-helm-chart-values) below. +::: -The instructions in the Download Portal provide customers with example commands that can be put into a script that they run periodically (nightly, weekly) using GitHub Actions, Jenkins, or other platforms. +## Annotation Templating Examples -This method assumes that the customer has already done a successful manual installation, including the configuration of the appropriate `values`. +This section includes common examples of templating annotations in resources and objects to map user-supplied values. -After logging into the registry, the customer exports their current version and uses that to query an endpoint that provides the latest installable version number (either the next required release, or the latest release) and export it as the target version. With the target version, they can now query an API for the list of images. +For additional examples of how to map values to Helm chart-based applications, see [Applications](https://github.com/replicatedhq/platform-examples/tree/main/applications) in the platform-examples repository in GitHub. -With the list of images the provided `bash` script will automate the process of pulling updated images from the repository, tagging them with a name for an internal registry, and then pushing the newly tagged images to their internal registry. +### Map Multiple Annotations from a Single Configuration Field -Unless the customer has set up the `values` to preserve the updated tag (for example, by using the `latest` tag), they need to edit the `values.yaml` to reference the new image tags. After doing so, they can log in to the OCI registry and perform the commands to install the updated chart. +You can map one or more annotations from a single `textarea` field on the **Config** page. The `textarea` type defines multi-line text input and supports properties such as `rows` and `cols`. For more information, see [textarea](/reference/custom-resource-config#textarea) in _Config_. -## Use a Harbor or Artifactory Registry Proxy +For example, the following Config custom resource adds an `ingress_annotations` field of type `textarea`: -You can integrate the Replicated proxy registry with an existing Harbor or jFrog Artifactory instance to proxy and cache images on demand. For more information, see [Using a Registry Proxy for Helm Air Gap Installations](using-third-party-registry-proxy). 
+```yaml +# Config custom resource -================ -File: docs/vendor/helm-install-overview.mdx -================ -import Helm from "../partials/helm/_helm-definition.mdx" +apiVersion: kots.io/v1beta1 +kind: Config +metadata: + name: config +spec: + groups: + - name: ingress_settings + title: Ingress Settings + description: Configure Ingress + items: + - name: ingress_annotations + type: textarea + title: Ingress Annotations + help_text: See your cloud provider’s documentation for the required annotations. +``` -# About Helm Installations with Replicated +On the **Config** page, users can enter one or more key value pairs in the `ingress_annotations` field, as shown in the example below: -This topic provides an introduction to Helm installations for applications distributed with Replicated. +![Config page with custom annotations in a Ingress Annotations field](/images/config-map-annotations.png) -## Overview +[View a larger version of this image](/images/config-map-annotations.png) - +```yaml +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: example-annotation + annotations: + kots.io/placeholder: |- + repl{{ ConfigOption "ingress_annotations" | nindent 4 }} +``` -Replicated strongly recommends that all applications are packaged using Helm because many enterprise users expect to be able to install an application with the Helm CLI. +During installation, KOTS renders the YAML with the multi-line input from the configuration field as shown below: -Existing releases in the Replicated Platform that already support installation with Replicated KOTS and Replicated Embedded Cluster (and that include one or more Helm charts) can also be installed with the Helm CLI; it is not necessary to create and manage separate releases or channels for each installation method. +```yaml +# Rendered Ingress object +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: example-annotation + annotations: + kots.io/placeholder: |- + + key1: value1 + key2: value2 + key3: value3 +``` -For information about how to install with Helm, see: -* [Installing with Helm](/vendor/install-with-helm) -* [Installing and Updating with Helm in Air Gap Environments (Alpha)](helm-install-airgap) +### Map Annotations from Multiple Configuration Fields -The following diagram shows how Helm charts distributed with Replicated are installed with Helm in online (internet-connected) customer environments: +You can specify multiple annotations using the same `kots.io/placeholder` annotation. -diagram of a helm chart in a custom environment +For example, the following Ingress object includes ConfigOption template functions that render the user-supplied values for the `ingress_annotation` and `ingress_hostname` fields: -[View a larger version of this image](/images/helm-install-diagram.png) +```yaml +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: example-annotation + annotations: + kots.io/placeholder: |- + repl{{ ConfigOption "ingress_annotation" | nindent 4 }} + repl{{ printf "my.custom/annotation.ingress.hostname: %s" (ConfigOption "ingress_hostname") | nindent 4 }} +``` -As shown in the diagram above, when a release containing one or more Helm charts is promoted to a channel, the Replicated Vendor Portal automatically extracts any Helm charts included in the release. These charts are pushed as OCI objects to the Replicated registry. The Replicated registry is a private OCI registry hosted by Replicated at `registry.replicated.com`. 
For information about security for the Replicated registry, see [Replicated Registry Security](packaging-private-registry-security). +During installation, KOTS renders the YAML as shown below: -For example, if your application in the Vendor Portal is named My App and you promote a release containing a Helm chart with `name: my-chart` to a channel with the slug `beta`, then the Vendor Portal pushes the chart to the following location: `oci://registry.replicated.com/my-app/beta/my-chart`. +```yaml +# Rendered Ingress object -Customers can install your Helm chart by first logging in to the Replicated registry with their unique license ID. This step ensures that any customer who installs your chart from the registry has a valid, unexpired license. After the customer logs in to the Replicated registry, they can run `helm install` to install the chart from the registry. +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: example-annotation + annotations: + kots.io/placeholder: |- + + key1: value1 + my.custom/annotation.ingress.hostname: example.hostname.com +``` -During installation, the Replicated registry injects values into the `global.replicated` key of the parent Helm chart's values file. For more information about the values schema, see [Helm global.replicated Values Schema](helm-install-values-schema). +### Map User-Supplied Value to a Key -## Limitations +You can map a user-supplied value from the **Config** page to a pre-defined annotation key. -Helm installations have the following limitations: +For example, in the following Ingress object, `my.custom/annotation.ingress.hostname` is the key for the templated annotation. The annotation also uses the ConfigOption template function to map the user-supplied value from a `ingress_hostname` configuration field: -* Installing with Helm in air gap environments is an Beta feature. For more information, see [Installing and Updating with Helm in Air Gap Environments](/vendor/helm-install-airgap). -* Helm CLI installations do not provide access to any of the features of the Replicated KOTS installer, such as: - * The KOTS Admin Console - * Strict preflight checks that block installation - * Backup and restore with snapshots - * Required releases with the **Prevent this release from being skipped during upgrades** option +```yaml +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: example-annotation + annotations: + kots.io/placeholder: |- + repl{{ printf "my.custom/annotation.ingress.hostname: %s" (ConfigOption "ingress_hostname") | nindent 4 }} +``` -================ -File: docs/vendor/helm-install-release.md -================ -import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" -import RegistryLogout from "../partials/replicated-sdk/_registry-logout.mdx" -import HelmPackage from "../partials/helm/_helm-package.mdx" +During installation, KOTS renders the YAML as shown below: -# Packaging a Helm Chart for a Release +```yaml +# Rendered Ingress object -This topic describes how to package a Helm chart and the Replicated SDK into a chart archive that can be added to a release. 
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: example-annotation
+  annotations:
+    kots.io/placeholder: |-
+
+      my.custom/annotation.ingress.hostname: example.hostname.com
+```

-## Overview

+### Include Conditional Statements in Templated Annotations

-To add a Helm chart to a release, you first add the Replicated SDK as a dependency of the Helm chart and then package the chart and its dependencies as a `.tgz` chart archive.

+You can include or exclude templated annotations based on a conditional statement.

-The Replicated SDK is a Helm chart can be installed as a small service alongside your application. The SDK provides access to key Replicated features, such as support for collecting custom metrics on application instances. For more information, see [About the Replicated SDK](replicated-sdk-overview).

+For example, the following Ingress object includes a conditional statement for `kots.io/placeholder` that renders `my.custom/annotation.class: somevalue` if the user enables a `custom_annotation` field on the **Config** page:

-## Requirements and Recommendations

+```yaml
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: myapp
+  labels:
+    app: myapp
+  annotations:
+    kots.io/placeholder: |-
+      repl{{if ConfigOptionEquals "custom_annotation" "1" }}repl{{ printf "my.custom/annotation.class: somevalue" | nindent 4 }}repl{{end}}
+spec:
+...
+```

-This section includes requirements and recommendations for Helm charts.

+During installation, if the user enables the `custom_annotation` configuration field, KOTS renders the YAML as shown below:

-### Chart Version Requirement

+```yaml
+# Rendered Ingress object

-The chart version in your Helm chart must comply with image tag format requirements. A valid tag can contain only lowercase and uppercase letters, digits, underscores, periods, and dashes.

+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: myapp
+  labels:
+    app: myapp
+  annotations:
+    kots.io/placeholder: |-
+      my.custom/annotation.class: somevalue
+spec:
+...
+```

-The chart version must also comply with the Semantic Versioning (SemVer) specification. When you run the `helm install` command without the `--version` flag, Helm retrieves the list of all available image tags for the chart from the registry and compares them using the SemVer comparison rules described in the SemVer specification. The version that is installed is the version with the largest tag value. For more information about the SemVer specification, see the [Semantic Versioning](https://semver.org) documentation.

+Alternatively, if the condition evaluates to false, the annotation does not appear in the rendered YAML:

-### Chart Naming

+```yaml
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: myapp
+  labels:
+    app: myapp
+  annotations:
+    kots.io/placeholder: |-
+spec:
+...
+```

-For releases that contain more than one Helm chart, Replicated recommends that you use unique names for each top-level Helm chart in the release. This aligns with Helm best practices and also avoids potential conflicts in filenames during installation that could cause the installation to fail. For more information, see [Installation Fails for Release With Multiple Helm Charts](helm-install-troubleshooting#air-gap-values-file-conflict) in _Troubleshooting Helm Installations_. 
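+The conditional example above assumes a corresponding checkbox on the **Config** page. As a minimal sketch, a `custom_annotation` field like the one referenced in the conditional statement could be defined as a `bool` item in the Config custom resource (the group and field titles here are illustrative):

+```yaml
+# Config custom resource (sketch)
+apiVersion: kots.io/v1beta1
+kind: Config
+metadata:
+  name: config
+spec:
+  groups:
+  - name: annotation_settings
+    title: Annotation Settings
+    items:
+    - name: custom_annotation
+      type: bool
+      title: Add a custom Ingress annotation
+      default: "0"
+```

+When the user checks the box, the value of `custom_annotation` is `"1"`, so `ConfigOptionEquals "custom_annotation" "1"` evaluates to true and the annotation is rendered.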
+### Map User-Supplied Annotations to Helm Chart Values -### Helm Best Practices +For Helm chart-based applications installed with KOTS, Replicated recommends that you map user-supplied annotations to the Helm chart `values.yaml` file, rather than using `kots.io/placeholder`. This allows you to access user-supplied values in your Helm chart without needing to include KOTS template functions directly in the Helm chart templates. -Replicated recommends that you review the [Best Practices](https://helm.sh/docs/chart_best_practices/) guide in the Helm documentation to ensure that your Helm chart or charts follows the required and recommended conventions. +To map user-supplied annotations from the **Config** page to the Helm chart `values.yaml` file, you use the `values` field of the Replicated HelmChart custom resource. For more information, see [values](/reference/custom-resource-helmchart-v2#values) in _HelmChart v2_. -## Package a Helm Chart {#release} +For example, the following HelmChart custom resource uses a ConfigOption template function in `values.services.myservice.annotations` to map the value of a configuration field named `additional_annotations`: -This procedure shows how to create a Helm chart archive to add to a release. For more information about the Helm CLI commands in this procedure, see the [Helm Commands](https://helm.sh/docs/helm/helm/) section in the Helm documentation. +```yaml +# HelmChart custom resource -To package a Helm chart so that it can be added to a release: +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: myapp +spec: + values: + services: + myservice: + annotations: repl{{ ConfigOption "additional_annotations" | nindent 10 }} +``` -1. In your application Helm chart `Chart.yaml` file, add the YAML below to declare the SDK as a dependency. If your application is installed as multiple charts, declare the SDK as a dependency of the chart that customers install first. Do not declare the SDK in more than one chart. +The `values.services.myservice.annotations` field in the HelmChart custom resource corresponds to a `services.myservice.annotations` field in the `value.yaml` file of the application Helm chart, as shown in the example below: - - - For additional guidelines related to adding the SDK as a dependency, see [Install the SDK as a Subchart](replicated-sdk-installing#install-the-sdk-as-a-subchart) in _Installing the Replicated SDK_. +```yaml +# Helm chart values.yaml -1. Update dependencies and package the chart as a `.tgz` file: +services: + myservice: + annotations: {} +``` - +During installation, the ConfigOption template function in the HelmChart custom resource renders the user-supplied values from the `additional_annotations` configuration field. - :::note - - ::: +Then, KOTS replaces the value in the corresponding field in the `values.yaml` in the chart archive, as shown in the example below. -1. Add the `.tgz` file to a release. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Managing Releases with the CLI](releases-creating-cli). +```yaml +# Rendered Helm chart values.yaml - After the release is promoted, your Helm chart is automatically pushed to the Replicated registry. For information about how to install a release with the Helm CLI, see [Installing with Helm](install-with-helm). For information about how to install Helm charts with KOTS, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about). 
+services: + myservice: + annotations: + key1: value1 +``` + +In your Helm chart templates, you can access these values from the `values.yaml` file to apply the user-supplied annotations to the target resources or objects. For information about how to access values from a `values.yaml` file, see [Values Files](https://helm.sh/docs/chart_template_guide/values_files/) in the Helm documentation. ================ -File: docs/vendor/helm-install-troubleshooting.mdx +File: docs/vendor/snapshots-configuring-backups.md ================ -# Troubleshooting Helm Installations with Replicated +# Configuring Snapshots -This topic provides troubleshooting information for common issues related to performing installations and upgrades with the Helm CLI. +This topic provides information about how to configure the Velero Backup resource to enable Replicated KOTS snapshots for an application. -## Installation Fails for Release With Multiple Helm Charts {#air-gap-values-file-conflict} +For more information about snapshots, see [About Backup and Restore with snapshots](/vendor/snapshots-overview). -#### Symptom +## Configure Snapshots -When performing installing a release with multiple Helm charts, the installation fails. You might also see the following error message: +Add a Velero Backup custom resource (`kind: Backup`, `apiVersion: velero.io/v1`) to your release and configure it as needed. After configuring the Backup resource, add annotations for each volume that you want to be included in backups. -``` -Error: INSTALLATION FAILED: cannot re-use a name that is still in use -``` +To configure snapshots for your application: -#### Cause +1. In a new release containing your application files, add a Velero Backup resource (`kind: Backup` and `apiVersion: velero.io/v1`): -In the Download Portal, each chart's values file is named according to the chart's name. For example, the values file for the Helm chart Gitea would be named `gitea-values.yaml`. + ```yaml + apiVersion: velero.io/v1 + kind: Backup + metadata: + name: backup + spec: {} + ``` -If any top-level charts in the release use the same name, the associated values files will also be assigned the same name. This causes each new values file downloaded with the `helm show values` command to overwrite any previously-downloaded values file of the same name. +1. Configure the Backup resource to specify the resources that will be included in backups. -#### Solution + For more information about the Velero Backup resource, including limitations, the list of supported fields for snapshots, and an example, see [Velero Backup Resource for Snapshots](/reference/custom-resource-backup). -Replicated recommends that you use unique names for top-level Helm charts in the same release. +1. (Optional) Configure backup and restore hooks. For more information, see [Configuring Backup and Restore Hooks for Snapshots](snapshots-hooks). -Alternatively, if a release contains charts that must use the same name, convert one or both of the charts into subcharts and use Helm conditions to differentiate them. See [Conditions and Tags](https://helm.sh/docs/chart_best_practices/dependencies/#conditions-and-tags) in the Helm documentation. +1. For each volume that requires a backup, add the `backup.velero.io/backup-volumes` annotation. The annotation name is `backup.velero.io/backup-volumes` and the value is a comma separated list of volumes to include in the backup. 
-================ -File: docs/vendor/helm-install-values-schema.mdx -================ -import SdkValues from "../partials/replicated-sdk/_sdk-values.mdx" +
   <details>
   <summary>Why do I need to use the backup annotation?</summary>

   By default, no volumes are included in the backup. If any pods mount a volume that should be backed up, you must configure the backup with an annotation listing the specific volumes to include in the backup.

   </details>
    -# Helm global.replicated Values Schema - -This topic describes the `global.replicated` values that are injected in the values file of an application's parent Helm chart during Helm installations with Replicated. - -## Overview - -When a user installs a Helm application with the Helm CLI, the Replicated registry injects a set of customer-specific values into the `global.replicated` key of the parent Helm chart's values file. - -The values in the `global.replicated` field include the following: + **Example:** -* The fields in the customer's license, such as the field names, descriptions, signatures, values, and any custom license fields that you define. Vendors can use this license information to check entitlements before the application is installed. For more information, see [Checking Entitlements in Helm Charts Before Deployment](/vendor/licenses-reference-helm). + In the following Deployment manifest file, `pvc-volume` is the only volume that is backed up. The `scratch` volume is not included in the backup because it is not listed in annotation on the pod specification. -* A base64 encoded Docker configuration file. To proxy images from an external private registry with the Replicated proxy registry, you can use the `global.replicated.dockerconfigjson` field to create an image pull secret for the proxy registry. For more information, see [Proxying Images for Helm Installations](/vendor/helm-image-registry). + ```yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + name: sample + labels: + app: foo + spec: + replicas: 1 + selector: + matchLabels: + app: foo + template: + metadata: + labels: + app: foo + annotations: + backup.velero.io/backup-volumes: pvc-volume + spec: + containers: + - image: k8s.gcr.io/test-webserver + name: test-webserver + volumeMounts: + - name: pvc-volume + mountPath: /volume-1 + - name: scratch + mountPath: /volume-2 + volumes: + - name: pvc-volume + persistentVolumeClaim: + claimName: test-volume-claim + - name: scratch + emptyDir: {} -The following is an example of a Helm values file containing the `global.replicated` values: + ``` -```yaml -# Helm values.yaml -global: - replicated: - channelName: Stable - customerEmail: username@example.com - customerName: Example Customer - dockerconfigjson: eyJhdXRocyI6eyJd1dIRk5NbEZFVGsxd2JGUmFhWGxYWm5scloyNVRSV1pPT2pKT2NGaHhUVEpSUkU1... - licenseFields: - expires_at: - description: License Expiration - name: expires_at - signature: - v1: iZBpESXx7fpdtnbMKingYHiJH42rP8fPs0x8izy1mODckGBwVoA... - title: Expiration - value: "2023-05-30T00:00:00Z" - valueType: String - licenseID: YiIXRTjiB7R... - licenseType: dev -``` +1. (Optional) Configure manifest exclusions. By default, Velero also includes backups of all of the Kubernetes objects in the namespace. -## `global.replicated` Values Schema + To exclude any manifest file, add a [`velero.io/exclude-from-backup=true`](https://velero.io/docs/v1.5/resource-filtering/#veleroioexclude-from-backuptrue) label to the manifest to be excluded. 
The following example shows the Secret manifest file with the `velero.io/exclude-from-backup` label:

   ```yaml
   apiVersion: v1
   kind: Secret
   metadata:
     name: sample
     labels:
       velero.io/exclude-from-backup: "true"
   stringData:
     uri: Secret To Not Include

   ```

1. If you distribute multiple applications with Replicated, repeat these steps for each application. Each application must have its own Backup resource to be included in a full backup with snapshots.

1. (kURL Only) If your application supports installation with Replicated kURL, Replicated recommends that you include the kURL Velero add-on so that customers do not have to manually install Velero in the kURL cluster. For more information, see [Creating a kURL Installer](packaging-embedded-kubernetes).

================
File: docs/vendor/snapshots-hooks.md
================
-import GitOpsLimitation from "../partials/helm/_gitops-limitation.mdx"
-import GitOpsNotRecommended from "../partials/gitops/_gitops-not-recommended.mdx"
-import TemplateLimitation from "../partials/helm/_helm-template-limitation.mdx"
-import VersionLimitation from "../partials/helm/_helm-version-limitation.mdx"
-import HooksLimitation from "../partials/helm/_hooks-limitation.mdx"
-import HookWeightsLimitation from "../partials/helm/_hook-weights-limitation.mdx"
-import Deprecated from "../partials/helm/_replicated-deprecated.mdx"
-import KotsHelmCrDescription from "../partials/helm/_kots-helm-cr-description.mdx"
-import ReplicatedHelmMigration from "../partials/helm/_replicated-helm-migration.mdx"
-import Helm from "../partials/helm/_helm-definition.mdx"

-# About Distributing Helm Charts with KOTS

-This topic provides an overview of how Replicated KOTS deploys Helm charts, including an introduction to the KOTS HelmChart custom resource, limitations of deploying Helm charts with KOTS, and more. 
- -## Overview +# Configuring Backup and Restore Hooks for Snapshots - +This topic describes the use of custom backup and restore hooks and demonstrates a common example. -KOTS can install applications that include: -* One or more Helm charts -* More than a single instance of any chart -* A combination of Helm charts and Kubernetes manifests +## About Backup and Restore Hooks -Replicated strongly recommends that all applications are packaged as Helm charts because many enterprise users expect to be able to install an application with the Helm CLI. +Velero supports the use of backup hooks and restore hooks. -Deploying Helm charts with KOTS provides additional functionality not directly available with the Helm CLI, such as: -* The KOTS Admin Console -* Backup and restore with snapshots -* Support for air gap installations -* Support for embedded cluster installations on VMs or bare metal servers +Your application workload might require additional processing or scripts to be run before or after creating a backup to prepare the system for a backup. Many application workloads also require additional processing or scripts to run during or after the restore process. -Additionally, for applications packaged as Helm charts, you can support Helm CLI and KOTS installations from the same release without having to maintain separate sets of Helm charts and application manifests. The following diagram demonstrates how a single release containing one or more Helm charts can be installed using the Helm CLI and KOTS: +Some common examples of how to use a hook to create backups are: +- Run `pg_dump` to export a postgres database prior to backup +- Lock a file before running a backup, and unlock immediately after +- Delete TMP files that should not be backed up +- Restore a database file only if that file exists +- Perform required setup tasks in a restored Pod before the application containers can start -One release being installed into three different customer environments +Additionally, for embedded clusters created by Replicated kURL, you must write custom backup and restore hooks to enable back ups for any object-stored data that is not KOTS-specific and does not use persistentVolumeClaims (PVCs). For more information about object-stored data, see [Other Object Stored Data](snapshots-overview#other-object-stored-data) in _Backup and Restore_. -[View a larger version of this image](/images/helm-kots-install-options.png) +For more information about backup and restore hooks, see [Backup Hooks](https://velero.io/docs/v1.10/backup-hooks/) and [Restore Hooks](https://velero.io/docs/v1.10/restore-hooks) in the Velero documentation. -For a tutorial that demonstrates how to add a sample Helm chart to a release and then install the release using both KOTS and the Helm CLI, see [Install a Helm Chart with KOTS and the Helm CLI](/vendor/tutorial-kots-helm-setup). +## Example -## How KOTS Deploys Helm Charts +The following example demonstrates how to include Velero backup and restore hooks for a Postgres database in a Replicated HelmChart custom resource manifest file. -This section describes how KOTS uses the HelmChart custom resource to deploy Helm charts. +The use case for this example is an application packaged with a Helm chart that includes a Postgres database. A description of key fields from the YAML follows the example. 
-### About the HelmChart Custom Resource +```yaml +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: postgresql +spec: + exclude: 'repl{{ ConfigOptionEquals `postgres_type` `external_postgres` }}' - + chart: + name: postgresql + chartVersion: 8.7.4 -The HelmChart custom resource with `apiVersion: kots.io/v1beta2` (HelmChart v2) is supported with KOTS v1.99.0 and later. For more information, see [About the HelmChart kots.io/v1beta2 Installation Method](#v2-install) below. + values: -KOTS versions earlier than v1.99.0 can install Helm charts with `apiVersion: kots.io/v1beta1` of the HelmChart custom resource. The `kots.io/v1beta1` HelmChart custom resource is deprecated. For more information, see [Deprecated HelmChart kots.io/v1beta1 Installation Methods](#deprecated-helmchart-kotsiov1beta1-installation-methods) below. + master: + podAnnotations: + backup.velero.io/backup-volumes: backup + pre.hook.backup.velero.io/command: '["/bin/bash", "-c", "PGPASSWORD=$POSTGRES_PASSWORD pg_dump -U username -d dbname -h 127.0.0.1 > /scratch/backup.sql"]' + pre.hook.backup.velero.io/timeout: 3m + post.hook.restore.velero.io/command: '["/bin/bash", "-c", "[ -f \"/scratch/backup.sql\" ] && PGPASSWORD=$POSTGRES_PASSWORD psql -U username -h 127.0.0.1 -d dbname -f /scratch/backup.sql && rm -f /scratch/backup.sql;"]' -### About the HelmChart v2 Installation Method {#v2-install} + extraVolumes: + - name: backup + emptyDir: + sizeLimit: 1Gi + extraVolumeMounts: + - name: backup + mountPath: /scratch -When you include a HelmChart custom resource with `apiVersion: kots.io/v1beta2` in a release, KOTS v1.99.0 or later does a Helm install or upgrade of the associated Helm chart directly. + global: + postgresql: + postgresqlUsername: username + postgresqlPassword: "repl{{ ConfigOption `embedded_postgres_password` }}" + postgresqlDatabase: dbname +``` -The `kots.io/v1beta2` HelmChart custom resource does _not_ modify the chart during installation. This results in Helm chart installations that are consistent, reliable, and easy to troubleshoot. For example, you can reproduce the exact installation outside of KOTS by downloading a copy of the application files from the cluster with `kots download`, then using those files to install with `helm install`. And, you can use `helm get values` to view the values that were used to install. +The following describes key fields from the example above: -The `kots.io/v1beta2` HelmChart custom resource requires configuration. For more information, see [Configuring the HelmChart Custom Resource v2](helm-native-v2-using). +* `spec.exclude`: A common and recommended pattern for applications. The customer can choose to bring an external Postgres instance instead of running it in-cluster. The Replicated KOTS template function in `spec.exclude` evaluates to true when the user specifies the external database option in the Admin Console **Config** page. This means that the internal Postgres database is not included in the deployment. -For information about the fields and syntax of the HelmChart custom resource, see [HelmChart v2](/reference/custom-resource-helmchart-v2). +* `spec.values.master.podAnnotations`: Adds podAnnotations to the postgres master PodSpec. Velero backup and restore hooks are included in the podAnnotations. The following table describes the podAnnotations: -### Limitations + :::note + Run backup hooks inside the container that contains the data to back up. 
+ ::: -The following limitations apply when deploying Helm charts with the `kots.io/v1beta2` HelmChart custom resource: + + + + + + + + + + + + + + + + + + + + + +
   | podAnnotation | Description |
   | --- | --- |
   | `backup.velero.io/backup-volumes` | A comma-separated list of volumes from the Pod to include in the backup. The primary data volume is not included in this field because data is exported using the backup hook. |
   | `pre.hook.backup.velero.io/command` | A stringified JSON array containing the command for the backup hook. This command is a `pg_dump` from the running database to the backup volume. |
   | `pre.hook.backup.velero.io/timeout` | A duration for the maximum time to let this script run. |
   | `post.hook.restore.velero.io/command` | A Velero exec restore hook that runs a script to check if the database file exists, and restores only if it exists. Then, the script deletes the file after the operation is complete. |
    -* Available only for Helm v3. +* `spec.master.extraVolumes`: A new volume that is injected into the postgres Pod. The new volume is an empty volume that uses ephemeral storage. The ephemeral storage must have enough space to accommodate the size of the exported data. +The `extraVolumeMounts` field mounts the volume into the `/scratch` directory of the master Pod. The volume is used as a destination when the backup hook command described above runs `pg_dump`. This is the only volume that is backed up. -* Available only for KOTS v1.99.0 and later. +================ +File: docs/vendor/snapshots-overview.mdx +================ +import RestoreTable from "../partials/snapshots/_restoreTable.mdx" +import NoEcSupport from "../partials/snapshots/_limitation-no-ec-support.mdx" +import RestoreTypes from "../partials/snapshots/_restore-types.mdx" +import Dr from "../partials/snapshots/_limitation-dr.mdx" +import Os from "../partials/snapshots/_limitation-os.mdx" +import InstallMethod from "../partials/snapshots/_limitation-install-method.mdx" +import CliRestores from "../partials/snapshots/_limitation-cli-restores.mdx" -* The rendered manifests shown in the `rendered` directory might not reflect the final manifests that will be deployed to the cluster. This is because the manifests in the `rendered` directory are generated using `helm template`, which is not run with cluster context. So values returned by the `lookup` function and the built-in `Capabilities` object might differ. +# About Backup and Restore with Snapshots -* When updating the HelmChart custom resource in a release from `kots.io/v1beta1` to `kots.io/v1beta2`, the diff viewer shows a large diff because the underlying file structure of the rendered manifests is different. +This topic provides an introduction to the Replicated KOTS snapshots feature for backup and restore. It describes how vendors enable snapshots, the type of data that is backed up, and how to troubleshoot issues for enterprise users. -* Editing downstream Kustomization files to make changes to the application before deploying is not supported. This is because KOTS does not use Kustomize when installing Helm charts with the `kots.io/v1beta2` HelmChart custom resource. For more information about patching applications with Kustomize, see [Patching with Kustomize](/enterprise/updating-patching-with-kustomize). +:::note + +::: -* +## Overview - +An important part of the lifecycle of an application is backup and restore. You can enable Replicated KOTS snapshots to support backup and restore for existing cluster installations with KOTS and Replicated kURL installations. - For more information, see [KOTS Auto-GitOps Workflow](/enterprise/gitops-workflow). -## Support for Helm Hooks {#hooks} +When snapshots is enabled for your application, your customers can manage and perform backup and restore from the Admin Console or KOTS CLI. -KOTS supports the following hooks for Helm charts: -* `pre-install`: Executes after resources are rendered but before any resources are installed. -* `post-install`: Executes after resources are installed. -* `pre-upgrade`: Executes after resources are rendered but before any resources are upgraded. -* `post-upgrade`: Executes after resources are upgraded. -* `pre-delete`: Executes before any resources are deleted. -* `post-delete`: Executes after resources are deleted. +Snapshots uses the Velero open source project as the backend to back up Kubernetes manifests and persistent volumes. Velero is a mature, fully-featured application. 
For more information, see the [Velero documentation](https://velero.io/docs/). -The following limitations apply to using hooks with Helm charts deployed by KOTS: +In addition to the default functionality that Velero provides, KOTS exposes hooks that let you inject scripts that can execute both before and after a backup, and before and after a restore. For more information, see [Configuring Backup and Restore Hooks for Snapshots](/vendor/snapshots-hooks). -* +### Limitations and Considerations -* +* -For more information about Helm hooks, see [Chart Hooks](https://helm.sh/docs/topics/charts_hooks/) in the Helm documentation. +- The snapshots feature is available only for licenses with the **Allow Snapshots** option enabled. For more information, see [Creating and Managing Customers](/vendor/releases-creating-customer). -## Air Gap Installations +- Snapshots are useful for rollback and disaster recovery scenarios. They are not intended to be used for application migration. -KOTS supports installation of Helm charts into air gap environments with configuration of the HelmChart custom resource [`builder`](/reference/custom-resource-helmchart-v2#builder) key. The `builder` key specifies the Helm values to use when building the air gap bundle for the application. +- -For more information about how to configure the `builder` key to support air gap installations, see [Packaging Air Gap Bundles for Helm Charts](/vendor/helm-packaging-airgap-bundles). +- -## Resource Deployment Order +- -When installing an application that includes one or more Helm charts, KOTS always deploys standard Kubernetes manifests to the cluster _before_ deploying any Helm charts. For example, if your release contains a Helm chart, a CRD, and a ConfigMap, then the CRD and ConfigMap resources are deployed before the Helm chart. +- -For information about how to set the deployment order for Helm charts with KOTS, see [Orchestrating Resource Deployment](/vendor/orchestrating-resource-deployment). +- Removing data from the snapshot storage itself results in data corruption and the loss of snapshots. Instead, use the **Snapshots** tab in the Admin Console to cleanup and remove snapshots. -## Deprecated HelmChart kots.io/v1beta1 Installation Methods +- Snapshots does not support Amazon Simple Storage Service (Amazon S3) buckets that have a bucket policy requiring the server-side encryption header. If you want to require server-side encryption for objects, you can enable default encryption on the bucket instead. For more information about Amazon S3, see the [Amazon S3](https://docs.aws.amazon.com/s3/?icmpid=docs_homepage_featuredsvcs) documentation. -This section describes the deprecated Helm chart installation methods that use the HelmChart custom resource `apiVersion: kots.io/v1beta1`. +### Velero Version Compatibility -:::important - -::: +The following table lists which versions of Velero are compatible with each version of KOTS. For more information, see the [Velero documentation](https://velero.io/docs/). -### useHelmInstall: true {#v1beta1} +| KOTS version | Velero version | +|------|-------------| +| 1.15 to 1.20.2 | 1.2.0 | +| 1.20.3 to 1.94.0 | 1.5.1 through 1.9.x | +| 1.94.1 and later | 1.6.x through 1.12.x | -:::note -This method was previously referred to as _Native Helm_. -::: +## About Backups -When you include version `kots.io/v1beta1` of the HelmChart custom resource with `useHelmInstall: true`, KOTS uses Kustomize to render the chart with configuration values, license field values, and rewritten image names. 
KOTS then packages the resulting manifests into a new Helm chart to install. For more information about Kustomize, see the [Kustomize documentation](https://kubectl.docs.kubernetes.io/). +This section describes the types of backups that are supported with snapshots. For information about how to configure backup storage destinations for snapshots, see the [Configuring Backup Storage](/enterprise/snapshots-velero-cli-installing) section. -The following diagram shows how KOTS processes Helm charts for deployment with the `kots.io/v1beta1` method: +### Application and Admin Console (Full) Backups -![Flow chart of a v1beta1 Helm chart deployment to a cluster](/images/native-helm-flowchart.png) +Full backups (also referred to as _instance_ backups) include the KOTS Admin Console and all application data, including application volumes and manifest files. -[View a larger image](/images/native-helm-flowchart.png) +For clusters created with Replicated kURL, full backups also back up the Docker registry, which is required for air gapped installations. -As shown in the diagram above, when given a Helm chart, KOTS: +If you manage multiple applications with the Admin Console, data from all applications that support backups is included in a full backup. To be included in full backups, each application must include a manifest file with `kind: Backup` and `apiVersion: velero.io/v1`, which you can check for in the Admin Console. -- Uses Kustomize to merge instructions from KOTS and the end user to chart resources (see steps 2 - 4 below) -- Packages the resulting manifest files into a new Helm chart (see step 5 below) -- Deploys the new Helm chart (see step 5 below) +Full backups are recommended because they support all types of restores. For example, you can restore both the Admin Console and application from a full backup to a new cluster in disaster recovery scenarios. Or, you can use a full backup to restore only application data for the purpose of rolling back after deploying a new version of an application. -To deploy Helm charts with version `kots.io/v1beta1` of the HelmChart custom resource, KOTS does the following: +### Application-Only (Partial) Backups -1. **Checks for previous installations of the chart**: If the Helm chart has already been deployed with a HelmChart custom resource that has `useHelmInstall: false`, then KOTS does not attempt the install the chart. The following error message is displayed if this check fails: `Deployment method for chart has changed`. For more information, see [HelmChart kots.io/v1beta1 (useHelmInstall: false)](#v1beta1-false) below. +Partial backups back up the application volumes and manifest files only. Partial backups do not back up the KOTS Admin Console. -1. **Writes base files**: KOTS extracts Helm manifests, renders them with Replicated templating, and then adds all files from the original Helm tarball to a `base/charts/` directory. +Partial backups can be useful if you need to roll back after deploying a new application version. Partial backups of the application only _cannot_ be restored to a new cluster, and are therefore not useable for disaster recovery scenarios. - Under `base/charts/`, KOTS adds a Kustomization file named `kustomization.yaml` in the directories for each chart and subchart. KOTS uses these Kustomization files later in the deployment process to merge instructions from Kustomize to the chart resources. For more information about Kustomize, see the [Kustomize website](https://kustomize.io). 
+### Backup Storage Destinations - The following screenshot from the Replicated Admin Console shows a `base/charts/` directory for a deployed application. The `base/charts/` directory contains a Helm chart named postgresql with one subchart: +For disaster recovery, backups should be configured to use a storage destination that exists outside of the cluster. This is especially true for installations in clusters created with Replicated kURL, because the default storage location on these clusters is internal. - ![Base directory in the Admin Console](/images/native-helm-base.png) +You can use a storage provider that is compatible with Velero as the storage destination for backups created with the Replicated snapshots feature. For a list of the compatible storage providers, see [Providers](https://velero.io/docs/v1.9/supported-providers/) in the Velero documentation. - In the screenshot above, a Kustomization file that targets the resources from the postgresql Helm chart appears in the `base/charts/postgresql/` directory: +You initially configure backups on a supported storage provider backend using the KOTS CLI. If you want to change the storage destination after the initial configuration, you can use the the **Snapshots** page in the Admin Console, which has built-in support for the following storage destinations: - ```yaml - apiVersion: kustomize.config.k8s.io/v1beta1 - kind: Kustomization - resources: - - secrets.yaml - - statefulset.yaml - - svc-headless.yaml - - svc.yaml - ``` - -1. **Writes midstream files with Kustomize instructions from KOTS**: KOTS then copies the directory structure from `base/charts/` to an `overlays/midstream/charts/` directory. The following screenshot shows an example of the midstream directory for the postgresql Helm chart: - - ![Midstream directory in the Admin Console UI](/images/native-helm-midstream.png) +- Amazon Web Services (AWS) +- Google Cloud Provider (GCP) +- Microsoft Azure +- S3-Compatible +- Network File System (NFS) +- Host Path - As shown in the screenshot above, the midstream directory also contains a Kustomization file with instructions from KOTS for all deployed resources, such as image pull secrets, image rewrites, and backup labels. For example, in the midstream Kustomization file, KOTS rewrites any private images to pull from the Replicated proxy registry. +kURL installers that include the Velero add-on also include a locally-provisioned object store. By default, kURL clusters are preconfigured in the Admin Console to store backups in the locally-provisioned object store. This object store is sufficient for only rollbacks and downgrades and is not a suitable configuration for disaster recovery. Replicated recommends that you configure a snapshots storage destination that is external to the cluster in the Admin Console for kURL clusters. - The following shows an example of a midstream Kustomization file for the postgresql Helm chart: +For information about how to configure backup storage destinations for snapshots, see the [Configuring Backup Storage](/enterprise/snapshots-velero-cli-installing) section. 
- ```yaml - apiVersion: kustomize.config.k8s.io/v1beta1 - bases: - - ../../../../base/charts/postgresql - commonAnnotations: - kots.io/app-slug: helm-test - images: - - name: gcr.io/replicated-qa/postgresql - newName: proxy.replicated.com/proxy/helm-test/gcr.io/replicated-qa/postgresql - kind: Kustomization - patchesStrategicMerge: - - pullsecrets.yaml - resources: - - secret.yaml - transformers: - - backup-label-transformer.yaml - ``` +### What Data is Backed Up? - As shown in the example above, all midstream Kustomization files have a `bases` entry that references the corresponding Kustomization file from the `base/charts/` directory. +Full backups include the Admin Console and all application data, including KOTS-specific object-stored data. For Replicated kURL installations, this also backs up the Docker registry, which is required for air gapped installations. -1. **Writes downstream files for end user Kustomize instructions**: KOTS then creates an `overlays/downstream/this-cluster/charts` directory and again copies the directory structure of `base/charts/` to this downstream directory: +#### Other Object-Stored Data - ![Downstream directory in the Admin Console UI](/images/native-helm-downstream.png) +For kURL clusters, you might be using object-stored data that is not specific to the kURL KOTS add-on. - As shown in the screenshot above, each chart and subchart directory in the downstream directory also contains a Kustomization file. These downstream Kustomization files contain only a `bases` entry that references the corresponding Kustomization file from the midstream directory. For example: +For object-stored data that is not KOTS-specific and does not use persistentVolumeClaims (PVCs), you must write custom backup and restore hooks to enable back ups for that object-stored data. For example, Rook and Ceph do not use PVCs and so require custom backup and restore hooks. For more information about writing custom hooks, see [Configuring Backup and Restore Hooks for Snapshots](snapshots-hooks). - ```yaml - apiVersion: kustomize.config.k8s.io/v1beta1 - bases: - - ../../../../midstream/charts/postgresql - kind: Kustomization - ``` - - End users can edit the downstream Kustomization files to make changes before deploying the application. Any instructions that users add to the Kustomization files in the downstream directory take priority over midstream and base Kustomization files. For more information about how users can make changes before deploying, see [Patching with Kustomize](/enterprise/updating-patching-with-kustomize). +#### Pod Volume Data -1. **Deploys the Helm chart**: KOTS runs `kustomize build` for any Kustomization files in the `overlays/downstream/charts` directory. KOTS then packages the resulting manifests into a new chart for Helm to consume. +Replicated supports only the restic backup program for pod volume data. - Finally, KOTS runs `helm upgrade -i --timeout 3600s -n `. The Helm binary processes hooks and weights, applies manifests to the Kubernetes cluster, and saves a release secret similar to `sh.helm.release.v1.chart-name.v1`. Helm uses this secret to track upgrades and rollbacks of applications. +By default, Velero requires that you opt-in to have pod volumes backed up. In the Backup resource that you configure to enable snapshots, you must annotate each specific volume that you want to back up. For more information about including and excluding pod volumes, see [Configuring Snapshots](/vendor/snapshots-configuring-backups). 
-### useHelmInstall: false {#v1beta1-false} +## About Restores {#restores} -:::note -This method was previously referred to as _Replicated Helm_. -::: + -When you use version `kots.io/v1beta1` of HelmChart custom resource with `useHelmInstall: false`, KOTS renders the Helm templates and deploys them as standard Kubernetes manifests using `kubectl apply`. KOTS also has additional functionality for specific Helm hooks. For example, when KOTS encounters an upstream Helm chart with a `helm.sh/hook-delete-policy` annotation, it automatically adds the same `kots.io/hook-delete-policy` to the Job object. +When you restore an application with snapshots, KOTS first deletes the selected application. All existing application manifests are removed from the cluster, and all `PersistentVolumeClaims` are deleted. This action is not reversible. -The resulting deployment is comprised of standard Kubernetes manifests. Therefore, cluster operators can view the exact differences between what is currently deployed and what an update will deploy. +Then, the restore process redeploys all of the application manifests. All Pods are given an extra `initContainer` and an extra directory named `.velero`, which are used for restore hooks. For more information about the restore process, see [Restore Reference](https://velero.io/docs/v1.9/restore-reference/) in the Velero documentation. -### Limitations {#replicated-helm-limitations} +When you restore the Admin Console only, no changes are made to the application. -This section lists the limitations for version `kots.io/v1beta1` of the HelmChart custom resource. -#### kots.io/v1beta1 (useHelmInstall: true) Limitations +For information about how to restore using the Admin Console or the KOTS CLI, see [Restoring from Backups](/enterprise/snapshots-restoring-full). -The following limitations apply when using version `kots.io/v1beta1` of the HelmChart custom resource with `useHelmInstall: true`: +## Using Snapshots -* +This section provides an overview of how vendors and enterprise users can configure and use the snapshots feature. -* Available only for Helm V3. +### How to Enable Snapshots for Your Application -* +To enable the snapshots backup and restore feature for your users, you must: - For more information, see [KOTS Auto-GitOps Workflow](/enterprise/gitops-workflow). +- Have the snapshots entitlement enabled in your Replicated vendor account. For account entitlements, contact the Replicated TAM team. +- Define a manifest for creating backups. See [Configuring Snapshots](snapshots-configuring-backups). +- When needed, configure backup and restore hooks. See [Configuring Backup and Restore Hooks for Snapshots](snapshots-hooks). +- Enable the **Allow Snapshot** option in customer licenses. See [Creating and Managing Customers](releases-creating-customer). -* +### Understanding Backup and Restore for Users {#how-users} -* +After vendors enable backup and restore, enterprise users install Velero and configure a storage destination in the Admin Console. Then users can create backups manually or schedule automatic backups. -* +Replicated recommends advising your users to make full backups for disaster recovery purposes. Additionally, full backups give users the flexibility to do a full restore, a partial restore (application only), or restore just the Admin Console. 
-* +From a full backup, users restore using the KOTS CLI or the Admin Console as indicated in the following table: - For more information, see [helmVersion](/reference/custom-resource-helmchart#helmversion) in _HelmChart_. + -#### kots.io/v1beta1 (useHelmInstall: false) Limitations {#v1beta1-false-limitations} +Partial backups are not recommended as they are a legacy feature and only back up the application volumes and manifests. Partial backups can be restored only from the Admin Console. -The following limitations apply when using version `kots.io/v1beta1` of the HelmChart custom resource with `useHelmInstall: false`: +### Troubleshooting Snapshots -* +To support end users with backup and restore, use the following resources: -* +- To help troubleshoot error messages, see [Troubleshooting Snapshots](/enterprise/snapshots-troubleshooting-backup-restore). -* +- Review the Limitations and Considerations section to make sure an end users system is compliant. - For more information, see [helmVersion](/reference/custom-resource-helmchart#helmversion) in _HelmChart_. +- Check that the installed Velero version and KOTS version are compatible. ================ -File: docs/vendor/helm-native-v2-using.md +File: docs/vendor/support-bundle-customizing.mdx ================ -import KotsHelmCrDescription from "../partials/helm/_kots-helm-cr-description.mdx" +# Adding and Customizing Support Bundles -# Configuring the HelmChart Custom Resource v2 +This topic describes how to add a default support bundle spec to a release for your application. It also describes how to customize the default support bundle spec based on your application's needs. For more information about support bundles, see [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about). -This topic describes how to configure the Replicated HelmChart custom resource version `kots.io/v1beta2` to support Helm chart installations with Replicated KOTS. +The information in this topic applies to Helm applications and Kubernetes manifest-based application installed with Helm or with Replicated KOTS. -## Workflow +## Step 1: Add the Default Spec to a Manifest File -To support Helm chart installations with the KOTS `kots.io/v1beta2` HelmChart custom resource, do the following: -1. Rewrite image names to use the Replicated proxy registry. See [Rewrite Image Names](#rewrite-image-names). -1. Inject a KOTS-generated image pull secret that grants proxy access to private images. See [Inject Image Pull Secrets](#inject-image-pull-secrets). -1. Add a pull secret for any Docker Hub images that could be rate limited. See [Add Pull Secret for Rate-Limited Docker Hub Images](#docker-secret). -1. Configure the `builder` key to allow your users to push images to their own local registries. See [Support Local Image Registries](#local-registries). -1. (KOTS Existing Cluster and kURL Installations Only) Add backup labels to your resources to support backup and restore with the KOTS snapshots feature. See [Add Backup Labels for Snapshots](#add-backup-labels-for-snapshots). - :::note - Snapshots is not supported for installations with Replicated Embedded Cluster. For more information about configuring disaster recovery for Embedded Cluster, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery). - ::: +You can add the support bundle spec to a Kubernetes Secret or a SupportBundle custom resource. 
The type of manifest file that you use depends on your application type (Helm or manifest-based) and installation method (Helm or KOTS). -## Task 1: Rewrite Image Names {#rewrite-image-names} +Use the following guidance to determine which type of manifest file to use for creating a support bundle spec: -Configure the KOTS HelmChart custom resource `values` key so that KOTS rewrites the names for both private and public images in your Helm values during deployment. This allows images to be accessed at one of the following locations, depending on where they were pushed: -* The [Replicated proxy registry](private-images-about) (`proxy.replicated.com` or your custom domain) -* A public image registry -* Your customer's local registry -* The built-in registry used in Replicated Embedded Cluster or Replicated kURL installations in air-gapped environments +* **Helm Applications**: For Helm applications, see the following guidance: -You will use the following KOTS template functions to conditionally rewrite image names depending on where the given image should be accessed: -* [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry): Returns true if the installation environment is configured to use a local image registry. HasLocalRegistry is always true in air gap installations. HasLocalRegistry is also true in online installations if the user configured a local private registry. -* [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost): Returns the host of the local registry that the user configured. Alternatively, for air gap installations with Embedded Cluster or kURL, LocalRegistryHost returns the host of the built-in registry. -* [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace): Returns the namespace of the local registry that the user configured. Alternatively, for air gap installations with Embedded Cluster or kURL, LocalRegistryNamespace returns the namespace of the built-in registry. + * **(Recommended) Helm or KOTS v1.94.2 and Later**: For Helm applications installed with Helm or KOTS v1.94.2 or later, create the support bundle spec in a Kubernetes Secret in your Helm chart `templates`. See [Kubernetes Secret](#secret). -
-    <details>
-    <summary>What is the registry namespace?</summary>
-
-    The registry namespace is the path between the registry and the image name. For example, `images.mycompany.com/namespace/image:tag`.
-
-    </details>
    + * **KOTS v1.94.1 and Earlier**: For Helm applications installed with KOTS v1.94.1 or earlier, create the support bundle spec in a Preflight custom resource. See [SupportBundle Custom Resource](#sb-cr). -### Task 1a: Rewrite Private Image Names +* **Kubernetes Manifest-Based Applications**: For Kubernetes manifest-based applications, create the support bundle spec in a Preflight custom resource. See [SupportBundle Custom Resource](#sb-cr). -For any private images used by your application, configure the HelmChart custom resource so that image names are rewritten to either the Replicated proxy registry (for online installations) or to the local registry in the user's installation environment (for air gap installations or online installations where the user configured a local registry). +### Kubernetes Secret {#secret} -To rewrite image names to the location of the image in the proxy registry, use the format `/proxy//`, where: -* `` is `proxy.replicated.com` or your custom domain. For more information about configuring a custom domain for the proxy registry, see [Using Custom Domains](/vendor/custom-domains-using). -* `` is the unique application slug in the Vendor Portal -* `` is the path to the image in your registry +You can define support bundle specs in a Kubernetes Secret for the following installation types: +* Installations with Helm +* Helm applications installed with KOTS v1.94.2 and later -For example, if the private image is `quay.io/my-org/nginx:v1.0.1` and `images.mycompany.com` is the custom proxy registry domain, then the image name should be rewritten to `images.mycompany.com/proxy/my-app-slug/quay.io/my-org/nginx:v1.0.1`. +In your Helm chart `templates` directory, add the following YAML to a Kubernetes Secret: -For more information, see the example below. +```yaml +apiVersion: v1 +kind: Secret +metadata: + labels: + troubleshoot.sh/kind: support-bundle + name: example +stringData: + support-bundle-spec: | + apiVersion: troubleshoot.sh/v1beta2 + kind: SupportBundle + metadata: + name: support-bundle + spec: + collectors: [] + analyzers: [] +``` -#### Example +As shown above, the Secret must include the following: -The following HelmChart custom resource uses the KOTS [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry), [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost), and [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace) template functions to conditionally rewrite an image registry and repository depending on if a local registry is used: +* The label `troubleshoot.sh/kind: support-bundle` +* A `stringData` field with a key named `support-bundle-spec` -```yaml -# kots.io/v1beta2 HelmChart custom resource +This empty support bundle spec includes the following collectors by default: +* [clusterInfo](https://troubleshoot.sh/docs/collect/cluster-info/) +* [clusterResources](https://troubleshoot.sh/docs/collect/cluster-resources/) -apiVersion: kots.io/v1beta2 -kind: HelmChart -metadata: - name: samplechart -spec: - ... 
- values: - image: - # If a registry is configured by the user or by Embedded Cluster/kURL, use that registry's hostname - # Else use proxy.replicated.com or your custom proxy registry domain - registry: '{{repl HasLocalRegistry | ternary LocalRegistryHost "images.mycompany.com" }}' - # If a registry is configured by the user or by Embedded Cluster/kURL, use that registry namespace - # Else use the image's namespace at the proxy registry domain - repository: '{{repl HasLocalRegistry | ternary LocalRegistryNamespace "proxy/my-app/quay.io/my-org" }}/nginx' - tag: v1.0.1 -``` +You do not need manually include the `clusterInfo` or `clusterResources` collectors in the spec. -The `spec.values.image.registry` and `spec.values.image.repository` fields in the HelmChart custom resource above correspond to `image.registry` and `image.repository` fields in the Helm chart `values.yaml` file, as shown below: +:::note +If your application is deployed as multiple Helm charts, Replicated recommends that you create separate support bundle specs for each subchart. This allows you to make specs that are specific to different components of your application. When a support bundle is generated, all the specs are combined to provide a single bundle. +::: -```yaml -# Helm chart values.yaml file +After you create this empty support bundle spec, you can test the support bundle by following the instructions in [Generating a Support Bundle](/vendor/support-bundle-generating). You can customize the support bundle spec by adding collectors and analyzers or editing the default collectors. For more information, see [Step 2: Customize the spec](/vendor/support-bundle-customizing#customize-the-spec) below. -image: - registry: quay.io - repository: my-org/nginx - tag: v1.0.1 -``` +### SupportBundle Custom Resource {#sb-cr} -During installation, KOTS renders the template functions and sets the `image.registry` and `image.repository` fields in the Helm chart `values.yaml` file based on the value of the corresponding fields in the HelmChart custom resource. +You can define support bundle specs in a SupportBundle custom resource for the following installation types: +* Kubernetes manifest-based applications installed with KOTS +* Helm applications installed with KOTS v1.94.1 and earlier -Any templates in the Helm chart that access the `image.registry` and `image.repository` fields are updated to use the appropriate value, as shown in the example below: +In a release for your application, add the following YAML to a new `support-bundle.yaml` manifest file: ```yaml -apiVersion: v1 -kind: Pod +apiVersion: troubleshoot.sh/v1beta2 +kind: SupportBundle metadata: - name: nginx + name: example spec: - containers: - - name: - image: {{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }} + collectors: [] + analyzers: [] ``` +For more information about the SupportBundle custom resource, see [Preflight and Support Bundle](/reference/custom-resource-preflight). 
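During development, you can run a spec like this directly with the open source Troubleshoot CLI to verify that it generates a bundle. A sketch, assuming the spec is saved locally as `support-bundle.yaml`:

```bash
kubectl support-bundle ./support-bundle.yaml
```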
-### Task 1b: Rewrite Public Image Names
+This empty support bundle spec includes the following collectors by default:
+* [clusterInfo](https://troubleshoot.sh/docs/collect/cluster-info/)
+* [clusterResources](https://troubleshoot.sh/docs/collect/cluster-resources/)
-For any public images used by your application, configure the HelmChart custom resource so that image names are rewritten to either the location of the image in the public registry (for online installations) or the local registry (for air gap installations or online installations where the user configured a local registry.
+You do not need to manually include the `clusterInfo` or `clusterResources` collectors in the spec.
-For more information, see the example below.
+After you create this empty support bundle spec, you can test the support bundle by following the instructions in [Generating a Support Bundle](/vendor/support-bundle-generating). You can customize the support bundle spec by adding collectors and analyzers or editing the default collectors. For more information, see [Step 2: Customize the Spec](/vendor/support-bundle-customizing#customize-the-spec) below.
-#### Example
+## Step 2: Customize the Spec {#customize-the-spec}
-The following HelmChart custom resource uses the KOTS [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry), [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost), and [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace) template functions to conditionally rewrite an image registry and repository depending on if a local registry is used:
+You can customize the support bundles for your application by:
+* Adding collectors and analyzers
+* Editing or excluding the default `clusterInfo` and `clusterResources` collectors
-```yaml
-# kots.io/v1beta2 HelmChart custom resource
+### Add Collectors
-apiVersion: kots.io/v1beta2
-kind: HelmChart
-metadata:
-  name: samplechart
-spec:
-  ...
-  values:
-    image:
-      # If a local registry is used, use that registry's hostname
-      # Else, use the public registry host (ghcr.io)
-      registry: '{{repl HasLocalRegistry | ternary LocalRegistryHost "ghcr.io" }}'
-      # If a local registry is used, use the registry namespace provided
-      # Else, use the path to the image in the public registry
-      repository: '{{repl HasLocalRegistry | ternary LocalRegistryNamespace "cloudnative-pg" }}/cloudnative-pg'
-      tag: catalog-1.24.0
-```
+Collectors gather information from the cluster, the environment, the application, or other sources. Collectors generate output that is then used by the analyzers that you define.
-The `spec.values.image.registry` and `spec.values.image.repository` fields in the HelmChart custom resource correspond to `image.registry` and `image.repository` fields in the Helm chart `values.yaml` file, as shown in the example below:
+In addition to the default `clusterInfo` and `clusterResources` collectors, the Troubleshoot open source project includes several collectors that you can include in the spec to gather more information from the installation environment. To view all the available collectors, see [All Collectors](https://troubleshoot.sh/docs/collect/all/) in the Troubleshoot documentation.
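+For example, the following is a minimal sketch of a spec that adds a `logs` collector alongside the defaults. The `app=api` selector and `my-app-namespace` namespace are illustrative placeholders for your own workload labels and namespace:
+
+```yaml
+apiVersion: troubleshoot.sh/v1beta2
+kind: SupportBundle
+metadata:
+  name: support-bundle
+spec:
+  collectors:
+    # collect logs from Pods labeled app=api in the application namespace
+    - logs:
+        selector:
+          - app=api
+        namespace: my-app-namespace
+        limits:
+          maxLines: 10000
+```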
-```yaml -# Helm chart values.yaml file +The following are some recommended collectors: -image: - registry: ghcr.io - repository: cloudnative-pg/cloudnative-pg - tag: catalog-1.24.0 -``` +- [logs](https://troubleshoot.sh/docs/collect/logs/) +- [secret](https://troubleshoot.sh/docs/collect/secret/) and [configMap](https://troubleshoot.sh/docs/collect/configmap/) +- [postgresql](https://troubleshoot.sh/docs/collect/postgresql/), [mysql](https://troubleshoot.sh/docs/collect/mysql/), and [redis](https://troubleshoot.sh/docs/collect/redis/) +- [runPod](https://troubleshoot.sh/docs/collect/run-pod/) +- [copy](https://troubleshoot.sh/docs/collect/copy/) and [copyFromHost](https://troubleshoot.sh/docs/collect/copy-from-host/) +- [http](https://troubleshoot.sh/docs/collect/http/) -During installation, KOTS renders the template functions and sets the `image.registry` and `image.repository` fields in your Helm chart `values.yaml` file based on the value of the corresponding fields in the HelmChart custom resource. Any templates in the Helm chart that access the `image.registry` and `image.repository` fields are updated to use the appropriate value, as shown in the example below: +### Add Analyzers -```yaml -apiVersion: v1 -kind: Pod -spec: - containers: - - name: - image: {{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }} -``` +Analyzers use the data from the collectors to generate output for the support bundle. Good analyzers clearly identify failure modes and provide troubleshooting guidance for the user. For example, if you can identify a log message from your database component that indicates a problem, you should write an analyzer that checks for that log and provides a description of the error to the user. -## Task 2: Inject Image Pull Secrets {#inject-image-pull-secrets} +The Troubleshoot open source project includes several analyzers that you can include in the spec. To view all the available analyzers, see the [Analyze](https://troubleshoot.sh/docs/analyze/) section of the Troubleshoot documentation. -Kubernetes requires a Secret of type `kubernetes.io/dockerconfigjson` to authenticate with a registry and pull a private image. When you reference a private image in a Pod definition, you also provide the name of the Secret in a `imagePullSecrets` key in the Pod definition. For more information, see [Specifying imagePullSecrets on a Pod](https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod) in the Kubernetes documentation. +The following are some recommended analyzers: -During installation, KOTS creates a `kubernetes.io/dockerconfigjson` type Secret that is based on the customer license. This pull secret grants access to the private image through the Replicated proxy registry or in the Replicated registry. Additionally, if the user configured a local image registry, then the pull secret contains the credentials for the local registry. You must provide the name of this KOTS-generated pull secret in any Pod definitions that reference the private image. 
+- [textAnalyze](https://troubleshoot.sh/docs/analyze/regex/)
+- [deploymentStatus](https://troubleshoot.sh/docs/analyze/deployment-status/)
+- [clusterPodStatuses](https://troubleshoot.sh/docs/analyze/cluster-pod-statuses/)
+- [replicasetStatus](https://troubleshoot.sh/docs/analyze/replicaset-status/)
+- [statefulsetStatus](https://troubleshoot.sh/docs/analyze/statefulset-status/)
+- [postgresql](https://troubleshoot.sh/docs/analyze/postgresql/), [mysql](https://troubleshoot.sh/docs/analyze/mysql/), and [redis](https://troubleshoot.sh/docs/analyze/redis/)
-You can inject the name of this pull secret into a field in the HelmChart custom resource using the Replicated ImagePullSecretName template function. During installation, KOTS sets the value of the corresponding field in your Helm chart `values.yaml` file with the rendered value of the ImagePullSecretName template function.
+### Customize the Default `clusterResources` Collector
-#### Example
+You can edit the default `clusterResources` collector using the following properties:
-The following example shows a `spec.values.image.pullSecrets` array in the HelmChart custom resource that uses the ImagePullSecretName template function to inject the name of the KOTS-generated pull secret:
+* `namespaces`: The list of namespaces where the resources and information are collected. If the `namespaces` key is not specified, then the `clusterResources` collector defaults to collecting information from all namespaces. The `default` namespace cannot be removed, but you can specify additional namespaces.
-```yaml
-# kots.io/v1beta2 HelmChart custom resource
+* `ignoreRBAC`: When true, the `clusterResources` collector does not check for RBAC authorization before collecting resource information from each namespace. This is useful when your cluster uses authorization webhooks that do not support SelfSubjectRuleReviews. Defaults to false.
-apiVersion: kots.io/v1beta2
-kind: HelmChart
-metadata:
-  name: samplechart
+For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) in the Troubleshoot documentation.
+ +The following example shows how to specify the namespaces where the `clusterResources` collector collects information: + +```yaml spec: - values: - image: - # Note: Use proxy.replicated.com or your custom domain - registry: '{{repl HasLocalRegistry | ternary LocalRegistryHost "proxy.replicated.com" }}' - repository: '{{repl HasLocalRegistry | ternary LocalRegistryNamespace "proxy/my-app/ecr.us-east-1.amazonaws.com/my-org" }}/api' - pullSecrets: - - name: '{{repl ImagePullSecretName }}' + collectors: + - clusterResources: + namespaces: + - default + - my-app-namespace + ignoreRBAC: true ``` -The `spec.values.image.pullSecrets` array in the HelmChart custom resource corresponds to a `image.pullSecrets` array in the Helm chart `values.yaml` file, as shown in the example below: +The following example shows how to use Helm template functions to set the namespace: ```yaml -# Helm chart values.yaml file +spec: + collectors: + - clusterResources: + namespaces: {{ .Release.Namespace }} + ignoreRBAC: true +``` -image: - registry: ecr.us-east-1.amazonaws.com - repository: my-org/api/nginx - pullSecrets: - - name: my-org-secret +The following example shows how to use the Replicated Namespace template function to set the namespace: + +```yaml +spec: + collectors: + - clusterResources: + namespaces: '{{repl Namespace }}' + ignoreRBAC: true ``` +For more information, see [Namespace](/reference/template-functions-static-context#namespace) in _Static Context_. -During installation, KOTS renders the ImagePullSecretName template function and adds the rendered pull secret name to the `image.pullSecrets` array in the Helm chart `values.yaml` file. +### Exclude the Default Collectors -Any templates in the Helm chart that access the `image.pullSecrets` field are updated to use the name of the KOTS-generated pull secret, as shown in the example below: +Although Replicated recommends including the default `clusterInfo` and `clusterResources` collectors because they collect a large amount of data to help with installation and debugging, you can optionally exclude them. + +The following example shows how to exclude both the clusterInfo and clusterResources collectors from your support bundle spec: ```yaml -apiVersion: v1 -kind: Pod -metadata: - name: nginx spec: - containers: - - name: nginx - image: {{ .Values.image.registry }}/{{ .Values.image.repository }} - {{- with .Values.image.pullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 2 }} - {{- end }} + collectors: + - clusterInfo: + exclude: true + - clusterResources: + exclude: true ``` -## Task 3: Add Pull Secret for Rate-Limited Docker Hub Images {#docker-secret} +### Examples -Docker Hub enforces rate limits for Anonymous and Free users. To avoid errors caused by reaching the rate limit, your users can run the `kots docker ensure-secret` command, which creates an `-kotsadm-dockerhub` secret for pulling Docker Hub images and applies the secret to Kubernetes manifests that have images. For more information, see [Avoiding Docker Hub Rate Limits](/enterprise/image-registry-rate-limits). +For common examples of collectors and analyzers used in support bundle specs, see [Examples of Support Bundle Specs](/vendor/support-bundle-examples). 
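+For instance, a spec that combines the customizations described in this topic, with a scoped `clusterResources` collector, a `logs` collector, and a `textAnalyze` analyzer, might look like the following sketch. The namespace, selector, log file path, and regex are illustrative only:
+
+```yaml
+apiVersion: troubleshoot.sh/v1beta2
+kind: SupportBundle
+metadata:
+  name: example
+spec:
+  collectors:
+    # limit the default clusterResources collector to specific namespaces
+    - clusterResources:
+        namespaces:
+        - default
+        - my-app-namespace
+    # collect logs from the application Pods into the app-logs directory
+    - logs:
+        name: app-logs
+        selector:
+          - app=my-app
+  analyzers:
+    # flag a known error message in the collected logs
+    - textAnalyze:
+        checkName: Database connection errors
+        fileName: app-logs/*.log
+        regex: "connection refused"
+        outcomes:
+          - fail:
+              when: "true"
+              message: The application logs contain database connection errors.
+          - pass:
+              when: "false"
+              message: No database connection errors were found.
+```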
-If you are deploying a Helm chart with Docker Hub images that could be rate limited, to support the use of the `kots docker ensure-secret` command, any Pod definitions in your Helm chart templates that reference the rate-limited image must be updated to access the `-kotsadm-dockerhub` pull secret, where `` is your application slug. For more information, see [Get the Application Slug](/vendor/vendor-portal-manage-app#slug).
+================
+File: docs/vendor/support-bundle-embedded.mdx
+================
+import EmbeddedClusterSupportBundle from "../partials/support-bundles/_generate-bundle-ec.mdx"
+import SupportBundleIntro from "../partials/support-bundles/_ec-support-bundle-intro.mdx"
-You can do this by adding the `-kotsadm-dockerhub` pull secret to a field in the `values` key of the HelmChart custom resource, along with a matching field in your Helm chart `values.yaml` file. During installation, KOTS sets the value of the matching field in the `values.yaml` file with the `-kotsadm-dockerhub` pull secret, and any Helm chart templates that access the value are updated.
+# Generating Support Bundles for Embedded Cluster
-For more information about Docker Hub rate limiting, see [Understanding Docker Hub rate limiting](https://www.docker.com/increase-rate-limits) on the Docker website.
+This topic describes how to generate a support bundle that includes cluster- and host-level information for [Replicated Embedded Cluster](/vendor/embedded-overview) installations.
-#### Example
+For information about generating host support bundles for Replicated kURL installations, see [Generating Host Bundles for kURL](/vendor/support-host-support-bundles).
-The following Helm chart `values.yaml` file includes `image.registry`, `image.repository`, and `image.pullSecrets` for a rate-limited Docker Hub image:
+## Overview
-```yaml
-# Helm chart values.yaml file
+<SupportBundleIntro/>
-image:
-  registry: docker.io
-  repository: my-org/example-docker-hub-image
-  pullSecrets: []
-```
+## Generate a Support Bundle
-The following HelmChart custom resource includes `spec.values.image.registry`, `spec.values.image.repository`, and `spec.values.image.pullSecrets`, which correspond to those in the Helm chart `values.yaml` file above.
+<EmbeddedClusterSupportBundle/>
-The `spec.values.image.pullSecrets` array lists the `-kotsadm-dockerhub` pull secret, where the slug for the application is `example-app-slug`:
+================
+File: docs/vendor/support-bundle-examples.mdx
+================
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import HttpSecret from "../partials/support-bundles/_http-requests-secret.mdx"
+import HttpCr from "../partials/support-bundles/_http-requests-cr.mdx"
+import NodeStatusSecret from "../partials/support-bundles/_node-status-secret.mdx"
+import NodeStatusCr from "../partials/support-bundles/_node-status-cr.mdx"
+import K8sVersionSecret from "../partials/support-bundles/_k8s-version-secret.mdx"
+import K8sVersionCr from "../partials/support-bundles/_k8s-version-cr.mdx"
+import DeployStatusSecret from "../partials/support-bundles/_deploy-status-secret.mdx"
+import DeployStatusCr from "../partials/support-bundles/_deploy-status-cr.mdx"
+import NodeResourcesSecret from "../partials/support-bundles/_node-resources-secret.mdx"
+import NodeResourcesCr from "../partials/support-bundles/_node-resources-cr.mdx"
+import LogsSelectorsSecret from "../partials/support-bundles/_logs-selectors-secret.mdx"
+import LogsSelectorsCr from "../partials/support-bundles/_logs-selectors-cr.mdx"
+import LogsLimitsSecret from "../partials/support-bundles/_logs-limits-secret.mdx"
+import LogsLimitsCr from "../partials/support-bundles/_logs-limits-cr.mdx"
+import RedisMysqlSecret from "../partials/support-bundles/_redis-mysql-secret.mdx"
+import RedisMysqlCr from "../partials/support-bundles/_redis-mysql-cr.mdx"
+import RunPodsSecret from "../partials/support-bundles/_run-pods-secret.mdx"
+import RunPodsCr from "../partials/support-bundles/_run-pods-cr.mdx"
-```yaml
-# kots.io/v1beta2 HelmChart custom resource
+# Example Support Bundle Specs
-apiVersion: kots.io/v1beta2
-kind: HelmChart
-metadata:
-  name: samplechart
-spec:
-  values:
-    image:
-      registry: docker.io
-      repository: my-org/example-docker-hub-image
-      pullSecrets:
-      - name: example-app-slug-kotsadm-dockerhub
-```
+This topic includes common examples of support bundle specifications. For more examples, see the [Troubleshoot example repository](https://github.com/replicatedhq/troubleshoot/tree/main/examples/support-bundle) in GitHub.
-During installation, KOTS adds the `example-app-slug-kotsadm-dockerhub` secret to the `image.pullSecrets` array in the Helm chart `values.yaml` file. Any templates in the Helm chart that access `image.pullSecrets` are updated to use `example-app-slug-kotsadm-dockerhub`:
+## Check API Deployment Status
-```yaml
-apiVersion: v1
-kind: Pod
-metadata:
-  name: example
-spec:
-  containers:
-    - name: example
-      image: {{ .Values.image.registry }}/{{ .Values.image.repository }}
-      {{- with .Values.image.pullSecrets }}
-      imagePullSecrets:
-        {{- toYaml . | nindent 2 }}
-      {{- end }}
-```
+The examples below use the `deploymentStatus` analyzer to check the status of the API deployment running in the cluster. The `deploymentStatus` analyzer uses data from the default `clusterResources` collector.
-## Task 4: Support the Use of Local Image Registries {#local-registries}
+For more information, see [Deployment Status](https://troubleshoot.sh/docs/analyze/deployment-status/) and [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) in the Troubleshoot documentation.
-Local image registries are required for KOTS installations in air-gapped environments with no outbound internet connection.
Also, users in online environments can optionally use a local registry. For more information about how users configure a local image registry with KOTS, see [Configuring Local Image Registries](/enterprise/image-registry-settings). + + + + + + + + -To support the use of local registries, configure the `builder` key. For more information about how to configure the `builder` key, see [`builder`](/reference/custom-resource-helmchart-v2#builder) in _HelmChart v2_. +## Check HTTP Requests -## Task 5: Add Backup Labels for Snapshots (KOTS Existing Cluster and kURL Installations Only) {#add-backup-labels-for-snapshots} +If your application has its own API that serves status, metrics, performance data, and so on, this information can be collected and analyzed. -:::note -The Replicated [snapshots](snapshots-overview) feature for backup and restsore is supported only for existing cluster installations with KOTS. Snapshots are not support for installations with Embedded Cluster. For more information about disaster recovery for installations with Embedded Cluster, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery.mdx). -::: +The examples below use the `http` collector and the `textAnalyze` analyzer to check that an HTTP request to the Slack API at `https://api.slack.com/methods/api.test` made from the cluster returns a successful response of `"status": 200,`. -The snapshots feature requires the following labels on all resources in your Helm chart that you want to be included in the backup: -* `kots.io/backup: velero` -* `kots.io/app-slug: APP_SLUG`, where `APP_SLUG` is the slug of your Replicated application. +For more information, see [HTTP](https://troubleshoot.sh/docs/collect/http/) and [Regular Expression](https://troubleshoot.sh/docs/analyze/regex/) in the Troubleshoot documentation. -For more information about snapshots, see [Understanding Backup and Restore](snapshots-overview). + + + + + + + + -To support backup and restore with snapshots, add the `kots.io/backup: velero` and `kots.io/app-slug: APP_SLUG` labels to fields under the HelmChart custom resource `optionalValues` key. Add a `when` statement that evaluates to true only when the customer license has the `isSnapshotSupported` entitlement. +## Check Kubernetes Version -The fields that you create under the `optionalValues` key must map to fields in your Helm chart `values.yaml` file. For more information about working with the `optionalValues` key, see [optionalValues](/reference/custom-resource-helmchart-v2#optionalvalues) in _HelmChart v2_. +The examples below use the `clusterVersion` analyzer to check the version of Kubernetes running in the cluster. The `clusterVersion` analyzer uses data from the default `clusterInfo` collector. -#### Example +For more information, see [Cluster Version](https://troubleshoot.sh/docs/analyze/cluster-version/) and [Cluster Info](https://troubleshoot.sh/docs/collect/cluster-info/) in the Troubleshoot documentation. -The following example shows how to add backup labels for snapshots in the `optionalValues` key of the HelmChart custom resource: + + + + + + + + -```yaml -# kots.io/v1beta2 HelmChart custom resource +## Check Node Resources -apiVersion: kots.io/v1beta2 -kind: HelmChart -metadata: - name: samplechart -spec: - ... 
-  optionalValues:
-    # add backup labels only if the license supports snapshots
-    - when: "repl{{ LicenseFieldValue `isSnapshotSupported` }}"
-      recursiveMerge: true
-      values:
-        mariadb:
-          commonLabels:
-            kots.io/backup: velero
-            kots.io/app-slug: repl{{ LicenseFieldValue "appSlug" }}
-          podLabels:
-            kots.io/backup: velero
-            kots.io/app-slug: repl{{ LicenseFieldValue "appSlug" }}
-```
+The following examples use the `nodeResources` analyzer to check the status of the nodes in the cluster. The `nodeResources` analyzer uses data from the default `clusterResources` collector.
-## Additional Information
+For more information, see [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) and [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) in the Troubleshoot documentation.
-### About the HelmChart Custom Resource
+
+
+
+
+
+
+
+
+## Collect Logs Using Multiple Selectors
-
-For more information about the HelmChart custom resource, including the unique requirements and limitations for the keys described in this topic, see [HelmChart v2](/reference/custom-resource-helmchart-v2).
+The examples below use the `logs` collector to collect logs from various Pods where application workloads are running. They also use the `textAnalyze` analyzer to analyze the logs for a known error.
-### HelmChart v1 and v2 Differences
+For more information, see [Pod Logs](https://troubleshoot.sh/docs/collect/logs/) and [Regular Expression](https://troubleshoot.sh/docs/analyze/regex/) in the Troubleshoot documentation.
-
+
+
+
+
+
+
+
+
-To support the use of local registries with version `kots.io/v1beta2` of the HelmChart custom resource, provide the necessary values in the builder field to render the Helm chart with all of the necessary images so that KOTS knows where to pull the images from to push them into the local registry.
-For more information about how to configure the `builder` key, see [Packaging Air Gap Bundles for Helm Charts](/vendor/helm-packaging-airgap-bundles) and [`builder`](/reference/custom-resource-helmchart-v2#builder) in _HelmChart v2_.
-The `kots.io/v1beta2` HelmChart custom resource has the following differences from `kots.io/v1beta1`:
-<table>
-  <tr>
-    <th>HelmChart v1beta2</th>
-    <th>HelmChart v1beta1</th>
-    <th>Description</th>
-  </tr>
-  <tr>
-    <td><code>apiVersion: kots.io/v1beta2</code></td>
-    <td><code>apiVersion: kots.io/v1beta1</code></td>
-    <td><code>apiVersion</code> is updated to <code>kots.io/v1beta2</code></td>
-  </tr>
-  <tr>
-    <td><code>releaseName</code></td>
-    <td><code>chart.releaseName</code></td>
-    <td><code>releaseName</code> is a top level field under <code>spec</code></td>
-  </tr>
-  <tr>
-    <td>N/A</td>
-    <td><code>helmVersion</code></td>
-    <td><code>helmVersion</code> field is removed</td>
-  </tr>
-  <tr>
-    <td>N/A</td>
-    <td><code>useHelmInstall</code></td>
-    <td><code>useHelmInstall</code> field is removed</td>
-  </tr>
-</table>
+You can use the `selector` attribute of the `logs` collector to find Pods that have the specified labels. Depending on the complexity of an application's labeling schema, you might need a few different declarations of the logs collector, as shown in the examples below. You can include the `logs` collector as many times as needed.
-### Migrate Existing KOTS Installations to HelmChart v2
+
+
+
+
+
+
+
+
-Existing KOTS installations can be migrated to use the KOTS HelmChart v2 method, without having to reinstall the application.
+## Collect Logs Using `limits`
-There are different steps for migrating to HelmChart v2 depending on the application deployment method used previously. For more information, see [Migrating Existing Installations to HelmChart v2](helm-v2-migrate).
+The examples below use the `logs` collector to collect Pod logs from the Pod where the application is running. These specifications use the `limits` field to set a `maxAge` and `maxLines` to limit the output provided.
+
+For more information, see [Pod Logs](https://troubleshoot.sh/docs/collect/logs/) in the Troubleshoot documentation.
+
+
+
+
+
+
+
+
+
+## Collect Redis and MySQL Server Information
+
+The following examples use the `redis` and `mysql` collectors to collect information about Redis and MySQL servers running in the cluster.
+
+For more information, see [Redis](https://troubleshoot.sh/docs/collect/redis/) and [MySQL](https://troubleshoot.sh/docs/collect/mysql/) in the Troubleshoot documentation.
+
+
+
+
+
+
+
+
+
+## Run and Analyze a Pod
+
+The examples below use the `textAnalyze` analyzer to check that a command successfully executes in a Pod running in the cluster. The Pod specification is defined in the `runPod` collector.
+
+For more information, see [Run Pods](https://troubleshoot.sh/docs/collect/run-pod/) and [Regular Expression](https://troubleshoot.sh/docs/analyze/regex/) in the Troubleshoot documentation.
+
+
+
+
+
+
+
+
+
+================
+File: docs/vendor/support-bundle-generating.mdx
+================
+import InstallPlugin from "../partials/support-bundles/_install-plugin.mdx"
+import GenerateBundle from "../partials/support-bundles/_generate-bundle.mdx"
+
+# Generating Support Bundles
+
+This topic describes how to generate support bundles from the command line using the kubectl support-bundle plugin. For more information about support bundles, see [About Preflights and Support Bundles](/vendor/preflight-support-bundle-about).
+
+The information in this topic applies to generating support bundles in clusters where you have kubectl access. For information about generating support bundles that include cluster- and host-level information for Replicated Embedded Cluster installations, see [Generating Support Bundles for Embedded Cluster](support-bundle-embedded).
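+For example, assuming the plugin described in the prerequisite below is already installed, you can generate a bundle from all of the support bundle specs found in the cluster by using the `--load-cluster-specs` flag:
+
+```bash
+kubectl support-bundle --load-cluster-specs
+```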
-
+## Prerequisite: Install the support-bundle Plugin
+
+<InstallPlugin/>
+
+## Generate a Bundle
+
+<GenerateBundle/>
+
+## Generate a Bundle when a Helm Installation Fails
+
+If a Helm installation fails and you want to collect a support bundle to assist with diagnostics, you can use a Replicated default specification to generate the support bundle.
+
+Run the following command:
+
+```bash
+kubectl support-bundle https://raw.githubusercontent.com/replicatedhq/troubleshoot-specs/main/in-cluster/default.yaml
+```
+
+================
+File: docs/vendor/support-enabling-direct-bundle-uploads.md
+================
+# Enabling Support Bundle Uploads (Beta)
+
+:::note
+Direct bundle uploads are in beta. The functionality, requirements, and limitations of direct bundle uploads are subject to change.
+:::
-   ```yaml
-   apiVersion: kots.io/v1beta1
-   kind: Config
-   metadata:
-     name: example-application
-   spec:
-     groups:
-     - name: database
-       title: Database
-       description: Database Options
-       items:
-       - name: postgres_type
-         type: radio
-         title: Postgres
-         default: embedded_postgres
-         items:
-         - name: embedded_postgres
-           title: Embedded Postgres
-         - name: external_postgres
-           title: External Postgres
-       - name: embedded_postgres_password
-         type: password
-         value: "{{repl RandomString 32}}"
-         hidden: true
-       - name: external_postgres_uri
-         type: text
-         title: External Postgres Connection String
-         help_text: Connection string for a Postgres 10.x server
-         when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
-   ```
+When this feature is enabled, customers using online KOTS installations can upload support bundles directly through the Admin Console UI, eliminating the need to share the generated bundle with you manually.
-   The YAML above does the following:
-   * Creates a field with "Embedded Postgres" or "External Postgres" radio buttons
-   * Uses the Replicated RandomString template function to generate a unique default password for the embedded Postgres instance at installation time
-   * Creates fields for the Postgres password and connection string, if the user selects the External Postgres option
+When enabled, your customers can use the **Send bundle to vendor** button in the Admin Console to upload a generated support bundle.
-   The following shows how this Config custom resource manifest file displays on the Admin Console Config page:
+Send bundle to vendor screen
-   ![Postgres Config Screen](/images/postgres-config-screen.gif)
+After clicking this button, the bundle will be immediately available under the Troubleshoot tab in the Vendor Portal team account associated with this customer.
-### Step 2: Create a Secret for Postgres
+For more information on how your customer can use this feature, see [Generating Support Bundles from the Admin Console](/enterprise/troubleshooting-an-app).
-The application has a few components that use Postgres, and they all mount the Postgres connection string from a single Secret.
+### How to Enable Direct Bundle Uploads
-Define a Secret for Postgres that renders differently if the user selects the Embedded Postgres or External Postgres option:
+Direct bundle uploads are disabled by default. To enable this feature for your customer:
-1. In the release, create a Secret file and add the following YAML:
+1. Log in to the Vendor Portal and navigate to your customer's **Manage Customer** page.
+1. Under the **License options** section, make sure your customer has **KOTS Install Enabled** checked, and then check the **Support Bundle Upload Enabled (Beta)** option.
-   ```yaml
-   apiVersion: v1
-   kind: Secret
-   metadata:
-     name: postgresql-secret
-   stringData:
-     uri: postgres://username:password@postgresql:5432/database?sslmode=disable
-   ```
+   ![Customer license options: configure direct support bundle upload](/images/configure-direct-support-bundle-upload.png)
+   [View a larger version of this image](/images/configure-direct-support-bundle-upload.png)
+1. Click **Save**.
-1.
Edit the `uri` field in the Secret to add a conditional statement that renders either a connection string to the embedded Postgres chart or to the user supplied instance:
+### Limitations
-   ```yaml
-   apiVersion: v1
-   kind: Secret
-   metadata:
-     name: postgresql-secret
-   stringData:
-     uri: repl{{ if ConfigOptionEquals "postgres_type" "embedded_postgres" }}postgres://myapplication:repl{{ ConfigOption "embedded_postgres_password" }}@postgres:5432/mydatabase?sslmode=disablerepl{{ else }}repl{{ ConfigOption "external_postgres_uri" }}repl{{ end }}
-   ```
+- You will not receive a notification when a customer sends a support bundle to the Vendor Portal. To avoid overlooking these uploads, activate this feature only if there is a reliable escalation process already in place for the customer license.
-   As shown above, you must use a single line for the conditional statement. Optionally, you can use the Replicated Base64Encode function to pipe a string through. See [Base64Encode](/reference/template-functions-static-context#base64encode) in _Static Context_.
+- This feature only supports online KOTS installations. If enabled but installed in air gap mode, the upload button will not appear.
+- There is a 500 MB limit for support bundles uploaded directly via the Admin Console.
-### Step 3: Add the Helm Chart
+================
+File: docs/vendor/support-host-support-bundles.md
+================
-Next, package the Helm chart and add it to the release in the Vendor Portal:
+import GenerateBundleHost from "../partials/support-bundles/_generate-bundle-host.mdx"
-1. Run the following commands to generate a `.tgz` package of the Helm chart:
+# Generating Host Bundles for kURL
-   ```
-   helm repo add bitnami https://charts.bitnami.com/bitnami
-   helm fetch bitnami/postgresql
-   ```
+This topic describes how to configure a host support bundle spec for Replicated kURL installations. For information about generating host support bundles for Replicated Embedded Cluster installations, see [Generating Host Bundles for Embedded Cluster](/vendor/support-bundle-embedded).
-1. Drag and drop the `.tgz` file into the file tree of the release. The Vendor Portal automatically creates a new HelmChart custom resource named `postgresql.yaml`, which references the `.tgz` file you uploaded.
+## Overview
-   For more information about adding Helm charts to a release in the Vendor Portal, see [Managing Releases with the Vendor Portal](releases-creating-releases).
+Host support bundles can be used to collect information directly from the host where a kURL cluster is running, such as CPU, memory, available block devices, and the operating system. Host support bundles can also be used for testing network connectivity and gathering the output of provided commands.
-### Step 4: Edit the HelmChart Custom Resource
+Host bundles for kURL are useful when:
+- The kURL cluster is offline
+- The kURL installer failed before the control plane was initialized
+- The Admin Console is not working
+- You want to debug host-specific performance and configuration problems even when the cluster is running
+You can create a YAML spec to allow users to generate host support bundles for kURL installations. For information, see [Create a Host Support Bundle Spec](#create-a-host-support-bundle-spec) below.
+Replicated also provides a default support bundle spec to collect host-level information for installations with the Embedded Cluster installer. For more information, see [Generating Host Bundles for Embedded Cluster](/vendor/support-bundle-embedded).
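+As a rough sketch, a host support bundle spec that gathers this kind of host-level information might begin with collectors like the following. The collector names are from the Troubleshoot host collectors:
+
+```yaml
+apiVersion: troubleshoot.sh/v1beta2
+kind: SupportBundle
+metadata:
+  name: host-overview
+spec:
+  hostCollectors:
+    # basic hardware and operating system information from the host
+    - cpu: {}
+    - memory: {}
+    - blockDevices: {}
+    - hostOS: {}
+```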
-Finally, edit the HelmChart custom resource:
+## Create a Host Support Bundle Spec
-1. In the HelmChart custom resource, add a mapping to the `values` key so that it uses the password you created. Also, add an `exclude` field to specify that the Postgres Helm chart must only be included when the user selects the embedded Postgres option on the Config page:
+To allow users to generate host support bundles for kURL installations, create a host support bundle spec in a YAML manifest that is separate from your application release and then share the file with customers to run on their hosts. This spec is separate from your application release because host collectors and analyzers are intended to run directly on the host and not with Replicated KOTS. If KOTS runs host collectors, the collectors are unlikely to produce the desired results because they run in the context of the kotsadm Pod.
-   ```yaml
-   apiVersion: kots.io/v1beta2
-   kind: HelmChart
-   metadata:
-     name: postgresql
-   spec:
-     exclude: 'repl{{ ConfigOptionEquals `postgres_type` `external_postgres` }}'
-     chart:
-       name: postgresql
-       chartVersion: 12.1.7
+To configure a host support bundle spec for kURL:
-     releaseName: samplechart-release-1
-
-     # values are used in the customer environment, as a pre-render step
-     # these values will be supplied to helm template
-     values:
-       auth:
-         username: username
-         password: "repl{{ ConfigOption `embedded_postgres_password` }}"
-         database: mydatabase
-   ```
+1. Create a SupportBundle custom resource manifest file (`kind: SupportBundle`).
-1. Save and promote the release. Then, install the release in a development environment to test the embedded and external Postgres options. For more information, see [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster).
+1. Configure all of your host collectors and analyzers in one manifest file. You can use the following resources to help create your specification:
-================
-File: docs/vendor/helm-optional-value-keys.md
-================
-import Values from "../partials/helm/_helm-cr-values.mdx"
-import OptionalValues from "../partials/helm/_helm-cr-optional-values.mdx"
-import OptionalValuesWhen from "../partials/helm/_helm-cr-optional-values-when.mdx"
-import OptionalValuesRecursiveMerge from "../partials/helm/_helm-cr-optional-values-recursive-merge.mdx"
-import ConfigExample from "../partials/helm/_set-values-config-example.mdx"
-import LicenseExample from "../partials/helm/_set-values-license-example.mdx"
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
+   - Access sample specifications in the Replicated troubleshoot-specs repository, which provides specifications for supporting your customers. See [troubleshoot-specs/host](https://github.com/replicatedhq/troubleshoot-specs/tree/main/host) in GitHub.
-# Setting Helm Values with KOTS
+   - View a list and details of the available host collectors and analyzers. See [All Host Collectors and Analyzers](https://troubleshoot.sh/docs/host-collect-analyze/all/) in the Troubleshoot documentation.
-This topic describes how to use the Replicated KOTS HelmChart custom resource to set and delete values in `values.yaml` files for Helm charts deployed with Replicated KOTS.
+   **Example:**
-For a tutorial that demonstrates how to set Helm values in a sample Helm chart using the KOTS HelmChart custom resource, see [Tutorial: Set Helm Chart Values with KOTS](/vendor/tutorial-config-setup).
+
-## Overview
+   The following example shows host collectors and analyzers for the number of CPUs and the amount of memory.
-The KOTS HelmChart custom resource [`values`](/reference/custom-resource-helmchart-v2#values) and [`optionalValues`](/reference/custom-resource-helmchart-v2#optionalvalues) keys create a mapping between KOTS and the `values.yaml` file for the corresponding Helm chart. This allows you to set or delete Helm values during installation or upgrade with KOTS, without having to make any changes to the Helm chart itself.
+   ```yaml
+   apiVersion: troubleshoot.sh/v1beta2
+   kind: SupportBundle
+   metadata:
+     name: host-collectors
+   spec:
+     hostCollectors:
+     - cpu: {}
+     - memory: {}
+     hostAnalyzers:
+     - cpu:
+         checkName: "Number of CPUs"
+         outcomes:
+           - fail:
+               when: "count < 2"
+               message: At least 2 CPU cores are required, and 4 CPU cores are recommended.
+           - pass:
+               message: This server has at least 4 CPU cores.
+     - memory:
+         checkName: "Amount of Memory"
+         outcomes:
+           - fail:
+               when: "< 4G"
+               message: At least 4G of memory is required, and 8G is recommended.
+           - pass:
+               message: The system has at least 8G of memory.
+   ```
-You can create this mapping by adding a value under `values` or `optionalValues` that uses the exact same key name as a value in the corresponding Helm chart `values.yaml` file. During installation or upgrade, KOTS sets the Helm chart `values.yaml` file with any matching values from the `values` or `optionalValues` keys.
+1. Share the file with your customers to run on their hosts.
-The `values` and `optionalValues` keys also support the use of Replicated KOTS template functions. When you use KOTS template functions in the `values` and `optionalValues` keys, KOTS renders the template functions and then sets any matching values in the corresponding Helm chart `values.yaml` with the rendered values. For more information, see [About Template Functions](/reference/template-functions-about).
+:::important
+Do not store support bundles on public shares, as they may still contain information that could be used to infer private data about the installation, even if some values are redacted.
+:::
-Common use cases for the HelmChart custom resource `values` and `optionalValues` keys include:
-* Setting Helm values based on user-supplied values from the KOTS Admin Console configuration page
-* Setting values based on the user's unique license entitlements
-* Conditionally setting values when a given condition is met
-* Deleting a default value key from the `values.yaml` file that should not be included for KOTS installations
+## Generate a Host Bundle for kURL
-For more information about the syntax for these fields, see [`values`](/reference/custom-resource-helmchart-v2#values) and [`optionalValues`](/reference/custom-resource-helmchart-v2#optionalvalues) in _HelmChart v2_.
+<GenerateBundleHost/>
-## Set Values
+================
+File: docs/vendor/support-inspecting-support-bundles.md
+================
-This section describes how to use KOTS template functions or static values in the HelmChart custom resource `values` key to set existing Helm values.
+# Inspecting Support Bundles
-### Using a Static Value
+You can use the Vendor Portal to get a visual analysis of customer support bundles and use the file inspector to drill down into the details and log files. Use this information to get insights and help troubleshoot your customer issues.
-You can use static values in the HelmChart custom resource `values` key when a given Helm value must be set the same for all KOTS installations. This allows you to set values for KOTS installations only, without affecting values for any installations that use the Helm CLI.
+To inspect a support bundle:
-For example, the following Helm chart `values.yaml` file contains `kotsOnlyValue.enabled`, which is set to `false` by default:
+1.
In the Vendor Portal, go to the [**Troubleshoot**](https://vendor.replicated.com/troubleshoot) page and click **Add support bundle > Upload a support bundle**. -You can use static values in the HelmChart custom resource `values` key when a given Helm value must be set the same for all KOTS installations. This allows you to set values for KOTS installations only, without affecting values for any installations that use the Helm CLI. +1. In the **Upload a support bundle** dialog, drag and drop or use the file selector to upload a support bundle file to the Vendor Portal. -For example, the following Helm chart `values.yaml` file contains `kotsOnlyValue.enabled`, which is set to `false` by default: + Upload a support bundle dialog -```yaml -# Helm chart values.yaml -kotsOnlyValue: - enabled: false -``` + [View a larger version of this image](/images/support-bundle-analyze.png) -The following HelmChart custom resource contains a mapping to `kotsOnlyValue.enabled` in its `values` key, which is set to `true`: +1. (Optional) If the support bundle relates to an open support issue, select the support issue from the dropdown to share the bundle with Replicated. -```yaml -# KOTS HelmChart custom resource +1. Click **Upload support bundle**. -apiVersion: kots.io/v1beta2 -kind: HelmChart -metadata: - name: samplechart -spec: - chart: - name: samplechart - chartVersion: 3.1.7 - - releaseName: samplechart-release-1 + The **Support bundle analysis** page opens. The **Support bundle analysis** page includes information about the bundle, any available instance reporting data from the point in time when the bundle was collected, an analysis overview that can be filtered to show errors and warnings, and a file inspector. - values: - kotsOnlyValue: - enabled: true -``` + ![Support bundle analysis overview](/images/support-bundle-analysis-overview.png) -During installation or upgrade with KOTS, KOTS sets `kotsOnlyValue.enabled` in the Helm chart `values.yaml` file to `true` so that the KOTS-only value is enabled for the installation. For installations that use the Helm CLI instead of KOTS, `kotsOnlyValue.enabled` remains `false`. + [View a larger version of this image](/images/support-bundle-analysis-overview.png) -### Using KOTS Template Functions +1. On the **File inspector** tab, select any files from the directory tree to inspect the details of any files included in the support bundle, such as log files. -You can use KOTS template functions in the HelmChart custom resource `values` key to set Helm values with the rendered template functions. For more information, see [About Template Functions](/reference/template-functions-about). +1. (Optional) Click **Download bundle** to download the bundle. This can be helpful if you want to access the bundle from another system or if other team members want to access the bundle and use other tools to examine the files. - - - - - - - - +1. (Optional) Navigate back to the [**Troubleshoot**](https://vendor.replicated.com/troubleshoot) page and click **Create cluster** to provision a cluster with Replicated Compatibility Matrix. This can be helpful for creating customer-representative environments for troubleshooting. For more information about creating clusters with Compatibility Matrix, see [Using Compatibility Matrix](testing-how-to). 
-## Conditionally Set Values + Cluster configuration dialog - + [View a larger version of this image](/images/cmx-cluster-configuration.png) -For example, the following HelmChart custom resource uses the `optionalValues` key and the [ConfigOptionEquals](/reference/template-functions-config-context#configoptionequals) template function to set user-supplied values for an external MariaDB database: +1. If you cannot resolve your customer's issue and need to submit a support request, go to the [**Support**](https://vendor.replicated.com/) page and click **Open a support request**. For more information, see [Submitting a Support Request](support-submit-request). -```yaml -# KOTS HelmChart custom resource + :::note + The **Share with Replicated** button on the support bundle analysis page does _not_ open a support request. You might be directed to use the **Share with Replicated** option when you are already interacting with a Replicated team member. + ::: -apiVersion: kots.io/v1beta2 -kind: HelmChart -metadata: - name: wordpress -spec: - chart: - name: wordpress - chartVersion: 15.3.2 + ![Submit a Support Request](/images/support.png) - releaseName: sample-release-1 + [View larger version of this image](/images/support.png) - optionalValues: - - when: "repl{{ ConfigOptionEquals `mariadb_type` `external`}}" - recursiveMerge: false - values: - externalDatabase: - host: "repl{{ ConfigOption `external_db_host`}}" - user: "repl{{ ConfigOption `external_db_user`}}" - password: "repl{{ ConfigOption `external_db_password`}}" - database: "repl{{ ConfigOption `external_db_database`}}" - port: "repl{{ ConfigOption `external_ db_port`}}" -``` +================ +File: docs/vendor/support-modular-support-bundle-specs.md +================ +# About Creating Modular Support Bundle Specs -During installation, KOTS renders the template functions and sets the `externalDatabase` values in the HelmChart `values.yaml` file only when the user selects the `external` option for `mariadb_type` on the Admin Console configuration page. +This topic describes how to use a modular approach to creating support bundle specs. -### About Recursive Merge for optionalValues {#recursive-merge} +## Overview - +Support bundle specifications can be designed using a modular approach. This refers to creating multiple different specs that are scoped to individual components or microservices, rather than creating a single, large spec. For example, for applications that are deployed as multiple Helm charts, vendors can create a separate support bundle spec in the `templates` directory in the parent chart as well as in each subchart. -For example, the following HelmChart custom resource has both `values` and `optionalValues`: +This modular approach helps teams develop specs that are easier to maintain and helps teams to avoid merge conflicts that are more likely to occur when making to changes to a large spec. When generating support bundles for an application that includes multiple modular specs, the specs are merged so that only one support bundle archive is generated. 
-```yaml
-values:
-  favorite:
-    drink:
-      hot: tea
-      cold: soda
-    dessert: ice cream
-  day: saturday
+## Example: Support Bundle Specifications by Component {#component}
-optionalValues:
-  - when: '{{repl ConfigOptionEquals "example_config_option" "1" }}'
-    recursiveMerge: false
-    values:
-      example_config_option:
-        enabled: true
-      favorite:
-        drink:
-          cold: lemonade
-```
+Using a modular approach for an application that ships MySQL, NGINX, and Redis, your team can add collectors and analyzers using a separate support bundle specification for each component.
-The `values.yaml` file for the associated Helm chart defines the following key value pairs:
+`manifests/nginx/troubleshoot.yaml`
-```yaml
-favorite:
-  drink:
-    hot: coffee
-    cold: soda
-  dessert: pie
-```
-The `templates/configmap.yaml` file for the Helm chart maps these values to the following fields:
+This collector and analyzer checks compliance for the minimum number of replicas for the NGINX component:
-```yaml
-apiVersion: v1
-kind: ConfigMap
+ ```yaml
+apiVersion: troubleshoot.sh/v1beta2
+kind: SupportBundle
 metadata:
-  name: test-configmap
-data:
-  favorite_day: {{ .Values.favorite.day }}
-  favorite_dessert: {{ .Values.favorite.dessert }}
-  favorite_drink_cold: {{ .Values.favorite.drink.cold }}
-  favorite_drink_hot: {{ .Values.favorite.drink.hot }}
-```
+  name: nginx
+spec:
+  collectors:
+    - logs:
+        selector:
+          - app=nginx
+  analyzers:
+    - deploymentStatus:
+        name: nginx
+        outcomes:
+          - fail:
+              when: replicas < 2
+  ```
-When `recursiveMerge` is set to `false`, the ConfigMap for the deployed application includes the following key value pairs:
+`manifests/mysql/troubleshoot.yaml`
-```yaml
-favorite_day: null
-favorite_dessert: pie
-favorite_drink_cold: lemonade
-favorite_drink_hot: coffee
-```
+This collector and analyzer checks compliance for the minimum version of the MySQL component:
-In this case, the top level keys in `optionalValues` override the top level keys in `values`.
+ ```yaml
+apiVersion: troubleshoot.sh/v1beta2
+kind: SupportBundle
+metadata:
+  name: mysql
+spec:
+  collectors:
+    - mysql:
+        uri: 'dbuser:**REDACTED**@tcp(db-host)/db'
+  analyzers:
+    - mysql:
+        checkName: Must be version 8.x or later
+        outcomes:
+          - fail:
+              when: version < 8.x
+  ```
-KOTS then uses the values from the Helm chart `values.yaml` to populate the remaining fields in the ConfigMap: `favorite_day`, `favorite_dessert`, and `favorite_drink_hot`.
-When `recursiveMerge` is set to `true`, the ConfigMap for the deployed application includes the following key value pairs:
-```yaml
-favorite_day: saturday
-favorite_dessert: ice cream
-favorite_drink_cold: lemonade
-favorite_drink_hot: tea
-```
-In this case, all keys from `values` and `optionalValues` are merged. Because both include `favorite.drink.cold`, KOTS uses `lemonade` from `optionalValues`.
-## Delete a Default Key
-If the Helm chart `values.yaml` contains a static value that must be deleted when deploying with KOTS, you can set the value to `"null"` (including the quotation marks) in the `values` key of the HelmChart custom resource.
-A common use case for deleting default value keys is when you include a community Helm chart as a dependency. Because you cannot control how the community chart is built and structured, you might want to change some of the default behavior.
+`manifests/redis/troubleshoot.yaml`
-For example, the following HelmChart custom resource sets an `exampleKey` value to `"null"` when the chart is deployed with KOTS:
+This collector and analyzer checks that the Redis server is responding:
 ```yaml
-# KOTS HelmChart custom resource
+apiVersion: troubleshoot.sh/v1beta2
+kind: SupportBundle
-apiVersion: kots.io/v1beta2
-kind: HelmChart
 metadata:
-  name: samplechart
+  name: redis
 spec:
-  chart:
-    name: samplechart
-    chartVersion: 3.1.7
-
-  releaseName: samplechart-release-1
-
-  values:
-    exampleKey: "null"
+  collectors:
+    - redis:
+        collectorName: redis
+        uri: rediss://default:password@hostname:6379
 ```
-For more information about using a `null` value to delete a key, see [Deleting a Default Key](https://helm.sh/docs/chart_template_guide/values_files/#deleting-a-default-key) in the Helm documentation.
+A single support bundle archive can be generated from a combination of these manifests using the `kubectl support-bundle --load-cluster-specs` command.
+For more information and additional options, see [Generating Support Bundles](support-bundle-generating).
-================
-File: docs/vendor/helm-packaging-airgap-bundles.mdx
-================
-import HelmBuilderRequirements from "../partials/helm/_helm-builder-requirements.mdx"
-import BuilderAirgapIntro from "../partials/helm/_helm-cr-builder-airgap-intro.mdx"
-import BuilderExample from "../partials/helm/_helm-cr-builder-example.mdx"
-import AirGapBundle from "../partials/airgap/_airgap-bundle.mdx"
+================
+File: docs/vendor/support-online-support-bundle-specs.md
+================
-# Packaging Air Gap Bundles for Helm Charts
+# Making Support Bundle Specs Available Online
-This topic describes how to package and build air gap bundles for releases that contain one or more Helm charts. This topic applies to applications deployed with Replicated KOTS.
+This topic describes how to make your application's support bundle specs available online as well as how to link to online specs.
-## Overview
+## Overview
-
+You can make the definition of one or more support bundle specs available online in a source repository and link to it from the specs in the cluster. This approach lets you update collectors and analyzers outside of the application release and notify customers of potential problems and fixes in between application updates.
-When building the `.airgap` bundle for a release that contains one or more Helm charts, the Vendor Portal renders the Helm chart templates in the release using values supplied in the KOTS HelmChart custom resource [`builder`](/reference/custom-resource-helmchart-v2#builder) key.
+The schema supports a `uri:` field that, when set, causes the support bundle generation to use the online specification. If the URI is unreachable or unparseable, any collectors or analyzers in the specification are used as a fallback.
-## Configure the `builder` Key
+You update collectors and analyzers in the online specification to manage bug fixes. When a customer generates a support bundle, the online specification can detect those potential problems in the cluster and let them know how to fix it. Without the URI link option, you must wait for the next time your customers update their applications or Kubernetes versions to get notified of potential problems. The URI link option is particularly useful for customers that do not update their application routinely.
-You should configure the `builder` key if you need to change any default values in your Helm chart so that the `.airgap` bundle for the release includes all images needed to successfully deploy the chart. For example, you can change the default Helm values so that images for any conditionally-deployed components are always included in the air gap bundle. Additionally, you can use the `builder` key to set any `required` values in your Helm chart that must be set for the chart to render. +If you are using a modular approach to designing support bundles, you can use multiple online specs. Each specification supports one URI link. For more information about modular specs, see [About Creating Modular Support Bundle Specs](support-modular-support-bundle-specs). -The values in the `builder` key map to values in the given Helm chart's `values.yaml` file. For example, `spec.builder.postgres.enabled` in the example HelmChart custom resource below would map to a `postgres.enabled` field in the `values.yaml` file for the `samplechart` chart: +## Example: URI Linking to a Source Repository -```yaml -# KOTS HelmChart custom resource +This example shows how Replicated could set up a URI link for one of its own components. You can follow a similar process to link to your own online repository for your support bundles. -apiVersion: kots.io/v1beta2 -kind: HelmChart +Replicated kURL includes an EKCO add-on for maintenance on embedded clusters, such as automating certificate rotation or data migration tasks. Replicated can ship this component with a support bundle manifest that warns users if they do not have this add-on installed or if it is not running in the cluster. + +**Example: Release v1.0.0** + +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: SupportBundle metadata: - name: samplechart +  name: ekco spec: - chart: - name: samplechart - chartVersion: 3.1.7 - builder: - postgres: - enabled: true + collectors: + analyzers: + - deploymentStatus: + checkName: Check EKCO is operational + name: ekc-operator + namespace: kurl + outcomes: + - fail: + when: absent + message: EKCO is not installed - please add the EKCO component to your kURL spec and re-run the installer script + - fail: + when: "< 1" + message: EKCO does not have any ready replicas + - pass: + message: EKCO has at least 1 replica ``` -For requirements, recommendations, and examples of common use cases for the `builder` key, see the sections below. - -### Requirements and Recommendations - - +If a bug is discovered at any time after the release of the specification above, Replicated can write an analyzer for it in an online specification. By adding a URI link to the online specification, the support bundle uses the assets hosted in the online repository, which is kept current. -### Example: Set the Image Registry for Air Gap Installations +The `uri` field is added to the specification as a raw file link. Replicated hosts the online specification on [GitHub](https://github.com/replicatedhq/troubleshoot-specs/blob/main/in-cluster/default.yaml). -For air gap installations, if the [Replicated proxy registry](/vendor/private-images-about) domain `proxy.replicated.com` is used as the default image name for any images, you need to rewrite the image to the upstream image name so that it can be processed and included in the air gap bundle. 
You can use the `builder` key to do this by hardcoding the upstream location of the image (image registry, repository, and tag), as shown in the example below:
+**Example: Release v1.1.0**

 ```yaml
-apiVersion: kots.io/v1beta2
-kind: HelmChart
+apiVersion: troubleshoot.sh/v1beta2
+kind: SupportBundle
 metadata:
-  name: samplechart
+  name: ekco
 spec:
-  chart:
-    name: samplechart
-    chartVersion: 3.1.7
-  builder:
-    my-service:
-      image:
-        registry: 12345.dkr.ecr.us-west-1.amazonaws.com
-        repository: my-app
-        tag: "1.0.2"
+  uri: https://raw.githubusercontent.com/replicatedhq/troubleshoot-specs/main/in-cluster/default.yaml
+  collectors: [...]
+  analyzers: [...]
 ```

-When building the `.airgap` bundle for the release, the Vendor Portal uses the registry, repository, and tag values supplied in the `builder` key to template the Helm chart, rather than the default values defined in the Helm `values.yaml` file. This ensures that the image is pulled from the upstream registry using the credentials supplied in the Vendor Portal, without requiring any changes to the Helm chart directly.
+Using the `uri:` property, the support bundle gets the latest online specification if it can, or falls back to the collectors and analyzers listed in the specification that is in the cluster.

-### Example: Include Conditional Images
+Note that because the release version 1.0.0 did not contain the URI, Replicated would have to wait until existing users upgrade a cluster before getting the benefit of the new analyzer. Then, going forward, those users get any future online analyzers without having to upgrade. New users who install the version containing the URI as their initial installation automatically get any online analyzers when they generate a support bundle.

-Many applications have images that are included or excluded based on a given condition. For example, enterprise users might have the option to deploy an embedded database with the application or bring their own database. To support this use case for air gap installations, the images for any conditionally-deployed components must always be included in the air gap bundle.
-
-## Related Topics

-* [builder](/reference/custom-resource-helmchart-v2#builder)
-* [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap)
-* [Air Gap Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster-airgapped)
+For more information about the URI, see [Troubleshoot schema supports a `uri://` field](https://troubleshoot.sh/docs/support-bundle/supportbundle/#uri) in the Troubleshoot documentation. For a complete example, see [Debugging Kubernetes: Enhancements to Troubleshoot](https://www.replicated.com/blog/debugging-kubernetes-enhancements-to-troubleshoot/#Using-online-specs-for-support-bundles) in The Replicated Blog.

================
-File: docs/vendor/helm-v2-migrate.md
+File: docs/vendor/support-submit-request.md
================
-# Migrating Existing Installations to HelmChart v2

-This topic describes how to migrate existing Replicated KOTS installations to the KOTS HelmChart `kots.io/v1beta2` (HelmChart v2) installation method, without having to reinstall the application. It also includes information about how to support both HelmChart v1 and HelmChart v2 installations from a single release, and lists frequently-asked questions (FAQs) related to migrating to HelmChart v2.

-## Migrate to HelmChart v2

-### Requirements

-* The HelmChart v2 custom resource is supported with KOTS v1.99.0 and later.
If any of your customers are running a version of KOTS earlier than v1.99.0, see [Support Customers on KOTS Versions Earlier Than v1.99.0](#support-both-v1-v2) below for more information about how to support both HelmChart v1 and HelmChart v2 installations from the same release.
+# Submitting a Support Request

-* The Helm `--take-ownership` flag is supported with KOTS v1.124.0 and later.
+You can submit a support request and a support bundle using the Replicated Vendor Portal. Uploading a support bundle is secure and helps the Replicated support team troubleshoot your application faster. Severity 1 issues are resolved three times faster when you submit a support bundle with your support request.

-* The `kots.io/keep` annotation is supported with KOTS v1.122.0 and later.
+## Prerequisites

-### Migrate From HelmChart v1 with `useHelmInstall: true`
+The following prerequisites must be met to submit support requests:

-To migrate existing installations from HelmChart v1 with `useHelmInstall: true` to HelmChart v2:
+* Your Vendor Portal account must be configured for access to support before you can submit support requests. Contact your administrator to ensure that you are added to the correct team.

-1. In a development environment, install an application release using the KOTS HelmChart v1 with `useHelmInstall: true` method. You will use this installation to test the migration to HelmChart v2.
+* Your team must have a replicated-collab repository configured. If you are a team administrator and need information about getting a collab repository set up and adding users, see [Adding Users to the Collab Repository](team-management-github-username#add).

-1. Create a new release containing your application files.
-1. For each Helm chart in the release, find the corresponding HelmChart custom resource and update `apiVersion` to `kots.io/v1beta2`. Then update it to rewrite images, inject image pull secrets, and add backup labels. See [Configuring the HelmChart Custom Resource v2](helm-native-v2-using).
+## Submit a Support Request

-1. Promote the release to an internal-only channel that your team uses for testing.
+To submit a support request:

-1. In your development environment, log in to the Admin Console and confirm that you can upgrade to the new HelmChart v2 release.
+1. From the [Vendor Portal](https://vendor.replicated.com), click **Support > Submit a Support Request** or go directly to the [Support page](https://vendor.replicated.com/support).

-1. When you are done testing, promote the release to one or more of your customer-facing channels. Customers can follow the standard upgrade process in the Admin Console to update their instance.
+1. In section 1 of the Support Request form, complete the fields with information about your issue.

-### Migrate From HelmChart v1 with `useHelmInstall: false`
+1. In section 2, do _one_ of the following actions:
+   - Use your pre-selected support bundle or select a different bundle in the pick list
+   - Select **Upload and attach a new support bundle** and attach a bundle from your file browser

-This section describes how to migrate existing HelmChart v1 installations with `useHelmInstall: false`.
+1. Click **Submit Support Request**. You receive a link to your support issue, where you can interact with the support team.

-:::note
-When the `useHelmInstall` field is _not_ set in the HelmChart custom resource, `false` is the default value.
-:::
+   :::note
+   Click **Back** to exit without submitting a support request.
+   :::

================
+File: docs/vendor/team-management-github-username.mdx
================
+import CollabRepoAbout from "../partials/collab-repo/_collab-repo-about.mdx"
+import CollabRbacResourcesImportant from "../partials/collab-repo/_collab-rbac-resources-important.mdx"
+import CollabExistingUser from "../partials/collab-repo/_collab-existing-user.mdx"

-These migration steps ensure that KOTS does not uninstall any resources that were previously deployed without Helm, and that Helm takes ownership of these existing resources.

+# Managing Collab Repository Access

-To migrate existing installations from HelmChart v1 with `useHelmInstall: false` to HelmChart v2:
+This topic describes how to add users to the Replicated collab GitHub repository automatically through the Replicated Vendor Portal. It also includes information about managing user roles in this repository using Vendor Portal role-based access control (RBAC) policies.

-1. Create a new release containing your application files:
+## Overview {#overview}

-   1. In the release, for any resources defined in Kubernetes manifests or in your Helm `templates` that were previously installed with HelmChart v1 and `useHelmInstall: false`, add the `kots.io/keep` annotation. The `kots.io/keep` annotation prevents KOTS from uninstalling these resources when upgrading using the HelmChart v2 method.
+

-      **Example:**
-
-      ```yaml
-      apiVersion: apps/v1
-      kind: StatefulSet
-      metadata:
-        name: postgresql
-        # Add the kots.io/keep annotation
-        annotations:
-          kots.io/keep: "true"
-      ```
-
-   1. Save the release.
-
-1. Create another new release:
+To get access to the collab repository, members of a Vendor Portal team can add their GitHub username to the [Account Settings](https://vendor.replicated.com/account-settings) page in the Vendor Portal. The Vendor Portal then automatically provisions the team member as a user in the collab repository in GitHub. The RBAC policy that the member is assigned in the Vendor Portal determines the GitHub role that they have in the collab repository.

-   1. For each Helm chart in the release, find the corresponding HelmChart custom resource and update `apiVersion` to `kots.io/v1beta2`. Then update it to rewrite images, inject image pull secrets, and add backup labels. See [Configuring the HelmChart Custom Resource v2](helm-native-v2-using).
+Replicated recommends that Vendor Portal admins manage user access to the collab repository through the Vendor Portal, rather than manually managing users through GitHub. Managing access through the Vendor Portal has the following benefits:
+* Users are automatically added to the collab repository when they add their GitHub username in the Vendor Portal.
+* Users are automatically removed from the collab repository when they are removed from the Vendor Portal team.
+* Vendor portal and collab repository RBAC policies are managed from a single location.

-   1. In the HelmChart custom resource, under the `helmUpgradeFlags` field, add the `--take-ownership` flag:
+## Add Users to the Collab Repository {#add}

-      ```yaml
-      # HelmChart v2
-      apiVersion: kots.io/v1beta2
-      kind: HelmChart
-      metadata:
-        name: samplechart
-      spec:
-        helmUpgradeFlags:
-          - --take-ownership
-      ```

-      When the `--take-ownership` upgrade flag is enabled, Helm automatically takes ownership of resources that were previously deployed without Helm.

-   1. Save the release.
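After the upgrade, one way to spot-check that Helm has taken ownership of a previously unmanaged resource is to inspect the ownership metadata that Helm sets when it adopts a resource. This is a minimal sketch, and the resource name and namespace are hypothetical:

```bash
# Helm marks adopted resources with release annotations and a managed-by label.
# Both commands should return values pointing at the Helm release after upgrade.
kubectl get statefulset postgresql -n my-app-namespace \
  -o jsonpath='{.metadata.annotations.meta\.helm\.sh/release-name}'
kubectl get statefulset postgresql -n my-app-namespace \
  -o jsonpath='{.metadata.labels.app\.kubernetes\.io/managed-by}'
```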
+This procedure describes how to use the Vendor Portal to access the collab repository for the first time as an Admin, then automatically add new and existing users to the repository. This allows you to use the Vendor Portal to manage the GitHub roles for users in the collab repository, rather than manually adding, managing, and removing users from the repository through GitHub.

-1. Test the migration process:
+### Prerequisite

-   1. Promote the first release to an internal-only channel that your team uses for testing.
+Your team must have a replicated-collab repository configured to add users to
+the repository and to manage repository access through the Vendor Portal. To get
+a collab support repository configured in GitHub for your team, complete the onboarding
+instructions in the email you received from Replicated. You can also access the [Replicated community help forum](https://community.replicated.com/) for assistance.

-   1. In a development environment, install the first release.
+### Procedure

-   1. Promote the second release to the same channel.
-
-   1. In your development environment, access the Admin Console to upgrade to the second release.
+To add new and existing users to the collab repository through the Vendor Portal:

-1. When you are done testing, promote the first release to one or more of your customer-facing channels. Replicated recommends that you mark the release as required by enabling **Prevent this release from being skipped during upgrades**. For more information about required releases, see [Properties](/vendor/releases-about#properties) in _About Channels and Releases_.
+1. As a Vendor Portal admin, log in to your Vendor Portal account. In the [Account Settings](https://vendor.replicated.com/account-settings) page, add your GitHub username and click **Save Changes**.

-1. Promote the second release to the same customer-facing channel(s). Replicated recommends that you mark the release as required by enabling **Prevent this release from being skipped during upgrades**.
+   Account info in the Vendor Portal

-1. Instruct customers to migrate by first upgrading to the release where the `kots.io/keep` annotation is applied to your resources, then upgrading to the release with HelmChart v2.
+   The Vendor Portal automatically adds your GitHub username to the collab repository and assigns it the Admin role. You receive an email with details about the collab repository when you are added.

-1. In subsequent releases, remove the `--take-ownership` flag from the `helmUpgradeFlags` field and remove the `kots.io/keep` annotation from resources in your Helm templates.
+1. Follow the collab repository link from the email that you receive to log in to your GitHub account and access the repository.

-### Migrate From Standard Kubernetes Manifests
+1. (Recommended) Manually remove any users in the collab repository that were previously added through GitHub.

-This section describes how to migrate existing KOTS installations of applications that were previously packaged as standard Kubernetes manifests and are now packaged as one or more Helm charts. This migration path involves performing two upgrades to ensure that KOTS does not uninstall any resources that were adopted into Helm charts, and that Helm can take ownership of resources that were previously deployed without Helm.
+   :::note
+   
+   :::

-To migrate applications that were previously packaged as standard Kubernetes manifests:
+1. (Optional) In the Vendor Portal, go to the [Team](https://vendor.replicated.com/team/members) page.
For each team member, click **Edit permissions** as necessary to specify their GitHub role in the collab repository.

-1. Create a new release containing the Kubernetes manifests for your application:
+   For information about which policies to select, see [About GitHub Roles](#about-github-roles).

-   1. For each of the application manifests in the release, add the `kots.io/keep` annotation. The `kots.io/keep` annotation prevents KOTS from uninstalling resources that were previously installed without Helm when upgrading using the HelmChart v2 method.
+1. Instruct each Vendor Portal team member to add their GitHub username to the [Account Settings](https://vendor.replicated.com/account-settings) page in the Vendor Portal.

-      **Example:**
-
-      ```yaml
-      apiVersion: apps/v1
-      kind: StatefulSet
-      metadata:
-        name: postgresql
-        annotations:
-          kots.io/keep: "true"
-      ```
+   The Vendor Portal adds the username to the collab repository and assigns a GitHub role to the user based on their Vendor Portal policy.

-   1. Save the release.
+   Users receive an email when they are added to the collab repository.

-1. Create another new release:
+## About GitHub Roles

-   1. In the release, add your application Helm chart(s). Remove the application manifests for resources that were adopted into the Helm chart(s).
+When team members add a GitHub username to their Vendor Portal account, the Vendor Portal determines how to assign the user a default GitHub role in the collab repository based on the following criteria:
+* If the GitHub username already exists in the collab repository
+* The RBAC policy assigned to the member in the Vendor Portal

-   1. For each Helm chart in the release, add a corresponding KOTS HelmChart custom resource with `apiVersion` set to `kots.io/v1beta2`. Configure the resource to rewrite images, inject image pull secrets, and add backup labels. See [Configuring the HelmChart Custom Resource v2](helm-native-v2-using).
+You can also update any custom RBAC policies in the Vendor Portal to change the default GitHub roles for those policies.

-   1. In the HelmChart custom resource, under the `helmUpgradeFlags` field, add the `--take-ownership` flag:
+### Default Roles for Existing Users {#existing-username}

-      ```yaml
-      # HelmChart v2
-      apiVersion: kots.io/v1beta2
-      kind: HelmChart
-      metadata:
-        name: samplechart
-      spec:
-        helmUpgradeFlags:
-          - --take-ownership
-      ```
+

-      When the `--take-ownership` upgrade flag is enabled, Helm automatically takes ownership of resources that were previously deployed without Helm.
+### Default Role Mapping {#role-mapping}

-   1. Save the release.
+When team members add a GitHub username to their Vendor Portal account, the Vendor Portal assigns them to a GitHub role in the collab repository that corresponds to their Vendor Portal policy. For example, users with the default Read Only policy in the Vendor Portal are assigned the Read GitHub role in the collab repository.

-1. Test the migration process:
+For team members assigned custom RBAC policies in the Vendor Portal, you can edit the custom policy to change their GitHub role in the collab repository. For more information, see [About Changing the Default GitHub Role](#custom) below.

-   1. Promote the first release to an internal-only channel that your team uses for testing.
+The table below describes how each default and custom Vendor Portal policy corresponds to a role in the collab repository in GitHub.
For more information about each of the GitHub roles described in this table, see [Permissions for each role](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/repository-roles-for-an-organization#permissions-for-each-role) in the GitHub documentation.

+| Vendor Portal Role | GitHub collab Role | Description |
+|--------------------|--------------------|-------------|
+| Admin | Admin | Members assigned the default Admin role in the Vendor Portal are assigned the GitHub Admin role in the collab repository. |
+| Support Engineer | Triage | Members assigned the custom Support Engineer role in the Vendor Portal are assigned the GitHub Triage role in the collab repository. For information about creating a custom Support Engineer policy in the Vendor Portal, see Support Engineer in [Configuring RBAC Policies](team-management-rbac-configuring). For information about editing custom RBAC policies to change this default GitHub role, see [About Changing the Default GitHub Role](#custom) below. |
+| Read Only | Read | Members assigned the default Read Only role in the Vendor Portal are assigned the GitHub Read role in the collab repository. |
+| Sales | N/A | Users assigned the custom Sales role in the Vendor Portal do not have access to the collab repository. For information about creating a custom Sales policy in the Vendor Portal, see Sales in [Configuring RBAC Policies](team-management-rbac-configuring). For information about editing custom RBAC policies to change this default GitHub role, see [About Changing the Default GitHub Role](#custom) below. |
+| Custom policies with `**/admin` under `allowed:` | Admin | By default, members assigned to a custom RBAC policy that specifies `**/admin` under `allowed:` are assigned the GitHub Admin role in the collab repository. For information about editing custom RBAC policies to change this default GitHub role, see [About Changing the Default GitHub Role](#custom) below. |
+| Custom policies without `**/admin` under `allowed:` | Read Only | By default, members assigned to any custom RBAC policies that do not specify `**/admin` under `allowed:` are assigned the Read Only GitHub role in the collab repository. For information about editing custom RBAC policies to change this default GitHub role, see [About Changing the Default GitHub Role](#custom) below. |
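+For instance, the following custom policy (the policy name is hypothetical) specifies `**/admin` under `allowed:`, so members assigned to it would default to the GitHub Admin role in the collab repository per the mapping above:
+
+```json
+{
+  "v1": {
+    "name": "Platform Admin",
+    "resources": {
+      "allowed": [
+        "**/admin"
+      ],
+      "denied": []
+    }
+  }
+}
+```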
    - 1. Promote the second release to the same channel. - - 1. In your development environment, access the Admin Console to upgrade to the second release. Upgrading to the second release migrates the installation to HelmChart v2. +### Change the Default Role {#custom} -1. After you are done testing the migration process, promote the first release containing your application manifests with the `kots.io/keep` annotation to one or more customer-facing channels. Replicated recommends that you mark the release as required by enabling **Prevent this release from being skipped during upgrades**. For more information about required releases, see [Properties](/vendor/releases-about#properties) in _About Channels and Releases_. +You can update any custom RBAC policies that you create in the Vendor Portal to change the default GitHub roles for those policies. For example, by default, any team members assigned a custom policy with `**/admin` under `allowed:` are assigned the Admin role in the collab repository in GitHub. You can update the custom policy to specify a more restrictive GitHub role. -1. Promote the second release containing your Helm chart(s) to the same channels. Replicated recommends that you mark the release as required by enabling **Prevent this release from being skipped during upgrades**. +To edit a custom policy to change the default GitHub role assigned to users with that policy, add one of the following RBAC resources to the `allowed:` or `denied:` list in the custom policy: -1. Instruct customers to migrate by first upgrading to the release containing the standard manifests, then upgrading to the release packaged with Helm. +* `team/support-issues/read` +* `team/support-issues/write` +* `team/support-issues/triage` +* `team/support-issues/admin` -1. In subsequent releases, remove the `--take-ownership` flag from the `helmUpgradeFlags` field and remove the `kots.io/keep` annotation from resources in your Helm templates. +For more information about each of these RBAC resources, see [Team](team-management-rbac-resource-names#team) in _RBAC Resource Names_. -## Support Customers on KOTS Versions Earlier Than v1.99.0 {#support-both-v1-v2} +For more information about how to edit the `allowed:` or `denied:` lists for custom policies in the Vendor Portal, see [Configuring Custom RBAC Policies](team-management-rbac-configuring). -The HelmChart v2 installation method requires KOTS v1.99.0 or later. If you have existing customers that have not yet upgraded to KOTS v1.99.0 or later, Replicated recommends that you support both the HelmChart v2 and v1 installation methods from the same release until all installations are running KOTS v1.99.0 or later. + -To support both installation methods from the same release, include both versions of the HelmChart custom resource for each Helm chart in your application releases (HelmChart `kots.io/v1beta2` and HelmChart `kots.io/v1beta1` with `useHelmInstall: true`). +================ +File: docs/vendor/team-management-google-auth.md +================ +# Managing Google Authentication -When you include both versions of the HelmChart custom resource for a Helm chart, installations with KOTS v1.98.0 or earlier use the v1 method, while installations with KOTS v1.99.0 or later use v2. +This topic describes the Google authentication options that you can configure to control access to the Replicated Vendor Portal. 
-After all customers are using KOTS v1.99.0 or later, you can remove the HelmChart v1 custom resources so that all customers are using the HelmChart v2 method.
+## Manage Google Authentication Options

-## HelmChart v2 Migration FAQs
+As a team administrator, you can enable, disable, or require Google authentication for all accounts in the team.

-This section includes FAQs related to migrating existing installations to the KOTS HelmChart v2 method.
+A core benefit of using Google authentication is that when a user's Google account is suspended or deleted, Replicated logs that user out of all Google authenticated Vendor Portal sessions within 10 minutes. The user remains in the team list, but they cannot log into the Vendor Portal, unless username and password authentication is also allowed. Requiring Google authentication is an effective way of centrally removing access to the Vendor Portal.

-### Which migration scenarios require the `kots.io/keep` annotation?
+To manage Google authentication settings:

-When applied to a resource in a release, the `kots.io/keep` annotation prevents the given resource from being uninstalled. The `kots.io/keep` annotation can be used to prevent KOTS from deleting resources that were adopted into Helm charts or otherwise previously deployed without Helm.
+1. Click **Team Settings > [Google Authentication](https://vendor.replicated.com/team/google-authentication)**.

-To prevent existing resources from being uninstalled during upgrade, the `kots.io/keep` annotation is required for the following types of migrations:
- * Applications previously packaged as Kubernetes manifests migrating to HelmChart v2
- * HelmChart v1 with `useHelmInstall: false` migrating to HelmChart v2
+   ![Google Auth Settings](/images/team-mgmt-google-auth.png)

-`kots.io/keep` is _not_ needed when migrating from HelmChart v1 with `useHelmInstall: true` to HelmChart v2.
+1. Enable or disable the settings:

-### Which migration scenarios require the `--take-ownership` flag?
+   | Field | Instructions |
+   |-----------------------|------------------------|
+   | Allow Google authentication for team members | Enables team members to log in using a Google account. |
+   | Restrict login to only allow Google authentication | Requires new users to accept an invitation and sign up with a Google account that exactly matches the email address that was invited to the team. The email address can be a gmail.com address or a user from another domain, but it must match the email address from the invitation exactly. Disabling this setting requires users to accept the invitation by creating a username and password (or use the SAML workflow). |

-When the `--take-ownership` flag is enabled, Helm automatically takes ownership of resources that were previously deployed to the cluster without Helm.
+## Migrating Existing Accounts
+Excluding some teams that restrict end users to use only Security Assertion Markup Language (SAML) or require two-factor authentication (2FA), existing end users can seamlessly sign into an account that exactly matches their Google Workspace (formerly GSuite) email address. However, Google authentication only matches existing user accounts, so for users who have signed up using task-based email addresses (such as name+news@domain.com), you can continue to use email/password to sign in, invite your normal email address to your team, or contact support to change your email address.
For more information about task-based email addresses, see [Create task-specific email addresses](https://support.google.com/a/users/answer/9308648?hl=en) in the Google Support site.

-The `--take-ownership` flag is required for the following types of migrations:
- * Applications previously packaged as Kubernetes manifests migrating to HelmChart v2
- * HelmChart v1 with `useHelmInstall: false` migrating to HelmChart v2
+Migrated accounts maintain the same role-based access control (RBAC) permissions that were previously assigned. After signing in with Google, users can choose to disable username/password-based authentication on their account or maintain both authentication methods using the Vendor Portal [account settings page](https://vendor.replicated.com/account-settings).

-`--take-ownership` is _not_ needed when migrating from HelmChart v1 with `useHelmInstall: true` to HelmChart v2.
+## Limitations

-### What is the difference between HelmChart v1 with `useHelmInstall: false` and `useHelmInstall: true`?
+Using distribution lists for sending invitations to join a team is not supported. The invitations are sent, but are invalid and cannot be used to join a team using Google authentication.

-With HelmChart v1 and `useHelmInstall: false`, KOTS renders the Helm templates and deploys them as standard Kubernetes manifests using `kubectl apply`. This differs from both the HelmChart v1 with `useHelmInstall: true` and HelmChart v2 methods, where KOTS installs the application using Helm.
+## Compatibility with Two-Factor Authentication
+Google authentication is not entirely compatible with the Replicated two-factor authentication (2FA) implementation because Google authentication bypasses account-based 2FA, relying on your Google authentication instead. However, the Vendor Portal continues to enforce 2FA on all email/password-based authentication, even for the same user, if both options are enabled.

-Because the HelmChart v1 with `useHelmInstall: false` method does not deploy resources with Helm, it is necessary to use the `kots.io/keep` annotation and the Helm `--take-ownership` flag when migrating to the HelmChart v2 installation method. These ensure that Helm can take ownership of existing resources and that the resources are not uninstalled during upgrade.
+## Related Topic

-For more information about how KOTS deploys Helm charts, including information about the deprecated HelmChart v1 installation methods, see [About Distributing Helm Charts with KOTS](helm-native-about).
+[Managing Team Members](team-management)

================
-File: docs/vendor/identity-service-configuring.md
+File: docs/vendor/team-management-rbac-configuring.md
================
-:::important
-This topic is deleted from the product documentation because this Beta feature is deprecated.
-:::
+import CollabRbacResourcesImportant from "../partials/collab-repo/_collab-rbac-resources-important.mdx"

-# Enabling and Configuring Identity Service (Beta)
+# Configuring RBAC Policies

-This topic describes how to enable the identity service (Beta) feature, and how to regulate access to application resources using role based access control (RBAC).
+This topic describes how to use role-based access control (RBAC) policies to grant or deny team members permissions to use Replicated services in the Replicated Vendor Portal.
-## About Identity Service +## About RBAC Policies -When you enable the identity service for an application, the Replicated app manager deploys [Dex](https://dexidp.io/) as an intermediary that can be configured to control access to the application. Dex implements an array of protocols for querying other user-management systems, known as connectors. For more information about connectors, see [Connectors](https://dexidp.io/docs/connectors/) in the Dex documentation. +By default, every team has two policies created automatically: **Admin** and **Read Only**. If you have an Enterprise plan, you will also have the **Sales** and **Support** policies created automatically. These default policies are not configurable. For more information, see [Default RBAC Policies](#default-rbac) below. +You can configure custom RBAC policies if you are on the Enterprise pricing plan. Creating custom RBAC policies lets you limit which areas of the Vendor Portal are accessible to team members, and control read and read/write privileges to groups based on their role. For example, you can limit access for the sales team to one application and to specific channels. Or, you can grant only certain users permission to promote releases to your production channels. -## Limitations and Requirements +You can also create custom RBAC policies in the Vendor Portal to manage user access and permissions in the Replicated collab repository in GitHub. For more information, see [Managing Access to the Collab Repository](team-management-github-username). -Identity service has the following limitations and requirements: +## Default RBAC Policies {#default-rbac} -* Requires the identity service option is enabled in customer licenses. -* Is available only for embedded cluster installations with the kURL installer. -* Is available only through the Replicated Admin Console. +This section describes the default RBAC policies that are included for Vendor Portal teams, depending on the team's Replicated pricing plan. -## Enable and Configure Identity Service +### Admin -Use the Identity custom resource to enable and configure the identity service for your application. For an example application that demonstrates how to configure the identity service, see the [`kots-idp-example-app`](https://github.com/replicatedhq/kots-idp-example-app) on GitHub. +The Admin policy grants read/write permissions to all resources on the team. -To begin, create a new release in the [Vendor Portal](https://vendor.replicated.com). Add an Identity custom resource file and customize the file for your application. For more information about the Identity custom resource, see [Identity (Beta)](/reference/custom-resource-identity) in _Reference_. +:::note +This policy is automatically created for all plans. +::: -**Example:** - -```YAML -apiVersion: kots.io/v1beta1 -kind: Identity -metadata: - name: identity -spec: - requireIdentityProvider: true - identityIssuerURL: https://{{repl ConfigOption "ingress_hostname"}}/oidcserver - oidcRedirectUris: - - https://{{repl ConfigOption "ingress_hostname"}}/callback - roles: - - id: access - name: Access - description: Restrict access to IDP Example App +```json +{ + "v1": { + "name": "Admin", + "resources": { + "allowed": [ + "**/*" + ], + "denied": [] + } + } +} ``` -Make the identity service accessible from the browser by configuring the service name and port. 
The app manager provides the service name and port to the application through the identity template functions so that the application can configure ingress for the identity service. For more information about the identity template functions, see [Identity Context](/reference/template-functions-identity-context) in _Reference_. +### Read Only -**Example:** +The Read Only policy grants read permission to all resources on the team except for API tokens. -```YAML -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: idp-app - annotations: - kubernetes.io/ingress.allow-http: 'false' - ingress.kubernetes.io/force-ssl-redirect: 'true' - kots.io/placeholder: repl{{ printf "'true'" }}repl{{ ConfigOption "annotations" | nindent 4 }} - labels: - app: idp-app -spec: - tls: - - hosts: - - repl{{ ConfigOption "ingress_hostname" }} - secretName: idp-ingress-tls - rules: - - host: repl{{ or (ConfigOption "ingress_hostname") "~" }} - http: - paths: - - path: / - backend: - serviceName: idp-app - servicePort: 80 - - path: /oidcserver - backend: - serviceName: repl{{ IdentityServiceName }} - servicePort: repl{{ IdentityServicePort }} +:::note +This policy is automatically created for all plans. +::: + +```json +{ + "v1": { + "name": "Read Only", + "resources": { + "allowed": [ + "**/list", + "**/read" + ], + "denied": [ + "**/*" + ] + } + } +} ``` -In your Deployment manifest file, add environment variables to configure all of the information that your application needs to communicate and integrate with the identity service. -**Example:** +### Support Engineer -```YAML -apiVersion: apps/v1 -kind: Deployment -metadata: - name: idp-app - labels: - app: idp-app -spec: - replicas: 1 - selector: - matchLabels: - app: idp-app - template: - metadata: - labels: - app: idp-app - spec: - containers: - - name: idp-app - image: replicated/kots-idp-example-app:latest - imagePullPolicy: Always - ports: - - containerPort: 5555 - volumeMounts: - - name: tls-ca-volume - mountPath: /idp-example - readOnly: true - args: ["--issuer-root-ca=/idp-example/tls.ca"] - env: - - name: CERT_SHA - value: repl{{ sha256sum (ConfigOption "tls_cert") }} - - name: LISTEN - value: http://0.0.0.0:5555 - - name: ISSUER - value: https://{{repl ConfigOption "ingress_hostname"}}/oidcserver - - name: CLIENT_ID - value: repl{{ IdentityServiceClientID }} - - name: CLIENT_SECRET - value: repl{{ IdentityServiceClientSecret }} # TODO: secret - - name: REDIRECT_URI - value: https://{{repl ConfigOption "ingress_hostname"}}/callback - - name: EXTRA_SCOPES - value: groups - - name: RESTRICTED_GROUPS - value: | - {{repl IdentityServiceRoles | keys | toJson }} - hostAliases: - - ip: 172.17.0.1 - hostnames: - - myapp.kotsadmdevenv.com - volumes: - - name: tls-ca-volume - secret: - secretName: idp-app-ca -``` +The Support Engineer policy grants read access to release, channels, and application data, and read-write access to customer and license details. It also grants permission to open Replicated support issues and upload support bundles. -## Configuring Access with RBAC +:::note +This policy is automatically created for teams with the Enterprise plan only. +::: -You can also regulate access to your application resources using role based access control (RBAC). 
+```json +{ + "v1": { + "name": "Support Engineer", + "resources": { + "allowed": [ + "**/read", + "**/list", + "kots/app/*/license/**", + "team/support-issues/read", + "team/support-issues/write" + ], + "denied": [ + "**/*" + ] + } + } +} +``` -In the Identity custom resource, provide a list of the available roles within your application in the `roles` section. For more information, see [`roles`](/reference/custom-resource-identity#roles) in _Reference_. +### Sales -**Example:** +The Sales policy grants read-write access to customers and license details and read-only access to resources necessary to manage licenses (applications, channels, and license fields). No additional access is granted. -```YAML -apiVersion: kots.io/v1beta1 -kind: Identity -metadata: - name: identity -spec: - requireIdentityProvider: true - identityIssuerURL: https://{{repl ConfigOption "ingress_hostname"}}/oidcserver - oidcRedirectUris: - - https://{{repl ConfigOption "ingress_hostname"}}/callback - roles: - - id: access - name: Access - description: Restrict access to IDP Example App +:::note +This policy is automatically created for teams with the Enterprise plan only. +::: + +```json +{ + "v1": { + "name": "Sales", + "resources": { + "allowed": [ + "kots/app/*/read", + "kots/app/*/channel/*/read", + "kots/app/*/licensefields/read", + "kots/app/*/license/**" + ], + "denied": [ + "**/*" + ] + } + } +} ``` -Then, using the Admin Console, your customer has the ability to create groups and assign specific roles to each group. -This mapping of roles to groups is returned to your application through the `IdentityServiceRoles` template function that you configure in your Deployment manifest file under the environment variable `RESTRICTED_GROUPS`. For more information, see [`IdentityServiceRoles`](/reference/template-functions-identity-context#identityserviceroles) in _Reference_. +## Configure a Custom RBAC Policy -**Example:** +To configure a custom RBAC policy: -```YAML -apiVersion: apps/v1 -kind: Deployment -metadata: - name: idp-app - labels: - app: idp-app -spec: - replicas: 1 - selector: - matchLabels: - app: idp-app - template: - metadata: - labels: - app: idp-app - spec: - containers: - - name: idp-app - image: replicated/kots-idp-example-app:latest - imagePullPolicy: Always - ports: - - containerPort: 5555 - volumeMounts: - - name: tls-ca-volume - mountPath: /idp-example - readOnly: true - args: ["--issuer-root-ca=/idp-example/tls.ca"] - env: - - name: CERT_SHA - value: repl{{ sha256sum (ConfigOption "tls_cert") }} - - name: LISTEN - value: http://0.0.0.0:5555 - - name: ISSUER - value: https://{{repl ConfigOption "ingress_hostname"}}/oidcserver - - name: CLIENT_ID - value: repl{{ IdentityServiceClientID }} - - name: CLIENT_SECRET - value: repl{{ IdentityServiceClientSecret }} # TODO: secret - - name: REDIRECT_URI - value: https://{{repl ConfigOption "ingress_hostname"}}/callback - - name: EXTRA_SCOPES - value: groups - - name: RESTRICTED_GROUPS - value: | - {{repl IdentityServiceRoles | keys | toJson }} - hostAliases: - - ip: 172.17.0.1 - hostnames: - - myapp.kotsadmdevenv.com - volumes: - - name: tls-ca-volume - secret: - secretName: idp-app-ca +1. From the Vendor Portal [Team page](https://vendor.replicated.com/team), select **RBAC** from the left menu. + +1. Do _one_ of the following: + + - Click **Create Policy** from the RBAC page to create a new policy. + - Click **View policy** to edit an existing custom policy in the list. + + + +1. Edit the fields in the policy dialog. 
In the **Definition** pane, specify the `allowed` and `denied` arrays in the resources key to create limits for the role.

   The default policy allows everything and the **Config help** pane displays any errors.

   ![Create RBAC Policy](/images/policy-create.png)

   - For more information, see [Policy Definition](#policy-definition).
   - For more information about and examples of rule order, see [Rule Order](#rule-order).
   - For a list of resource names, see [RBAC Resource Names](team-management-rbac-resource-names).

1. Click **Create Policy** to create a new policy, or click **Update Policy** to update an existing policy.

   :::note
   Click **Cancel** to exit without saving changes.
   :::

1. To apply RBAC policies to Vendor Portal team members, you can:

   - Assign policies to existing team members
   - Specify a policy when inviting new team members
   - Set a default policy for auto-joining a team

   See [Managing Team Members](team-management).

## Policy Definition

A policy is defined in a single JSON document:

```
{
  "v1": {
    "name": "Read Only",
    "resources": {
      "allowed": [
        "**/read",
        "**/list"
      ],
      "denied": [
        "**/*"
      ]
    }
  }
}
```

The primary content of a policy document is the resources key. The resources key should contain two arrays, identified as `allowed` and `denied`. Resources specified in the `allowed` list are allowed for users assigned to the policy, and resources specified in the `denied` list are denied.

Resource names are hierarchical, and support wildcards and globs. For a complete list of resource names that can be defined in a policy document, see [RBAC Resource Names](team-management-rbac-resource-names).

When a policy document has conflicting rules, the behavior is predictable. For more information about conflicting rules, see [Rule Order](#rule-order).

### Example: View Specific Application and Channel

The following policy definition example limits any user with this role to viewing a specific application and a specific channel for that application:

```
{
  "v1": {
    "name": "Policy Name",
    "resources": {
      "allowed": [
        "kots/app/appID/list",
        "kots/app/appID/read",
        "kots/app/appID/channel/channelID/list",
        "kots/app/appID/channel/channelID/read"
      ],
      "denied": []
    }
  }
}
```

The example above uses an application ID and a channel ID to scope the permissions of the RBAC policy. To find your application and channel IDs, do the following:

- To get the application ID, click **Settings > Show Application ID (Advanced)** in the Vendor Portal.

- To get the channel ID, click **Channels** in the Vendor Portal. Then click the Release History link for the channel that you want to limit access to. The channel ID displays in your browser URL.

## Rule Order

When a resource name is specified in both the `allowed` and the `denied` chains of a policy, defined rules determine which rule is applied.

If `denied` is left empty, it is implied as a `**/*` rule, unless a `**/*` rule is specified in the `allowed` resources. If a rule exactly conflicts with another rule, the `denied` rule takes precedence.

### Defining Precedence Using Rule Specificity
The most specific rule definition is always applied, when compared with less specific rules. Specificity of a rule is calculated by the number of asterisks (`**` and `*`) in the definition. A `**` in the rule definition is the least specific, followed by rules with `*`, and finally rules with no wildcards as the most specific.
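To illustrate the counting, the following resource rules (using the same hypothetical `appID` and `channelID` placeholders as the example above) are ordered from least to most specific:

```
**/*                                       (two-asterisk wildcard, least specific)
kots/app/*/channel/*/promote               (fewer wildcards, more specific)
kots/app/appID/channel/channelID/promote   (no wildcards, most specific)
```

Under this specificity rule, a `denied` entry of `**/*` is overridden by a more specific `allowed` entry such as `kots/app/appID/read`, which is how the View Specific Application and Channel example above grants access despite the implied deny-all.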
### Example: No Access To Stable Channel

In the following example, a policy grants access to promote releases to any channel except the Stable channel. It uses the rule pattern `kots/app/[:appId]/channel/[:channelId]/promote`. Note that you specify the channel ID, rather than the channel name. To find the channel ID, go to the Vendor Portal **Channels** page and click the **Settings** icon for the target channel.

```json
{
  "v1": {
    "name": "No Access To Stable Channel",
    "resources": {
      "allowed": [
        "**/*"
      ],
      "denied": [
        "kots/app/*/channel/1eg7CyEofYSmVAnK0pEKUlv36Y3/promote"
      ]
    }
  }
}
```

### Example: View Customers Only

In the following example, a policy grants access to viewing all customers, but not to creating releases, promoting releases, or creating new customers.

```json
{
  "v1": {
    "name": "View Customers Only",
    "resources": {
      "allowed": [
        "kots/app/*/license/*/read",
        "kots/app/*/license/*/list",
        "kots/app/*/read",
        "kots/app/*/list"
      ],
      "denied": [
        "**/*"
      ]
    }
  }
}
```

================
File: docs/vendor/team-management-rbac-resource-names.md
================
import CollabRbacResourcesImportant from "../partials/collab-repo/_collab-rbac-resources-important.mdx"

# RBAC Resource Names

This is a list of all available resource names for the Replicated vendor role-based access control (RBAC) policy:

## Integration Catalog

### integration/catalog/list

Grants the holder permission to view the catalog events and triggers available for integrations.

## kots

### kots/app/create

Grants the holder permission to create new applications.

### kots/app/[:appId]/read
Grants the holder permission to view the application. If the holder does not have permissions to view an application, it will not appear in lists.
-To enable status informers for your application, do one of the following, depending on the installation method: -* [Helm Installations](#helm-installations) -* [KOTS Installations](#kots-installations) +### kots/externalregistry/list +Grants the holder the ability to list external docker registry for application(s). -### Helm Installations +### kots/externalregistry/create -To get instance status data for applications installed with Helm, the Replicated SDK must be installed alongside the application. For information about how to distribute and install the SDK with your application, see [Installing the Replicated SDK](/vendor/replicated-sdk-installing). +Grants the holder the ability to link a new external docker registry to application(s). -After you include the SDK as a dependency, the requirements for enabling status informers vary depending on how your application is installed: +### kots/externalregistry/[:registryName]/delete -* For applications installed by running `helm install` or `helm upgrade`, the Replicated SDK automatically detects and reports the status of the resources that are part of the Helm release. No additional configuration is required to get instance status data. +Grants the holder the ability to delete the specified linked external docker registry in application(s). -* For applications installed by running `helm template` then `kubectl apply`, the SDK cannot automatically detect and report the status of resources. You must configure custom status informers by overriding the `statusInformers` value in the Replicated SDK chart. For example: +### kots/app/[:appId]/channel/create - ```yaml - # Helm chart values.yaml file +Grants the holder the ability to create a new channel in the specified application(s). - replicated: - statusInformers: - - deployment/nginx - - statefulset/mysql - ``` +### kots/app/[:appId]/channel/[:channelId]/archive - :::note - Applications installed by running `helm install` or `helm upgrade` can also use custom status informers. When the `replicated.statusInformers` field is set, the SDK detects and reports the status of only the resources included in the `replicated.statusInformers` field. - ::: +Grants the holder permission to archive the specified channel(s) of the specified application(s). -### KOTS Installations +### kots/app/[:appId]/channel/[:channelId]/promote -For applications installed with Replicated KOTS, configure one or more status informers in the KOTS Application custom resource. For more information, see [Adding Resource Status Informers](admin-console-display-app-status). +Grants the holder the ability to promote a new release to the specified channel(s) of the specified application(s). -When Helm-based applications that include the Replicated SDK and are deployed by KOTS, the SDK inherits the status informers configured in the KOTS Application custom resource. In this case, the SDK does _not_ automatically report the status of the resources that are part of the Helm release. This prevents discrepancies in the instance data in the vendor platform. +### kots/app/[:appId]/channel/[:channelId]/update -## View Resource Status Insights {#resource-status} +Grants the holder permission to update the specified channel of the specified application(s). -For applications that include the Replicated SDK, the Vendor Portal also displays granular resource status insights in addition to the aggregate application status. 
For example, you can hover over the **App status** field on the **Instance details** page to view the statuses of the individual resources deployed by the application, as shown below:

-resource status pop up
+### kots/app/[:appId]/channel/[:channelId]/releases/airgap

-[View a larger version of this image](/images/resource-status-hover-current-state.png)
+Grants the holder permission to trigger airgap builds for the specified channel.

-Viewing these resource status details is helpful for understanding which resources are contributing to the aggregate application status. For example, when an application has an Unavailable status, that means that one or more resources are Unavailable. By viewing the resource status insights on the **Instance details** page, you can quickly understand which resource or resources are Unavailable for the purpose of troubleshooting.
+### kots/app/[:appId]/channel/[:channelId]/releases/airgap/download-url

-Granular resource status details are automatically available when the Replicated SDK is installed alongside the application. For information about how to distribute and install the SDK with your application, see [Installing the Replicated SDK](/vendor/replicated-sdk-installing).
+Grants the holder permission to get an airgap bundle download URL for any release on the specified channel.

-## Understanding Application Status
+### kots/app/[:appId]/installer/create

-This section provides information about how Replicated interprets and aggregates the status of Kubernetes resources for your application to report an application status.
+Grants the holder permission to create kURL installers. For more information, see [Creating a kURL installer](packaging-embedded-kubernetes).

-### About Resource Statuses {#resource-statuses}
+### kots/app/[:appId]/installer/update

-Possible resource statuses are Ready, Updating, Degraded, Unavailable, and Missing.
+Grants the holder permission to update kURL installers. For more information, see [Creating a kURL installer](packaging-embedded-kubernetes).

-The following table lists the supported Kubernetes resources and the conditions that contribute to each status:
+### kots/app/[:appId]/installer/read

-
+Grants the holder permission to view kURL installers. For more information, see [Creating a kURL installer](packaging-embedded-kubernetes).

-### Aggregate Application Status
+### kots/app/[:appId]/installer/promote

-
+Grants the holder permission to promote kURL installers to a channel. For more information, see [Creating a kURL installer](packaging-embedded-kubernetes).

-

================
File: docs/vendor/install-with-helm.mdx
================
import Prerequisites from "../partials/helm/_helm-install-prereqs.mdx"
import FirewallOpeningsIntro from "../partials/install/_firewall-openings-intro.mdx"

# Installing with Helm

This topic describes how to use Helm to install releases that contain one or more Helm charts. For more information about the `helm install` command, including how to override values in a chart during installation, see [Helm Install](https://helm.sh/docs/helm/helm_install/) in the Helm documentation.
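As a quick sketch of the value-override behavior mentioned above, you might pass `--set` at install time (the release name, chart path, and value key below are hypothetical):

```bash
# Install a chart from the Replicated registry, overriding one default value.
helm install my-release oci://registry.replicated.com/my-app/unstable/my-chart \
  --set postgres.enabled=true
```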
+:::note +The `kots/app/[:appId]/installer/promote` policy does not grant the holder permission to view and create installers. Users must be assigned both the `kots/app/[:appId]/installers` and `kots/app/[:appId]/installer/promote` policies to have permissions to view, create, and promote installers. +::: -## Prerequisites +### kots/app/[:appId]/license/create -Before you install, complete the following prerequisites: +Grants the holder permission to create a new license in the specified application(s). - +### kots/app/[:appId]/license/[:customerId]/read -## Firewall Openings for Online Installations with Helm {#firewall} +Grants the holder permission to view the license specified by ID. If this is denied, the licenses will not show up in search, CSV export or on the Vendor Portal, and the holder will not be able to subscribe to this license's instance notifications. - +### kots/app/[:appId]/license/[:customerId]/update - - - - - - - - - - - - - - - - - -
| Domain | Description |
|--------|-------------|
| `replicated.app` * | Upstream application YAML and metadata is pulled from `replicated.app`. The current running version of the application (if any), as well as a license ID and application ID to authenticate, are all sent to `replicated.app`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA. For the range of IP addresses for `replicated.app`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L60-L65) in GitHub. |
| `registry.replicated.com` | Some applications host private images in the Replicated registry at this domain. The on-prem docker client uses a license ID to authenticate to `registry.replicated.com`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA. For the range of IP addresses for `registry.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L20-L25) in GitHub. |
| `proxy.replicated.com` | Private Docker images are proxied through `proxy.replicated.com`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA. For the range of IP addresses for `proxy.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L52-L57) in GitHub. |
    +Grants the holder permission to edit the license specified by ID for the specified application(s). -* Required only if the [Replicated SDK](/vendor/replicated-sdk-overview) is included as a dependency of the application Helm chart. +### kots/app/[:appId]/license/[:customerId]/slack-notifications/read -## Install +Grants the holder permission to view the team's Slack notification subscriptions for instances associated with the specified license. -To install a Helm chart: +### kots/app/[:appId]/license/[:customerId]/slack-notifications/update -1. In the Vendor Portal, go to **Customers** and click on the target customer. +Grants the holder permission to edit the team's Slack notification subscriptions for instances associated with the specified license. -1. Click **Helm install instructions**. +### kots/app/[:appId]/builtin-licensefields/update - Helm install button +Grants the holder permission to edit the builtin license field override values for the specified application(s). - [View a larger image](/images/helm-install-button.png) +### kots/app/[:appId]/builtin-licensefields/delete -1. In the **Helm install instructions** dialog, run the first command to log in to the Replicated registry: +Grants the holder permission to delete the builtin license field override values for the specified application(s). - ```bash - helm registry login registry.replicated.com --username EMAIL_ADDRESS --password LICENSE_ID - ``` - Where: - * `EMAIL_ADDRESS` is the customer's email address - * `LICENSE_ID` is the ID of the customer's license +### kots/license/[:customerId]/airgap/password - :::note - You can safely ignore the following warning message: `WARNING: Using --password via the CLI is insecure.` This message is displayed because using the `--password` flag stores the password in bash history. This login method is not insecure. +Grants the holder permission to generate a new download portal password for the license specified (by ID) for the specified application(s). - Alternatively, to avoid the warning message, you can click **(show advanced)** in the **Helm install instructions** dialog to display a login command that excludes the `--password` flag. With the advanced login command, you are prompted for the password after running the command. - ::: +### kots/license/[:customerId]/archive -1. (Optional) Run the second and third commands to install the preflight plugin and run preflight checks. If no preflight checks are defined, these commands are not displayed. For more information about defining and running preflight checks, see [About Preflight Checks and Support Bundles](preflight-support-bundle-about). +Grants the holder permission to archive the specified license (by ID). -1. Run the fourth command to install using Helm: +### kots/license/[:customerId]/unarchive - ```bash - helm install RELEASE_NAME oci://registry.replicated.com/APP_SLUG/CHANNEL/CHART_NAME - ``` - Where: - * `RELEASE_NAME` is the name of the Helm release. - * `APP_SLUG` is the slug for the application. For information about how to find the application slug, see [Get the Application Slug](/vendor/vendor-portal-manage-app#slug). - * `CHANNEL` is the lowercased name of the channel where the release was promoted, such as `beta` or `unstable`. Channel is not required for releases promoted to the Stable channel. - * `CHART_NAME` is the name of the Helm chart. +Grants the holder permissions to unarchive the specified license (by ID). 
-   :::note
-   To install the SDK with custom RBAC permissions, include the `--set` flag with the `helm install` command to override the value of the `replicated.serviceAccountName` field with a custom service account. For more information, see [Customizing RBAC for the SDK](/vendor/replicated-sdk-customizing#customize-rbac-for-the-sdk).
-   :::
+### kots/app/[:appId]/licensefields/create
-1. (Optional) In the Vendor Portal, click **Customers**. You can see that the customer you used to install is marked as **Active** and the details about the application instance are listed under the customer name.
+Grants the holder permission to create new license fields in the specified application(s).
-   **Example**:
+### kots/app/[:appId]/licensefields/read
-   ![example customer in the Vendor Portal with an active instance](/images/sdk-customer-active-example.png)
-   [View a larger version of this image](/images/sdk-customer-active-example.png)
+Grants the holder permission to view the license fields in the specified application(s).
-================
-File: docs/vendor/installer-history.mdx
-================
-import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"
+### kots/app/[:appId]/licensefields/update
-# Installer History
+Grants the holder permission to edit the license fields for the specified application(s).
-
+### kots/app/[:appId]/licensefields/delete
-This topic describes how to access the installation commands for all active and inactive kURL installers promoted to a channel.
+Grants the holder permission to delete the license fields for the specified application(s).
-## About Using Inactive Installers
+### kots/app/[:appId]/release/create
-Each release channel in the Replicated Vendor Portal saves the history of kURL installers that were promoted to the channel. You can view the list of historical installers on the **kURL Installer History** page for each channel. For more information, see [About the Installer History Page](#about) below.
+Grants the holder permission to create a new release in the specified application(s).
-It can be useful to access the installation commands for inactive installers to reproduce an issue that a user is experiencing for troubleshooting purposes. For example, if the user's cluster is running the inactive installer version 1.0.0, then you can install with version 1.0.0 in a test environment to troubleshoot.
+### kots/app/[:appId]/release/[:sequence]/update
-You can also send the installation commands for inactive installers to your users as needed. For example, a user might have unique requirements for specific versions of Kubernetes or add-ons.
+Grants the holder permission to update the files saved in release sequence `[:sequence]` in the specified application(s). Once a release is promoted to a channel, it's not editable by anyone.
-## About the Installer History Page {#about}
+### kots/app/[:appId]/release/[:sequence]/read
-The **kURL Installer History** page for each channel includes a list of all the kURL installers that have been promoted to the channel, including the active installer and any inactive installers.
+Grants the holder permission to read the files at release sequence `[:sequence]` in the specified application(s).
-To access the **kURL Installer History** page, go to **Channels** and click the **Installer history** button on the target channel.
+### kots/app/[:appId]/customhostname/list
-The following image shows an example **kURL Installer History** page with three installers listed:
+Grants the holder permission to view custom hostnames for the team.
-![Installer History page in the Vendor Portal](/images/installer-history-page.png)
+### kots/app/[:appId]/customhostname/create
-[View a larger version of this image](/images/installer-history-page.png)
+Grants the holder permission to create custom hostnames for the team.
-The installers are listed in the order in which they were promoted to the channel. The installer at the top of the list is the active installer for the channel.
+### kots/app/[:appId]/customhostname/delete
-The **kURL Installer History** page includes the following information for each installer listed:
+Grants the holder permission to delete custom hostnames for the team.
-* Version label, if provided when the installer was promoted
+### kots/app/[:appId]/customhostname/default/set
-* Sequence number
+Grants the holder permission to set default custom hostnames.
-* Installation command
+### kots/app/[:appId]/customhostname/default/unset
-* Installer YAML content
+Grants the holder permission to unset the default custom hostnames.
-================
+### kots/app/[:appId]/supportbundle/read
-File: docs/vendor/instance-data-export.md
-================
-import Download from "../partials/customers/_download.mdx"
+Grants the holder permission to view and download support bundles.
-# Export Customer and Instance Data
+## Registry
-This topic describes how to download and export customer and instance data from the Replicated Vendor Portal.
+### registry/namespace/:namespace/pull
-## Overview
+Grants the holder permission to pull images from the Replicated registry.
-While you can always consume customer and instance insight data directly in the Replicated Vendor Portal, the data is also available in a CSV format so that it can be imported into any other system, such as:
-* Customer Relationship Management (CRM) systems like Salesforce or Gainsight
-* Data warehouses like Redshift, Snowflake, or BigQuery
-* Business intelligence (BI) tools like Looker, Tableau, or PowerBI
+### registry/namespace/:namespace/push
-By collecting and organizing this data wherever it is most visible and valuable, you can enable your team to make better decisions about where to focus efforts across product, sales, engineering, and customer success.
-## Bulk Export Instance Event Timeseries Data
-You can use the Vendor API v3 `/app/{app_id}/events` endpoint to programmatically access historical timeseries data containing instance level events, including any custom metrics that you have defined. For more information about the endpoint, see [Get instance events in either JSON or CSV format](https://replicated-vendor-api.readme.io/reference/listappinstanceevents) in the Vendor API v3 documentation.
-The `/app/{app_id}/events` endpoint returns data scoped to a given application identifier. It also allows filtering based on time periods, instance identifiers, customer identifiers, and event types. You must provide at least **one** query parameter to scope the query in order to receive a response.
-By bulk exporting this instance event data with the `/app/{app_id}/events` endpoint, you can:
-* Identify trends and potential problem areas
-* Demonstrate the impact, adoption, and usage of recent product features
+Grants the holder permission to push images into the Replicated registry.
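As a sketch, a minimal scoped query against this endpoint can look like the following. At least one query parameter is required; the `:appID` placeholder and the date are illustrative (the query parameters shown here are the same ones documented in the filter examples below):

```bash
# Scope the query with a single date filter; replace :appID with your app ID
curl -H "Authorization: $REPLICATED_API_TOKEN" \
  "https://api.replicated.com/vendor/v3/app/:appID/events?after=2024-01-01"
```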
-### Filter Bulk Data Exports
+## Compatibility Matrix
-You can use the following types of filters to filter timeseries data for bulk export:
+### kots/cluster/create
-- **Filter by date**:
-  - Get instance events recorded _at or before_ the query date. For example:
-    ```bash
-    curl -H "Authorization: $REPLICATED_API_TOKEN" \
-    "https://api.replicated.com/vendor/v3/app/:appID/events?before=2023-10-15"
-    ```
-  - Get instance events recorded _at or after_ the query date. For example:
-    ```shell
-    curl -H "Authorization: $REPLICATED_API_TOKEN" \
-    "https://api.replicated.com/vendor/v3/app/:appID/events?after=2023-10-15"
-    ```
-  - Get instance events recorded within a range of dates [after, before]. For example:
-    ```shell
-    curl -H "Authorization: $REPLICATED_API_TOKEN" \
-    "https://api.replicated.com/vendor/v3/app/:appID/events?after=2023-05-02&before=2023-10-15"
-    ```
+Grants the holder permission to create new clusters.
-- **Filter by customer**: Get instance events from one or more customers using a comma-separated list of customer IDs. For example:
-  ```bash
-  curl -H "Authorization: $REPLICATED_API_TOKEN" \
-  "https://api.replicated.com/vendor/v3/app/:appID/events?customerIDs=1b13241,2Rjk2923481"
-  ```
+### kots/cluster/list
-- **Filter by event type**: Get instance events by event type using a comma-separated list of event types. For example:
-  ```bash
-  curl -H "Authorization: $REPLICATED_API_TOKEN" \
-  "https://api.replicated.com/vendor/v3/app/:appID/events?eventTypes=numUsers,numProjects"
-  ```
+Grants the holder permission to list running and terminated clusters.
-:::note
-If any filter is passed for an object that does not exist, no warning is given. For example, if a `customerIDs` filter is passed for an ID that does not exist, or for an ID that the user does not have access to, then an empty array is returned.
-:::
+### kots/cluster/[:clusterId]
-## Download Customer Instance Data CSVs
+Grants the holder permission to get cluster details.
-
-### Data Dictionary
+### kots/cluster/[:clusterId]/upgrade
-The following table lists the data fields that can be included in the customers and instances CSV downloads, including the label, data type, and description.
| Label | Type | Description |
|---|---|---|
| customer_id | string | Customer identifier |
| customer_name | string | The customer name |
| customer_created_date | timestamptz | The date the license was created |
| customer_license_expiration_date | timestamptz | The expiration date of the license |
| customer_channel_id | string | The channel id the customer is assigned |
| customer_channel_name | string | The channel name the customer is assigned |
| customer_app_id | string | App identifier |
| customer_last_active | timestamptz | The date the customer was last active |
| customer_type | string | One of prod, trial, dev, or community |
| customer_status | string | The current status of the customer |
| customer_is_airgap_enabled | boolean | The feature the customer has enabled - Airgap |
| customer_is_geoaxis_supported | boolean | The feature the customer has enabled - GeoAxis |
| customer_is_gitops_supported | boolean | The feature the customer has enabled - KOTS Auto-GitOps |
| customer_is_embedded_cluster_download_enabled | boolean | The feature the customer has enabled - Embedded Cluster |
| customer_is_identity_service_supported | boolean | The feature the customer has enabled - Identity |
| customer_is_snapshot_supported | boolean | The feature the customer has enabled - Snapshot |
| customer_has_entitlements | boolean | Indicates the presence or absence of entitlements and entitlement_* columns |
| customer_entitlement__* | string/integer/boolean | The values of any custom license fields configured for the customer. For example, customer_entitlement__active-users. |
| customer_created_by_id | string | The ID of the actor that created this customer: user ID or a hashed value of a token. |
| customer_created_by_type | string | The type of the actor that created this customer: user or service-account. |
| customer_created_by_description | string | The description of the actor that created this customer. Includes username or token name depending on actor type. |
| customer_created_by_link | string | The link to the actor that created this customer. |
| customer_created_by_timestamp | timestamptz | The date the customer was created by this actor. When available, matches the value in the customer_created_date column. |
| customer_updated_by_id | string | The ID of the actor that updated this customer: user ID or a hashed value of a token. |
| customer_updated_by_type | string | The type of the actor that updated this customer: user or service-account. |
| customer_updated_by_description | string | The description of the actor that updated this customer. Includes username or token name depending on actor type. |
| customer_updated_by_link | string | The link to the actor that updated this customer. |
| customer_updated_by_timestamp | timestamptz | The date the customer was updated by this actor. |
| instance_id | string | Instance identifier |
| instance_is_active | boolean | The instance has pinged within the last 24 hours |
| instance_first_reported_at | timestamptz | The timestamp of the first recorded check-in for the instance. |
| instance_last_reported_at | timestamptz | The timestamp of the last recorded check-in for the instance. |
| instance_first_ready_at | timestamptz | The timestamp of when the cluster was considered ready |
| instance_kots_version | string | The version of KOTS or the Replicated SDK that the instance is running. The version is displayed as a Semantic Versioning compliant string. |
| instance_k8s_version | string | The version of Kubernetes running in the cluster. |
| instance_is_airgap | boolean | The cluster is air gapped |
| instance_is_kurl | boolean | The instance is installed in a Replicated kURL cluster (embedded cluster) |
| instance_last_app_status | string | The instance's last reported app status |
| instance_client | string | Indicates whether this instance is managed by KOTS or if it's a Helm CLI deployed instance using the SDK. |
| instance_kurl_node_count_total | integer | Total number of nodes in the cluster. Applies only to kURL clusters. |
| instance_kurl_node_count_ready | integer | Number of nodes in the cluster that are in a healthy state and ready to run Pods. Applies only to kURL clusters. |
| instance_cloud_provider | string | The cloud provider where the instance is running. Cloud provider is determined by the IP address that makes the request. |
| instance_cloud_provider_region | string | The cloud provider region where the instance is running. For example, us-central1-b. |
| instance_app_version | string | The current application version |
| instance_version_age | string | The age (in days) of the currently deployed release. This is relative to the latest available release on the channel. |
| instance_is_gitops_enabled | boolean | Reflects whether the end user has enabled KOTS Auto-GitOps for deployments in their environment |
| instance_gitops_provider | string | If KOTS Auto-GitOps is enabled, reflects the GitOps provider in use. For example, GitHub Enterprise. |
| instance_is_skip_preflights | boolean | Indicates whether an end user elected to skip preflight check warnings or errors |
| instance_preflight_status | string | The last reported preflight check status for the instance |
| instance_k8s_distribution | string | The Kubernetes distribution of the cluster. |
| instance_has_custom_metrics | boolean | Indicates the presence or absence of custom metrics and custom_metric__* columns |
| instance_custom_metrics_reported_at | timestamptz | Timestamp of the latest custom metric |
| custom_metric__* | string/integer/boolean | The values of any custom metrics that have been sent by the instance. For example, custom_metric__active_users. |
| instance_has_tags | boolean | Indicates the presence or absence of instance tags and instance_tag__* columns |
| instance_tag__* | string/integer/boolean | The values of any instance tags that have been set by the vendor. For example, instance_tag__name. |
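As an example of consuming the export downstream, a quick sanity check on a downloaded instances CSV might look like the following. This is a sketch that assumes the `csvkit` tools are installed and that the export includes the `instance_is_active` column described above:

```bash
# Count active vs. inactive instances; skip the header row before counting
csvcut -c instance_is_active instances.csv | tail -n +2 | sort | uniq -c
```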
+Grants the holder permission to upgrade a cluster.
-================
-File: docs/vendor/instance-insights-details.md
-================
-# Instance Details
+### kots/cluster/tag/update
-This topic describes using the Replicated Vendor Portal to quickly understand the recent events and performance of application instances installed in your customers' environments.
+Grants the holder permission to update cluster tags.
-## About the Instance Details Page {#about-page}
+### kots/cluster/ttl/update
-The Vendor Portal provides insights about the health, status, and performance of the active application instances associated with each customer license on the **Instance details** page. You can use the insights on the **Instance details** page to more quickly troubleshoot issues with your customers' active instances, helping to reduce support burden.
+Grants the holder permission to update the cluster TTL.
-For example, you can use the **Instance details** page to track the following events for each instance:
+### kots/cluster/[:clusterId]/nodegroup
-* Recent performance degradation or downtime
-* Length of instance downtime
-* Recent changes to the cluster or infrastructure
-* Changes in the number of nodes, such as nodes lost or added
-* Changes in the cluster's Kubernetes version
-* Changes in the application version that the instance is running
+Grants the holder permission to update nodegroup details.
-To access the **Instance details** page, go to **Customers** and click the **Customer reporting** button for the customer that you want to view:
+### kots/cluster/[:clusterId]/kubeconfig
-![Customer reporting button on the Customers page](/images/customer-reporting-button.png)
+Grants the holder permission to get the kubeconfig for a cluster.
-From the **Reporting** page for the selected customer, click the **View details** button for the desired application instance.
+### kots/cluster/[:clusterId]/delete
-The following shows an example of the **Instance details** page:
+Grants the holder permission to delete a cluster.
-![Instance details full page](/images/instance-details.png)
+### kots/cluster/[:clusterId]/addon/list
-[View a larger version of this image](/images/instance-details.png)
+Grants the holder permission to list addons for a cluster.
-As shown in the image above, the **Instance details** page includes the following sections:
+### kots/cluster/[:clusterId]/addon/[:addonId]/read
-* **Current State**: Information about the state of the instance, such as the current application version. See [Current State](#current-state) below.
-* **Instance Insights**: Key performance indicators (KPIs) related to health, performance, and adoption. See [Insights](#insights) below.
-* **Instance Information**: Information about the cluster where the instance is installed, such as the version of Kubernetes running on the cluster. See [Instance Information](#instance-information) below.
-* **Custom Metrics**: The values for any custom metrics that are configured for the application, from the most recent check-in. For more information about configuring custom metrics, see [Configuring Custom Metrics](/vendor/custom-metrics).
-* **Instance Uptime**: Details about instance uptime over time. See [Instance Uptime](#instance-uptime) below.
-* **Instance Activity**: Event data stream. See [Instance Activity](#instance-activity) below.
+Grants the holder permission to read the addon for a cluster.
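For context, these Compatibility Matrix resources presumably gate the corresponding Replicated CLI operations. A hedged sketch (check the Replicated CLI documentation for current flags before relying on these):

```bash
# Would require kots/cluster/create and kots/cluster/list, respectively
replicated cluster create --distribution kind --version 1.29
replicated cluster ls
```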
-### Current State
-The **Current State** section displays the following event data about the status and version of the instance:
+### kots/cluster/[:clusterId]/addon/[:addonId]/delete
-* **App status**: The status of the application. Possible statuses are Ready, Updating, Degraded, Unavailable, and Missing. For more information about enabling application status insights and how to interpret the different statuses, see [Enabling and Understanding Application Status](insights-app-status).
+Grants the holder permission to delete the addon for a cluster.
-  Additionally, for applications that include the [Replicated SDK](/vendor/replicated-sdk-overview), you can hover over the **App status** field to view the statuses of the individual resources deployed by the application, as shown in the example below:
+### kots/cluster/[:clusterId]/addon/create/objectStore
-  resource status pop up
+Grants the holder permission to create an object store for a cluster.
-  [View a larger version of this image](/images/resource-status-hover-current-state.png)
+### kots/cluster/[:clusterId]/port/expose
-* **App version**: The version label of the currently running release. You define the version label in the release properties when you promote the release. For more information about defining release properties, see [Properties](releases-about#properties) in _About Channels and Releases_.
+Grants the holder permission to expose a port for a cluster.
-  If there is no version label for the release, then the Vendor Portal displays the release sequence in the **App version** field. You can find the sequence number associated with a release by running the `replicated release ls` command. See [release ls](/reference/replicated-cli-release-ls) in the _Replicated CLI_ documentation.
+### kots/cluster/[:clusterId]/port/delete
-* **Version age**: The absolute and relative ages of the instance:
+Grants the holder permission to delete a port for a cluster.
-  * **Absolute age**: `now - current_release.promoted_date`
-    The number of days since the currently running application version was promoted to the channel. For example, if the instance is currently running version 1.0.0, and version 1.0.0 was promoted to the channel 30 days ago, then the absolute age is 30.
+### kots/cluster/[:clusterId]/port/list
-  * **Relative age (Days Behind Latest)**: `channel.latest_release.promoted_date - current_release.promoted_date`
-    The number of days between when the currently running application version was promoted to the channel and when the latest available version on the channel was promoted.
-    For example, the instance is currently running version 1.0.0, which was promoted to the Stable channel. The latest version available on the Stable channel is 1.5.0. If 1.0.0 was promoted 30 days ago and 1.5.0 was promoted 10 days ago, then the relative age of the application instance is 20 days.
+Grants the holder permission to list exposed ports for a cluster.
-* **Versions behind**: The number of versions between the currently running version and the latest version available on the channel where the instance is assigned.
+### kots/cluster/list-quotas
-  For example, the instance is currently running version 1.0.0, which was promoted to the Stable channel. If the later versions 1.1.0, 1.2.0, 1.3.0, 1.4.0, and 1.5.0 were also promoted to the Stable channel, then the instance is five versions behind.
+Grants the holder permission to list the quotas.
-* **Last check-in**: The timestamp when the instance most recently sent data to the Vendor Portal.
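For example, to look up the sequence numbers mentioned above, a sketch of the CLI call might look like the following (the app slug is hypothetical):

```bash
# List releases and their sequence numbers for the given app
replicated release ls --app my-app-slug
```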
+### kots/cluster/increase-quota
-### Instance Insights {#insights}
+Grants the holder permission to request an increase in the quota.
-The **Insights** section includes the following metrics computed by the Vendor Portal:
+### kots/vm/tag/update
-* [Uptime](#uptime)
-* [Time to Install](#time-to-install)
+Grants the holder permission to update VM tags.
-#### Uptime
+### kots/vm/ttl/update
-The Vendor Portal computes the total uptime for the instance as the fraction of time that the instance spends with a Ready, Updating, or Degraded status. The Vendor Portal also provides more granular details about uptime in the **Instance Uptime** graph. See [Instance Uptime](#instance-uptime) below.
+Grants the holder permission to update the VM TTL.
-High uptime indicates that the application is reliable and able to handle the demands of the customer environment. Low uptime might indicate that the application is prone to errors or failures. By measuring the total uptime, you can better understand the performance of your application.
+### kots/vm/[:vmId]/port/expose
-The following table lists the application statuses that are associated with an Up or Down state in the total uptime calculation:
+Grants the holder permission to expose a port for a VM.
| Uptime State | Application Statuses |
|---|---|
| Up | Ready, Updating, or Degraded |
| Down | Missing or Unavailable |
+### kots/vm/[:vmId]/port/list
-:::note
-The Vendor Portal includes time spent in a Degraded status in the total uptime for an instance because an app may still be capable of serving traffic when some subset of desired replicas are available. Further, it is possible that a Degraded state is expected during upgrade.
-:::
+Grants the holder permission to list exposed ports for a VM.
-#### Time to Install
+### kots/vm/[:vmId]/addon/[:addonId]/delete
-The Vendor Portal computes both _License time to install_ and _Instance time to install_ metrics to represent how quickly the customer was able to deploy the application to a Ready state in their environment.
+Grants the holder permission to delete the addon for a VM.
-Replicated recommends that you use Time to Install as an indicator of the quality of the packaging, configuration, and documentation of your application.
+## Team
-If the installation process for your application is challenging, poorly documented, lacks appropriate preflight checks, or relies heavily on manual steps, then it can take days or weeks to deploy the application in customer environments. A longer Time to Install generally represents a significantly increased support burden and a degraded customer installation experience.
+### team/auditlog/read
-The following describes the _License time to install_ and _Instance time to install_ metrics:
+Grants the holder permission to view the audit log for the team.
-* **License time to install**: The time between when you create the customer license in the Vendor Portal, and when the application instance reaches a Ready status in the customer environment.
+### team/authentication/update
-  License time to install represents the time that it takes for a customer to successfully deploy your application after you intend to distribute the application to the customer. Replicated uses the timestamp of when you create the customer license in the Vendor Portal to represent your intent to distribute the application because creating the license file is generally the final step before you share the installation materials with the customer.
+Grants the holder permission to manage the following team authentication settings: Google authentication, Auto-join, and SAML authentication.
-  License time to install includes several activities that are involved in deploying the application, including the customer receiving the necessary materials and documentation, downloading the assets, provisioning the required hardware, networking, external systems, completing the preflight checks, and finally installing, configuring, and deploying the application.
+### team/authentication/read
-* **Instance time to install**: The time between when the Vendor Portal records the first event for the application instance in the customer environment, and when the instance reaches a Ready status.
+Grants the holder permission to read the following authentication settings: Google authentication, Auto-join, and SAML authentication.
-  Instance time to install is the length of time that it takes for the application to reach a Ready state after the customer starts a deployment attempt in their environment. Replicated considers a deployment attempt started when the Vendor Portal first records an event for the instance.
-  For more information about how the Vendor Portal generates events, see [About Events](instance-insights-event-data#about-events) in _Event Data_.
-  :::note
-  Instance time to install does _not_ include any deployment attempts that a customer might have made that did not generate an event. For example, time spent by the customer discarding the server used in a failed attempt before attempting to deploy the instance again on a new server.
-  :::
+### team/integration/list
-### Instance Information
+Grants the holder permission to view the team's integrations.
-The **Instance Information** section displays the following details about the cluster infrastructure where the application is installed as well as vendor-defined metadata about the instance:
+### team/integration/create
-* The Kubernetes distribution for the cluster. For example, GKE or EKS.
-* The version of Kubernetes running in the cluster.
-* The version of KOTS or the Replicated SDK installed in the cluster.
-* For **First Seen**, the timestamp of the first event that the Vendor Portal generated for the instance. For more information about how the Vendor Portal generates events, see [About Events](instance-insights-event-data#about-events) in _Event Data_.
-* If detected, the cloud provider and region where the cluster is running. For example, `GCP: us-central1`.
-* An optional vendor-defined name for the instance.
-* Optional vendor-defined instance tags in the form of key-value pairs. Each instance can have a maximum of 10 tags.
+Grants the holder permission to create an integration.
-In addition to the details listed above, the **Instance Information** section also displays the following for embedded clusters provisioned by Replicated kURL:
-* Node operating systems
-* Node operating system versions
-* Total number of cluster nodes
-* Number of cluster nodes in a Ready state
-* ID of the kURL installer specification
+### team/integration/[:integrationId]/delete
-### Instance Uptime
+Grants the holder permission to delete specified integration(s).
-The **Instance Uptime** graph shows the percentage of a given time period that the instance was in an Up, Degraded, or Down state.
+### team/integration/[:integrationId]/update
-To determine if the instance is Up, Degraded, or Down, the Vendor Portal uses the application status. Possible application statuses are Ready, Updating, Degraded, Unavailable, and Missing. The following table lists the application statuses that are associated with each state in the **Instance Uptime** graph:
+Grants the holder permission to update specified integration(s).
| Uptime State | Application Statuses |
|---|---|
| Up | Ready or Updating |
| Degraded | Degraded |
| Down | Missing or Unavailable |
+### team/members/list
-The following shows an example of an **Instance Uptime** graph:
+Grants the holder permission to list team members and invitations.
-![Uptime Graph on the Instance details page](/images/instance-uptime-graph.png)
+### team/member/invite
-You can hover over the bars in the **Instance Uptime** graph to view more detail about the percent of time that the instance was in each state during the given time period.
+Grants the holder permission to invite additional people to the team.
-![Uptime Graph with event markers on the Instance details page](/images/instance-uptime-graph-event-markers.png)
+### team/members/delete
-You can hover over the event markers in the **Instance Uptime** graph to view more detail about the events that occurred during that given interval on the graph. If more than two events occurred in that period, the event marker displays the number of events that occurred during that period. If you click the event marker or the event in the tooltip, the **Instance Activity** section highlights the event or the first event in the group.
+Grants the holder permission to delete other team members.
-### Instance Activity
+### team/notifications/slack-webhook/read
-The **Instance Activity** section displays recent events for the instance. The data stream is updated each time an instance _check-in_ occurs. For more information about what triggers an instance check-in, see [How the Vendor Portal Collects Instance Data](instance-insights-event-data#about-reporting) in _About Instance and Event Data_.
+Grants the holder permission to view the team's Slack webhook for instance notifications.
-The timestamp of events displayed in the **Instance Activity** stream is the timestamp when the Replicated Vendor API received data from the instance. The timestamp of events does not necessarily reflect the timestamp of when the event occurred.
+### team/notifications/slack-webhook/update
-The following shows an example of the **Instance Activity** data stream:
+Grants the holder permission to edit the team's Slack webhook for instance notifications.
-![Instance Activity section of Instance details page](/images/instance-activity.png)
+### team/policy/read
-You can filter the **Instance Activity** stream by the following categories:
+Grants the holder permission to view RBAC policies for the team.
-* [App install/upgrade](#app-install-upgrade)
-* [App status](#app-status)
-* [Cluster status](#cluster)
-* [Custom metrics](#custom-metrics)
-* [Infrastructure status](#infrastructure)
-* [KOTS version](#kots)
-* [Replicated SDK version](#sdk)
-* [Upstream update](#upstream)
+### team/policy/update
-The following tables describe the events that can be displayed in the **Instance Activity** stream for each of the categories above:
+Grants the holder permission to update RBAC policies for the team.
-#### App install/upgrade {#app-install-upgrade}

| Label | Description |
|---|---|
| App Channel | The ID of the channel the application instance is assigned. |
| App Version | The version label of the release that the instance is currently running. The version label is the version that you assigned to the release when promoting it to a channel. |
+### team/policy/delete
+Grants the holder permission to delete RBAC policies for the team.
-#### App status {#app-status}

| Label | Description |
|---|---|
| App Status | A string that represents the status of the application. Possible values: Ready, Updating, Degraded, Unavailable, Missing. For applications that include the Replicated SDK, hover over the application status to view the statuses of the individual resources deployed by the application. For more information, see Enabling and Understanding Application Status. |
+### team/policy/create
+Grants the holder permission to create RBAC policies for the team.
-#### Cluster status {#cluster}

| Label | Description |
|---|---|
| Cluster Type | Indicates if the cluster was provisioned by kURL. Possible values: kURL (the cluster is provisioned by kURL) or Existing (the cluster is not provisioned by kURL). For more information about kURL clusters, see Creating a kURL installer. |
| Kubernetes Version | The version of Kubernetes running in the cluster. |
| Kubernetes Distribution | The Kubernetes distribution of the cluster. Possible values: EKS, GKE, K3S, RKE2. |
| kURL Nodes Total | Total number of nodes in the cluster. Note: Applies only to kURL clusters. |
| kURL Nodes Ready | Number of nodes in the cluster that are in a healthy state and ready to run Pods. Note: Applies only to kURL clusters. |
| New kURL Installer | The ID of the kURL installer specification that kURL used to provision the cluster. Indicates that a new Installer specification was added. An installer specification is a manifest file that has apiVersion: cluster.kurl.sh/v1beta1 and kind: Installer. For more information about installer specifications for kURL, see Creating a kURL installer. Note: Applies only to kURL clusters. |
+### team/security/update
+Grants the holder permission to manage team password requirements including two-factor authentication and password complexity requirements.
-#### Custom metrics {#custom-metrics}
-You can filter the activity feed by any custom metrics that are configured for the application. The labels for the custom metrics vary depending on the custom key value pairs included in the data set that is sent to the Vendor Portal. For example, the key value pair `"num_projects": 5` is displayed as **Num Projects: 5** in the activity feed.
-For more information about configuring custom metrics, see [Configuring Custom Metrics](/vendor/custom-metrics).
+### team/serviceaccount/list
+Grants the holder permission to list service accounts.
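As a sketch of how such a key-value pair reaches the Vendor Portal: an instance that includes the Replicated SDK can POST custom metrics to the SDK's in-cluster API. This example assumes the SDK's default in-cluster service name `replicated` on port 3000 and the custom metrics endpoint described in the Replicated SDK API documentation; verify both against the current SDK docs:

```bash
# Run from a pod in the same namespace as the SDK service
curl -X POST http://replicated:3000/api/v1/app/custom-metrics \
  -H "Content-Type: application/json" \
  -d '{"data": {"num_projects": 5}}'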
-#### Infrastructure status {#infrastructure}

| Label | Description |
|---|---|
| Cloud Provider | The cloud provider where the instance is running. Cloud provider is determined by the IP address that makes the request. Possible values: AWS, GCP, DigitalOcean. |
| Cloud Region | The cloud provider region where the instance is running. For example, us-central1-b. |
+### team/serviceaccount/create
+Grants the holder permission to create new service accounts.
-#### KOTS version {#kots}

| Label | Description |
|---|---|
| KOTS Version | The version of KOTS that the instance is running. KOTS version is displayed as a Semantic Versioning compliant string. |
+### team/serviceaccount/[:name]/delete
+Grants the holder permission to delete the service account identified by the name specified.
-#### Replicated SDK version {#sdk}

| Label | Description |
|---|---|
| Replicated SDK Version | The version of the Replicated SDK that the instance is running. SDK version is displayed as a Semantic Versioning compliant string. |
+### team/support-issues/read
+Grants the holder Read permissions in the Replicated collab repository in GitHub for the Vendor Portal team. Applies after the user adds their GitHub username to the Vendor Portal [Account Settings](https://vendor.replicated.com/account-settings) page.
-#### Upstream update {#upstream}

| Label | Description |
|---|---|
| Versions Behind | The number of versions between the version that the instance is currently running and the latest version available on the channel. Computed by the Vendor Portal each time it receives instance data. |
+To prevent access to the collab repository for an RBAC policy, add `team/support-issues/read` to the `denied:` list in the policy. For example:
-================
-File: docs/vendor/instance-insights-event-data.mdx
-================
-import AirGapTelemetry from "../partials/instance-insights/_airgap-telemetry.mdx"
+```
+{
+  "v1": {
+    "name": "Policy Name",
+    "resources": {
+      "allowed": [],
+      "denied": [
+        "team/support-issues/read"
+      ]
+    }
+  }
+}
+```
-# About Instance and Event Data
+For more information about the Read role in GitHub, see [Permissions for each role](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/repository-roles-for-an-organization#permissions-for-each-role) in the GitHub documentation.
-This topic provides an overview of the customer and instance insights that you can view in the Replicated Vendor Portal. It includes information about how the Vendor Portal accesses data as well as requirements and limitations.
+### team/support-issues/write
-## How the Vendor Portal Collects Instance Data {#about-reporting}
+Grants the holder Write permissions in the Replicated collab repository in GitHub for the Vendor Portal team. Applies after the user adds their GitHub username to the Vendor Portal [Account Settings](https://vendor.replicated.com/account-settings) page.
-This section describes how the Vendor Portal collects instance data from online and air gap environments.
+For more information about the Write role in GitHub, see [Permissions for each role](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/repository-roles-for-an-organization#permissions-for-each-role) in the GitHub documentation.
-### Online Instances
+### team/support-issues/triage
-For instances running in online (internet-connected) environments, either Replicated KOTS or the Replicated SDK periodically sends a small amount of data to the Vendor Portal, depending on which is installed in the cluster alongside the application. If both KOTS and the SDK are installed in the cluster (such as when a Helm chart that includes the SDK is installed by KOTS), then both send instance data.
+Grants the holder Triage permissions in the Replicated collab repository in GitHub for the Vendor Portal team. Applies after the user adds their GitHub username to the Vendor Portal [Account Settings](https://vendor.replicated.com/account-settings) page.
-The data sent to the Vendor Portal includes properties such as the current version and status of the instance. For a full overview of what data might be included, see the [Replicated Data Transmission Policy](https://docs.replicated.com/vendor/policies-data-transmission).
+For more information about the Triage role in GitHub, see [Permissions for each role](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/repository-roles-for-an-organization#permissions-for-each-role) in the GitHub documentation.
-The following diagram shows the flow of different types of data from customer environments to the Vendor Portal:
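Conversely, a hypothetical policy sketch that grants only read access to support issues, using the same `v1` schema as the deny-list example above (the policy name is illustrative):

```
{
  "v1": {
    "name": "Support Read Only",
    "resources": {
      "allowed": [
        "team/support-issues/read"
      ],
      "denied": []
    }
  }
}
```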
-![Telemetry sent from instances to vendor platform](/images/telemetry-diagram.png)
-[View a larger version of this image](/images/telemetry-diagram.png)
+### team/support-issues/admin
-As shown in the diagram above, application instance data, application status data, and details about the KOTS and the SDK instances running in the cluster are all sent to the Vendor Portal through the Replicated app service:
+Grants the holder Admin permissions in the Replicated collab repository in GitHub for the Vendor Portal team. Applies after the user adds their GitHub username to the Vendor Portal [Account Settings](https://vendor.replicated.com/account-settings) page.
-* When both KOTS and the SDK are installed in the cluster, they both send application instance data, including information about the cluster where the instance is running.
-* KOTS and the SDK both send information about themselves, including the version of KOTS or the SDK running in the cluster.
-* Any custom metrics configured by the software vendor are sent to the Vendor Portal through the Replicated SDK API. For more information, see [Configuring Custom Metrics](/vendor/custom-metrics).
-* Application status data, such as if the instance is ready or degraded, is sent by KOTS. If KOTS is not installed in the cluster, then the SDK sends the application status data. For more information, see [Enabling and Understanding Application Status](/vendor/insights-app-status).
+For more information about the Admin role in GitHub, see [Permissions for each role](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/repository-roles-for-an-organization#permissions-for-each-role) in the GitHub documentation.
-### Air Gap Instances
+## User
-For more information, see [Collecting Telemetry for Air Gap Instances](/vendor/telemetry-air-gap).
+### user/token/list
-## Frequency of Data Sent to the Vendor Portal
+Grants the holder permission to list user tokens.
-This section describes how frequently data is sent to the Vendor Portal for online and air gap instances.
+### user/token/create
-### From the Replicated SDK (Online Instances Only)
+Grants the holder permission to create new user tokens.
-When installed alongside the application in an online environment, the SDK automatically sends instance data to the Vendor Portal when any of the following occur:
+### user/token/delete
-* The SDK sends data every four hours.
+Grants the holder permission to delete user tokens.
-* The instance checks for updates. An update check occurs when the instance makes a request to the `/api/v1/app/updates` SDK API endpoint. See [app](/reference/replicated-sdk-apis#app) in _Replicated SDK API (Alpha)_.
+================
-* The instance completes a Helm update to a new application version. After the update completes, the SDK sends data when it restarts.
+File: docs/vendor/team-management-saml-auth.md
+================
-* The status of an instance changes. For example, an instance can change from a Ready to Degraded status. For more information, see [Enabling and Understanding Application Status](insights-app-status).
+# Managing SAML Authentication
-### From KOTS (Online Instances Only)
+This topic describes how to enable or disable SAML authentication for the Replicated Vendor Portal.
-When installed alongside the application in an online environment, KOTS automatically sends instance data to the Vendor Portal when any of the following occur:
+## About Using SAML with the Vendor Portal
-* The instance checks for updates. By default, KOTS checks for updates every four hours. Additionally, an update check can occur when a user clicks the **Check for updates** button in the Replicated Admin Console.
+After starting out with Replicated, most teams grow, adding more developers, support engineers, and sales engineers. Eventually, managing access to the Vendor Portal can become difficult. Replicated supports logging in using SAML, which lets you manage access (provisioning and unprovisioning accounts) through your SAML identity provider.
-  :::note
-  KOTS users can modify or disable automatic update checks from the Admin Console. For more information, see [Configuring Automatic Updates](/enterprise/updating-apps).
-  :::
+Using SAML, everyone on your team logs in with their existing usernames and passwords through your identity provider's dashboard. Users do not need to sign up through the Vendor Portal or log in with a separate Vendor Portal account, simplifying their experience.
-* The status of an instance changes. For example, an instance can change from a Ready to Degraded status. For more information, see [Enabling and Understanding Application Status](insights-app-status).
+### Enabling SAML in Your Vendor Account
-* (KOTS v1.92 and later only) The instance deploys a new application version.
+To enable SAML in your Vendor Portal account, you must have an Enterprise plan. For access to SAML, you can contact Replicated through [Support](https://vendor.replicated.com/support). For information about the Enterprise plan, see [pricing](https://www.replicated.com/pricing/).
-### From Air Gap Instances
+### SCIM
-For air gap instances, the frequency of data sent to the Vendor Portal depends on how frequently support bundles are collected in the customer environment and uploaded to the Vendor Portal.
+Replicated does not implement System for Cross-domain Identity Management (SCIM). Instead, we use SAML to authenticate and create just-in-time user identities in our system. We resolve the username (email address) as the actor and use this to ensure that audit log events follow these dynamically provisioned users. If a user's email address is already associated with a Replicated account, by using your SAML integration to access the Vendor Portal, they automatically leave their current team and join the team associated with the SAML login.
-For more information, see [Collecting Telemetry for Air Gap Instances](/vendor/telemetry-air-gap).
+### Compatibility with Two-Factor Authentication
-## How the Vendor Portal Generates Events and Insights {#about-events}
+If SAML authentication is configured for your team, Replicated two-factor authentication (2FA) is bypassed. You can leave 2FA enabled, but you are not prompted to enter a code when logging in.
-When the Vendor Portal receives instance data, it evaluates each data field to determine if there was a change in its value. For each field that changes in value, the Vendor Portal creates an _event_ to record the change. For example, a change from Ready to Degraded in the application status generates an event.
+### Role Based Access Control
-In addition to creating events for changes in data sent by the instance, the Vendor Portal also generates events for changes in values of computed metrics. The Vendor Portal updates the values of computed metrics each time it receives instance data.
-For example, the Vendor Portal computes a _Versions behind_ metric that tracks the number of versions behind the latest available version for the instance. When the instance checks for updates and a new update is available, the value of this metric changes and the Vendor Portal generates an event.
+Replicated supports Role Based Access Control (RBAC) in the Vendor Portal. To use RBAC with SAML, you must configure policies and add users to the policies by their username. Usernames are the identity of the user in your identity provider (IdP). Typically, this username is the full email address. For more information about configuring RBAC, see [Configuring RBAC Policies](team-management-rbac-configuring).
-The Vendor Portal uses events to display insights for each active instance on an **Instance details** dashboard. For more information about using the Vendor Portal **Instance details** page to monitor active instances of your application, see [Instance Details](instance-insights-details).
+## Downloading Certificates from Supported SAML Providers
-## Requirements
+You must retrieve the metadata and x.509 public certificate files from your SAML provider before configuring SAML in the Vendor Portal. The certificate file must be in PEM format.
-The following requirements apply to collecting instance telemetry:
+Replicated tests several SAML providers, but the service should be compatible with any SAML 2.0 compliant service provider. We provide full support for the following SAML providers:
-* Replicated KOTS or the Replicated SDK must be installed in the cluster where the application instance is running.
+* Okta. For more information about integrating Okta with Replicated, see [Configure Okta](#configure-okta).
-* For KOTS installations and for Helm CLI installations that use `helm template` then `kubectl apply`, additional configuration is required to get application status data. For more information, see [Enabling and Understanding Application Status](/vendor/insights-app-status).
+* OneLogin
-* To view resource status details for an instance on the **Instance details** page, the Replicated SDK must be installed in the cluster alongside the application. For more information, see [View Resource Status Insights](insights-app-status#resource-status) in _Enabling and Understanding Application Status_.
-* There are additional requirements for collecting telemetry from air gap instances. For more information, see [Collecting Telemetry for Air Gap Instances](/vendor/telemetry-air-gap).
+## Configure Okta
-## Limitations
+The first part of the Vendor Portal and Okta integration is configured in the Okta dashboard. This configuration lets you download the XML Metadata file and x.509 public certificate in PEM format required for the SAML authentication.
-The Vendor Portal has the following limitations for reporting instance data and generating events:
+This procedure outlines the basic configuration steps, recommended settings, and the specific fields to configure in Okta. For more information about using Okta, see the [Okta](https://help.okta.com/en/prod/Content/index.htm) documentation.
-* **Active instances**: Instance data is available for _active_ instances. An instance is considered inactive when its most recent check-in was more than 24 hours ago. An instance can become inactive if it is decommissioned, stops checking for updates, or otherwise stops reporting.
+To configure Okta and download the required files:
-  The Vendor Portal continues to display data for an inactive instance from its most-recently seen state. This means that data for an inactive instance might continue to show a Ready status after the instance becomes inactive. Replicated recommends that you use the timestamp in the **Last Check-in** field to understand if an instance might have become inactive, causing its data to be out-of-date.
-* **Air gap instances**: There are additional limitations for air gap telemetry. For more information, see [Collecting Telemetry for Air Gap Instances](/vendor/telemetry-air-gap).
-* **Instance data freshness**: The rate at which data is updated in the Vendor Portal varies depending on how often the Vendor Portal receives instance data.
-* **Event timestamps**: The timestamp of events displayed on the **Instance details** page is the timestamp when the Replicated Vendor API received the data from the instance. The timestamp of events does not necessarily reflect the timestamp of when the event occurred.
-* **Caching for kURL cluster data**: For clusters created with Replicated kURL (embedded clusters), KOTS stores the counts of total nodes and ready nodes in a cache for five minutes. If KOTS sends instance data to the Vendor Portal within the five minute window, then the reported data for total nodes and ready nodes reflects the data in the cache. This means that events displayed on the **Instance details** page for the total nodes and ready nodes can show values that differ from the current values of these fields.
+1. Log in to your Okta Admin dashboard, and click **Applications**.
-================
-File: docs/vendor/instance-notifications-config.mdx
-================
-import NotificationsAbout from "../partials/instance-insights/_notifications-about.mdx"
+1. Select **Create new app integration**, and create a new application as a SAML 2.0 application.
+1. Provide a name and icon for the application, such as Replicated Vendor Portal. You can download a high quality Replicated icon [here](https://help.replicated.com/images/guides/vendor-portal-saml/replicated-application-icon.png).
-# Configuring Instance Notifications (Beta)
+1. Click **Next**.
-
+   The Configuring SAML page opens.
-This topic describes how to configure Slack or email notifications in the Replicated Vendor Portal for instances of your application.
+1. Click **Download Okta Certificate**. This downloads your x.509 certificate to provide to Replicated. Save this file to a safe location.
-For information about creating and managing instance notifications with the Vendor API v3, see the [notifications](https://replicated-vendor-api.readme.io/reference/subscribeinstanceevents) section in the Vendor API v3 documentation.
+1. On this same page, edit the following fields:
-## Overview
+   | Field Name | Description |
+   | :--- | :--- |
+   | Single Sign On URL | Set this to `https://id.replicated.com/v1/saml`. |
+   | Audience URI (SP Entity ID) | Displays on the Vendor Portal [SAML authentication](https://vendor.replicated.com/team/saml-authentication) tab, and is unique to your team. |
+   | Name ID Format | Change this to `EmailAddress`. |
-Teams can receive notifications about customer instances through a Slack channel. Individual users can also receive email notifications.
+1. Click **Next**.
-Instance notifications can be disabled when they are no longer needed. For example, a team member can turn off their email notifications for a customer instance when they are no longer responsible for supporting that customer.
+1. Select **I'm an Okta customer adding an internal app** on the final screen, and click **Finish**.
-## Prerequisite
+1. Click **Identity provider metadata** to download the Metadata.xml file. This likely opens an XML download that you can right-click and select **Save Link As…** to download this file.
-For Slack notifications, you must configure a Slack webhook in the Vendor Portal at the Team level before you can turn on instance notifications. For more information, see [Configuring a Slack Webhook (Beta)](team-management-slack-config).
+### Next Step
-For email notifications, no prior configuration is required. The email address listed in your Vendor Portal account settings is used.
+Configure and enable SAML in the Vendor Portal. For more information, see [Configure SAML](#configure-saml).
-## Configure Notifications
+## Configure SAML
-Follow this procedure to configure Slack or email notifications for application instances. You can enable notifications for application status changes, system events such as Kubernetes upgrades, or changes in the values of any custom metrics configured for the application.
+When you initially configure SAML, we do not recommend that you disable username/password access at the same time. It is possible, and recommended during testing, to support both SAML and non-SAML authentication on your account simultaneously.
-To configure notifications:
+**Prerequisite**
-1. Go to **Applications > Customers**, and click an active customer instance that you want to receive notifications for.
+- Download your XML Metadata file and x.509 public certificate PEM file from your SAML provider. For more information on supported SAML providers and how to find these files, see [Supported SAML providers](#downloading-certificates-from-supported-saml-providers).
-   Customer instances list in the Vendor Portal
+To configure SAML:
-1. On the Instance Details page, click **Notifications**.
+1. Log in to the Vendor Portal [Team Members page](https://vendor.replicated.com/team/members) as a user with Admin access.
+1. Click [SAML Authentication](https://vendor.replicated.com/team/saml-authentication) from the left menu. If you do not see these options, contact [Support](https://vendor.replicated.com/support).
-
+   The SAML Authentication page opens.
-1. From the **Configure Instance Notifications** dialog, select the types of notifications to enable.
+   ![SAML Authentication](/images/team-mgmt-saml-authentication.png)
-   ![Configure Instance Notifications dialog](/images/instance-notifications-dialog.png)
+   [View a larger version of this image](/images/team-mgmt-saml-authentication.png)
-   [View a larger version of this image](/images/instance-notifications-dialog.png)
+1. Browse for, or drag and drop, your XML Metadata file and x.509 PEM file from your SAML provider.
-1. Click **Save**.
+1. Click **Upload Metadata & Cert**.
-1. Repeat these steps to configure notifications for other application instances.
+### Next Step
-## Test Notifications
+At this point, SAML is configured, but not enabled. The next step is to enable SAML enforcement options. For more information, see [Enable SAML Enforcement](#enable-saml-enforcement).
-After you enable notifications for a running development instance, test that your notifications are working as expected.
+## Enable SAML Enforcement
Replicated provides options that can be enabled or disabled at any time. You can also change the IdP metadata if needed.

-Then, look for notifications in the assigned Slack channel. You also receive an email if you enabled email notifications.
+To enable SAML enforcement:

-:::note
-There is a 30-second buffer between event detection and notifications being sent. This buffer provides better roll-ups and reduces noise.
-:::
+1. From the Vendor Portal, select **Team > [SAML Authentication](https://vendor.replicated.com/team/saml-authentication)**.

-================
-File: docs/vendor/kots-faq.mdx
-================
-import SDKOverview from "../partials/replicated-sdk/_overview.mdx"
-import EmbeddedKubernetes from "../partials/kots/_embedded-kubernetes-definition.mdx"
-import Helm from "../partials/helm/_helm-definition.mdx"
-import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"
+1. Select either or both login method options in the Manage your SAML authentication pane. Allowing both login methods is a good way to test SAML without risking any interruption for the rest of your team.

-# Replicated FAQs
+   **Enable SAML for team logins** - Allows members of your team to log in to the Vendor Portal through your identity provider. This option does not remove, change, or restrict any other authentication methods that you have configured in the Vendor Portal. If you enable SAML and your team is already logging in with accounts provisioned in the Vendor Portal, they will be able to continue logging in with those accounts.

-This topic lists frequently-asked questions (FAQs) for different components of the Replicated Platform.
+   **Only allow SAML logins** - Requires members of your team to log in to the Vendor Portal through your identity provider. Prevents any non-SAML accounts from logging in. Replicated does not delete the existing accounts. If you turn on this option and then later disable it, accounts that never logged in using SAML will be able to log in again. If an account exists outside of SAML and then is authenticated with SAML, the account is converted and cannot authenticate using a password again.

-## Getting Started FAQs
+   ![SAML Authentication](/images/team-mgmt-saml-manage-auth.png)

-### What are the supported application packaging options?
+   [View a larger version of this image](/images/team-mgmt-saml-manage-auth.png)

-Replicated strongly recommends that all applications are packaged using Helm.
+1. (Optional) Set a default policy for new accounts from the drop-down list.
+1. (Optional) Click **Change IdP Metadata** and follow the prompts to upload any changes to your metadata.

-
+SAML is now enabled on your account. For your team to use the SAML login option, you must enable access through your SAML identity provider’s dashboard. For example, if you use Okta, assign the application to users or groups. When a user clicks through to use the application, they are granted access as described in [SCIM](#scim).

-Many enterprise customers expect to be able to install an application with Helm in their own cluster. Packaging with Helm allows you to support installation with the Helm CLI and with the Replicated installers (Replicated Embedded Cluster and Replicated KOTS) from a single release in the Replicated Platform.
+## Disable SAML Enforcement

-For vendors that do not want to use Helm, applications distributed with Replicated can be packaged as Kubernetes manifest files.
+You can disable SAML authentication options at any time and re-enable them later if needed.
-### How do I get started with Replicated?
+To disable SAML enforcement:

-Replicated recommends that new users start by completing one or more labs or tutorials to get familiar with the processes of creating, installing, and iterating on releases for an application with the Replicated Platform.
+1. From the Vendor Portal, select **Team > SAML Authentication**.

-Then, when you are ready to begin onboarding your own application to the Replicated Platform, see [Replicated Onboarding](replicated-onboarding) for a list of Replicated features to begin integrating.
+1. Click **Deprovision SAML** in the Manage your SAML authentication pane.

-#### Labs
+   ![SAML Authentication](/images/team-mgmt-saml-manage-auth.png)

-The following labs in Instruqt provide a hands-on introduction to working with Replicated features, without needing your own sample application or development environment:
+   [View a larger version of this image](/images/team-mgmt-saml-manage-auth.png)

-* [Distributing Your Application with Replicated](https://play.instruqt.com/embed/replicated/tracks/distributing-with-replicated?token=em_VHOEfNnBgU3auAnN): Learn how to quickly get value from the Replicated Platform for your application.
-* [Delivering Your Application as a Kubernetes Appliance](https://play.instruqt.com/embed/replicated/tracks/delivering-as-an-appliance?token=em_lUZdcv0LrF6alIa3): Use Embedded Cluster to distribute Kubernetes and an application together as a single appliance.
-* [Avoiding Installation Pitfalls](https://play.instruqt.com/embed/replicated/tracks/avoiding-installation-pitfalls?token=em_gJjtIzzTTtdd5RFG): Learn how to use preflight checks to avoid common installation issues and assure your customer is installing into a supported environment.
-* [Closing the Support Information Gap](https://play.instruqt.com/embed/replicated/tracks/closing-information-gap?token=em_MO2XXCz3bAgwtEca): Learn how to use support bundles to close the information gap between your customers and your support team.
-* [Protecting Your Assets](https://play.instruqt.com/embed/replicated/tracks/protecting-your-assets?token=em_7QjY34G_UHKoREBd): Assure your customers have the right access to your application artifacts and features using Replicated licensing.
+================
+File: docs/vendor/team-management-slack-config.mdx
+================
+import NotificationsAbout from "../partials/instance-insights/_notifications-about.mdx"

-#### Tutorials
-The following getting started tutorials demonstrate how to integrate key Replicated features with a sample Helm chart application:
-* [Install a Helm Chart on a VM with Embedded Cluster](/vendor/tutorial-embedded-cluster-setup): Create a release that can be installed on a VM with the Embedded Cluster installer.
-* [Install a Helm Chart with KOTS and the Helm CLI](/vendor/tutorial-kots-helm-setup): Create a release that can be installed with both the KOTS installer and the Helm CLI.
-* [Set Helm Chart Values with KOTS](/vendor/tutorial-config-setup): Configure the Admin Console Config screen to collect user-supplied values.
-* [Add Preflight Checks to a Helm Chart](/vendor/tutorial-preflight-helm-setup): Create preflight checks for your application by adding a preflight check spec to a Secret in the Helm templates.
+# Configuring a Slack Webhook (Beta)

-### What are air gap installations?
+As a vendor, anyone on your team can set up Slack notifications, which are sent to a shared Slack channel. Notifications give your team visibility into customer instance statuses and changes.
-_Air gap_ refers to a computer or network that does not have outbound internet access. Air-gapped environments are common for enterprises that require high security, such as government agencies or financial institutions.
+

-Traditionally, air-gapped systems are physically isolated from the network. For example, an air-gapped server might be stored in a separate location away from network-connected servers. Physical access to air-gapped servers is often restricted as well.
+While email notifications are specific to each user, Slack notification settings are shared, viewable, and editable by the entire team. Any changes made by a team member impact the team.

-It is also possible to use _virtual_ or _logical_ air gaps, in which security controls such as firewalls, role-based access control (RBAC), and encryption are used to logically isolate a device from a network. In this way, network access is still restricted, but there is not a physical air gap that disconnects the device from the network.
+## Limitations

-Replicated supports installations into air-gapped environments. In an air gap installation, users first download the images and other assets required for installation on an internet-connected device. These installation assets are usually provided in an _air gap bundle_ that ISVs can build in the Replicated Vendor Portal. Then, users transfer the installation assets to their air-gapped machine where they can push the images to an internal private registry and install.
+As a Beta feature, the following limitations apply:

-For more information, see:
-* [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap)
-* [Installing and Updating with Helm in Air Gap Environments](/vendor/helm-install-airgap)
+- Only one Slack channel per team is supported.

-### What is the Commercial Software Distribution Lifecycle?
+- RBAC policies are not supported for configuring granular permissions.

-Commercial software distribution is the business process that independent software vendors (ISVs) use to enable enterprise customers to self-host a fully private instance of the vendor's application in an environment controlled by the customer.
+## Prerequisite

-Replicated has developed the Commercial Software Distribution Lifecycle to represent the stages that are essential for every company that wants to deliver their software securely and reliably to customer-controlled environments.
+Create a Slack webhook URL. For more information, see [Sending Messages using Incoming Webhooks](https://api.slack.com/messaging/webhooks) in the Slack API documentation.

-This lifecycle was inspired by the DevOps lifecycle and the Software Development Lifecycle (SDLC), but it focuses on the unique requirements for successfully distributing commercial software to tens, hundreds, or thousands of enterprise customers.
+Make sure to keep the URL secure because it contains a secret that allows write access to one or more channels in your Slack Workspace.

-The phases are:
-* Develop
-* Test
-* Release
-* License
-* Install
-* Report
-* Support
+## Configure the Webhook in the Vendor Portal

-For more information about the Replicated features that enhance each phase of the lifecycle, see [Introduction to Replicated](../intro-replicated).
+When you enable Slack notifications for a team, you must first configure the Slack webhook in the Vendor Portal. Typically, you do this one time. Then you can configure notifications for individual customer instances.
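+Optionally, you can verify that the webhook URL works before saving it in the Vendor Portal by posting a test message with `curl`. The following is a minimal sketch based on the Slack incoming webhooks documentation; the webhook URL shown is a placeholder, so replace it with the URL generated for your Slack app:
+
+```bash
+# Post a test message to the incoming webhook (the URL below is a placeholder)
+curl -X POST \
+  -H 'Content-type: application/json' \
+  --data '{"text": "Test notification from the Replicated Vendor Portal webhook"}' \
+  https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX
+```
+
+If the webhook is configured correctly, the test message appears in the channel that the webhook targets.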
-## Compatibility Matrix FAQs
+To configure the Slack webhook:

-### What types of clusters can I create with Compatibility Matrix?
+1. From the **[Team Vendor Portal](https://vendor.replicated.com/team/members)** page, click **Slack Notifications**.

-You can use Compatibility Matrix to get kubectl access to running clusters in minutes or less. Compatibility Matrix supports a variety of VM and cloud distributions, including Red Hat OpenShift, Replicated Embedded Cluster, and Oracle Container Engine for Kubernetes (OKE). For a complete list, see [Supported Compatibility Matrix Cluster Types](/vendor/testing-supported-clusters).
+1. On the **Slack Notifications Setup** page, paste the Slack webhook URL. Click **Save**.

-### How does billing work?
+## Next Step

-Clusters created with Compatibility Matrix are billed by the minute. Per-minute billing begins when the cluster reaches a running status and ends when the cluster is deleted. For more information, see [Billing and Credits](/vendor/testing-about#billing-and-credits).
+[Configure Slack notifications for customer instances](instance-notifications-config).

-### How do I buy credits?
+================
+File: docs/vendor/team-management-two-factor-auth.md
+================
+# Managing Two-Factor Authentication

-To create clusters with Compatibility Matrix, you must have credits in your Vendor Portal account. If you have a contract, you can purchase credits by logging in to the Vendor Portal and going to **[Compatibility Matrix > Buy additional credits](https://vendor.replicated.com/compatibility-matrix)**. Otherwise, to request credits, log in to the Vendor Portal and go to **[Compatibility Matrix > Request more credits](https://vendor.replicated.com/compatibility-matrix)**.
+This topic describes how to enable and disable Replicated two-factor authentication for individual and team accounts in the Replicated Vendor Portal.

-### How do I add Compatibility Matrix to my CI/CD pipelines?
+Alternatively, you can use Google Authentication or SAML Authentication to access the Vendor Portal. For more information about those options, see [Managing Google Authentication](team-management-google-auth) and [Managing SAML Authentication](team-management-saml-auth).

-You can use Replicated CLI commands to integrate Compatibility Matrix into your CI/CD development and production workflows. This allows you to programmatically create multiple different types of clusters where you can deploy and test your application before releasing.
+## About Two-Factor Authentication

-For more information, see [About Integrating with CI/CD](/vendor/ci-overview).
+Two-factor authentication (2FA) provides additional security by requiring two methods of authentication to access resources and data. When you enable the 2FA option in the Vendor Portal, you are asked to provide an authentication code and your password during authentication. Replicated uses the open Time-based One-Time Password (TOTP) algorithm, which is specified by the Internet Engineering Task Force (IETF) in RFC 6238.

-## KOTS and Embedded Cluster FAQs
+## Limitation

-### What is the Admin Console?
+If SAML Authentication or Google Authentication is configured and 2FA is also enabled, then 2FA is bypassed. You can leave 2FA enabled, but you are not prompted to enter a code when logging in.

-The Admin Console is the user interface deployed by the Replicated KOTS installer. Users log in to the Admin Console to configure and install the application.
Users also access the Admin Console after installation to complete application management tasks such as performing updates, syncing their license, and generating support bundles. For installations with Embedded Cluster, the Admin Console also includes a **Cluster Management** tab where users can manage the nodes in the cluster.
+## Enable 2FA on Individual Accounts

-The Admin Console is available in installations with Replicated Embedded Cluster and Replicated KOTS.
+If you are an administrator or if 2FA is enabled for your team, you can enable 2FA on your individual account.

-The following shows an example of the Admin Console dashboard for an Embedded Cluster installation of an application named "Gitea":
+To enable two-factor authentication on your individual account:

-admin console dashboard
+1. In the [Vendor Portal](https://vendor.replicated.com), click **Account Settings** from the dropdown list in the upper right corner of the screen.

-[View a larger version of this image](/images/gitea-ec-ready.png)
+   Vendor portal account settings

-### How do Embedded Cluster installations work?
+   [View a larger version of this image](/images/vendor-portal-account-settings.png)

-To install with Embedded Cluster, users first download and extract the Embedded Cluster installation assets for the target application release on their VM or bare metal server. Then, they run an Embedded Cluster installation command to provision the cluster. During installation, Embedded Cluster also installs Replicated KOTS in the cluster, which deploys the Admin Console.
+1. In the **Two-Factor Authentication** pane, click **Turn on two-factor authentication**.

-After the installation command finishes, users log in to the Admin Console to provide application configuration values, optionally join more nodes to the cluster, run preflight checks, and deploy the application.
+   Turn on 2FA in the Vendor Portal

-Customer-specific Embedded Cluster installation instructions are provided in the Replicated Vendor Portal. For more information, see [Installing with Embedded Cluster](/enterprise/installing-embedded).
+   [View a larger version of this image](/images/vendor-portal-password-2fa.png)

-### Does Replicated support installations into air gap environments?
+1. In the **Confirm password** dialog, enter your Vendor Portal account password. Click **Confirm password**.

-Yes. The Embedded Cluster and KOTS installers support installation in _air gap_ environments with no outbound internet access.
+1. Scan the QR code that displays using a supported two-factor authentication application on your mobile device, such as Google Authenticator. Alternatively, click **Use this text code** in the Vendor Portal to generate an alphanumeric code that you enter in the mobile application.

-To support air gap installations, vendors can build air gap bundles for their application in the Vendor Portal that contain all the required assets for a specific release of the application. Additionally, Replicated provides bundles that contain the assets for the Replicated installers.
+   Turn on 2FA in the Vendor Portal

-For more information about how to install with Embedded Cluster and KOTS in air gap environments, see [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap) and [Air Gap Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster-airgapped).
+   [View a larger version of this image](/images/vendor-portal-scan-qr.png)

-### Can I deploy Helm charts with KOTS?
+   Your mobile application displays an authentication code.

-Yes. An application deployed with KOTS can use one or more Helm charts, can include Helm charts as components, and can use more than a single instance of any Helm chart. Each Helm chart requires a unique HelmChart custom resource (`apiVersion: kots.io/v1beta2`) in the release.
+1. Enter the authentication code in the Vendor Portal.

-For more information, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about).
+   Two-factor authentication is enabled and a list of recovery codes is displayed at the bottom of the **Two-Factor Authentication** pane.

-### What's the difference between Embedded Cluster and kURL?
+1. Save the recovery codes in a secure location. These codes can be used at any time (one time per code) if you lose your mobile device.

-Replicated Embedded Cluster is a successor to Replicated kURL. Compared to kURL, Embedded Cluster offers significantly faster installation, updates, and node joins, a redesigned Admin Console UI, improved support for multi-node clusters, one-click updates that update the application and the cluster at the same time, and more.
+1. Log out of your account, then log back in to test that it is enabled. You are prompted to enter a one-time code generated by the application on your mobile device.

-
-For more information, see [Embedded Cluster Overview](/vendor/embedded-overview).
+## Disable 2FA on Individual Accounts

-### How do I enable Embedded Cluster and KOTS installations for my application?
+To disable two-factor authentication on your individual account:

-Releases that support installation with KOTS include the manifests required by KOTS to define the Admin Console experience and install the application.
+1. In the [Vendor Portal](https://vendor.replicated.com), click **Account Settings** from the dropdown list in the upper right corner of the screen.

-In addition to the KOTS manifests, releases that support installation with Embedded Cluster also include the Embedded Cluster Config. The Embedded Cluster Config defines aspects of the cluster that will be provisioned and also sets the version of KOTS that will be installed.
+   Vendor portal account settings

-For more information, see [Embedded Cluster Overview](/vendor/embedded-overview).
+   [View a larger version of this image](/images/vendor-portal-account-settings.png)

-### Can I use my own branding?
+1. In the **Two-Factor Authentication** pane, click **Turn off two-factor authentication**.

-The KOTS Admin Console and the Replicated Download Portal support the use of a custom logo. Additionally, software vendors can use custom domains to alias the endpoints for Replicated services.
+1. In the **Confirm password** dialog, enter your Vendor Portal account password. Click **Confirm password**.

-For more information, see [Customizing the Admin Console and Download Portal](/vendor/admin-console-customize-app-icon) and [About Custom Domains](custom-domains).
+## Enable or Disable 2FA for a Team

-## Replicated SDK FAQs
+As an administrator, you can enable and disable 2FA for teams. You must first enable 2FA on your individual account before you can enable 2FA for teams. After you enable 2FA for your team, team members can enable 2FA on their individual accounts.

-### What is the SDK?
+To enable or disable 2FA for a team:

-
+1. In the [Vendor Portal](https://vendor.replicated.com), select the **Team** tab, then select **Multifactor Auth**.

-### Is the SDK supported in air gap environments?
+ Multifactor authentication for teams in the Vendor Portal -Yes. The Replicated SDK has an _air gap mode_ that allows it to run in environments with no outbound internet access. When installed in air gap mode, the SDK does not attempt to connect to the internet. This avoids any failures that would occur when the SDK is unable to make outbound requests in air gap environments. + [View a larger image](/images/team-2fa-auth.png) -For more information, see [Installing the SDK in Air Gap Environments](/vendor/replicated-sdk-airgap). +1. On the **Multifactor Authentication** page, do one of the following with the **Require Two-Factor Authentication for all Username/Password authenticating users** toggle: -### How do I develop against the SDK API? + - Turn on the toggle to enable 2FA + - Turn off the toggle to disable 2FA -You can use the Replicated SDK in _integration mode_ to develop locally against the SDK API without needing to make real changes in the Replicated Vendor Portal or in your environment. +1. Click **Save changes**. -For more information, see [Developing Against the SDK API](/vendor/replicated-sdk-development). +================ +File: docs/vendor/team-management.md +================ +import CollabRepoAbout from "../partials/collab-repo/_collab-repo-about.mdx" +import CollabRbacImportant from "../partials/collab-repo/_collab-rbac-important.mdx" -### How does the Replicated SDK work with KOTS? +# Managing Team Members -The Replicated SDK is a Helm chart that can be installed as a small service alongside an application, or as a standalone component. The SDK can be installed using the Helm CLI or KOTS. +This topic describes how to manage team members in the Replicated Vendor Portal, such as inviting and removing members, and editing permissions. For information about managing user access to the Replicated collab repository in GitHub, see [Managing Collab Repository Access](team-management-github-username). -Replicated recommends that all applications include the SDK because it provides access to key functionality not available through KOTS, such as support for sending custom metrics from application instances. When both the SDK and KOTS are installed in a cluster alongside an application, both send instance telemetry to the Vendor Portal. +## Viewing Team Members +The [Team](https://vendor.replicated.com/team/members) page provides a list of all accounts currently associated with or invited to your team. Each row contains information about the user, including their two-factor authentication (2FA) status and role-based access control (RBAC) role, and lets administrators take additional actions, such as remove, re-invite, and edit permissions. -For more information about the SDK installation options, see [Installing the Replicated SDK](/vendor/replicated-sdk-installing). +View team members list in the Vendor Portal -## Vendor Portal FAQs +[View a larger image](/images/teams-view.png) -### How do I add and remove team members? +All users, including read-only, can see the name of the RBAC role assigned to each team member. When SAML authentication is enabled, users with the built-in read-only policy cannot see the RBAC role assigned to team members. -Admins can add, remove, and manage team members from the Vendor Portal. For more information, see [Managing Team Members](/vendor/team-management). +## Invite Members +By default, team administrators can invite more team members to collaborate. Invited users receive an email to activate their account. 
The activation link in the email is unique to the invited user. Following the activation link in the email also ensures that the invited user joins the team from which the invitation originated. -### How do I manage RBAC policies for my team members? - -By default, every team has two policies created automatically: Admin and Read Only. If you have an Enterprise plan, you will also have the Sales and Support policies created automatically. These default policies are not configurable. +:::note +Teams that have enforced SAML-only authentication do not use the email invitation flow described in this procedure. These teams and their users must log in through their SAML provider. +::: -You can also configure custom RBAC policies if you are on the Enterprise pricing plan. Creating custom RBAC policies lets you limit which areas of the Vendor Portal are accessible to team members, and control read and read/write privileges to groups based on their role. +To invite a new team member: -For more information, see [Configuring RBAC Policies](/vendor/team-management-rbac-configuring). +1. From the [Team Members](https://vendor.replicated.com/team/members) page, click **Invite team member**. -### Can I alias Replicated endpoints? + The Invite team member dialog opens. -Yes. Replicated supports the use of custom domains to alias the endpoints for Replicated services, such as the Replicated app service and the Replicated proxy registry. + Invite team member dialog in the Vendor Portal -Replicated domains are external to your domain and can require additional security reviews by your customer. Using custom domains as aliases can bring the domains inside an existing security review and reduce your exposure. + [Invite team member dialog](/images/teams-invite-member.png) -For more information, see [Using Custom Domains](/vendor/custom-domains-using). +1. Enter the email address of the member. -### How does Replicated collect telemetry from instances of my application? +1. In the **Permissions** field, assign an RBAC policy from the dropdown list. -For instances running in online (internet-connected) customer environments, either Replicated KOTS or the Replicated SDK periodically sends a small amount of data to the Vendor Portal, depending on which is installed in the cluster alongside the application. If both KOTS and the SDK are installed in the cluster, then both send instance data. + -For air gap instances, Replicated KOTS and the Replicated SDK collect and store instance telemetry in a Kubernetes Secret in the customer environment. The telemetry stored in the Secret is collected when a support bundle is generated in the environment. When the support bundle is uploaded to the Vendor Portal, the telemetry is associated with the correct customer and instance ID, and the Vendor Portal updates the instance insights and event data accordingly. +1. Click **Invite member**. -For more information, see [About Instance and Event Data](/vendor/instance-insights-event-data). + People invited to join your team receive an email notification to accept the invitation. They must follow the link in the email to accept the invitation and join the team. If they do not have a Replicated account already, they can create one that complies with your password policies, 2FA, and Google authentication requirements. If an invited user's email address is already associated with a Replicated account, by accepting your invitation, they automatically leave their current team and join the team that you have invited them to. 
-================
-File: docs/vendor/kurl-about.mdx
-================
-import KurlDefinition from "../partials/kurl/_kurl-definition.mdx"
-import Installers from "../partials/kurl/_installers.mdx"
-import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"
+## Managing Invitations

-# Introduction to kURL
+Invitations expire after 7 days. If a prospective member has not accepted their invitation in this time frame, you can re-invite them without having to re-enter their details. You can also remove the prospective member from the list.

-
+You must be an administrator to perform this action.

-This topic provides an introduction to the Replicated kURL installer, including information about kURL specifications and installations.
+To re-invite or remove a prospective member, do one of the following on the **Team Members** page:

-:::note
-The Replicated KOTS entitlement is required to install applications with KOTS and kURL. For more information, see [Pricing](https://www.replicated.com/pricing) on the Replicated website.
-:::
+* Click **Reinvite** from the row with the user's email address, and then click **Reinvite** in the confirmation dialog.

-## Overview
+* Click **Remove** from the row with the user's email address, and then click **Delete Invitation** in the confirmation dialog.

-
+## Edit Policy Permissions

-### kURL Installers
+You can edit the RBAC policy that is assigned to a member at any time.

-
+

-To distribute a kURL installer alongside your application, you can promote the installer to a channel or include the installer as a manifest file within a given release. For more information about creating kURL installers, see [Creating a kURL Installer](/vendor/packaging-embedded-kubernetes).
+To edit policy permissions for individual team members:

-### kURL Installations
+1. From the Team Members list, click **Edit permissions** next to a member's name.

-To install with kURL, users run a kURL installation script on their VM or bare metal server to provision a cluster.
+   :::note
+   The two-factor authentication (2FA) status displays on the **Team members** page, but it is not configured on this page. For more information about configuring 2FA, see [Managing Two-Factor Authentication](team-management-two-factor-auth).
+   :::

-When the KOTS add-on is included in the kURL installer spec, the kURL installation script installs the KOTS CLI and KOTS Admin Console in the cluster. After the installation script completes, users can access the Admin Console at the URL provided in the output of the command to configure and deploy the application with KOTS.
+1. Select an RBAC policy from the **Permissions** dropdown list, and click **Save**. For information about configuring the RBAC policies that display in this list, see [Configuring RBAC Policies](team-management-rbac-configuring).

-The following shows an example of the output of the kURL installation script:
+   Edit team member permissions in the Vendor Portal

-```bash
-   Installation
-      Complete ✔
+## Enable Users to Auto-join Your Team
+By default, users must be invited to your team. Team administrators can use the auto-join feature to allow users from the same email domain to join their team automatically. This applies to users registering with an email, or with Google authentication if it is enabled for the team. The auto-join feature does not apply to SAML authentication because SAML users log in using their SAML provider's application portal instead of the Vendor Portal.
-Kotsadm: http://10.128.0.35:8800 -Login with password (will not be shown again): 3Hy8WYYid +To add, edit, or delete custom RBAC policies, see [Configuring RBAC Policies](team-management-rbac-configuring). -This password has been set for you by default. It is recommended that you change -this password; this can be done with the following command: -kubectl kots reset-password default -``` +To enable users to auto-join your team: -kURL installations are supported in online (internet-connected) and air gapped environments. +1. From the Team Members page, click **Auto-join** from the left navigation. +1. Enable the **Allow all users from my domain to be added to my team** toggle. -For information about how to install applications with kURL, see [Online Installation with kURL](/enterprise/installing-kurl). + Auto join dialog in the Vendor Portal -## About the Open Source kURL Documentation + [View a larger image](/images/teams-auto-join.png) -The open source documentation for the kURL project is available at [kurl.sh](https://kurl.sh/docs/introduction/). +1. For **Default RBAC policy level for new accounts**, you can use the default Read Only policy or select another policy from the list. This RBAC policy is applied to all users who join the team with the auto-join feature. -The open source kURL documentation contains additional information including kURL installation options, kURL add-ons, and procedural content such as how to add and manage nodes in kURL clusters. Software vendors can use the open source kURL documentation to find detailed reference information when creating kURL installer specs or testing installation. + -================ -File: docs/vendor/kurl-nodeport-services.mdx -================ -import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" -# Exposing Services Using NodePorts +## Remove Members and End Sessions +As a Vendor Portal team admin, you can remove team members, except for the account you are currently logged in with. - +If the team member that you remove added their GitHub username to their Account Settings page in the Vendor Portal to access the Replicated collab repository, then the Vendor Portal also automatically removes their username from the collab repository. For more information, see [Managing Collab Repository Access](team-management-github-username). -This topic describes how to expose NodePort services in [Replicated Embedded Cluster](/vendor/embedded-overview) or [Replicated kURL](/vendor/kurl-about) installations on VMs or bare metal servers. +SAML-created users must be removed using this method to expire their existing sessions because Replicated does not support System for Cross-domain Identity Management (SCIM). -## Overview +To remove a member: -For installations into existing clusters, KOTS automatically creates a port forward tunnel to expose the Admin Console. Unlike installations into existing clusters, KOTS does _not_ automatically open the port forward tunnel for installations in embedded clusters provisioned on virtual machines (VMs) or bare metal servers. This is because it cannot be verified that the ports are secure and authenticated. For more information about the KOTS port forward tunnel, see [Port Forwarding Services with KOTS](/vendor/admin-console-port-forward). +1. From the Team Members page, click **Remove** on the right side of a user's row. 
-Instead, to expose the Admin Console in installations with [Embedded Cluster](/vendor/embedded-overview) or [kURL](/vendor/kurl-about), KOTS creates the Admin Console as a NodePort service so it can be accessed at the node's IP address on a node port (port 8800 for kURL installations and port 30000 for Embedded Cluster installations). Additionally, for kURL installations, the UIs of Prometheus, Grafana, and Alertmanager are also exposed using NodePorts. +1. Click **Remove** in the confirmation dialog. -For installations on VMs or bare metal servers where your application must be accessible from the user's local machine rather than from inside the cluster, you can expose application services as NodePorts to provide access to the application after installation. + The member is removed. All of their current user sessions are deleted and their next attempt at communicating with the server logs them out of their browser's session. -## Add a NodePort Service + If the member added their GitHub username to the Vendor Portal to access the collab repository, then the Vendor Portal also removes their GitHub username from the collab repository. -Services with `type: NodePort` are able to be contacted from outside the cluster by connecting to any node using the appropriate protocol and port. For more information about working with the NodePort service type, see [type: NodePort](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport) in the Kubernetes documentation. + For Google-authenticated users, if the user's Google account is suspended or deleted, Replicated logs that user out of all Google authenticated Vendor Portal sessions within 10 minutes. The user remains in the team list, but they cannot log into the Vendor Portal unless the username and password are allowed. -The following shows an example of a NodePort type service: +## Update Email Addresses -```yaml -apiVersion: v1 -kind: Service -metadata: - name: sentry - labels: - app: sentry -spec: - type: NodePort - ports: - - port: 9000 - targetPort: 9000 - nodePort: 9000 - protocol: TCP - name: sentry - selector: - app: sentry - role: web -``` +:::important +Changing team member email addresses has security implications. Replicated advises that you avoid changing team member email addresses if possible. +::: -After configuring a NodePort service for your application, you can add a link to the service on the Admin Console dashboard where it can be accessed by users after the application is installed. For more information, see [About Accessing NodePort Services](#about-accessing-nodeport-services) below. +Updating the email address for a team member requires creating a new account with the updated email address, and then deactivating the previous account. -### Use KOTS Annotations to Conditionally Deploy NodePort Services +To update the email address for a team member: -You can use the KOTS [`kots.io/when`](/vendor/packaging-include-resources#kotsiowhen) annotation to conditionally deploy a service. This is useful when you want to deploy a ClusterIP or LoadBalancer service for existing cluster installations, and deploy a NodePort service for Embedded Cluster or kURL installations. +1. From the Team Members page, click **Invite team member**. -To conditionally deploy a service based on the installation method, you can use the following KOTS template functions in the `kots.io/when` annotation: -* [IsKurl](/reference/template-functions-static-context#iskurl): Detects kURL installations. 
For example, `repl{{ IsKurl }}` returns true for kURL installations, and `repl{{ not IsKurl }}` returns true for non-kURL installations.
-* [Distribution](/reference/template-functions-static-context#distribution): Returns the distribution of the cluster where KOTS is running. For example, `repl{{ eq Distribution "embedded-cluster" }}` returns true for Embedded Cluster installations and `repl{{ ne Distribution "embedded-cluster" }}` returns true for non-Embedded Cluster installations.
+1. Assign the required RBAC policies to the new user.

-For example, the following `sentry` service with `type: NodePort` includes the annotation `kots.io/when: repl{{ eq Distribution "embedded-cluster" }}`. This creates a NodePort service _only_ when installing with Embedded Cluster:
+1. Deactivate the previous team member account.

-  ```yaml
-  apiVersion: v1
-  kind: Service
-  metadata:
-    name: sentry
-    labels:
-      app: sentry
-    annotations:
-      # This annotation ensures that the NodePort service
-      # is only created in Embedded Cluster installations
-      kots.io/when: repl{{ eq Distribution "embedded-cluster" }}
-  spec:
-    type: NodePort
-    ports:
-    - port: 9000
-      targetPort: 9000
-      nodePort: 9000
-      protocol: TCP
-      name: sentry
-    selector:
-      app: sentry
-      role: web
-  ```
+================
+File: docs/vendor/telemetry-air-gap.mdx
+================
+import AirGapTelemetry from "../partials/instance-insights/_airgap-telemetry.mdx"

-Similarly, to ensure that a `sentry` service with `type: ClusterIP` is only created in existing cluster installations, add the annotation `kots.io/when: repl{{ ne Distribution "embedded-cluster" }}` under `metadata.annotations` in the ClusterIP specification:
+# Collecting Telemetry for Air Gap Instances

-```yaml
-apiVersion: v1
-kind: Service
-metadata:
-  name: sentry
-  labels:
-    app: sentry
-  annotations:
-    # This annotation ensures that the ClusterIP service
-    # is only created in existing cluster installations
-    kots.io/when: repl{{ ne Distribution "embedded-cluster" }}
-spec:
-  type: ClusterIP
-  ports:
-  - port: 9000
-    targetPort: 9000
-    protocol: TCP
-    name: sentry
-  selector:
-    app: sentry
-    role: web
-```
+This topic describes how to collect telemetry for instances in air gap environments.

-## About Accessing NodePort Services
+## Overview

-This section describes providing access to NodePort services after installation.
+Air gap instances run in environments without outbound internet access. This limitation prevents these instances from periodically sending telemetry to the Replicated Vendor Portal through the Replicated SDK or Replicated KOTS. For more information about how the Vendor Portal collects telemetry from online (internet-connected) instances, see [About Instance and Event Data](/vendor/instance-insights-event-data#about-reporting).

-### VM Firewall Requirements
+

-To be able to access the Admin Console and any NodePort services for your application, the firewall for the VM where the user installs must allow HTTP traffic and allow inbound traffic from their workstation to the port where the service is exposed. Users can consult their cloud provider's documentation for more information about updating firewall rules.
+The following diagram demonstrates how air gap telemetry is collected and stored by the Replicated SDK in a customer environment, and then shared to the Vendor Portal in a support bundle:

-### Add a Link on the Admin Console Dashboard {#add-link}
+Air gap telemetry collected by the SDK in a support bundle

-You can provide a link to a NodePort service on the Admin Console dashboard by configuring the `links` array in the Kubernetes SIG Application custom resource. This provides users with an easy way to access the application after installation. For more information, see [Adding Links to the Dashboard](admin-console-adding-buttons-links).
+[View a larger version of this image](/images/airgap-telemetry.png)

-For example:
+All support bundles uploaded to the Vendor Portal from air gap customers contribute to a comprehensive dataset, providing parity in the telemetry for air gap and online instances. Replicated recommends that you collect support bundles from air gap customers regularly (monthly or quarterly) to improve the completeness of the dataset. The Vendor Portal handles any overlapping event archives idempotently, ensuring data integrity.

-Admin Console dashboard with Open App link
+## Requirement

-[View a larger version of this image](/images/gitea-open-app.png)
+Air gap telemetry has the following requirements:

-================
-File: docs/vendor/kurl-reset.mdx
-================
-import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"
+* To collect telemetry from air gap instances, one of the following must be installed in the cluster where the instance is running:
+
+  * The Replicated SDK installed in air gap mode. See [Installing the SDK in Air Gap Environments](/vendor/replicated-sdk-airgap).
+
+  * KOTS v1.92.1 or later

-# Resetting a kURL Cluster
+  :::note
+  When both the Replicated SDK and KOTS v1.92.1 or later are installed in the cluster (such as when a Helm chart that includes the SDK is installed by KOTS), both collect and store instance telemetry in their own dedicated secret, subject to the size limitation noted below. In the case of any overlapping data points, the Vendor Portal will report these data points chronologically based on their timestamp.
+  :::

-
+* To collect custom metrics from air gap instances, the Replicated SDK must be installed in the cluster in air gap mode. See [Installing the SDK in Air Gap Environments](/vendor/replicated-sdk-airgap).

-This topic describes how to use the kURL `reset` command to reset a kURL cluster.
+  For more information about custom metrics, see [Configuring Custom Metrics](https://docs.replicated.com/vendor/custom-metrics).

-## Overview
+Replicated strongly recommends that all applications include the Replicated SDK because it enables access to both standard instance telemetry and custom metrics for air gap instances.

-If you need to reset a kURL installation, such as when you are testing releases with kURL, you can use the kURL `tasks.sh` `reset` command to remove Kubernetes from the system.
+## Limitation

-Alternatively, you can discard your current VM (if you are using one) and recreate the VM with a new OS to reinstall with kURL.
+Telemetry data is capped at 4,000 events or 1 MB per Secret, whichever limit is reached first.

-For more information about the `reset` command, see [Resetting a Node](https://kurl.sh/docs/install-with-kurl/managing-nodes#reset-a-node) in the kURL documentation.
+When a limit is reached, the oldest events are purged until the payload is within the limit.
For optimal use, consider collecting support bundles regularly (monthly or quarterly) from air gap customers. -To reset a kURL installation: +## Collect and View Air Gap Telemetry -1. Access the machine where you installed with kURL. +To collect telemetry from air gap instances: -1. Run the following command to remove Kubernetes from the system: +1. Ask your customer to collect a support bundle. See [Generating Support Bundles](/vendor/support-bundle-generating). - ``` - curl -sSL https://k8s.kurl.sh/latest/tasks.sh | sudo bash -s reset - ``` +1. After receiving the support bundle from your customer, go to the Vendor Portal **Customers**, **Customer Reporting**, or **Instance Details** page and upload the support bundle: -1. Follow the instructions in the output of the command to manually remove any files that the `reset` command does not remove. + ![upload new bundle button on instance details page](/images/airgap-upload-telemetry.png) -If the `reset` command is unsuccessful, discard your current VM, and recreate the VM with a new OS to reinstall the Admin Console and an application. + The telemetry collected from the support bundle appears in the instance data shortly. Allow a few minutes for all data to be processed. ================ -File: docs/vendor/licenses-about-types.md +File: docs/vendor/testing-about.md ================ -# About Community Licenses +import Overview from "../partials/cmx/_overview.mdx" +import SupportedClusters from "../partials/cmx/_supported-clusters-overview.mdx" -This topic describes community licenses. For more information about other types of licenses, see [Customer Types](licenses-about#customer-types) in _About Customers_. +# About Compatibility Matrix + +This topic describes Replicated Compatibility Matrix, including use cases, billing, limitations, and more. ## Overview -Community licenses are intended for use with a free or low cost version of your application. For example, you could use community licenses for an open source version of your application. + -After installing an application with a community license, users can replace their community license with a new license of a different type without having to completely reinstall the application. This means that, if you have several community users who install with the same license, then you can upgrade a single community user without editing the license for all community users. +You can use Compatibility Matrix with the Replicated CLI or the Replicated Vendor Portal. For more information about how to use Compatibility Matrix, see [Using Compatibility Matrix](testing-how-to). -Community licenses are supported for applications that are installed with Replicated KOTS or with the Helm CLI. +### Supported Clusters -For applications installed with KOTS, community license users can upload a new license file of a different type in the Replicated admin console. For more information, see [Upgrade from a Community License](/enterprise/updating-licenses#upgrade-from-a-community-license) in _Updating Licenses in the Admin Console_. + -## Limitations +### Billing and Credits -Community licenses function in the same way as the other types of licenses, with the following -exceptions: +Clusters created with Compatibility Matrix are billed by the minute. Per-minute billing begins when the cluster reaches a `running` status and ends when the cluster is deleted. Compatibility Matrix marks a cluster as `running` when a working kubeconfig for the cluster is accessible. 
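+For example, after creating a cluster, you can check whether it has reached a `running` status and retrieve its kubeconfig with the Replicated CLI. This is a minimal sketch that assumes the Replicated CLI is installed and authenticated, and `CLUSTER_ID` is a placeholder for your cluster's ID:
+
+```bash
+# List clusters and their current statuses
+replicated cluster ls
+
+# Download the kubeconfig after the cluster reaches a running status
+replicated cluster kubeconfig CLUSTER_ID
+```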
-* Updating a community license to another type of license cannot be reverted. -* Community license users are not supported by the Replicated Support team. -* Community licenses cannot support air gapped installations. -* Community licenses cannot include an expiration date. +You are billed only for the time that the cluster is in a `running` status. You are _not_ billed for the time that it takes Compatibility Matrix to create and tear down clusters, including when the cluster is in an `assigned` status. -## Community License Admin Console Branding +For more information about pricing, see [Compatibility Matrix Pricing](testing-pricing). -For applications installed with KOTS, the branding in the admin console for community users differs in the following ways: +To create clusters with Compatibility Matrix, you must have credits in your Vendor Portal account. +If you have a contract, you can purchase credits by logging in to the Vendor Portal and going to [**Compatibility Matrix > Buy additional credits**](https://vendor.replicated.com/compatibility-matrix). +Otherwise, to request credits, log in to the Vendor Portal and go to [**Compatibility Matrix > Request more credits**](https://vendor.replicated.com/compatibility-matrix). -* The license tile on the admin console **Dashboard** page is highlighted in yellow and with the words **Community Edition**. +### Quotas and Capacity - ![Community License Dashboard](/images/community-license-dashboard.png) - - [View a larger version of this image](/images/community-license-dashboard.png) +By default, Compatibility Matrix sets quotas for the capacity that can be used concurrently by each vendor portal team. These quotas are designed to ensure that Replicated maintains a minimum amount of capacity for provisioning both VM and cloud-based clusters. -* All support bundles and analysis in the admin console are clearly marked as **Community Edition**. +By default, the quota for cloud-based cluster distributions (AKS, GKE, EKS) is three clusters running concurrently. - ![Community License Support Bundle](/images/community-license-bundle.png) - - [View a larger version of this image](/images/community-license-bundle.png) +VM-based cluster distributions (such as kind, OpenShift, and Replicated Embedded Cluster) have the following default quotas: +* 32 vCPUs +* 128 GiB memory +* 800 GiB disk size -================ -File: docs/vendor/licenses-about.mdx -================ -import ChangeChannel from "../partials/customers/_change-channel.mdx" +You can request increased quotas at any time with no additional cost. To view your team's current quota and capacity usage, or to request a quota increase, go to [**Compatibility Matrix > Settings**](https://vendor.replicated.com/compatibility-matrix/settings) in the vendor portal: -# About Customers and Licensing +![Compatibility matrix settings page](/images/compatibility-matrix-settings.png) -This topic provides an overview of customers and licenses in the Replicated Platform. +[View a larger version of this image](/images/compatibility-matrix-settings.png) -## Overview +### Cluster Status -The licensing features of the Replicated Platform allow vendors to securely grant access to software, making license agreements available to the application in end customer environments at startup and runtime. +Clusters created with Compatibility Matrix can have the following statuses: -The Replicated Vendor Portal also allows vendors to create and manage customer records. 
Each customer record includes several fields that uniquely identify the customer and the application, specify the customer's assigned release channel, and define the customer's entitlements.
+* `running`: A working kubeconfig for the cluster is accessible. Billing begins when the cluster reaches a `running` status.

-Vendors can use these licensing features to enforce entitlements such as license expiration dates, and to track and report on software usage for the purpose of surfacing insights to both internal teams and customers.
+  Additionally, clusters are verified prior to transitioning to a `running` status. Verification includes checking that the cluster is healthy and running with the correct number of nodes, as well as passing [sonobuoy](https://sonobuoy.io/) tests in `--quick` mode.

-The following diagram provides an overview of licensing with the Replicated Platform:
+* `terminated`: The cluster is deleted. Billing ends when the cluster status is changed from `running` to `terminated`.

-![App instance communicates with the Replicated licensing server](/images/licensing-overview.png)
+* `error`: An error occurred when attempting to provision the cluster.

-[View a larger version of this image](/images/licensing-overview.png)
+You can view the status of clusters using the `replicated cluster ls` command. For more information, see [cluster ls](/reference/replicated-cli-cluster-ls).

-As shown in the diagram above, the Replicated license and update server manages and distributes customer license information. The license server retrieves this license information from customer records managed by vendors in the Vendor Portal.
+### Cluster Add-ons

-During installation or upgrade, the customer's license ID is used to authenticate with the license server. The license ID also provides authentication for the Replicated proxy registry, securely granting proxy access to images in the vendor's external registry.
+The Replicated Compatibility Matrix enables you to extend your cluster with add-ons for use by your application, such as an AWS S3 object store.
+This allows you to more easily provision dependencies required by your application.

-The license server is identified with a CNAME record where it can be accessed from end customer environments. When running alongside an application in a customer environment, the Replicated SDK retrieves up-to-date customer license information from the license server during runtime. The in-cluster SDK API `/license/` endpoints can be used to get customer license information on-demand, allowing vendors to programmatically enforce and report on license agreements.
+For more information about how to use the add-ons, see [Compatibility Matrix Cluster Add-ons](testing-cluster-addons).

-Vendors can also integrate internal Customer Relationship Management (CRM) tools such as Salesforce with the Replicated Platform so that any changes to a customer's entitlements are automatically reflected in the Vendor Portal. This ensures that updates to license agreements are reflected in the customer environment in real time.
+## Limitations

-## About Customers

-Each customer that you create in the Replicated Vendor Portal has a unique license ID. Your customers use their license when they install or update your application.
+Compatibility Matrix has the following limitations: -You assign customers to channels in the Vendor Portal to control their access to your application releases. Customers can install or upgrade to releases that are promoted to the channel they are assigned. For example, assigning a customer to your Beta channel allows that customer to install or upgrade to only releases promoted to the Beta channel. +- Clusters cannot be resized. Create another cluster if you want to make changes, such as add another node. +- Clusters cannot be rebooted. Create another cluster if you need to reset/reboot the cluster. +- On cloud clusters, node groups are not available for every distribution. For distribution-specific details, see [Supported Compatibility Matrix Cluster Types](/vendor/testing-supported-clusters). +- Multi-node support is not available for every distribution. For distribution-specific details, see [Supported Compatibility Matrix Cluster Types](/vendor/testing-supported-clusters). +- ARM instance types are only supported on Cloud Clusters. For distribution-specific details, see [Supported Compatibility Matrix Cluster Types](/vendor/testing-supported-clusters). +- GPU instance types are only supported on Cloud Clusters. For distribution-specific details, see [Supported Compatibility Matrix Cluster Types](/vendor/testing-supported-clusters). +- There is no support for IPv6 as a single stack. Dual stack support is available on kind clusters. +- There is no support for air gap testing. +- The `cluster upgrade` feature is available only for kURL distributions. See [cluster upgrade](/reference/replicated-cli-cluster-upgrade). +- Cloud clusters do not allow for the configuration of CNI, CSI, CRI, Ingress, or other plugins, add-ons, services, and interfaces. +- The node operating systems for clusters created with Compatibility Matrix cannot be configured nor replaced with different operating systems. +- The Kubernetes scheduler for clusters created with Compatibility Matrix cannot be replaced with a different scheduler. +- Each team has a quota limit on the amount of resources that can be used simultaneously. This limit can be raised by messaging your account representative. +- Team actions with Compatibility Matrix (for example, creating and deleting clusters and requesting quota increases) are not logged and displayed in the [Vendor Team Audit Log](https://vendor.replicated.com/team/audit-log). -Each customer license includes several fields that uniquely identify the customer and the application, specify the customer's assigned release channel, and define the customer's entitlements, such as if the license has an expiration date or what application functionality the customer can access. Replicated securely delivers these entitlements to the application and makes them available at installation or at runtime. +For additional distribution-specific limitations, see [Supported Compatibility Matrix Cluster Types](testing-supported-clusters). -For more information about how to create and manage customers, see [Creating and Managing Customers](releases-creating-customer). +================ +File: docs/vendor/testing-cluster-addons.md +================ +# Compatibility Matrix Cluster Add-ons (Alpha) -### Customer Channel Assignment {#channel-assignment} +This topic describes the supported cluster add-ons for Replicated Compatibility Matrix. 
-
+## Overview

-For example, if the latest release promoted to the Beta channel is version 1.25.0 and version 1.10.0 is marked as required, when you edit an existing customer to assign them to the Beta channel, then the KOTS Admin Console always fetches 1.25.0, even though 1.10.0 is marked as required. The required release 1.10.0 is ignored and is not available to the customer for upgrade.

+Replicated Compatibility Matrix enables you to extend your cluster with add-ons for use by your application, such as an AWS S3 object store.
+This allows you to more easily provision dependencies required by your application.

-For more information about how to mark a release as required, see [Properties](releases-about#properties) in _About Channels and Releases_. For more information about how to synchronize licenses in the Admin Console, see [Updating Licenses in the Admin Console](/enterprise/updating-licenses).

+## CLI

-### Customer Types

+The Replicated CLI can be used to [create](/reference/replicated-cli-cluster-addon-create), [manage](/reference/replicated-cli-cluster-addon-ls), and [remove](/reference/replicated-cli-cluster-addon-rm) cluster add-ons.

-Each customer is assigned one of the following types:

+## Supported Add-ons

-* **Development**: The Development type can be used internally by the development
-team for testing and integration.
-* **Trial**: The Trial type can be used for customers who are on 2-4 week trials
-of your software.
-* **Paid**: The Paid type identifies the customer as a paying customer for which
-additional information can be provided.
-* **Community**: The Community type is designed for a free or low cost version of your application. For more details about this type, see [Community Licenses](licenses-about-types).
-* (Beta) **Single Tenant Vendor Managed**: The Single Tenant Vendor Managed type is for customers for whom your team is operating the application in infrastructure you fully control and operate. Single Tenant Vendor Managed licenses are free to use, but come with limited support. The Single Tenant Vendor Managed type is a Beta feature. Reach out to your Replicated account representative to get access.

+This section lists the supported cluster add-ons for clusters created with Compatibility Matrix.

-Except Community licenses, the license type is used solely for reporting purposes and a customer's access to your application is not affected by the type that you assign.

+### object-store (Alpha)

-You can change the type of a license at any time in the Vendor Portal. For example, if a customer upgraded from a trial to a paid account, then you could change their license type from Trial to Paid for reporting purposes.

+The Replicated cluster object store add-on can be used to create S3-compatible object store buckets for clusters (currently only AWS S3 is supported for EKS clusters).

-### About Managing Customers

+Assuming you already have a cluster, run the following command with the cluster ID to create an object store bucket:

-Each customer record in the Vendor Portal has built-in fields and also supports custom fields:
-* The built-in fields include values such as the customer name, customer email, and the license expiration date. You can optionally set initial values for the built-in fields so that each new customer created in the Vendor Portal starts with the same set of values.
-* You can also create custom fields to define entitlements for your application. For example, you can create a custom field to set the number of active users permitted.
+```bash +$ replicated cluster addon create object-store 4d2f7e70 --bucket-prefix mybucket +05929b24 Object Store pending {"bucket_prefix":"mybucket"} +$ replicated cluster addon ls 4d2f7e70 +ID TYPE STATUS DATA +05929b24 Object Store ready {"bucket_prefix":"mybucket","bucket_name":"mybucket-05929b24-cmx","service_account_namespace":"cmx","service_account_name":"mybucket-05929b24-cmx","service_account_name_read_only":"mybucket-05929b24-cmx-ro"} +``` -For more information, see [Managing Customer License Fields](/vendor/licenses-adding-custom-fields). +This will create two service accounts in a namespace, one read-write and the other read-only access to the object store bucket. -You can make changes to a customer record in the Vendor Portal at any time. The license ID, which is the unique identifier for the customer, never changes. For more information about managing customers in the Vendor Portal, see [Creating and Managing Customers](releases-creating-customer). +Additional service accounts can be created in any namespace with access to the object store by annotating the new service account with the same `eks.amazonaws.com/role-arn` annotation found in the predefined ones (`service_account_name` and `service_account_name_read_only`). -### About the Customers Page + + + + + + + + + + + + + + + + + + + + + +
| Type | Description |
|------|-------------|
| Supported Kubernetes Distributions | EKS (AWS S3) |
| Cost | Flat fee of $0.50 per bucket. |
| Options | <ul><li>`bucket_prefix` (string): A prefix for the bucket name to be created (required)</li></ul> |
| Data | <ul><li>`bucket_prefix`: The prefix specified by the user for the bucket name</li><li>`bucket_name`: The actual bucket name</li><li>`service_account_namespace`: The namespace in which the service accounts (`service_account_name` and `service_account_name_read_only`) have been created</li><li>`service_account_name`: The service account name for read-write access to the bucket</li><li>`service_account_name_read_only`: The service account name for read-only access to the bucket</li></ul> |
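+A workload can then use the bucket by running under one of the generated service accounts. The following is a minimal sketch (not taken from the add-on documentation) that lists the bucket contents using the read-write service account. The namespace, service account, and bucket names come from the example `addon ls` output above; the image and arguments are placeholders:

+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: object-store-smoke-test
+  namespace: cmx                             # service_account_namespace from the add-on output
+spec:
+  serviceAccountName: mybucket-05929b24-cmx  # read-write service account from the add-on output
+  restartPolicy: Never
+  containers:
+    - name: aws-cli
+      image: amazon/aws-cli:latest           # placeholder image that provides the AWS CLI
+      args: ["s3", "ls", "s3://mybucket-05929b24-cmx"]  # bucket_name from the add-on output
+```

+Because the service account carries the `eks.amazonaws.com/role-arn` annotation, the AWS CLI in the pod authenticates through IAM Roles for Service Accounts (IRSA) rather than static credentials.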
    -The following shows an example of the **Customers** page: +================ +File: docs/vendor/testing-how-to.md +================ +import TestRecs from "../partials/ci-cd/_test-recs.mdx" +import Prerequisites from "../partials/cmx/_prerequisites.mdx" -![Customers page](/images/customers-page.png) +# Using Compatibility Matrix -[View a larger version of this image](/images/customers-page.png) +This topic describes how to use Replicated Compatibility Matrix to create ephemeral clusters. -From the **Customers** page, you can do the following: +## Prerequisites -* Create new customers. +Before you can use Compatibility Matrix, you must complete the following prerequisites: -* Download CSVs with customer and instance data. + -* Search and filter customers. +* Existing accounts must accept the TOS for the trial on the [**Compatibility Matrix**](https://vendor.replicated.com/compatibility-matrix) page in the Replicated Vendor Portal. -* Click the **Manage customer** button to edit details such as the customer name and email, the custom license fields assigned to the customer, and the license expiration policy. For more information, see [Creating and Managing Customers](releases-creating-customer). +## Create and Manage Clusters -* Download the license file for each customer. +This section explains how to use Compatibility Matrix to create and manage clusters with the Replicated CLI or the Vendor Portal. -* Click the **Customer reporting** button to view data about the active application instances associated with each customer. For more information, see [Customer Reporting](customer-reporting). +For information about creating and managing clusters with the Vendor API v3, see the [clusters](https://replicated-vendor-api.readme.io/reference/listclusterusage) section in the Vendor API v3 documentation. -* View instance details for each customer, including the version of the application that this instance is running, the Kubernetes distribution of the cluster, the last check-in time, and more: +### Create Clusters - - - [View a larger version of this image](/images/customer-reporting-details.png) +You can create clusters with Compatibility Matrix using the Replicated CLI or the Vendor Portal. -* Archive customers. For more information, see [Creating and Managing Customers](releases-creating-customer). +#### Replicated CLI -* Click on a customer on the **Customers** page to access the following customer-specific pages: - * [Reporting](#about-the-customer-reporting-page) - * [Manage customer](#about-the-manage-customer-page) - * [Support bundles](#about-the-customer-support-bundles-page) +To create a cluster using the Replicated CLI: -### About the Customer Reporting Page +1. (Optional) View the available cluster distributions, including the supported Kubernetes versions, instance types, and maximum nodes for each distribution: -The **Reporting** page for a customer displays data about the active application instances associated with each customer. The following shows an example of the **Reporting** page for a customer that has two active application instances: + ```bash + replicated cluster versions + ``` + For command usage, see [cluster versions](/reference/replicated-cli-cluster-versions). -![Customer reporting page in the Vendor Portal](/images/customer-reporting-page.png) -[View a larger version of this image](/images/customer-reporting-page.png) +1. 
Run the following command to create a cluster:

   ```
   replicated cluster create --name NAME --distribution K8S_DISTRO --version K8S_VERSION --disk DISK_SIZE --instance-type INSTANCE_TYPE [--license-id LICENSE_ID]
   ```
   Where:
   * `NAME` is any name for the cluster. If `--name` is excluded, a name is automatically generated for the cluster.
   * `K8S_DISTRO` is the Kubernetes distribution for the cluster.
   * `K8S_VERSION` is the Kubernetes version for the cluster if creating a standard Cloud or VM-based cluster. If creating an Embedded Cluster or kURL cluster type, `--version` is optional:
       * For Embedded Cluster types, `--version` is the latest available release on the channel by default. Otherwise, to specify a different release, set `--version` to the `Channel release sequence` value for the release.
       * For kURL cluster types, `--version` is the `"latest"` kURL Installer ID by default. Otherwise, to specify a different kURL Installer, set `--version` to the kURL Installer ID.
   * `DISK_SIZE` is the disk size (GiB) to request per node.
   * `INSTANCE_TYPE` is the instance type to use for each node.
   * (Embedded Cluster Only) `LICENSE_ID` is a valid customer license. Required to create an Embedded Cluster.

   For command usage and additional optional flags, see [cluster create](/reference/replicated-cli-cluster-create).

   **Example:**

   The following example creates a kind cluster with Kubernetes version 1.27.0, a disk size of 100 GiB, and an instance type of `r1.small`.

   ```bash
   replicated cluster create --name kind-example --distribution kind --version 1.27.0 --disk 100 --instance-type r1.small
   ```

1. Verify that the cluster was created:

   ```bash
   replicated cluster ls CLUSTER_NAME
   ```
   Where `CLUSTER_NAME` is the name of the cluster that you created.

   In the output of the command, you can see that the `STATUS` of the cluster is `assigned`. When the kubeconfig for the cluster is accessible, the cluster's status is changed to `running`.
For more information about cluster statuses, see [Cluster Status](testing-about#cluster-status) in _About Compatibility Matrix._ -The following shows an example of the **Support bundles** page: +#### Vendor Portal -![Support bundles page in the Vendor Portal](/images/customer-support-bundles.png) -[View a larger version of this image](/images/customer-support-bundles.png) +To create a cluster using the Vendor Portal: -As shown in the screenshot above, the **Support bundles** page lists details about the collected support bundles, such as the date the support bundle was collected and the debugging insights found. You can click on a support bundle to view it in the **Support bundle analysis** page. You can also click **Delete** to delete the support bundle, or click **Customer Reporting** to view the **Reporting** page for the customer. +1. Go to [**Compatibility Matrix > Create cluster**](https://vendor.replicated.com/compatibility-matrix/create-cluster). -## About Licensing with Replicated + Create a cluster page -### About Syncing Licenses + [View a larger version of this image](/images/create-a-cluster.png) -When you edit customer licenses for an application installed with a Replicated installer (Embedded Cluster, KOTS, kURL), your customers can use the KOTS Admin Console to get the latest license details from the Vendor Portal, then deploy a new version that includes the license changes. Deploying a new version with the license changes ensures that any license fields that you have templated in your release using [KOTS template functions](/reference/template-functions-about) are rendered with the latest license details. +1. On the **Create a cluster** page, complete the following fields: -For online instances, KOTS pulls license details from the Vendor Portal when: -* A customer clicks **Sync license** in the Admin Console. -* An automatic or manual update check is performed by KOTS. -* An update is performed with Replicated Embedded Cluster. See [Performing Updates with Embedded Cluster](/enterprise/updating-embedded). -* An application status changes. See [Current State](instance-insights-details#current-state) in _Instance Details_. + + + + + + + + + + + + + + + + + + + + + + + + + +
| Field | Description |
|-------|-------------|
| Kubernetes distribution | Select the Kubernetes distribution for the cluster. |
| Version | Select the Kubernetes version for the cluster. The options available are specific to the distribution selected. |
| Name (optional) | Enter an optional name for the cluster. |
| Tags | Add one or more tags to the cluster as key-value pairs. |
| Set TTL | Select the Time to Live (TTL) for the cluster. When the TTL expires, the cluster is automatically deleted. TTL can be adjusted after cluster creation with [cluster update ttl](/reference/replicated-cli-cluster-update-ttl). |
-For more information, see [Updating Licenses in the Admin Console](/enterprise/updating-licenses).

+1. For **Nodes & Node Groups**, complete the following fields to configure nodes and node groups for the cluster:
| Field | Description |
|-------|-------------|
| Instance type | Select the instance type to use for the nodes in the node group. The options available are specific to the distribution selected. |
| Disk size | Select the disk size in GiB to use per node. |
| Nodes | Select the number of nodes to provision in the node group. The options available are specific to the distribution selected. |
    -To update licenses in air gap installations, customers need to upload the updated license file to the Admin Console. +1. (Optional) Click **Add node group** to add additional node groups. -After you update the license fields in the Vendor Portal, you can notify customers by either sending them a new license file or instructing them to log into their Download Portal to downlaod the new license. +1. Click **Create cluster**. -For more information, see [Updating Licenses in the Admin Console](/enterprise/updating-licenses). + The cluster is displayed in the list of clusters on the **Compatibility Matrix** page with a status of Assigned. When the kubeconfig for the cluster is accessible, the cluster's status is changed to Running. -### Retrieving License Details with the SDK API + :::note + If the cluster is not automatically displayed, refresh your browser window. + ::: -The [Replicated SDK](replicated-sdk-overview) includes an in-cluster API that can be used to retrieve up-to-date customer license information from the Vendor Portal during runtime through the [`license`](/reference/replicated-sdk-apis#license) endpoints. This means that you can add logic to your application to get the latest license information without the customer needing to perform a license update. The SDK API polls the Vendor Portal for updated data every four hours. + Cluster configuration dialog -In KOTS installations that include the SDK, users need to update their licenses from the Admin Console as described in [About Syncing Licenses](#about-syncing-licenses) above. However, any logic in your application that uses the SDK API will update the user's license information without the customer needing to deploy a license update in the Admin Console. + [View a larger version of this image](/images/cmx-assigned-cluster.png) -For information about how to use the SDK API to query license entitlements at runtime, see [Querying Entitlements with the Replicated SDK API](/vendor/licenses-reference-sdk). +### Prepare Clusters -### License Expiration Handling {#expiration} +For applications distributed with the Replicated Vendor Portal, the [`cluster prepare`](/reference/replicated-cli-cluster-prepare) command reduces the number of steps required to provision a cluster and then deploy a release to the cluster for testing. This is useful in continuous integration (CI) workflows that run multiple times a day. For an example workflow that uses the `cluster prepare` command, see [Recommended CI/CD Workflows](/vendor/ci-workflows). -The built-in `expires_at` license field defines the expiration date for a customer license. When you set an expiration date in the Vendor Portal, the `expires_at` field is encoded in ISO 8601 format (`2026-01-23T00:00:00Z`) and is set to midnight UTC at the beginning of the calendar day (`00:00:00`) on the date selected. +The `cluster prepare` command does the following: +* Creates a cluster +* Creates a release for your application based on either a Helm chart archive or a directory containing the application YAML files +* Creates a temporary customer of type `test` + :::note + Test customers created by the `cluster prepare` command are not saved in your Vendor Portal team. + ::: +* Installs the release in the cluster using either the Helm CLI or Replicated KOTS -Replicated enforces the following logic when a license expires: -* By default, instances with expired licenses continue to run. 
- To change the behavior of your application when a license expires, you can add custom logic in your application that queries the `expires_at` field using the Replicated SDK in-cluster API. For more information, see [Querying Entitlements with the Replicated SDK API](/vendor/licenses-reference-sdk).
-* Expired licenses cannot log in to the Replicated registry to pull a Helm chart for installation or upgrade.
-* Expired licenses cannot pull application images through the Replicated proxy registry or from the Replicated registry.
-* In Replicated KOTS installations, KOTS prevents instances with expired licenses from receiving updates.

+The `cluster prepare` command requires either a Helm chart archive or a directory containing the application YAML files to be installed:

-### Replacing Licenses for Existing Installations

+* **Install a Helm chart with the Helm CLI**:

-Community licenses are the only license type that can be replaced with a new license without needing to reinstall the application. For more information, see [Community Licenses](licenses-about-types).

+  ```bash
+  replicated cluster prepare \
+  --distribution K8S_DISTRO \
+  --version K8S_VERSION \
+  --chart HELM_CHART_TGZ
+  ```
+  The following example creates a kind cluster and installs a Helm chart in the cluster using the `nginx-chart-0.0.14.tgz` chart archive:
+  ```bash
+  replicated cluster prepare \
+  --distribution kind \
+  --version 1.27.0 \
+  --chart nginx-chart-0.0.14.tgz \
+  --set key1=val1,key2=val2 \
+  --set-string s1=val1,s2=val2 \
+  --set-json j1='{"key1":"val1","key2":"val2"}' \
+  --set-literal l1=val1,l2=val2 \
+  --values values.yaml
+  ```

-Unless the existing customer is using a community license, it is not possible to replace one license with another license without reinstalling the application. When you need to make changes to a customer's entitlements, Replicated recommends that you edit the customer's license details in the Vendor Portal, rather than issuing a new license.

+* **Install with KOTS from a YAML directory**:

+  ```bash
+  replicated cluster prepare \
+  --distribution K8S_DISTRO \
+  --version K8S_VERSION \
+  --yaml-dir PATH_TO_YAML_DIR
+  ```
+  The following example creates a k3s cluster and installs an application in the cluster using the manifest files in a local directory named `config-validation`:
+  ```bash
+  replicated cluster prepare \
+  --distribution k3s \
+  --version 1.26 \
+  --namespace config-validation \
+  --shared-password password \
+  --app-ready-timeout 10m \
+  --yaml-dir config-validation \
+  --config-values-file config-values.yaml \
+  --entitlements "num_of_queues=5"
+  ```

-================
-File: docs/vendor/licenses-adding-custom-fields.md
-================
-# Managing Customer License Fields

+For command usage, including additional options, see [cluster prepare](/reference/replicated-cli-cluster-prepare).

-This topic describes how to manage customer license fields in the Replicated Vendor Portal, including how to add custom fields and set initial values for the built-in fields.

+### Access Clusters

-## Set Initial Values for Built-In License Fields (Beta)

+Compatibility Matrix provides the kubeconfig for clusters so that you can access clusters with the kubectl command line tool.
For more information, see [Command line tool (kubectl)](https://kubernetes.io/docs/reference/kubectl/) in the Kubernetes documentation.

-:::note
-Initial values are not applied to new customers created through the Vendor API v3. For more information, see [Create a customer](https://replicated-vendor-api.readme.io/reference/createcustomer-1) in the Vendor API v3 documentation.
-:::

+To access a cluster from the command line:

-These _initial_ values differ from _default_ values in that setting initial values does not update the license field values for any existing customers.

+1. Verify that the cluster is in a Running state:

-To set initial values for built-in license fields:

+   ```bash
+   replicated cluster ls
+   ```
+   In the output of the command, verify that the `STATUS` for the target cluster is `running`. For command usage, see [cluster ls](/reference/replicated-cli-cluster-ls).

-1. In the Vendor Portal, go to **License Fields**.

+1. Run the following command to open a new shell session with the kubeconfig configured for the cluster:

-1. Under **Built-in license options**, click **Edit** next to each license field where you want to set an initial value.

+   ```bash
+   replicated cluster shell CLUSTER_ID
+   ```
+   Where `CLUSTER_ID` is the unique ID for the running cluster that you want to access.

-   ![Edit Initial Value](/images/edit-initial-value.png)

+   For command usage, see [cluster shell](/reference/replicated-cli-cluster-shell).

-   [View a larger version of this image](/images/edit-initial-value.png)

+1. Verify that you can interact with the cluster through kubectl by running a command. For example:

-## Manage Custom License Fields

+   ```bash
+   kubectl get ns
+   ```

-You can create custom license fields in the Vendor Portal. For example, you can create a custom license field to set the number of active users permitted. Or, you can create a field that sets the number of nodes a customer is permitted on their cluster.

+1. Press Ctrl-D or type `exit` when done to end the shell and the connection to the server.

-The custom license fields that you create are displayed in the Vendor Portal for all new and existing customers. If the custom field is not hidden, it is also displayed to customers under the **Licenses** tab in the Replicated Admin Console.

+### Upgrade Clusters (kURL Only)

-### Limitation

+For kURL clusters provisioned with Compatibility Matrix, you can use the `cluster upgrade` command to upgrade the version of the kURL installer specification used to provision the cluster. A recommended use case for the `cluster upgrade` command is for testing your application's compatibility with Kubernetes API resource version migrations after upgrade.

-The maximum size for a license field value is 64KB.

+The following example upgrades a kURL cluster from its previous version to version `9d5a44c`:

-### Create Custom License Fields

+```bash
+replicated cluster upgrade cabb74d5 --version 9d5a44c
+```

-To create a custom license field:

+For command usage, see [cluster upgrade](/reference/replicated-cli-cluster-upgrade).

-1. Log in to the Vendor Portal and select the application.

+### Delete Clusters

-1. On the **License Fields** page, click **Create license field**.

+You can delete clusters using the Replicated CLI or the Vendor Portal.

-   create a new License Field dialog

+#### Replicated CLI

-   [View a larger version of this image](/images/license-add-custom-field.png)

+To delete a cluster using the Replicated CLI:

-1. Complete the following fields:

+1. 
Get the ID of the target cluster: - | Field | Description | - |-----------------------|------------------------| - | Field | The name used to reference the field. This value cannot be changed. | - | Title| The display name for the field. This is how the field appears in the Vendor Portal and the Admin Console. You can change the title in the Vendor Portal. | - | Type| The field type. Supported formats include integer, string, text (multi-line string), and boolean values. This value cannot be changed. | - | Default | The default value for the field for both existing and new customers. It is a best practice to provide a default value when possible. The maximum size for a license field value is 64KB. | - | Required | If checked, this prevents the creation of customers unless this field is explicitly defined with a value. | - | Hidden | If checked, the field is not visible to your customer in the Replicated Admin Console. The field is still visible to you in the Vendor Portal. **Note**: The Hidden field is displayed only for vendors with access to the Replicated installers (KOTS, kURL, Embedded Cluster). | + ``` + replicated cluster ls + ``` + In the output of the command, copy the ID for the cluster. -### Update Custom License Fields + **Example:** -To update a custom license field: + ``` + ID NAME DISTRIBUTION VERSION STATUS CREATED EXPIRES + 1234abc My Test Cluster eks 1.27 running 2023-10-09 17:08:01 +0000 UTC - + ``` -1. Log in to the Vendor Portal and select the application. -1. On the **License Fields** page, click **Edit Field** on the right side of the target row. Changing the default value for a field updates the value for each existing customer record that has not overridden the default value. + For command usage, see [cluster ls](/reference/replicated-cli-cluster-ls). - :::important - Enabling **Is this field is required?** updates the license field to be required on all new and existing customers. If you enable **Is this field is required?**, you must either set a default value for the field or manually update each existing customer to provide a value for the field. - ::: - -### Set Customer-Specific Values for Custom License Fields +1. Run the following command: -To set a customer-specific value for a custom license field: + ``` + replicated cluster rm CLUSTER_ID + ``` + Where `CLUSTER_ID` is the ID of the target cluster. + For command usage, see [cluster rm](/reference/replicated-cli-cluster-rm). +1. Confirm that the cluster was deleted: + ``` + replicated cluster ls CLUSTER_ID --show-terminated + ``` + Where `CLUSTER_ID` is the ID of the target cluster. + In the output of the command, you can see that the `STATUS` of the cluster is `terminated`. For command usage, see [cluster ls](/reference/replicated-cli-cluster-ls). +#### Vendor Portal -1. Log in to the Vendor Portal and select the application. -1. Click **Customers**. -1. For the target customer, click the **Manage customer** button. -1. Under **Custom fields**, enter values for the target custom license fields for the customer. +To delete a cluster using the Vendor Portal: - :::note - The maximum size for a license field value is 64KB. - ::: +1. Go to **Compatibility Matrix**. - Custom license fields section in the manage customer page +1. Under **Clusters**, in the vertical dots menu for the target cluster, click **Delete cluster**. 
- [View a larger version of this image](/images/customer-license-custom-fields.png) + Delete cluster button -### Delete Custom License Fields + [View a larger version of this image](/images/cmx-delete-cluster.png) -Deleted license fields and their values do not appear in the customer's license in any location, including your view in the Vendor Portal, the downloaded YAML version of the license, and the Admin Console **License** screen. +## About Using Compatibility Matrix with CI/CD -By default, deleting a custom license field also deletes all of the values associated with the field in each customer record. +Replicated recommends that you integrate Compatibility Matrix into your existing CI/CD workflow to automate the process of creating clusters to install your application and run tests. For more information, including additional best practices and recommendations for CI/CD, see [About Integrating with CI/CD](/vendor/ci-overview). -Only administrators can delete license fields. +### Replicated GitHub Actions -:::important -Replicated recommends that you take care when deleting license fields. +Replicated maintains a set of custom GitHub actions that are designed to replace repetitive tasks related to using Compatibility Matrix and distributing applications with Replicated. -Outages can occur for existing deployments if your application or the Admin Console **Config** page expect a license file to provide a required value. -::: +If you use GitHub Actions as your CI/CD platform, you can include these custom actions in your workflows rather than using Replicated CLI commands. Integrating the Replicated GitHub actions into your CI/CD pipeline helps you quickly build workflows with the required inputs and outputs, without needing to manually create the required CLI commands for each step. -To delete a custom license field: +To view all the available GitHub actions that Replicated maintains, see the [replicatedhq/replicated-actions](https://github.com/replicatedhq/replicated-actions/) repository in GitHub. -1. Log in to the Vendor Portal and select the application. -1. On the **License Fields** page, click **Edit Field** on the right side of the target row. -1. Click **Delete** on the bottom left of the dialog. -1. (Optional) Enable **Preserve License Values** to save values for the license field that were not set by the default in each customer record. Preserved license values are not visible to you or the customer. +For more information, see [Integrating Replicated GitHub Actions](/vendor/ci-workflows-github-actions). - :::note - If you enable **Preserve License Values**, you can create a new field with the same name and `type` as the deleted field to reinstate the preserved values. - ::: +### Recommended Workflows -1. Follow the instructions in the dialog and click **Delete**. +Replicated recommends that you maintain unique CI/CD workflows for development (continuous integration) and for releasing your software (continuous delivery). For example development and release workflows that integrate Compatibility Matrix for testing, see [Recommended CI/CD Workflows](/vendor/ci-workflows). + +### Test Script Recommendations + +Incorporating code tests into your CI/CD workflows is important for ensuring that developers receive quick feedback and can make updates in small iterations. 
Replicated recommends that you create and run all of the following test types as part of your CI/CD workflows:

+<TestRecs/>

+================
+File: docs/vendor/testing-ingress.md
+================
+# Accessing Your Application

+This topic describes the networking options for accessing applications deployed on clusters created with Replicated Compatibility Matrix. It also describes how to use and manage Compatibility Matrix tunnels.

+## Networking Options

+After deploying your application into Compatibility Matrix clusters, you will want to execute your tests using your own test runner.
+In order to do this, you need to access your application.
+Compatibility Matrix offers several methods to access your application.

+Some standard Kubernetes networking options are available, but vary based on the distribution.
+For VM-based distributions, there is no default network route into the cluster, making inbound connections challenging to create.

+### Port Forwarding
+Port forwarding is a low-cost and portable mechanism to access your application.
+Port forwarding works on all clusters supported by Compatibility Matrix because the connection is initiated from the client, over the Kubernetes API server port.
+If you have a single service or pod and are not worried about complex routing, this is a good mechanism.
+The basic steps are to connect the port-forward, execute your tests against localhost, and then shut down the port-forward.

+### LoadBalancer
+If your application is only running on cloud services (EKS, GKE, AKS), you can create a service of type `LoadBalancer`.
+This will provision the cloud-provider-specific load balancer.
+The `LoadBalancer` service will be fulfilled by the in-tree Kubernetes functionality that's integrated with the underlying cloud provider.
+You can then query the service definition using `kubectl` and connect to and execute your tests over the `LoadBalancer` IP address.

+### Ingress
+Ingress is a good way to recreate customer-representative environments, but the problem remains of how to get inbound access to the IP address that the ingress controller allocates.
+Ingress is also not perfectly portable; each ingress controller might require different annotations in the ingress resource to work properly.
+Supported ingress controllers vary based on the distribution.
+Compatibility Matrix supports ingress controllers that are running as a `NodePort` service.

+### Compatibility Matrix Tunnels
+All VM-based Compatibility Matrix clusters support tunneling traffic into a `NodePort` service.
+When this option is used, Replicated is responsible for creating the DNS record and TLS certs.
+Replicated will route traffic from `:443` and/or `:80` into the `NodePort` service you defined. For more information about using tunnels, see [Managing Compatibility Matrix Tunnels](#manage-nodes) below.

+The following diagram shows how the traffic is routed into the service using Compatibility Matrix tunnels:

+![Compatibility Matrix ingress](/images/compatibility-matrix-ingress.png)

+[View a larger version of this image](/images/compatibility-matrix-ingress.png)

+## Managing Compatibility Matrix Tunnels {#manage-nodes}

+Tunnels are viewed, created, and removed using the Compatibility Matrix UI within the Vendor Portal, the Replicated CLI, GitHub Actions, or directly with the Vendor API v3. There is no limit to the number of tunnels you can create for a cluster, and multiple tunnels can connect to a single service, if desired.

+### Limitations

+Compatibility Matrix tunnels have the following limitations:
+* One tunnel can only connect to one service. If you need fanout routing into different services, consider installing the nginx ingress controller as a `NodePort` service and exposing it.
+* Tunnels are not supported for cloud distributions (EKS, GKE, AKS).

+### Supported Protocols

+A tunnel can support one or more protocols.
+The supported protocols are HTTP, HTTPS, WS, and WSS.
+GRPC and other protocols are not routed into the cluster.

+### Exposing Ports
+Once you have a node port available on the cluster, you can use the Replicated CLI to expose the node port to the public internet.
+This can be used multiple times on a single cluster.

+Optionally, you can specify the `--wildcard` flag to expose this port with a wildcard DNS entry and TLS certificate.
+This feature adds extra time to provision the port, so it should only be used if necessary.
-![Install types license fields](/images/license-install-types.png) +```bash +replicated cluster port expose \ + [cluster id] \ + --port [node port] \ + --protocol [protocol] \ + --wildcard +``` -[View a larger version of this image](/images/license-install-types.png) +For example, if you have the nginx ingress controller installed and the node port is 32456: -The installation types that are enabled or disabled for a license determine the following: -* The Replicated installers ([Replicated KOTS](../intro-kots), [Replicated Embedded Cluster](/vendor/embedded-overview), [Replicated kURL](/vendor/kurl-about)) that the customer's license entitles them to use -* The installation assets and/or instructions provided in the Replicated Download Portal for the customer -* The customer's KOTS Admin Console experience +```bash +% replicated cluster ls +ID NAME DISTRIBUTION VERSION STATUS +1e616c55 tender_ishizaka k3s 1.29.2 running -Setting the supported installation types on a per-customer basis gives you greater control over the installation method used by each customer. It also allows you to provide a more curated Download Portal experience, in that customers will only see the installation assets and instructions that are relevant to them. +% replicated cluster port expose \ + 1e616c55 \ + --port 32456 \ + --protocol http \ + --protocol https \ + --wildcard +``` -## Understanding Install Types {#install-types} +:::note +You can expose a node port that does not yet exist in the cluster. +This is useful if you have a deterministic node port, but need the DNS name as a value in your Helm chart. +::: -In the customer license, under **Install types**, the **Available install types** field allows you to enable and disable different installation methods for the customer. +### Viewing Ports +To view all exposed ports, use the Replicated CLI `port ls` subcommand with the cluster ID: -You can enable one or more installation types for a license. +```bash +% replicated cluster port ls 1e616c55 +ID CLUSTER PORT PROTOCOL EXPOSED PORT WILDCARD STATUS +d079b2fc 32456 http http://happy-germain.ingress.replicatedcluster.com true ready -The following describes each installation type available, as well as the requirements for enabling each type: +d079b2fc 32456 https https://happy-germain.ingress.replicatedcluster.com true ready +``` - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Install Type | Description | Requirements |
|--------------|-------------|--------------|
| Existing Cluster (Helm CLI) | Allows the customer to install with Helm in an existing cluster. The customer does not have access to the Replicated installers (Embedded Cluster, KOTS, and kURL).<br/><br/>When the Helm CLI Air Gap Instructions (Helm CLI only) install option is also enabled, the Download Portal displays instructions on how to pull Helm installable images into a local repository. See Understanding Additional Install Options below. | The latest release promoted to the channel where the customer is assigned must contain one or more Helm charts. It can also include Replicated custom resources, such as the Embedded Cluster Config custom resource, the KOTS HelmChart, Config, and Application custom resources, or the Troubleshoot Preflight and SupportBundle custom resources. |
| Existing Cluster (KOTS install) | Allows the customer to install with Replicated KOTS in an existing cluster. | <ul><li>Your Vendor Portal team must have the KOTS entitlement</li><li>The latest release promoted to the channel where the customer is assigned must contain KOTS custom resources, such as the KOTS HelmChart, Config, and Application custom resources. For more information, see [About Custom Resources](/reference/custom-resource-about).</li></ul> |
| kURL Embedded Cluster (first generation product) | Allows the customer to install with Replicated kURL on a VM or bare metal server.<br/><br/>Note: For new installations, enable Replicated Embedded Cluster (current generation product) instead of Replicated kURL (first generation product). | <ul><li>Your Vendor Portal team must have the kURL entitlement</li><li>A kURL installer spec must be promoted to the channel where the customer is assigned. For more information, see Creating a kURL Installer.</li></ul> |
| Embedded Cluster (current generation product) | Allows the customer to install with Replicated Embedded Cluster on a VM or bare metal server. | <ul><li>Your Vendor Portal team must have the Embedded Cluster entitlement</li><li>The latest release promoted to the channel where the customer is assigned must contain an Embedded Cluster Config custom resource. For more information, see Embedded Cluster Config.</li></ul> |
    +### Removing Ports +Exposed ports are automatically deleted when a cluster terminates. +If you want to remove a port (and the associated DNS records and TLS certs) prior to cluster termination, run the `port rm` subcommand with the cluster ID: -## Understanding Additional Install Options {#install-options} +```bash +% replicated cluster port rm 1e616c55 --id d079b2fc +``` -After enabling installation types in the **Available install types** field, you can also enable the following options in the **Additional install options** field: +You can remove just one protocol, or all. +Removing all protocols also removes the DNS record and TLS cert. - - - - - - - - - - - - - - - - -
| Install Type | Description | Requirements |
|--------------|-------------|--------------|
| Helm CLI Air Gap Instructions (Helm CLI only) | When enabled, a customer will see instructions on the Download Portal on how to pull Helm installable images into their local repository.<br/><br/>Helm CLI Air Gap Instructions is enabled by default when you select the Existing Cluster (Helm CLI) install type. For more information, see [Installing with Helm in Air Gap Environments](/vendor/helm-install-airgap). | The Existing Cluster (Helm CLI) install type must be enabled |
| Air Gap Installation Option (Replicated Installers only) | When enabled, new installations with this license have an option in their Download Portal to install from an air gap package or do a traditional online installation. | At least one of the following Replicated install types must be enabled:<br/><ul><li>Existing Cluster (KOTS install)</li><li>kURL Embedded Cluster (first generation product)</li><li>Embedded Cluster (current generation product)</li></ul> |
    +================ +File: docs/vendor/testing-pricing.mdx +================ +# Compatibility Matrix Pricing -## About Migrating Existing Licenses to Use Install Types +This topic describes the pricing for Replicated Compatibility Matrix. -By default, when an existing customer license is migrated to include the Beta **Install types** field, the Vendor Portal automatically enables certain install types so that the customer does not experience any interruptions or errors in their deployment. +## Pricing Overview -The Vendor Portal uses the following logic to enable install types for migrated licenses: +Compatibility Matrix usage-based pricing includes a $0.50 per cluster startup cost, plus by the minute pricing based on instance size and count (starting at the time the cluster state changed to "running" and ending when the cluster is either expired (TTL) or removed). Minutes will be rounded up, so there will be a minimum charge of $0.50 plus 1 minute for all running clusters. Each cluster's cost will be rounded up to the nearest cent and subtracted from the available credits in the team account. Remaining credit balance is viewable on the Replicated Vendor Portal [Cluster History](https://vendor.replicated.com/compatibility-matrix/history) page or with the Vendor API v3 [/vendor/v3/cluster/stats](https://replicated-vendor-api.readme.io/reference/getclusterstats) endpoint. Cluster [add-ons](/vendor/testing-cluster-addons) may incur additional charges. -If the existing license has the **KOTS Install Enabled** field enabled, then the Vendor Portal enables the following install types in the migrated license by default: -* Existing Cluster (Helm CLI) -* Existing Cluster (KOTS install) -* kURL Embedded Cluster (first generation product) -* Embedded Cluster (current generation product) +If the team's available credits are insufficient to run the cluster for the full duration of the TTL, the cluster creation will be rejected. -Additionally, if the existing **KOTS Install Enabled** license also has the **Airgap Download Enabled** option enabled, then the Vendor Portal enables both of the air gap install options in the migrated license (**Helm CLI Air Gap Instructions (Helm CLI only)** and **Air Gap Installation Option (Replicated Installers only)**). +## Cluster Quotas -Otherwise, if the **KOTS Install Enabled** field is disabled for the existing license, then the Vendor Portal enables only the **Existing Cluster (Helm CLI)** install type by default. All other install types will be disabled by default. +Each team is limited by the number of clusters that they can run concurrently. To increase the quota, reach out to your account manager. -================ -File: docs/vendor/licenses-reference-helm.md -================ -# Checking Entitlements in Helm Charts Before Deployment - -This topic describes how to check license entitlements before a Helm chart is installed or upgraded. The information in this topic applies to Helm charts installed with Replicated KOTS or Helm. - -The Replicated SDK API can be used to check entitlements at runtime. For more information, see [Querying Entitlements with the Replicated SDK API](licenses-reference-sdk). - -## Overview - -The Replicated registry automatically injects customer entitlement information in the `global.replicated.licenseFields` field of your Helm chart values. 
For example:

```yaml
# Helm chart values.yaml
global:
  replicated:
    licenseFields:
      expires_at:
        description: License Expiration
        name: expires_at
        signature:
          v1: iZBpESXx7fpdtnbMKingYHiJH42rP8fPs0x8izy1mODckGBwVoA...
        title: Expiration
        value: "2023-05-30T00:00:00Z"
        valueType: String
```

You can access the values in the `global.replicated.licenseFields` field from your Helm templates to check customer entitlements before installation.

## Prerequisite

Add the Replicated SDK to your application:
* For Helm-based applications, see [Install the SDK as a Subchart](/vendor/replicated-sdk-installing#install-the-sdk-as-a-subchart) in _Installing the Replicated SDK_
* For applications that use standard Kubernetes manifests, see [Install the SDK Alongside a Kubernetes Manifest-Based Application](/vendor/replicated-sdk-installing#manifest-app) in _Installing the Replicated SDK_

## Check Entitlements Before Installation or Upgrade

To check entitlements before installation:

1. Create or edit a customer to use for testing:

   1. In the Vendor Portal, click **Customers**. Select a customer and click the **Manage customer** tab. Alternatively, click **+ Create customer** to create a new customer. For more information, see [Creating and Managing Customers](/vendor/releases-creating-customer).

   1. Edit the built-in license fields or add custom fields for the customer. For example, you can set a license expiration date in the **Expiration policy** field. Or, you can create a custom field that limits the number of nodes a user is permitted in their cluster. For more information, see [Managing Customer License Fields](/vendor/licenses-adding-custom-fields).

1. In your Helm chart, update the Helm templates with one or more directives to access the license field. For example, you can access the built-in `expires_at` field with `{{ .Values.global.replicated.licenseFields.expires_at }}`. Add the desired logic to control application behavior based on the values of license fields.

   For more information about accessing values files from Helm templates, see [Values Files](https://helm.sh/docs/chart_template_guide/values_files/) in the _Chart Template Guide_ section of the Helm documentation.

1. Test your changes by promoting a new release and installing in a development environment:

   1. Package your Helm chart and its dependencies into a `.tgz` chart archive. See [Packaging a Helm Chart for a Release](helm-install-release).

   1. Add the `.tgz` archive to a release and promote to a development channel, such as Unstable. See [Managing Releases with the Vendor Portal](/vendor/releases-creating-releases).

   1. Install in a development environment using the license ID for the test customer that you created. See [Installing with Helm](install-with-helm).

1. Repeat these steps to add and test new license fields.

================
File: docs/vendor/licenses-reference-kots-runtime.mdx
================
# Querying Entitlements with the KOTS License API

This topic describes how to use the Replicated KOTS License API to query license fields during runtime. The information in this topic applies to applications installed with KOTS.

:::important
Using the KOTS License API to check entitlements during runtime is _not_ recommended for new applications distributed with Replicated. Instead, Replicated recommends that you include the Replicated SDK with your application and query entitlements during runtime using the SDK in-cluster API.
See [Checking Entitlements with the Replicated SDK](licenses-reference-sdk).
:::

## Overview

KOTS includes default logic to control access to features in the KOTS Admin Console and KOTS CLI based on the values for the built-in fields in the customer's license. For example, by default, KOTS uses the built-in `expires_at` field to prevent an instance from receiving updates when the customer license expires. You can add custom logic to your application to control the behavior of your application based on the built-in fields or any of the custom fields that you create.

For information about creating custom license fields, see [Managing Customer License Fields](licenses-adding-custom-fields). For the list of built-in fields in customer licenses, see [Built-In License Fields](/vendor/licenses-using-builtin-fields).

The KOTS Admin Console runs on the customer's cluster and provides entitlement information during application runtime. You can query the Admin Console `/license/v1/license` endpoint to enforce entitlements at runtime.

## Query Fields

To reference license fields at runtime, send an HTTP request to the Admin Console `/license/v1/license` endpoint at the following location:

```
http://kotsadm:3000/license/v1/license
```

The query returns a response in JSON format. For example:

```javascript
{"license_id":"WicPRaoCv1pJ57ZMf-iYRxTj25eZalw3",
"installation_id":"a4r1s31mj48qw03b5vwbxvm5x0fqtdl6",
"assignee":"FirstCustomer",
"release_channel":"Unstable",
"license_type":"trial",
"expiration_time":"2026-01-23T00:00:00Z",
"fields":[
  {"field":"Customer ID","title":"Customer ID (Internal)","type":"Integer","value":121,"hide_from_customer":true},
  {"field":"Modules","title":"Enabled Modules","type":"String","value":"Analytics, Integration"}]}
```
## Parse the API Response

To return a license field value, parse the response using the name of the license field.

For example, the following JavaScript parses the response for the value of a `seat_count` custom field:

```javascript
import * as rp from "request-promise";

rp({
  uri: "http://kotsadm:3000/license/v1/license",
  json: true
}).then(license => {
  const seatCount = license.fields.find((field) => {
    return field.field === "seat_count";
  });
  console.log(seatCount.value);
}).catch(err => {
  // Handle error response from `kotsadm`
});
```

================
File: docs/vendor/licenses-reference-sdk.mdx
================
# Querying Entitlements with the Replicated SDK API

This topic describes how to query license entitlements at runtime using the Replicated SDK in-cluster API. The information in this topic applies to applications installed with Replicated KOTS or Helm.

## Overview

The Replicated SDK retrieves up-to-date customer license information from the Vendor Portal during runtime. This means that any changes to customer licenses are reflected in real time in the customer environment. For example, you can revoke access to your application when a license expires, expose additional product functionality dynamically based on entitlements, and more. For more information about distributing the SDK with your application, see [About the Replicated SDK](replicated-sdk-overview).

After the Replicated SDK is initialized and running in a customer environment, you can use the following SDK API endpoints to get information about the license:
* `/api/v1/license/info`: List license details, including the license ID, the channel the customer is assigned to, and the license type.
-* `/api/v1/license/fields`: List all the fields in the license. -* `/api/v1/license/fields/{field_name}`: List details about a specific license field, including the field name, description, type, and the value. - -For more information about these endpoints, see [license](/reference/replicated-sdk-apis#license) in _Replicated SDK API_. - -## Prerequisite - -Add the Replicated SDK to your application: -* For Helm-based applications, see [Install the SDK as a Subchart](/vendor/replicated-sdk-installing#install-the-sdk-as-a-subchart) in _Installing the Replicated SDK_ -* For applications that use standard Kubernetes manifests, see [Install the SDK Alongside a Standard Manifest-Based Application](/vendor/replicated-sdk-installing#manifest-app) in _Installing the Replicated SDK_ - -## Query License Entitlements at Runtime {#runtime} - -To use the SDK API to query entitlements at runtime: - -1. Create or edit a customer to use for testing: - - 1. In the Vendor Portal, click **Customers**. Select a customer and click the **Manage customer** tab. Alternatively, click **+ Create customer** to create a new customer. For more information, see [Creating and Managing Customers](/vendor/releases-creating-customer). - - 1. Edit the built-in fields and add custom fields for the customer. For example, you can set a license expiration date in the **Expiration policy** field. Or, you can create a custom field that limits the number of nodes a user is permitted in their cluster. For more information, see [Managing Customer License Fields](/vendor/licenses-adding-custom-fields). - -1. (Recommended) Develop against the SDK API `license` endpoints locally: - - 1. Install the Replicated SDK as a standalone component in your cluster. This is called _integration mode_. Installing in integration mode allows you to develop locally against the SDK API without needing to create releases for your application in the Vendor Portal. See [Developing Against the SDK API](/vendor/replicated-sdk-development). - - 1. In your application, add logic to control application behavior based on the customer license information returned by the SDK API service running in your cluster. See [license](/reference/replicated-sdk-apis#license) in _Replicated SDK API (Beta)_. - - **Example:** - - ```bash - curl replicated:3000/api/v1/license/fields/expires_at - ``` - - ```json - { - "name": "expires_at", - "title": "Expiration", - "description": "License Expiration", - "value": "2023-05-30T00:00:00Z", - "valueType": "String", - "signature": { - "v1": "c6rsImpilJhW0eK+Kk37jeRQvBpvWgJeXK2M..." - } - } - ``` - -1. When you are ready to test your changes outside of integration mode, do the following: - - 1. Package your Helm chart and its dependencies (including the Replicated SDK) into a `.tgz` chart archive. See [Packaging a Helm Chart for a Release](helm-install-release). - - 1. Add the `.tgz` archive to a release and promote to a development channel, such as Unstable. See [Managing Releases with the Vendor Portal](/vendor/releases-creating-releases). - - 1. Install in a development environment using the license ID for the test customer that you created. See [Installing with Helm](install-with-helm). - - 1. (Optional) As needed, verify the license information returned by the SDK API in your development environment using port forwarding to access the SDK service locally: - - 1. 
Use port forwarding to access the `replicated` service from the local development environment on port 3000: - - ```bash - kubectl port-forward service/replicated 3000 - ``` - - The output looks similar to the following: - - ```bash - Forwarding from 127.0.0.1:3000 -> 3000 - ``` - - For more information about `kubectl port-forward`, see [port-forward](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#port-forward) in the kubectl reference documentation. - - 1. With the port forward running, in another terminal, use the SDK API to return information about the license. - - **Example:** - - ``` - curl localhost:3000/api/v1/license/fields/expires_at - ``` - -1. Repeat these steps to add and test new license fields. - -1. (Recommended) Use signature verification in your application to ensure the integrity of the license field. See [Verifying License Field Signatures with the Replicated SDK API](/vendor/licenses-verify-fields-sdk-api). - -================ -File: docs/vendor/licenses-referencing-fields.md -================ -# Checking Entitlements in Preflights with KOTS Template Functions - -This topic describes how to check custom entitlements before installation or upgrade using preflight checks and KOTS template functions in the License context. The information in this topic applies to applications installed with KOTS. - -## Overview - -KOTS includes default logic to control access to features in the Replicated Admin Console and KOTS CLI based on the values for the built-in fields in the customer's license. For example, by default, KOTS uses the built-in `expires_at` field to prevent an instance from receiving updates when the customer license expires. You can add custom logic to your application to control the behavior of your application based on the built-in fields or any of the custom fields that you create. - -For more information, see [Managing Customer License Fields](licenses-adding-custom-fields). For the list of built-in fields in customer licenses, see [Built-In License Fields](/vendor/licenses-using-builtin-fields). - -## Add Preflights to Check Entitlements Before Installation or Upgrade {#install} - -To enforce entitlements when your customer installs or updates your application, -you can use the Replicated LicenseFieldValue template function in your application to read the value of license fields. The LicenseFieldValue template function accepts the built-in license fields and any custom fields that you configure. For more information, see [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue) in _License Context_. - -For example, a license might limit how many nodes are permitted in a customer's -cluster. You could define this limit by creating a `node_count` custom license field: - -| Name | Key | Type | Description | -|------|-----|------|-------------| -| Node Count | node_count | Integer | The maximum number of nodes permitted | - -To enforce the node count when a customer installs or updates your application, -you can use LicenseFieldValue to create a preflight check that references the custom `node_count` field: - -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: Preflight -metadata: - name: example-preflight-checks -spec: - analyzers: - - nodeResources: - checkName: Node Count Check - outcomes: - - fail: - when: 'count() > {{repl LicenseFieldValue "node_count"}}' - message: The cluster has more nodes than the {{repl LicenseFieldValue "node_count"}} you are licensed for. 
-      - pass:
-        message: The number of nodes matches your license ({{repl LicenseFieldValue "node_count"}})
-```
-
-In the example above, the preflight check uses the `nodeResources` analyzer and the value of the custom `node_count` field to determine if the customer has exceeded the maximum number of nodes permitted by their license. If the preflight check fails, a failure message is displayed to the user and KOTS prevents the installation or upgrade from continuing.
-
-For more information about this example, see [How Can I Use License Custom Fields Value in a Pre-Flight Check?](https://help.replicated.com/community/t/how-can-i-use-license-custom-fields-value-in-a-pre-flight-check/624) in Replicated Community.
-
-For more information about defining preflight checks, see [Defining Preflight Checks](preflight-defining).
-
-================
-File: docs/vendor/licenses-using-builtin-fields.mdx
-================
-import GitOpsNotRecommended from "../partials/gitops/_gitops-not-recommended.mdx"
-
-# Built-In License Fields
-
-This topic describes the built-in license fields that appear in customer licenses for applications distributed with Replicated.
-
-## Overview
-
-The license associated with each customer record in the Replicated Vendor Portal includes several built-in fields. These built-in fields include customer properties (such as the customer name, customer email, and the Vendor Portal channel where the customer is assigned), the license expiration date, as well as the Replicated features that are enabled for the customer (such as the supported install types or Admin Console features).
-
-When a customer installs an application distributed with Replicated, the values for each built-in and custom field in their license can be accessed using the [Replicated SDK](/vendor/replicated-sdk-overview) in-cluster API [license](/reference/replicated-sdk-apis#license) endpoints. Applications installed with a Replicated installer (KOTS, kURL, Embedded Cluster) can also access license fields using the Replicated KOTS [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue) template function.
-
-The following shows an example of a customer license:
-
-```yaml
-apiVersion: kots.io/v1beta1
-kind: License
-metadata:
-  name: customertest
-spec:
-  appSlug: gitea
-  channelID: 2iy68JBTkvUqamgD...
-  channelName: Beta
-  channels:
-  - channelID: 2iy68JBTkvUqamgD...
-    channelName: Beta
-    channelSlug: beta
-    endpoint: https://replicated.app
-    isDefault: true
-    isSemverRequired: true
-    replicatedProxyDomain: proxy.replicated.com
-  customerEmail: example@replicated.com
-  customerName: Customer Test
-  endpoint: https://replicated.app
-  entitlements:
-    expires_at:
-      description: License Expiration
-      signature: {}
-      title: Expiration
-      value: ""
-      valueType: String
-  isAirgapSupported: true
-  isEmbeddedClusterDownloadEnabled: true
-  isKotsInstallEnabled: true
-  isSemverRequired: true
-  isSupportBundleUploadSupported: true
-  licenseID: 2sY6ZC2J9sO2...
-  licenseSequence: 4
-  licenseType: prod
-  replicatedProxyDomain: proxy.replicated.com
-  signature: eyJsaWNlbnNlRGF...
-```
-
-## License Field Names
-
-This section lists the built-in fields that are included in customer licenses for applications distributed with Replicated.
-
-:::note
-The built-in license fields are reserved field names.
-:::
+## VM Cluster Pricing (OpenShift, RKE2, K3s, Kind, Embedded Cluster, kURL)
-
-### General License Fields
+VM-based clusters approximately match the AWS m6i instance type pricing.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameDescription
    `appSlug`The unique application slug that the customer is associated with. This value never changes.
    `channelID`The ID of the channel where the customer is assigned. When the customer's assigned channel changes, the latest release from that channel will be downloaded on the next update check.
    `channelName`The name of the channel where the customer is assigned. When the customer's assigned channel changes, the latest release from that channel will be downloaded on the next update check.
    `licenseID`, `licenseId`Unique ID for the installed license. This value never changes.
    `customerEmail`The customer email address.
    `endpoint`For applications installed with a Replicated installer (KOTS, kURL, Embedded Cluster), this is the endpoint that the KOTS Admin Console uses to synchronize the licenses and download updates. This is typically `https://replicated.app`.
    `entitlementValues`Includes both the built-in `expires_at` field and any custom license fields. For more information about adding custom license fields, see [Managing Customer License Fields](licenses-adding-custom-fields).
    `expires_at`

    Defines the expiration date for the license. The date is encoded in ISO 8601 format (`2026-01-23T00:00:00Z`) and is set to midnight UTC at the beginning of the calendar day (`00:00:00`) on the date selected. If a license does not expire, this field is missing.

    For information about the default behavior when a license expires, see [License Expiration Handling](licenses-about#expiration) in _About Customers_.

    `licenseSequence`Every time a license is updated, its sequence number is incremented. This value represents the license sequence that the client currently has.
    `customerName`The name of the customer.
    `signature`The base64-encoded license signature. This value will change when the license is updated.
    `licenseType`A string value that describes the type of the license, which is one of the following: `paid`, `trial`, `dev`, `single-tenant-vendor-managed` or `community`. For more information about license types, see [Managing License Type](licenses-about-types).
    - -### Install Types - -The table below describes the built-in license fields related to the supported install type(s). For more information, see [Managing Install Types for a License](/vendor/licenses-install-types). +
    Instance TypeVCPUsMemory (GiB)USD/Credit per hour
    r1.small28$0.096
    r1.medium416$0.192
    r1.large832$0.384
    r1.xlarge1664$0.768
    r1.2xlarge32128$1.536
    - - - - - - - - - - - - - - - - - - - - - -
    Field NameDescription
    `isEmbeddedClusterDownloadEnabled`

    If a license supports installation with Replicated Embedded Cluster, this field is set to `true` or missing. If Embedded Cluster installations are not supported, this field is `false`.

    This field requires that the vendor has the Embedded Cluster entitlement and that the release at the head of the channel includes an [Embedded Cluster Config](/reference/embedded-config) custom resource. This field also requires that the "Install Types" feature is enabled for your Vendor Portal team. Reach out to your Replicated account representative to get access.

    `isHelmInstallEnabled`

    If a license supports Helm installations, this field is set to `true` or missing. If Helm installations are not supported, this field is set to `false`. This field requires that the vendor packages the application as Helm charts and, optionally, Replicated custom resources.

    This field requires that the "Install Types" feature is enabled for your Vendor Portal team. Reach out to your Replicated account representative to get access.

    `isKotsInstallEnabled`

    If a license supports installation with Replicated KOTS, this field is set to `true`. If KOTS installations are not supported, this field is either `false` or missing.

    This field requires that the vendor has the KOTS entitlement.

    `isKurlInstallEnabled`

    If a license supports installation with Replicated kURL, this field is set to `true` or missing. If kURL installations are not supported, this field is `false`.

    This field requires that the vendor has the kURL entitlement and a promoted kURL installer spec. This field also requires that the "Install Types" feature is enabled for your Vendor Portal team. Reach out to your Replicated account representative to get access.

    +## Cloud Cluster Pricing -### Install Options +### AWS EKS Cluster Pricing -The table below describes the built-in license fields related to install options. +AWS clusters will be charged AWS pricing plus a markup of 20%. Note that the markup will be calculated at the rounded price per hour in order to make hourly prices fixed. Pricing for Extended Support EKS versions (those Kubernetes versions considered deprecated by upstream Kubernetes) will have additional charges applied. - - + + + + - - + + + + - - + + + + -
    Field NameDescriptionInstance TypeVCPUsMemory (GiB)USD/Credit per hour
    `isAirgapSupported`

    If a license supports air gap installations with the Replicated installers (KOTS, kURL, Embedded Cluster), then this field is set to `true`. If Replicated installer air gap installations are not supported, this field is missing.

    When you enable this field for a license, the `license.yaml` file will have license metadata embedded in it and must be re-downloaded.

    m6i.large28$0.115
    `isHelmAirgapEnabled`

    If a license supports Helm air gap installations, then this field is set to `true` or missing. If Helm air gap is not supported, this field is missing.

    When you enable this feature, the `license.yaml` file will have license metadata embedded in it and must be re-downloaded.

    This field requires that the "Install Types" feature is enabled for your Vendor Portal team. Reach out to your Replicated account representative to get access.

    m6i.xlarge416$0.230
    - -### Admin Console Feature Options - -The table below describes the built-in license fields related to the Admin Console feature options. The Admin Console feature options apply only to licenses that support installation with the Replicated installers (KOTS, kURL, Embedded Cluster). - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameDescription
    `isDisasterRecoverySupported`If a license supports the Embedded Cluster disaster recovery feature, this field is set to `true`. If a license does not support disaster recovery for Embedded Cluster, this field is either missing or `false`. **Note**: Embedded Cluster Disaster Recovery is in Alpha. To get access to this feature, reach out to Alex Parker at [alexp@replicated.com](mailto:alexp@replicated.com). For more information, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery).
    `isGeoaxisSupported`(kURL Only) If a license supports integration with GeoAxis, this field is set to `true`. If GeoAxis is not supported, this field is either `false` or missing. **Note**: This field requires that the vendor has the GeoAxis entitlement. It also requires that the vendor has access to the Identity Service entitlement.
    `isGitOpsSupported` If a license supports the KOTS AutoGitOps workflow in the Admin Console, this field is set to `true`. If Auto-GitOps is not supported, this field is either `false` or missing. See [KOTS Auto-GitOps Workflow](/enterprise/gitops-workflow).
    `isIdentityServiceSupported`If a license supports identity-service enablement for the Admin Console, this field is set to `true`. If identity service is not supported, this field is either `false` or missing. **Note**: This field requires that the vendor have access to the Identity Service entitlement.
    `isSemverRequired`If set to `true`, this field requires that the Admin Console orders releases according to Semantic Versioning. This field is controlled at the channel level. For more information about enabling Semantic Versioning on a channel, see [Semantic Versioning](releases-about#semantic-versioning) in _About Releases_.
    `isSnapshotSupported`If a license supports the snapshots backup and restore feature, this field is set to `true`. If a license does not support snapshots, this field is either missing or `false`. **Note**: This field requires that the vendor have access to the Snapshots entitlement.
    `isSupportBundleUploadSupported`If a license supports uploading a support bundle in the Admin Console, this field is set to `true`. If a license does not support uploading a support bundle, this field is either missing or `false`.
    - -================ -File: docs/vendor/licenses-verify-fields-sdk-api.md -================ -# Verifying License Field Signatures with the Replicated SDK API - -This topic describes how to verify the signatures of license fields when checking customer license entitlements with the Replicated SDK. - -## Overview - -To prevent man-in-the-middle attacks or spoofing by your customers, license fields are cryptographically signed with a probabilistic signature scheme (PSS) signature to ensure their integrity. The PSS signature for a license field is included in the response from the Replicated SDK API `/license/fields` and `/license/fields/{field-name}` endpoints as a Base64 encoded string. - -The following shows an example of a Base64 encoded PSS signature for an `expires_at` field returned by the SDK API: - -```json -{ - "name": "expires_at", - "title": "Expiration", - "description": "License Expiration", - "value": "2023-05-30T00:00:00Z", - "valueType": "String", - "signature": { - "v1": "c6rsImpilJhW0eK+Kk37jeRQvBpvWgJeXK2MD0YBlIAZEs1zXpmvwLdfcoTsZMOj0lZbxkPN5dPhEPIVcQgrzfzwU5HIwQbwc2jwDrLBQS4hGOKdxOWXnBUNbztsHXMqlAYQsmAhspRLDhBiEoYpFV/8oaaAuNBrmRu/IVAW6ahB4KtP/ytruVdBup3gn1U/uPAl5lhzuBifaW+NDFfJxAX..." - } -} -``` - -Replicated recommends that you use signature verification to ensure the integrity of each license field you use in your application. For more information about how to check entitlements in your application for Helm CLI installations, see [Checking Entitlements in Helm Charts Before Deployment](licenses-reference-helm). - -## Requirement - -Include the Replicated SDK as a dependency of your application Helm chart. For more information, see [Install the SDK as a Subchart](replicated-sdk-installing#install-the-sdk-as-a-subchart) in _Installing the Replicated SDK_. - -## Use Your Public Key to Verify License Field Signatures - -In your application, you can use your public key (available in the Vendor Portal) and the MD5 hash of a license field value to verify the PSS signature of the license field. - -To use your public key to verify license field signatures: - -1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Settings** page. - -1. Click the **Replicated SDK Signature Verification** tab. - - ![signature verification page](/images/signature-verification.png) - [View a larger version of this image](/images/signature-verification.png) - -1. Under **Your public key**, copy the key and save it in a secure location. - -1. (Optional) Under **Verification**, select the tab for the necessary programming language, and copy the code sample provided. - -1. In your application, add logic that uses the public key to verify the integrity of license field signatures. If you copied one of the code samples from the Vendor Portal in the previous step, paste it into your application and make any additional edits as required. - - If you are not using one of the code samples provided, consider the following requirements for verifying license field values: - * License field signatures included in the response from the SDK API `/license/fields` and `/license/fields/{field-name}` endpoints are Base64 encoded and must be decoded before they are verified. - * The MD5 hash of the license field value is required to verify the signature of the license field. The raw license field value is included in the response from the SDK API `/license/fields` and `/license/fields/{field-name}` endpoints. The MD5 hash of the value must be calculated and used for signature verification. 
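
For example, the following Node.js sketch shows what this verification can look like. This sketch is illustrative only: it assumes that the signature is an RSA-PSS signature calculated over the MD5 hash of the raw field value, and that SHA-256 is the PSS digest. Confirm the exact parameters against the code samples provided in the Vendor Portal before using this in your application.

```javascript
const crypto = require("crypto");

// publicKeyPem is the public key copied from the Vendor Portal.
// field is one entry returned by the SDK API /license/fields or
// /license/fields/{field-name} endpoints.
function verifyLicenseField(publicKeyPem, field) {
  // Calculate the MD5 hash of the raw license field value
  const md5 = crypto.createHash("md5").update(String(field.value)).digest();
  // Decode the Base64 encoded signature before verifying
  const signature = Buffer.from(field.signature.v1, "base64");
  // Assumed scheme: RSA-PSS over the MD5 hash, with SHA-256 as the digest
  return crypto.verify(
    "sha256",
    md5,
    { key: publicKeyPem, padding: crypto.constants.RSA_PKCS1_PSS_PADDING },
    signature
  );
}
```

If verification fails, treat the field value as untrusted and handle it the same way that your application handles an invalid license.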
- -================ -File: docs/vendor/namespaces.md -================ -# Application Namespaces - -Replicated strongly recommends that applications are architected to deploy a single application into a single namespace when possible. - -If you are distributing your application with Replicated KOTS, you can implement an architecture in which a single application is deployed into a single namespace. - -To do this, omit any namespace in the application manifests `metadata.namespace`. Do not use the Config custom resource object to make the namespace user-configurable. - -When you do not specify a namespace in application manifests, KOTS deploys to whatever namespace it is already running in. This gives the most flexibility when deploying to end user environments, as users already select the namespace where KOTS runs. Scoping to a single namespace also allows the app to run with minimal Kubernetes permissions, which can reduce friction when an application runs as a tenant in a large cluster. Overall, letting the end user manage namespaces is the easiest way to reduce friction. - -The following examples demonstrate the recommended approach of excluding the namespace from the application manifests, as well as the incorrect approaches of hardcoding the namespace or injecting the namespace as a user-supplied value: - -**Recommended** - -```yaml -# good, namespace absent -apiVersion: apps/v1 -kind: Deployment -metadata: - name: spline-reticulator -spec: -``` - -**Not Recommended** - -```yaml -# bad, hardcoded -apiVersion: apps/v1 -kind: Deployment -metadata: - name: spline-reticulator - namespace: graphviz-pro -spec: -``` - -```yaml -# bad, configurable -apiVersion: apps/v1 -kind: Deployment -metadata: - name: spline-reticulator - namespace: repl{{ ConfigOption "gv_namespace" }} -spec: -``` - -================ -File: docs/vendor/offsite-backup.md -================ -# Offsite Data Backup - -Replicated stores customer data in multiple databases across Amazon Web -Services (AWS) S3 buckets. Clustering and network redundancies help to avoid a -single point of failure. - -The offsite data backup add-on provides additional redundancy by copying data to -an offsite Google Cloud Provider (GCP) storage location. This helps to mitigate -any potential data loss caused by an outage to AWS. - -:::note -The offsite data backup add-on is available only to [Replicated Enterprise](https://www.replicated.com/pricing/) customers at an additional cost. Please [open a product request](https://vendor.replicated.com/support?requestType=feature&productArea=vendor) if you are interested in this feature. -::: - -## Overview - -When the offsite data backup add-on is enabled, data is migrated from Replicated's existing AWS S3 buckets to a dedicated second set of AWS S3 buckets. These buckets are only used for vendors with this add-on enabled, and all vendor data remains logically isolated by vendor Team. After data is migrated from existing S3 buckets to the secondary buckets, -all data is deleted from the original S3 buckets. - -To ensure customer data in the offsite GCP storage remains up-to-date, the GCP -account uses the Google Storage Transfer service to synchronize at least daily with the -secondary dedicated S3 buckets. - -The offsite GCP data backup functions only as secondary data storage and does not serve customer -data. Customer data continues to be served from the AWS S3 buckets. 
In the case of an AWS outage, Replicated can use a manual -process to restore customer data from the GCP backups into a production-grade database. - -For more information, see [Architecture](#architecture) below. - -## Architecture - -The following diagram shows the flow of air gap build data and registry image data -when the offsite data backup add-on is enabled. The flow of data that is backed -up offsite in GCP is depicted with green arrows. - -![architecture of offsite data storage with the offsite data backup add-on](../../static/images/offsite-backup.png) - -[View a larger version of this image](../../static/images/offsite-backup.png) - -As shown in the diagram above, when the offsite data backup add-on is enabled, -registry and air gap data are stored in dedicated S3 buckets. Both of -these dedicated S3 buckets back up data to offsite storage in GCP. - -The diagram also shows how customer installations continue to pull data from the -vendor registry and the customer portal when offsite data backup is enabled. - -================ -File: docs/vendor/operator-defining-additional-images.mdx -================ -import AirGapBundle from "../partials/airgap/_airgap-bundle.mdx" - -# Defining Additional Images - -This topic describes how to define additional images to be included in the `.airgap` bundle for a release. - -## Overview - - - -When building the `.airgap` bundle for a release, the Replicated Vendor Portal finds and includes all images defined in the Pod specs for the release. During installation or upgrade, KOTS retags images from the `.airgap` bundle and pushes them to the registry configured in KOTS. - -Any required images that are _not_ defined in your application manifests must be listed in the `additionalImages` attribute of the KOTS Application custom resource. This ensures that the images are included in the `.airgap` bundle for the release. - -## Define Additional Images for Air Gap Bundles - -KOTS supports including the following types of images in the `additionalImages` field: - -* Public images referenced by the docker pullable image name. -* Images pushed to a private registry that was configured in the Vendor Portal, referenced by the docker-pullable, upstream image name. For more information about configuring private registries, see [Connecting to an External Registry](/vendor/packaging-private-images). - :::note - If you use the [Replicated proxy registry](/vendor/private-images-about) for online (internet-connected) installations, be sure to use the _upstream_ image name in the `additionalImages` field, rather than referencing the location of the image at `proxy.replicated.com`. - ::: -* Images pushed to the Replicated registry referenced by the `registry.replicated.com` name. - -The following example demonstrates adding multiple images to `additionalImages`: - -```yaml -apiVersion: kots.io/v1beta1 -kind: Application -metadata: - name: my-app -spec: - additionalImages: - - elasticsearch:7.6.0 - - quay.io/orgname/private-image:v1.2.3 - - registry.replicated.com/my-operator/my-private-image:abd123f -``` - -================ -File: docs/vendor/operator-defining-additional-namespaces.md -================ -# Defining Additional Namespaces - -Operators often need to be able to manage resources in multiple namespaces in the cluster. -When deploying an application to an existing cluster, Replicated KOTS creates a Kubernetes Role and RoleBinding that are limited to only accessing the namespace that the application is being installed into. 
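
For illustration, a namespace-scoped Role and RoleBinding similar in spirit to what KOTS creates might look like the following. This is a sketch only: the object names and rules shown here are hypothetical, not the exact resources that KOTS generates.

```yaml
# Sketch of namespace-scoped RBAC. Names and rules are illustrative only.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: kotsadm-role        # hypothetical name
  namespace: my-app         # the namespace the application is installed into
rules:
  - apiGroups: ["*"]
    resources: ["*"]
    verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kotsadm-rolebinding # hypothetical name
  namespace: my-app
subjects:
  - kind: ServiceAccount
    name: kotsadm           # the Admin Console service account
    namespace: my-app
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kotsadm-role
```

Because a Role is namespace-scoped, even a rule that grants all verbs on all resources applies only within that one namespace. This is why access to any additional namespaces requires its own Role and RoleBinding, as described below.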
-
-In addition to RBAC policies, clusters running in air gap environments or clusters that are configured to use a local registry also need to ensure that image pull secrets exist in all namespaces that the operator will manage resources in.
-
-## Creating additional namespaces
-
-An application can identify additional namespaces to create during installation time.
-You can define these additional namespaces in the Application custom resource by adding an `additionalNamespaces` attribute to the Application custom resource manifest file. For more information, see [Application](../reference/custom-resource-application) in the _Custom Resources_ section.
-
-When these are defined, `kots install` will create the namespaces and ensure that the KOTS Admin Console has full access to manage resources in these namespaces.
-This is accomplished by creating a Role and RoleBinding per namespace, and setting the Subject to the Admin Console service account.
-If the current user account does not have access to create these additional namespaces, the installer will show an error and fail.
-
-```yaml
-apiVersion: kots.io/v1beta1
-kind: Application
-metadata:
-  name: my-operator
-spec:
-  additionalNamespaces:
-    - namespace1
-    - namespace2
-```
-
-In addition to creating these namespaces, the Admin Console will ensure that the application pull secret exists in them, and that this secret has access to pull the application images. This includes both the images used by the application and any additional images defined in the Application custom resource manifest. For more information, see [Defining Additional Images](operator-defining-additional-images).
-
-The pull secret name can be obtained using the [ImagePullSecretName](../reference/template-functions-config-context/#imagepullsecretname) template function.
-An operator can reliably depend on this secret existing in all installs (online and air gapped), and can use this secret name in any created `podspec` to pull private images.
-
-## Dynamic namespaces
-
-Some applications need access to dynamically created namespaces or even all namespaces.
-In this case, an application spec can list `"*"` as one of its `additionalNamespaces` in the Application manifest file.
-When KOTS encounters the wildcard, it will not create any namespaces, but it will ensure that the application image pull secret is copied to all namespaces.
-The Admin Console will run an informer internally to watch namespaces in the cluster, and when a new namespace is created, the secret will automatically be copied to it.
-
-```yaml
-apiVersion: kots.io/v1beta1
-kind: Application
-metadata:
-  name: my-operator
-spec:
-  additionalNamespaces:
-    - "*"
-```
-
-When the wildcard (`"*"`) is listed in `additionalNamespaces`, KOTS will use a ClusterRole and ClusterRoleBinding for the Admin Console.
-This will ensure that the Admin Console will continue to have permissions to all newly created namespaces, even after the install has finished.
-
-================
-File: docs/vendor/operator-packaging-about.md
-================
-# About Packaging a Kubernetes Operator Application
-
-Kubernetes Operators can be packaged and delivered as an application using the same methods as other Kubernetes applications.
-
-Operators are good for [specific use cases](https://blog.replicated.com/operators-in-kots/). In general, we recommend thinking deeply about the problem space an application solves before going down the Operator path because, although powerful, Operators take a lot of time to build and maintain.
-
-Operators are generally defined using one or more `CustomResourceDefinition` manifests, and the controller is often a `StatefulSet`, along with other additional objects.
-These Kubernetes manifests can be included in an application by adding them to a release and promoting the release to a channel.
-
-Kubernetes Operators differ from traditional applications because they interact with the Kubernetes API to create and manage other objects at runtime.
-When a `CustomResource` is deployed to the cluster that has the operator running, the Operator may need to create new Kubernetes objects to fulfill the request.
-When an Operator creates an object that includes a `PodSpec`, the Operator should use locally-available images in order to remain compatible with air gapped environments and customers who have configured a local registry to push all images to.
-Even environments that aren't air gapped may need access to private images that are included as part of the application at runtime.
-
-To support this, an application definition lets the developer list the additional images that are required for the application, and exposes the local registry details (endpoint, namespace, and secrets) to the application so that they can be referenced when creating a `PodSpec` at runtime.
-
-================
-File: docs/vendor/operator-referencing-images.md
-================
-# Referencing Images
-
-This topic explains how to support the use of private image registries for applications that are packaged with Kubernetes Operators.
-
-## Overview
-
-To support the use of private images in all environments, the Kubernetes Operator code must use KOTS functionality to determine the image name and image pull secrets for all pods when they are created.
-
-There are several template functions available to assist with this.
-This might require adding two new environment variables to the manager so that it can read these values.
-
-The steps to ensure that an Operator is using the correct image names and has the correct image pull secrets in dynamically created pods are:
-
-1. Add a new environment variable to the Manager Pod so that the Manager knows the location of the private registry, if one is set.
-2. Add a new environment variable to the Manager Pod so that the Manager also knows the `imagePullSecret` that's needed to pull the local image.
-
-## Step 1: Add a reference to the local registry
-
-The manager of an Operator is often a `StatefulSet`, but could be a `Deployment` or another kind.
-Regardless of where the spec is defined, the location of the private images can be read using the Replicated KOTS template functions. For more information about using template functions, see [About Template Functions](/reference/template-functions-about).
-
-#### Option 1: Define each image
-If an Operator only requires one additional image, the easiest way to determine this location is to use the `LocalImageName` function.
-This will always return the image name to use, whether the customer's environment is configured to use a local registry or not.
-
-**Example:**
-
-```yaml
-env:
-  - name: IMAGE_NAME_ONE
-    value: 'repl{{ LocalImageName "elasticsearch:7.6.0" }}'
-```
-
-For online installations (no local registry), this will be written with no changes -- the variable will contain `elasticsearch:7.6.0`.
-For installations that are air gapped or have a locally-configured registry, this will be rewritten as the locally referenceable image name. For example, `registry.somebigbank.com/my-app/elasticsearch:7.6.0`.
- -**Example:** - -```yaml -env: - - name: IMAGE_NAME_TWO - value: 'repl{{ LocalImageName "quay.io/orgname/private-image:v1.2.3" }}' -``` - -In the above example, this is a private image, and will always be rewritten. For online installations, this will return `proxy.replicated.com/proxy/app-name/quay.io/orgname/private-image:v1.2.3` and for installations with a locally-configured registry it will return `registry.somebigbank.com/org/my-app-private-image:v.1.2.3`. - -#### Option 2: Build image names manually - -For applications that have multiple images or dynamically construct the image name at runtime, the KOTS template functions can also return the elements that make up the local registry endpoint and secrets, and let the application developer construct the locally-referenceable image name. - -**Example:** - -```yaml -env: - - name: REGISTRY_HOST - value: 'repl{{ LocalRegistryHost }}' - - name: REGISTRY_NAMESPACE - value: 'repl{{ LocalRegistryNamespace }}' -``` - -## Step 2: Determine the imagePullSecret - -Private, local images will need to reference an image pull secret to be pulled. -The value of the secret's `.dockerconfigjson` is provided in a template function, and the application can write this pull secret as a new secret to the namespace. -If the application is deploying the pod to the same namespace as the Operator, the pull secret will already exist in the namespace, and the secret name can be obtained using the [ImagePullSecretName](../reference/template-functions-config-context/#imagepullsecretname) template function. -KOTS will create this secret automatically, but only in the namespace that the Operator is running in. -It's the responsibility of the application developer (the Operator code) to ensure that this secret is present in any namespace that new pods will be deployed to. - -This template function returns the base64-encoded, docker auth that can be written directly to a secret, and referenced in the `imagePullSecrets` attribute of the PodSpec. - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: myregistrykey - namespace: awesomeapps -data: - .dockerconfigjson: '{{repl LocalRegistryImagePullSecret }}' -type: kubernetes.io/dockerconfigjson -``` - -This will return an image pull secret for the locally configured registry. - -If your application has both public and private images, it is recommended that the image name is passed to the image pull secret for the locally configured registry. This will ensure that installs without a local registry can differentiate between private, proxied and public images. - -**Example:** - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: my-pull-secret - namespace: awesomeapps -data: - .dockerconfigjson: '{{repl LocalRegistryImagePullSecret }}' -type: kubernetes.io/dockerconfigjson -``` - -In the above example, the `LocalRegistryImagePullSecret()` function will return an empty auth array if the installation is not air gapped, does not have a local registry configured, and the `elasticsearch:7.6.0` image is public. -If the image is private, the function will return the license-key derived pull secret. -And finally, if the installation is using a local registry, the image pull secret will contain the credentials needed to pull from the local registry. 
- -**Example:** - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: my-pull-secret - namespace: awesomeapps -data: - .dockerconfigjson: '{{repl LocalRegistryImagePullSecret }}' -type: kubernetes.io/dockerconfigjson -``` - -The above example will always return an image pull secret. -For installations without a local registry, it will be the Replicated license secret, and for installations with a local registry, it will be the local registry. - -## Using the local registry at runtime - -The developer of the Operator should use these environment variables to change the `image.name` in any deployed PodSpec to ensure that it will work in air gapped environments. - -================ -File: docs/vendor/orchestrating-resource-deployment.md -================ -import WeightLimitation from "../partials/helm/_helm-cr-weight-limitation.mdx" -import HooksLimitation from "../partials/helm/_hooks-limitation.mdx" -import HookWeightsLimitation from "../partials/helm/_hook-weights-limitation.mdx" - -# Orchestrating Resource Deployment - -This topic describes how to orchestrate the deployment order of resources deployed as part of your application. The information in this topic applies to Helm chart- and standard manifest-based applications deployed with Replicated KOTS. - -## Overview - -Many applications require that certain resources are deployed and in a ready state before other resources can be deployed. - -When installing an application that includes one or more Helm charts, KOTS always deploys standard Kubernetes manifests to the cluster _before_ deploying any Helm charts. For example, if your release contains a Helm chart, a CRD, and a ConfigMap, then the CRD and ConfigMap resources are deployed before the Helm chart. - -For applications deployed with KOTS, you can manage the order in which resources are deployed using the following methods: - -* For Helm charts, set the `weight` property in the corresponding HelmChart custom resource. See [HelmChart `weight`](#weight). - -* For standard manifests, add KOTS annotations to the resources. See [Standard Manifest Deployment Order with KOTS Annotations](#manifests). - -## Helm Chart Deployment Order with `weight` {#weight} - -You can configure the [`weight`](/reference/custom-resource-helmchart-v2#weight) property of the Replicated HelmChart custom resource to define the order in which the Helm charts in your release are installed. - -KOTS directs Helm to install the Helm charts based on the value of `weight` in ascending order, deploying the chart with the lowest weight first. Any dependencies are installed along with the parent chart. For example, a chart with a `weight` of `-1` deploys before a chart with a `weight` of `0`. - -The value for the `weight` property can be any negative or positive integer or `0`. By default, when you do not provide a `weight` for a Helm chart, the `weight` is `0`. - -For example: - -```yaml -apiVersion: kots.io/v1beta2 -kind: HelmChart -metadata: - name: samplechart -spec: - chart: - name: samplechart - chartVersion: 3.1.7 - releaseName: samplechart-release-1 - # weight determines the order that charts are applied, with lower weights first. - weight: 4 -``` - -#### Limitations - -The `weight` field in the HelmChart custom resource has the following limitations: - -* - -* When installing a Helm chart-based application, KOTS always deploys standard Kubernetes manifests to the cluster _before_ deploying Helm charts. 
For example, if your release contains a Helm chart, a CRD, and a ConfigMap, then the CRD and ConfigMap resources are deployed before the Helm chart. The `weight` property does not allow Helm charts to be deployed before standard manifests.
-
-## Standard Manifest Deployment Order with KOTS Annotations {#manifests}
-
-You can use the KOTS annotations described in this section to control the order in which standard manifests are deployed.
-
-### Requirement
-
-You must quote the boolean or integer values in annotations because Kubernetes annotations must be strings. For more information about working with annotations in Kubernetes resources, see [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) in the Kubernetes documentation.
-
-### `kots.io/creation-phase`
-
-When the `kots.io/creation-phase: '<phase>'` annotation is present on a resource, KOTS groups the resource into the specified creation phase. KOTS deploys each phase in order from lowest to highest. Phases can be any positive or negative integer ranging from `'-9999'` to `'9999'`.
-
-Resources in the same phase are deployed in the same order that Helm installs resources. To view the order in which KOTS deploys resources of the same phase, see [Helm installs resources in the following order](https://helm.sh/docs/intro/using_helm/#:~:text=Helm%20installs%20resources%20in%20the,order) in the Helm documentation.
-
-The following example deploys the `CustomResourceDefinition` before the default creation phase:
-
-```yaml
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
-  name: myresources.example.com
-  annotations:
-    kots.io/creation-phase: "-1"
-...
-```
-
-### `kots.io/deletion-phase`
-
-When the `kots.io/deletion-phase: '<phase>'` annotation is present on a resource, KOTS groups the resource into the specified deletion phase. KOTS deletes each phase in order from lowest to highest. Resources within the same phase are deleted in the reverse order from which they were created. Phases can be any positive or negative integer ranging from `'-9999'` to `'9999'`.
-
-The following example deploys the `CustomResourceDefinition` before the default creation phase and deletes the resource after the default deletion phase:
-
-```yaml
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
-  name: myresources.example.com
-  annotations:
-    kots.io/creation-phase: "-1"
-    kots.io/deletion-phase: "1"
-...
-```
-### `kots.io/wait-for-ready`
-
-When the `kots.io/wait-for-ready: '<bool>'` annotation is present on a resource and evaluates to `'true'`, KOTS waits for the resource to be in a ready state before deploying any other resources. For most resource types, KOTS has existing logic to determine if a resource is ready. If there is no existing logic for the given resource type, then KOTS waits until the resource exists and is queryable from the Kubernetes API server.
-
-In the following example, KOTS waits for the Postgres `StatefulSet` to be ready before continuing to deploy other resources:
-
-```yaml
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: postgresql
-  annotations:
-    kots.io/wait-for-ready: 'true'
-  labels:
-    app: postgresql
-spec:
-  selector:
-    matchLabels:
-      app: postgresql
-  updateStrategy:
-    type: RollingUpdate
-  template:
-    metadata:
-      labels:
-        app: postgresql
-    spec:
-      containers:
-      - name: postgresql
-        image: "postgres:9.6"
-        imagePullPolicy: ""
-...
-``` - -### `kots.io/wait-for-properties` - -When the `kots.io/wait-for-properties: '=,='` annotation is present on a resource, KOTS waits for one or more specified resource properties to match the desired values before deploying other resources. This annotation is useful when the `kots.io/wait-for-ready` annotation, which waits for a resource to exist, is not sufficient. - -The value for this annotation is a comma-separated list of key-value pairs, where the key is a JSONPath specifying the path to the property and the value is the desired value for the property. In the following example, KOTS waits for a resource to reach a desired state before deploying other resources. In this case, KOTS waits until each of the three status properties have the target values: - -```yaml -kind: MyResource -metadata: - name: my-resource - annotations: - kots.io/wait-for-properties: '.status.tasks.extract=true,.status.tasks.transform=true,.status.tasks.load=true' -... -status: - tasks: - extract: false - transform: false - load: false -``` - -================ -File: docs/vendor/packaging-air-gap-excluding-minio.md -================ -# Excluding MinIO from Air Gap Bundles (Beta) - -The Replicated KOTS Admin Console requires an S3-compatible object store to store application archives and support bundles. By default, KOTS deploys MinIO to satisfy the object storage requirement. For more information about the options for installing without MinIO in existing clusters, see [Installing KOTS in Existing Clusters Without Object Storage](/enterprise/installing-stateful-component-requirements). - -As a software vendor, you can exclude MinIO images from all Admin Console air gap distributions (`kotsadm.tar.gz`) in the download portal. Excluding MinIO from the `kotsadm.tar.gz` air gap bundle is useful if you want to prevent MinIO images from appearing in the air gap distribution that your end users download. It also reduces the file size of `kotsadm.tar.gz`. - -:::note -You can still retrieve a bundle with MinIO images from the KOTS release page in GitHub when this feature is enabled. See [replicatedhq/kots](https://github.com/replicatedhq/kots/releases/) in GitHub. -::: - -To exclude MinIO from the `kotsadm.tar.gz` Admin Console air gap bundle: - -1. Log in to your Vendor Portal account. Select **Support** > **Request a feature**, and submit a feature request for "Exclude MinIO image from air gap bundle". After this feature is enabled, all `kotsadm.tar.gz` files in the download portal will not include MinIO. - -1. Instruct your end users to set the flag `--with-minio=false` with the `kots install` command during an air gap installation. For more information about setting this runtime flag, see [Installing KOTS in Existing Clusters Without Object Storage](/enterprise/installing-stateful-component-requirements). - - :::important - If you have this feature enabled in your Team account and the end user does not include `--with-minio=false` with the `kots install` command, then the installation fails. - ::: - -================ -File: docs/vendor/packaging-cleaning-up-jobs.md -================ -# Cleaning Up Kubernetes Jobs - -This topic describes how to use the Replicated KOTS `kots.io/hook-delete-policy` annotation to remove Kubernetes job objects from the cluster after they complete. - -## About Kubernetes Jobs - -Kubernetes Jobs are designed to run and then terminate. But, they remain in the namespace after completion. 
Because Job objects are immutable, this can cause conflicts and errors when attempting to update the Job later. - -A common workaround is to use a content SHA from the Job object in the name. However, a user can update their application instance through various events (upstream update, license sync, config update, CLI upload). If the Job is already completed, it is an error to reapply the same job to the cluster again. - -The built-in Replicated KOTS operator/controller can help by deleting Jobs upon completion. -This allows the same Job to be deployed again without polluting the namespace with completed Jobs. - -For more information about Job objects, see [Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) in the Kubernetes documentation. - -## KOTS `hook-delete-policy` Annotation - -To enable the built-in KOTS operator/controller to automatically delete Jobs when they complete, specify a delete hook policy as an annotation on the Job object. - -The KOTS annotation key is `kots.io/hook-delete-policy` and there are two possible values (you can use both simultaneously): `hook-succeeded` and `hook-failed`. - -When this annotation is present and includes `hook-succeeded`, the job is deleted when it completes successfully. -If this annotation is present and includes `hook-failed`, the job is deleted on failure. - -For Helm charts deployed with KOTS, KOTS automatically adds this `kots.io/hook-delete-policy` annotation to any Job objects in the Helm chart that include a `helm.sh/hook-delete-policy` annotation. This means that there is nothing extra to configure when deploying a Helm chart with Helm delete hooks. - -The following example shows a Job object with the `kots.io/hook-delete-policy` annotation: - -```yaml -apiVersion: batch/v1 -kind: Job -metadata: - name: pi - annotations: - "kots.io/hook-delete-policy": "hook-succeeded, hook-failed" -spec: - template: - spec: - containers: - - name: pi - image: perl - command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"] - restartPolicy: Never - backoffLimit: 4 -``` - -================ -File: docs/vendor/packaging-embedded-kubernetes.mdx -================ -import Installers from "../partials/kurl/_installers.mdx" -import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" - -# Creating a kURL Installer - - - -This topic describes how to create a kURL installer spec in the Replicated Vendor Portal to support installations with Replicated kURL. - -For information about creating kURL installers with the Replicated CLI, see [installer create](/reference/replicated-cli-installer-create). - -## Overview - - - -For more information about kURL, see [Introduction to kURL](kurl-about). - -## Create an Installer - -To distribute a kURL installer alongside your application, you can promote the installer to a channel or include the installer as a manifest file within a given release: - - - - - - - - - - - - - - -
    MethodDescription
    Promote the installer to a channel

    The installer is promoted to one or more channels. All releases on the channel use the kURL installer that is currently promoted to that channel. There can be only one active kURL installer on each channel at a time.

    The benefit of promoting an installer to one or more channels is that you can create a single installer without needing to add a separate installer for each release. However, because all the releases on the channel will use the same installer, problems can occur if all releases are not tested with the given installer.

    Include the installer in a release (Beta)

    The installer is included as a manifest file in a release. This makes it easier to test the installer and release together. It also makes it easier to know which installer spec customers are using based on the application version that they have installed.

    - -### Promote the Installer to a Channel {#channel} - -To promote a kURL installer to a channel: - -1. In the [Vendor Portal](https://vendor.replicated.com), click **kURL Installers**. - -1. On the **kURL Installers** page, click **Create kURL installer**. - - vendor portal kurl installers page - - [View a larger version of this image](/images/kurl-installers-page.png) - -1. Edit the file to customize the installer. For guidance on which add-ons to choose, see [Requirements and Recommendations](#requirements-and-recommendations) below. - - You can also go to the landing page at [kurl.sh](https://kurl.sh/) to build an installer then copy the provided YAML: - - kurl.sh landing page - - [View a larger version of this image](/images/kurl-build-an-installer.png) - -1. Click **Save installer**. You can continue to edit your file until it is promoted. - -1. Click **Promote**. In the **Promote Installer** dialog that opens, edit the fields: - - promote installer dialog - - [View a larger version of this image](/images/promote-installer.png) - - - - - - - - - - - - - - -
    FieldDescription
    ChannelSelect the channel or channels where you want to promote the installer.
    Version labelEnter a version label for the installer.
    - -1. Click **Promote** again. The installer appears on the **kURL Installers** page. - - To make changes after promoting, create and promote a new installer. - -### Include an Installer in a Release (Beta) {#release} - -To include the kURL installer in a release: - -1. In the [Vendor Portal](https://vendor.replicated.com), click **Releases**. Then, either click **Create Release** to create a new release, or click **Edit YAML** to edit an existing release. - - The YAML editor opens. - -1. Create a new file in the release with `apiVersion: cluster.kurl.sh/v1beta1` and `kind: Installer`: - - ```yaml - apiVersion: cluster.kurl.sh/v1beta1 - kind: Installer - metadata: - name: "latest" - spec: - - ``` - -1. Edit the file to customize the installer. For guidance on which add-ons to choose, see [ kURL Add-on Requirements and Recommendations](#requirements-and-recommendations) below. - - You can also go to the landing page at [kurl.sh](https://kurl.sh/) to build an installer then copy the provided YAML: - - kurl.sh landing page - - [View a larger version of this image](/images/kurl-build-an-installer.png) - -1. Click **Save**. This saves a draft that you can continue to edit until you promote it. - -1. Click **Promote**. - - To make changes after promoting, create a new release. - -## kURL Add-on Requirements and Recommendations {#requirements-and-recommendations} - -KURL includes several add-ons for networking, storage, ingress, and more. The add-ons that you choose depend on the requirements for KOTS and the unique requirements for your application. For more information about each add-on, see the open source [kURL documentation](https://kurl.sh/docs/introduction/). - -When creating a kURL installer, consider the following requirements and guidelines for kURL add-ons: - -- You must include the KOTS add-on to support installation with KOTS and provision the KOTS Admin Console. See [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) in the kURL documentation. - -- To support the use of KOTS snapshots, Velero must be installed in the cluster. Replicated recommends that you include the Velero add-on in your kURL installer so that your customers do not have to manually install Velero. - - :::note - During installation, the Velero add-on automatically deploys internal storage for backups. The Velero add-on requires the MinIO or Rook add-on to deploy this internal storage. If you include the Velero add-on without either the MinIO add-on or the Rook add-on, installation fails with the following error message: `Only Rook and Longhorn are supported for Velero Internal backup storage`. - ::: - -- You must select storage add-ons based on the KOTS requirements and the unique requirements for your application. For more information, see [About Selecting Storage Add-ons](packaging-installer-storage). - -- kURL installers that are included in releases must pin specific add-on versions and cannot pin `latest` versions or x-ranges (such as 1.2.x). Pinning specific versions ensures the most testable and reproducible installations. For example, pin `Kubernetes 1.23.0` in your manifest to ensure that version 1.23.0 of Kubernetes is installed. For more information about pinning Kubernetes versions, see [Versions](https://kurl.sh/docs/create-installer/#versions) and [Versioned Releases](https://kurl.sh/docs/install-with-kurl/#versioned-releases) in the kURL open source documentation. 
- - :::note - For kURL installers that are _not_ included in a release, pinning specific versions of Kubernetes and Kubernetes add-ons in the kURL installer manifest is not required, though is highly recommended. - ::: - -- After you configure a kURL installer, Replicated recommends that you customize host preflight checks to support the installation experience with kURL. Host preflight checks help ensure successful installation and the ongoing health of the cluster. For more information about customizing host preflight checks, see [Customizing Host Preflight Checks for Kubernetes Installers](preflight-host-preflights). - -- For installers included in a release, Replicated recommends that you define a preflight check in the release to ensure that the target kURL installer is deployed before the release is installed. For more information about how to define preflight checks, see [Defining Preflight Checks](preflight-defining). - - For example, the following preflight check uses the `yamlCompare` analyzer with the `kots.io/installer: "true"` annotation to compare the target kURL installer that is included in the release against the kURL installer that is currently deployed in the customer's environment. For more information about the `yamlCompare` analyzer, see [`yamlCompare`](https://troubleshoot.sh/docs/analyze/yaml-compare/) in the open source Troubleshoot documentation. - - ```yaml - apiVersion: troubleshoot.sh/v1beta2 - kind: Preflight - metadata: - name: installer-preflight-example - spec: - analyzers: - - yamlCompare: - annotations: - kots.io/installer: "true" - checkName: Kubernetes Installer - outcomes: - - fail: - message: The kURL installer for this version differs from what you have installed. It is recommended that you run the updated kURL installer before deploying this version. - uri: https://kurl.sh/my-application - - pass: - message: The kURL installer for this version matches what is currently installed. - ``` - -================ -File: docs/vendor/packaging-include-resources.md -================ -# Conditionally Including or Excluding Resources - -This topic describes how to include or exclude optional application resources based on one or more conditional statements. The information in this topic applies to Helm chart- and standard manifest-based applications. - -## Overview - -Software vendors often need a way to conditionally deploy resources for an application depending on users' configuration choices. For example, a common use case is giving the user the choice to use an external database or an embedded database. In this scenario, when a user chooses to use their own external database, it is not desirable to deploy the embedded database resources. - -There are different options for creating conditional statements to include or exclude resources based on the application type (Helm chart- or standard manifest-based) and the installation method (Replicated KOTS or Helm CLI). - -### About Replicated Template Functions - -For applications deployed with KOTS, Replicated template functions are available for creating the conditional statements that control which optional resources are deployed for a given user. Replicated template functions can be used in standard manifest files such as Replicated custom resources or Kubernetes resources like StatefulSets, Secrets, and Services. - -For example, the Replicated ConfigOptionEquals template functions returns true if the specified configuration option value is equal to a supplied value. 

================
File: docs/vendor/packaging-include-resources.md
================
# Conditionally Including or Excluding Resources

This topic describes how to include or exclude optional application resources based on one or more conditional statements. The information in this topic applies to Helm chart- and standard manifest-based applications.

## Overview

Software vendors often need a way to conditionally deploy resources for an application depending on users' configuration choices. For example, a common use case is giving the user the choice to use an external database or an embedded database. In this scenario, when a user chooses to use their own external database, it is not desirable to deploy the embedded database resources.

There are different options for creating conditional statements to include or exclude resources based on the application type (Helm chart- or standard manifest-based) and the installation method (Replicated KOTS or Helm CLI).

### About Replicated Template Functions

For applications deployed with KOTS, Replicated template functions are available for creating the conditional statements that control which optional resources are deployed for a given user. Replicated template functions can be used in standard manifest files such as Replicated custom resources or Kubernetes resources like StatefulSets, Secrets, and Services.

For example, the Replicated ConfigOptionEquals template function returns true if the specified configuration option value is equal to a supplied value. This is useful for creating conditional statements that include or exclude a resource based on a user's application configuration choices.

For more information about the available Replicated template functions, see [About Template Functions](/reference/template-functions-about).

## Include or Exclude Helm Charts

This section describes methods for including or excluding Helm charts from your application deployment.

### Helm Optional Dependencies

Helm supports adding a `condition` field to dependencies in the Helm chart `Chart.yaml` file to include subcharts based on one or more boolean values evaluating to true. For more information about working with dependencies and defining optional dependencies for Helm charts, see [Dependencies](https://helm.sh/docs/chart_best_practices/dependencies/) in the Helm documentation.
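As a minimal sketch, an optional dependency in `Chart.yaml` can look like the following. The subchart name, version, and repository URL are illustrative placeholders:

```yaml
# Chart.yaml
apiVersion: v2
name: my-app
version: 1.0.0
dependencies:
  - name: postgresql
    version: 12.1.7
    repository: https://charts.example.com
    # The subchart is included only when postgresql.enabled evaluates to true
    condition: postgresql.enabled
```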

### HelmChart `exclude` Field

For Helm chart-based applications installed with KOTS, you can configure KOTS to exclude certain Helm charts from deployment using the HelmChart custom resource [`exclude`](/reference/custom-resource-helmchart#exclude) field. When the `exclude` field is set to a conditional statement, KOTS excludes the chart if the condition evaluates to `true`.

The following example uses the `exclude` field and the ConfigOptionEquals template function to exclude a postgresql Helm chart when the `external_postgres` option is selected on the Replicated Admin Console **Config** page:

```yaml
apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: postgresql
spec:
  exclude: 'repl{{ ConfigOptionEquals `postgres_type` `external_postgres` }}'
  chart:
    name: postgresql
    chartVersion: 12.1.7
  releaseName: samplechart-release-1
```

## Include or Exclude Standard Manifests

For standard manifest-based applications installed with KOTS, you can use the `kots.io/exclude` or `kots.io/when` annotations to include or exclude resources based on a conditional statement.

By default, if neither `kots.io/exclude` nor `kots.io/when` is present on a resource, the resource is included.

### Requirements

The `kots.io/exclude` and `kots.io/when` annotations have the following requirements:

* Only one of the `kots.io/exclude` or `kots.io/when` annotations can be present on a single resource. If both are present, the `kots.io/exclude` annotation is applied, and the `kots.io/when` annotation is ignored.

* The values of the `kots.io/exclude` and `kots.io/when` annotations must be wrapped in quotes. This is because Kubernetes annotations must be strings. For more information about working with Kubernetes annotations, see [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) in the Kubernetes documentation.

### `kots.io/exclude`

When the `kots.io/exclude: ''` annotation is present on a resource and evaluates to true, the resource is excluded from the deployment.

The following example uses the `kots.io/exclude` annotation and the ConfigOptionEquals template function to exclude the postgresql `StatefulSet` when an `install_postgres` checkbox on the Admin Console **Config** page is disabled:

```yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: postgresql
  annotations:
    kots.io/exclude: '{{repl ConfigOptionEquals "install_postgres" "0" }}'
  labels:
    app: postgresql
spec:
  selector:
    matchLabels:
      app: postgresql
  serviceName: postgresql
  template:
    metadata:
      labels:
        app: postgresql
    spec:
      containers:
        - name: postgresql
          image: "postgres:9.6"
          imagePullPolicy: ""
...
```

### `kots.io/when`

When the `kots.io/when: ''` annotation is present on a resource and evaluates to true, the resource is included in the deployment.

The following example uses the `kots.io/when` annotation and the ConfigOptionEquals template function to include the postgresql `StatefulSet` resource when the `install_postgres` checkbox on the Admin Console **Config** page is enabled:

```yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: postgresql
  annotations:
    kots.io/when: '{{repl ConfigOptionEquals "install_postgres" "1" }}'
  labels:
    app: postgresql
spec:
  selector:
    matchLabels:
      app: postgresql
  serviceName: postgresql
  template:
    metadata:
      labels:
        app: postgresql
    spec:
      containers:
        - name: postgresql
          image: "postgres:9.6"
          imagePullPolicy: ""
...
```

================
File: docs/vendor/packaging-ingress.md
================
# Adding Cluster Ingress Options

When delivering a configurable application, ingress can be challenging as it is very cluster specific. Below is an example of a flexible `ingress.yaml` file designed to work in most Kubernetes clusters, including embedded clusters created with Replicated kURL.

## Example

The following example includes an Ingress resource with a single host-based routing rule. The resource works in both existing clusters and kURL clusters.

### Config

A config option `enable_ingress` has been provided to allow the end user to choose whether or not to enable the Ingress resource. In some clusters a custom Ingress resource may be desired, while in others, such as when an ingress controller is not available, other means of exposing services may be preferred.

An `annotations` text area has been made available for the end user to add additional annotations to the ingress. Here, cluster-specific annotations can be added to support a variety of ingress controllers. For example, when using the [ALB ingress controller](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html) in AWS, it is necessary to include the `kubernetes.io/ingress.class: alb` annotation on your Ingress resource.

```yaml
apiVersion: kots.io/v1beta1
kind: Config
metadata:
  name: example-application
spec:
  groups:
    - name: ingress
      title: Ingress
      items:
        - name: enable_ingress
          type: bool
          title: Enable Kubernetes Ingress
          help_text: |
            When checked, deploy the provided Kubernetes Ingress resource.
          default: "1"
        - name: hostname
          type: text
          title: Hostname
          help_text: |
            Use this field to provide a hostname for your Example Application installation.
          required: true
          when: repl{{ ConfigOptionEquals "enable_ingress" "1" }}
        - name: allow_http
          type: bool
          title: Allow Unsecured Access through HTTP
          help_text: |
            Uncheck this box to disable HTTP traffic between the client and the load balancer.
          default: "1"
          when: repl{{ ConfigOptionEquals "enable_ingress" "1" }}
        - name: annotations
          type: textarea
          title: Annotations
          help_text: |
            Use this textarea to provide annotations specific to your ingress controller.
            For example, `kubernetes.io/ingress.class: alb` when using the ALB ingress controller.
          when: repl{{ ConfigOptionEquals "enable_ingress" "1" }}
```

### Ingress

For ingress, you must create two separate resources. The first is deployed to existing cluster installations, while the second is deployed only to embedded clusters. Both of these resources are selectively excluded with the [`kots.io/exclude` annotation](packaging-include-resources).

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example-application-ingress
  annotations:
    kots.io/exclude: '{{repl or (ConfigOptionEquals "enable_ingress" "1" | not) IsKurl }}'
    kubernetes.io/ingress.allow-http: '{{repl ConfigOptionEquals "allow_http" "1" }}'
    nginx.ingress.kubernetes.io/force-ssl-redirect: '{{repl ConfigOptionEquals "allow_http" "1" | not }}'
    kots.io/placeholder: repl{{ printf "'true'" }}repl{{ ConfigOption "annotations" | nindent 4 }}
spec:
  rules:
    - host: repl{{ or (ConfigOption "hostname") "~" }}
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              service:
                name: nginx
                port:
                  number: 80
```

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example-application-ingress-embedded
  annotations:
    kots.io/exclude: '{{repl or (ConfigOptionEquals "enable_ingress" "1" | not) (not IsKurl) }}'
    kubernetes.io/ingress.allow-http: '{{repl ConfigOptionEquals "allow_http" "1" }}'
    nginx.ingress.kubernetes.io/force-ssl-redirect: '{{repl ConfigOptionEquals "allow_http" "1" | not }}'
    kots.io/placeholder: repl{{ printf "'true'" }}repl{{ ConfigOption "annotations" | nindent 4 }}
spec:
  tls:
    - hosts:
        - repl{{ ConfigOption "hostname" }}
      secretName: kotsadm-tls
  rules:
    - host: repl{{ ConfigOption "hostname" }}
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              service:
                name: nginx
                port:
                  number: 80
```

================
File: docs/vendor/packaging-installer-storage.mdx
================
import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"

# About Selecting Storage Add-ons

<KurlAvailability/>

This topic provides guidance for selecting Replicated kURL add-ons to provide highly available data storage in kURL clusters. For additional guidance, see [Choosing a PV Provisioner](https://kurl.sh/docs/create-installer/choosing-a-pv-provisioner) in the open source kURL documentation.

## Overview

kURL includes add-ons for object storage and for dynamic provisioning of PersistentVolumes (PVs) in clusters. You configure these add-ons in your kURL installer to define how data for your application and data for Replicated KOTS is managed in the cluster.

The following lists the kURL add-ons for data storage:
* **MinIO**: MinIO is an open source, S3-compatible object store. See [MinIO Add-on](https://kurl.sh/docs/add-ons/minio) in the kURL documentation.
* **Rook**: Rook provides dynamic PV provisioning of distributed Ceph storage. Ceph is a distributed storage system that provides S3-compatible object storage. See [Rook Add-on](https://kurl.sh/docs/add-ons/rook) in the kURL documentation.
* **OpenEBS**: OpenEBS Local PV creates a StorageClass to dynamically provision local PersistentVolumes (PVs) in a cluster.
  See [OpenEBS Add-on](https://kurl.sh/docs/add-ons/openebs) in the kURL documentation.
* **Longhorn**: Longhorn is an open source distributed block storage system for Kubernetes. See [Longhorn Add-on](https://kurl.sh/docs/add-ons/longhorn) in the kURL documentation.

  :::important
  The Longhorn add-on is deprecated and not supported in production clusters. If you are currently using Longhorn, you must migrate data from Longhorn to either OpenEBS or Rook. For more information about migrating from Longhorn, see [Migrating to Change CSI Add-On](https://kurl.sh/docs/install-with-kurl/migrating-csi) in the kURL documentation.
  :::

## About Persistent Storage for KOTS

This section describes the default storage requirements for KOTS. Each of the [Supported Storage Configurations](#supported-storage-configurations) described below satisfies these storage requirements for KOTS.

### rqlite StatefulSet

KOTS deploys an rqlite StatefulSet to store the version history, application metadata, and other small amounts of data needed to manage the application(s). No configuration is required to deploy rqlite.

Rqlite is a distributed relational database that uses SQLite as its storage engine. For more information, see the [rqlite](https://rqlite.io/) website.

### Object Storage or Local PV

By default, KOTS requires an S3-compatible object store to store the following:
* Support bundles
* Application archives
* Backups taken with Replicated snapshots that are configured to NFS or host path storage destinations

Both the Rook add-on and the MinIO add-on satisfy this object store requirement.

Alternatively, you can configure KOTS to be deployed without object storage. This installs KOTS as a StatefulSet using a persistent volume (PV) for storage. When there is no object storage available, KOTS stores support bundles, application archives, and snapshots that have an NFS or host path storage destination in the local PV. In this case, the OpenEBS add-on can be included to provide the local PV storage. For more information, see [Installing Without Object Storage](/enterprise/installing-stateful-component-requirements).

### Distributed Storage in KOTS v1.88 and Earlier

KOTS v1.88 and earlier requires distributed storage. To support multi-node clusters, kURL installers that use KOTS v1.88 or earlier in the KOTS add-on must use the Rook add-on for distributed storage. For more information, see [Rook Ceph](#rook-ceph) below.

## Factors to Consider When Choosing a Storage Configuration

The object store and/or PV provisioner add-ons that you choose to include in your kURL installer depend on the following factors:
* **KOTS storage requirements**: The storage requirements for the version of the KOTS add-on that you include in the spec. For example, KOTS v1.88 and earlier requires distributed storage.
* **Other add-on storage requirements**: The storage requirements for the other add-ons that you include in the spec. For example, the Velero add-on requires object storage to deploy the default internal storage for snapshots during installation.
* **Application storage requirements**: The storage requirements for your application. For example, you might include different add-ons depending on whether your application requires a single or multi-node cluster, or whether your application requires distributed storage.

## Supported Storage Configurations

This section describes the supported storage configurations for embedded clusters provisioned by kURL.

### OpenEBS Without Object Storage (Single Node) {#single-node}

If your application can be deployed to a single node cluster and does not require object storage, then you can choose to exclude object storage and instead use the OpenEBS add-on only to provide local storage on the single node in the cluster.

When configured to use local PV storage instead of object storage, KOTS stores support bundles, application archives, and snapshots that have an NFS or host path storage destination in a PV on the single node in the cluster.

#### Requirements

To use the OpenEBS add-on without object storage, your kURL installer must meet the following requirements:

* When neither the MinIO add-on nor the Rook add-on is included in the kURL installer, you must set the `disableS3` field to `true` in the KOTS add-on. Setting `disableS3: true` in the KOTS add-on allows KOTS to use the local PV storage provided by OpenEBS instead of using object storage. For more information, see [Effects of the disableS3 Flag](https://kurl.sh/docs/add-ons/kotsadm#effects-of-the-disables3-flag) in _KOTS Add-on_ in the kURL documentation.

* When neither the MinIO add-on nor the Rook add-on is included in the kURL installer, the Velero add-on cannot be included. This is because, during installation, the Velero add-on automatically deploys internal storage for backups taken with the Replicated snapshots feature. The Velero add-on requires object storage to deploy this internal storage. If you include the Velero add-on without either the MinIO add-on or the Rook add-on, installation fails with the following error message: `Only Rook and Longhorn are supported for Velero Internal backup storage`.

  When the Velero add-on is not included, your users must install and configure Velero on the cluster after installation in order to use Replicated snapshots for backup and restore. See [About Backup and Restore with Snapshots](/vendor/snapshots-overview).

  For a storage configuration for single node clusters that supports the use of the Velero add-on, see [OpenEBS with MinIO (Single or Multi-Node)](#openebs-minio) below.

#### Example

The following is an example installer that uses OpenEBS v3.3.x with Local PV for local storage and disables object storage for KOTS:

```yaml
apiVersion: "cluster.kurl.sh/v1beta1"
kind: "Installer"
metadata:
  name: "local"
spec:
  ...
  openebs:
    version: "3.3.x"
    isLocalPVEnabled: true
    localPVStorageClassName: "default"
  kotsadm:
    disableS3: true
```

For more information about properties for the OpenEBS add-on, see [OpenEBS](https://kurl.sh/docs/add-ons/openebs) in the kURL documentation.

### OpenEBS with MinIO (Single or Multi-Node) {#openebs-minio}

Using the OpenEBS add-on with the MinIO add-on provides a highly available data storage solution for multi-node clusters that is lighter-weight compared to using Rook Ceph. Replicated recommends that you use OpenEBS Local PV with MinIO for multi-node clusters if your application does _not_ require distributed storage. If your application requires distributed storage, see [Rook Ceph](#rook-ceph) below.

When both the MinIO and OpenEBS add-ons are included, KOTS stores support bundles, application archives, and snapshots that have an NFS or host path storage destination in MinIO object storage. Additionally, KOTS uses OpenEBS Local PV to provision the PVs on each node that MinIO uses for local storage.

#### Requirement

To use both the OpenEBS add-on and the MinIO add-on, the KOTS add-on must use KOTS v1.89 or later.

KOTS v1.88 and earlier requires distributed storage, which is not provided by OpenEBS Local PV. To support multi-node clusters, kURL installers that use KOTS v1.88 or earlier in the KOTS add-on must use the Rook add-on for distributed storage. See [Rook Ceph](#rook-ceph) below.

#### Example

The following is an example installer that uses both the OpenEBS add-on version 3.3.x and the MinIO add-on version `2022-09-07T22-25-02Z`:

```yaml
apiVersion: "cluster.kurl.sh/v1beta1"
kind: "Installer"
metadata:
  name: "openebs-with-minio"
spec:
  ...
  openebs:
    version: "3.3.x"
    isLocalPVEnabled: true
    localPVStorageClassName: "default"
  minio:
    version: "2022-09-07T22-25-02Z"
```

For more information about properties for the OpenEBS and MinIO add-ons, see [OpenEBS](https://kurl.sh/docs/add-ons/openebs) and [MinIO](https://kurl.sh/docs/add-ons/minio) in the kURL documentation.

### Rook Ceph (Multi-Node) {#rook-ceph}

If your application requires multiple nodes and distributed storage, Replicated recommends that you use the Rook add-on for storage. The Rook add-on creates an S3-compatible, distributed object store with Ceph and also creates a StorageClass for dynamically provisioning PVs.

#### Requirement

Rook versions 1.4.3 and later require a dedicated block device attached to each node in the cluster. The block device must be unformatted and dedicated for use by Rook only. The device cannot be used for other purposes, such as being part of a RAID configuration. If the device is used for purposes other than Rook, then the installer fails, indicating that it cannot find an available block device for Rook.

For Rook Ceph versions earlier than 1.4.3, a dedicated block device is recommended in production clusters. Running distributed storage such as Rook on block devices is recommended for improved data stability and performance.

#### Example

The following is an example installer that uses the Rook add-on version 1.7.x:

```yaml
apiVersion: "cluster.kurl.sh/v1beta1"
kind: "Installer"
metadata:
  name: "distributed"
spec:
  ...
  rook:
    version: "1.7.x"
    storageClassName: "distributed"
    isSharedFilesystemDisabled: true
```

For more information about properties for the Rook add-on, see [Rook](https://kurl.sh/docs/add-ons/rook) in the kURL documentation.

================
File: docs/vendor/packaging-kots-versions.md
================
# Setting Minimum and Target Versions for KOTS

This topic describes how to set minimum and target versions for Replicated KOTS in the KOTS [Application](/reference/custom-resource-application) custom resource.

## Limitation

Setting minimum and target versions for KOTS is not supported for installations with [Replicated Embedded Cluster](/vendor/embedded-overview).

This is because each version of Embedded Cluster includes a particular version of KOTS. Setting `targetKotsVersion` or `minKotsVersion` to a version of KOTS that does not coincide with the version that is included in the specified version of Embedded Cluster will cause Embedded Cluster installations to fail with an error message like: `Error: This version of App Name requires a different version of KOTS from what you currently have installed.`

To avoid installation failures, do not use `targetKotsVersion` or `minKotsVersion` in releases that support installation with Embedded Cluster.
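For installation methods where these fields are supported, both are set in the `spec` of the Application custom resource. The following minimal sketch shows the two fields together; the application name and the version values are illustrative:

```yaml
apiVersion: kots.io/v1beta1
kind: Application
metadata:
  name: my-application
spec:
  # Block installs and updates when the deployed KOTS version is older than this
  minKotsVersion: "1.71.0"
  # Block new installs that attempt to use a KOTS version later than this
  targetKotsVersion: "1.92.0"
```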

## Using Minimum KOTS Versions (Beta)

The `minKotsVersion` attribute in the Application custom resource defines the minimum version of Replicated KOTS that is required by the application release. This can be useful when you want to get users who are lagging behind to update to a more recent KOTS version, or if your application requires functionality that was introduced in a particular KOTS version.

Including this attribute enforces compatibility checks for both new installations and application updates. An installation or update is blocked if the currently deployed KOTS version is earlier than the specified minimum KOTS version. Users must upgrade to at least the specified minimum version of KOTS before they can install or update the application.

### How the Admin Console Handles minKotsVersion

When you promote a new release specifying a minimum KOTS version that is later than what a user currently has deployed, and that user checks for updates, that application version appears in the version history of the Admin Console. However, it is not downloaded.

The Admin Console temporarily displays an error message that informs the user that they must update KOTS before downloading the application version. This error also displays when the user checks for updates with the [`kots upstream upgrade`](/reference/kots-cli-upstream-upgrade) command.

KOTS cannot update itself automatically, and users cannot update KOTS from the Admin Console. For more information on how to update KOTS in existing clusters or in kURL clusters, see [Performing Updates in Existing Clusters](/enterprise/updating-app-manager) and [Performing Updates in kURL Clusters](/enterprise/updating-kurl).

After updating KOTS to the minimum version or later, users can use the Admin Console or the [`kots upstream download`](/reference/kots-cli-upstream-download) command to download the release and subsequently deploy it.

## Using Target KOTS Versions

Including `targetKotsVersion` in the Application custom resource enforces compatibility checks for new installations. It blocks the installation if a user tries to install a version of KOTS that is later than the target version. For example, this can prevent users from installing a version of KOTS that you have not tested yet.

If the latest release in a channel includes `targetKotsVersion`, the install command for existing clusters is modified to install that specific version of KOTS. The install command for existing clusters is on the channel card in the [Vendor Portal](https://vendor.replicated.com).

### How the Admin Console Handles targetKotsVersion

Specifying a `targetKotsVersion` does not prevent an end user from upgrading to a later version of KOTS after the initial installation.

If a new version of the application specifies a later target KOTS version than what is currently installed, users are not prevented from deploying that version of the application.

If a user's Admin Console is running a version of KOTS that is earlier than the target version specified in a new version of the application, the Admin Console displays a notification in the footer, indicating that a newer supported version of KOTS is available.

### Using Target Versions with kURL

For installations in a cluster created by Replicated kURL, the version of the KOTS add-on must not be later than the target KOTS version specified in the Application custom resource.
If the KOTS add-on version is later than the version specified for `targetKotsVersion`, the initial installation fails.

For more information about the KOTS add-on, see [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) in the open source kURL documentation.

================
File: docs/vendor/packaging-private-images.md
================
# Connecting to an External Registry

This topic describes how to add credentials for an external private registry using the Replicated Vendor Portal or Replicated CLI. Adding an external registry allows you to grant proxy access to private images using the Replicated proxy registry. For more information, see [About the Replicated Proxy Registry](private-images-about).

For information about adding a registry with the Vendor API v3, see [Create an external registry with the specified parameters](https://replicated-vendor-api.readme.io/reference/createexternalregistry) in the Vendor API v3 documentation.

## Supported Registries

Replicated recommends that application vendors use one of the following external private registries:

* Amazon Elastic Container Registry (ECR)
* DockerHub
* GitHub Container Registry
* Google Artifact Registry
* Google Container Registry (Deprecated)
* Sonatype Nexus
* Quay.io

These registries have been tested for compatibility with KOTS.

You can also configure access to most other external registries if the registry conforms to the Open Container Initiative (OCI) standard.

## Add Credentials for an External Registry

All applications in your team have access to the external registry that you add. This means that you can use the images in the external registry across multiple apps in the same team.

### Using the Vendor Portal

To add an external registry using the Vendor Portal:

1. Log in to the [Vendor Portal](https://vendor.replicated.com) and go to the **Images** page.
1. Click **Add External Registry**.

   /images/add-external-registry.png

   [View a larger version of this image](/images/add-external-registry.png)

1. In the **Provider** drop-down, select your registry provider.

1. Complete the fields in the dialog, depending on the provider that you chose:

   :::note
   Replicated stores your credentials encrypted and securely. Your credentials and the encryption key do not leave Replicated servers.
   :::

   * **Amazon ECR**

     | Field | Instructions |
     |-------|--------------|
     | Hostname | Enter the host name for the registry, such as `123456689.dkr.ecr.us-east-1.amazonaws.com`. |
     | Access Key ID | Enter the Access Key ID for a Service Account User that has pull access to the registry. See Setting up the Service Account User. |
     | Secret Access Key | Enter the Secret Access Key for the Service Account User. |

   * **DockerHub**

     | Field | Instructions |
     |-------|--------------|
     | Hostname | Enter the host name for the registry, such as `index.docker.io`. |
     | Auth Type | Select the authentication type for a DockerHub account that has pull access to the registry. |
     | Username | Enter the username for the account. |
     | Password or Token | Enter the password or token for the account, depending on the authentication type you selected. |

   * **GitHub Container Registry**

     | Field | Instructions |
     |-------|--------------|
     | Hostname | Enter the host name for the registry. |
     | Username | Enter the username for an account that has pull access to the registry. |
     | GitHub Token | Enter the token for the account. |

   * **Google Artifact Registry**

     | Field | Instructions |
     |-------|--------------|
     | Hostname | Enter the host name for the registry, such as `us-east1-docker.pkg.dev`. |
     | Auth Type | Select the authentication type for a Google Cloud Platform account that has pull access to the registry. |
     | Service Account JSON Key or Token | Enter the JSON Key from Google Cloud Platform assigned with the Artifact Registry Reader role, or the token for the account, depending on the authentication type you selected.<br/><br/>For more information about creating a Service Account, see Access Control with IAM in the Google Cloud documentation. |

   * **Google Container Registry**

     :::important
     Google Container Registry is deprecated. For more information, see Container Registry deprecation in the Google documentation.
     :::

     | Field | Instructions |
     |-------|--------------|
     | Hostname | Enter the host name for the registry, such as `gcr.io`. |
     | Service Account JSON Key | Enter the JSON Key for a Service Account in Google Cloud Platform that is assigned the Storage Object Viewer role.<br/><br/>For more information about creating a Service Account, see Access Control with IAM in the Google Cloud documentation. |

   * **Quay.io**

     | Field | Instructions |
     |-------|--------------|
     | Hostname | Enter the host name for the registry, such as `quay.io`. |
     | Username and Password | Enter the username and password for an account that has pull access to the registry. |

   * **Sonatype Nexus**

     | Field | Instructions |
     |-------|--------------|
     | Hostname | Enter the host name for the registry, such as `nexus.example.net`. |
     | Username and Password | Enter the username and password for an account that has pull access to the registry. |

   * **Other**

     | Field | Instructions |
     |-------|--------------|
     | Hostname | Enter the host name for the registry, such as `example.registry.com`. |
     | Username and Password | Enter the username and password for an account that has pull access to the registry. |

1. For **Image name & tag**, enter the image name and image tag, and click **Test** to confirm that the Vendor Portal can access the image. For example, `api:v1.0.1` or `my-app/api:v1.01`.

1. Click **Link registry**.

### Using the CLI

To configure access to private images in an external registry using the Replicated CLI:

1. Install and configure the Replicated CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing).

1. Run the `registry add` command for your external private registry. For more information about the `registry add` command, see [registry add](/reference/replicated-cli-registry-add) in _Replicated CLI_.

   For example, to add a DockerHub registry:

   ```bash
   replicated registry add dockerhub --username USERNAME \
     --password PASSWORD
   ```

   Where:
   * `USERNAME` is the username for DockerHub credentials with access to the registry.
   * `PASSWORD` is the password for DockerHub credentials with access to the registry.

   :::note
   To prevent the password from being saved in your shell history, Replicated recommends that you use the `--password-stdin` flag and enter the password when prompted.
   :::

## Test External Registry Credentials

Replicated recommends that you test external registry credentials to ensure that the saved credentials on Replicated servers can pull the specified image.

To validate that the configured registry can pull specific images:

```bash
replicated registry test HOSTNAME \
  --image IMAGE_NAME
```

Where:
* `HOSTNAME` is the name of the host, such as `index.docker.io`.
* `IMAGE_NAME` is the name of the target image in the registry.

For example:

```bash
replicated registry test index.docker.io --image my-company/my-image:v1.2.3
```

## Related Topic

[Tutorial: Using ECR for Private Images](tutorial-ecr-private-images)

================
File: docs/vendor/packaging-private-registry-security.md
================
# Replicated Registry Security

This document lists the security measures and processes in place to ensure that images pushed to the Replicated registry remain private. For more information about pushing images to the Replicated registry, see [Using the Replicated Registry for KOTS Installations](private-images-replicated).

## Single Tenant Isolation

The registry is deployed and managed as a multi-tenant application, but each tenant is completely isolated from data that is created and pulled by other tenants. Docker images have shared base layers, but the private registry does not share these between tenants. For example, if a tenant creates an image `FROM postgres:10.3` and pushes the image to Replicated, all of the layers are uploaded, even if other tenants have this base layer uploaded.

A tenant in the private registry is a team on the Replicated [Vendor Portal](https://vendor.replicated.com). Licenses and customers created by the team are also granted some permissions to the registry data, as specified in the following sections. Cross-tenant access is never allowed in the private registry.

## Authentication and Authorization

The private registry supports several methods of authentication. Public access is never allowed because the registry only accepts authenticated requests.

### Vendor Authentication

All accounts with read/write access on the Vendor Portal have full access to all images pushed by the tenant to the registry. These users can push and pull images to and from the registry.
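For example, a vendor can authenticate with Docker and push an image to the Replicated registry as in the following sketch, where the app slug and image name are illustrative placeholders:

```bash
# Log in using your Vendor Portal email and a valid token as the password
docker login registry.replicated.com

# Tag a local image into your app's namespace on the Replicated registry
docker tag my-image:v1.0.0 registry.replicated.com/my-app/my-image:v1.0.0

# Push the image; team members with read/write access can then pull it
docker push registry.replicated.com/my-app/my-image:v1.0.0
```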

### End Customer Authentication

A valid (unexpired) license file has an embedded `registry_token` value. Replicated components shipped to customers use this value to authenticate to the registry. Only pull access is enabled when authenticating using a `registry_token`. A `registry_token` has pull access to all images in the tenant's account. All requests to pull images are denied when a license expires or the expiration date is changed to a past date.

## Networking and Infrastructure

A dedicated cluster is used to run the private registry and is not used for any other services.

The registry metadata is stored in a shared database instance. This database contains information about each layer in an image, but not the image data itself.

The registry image data is securely stored in an encrypted S3 bucket. Each layer is encrypted at rest, using a shared key stored in [Amazon Key Management Service](https://aws.amazon.com/kms/). Each tenant has a unique directory in the shared bucket and access is limited to the team or license making the request.

The registry cluster runs on a hardened operating system image (CentOS-based), and all instances are on a private virtual private cloud (VPC). Public IP addresses are not assigned to the instances running the cluster and the registry images. Instead, only port 443 traffic is allowed from a layer 7 load balancer to these servers.

There are no SSH public keys on these servers, and password-based SSH login is disallowed. The servers are not configured to have any remote access. All deployments to these servers are automated using tools such as Terraform and a custom-built CI/CD process. Only verified images are pulled and run.

## Runtime Monitoring

Replicated uses a Web Application Firewall (WAF) on the cluster that monitors and blocks any unusual activity. When unusual activity is detected, access from that endpoint is automatically blocked for a period of time, and a Replicated site reliability engineer (SRE) is alerted.

## Penetration Testing

Replicated completed a formal pen test that included the private registry in the scope of the test. Replicated also runs a bug bounty program and encourages responsible disclosure on any vulnerabilities that are found.

================
File: docs/vendor/packaging-public-images.mdx
================
# Connecting to a Public Registry through the Proxy Registry

This topic describes how to pull images from public registries using the Replicated proxy registry.

For more information about the Replicated proxy registry, see [About the Replicated Proxy Registry](private-images-about).

## Pull Public Images Through the Replicated Proxy Registry

You can use the Replicated proxy registry to pull both public and private images. Using the Replicated proxy registry for public images can simplify network access requirements for your customers, as they only need to whitelist a single domain (either `proxy.replicated.com` or your custom domain) instead of multiple registry domains. For private images, you must first configure registry credentials, as described in [Connecting to an External Registry](packaging-private-images).

To pull public images through the Replicated proxy registry, use the following `docker` command:

```bash
docker pull REPLICATED_PROXY_DOMAIN/proxy/APPSLUG/UPSTREAM_REGISTRY_HOSTNAME/IMAGE:TAG
```

Where:
* `APPSLUG` is your Replicated app slug found on the [app settings page](https://vendor.replicated.com/settings).
* `REPLICATED_PROXY_DOMAIN` is `proxy.replicated.com` or your custom domain.
  For information about how to set a custom domain for the proxy registry, see [Using Custom Domains](/vendor/custom-domains-using).
* `UPSTREAM_REGISTRY_HOSTNAME` is the hostname for the public registry where the image is located. If the image is located in a namespace within the registry, include the namespace after the hostname. For example, `quay.io/namespace`.
* `IMAGE` is the image name.
* `TAG` is the image tag.

## Examples

This section includes examples of pulling public images through the Replicated proxy registry.

### Pull Images from DockerHub

The following examples show how to pull public images from DockerHub:

```bash
# DockerHub is the default when no hostname is specified
docker pull proxy.replicated.com/proxy/APPSLUG/busybox
docker pull proxy.replicated.com/proxy/APPSLUG/nginx:1.16.0
```

```bash
# You can also optionally specify docker.io
docker pull proxy.replicated.com/proxy/APPSLUG/docker.io/replicated/replicated-sdk:1.0.0
```

### Pull Images from Other Registries

The following example shows how to pull images from the Amazon ECR Public Gallery:

```bash
docker pull proxy.replicated.com/proxy/APPSLUG/public.ecr.aws/nginx/nginx:latest
```

### Pull Images Using a Custom Domain for the Proxy Registry

The following example shows how to pull a public image when a custom domain is configured for the proxy registry:

```bash
docker pull my.customdomain.io/proxy/APPSLUG/public.ecr.aws/nginx/nginx:latest
```

For information about how to set a custom domain for the proxy registry, see [Using Custom Domains](/vendor/custom-domains-using).

## Related Topic

[Connecting to an External Registry](packaging-private-images)

================
File: docs/vendor/packaging-rbac.md
================
# Configuring KOTS RBAC

This topic describes role-based access control (RBAC) for Replicated KOTS in existing cluster installations. It includes information about how to change the default cluster-scoped RBAC permissions granted to KOTS.

## Cluster-scoped RBAC

When a user installs your application with KOTS in an existing cluster, Kubernetes RBAC resources are created to allow KOTS to install and manage the application.

By default, the following ClusterRole and ClusterRoleBinding resources are created that grant KOTS access to all resources across all namespaces in the cluster:

```yaml
apiVersion: "rbac.authorization.k8s.io/v1"
kind: "ClusterRole"
metadata:
  name: "kotsadm-role"
rules:
  - apiGroups: ["*"]
    resources: ["*"]
    verbs: ["*"]
```

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kotsadm-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kotsadm-role
subjects:
- kind: ServiceAccount
  name: kotsadm
  namespace: appnamespace
```

Alternatively, if your application does not require access to resources across all namespaces in the cluster, then you can enable namespace-scoped RBAC for KOTS. For information, see [About Namespace-scoped RBAC](#min-rbac) below.

## Namespace-scoped RBAC {#min-rbac}

Rather than use the default cluster-scoped RBAC, you can configure your application so that the RBAC permissions granted to KOTS are limited to a target namespace or namespaces.
By default, for namespace-scoped installations, the following Role and RoleBinding resources are created that grant KOTS permissions to all resources in a target namespace:

```yaml
apiVersion: "rbac.authorization.k8s.io/v1"
kind: "Role"
metadata:
  name: "kotsadm-role"
rules:
  - apiGroups: ["*"]
    resources: ["*"]
    verbs: ["*"]
```

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kotsadm-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kotsadm-role
subjects:
- kind: ServiceAccount
  name: kotsadm
  namespace: appnamespace
```

Namespace-scoped RBAC is supported for applications that use Kubernetes Operators or multiple namespaces. During application installation, if there are `additionalNamespaces` specified in the Application custom resource manifest file, then Roles and RoleBindings are created to grant KOTS access to resources in all specified namespaces.

### Enable Namespace-scoped RBAC {#enable}

To enable namespace-scoped RBAC permissions for KOTS, specify one of the following options in the Application custom resource manifest file:

* `supportMinimalRBACPrivileges`: Set to `true` to make namespace-scoped RBAC optional for existing cluster installations. When `supportMinimalRBACPrivileges` is `true`, cluster-scoped RBAC is used by default and users must pass the `--use-minimal-rbac` flag with the installation or upgrade command to use namespace-scoped RBAC.

* `requireMinimalRBACPrivileges`: Set to `true` to require that all installations to existing clusters use namespace-scoped access. When `requireMinimalRBACPrivileges` is `true`, all installations use namespace-scoped RBAC automatically and users do not pass the `--use-minimal-rbac` flag.

For more information about these options, see [requireMinimalRBACPrivileges](/reference/custom-resource-application#requireminimalrbacprivileges) and [supportMinimalRBACPrivileges](/reference/custom-resource-application#supportminimalrbacprivileges) in _Application_.
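As a minimal sketch, requiring namespace-scoped RBAC for all existing cluster installations looks like the following; the application name is an illustrative placeholder:

```yaml
apiVersion: kots.io/v1beta1
kind: Application
metadata:
  name: my-application
spec:
  # All existing cluster installs use namespace-scoped RBAC automatically
  requireMinimalRBACPrivileges: true
```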

### About Installing with Minimal RBAC

In some cases, it is not possible to grant the user `* * *` permissions in the target namespace. For example, an organization might have security policies that prevent this level of permissions.

If the user installing or upgrading KOTS cannot be granted `* * *` permissions in the namespace, then they can instead request the following:
* The minimum RBAC permissions required by KOTS
* RBAC permissions for any CustomResourceDefinitions (CRDs) that your application includes

Installing with the minimum KOTS RBAC permissions also requires that the user manually creates a ServiceAccount, Role, and RoleBinding for KOTS, rather than allowing KOTS to automatically create a Role with `* * *` permissions.

For more information about how users can install KOTS with minimal RBAC when namespace-scoped RBAC is enabled, see [Namespace-scoped RBAC Requirements](/enterprise/installing-general-requirements#namespace-scoped) in _Installation Requirements_.

### Limitations

The following limitations apply when using the `requireMinimalRBACPrivileges` or `supportMinimalRBACPrivileges` options to enable namespace-scoped RBAC for KOTS:

* **Existing clusters only**: The `requireMinimalRBACPrivileges` and `supportMinimalRBACPrivileges` options apply only to installations in existing clusters.

* **Preflight checks**: When namespace-scoped access is enabled, preflight checks cannot read resources outside the namespace where KOTS is installed. The preflight checks continue to function, but return less data. For more information, see [Defining Preflight Checks](/vendor/preflight-defining).

* **Velero namespace access for KOTS snapshots**: Velero is required for enabling backup and restore with the KOTS snapshots feature. Namespace-scoped RBAC does not grant access to the namespace where Velero is installed in the cluster.

  To set up snapshots when KOTS has namespace-scoped access, users can run the `kubectl kots velero ensure-permissions` command, as shown in the sketch after this list. This command creates additional Roles and RoleBindings to allow the necessary cross-namespace access. For more information, see [`velero ensure-permissions`](/reference/kots-cli-velero-ensure-permissions/) in the KOTS CLI documentation.

  For more information about snapshots, see [About Backup and Restore with Snapshots](/vendor/snapshots-overview).

* **Air Gap Installations**: For air gap installations, the `requireMinimalRBACPrivileges` and `supportMinimalRBACPrivileges` flags are supported only in automated, or _headless_, installations. In headless installations, the user passes all the required information to install both KOTS and the application with the `kots install` command. In non-headless installations, the user provides information to install the application through the Admin Console UI after KOTS is installed.

  In non-headless installations in air gap environments, KOTS does not have access to the application's `.airgap` package during installation. This means that KOTS does not have the information required to determine whether namespace-scoped access is needed, so it defaults to the more permissive, default cluster-scoped RBAC policy.

  For more information about how to do headless installations in air gap environments, see [Air Gap Installation](/enterprise/installing-existing-cluster-automation#air-gap) in _Installing with the KOTS CLI_.

* **Changing RBAC permissions for installed instances**: The RBAC permissions for KOTS are set during its initial installation. KOTS runs using the assumed identity and cannot change its own authorization. When you update your application to add or remove the `requireMinimalRBACPrivileges` and `supportMinimalRBACPrivileges` flags in the Application custom resource, the RBAC permissions for KOTS are affected only for new installations. Existing KOTS installations continue to run with their current RBAC permissions.

  To expand the scope of RBAC for KOTS from namespace-scoped to cluster-scoped in new installations, Replicated recommends that you include a preflight check to ensure the permission is available in the cluster.
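Assuming Velero is installed in the `velero` namespace and KOTS is installed in an application namespace, the `ensure-permissions` command from the Velero limitation above can be run as in the following sketch; the namespace names are illustrative:

```bash
# Grant KOTS the cross-namespace access to Velero that snapshots require
kubectl kots velero ensure-permissions \
  --namespace my-app-namespace \
  --velero-namespace velero
```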

================
File: docs/vendor/packaging-using-tls-certs.mdx
================
import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"

# Using TLS Certificates

<KurlAvailability/>

Replicated KOTS provides default self-signed certificates that renew automatically. For embedded clusters created with Replicated kURL, the self-signed certificate renews 30 days before expiration when you enable the kURL EKCO add-on version 0.7.0 and later.

Custom TLS options are supported:

- **Existing clusters:** The expectation is for the end customer to bring their own Ingress Controller such as Contour or Istio and upload their own `kubernetes.io/tls` secret. For an example, see [Ingress with TLS](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) in the Kubernetes documentation.

- **Embedded kURL clusters:** End customers can upload a custom TLS certificate. Replicated kURL creates a TLS secret that can be reused by other Kubernetes resources, such as a Deployment or Ingress, which can be easier than providing and maintaining multiple certificates. As a vendor, you can enable the use of custom TLS certificates with these additional resources.

For example, if your application does TLS termination, your deployment would need the TLS secret. Or, if the application is connecting to another deployment that is also secured using the same SSL certificate (which may not be a trusted certificate), the custom TLS certificate can be used to do validation without relying on the trust chain.

### Get the TLS Secret

kURL sets up a Kubernetes secret called `kotsadm-tls`. The secret stores the TLS certificate, key, and hostname. Initially, the secret has an annotation set called `acceptAnonymousUploads`. This indicates that a new TLS certificate can be uploaded by the end customer during the installation process. For more information about installing with kURL, see [Online Installation with kURL](/enterprise/installing-kurl).

Before you can reference the TLS certificate in other resources, you must get the `kotsadm-tls` secret output.

To get the `kotsadm-tls` secret, run:

```shell
kubectl get secret kotsadm-tls -o yaml
```

In the output, the `tls.crt` and `tls.key` hold the certificate and key that can be referenced in other Kubernetes resources.

**Example Output:**

```yaml
apiVersion: v1
kind: Secret
type: kubernetes.io/tls
metadata:
  name: kotsadm-tls
data:
  tls.crt:
  tls.key:
```

### Use TLS in a Deployment Resource

This procedure shows how to reference the `kotsadm-tls` secret using an example nginx Deployment resource (`kind: Deployment`).

To use the `kotsadm-tls` secret in a Deployment resource:

1. In the Deployment YAML file, configure SSL for volumeMounts and volumes, and add the `kotsadm-tls` secret to volumes:

   **Example:**

   ```yaml
   apiVersion: apps/v1
   kind: Deployment
   metadata:
     name: nginx
   spec:
     template:
       spec:
         containers:
         - name: nginx
           volumeMounts:
           - mountPath: "/etc/nginx/ssl"
             name: nginx-ssl
             readOnly: true
         volumes:
         - name: nginx-ssl
           secret:
             secretName: kotsadm-tls
   ```

1. Deploy the release, and then verify the pod deployment using the `kubectl exec` command:

   **Example:**

   ```shell
   export POD_NAME=nginx-
   kubectl exec -it ${POD_NAME} bash
   ```

1. Run the `ls` and `cat` commands to verify that the certificate and key were deployed to the specified volumeMount:

   **Example:**

   ```shell
   $ ls /etc/nginx/ssl
   tls.crt tls.key

   $ cat /etc/nginx/ssl/tls.crt
   -----BEGIN CERTIFICATE-----
   MIID8zCCAtugAwIBAgIUZF+NWHnpJCt2R1rDUhYjwgVv72UwDQYJKoZIhvcNAQEL

   $ cat /etc/nginx/ssl/tls.key
   -----BEGIN PRIVATE KEY-----
   MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCyiGNuHw2LY3Rv
   ```

### Use TLS in an Ingress Resource

You can add the `kotsadm-tls` secret to the Ingress resource to terminate TLS at the Contour layer.
The following example shows how to configure `secretName: kotsadm-tls` under the TLS `hosts` field in an Ingress resource (`kind: Ingress`):

**Example:**

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: nginx
spec:
  tls:
    - hosts:
        - 'tls.foo.com'
      secretName: kotsadm-tls
  rules:
    - host: tls.foo.com
      http:
        paths:
          - path: /
            backend:
              serviceName: nginx
              servicePort: 80
```

:::note
`tls.foo.com` must resolve to a valid IP, and also must match the Common Name (CN) or Subject Alternative Name (SAN) of the TLS certificate.
:::

================
File: docs/vendor/planning-questionnaire.md
================
# Customer Application Deployment Questionnaire

Before you package and distribute an application, Replicated recommends that you understand several key characteristics about the environments where your customers will deploy your application.

To gather this information about your customers' environments:
1. Copy and customize the [$APP Deployment Questionnaire](#app-deployment-questionnaire) below.
1. Replace $APP with the name of your application.
1. Send the questionnaire to your users.

## $APP Deployment Questionnaire

### Infrastructure

This section includes questions about your infrastructure and how you deploy software. This includes both internally-written and Commercial Off The Shelf (COTS) applications.

If it's more convenient, limit answers to the scope of the target infrastructure for deploying $APP.

- Do you use any IaaS like AWS, GCP, or Azure?

- If you deploy to a physical datacenter, do you use a Hypervisor like VSphere?

- Do you ever install on bare metal?

- Do you have any restrictions on what operating systems are used?

- Does the target infrastructure have a direct outbound internet connection? Can it connect out via a Proxy?

- If the environment has no outbound network, do machines in a DMZ have direct network access to the air gapped infrastructure, or do release artifacts need to be copied to physical media for installation?

- If there is an issue causing downtime in the on-prem application, would you be willing to give the $APP team direct SSH access to the instance(s)?

### Development and Deployment Processes

- Do you require applications to be deployed by a configuration management framework like Chef, Ansible, or Puppet?

- Do you run any container-based workloads today?

- If you run container workloads, do you run any kind of orchestration like Kubernetes, Mesos, or Docker Swarm?

- If you run container workloads, what tools do you use to host and serve container images?

- If you run container workloads, what tools do you use to scan and secure container images?

- If you are deploying $APP to your existing Kubernetes cluster, can your cluster nodes pull images from the public internet, or do you require images to be stored in an internal registry?

### Change Management

- How do you test new releases of COTS software? Do you have a UAT or Staging environment? Are there other change management requirements?

- How often do you like to receive planned (non-critical) software updates? Quarterly? Monthly? As often as possible?

- For critical updates, what is your target deployment time for new patches? Do you have a requirement for how quickly patches are made available after a vulnerability is announced?

- Do you drive production deploys automatically from version control ("gitops")?

### Application Usage and Policy Requirements

- For applications that expose a web UI, how will you be connecting to the instance? As much as possible, include details about your workstation, any tunneling/VPN/proxy infrastructure, and what browsers you intend to use.

- Do you require a disaster recovery strategy for deployed applications? If so, where are backups stored today? (SFTP? NAS? S3-compliant object store? Something else?)

- Do you require deployed COTS applications to support logins with an internal identity provider like OpenLDAP, Windows AD or SAML?

- Do you require an audit log of all user activity performed in $APP? What are your needs around exporting / aggregating audit log data?

- Do you anticipate the need to scale the capacity of $APP up and down during its lifetime?

- What are your requirements around log aggregation? What downstream systems do you need system logs to be piped to?

================
File: docs/vendor/policies-data-transmission.md
================
# Data Transmission Policy

A Replicated installation connects to a Replicated-hosted endpoint periodically to perform various tasks including checking for updates and synchronizing the installed license properties. During this time, some data is transmitted from an installed instance to the Replicated API. This data is limited to:

- The IP address of the primary Replicated instance.
- The ID of the installation.
- [Resource statuses](/enterprise/status-viewing-details#resource-statuses)
- Information about the installation including data needed for [instance details](/vendor/instance-insights-details).
- [Custom metrics](/vendor/custom-metrics) which the vendor may configure as part of the installation.
- Date and timestamps of the data transmission.

This data is required to provide the expected update and license services. The data is also used to provide telemetry and other reporting features.

By default, no additional data is collected and transmitted from the instance to external servers.

All data is encrypted in transit according to industry best practices. For more information about Replicated's security practices, see [Security at Replicated](https://www.replicated.com/security/) on the Replicated website.

For more information about application instance data fields that the Replicated Vendor Portal uses to generate events for instances, see [About Instance and Event Data](/vendor/instance-insights-event-data).

Last modified December 31, 2023

================
File: docs/vendor/policies-infrastructure-and-subprocessors.md
================
# Infrastructure and Subprocessor Providers

This list describes the infrastructure environment, subprocessors and other entities material to the Replicated products and services.

Prior to engaging any third party, Replicated performs diligence to evaluate their privacy, security and confidentiality practices. Whenever possible, Replicated uses encryption for data at rest and in motion so that all information is not available to these third parties.

Replicated does not engage in the business of selling or trading personal information. Any personally identifiable information Replicated might possibly hold is data that a customer has provided to us.

The fields that Replicated may possess as identifiable to a physical person may include:
- Name
- Email
- Phone Number
- Job Title
- Business Address
- Github Username

Note: This does not imply that all these fields are collected for each person.
It also does not mean all these data points are used with each declared provider.

## Replicated Infrastructure Providers

Replicated might use the following entities to provide infrastructure that helps with delivery of our products:

| Entity Name | Purpose | Country where Infrastructure Resides | Notes |
|---|---|---|---|
| Amazon Web Services | Various IaaS | United States | Vendor Portal, registry, API, and supporting infrastructure services. |
| Cloudflare | Network security, DDoS mitigation, DNS | United States | |
| Datadog | Performance monitoring | United States | |
| DBT Labs | Data transformation or migration | United States | |
| FiveTran | Data transformation or migration | United States | |
| Github | Customer support | United States | Replicated's customers may engage with our customer support team using Github issues in a private repo. |
| Google Looker | Product usage metrics | United States | |
| Hex | Data transformation or migration | United States | |
| Knock Labs, Inc. | Event notifications | United States | |
| Postmark / Active Campaign | Transactional emails from the Vendor Portal. Marketing-related communications. | United States | Active Campaign and Postmark businesses merged. |
| Salesforce | Customer and sales relationship management | United States | |
| Snowflake | Usage data analysis and transformation | United States | |
| Timescale | Time-series data of instance metrics | United States | See our [Data Transmission Policy](/vendor/policies-data-transmission) |

Last modified January 4, 2024

================
File: docs/vendor/policies-support-lifecycle.md
================
# Support Lifecycle Policy

Replicated will provide support for products per our terms and services until that product is noted as End of Life (EOL).
| Product Phase | Definition |
|---------------|------------|
| Alpha | A product or feature that is exploratory or experimental. Typically, access to alpha features and their documentation is limited to customers providing early feedback. While most alpha features progress to beta and general availability (GA), some are deprecated based on assessment learnings. |
| Beta | A product or feature that is typically production-ready, but has not met Replicated’s definition of GA for one or more of the following reasons: <ul><li>Remaining gaps in intended functionality</li><li>Outstanding needs around testing</li><li>Gaps in documentation or sales enablement</li><li>In-progress customer value validation efforts</li></ul> Documentation for beta products and features is published on the Replicated Documentation site with a "(Beta)" label. Beta products or features follow the same build and test processes required for GA. Please contact your Replicated account representative if you have questions about why a product or feature is beta. |
| “GA” - General Availability | A product or feature that has been validated as both production-ready and value-additive by a percentage of Replicated customers. Products in the GA phase are typically those that are available for purchase from Replicated. |
| “LA” - Limited Availability | A product has reached the Limited Availability phase when it is no longer available for new purchases from Replicated. Updates will be primarily limited to security patches, critical bugs, and features that enable migration to GA products. |
| “EOA” - End of Availability | A product has reached the End of Availability phase when it is no longer available for renewal purchase by existing customers. This date may coincide with the Limited Availability phase. The product is considered deprecated, and will move to End of Life after a determined support window. Product maintenance is limited to critical security issues only. |
| “EOL” - End of Life | A product has reached its End of Life, and will no longer be supported, patched, or fixed by Replicated. Associated product documentation may no longer be available. The Replicated team will continue to engage to migrate end customers to GA product-based deployments of your application. |
| Replicated Product | Product Phase | End of Availability | End of Life |
|--------------------|---------------|---------------------|-------------|
| Compatibility Matrix | GA | N/A | N/A |
| Replicated SDK | Beta | N/A | N/A |
| Replicated KOTS Installer | GA | N/A | N/A |
| Replicated kURL Installer | GA | N/A | N/A |
| Replicated Embedded Cluster Installer | GA | N/A | N/A |
| Replicated Classic Native Installer | EOL | 2023-12-31* | 2024-12-31* |

*Except for customers who have specifically contracted different dates for the End of Availability and End of Life timelines.

## Supported Replicated Installer Versions

The following table lists the versions of Replicated Embedded Cluster, Replicated KOTS, and Replicated kURL that are supported on each Kubernetes version.

The End of Replicated Support date is the End of Life (EOL) date for the Kubernetes version. The EOL date for each Kubernetes version is published on the [Releases](https://kubernetes.io/releases/) page in the Kubernetes documentation.
| Kubernetes Version | Embedded Cluster Versions | KOTS Versions | kURL Versions | End of Replicated Support |
|--------------------|---------------------------|---------------|---------------|---------------------------|
| 1.32 | N/A | N/A | N/A | 2026-02-28 |
| 1.31 | N/A | 1.117.0 and later | v2024.08.26-0 and later | 2025-10-28 |
| 1.30 | 1.16.0 and later | 1.109.1 and later | v2024.05.03-0 and later | 2025-06-28 |
| 1.29 | 1.0.0 and later | 1.105.2 and later | v2024.01.02-0 and later | 2025-02-28 |
Replicated support for end-customer installations is limited to those installs using a Replicated-provided installer product, such as KOTS, kURL, or Embedded Cluster, available with the [Business or Enterprise plans](https://www.replicated.com/pricing). Replicated support for direct Helm CLI installs or other vendor-provided installers is limited to the successful distribution of the software to the end customer, as well as any issues with the Replicated SDK if included with the installation.

The information contained herein is believed to be accurate as of the date of publication, but updates and revisions may be posted periodically and without notice.

Last modified January 2, 2025.

================
File: docs/vendor/policies-vulnerability-patch.md
================
# Vulnerability Patch Policy

While it’s our goal to distribute vulnerability-free versions of all components, this isn’t always possible.
Kubernetes and KOTS are made from many components, each authored by different vendors.

The best way to stay ahead of vulnerabilities is to run the latest version and have a strategy to quickly update when a patch is available.

## How We Scan

Our build pipeline uses [Trivy](https://www.aquasec.com/products/trivy/) to scan for and detect known, published vulnerabilities in our images.
It’s possible that other security scanners will detect a different set of results.
We commit to patching vulnerabilities according to the timeline below based on the results of our internal scans.

If you or your customer detects a different vulnerability using a different scanner, we encourage you to report it to us in a GitHub issue, in a Slack message, or by opening a support issue from the Replicated Vendor Portal.
Our team will evaluate the vulnerability and determine the best course of action.

## Base Images

KOTS images are built on top of Chainguard's open source [Wolfi](https://edu.chainguard.dev/open-source/wolfi/overview/) base image. Wolfi is a Linux undistro that is focused on supply chain security.

KOTS has automation that uses the Chainguard [melange](https://edu.chainguard.dev/open-source/melange/overview/) and [apko](https://edu.chainguard.dev/open-source/apko/overview/) projects to build packages and assemble images on Wolfi. Building and assembling images in this way helps to ensure that any CVEs can be resolved quickly and efficiently.

## Upstream CVE Disclosure

Replicated KOTS, kURL, and Embedded Cluster deliver many upstream Kubernetes and ecosystem components.
We do not build these packages and rely on the upstream software vendor to distribute patches.
Our intent is to make any patches available as soon as possible, but we guarantee the following timeline to make upstream patches available after we learn about the vulnerability and a patch is available to us:

| CVE Level | Time to release |
|-----------|-----------------|
| Critical | Within 2 weeks |
| High | Within 60 days |
| Medium | Within 90 days |
| Low | Best effort unless risk accepted |

## Notable Upstream CVEs

This section lists CVEs that have yet to be resolved by the upstream maintainers and therefore are not patched in Replicated. This is not an exhaustive list of unpatched upstream CVEs; instead, these are noteworthy CVEs that we have evaluated and on which we offer our opinion to help with your own security reviews. When available, we will apply upstream patches in accordance with our policy described in [Upstream CVE Disclosure](#upstream-cve-disclosure) above.
We will update this list after applying any upstream patches.

| CVE ID | Explanation |
|--------|------------|
| None | N/A |

## Vulnerability Management Exception Policy

There might be instances where policy exceptions are required to continue using third-party software with known vulnerabilities in our on-premises products. Some reasons for an exception include:

- Feature breakage or bugs in patched versions
- Performance issues in patched versions
- A patched version that contains higher-severity vulnerabilities

Regardless of the reason, an exception is vetted from a business impact and security standpoint. The business review assesses the overall impact to the product created by the patched, but otherwise problematic, piece of software. The security portion determines whether the CVE is applicable in this specific context and whether that CVE's impact to the product’s overall security posture is acceptable.

In the event of a vulnerability management exception, a notice is posted containing:

- The impacted product(s)
- The rationale for the exception
- The relevant CVE(s)
- A risk assessment in the product context for each CVE

As subsequent versions of the vulnerable software are released, Replicated continues researching to find a solution that satisfies the business and security requirements of the original exception.

## Known Disclosed Vulnerabilities in our On-Premises Products

| CVE | CVE Summary | Rationale | Additional Reading |
|-----|-------------|-----------|--------------------|
| None | N/A | N/A | N/A |

Last modified January 29, 2025.

================
File: docs/vendor/preflight-defining.mdx
================
# Defining Preflight Checks

This topic describes how to define preflight checks in Helm and Kubernetes manifest-based applications. For more information about preflight checks, see [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about).

The information in this topic applies to applications that are installed with Helm or with Replicated KOTS.

## Step 1: Create the Manifest File

You can define preflight checks in a Kubernetes Secret or in a Preflight custom resource. The type of manifest file that you use depends on your application type (Helm or Kubernetes manifest-based) and the installation methods that your application supports (Helm, KOTS v1.101.0 or later, or KOTS v1.100.3 or earlier).

* **Helm Applications**: For Helm applications, see the following guidance:

   * **(Recommended) Helm or KOTS v1.101.0 or Later**: For Helm applications installed with Helm or KOTS v1.101.0 or later, define the preflight checks in a Kubernetes Secret in your Helm chart `templates`. See [Kubernetes Secret](#secret).

   * **KOTS v1.100.3 or Earlier**: For Helm applications installed with KOTS v1.100.3 or earlier, define the preflight checks in a Preflight custom resource. See [Preflight Custom Resource](#preflight-cr).

* **Kubernetes Manifest-Based Applications**: For Kubernetes manifest-based applications, define the preflight checks in a Preflight custom resource. See [Preflight Custom Resource](#preflight-cr).

### Kubernetes Secret {#secret}

For Helm applications installed with Helm or KOTS v1.101.0 or later, define preflight checks in a Kubernetes Secret in your Helm chart `templates`. This allows you to define the preflight spec only once to support running preflight checks in both Helm and KOTS installations.
For a tutorial that demonstrates how to define preflight checks in a Secret in chart `templates` and then run the preflight checks in both Helm and KOTS installations, see [Tutorial: Add Preflight Checks to a Helm Chart](/vendor/tutorial-preflight-helm-setup).

Add the following YAML to a Kubernetes Secret in your Helm chart `templates` directory:

```yaml
apiVersion: v1
kind: Secret
metadata:
  labels:
    troubleshoot.sh/kind: preflight
  name: "{{ .Release.Name }}-preflight-config"
stringData:
  preflight.yaml: |
    apiVersion: troubleshoot.sh/v1beta2
    kind: Preflight
    metadata:
      name: preflight-sample
    spec:
      collectors: []
      analyzers: []
```

As shown above, the Secret must include the following:

* The label `troubleshoot.sh/kind: preflight`
* A `stringData` field with a key named `preflight.yaml` so that the preflight binary can use this Secret when it runs from the CLI

### Preflight Custom Resource {#preflight-cr}

Define preflight checks in a Preflight custom resource for the following installation types:

* Kubernetes manifest-based applications installed with any version of KOTS
* Helm applications installed with KOTS v1.100.3 and earlier

  :::note
  For Helm charts installed with KOTS v1.101.0 and later, Replicated recommends that you define preflight checks in a Secret in the Helm chart `templates` instead of using the Preflight custom resource. See [Kubernetes Secret](#secret) above.

  In KOTS v1.101.0 and later, preflights defined in the Helm chart override the Preflight custom resource used by KOTS. During installation, if KOTS v1.101.0 and later cannot find preflights specified in the Helm chart archive, then KOTS searches for `kind: Preflight` in the root of the release.
  :::

Add the following YAML to a new file in a release:

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: Preflight
metadata:
  name: preflights
spec:
  collectors: []
  analyzers: []
```

For more information about the Preflight custom resource, see [Preflight and Support Bundle](/reference/custom-resource-preflight).

## Step 2: Define Collectors and Analyzers

This section describes how to define collectors and analyzers for preflight checks based on your application needs. You add the collectors and analyzers that you want to use in the `spec.collectors` and `spec.analyzers` keys in the manifest file that you created.

### Collectors

Collectors gather information from the cluster, the environment, the application, or other sources. Collectors generate output that is then used by the analyzers that you define to generate results for the preflight checks.

The following default collectors are included automatically to gather information about the cluster and cluster resources:

* [clusterInfo](https://troubleshoot.sh/docs/collect/cluster-info/)
* [clusterResources](https://troubleshoot.sh/docs/collect/cluster-resources/)

You do not need to manually include the `clusterInfo` or `clusterResources` collectors in the specification. To use only the `clusterInfo` and `clusterResources` collectors, delete the `spec.collectors` key from the preflight specification.

The Troubleshoot open source project includes several additional collectors that you can include in the specification to gather more information from the installation environment. To view all the available collectors, see [All Collectors](https://troubleshoot.sh/docs/collect/all/) in the Troubleshoot documentation.
### Analyzers

Analyzers use the output from the collectors to generate results for the preflight checks, including the criteria for pass, fail, and warn outcomes and custom messages for each outcome.

For example, in a preflight check that checks the version of Kubernetes running in the target cluster, the analyzer can define a fail outcome when the cluster is running a version of Kubernetes less than 1.25 that includes the following custom message to the user: `The application requires Kubernetes 1.25.0 or later, and recommends 1.27.0`.

The Troubleshoot open source project includes several analyzers that you can include in your preflight check specification. The following are some of the analyzers in the Troubleshoot project that use the default `clusterInfo` or `clusterResources` collectors:

* [clusterPodStatuses](https://troubleshoot.sh/docs/analyze/cluster-pod-statuses/)
* [clusterVersion](https://troubleshoot.sh/docs/analyze/cluster-version/)
* [deploymentStatus](https://troubleshoot.sh/docs/analyze/deployment-status/)
* [distribution](https://troubleshoot.sh/docs/analyze/distribution/)
* [nodeResources](https://troubleshoot.sh/docs/analyze/node-resources/)
* [statefulsetStatus](https://troubleshoot.sh/docs/analyze/stateful-set-status/)
* [storageClass](https://troubleshoot.sh/docs/analyze/storage-class/)

To view all the available analyzers, see the [Analyze](https://troubleshoot.sh/docs/analyze/) section of the Troubleshoot documentation.

### Block Installation with Required (Strict) Preflights {#strict}

For applications installed with KOTS, you can set any preflight analyzer to `strict: true`. When `strict: true` is set, any `fail` outcomes for the analyzer block the deployment of the release.

:::note
Strict preflight analyzers are ignored if the `exclude` property is also included and evaluates to `true`. See [exclude](https://troubleshoot.sh/docs/analyze/#exclude) in the Troubleshoot documentation.
:::

### Examples

For common examples of collectors and analyzers used in preflight checks, see [Example Preflight Specs](/vendor/preflight-examples).
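As a quick illustration of how the collectors and analyzers fit together, the following is a minimal sketch of a Preflight specification that implements the Kubernetes version check described above (the version thresholds shown are illustrative):

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: Preflight
metadata:
  name: preflights
spec:
  analyzers:
    # Uses data from the default clusterInfo collector
    - clusterVersion:
        outcomes:
          - fail:
              when: "< 1.25.0"
              message: The application requires Kubernetes 1.25.0 or later, and recommends 1.27.0.
              uri: https://kubernetes.io
          - warn:
              when: "< 1.27.0"
              message: Your cluster meets the minimum version of Kubernetes, but 1.27.0 or later is recommended.
              uri: https://kubernetes.io
          - pass:
              message: Your cluster meets the recommended and required versions of Kubernetes.
```

Because the `spec.collectors` key is omitted, only the default `clusterInfo` and `clusterResources` collectors run.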
================
File: docs/vendor/preflight-examples.mdx
================
import HttpSecret from "../partials/preflights/_http-requests-secret.mdx"
import HttpCr from "../partials/preflights/_http-requests-cr.mdx"
import MySqlSecret from "../partials/preflights/_mysql-secret.mdx"
import MySqlCr from "../partials/preflights/_mysql-cr.mdx"
import K8sVersionSecret from "../partials/preflights/_k8s-version-secret.mdx"
import K8sVersionCr from "../partials/preflights/_k8s-version-cr.mdx"
import K8sDistroSecret from "../partials/preflights/_k8s-distro-secret.mdx"
import K8sDistroCr from "../partials/preflights/_k8s-distro-cr.mdx"
import NodeReqSecret from "../partials/preflights/_node-req-secret.mdx"
import NodeReqCr from "../partials/preflights/_node-req-cr.mdx"
import NodeCountSecret from "../partials/preflights/_node-count-secret.mdx"
import NodeCountCr from "../partials/preflights/_node-count-cr.mdx"
import NodeMemSecret from "../partials/preflights/_node-mem-secret.mdx"
import NodeMemCr from "../partials/preflights/_node-mem-cr.mdx"
import NodeStorageClassSecret from "../partials/preflights/_node-storage-secret.mdx"
import NodeStorageClassCr from "../partials/preflights/_node-storage-cr.mdx"
import NodeEphemStorageSecret from "../partials/preflights/_node-ephem-storage-secret.mdx"
import NodeEphemStorageCr from "../partials/preflights/_node-ephem-storage-cr.mdx"
import NodeCpuSecret from "../partials/preflights/_node-cpu-secret.mdx"
import NodeCpuCr from "../partials/preflights/_node-cpu-cr.mdx"
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

# Example Preflight Specs

This section includes common examples of preflight check specifications. For more examples, see the [Troubleshoot example repository](https://github.com/replicatedhq/troubleshoot/tree/main/examples/preflight) in GitHub.

## Check HTTP or HTTPS Requests from the Cluster

The examples below use the `http` collector and the `textAnalyze` analyzer to check that an HTTP request to the Slack API at `https://api.slack.com/methods/api.test` made from the cluster returns a successful response of `"status": 200,`.

For more information, see [HTTP](https://troubleshoot.sh/docs/collect/http/) and [Regular Expression](https://troubleshoot.sh/docs/analyze/regex/) in the Troubleshoot documentation.
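The complete Kubernetes Secret and Preflight custom resource variants of this check are provided as partials (imported above). The core collector and analyzer pair that both variants share might look like the following sketch, based on the description above:

```yaml
collectors:
  # Makes an HTTP GET request from inside the cluster and stores the result as slack.json
  - http:
      collectorName: slack
      get:
        url: https://api.slack.com/methods/api.test
analyzers:
  # Checks the collected response body for the expected status field
  - textAnalyze:
      checkName: Slack Accessible
      fileName: slack.json
      regex: '"status": 200,'
      outcomes:
        - pass:
            when: "true"
            message: Can access the Slack API.
        - fail:
            when: "false"
            message: Cannot access the Slack API.
```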
The following shows how the pass outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:

[Image: Preflight checks in Admin Console showing pass message]
## Check Kubernetes Version

The examples below use the `clusterVersion` analyzer to check the version of Kubernetes running in the cluster. The `clusterVersion` analyzer uses data from the default `clusterInfo` collector. The `clusterInfo` collector is automatically included.

For more information, see [Cluster Version](https://troubleshoot.sh/docs/analyze/cluster-version/) and [Cluster Info](https://troubleshoot.sh/docs/collect/cluster-info/) in the Troubleshoot documentation.
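The shared analyzer at the heart of both the Secret and Preflight custom resource variants might look like this sketch (the version thresholds are illustrative):

```yaml
analyzers:
  - clusterVersion:
      outcomes:
        - fail:
            when: "< 1.25.0"
            message: The application requires Kubernetes 1.25.0 or later, and recommends 1.27.0.
            uri: https://kubernetes.io
        - warn:
            when: "< 1.27.0"
            message: Your cluster meets the minimum version of Kubernetes, but 1.27.0 or later is recommended.
            uri: https://kubernetes.io
        - pass:
            message: Your cluster meets the recommended and required versions of Kubernetes.
```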
The following shows how the warn outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:

[Image: Preflight checks in Admin Console showing warning message]
## Check Kubernetes Distribution

The examples below use the `distribution` analyzer to check the Kubernetes distribution of the cluster. The `distribution` analyzer uses data from the default `clusterInfo` collector. The `clusterInfo` collector is automatically included.

For more information, see [Cluster Info](https://troubleshoot.sh/docs/collect/cluster-info/) and [Distribution](https://troubleshoot.sh/docs/analyze/distribution/) in the Troubleshoot documentation.
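A sketch of the shared `distribution` analyzer follows; the specific distributions that pass or fail are illustrative:

```yaml
analyzers:
  - distribution:
      outcomes:
        - fail:
            when: "== docker-desktop"
            message: The application does not support Docker Desktop clusters.
        - pass:
            when: "== eks"
            message: EKS is a supported distribution.
        - pass:
            when: "== gke"
            message: GKE is a supported distribution.
        - warn:
            message: Unable to determine the distribution of Kubernetes.
```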
The following shows how the pass outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:

[Image: Preflight checks in Admin Console showing pass message]
## Check MySQL Version Using Template Functions

The examples below use the `mysql` collector and the `mysql` analyzer to check the version of MySQL running in the cluster.

For more information, see [Collect > MySQL](https://troubleshoot.sh/docs/collect/mysql/) and [Analyze > MySQL](https://troubleshoot.sh/docs/analyze/mysql/) in the Troubleshoot documentation.
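A sketch of the underlying collector and analyzer pair follows. The connection URI shown is a placeholder; in practice it is rendered with Helm or KOTS template functions, as described below:

```yaml
collectors:
  - mysql:
      collectorName: mysql
      # Placeholder URI; render this with a template function in a real spec
      uri: 'username:password@tcp(10.0.0.1:3306)/defaultdb?tls=false'
analyzers:
  - mysql:
      checkName: Must be MySQL 8.x or later
      collectorName: mysql
      outcomes:
        - fail:
            when: connected == false
            message: Cannot connect to MySQL server
        - fail:
            when: version < 8.x
            message: The MySQL server must be at least version 8
        - pass:
            message: The MySQL server is ready
```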
The Kubernetes Secret example uses Helm template functions to render the credentials and connection details for the MySQL server that were supplied by the user. Additionally, it uses Helm template functions to create a conditional statement so that the MySQL collector and analyzer are included in the preflight checks only when MySQL is deployed, as indicated by a `.Values.global.mysql.enabled` field evaluating to true.

For more information about using Helm template functions to access values from the values file, see Values Files in the Helm documentation.
The Preflight custom resource example uses KOTS template functions in the Config context to render the credentials and connection details for the MySQL server that were supplied by the user on the Replicated Admin Console **Config** page. Replicated recommends using a template function for the URI to avoid exposing sensitive information. For more information about template functions, see About Template Functions.

This example also uses an analyzer with `strict: true`, which prevents installation from continuing if the preflight check fails.
The following shows how a fail outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade when `strict: true` is set for the analyzer:

[Image: Strict preflight checks in Admin Console showing fail message]
## Check Node Memory

The examples below use the `nodeResources` analyzer to check the amount of memory available on the nodes in the cluster. The `nodeResources` analyzer uses data from the default `clusterResources` collector. The `clusterResources` collector is automatically included.

For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) in the Troubleshoot documentation.
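A sketch of the shared analyzer follows; the 8 GiB threshold is illustrative:

```yaml
analyzers:
  - nodeResources:
      checkName: Every node in the cluster must have at least 8 GB of memory
      outcomes:
        - fail:
            when: "min(memoryCapacity) < 8Gi"
            message: All nodes must have at least 8 GB of memory.
        - pass:
            message: All nodes have at least 8 GB of memory.
```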
The following shows how a warn outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:

[Image: Preflight checks in Admin Console showing warn message]
## Check Node Storage Class Availability

The examples below use the `storageClass` analyzer to check that a required storage class is available in the cluster. The `storageClass` analyzer uses data from the default `clusterResources` collector. The `clusterResources` collector is automatically included.

For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Storage Class](https://troubleshoot.sh/docs/analyze/storage-class/) in the Troubleshoot documentation.
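A sketch of the shared analyzer follows; the storage class name `default` is illustrative:

```yaml
analyzers:
  - storageClass:
      checkName: Required storage classes
      storageClassName: "default"
      outcomes:
        - fail:
            message: Could not find a storage class called "default".
        - pass:
            message: A storage class called "default" is present.
```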
The following shows how a fail outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:

[Image: Preflight checks in Admin Console showing fail message]
## Check Node Ephemeral Storage

The examples below use the `nodeResources` analyzer to check the ephemeral storage available in the nodes in the cluster. The `nodeResources` analyzer uses data from the default `clusterResources` collector. The `clusterResources` collector is automatically included.

For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) in the Troubleshoot documentation.
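A sketch of the shared analyzer follows; the storage thresholds are illustrative:

```yaml
analyzers:
  - nodeResources:
      checkName: Min Node Ephemeral Storage
      outcomes:
        - fail:
            when: "min(ephemeralStorageCapacity) < 20Gi"
            message: All nodes must have at least 20 GiB of ephemeral storage.
        - warn:
            when: "min(ephemeralStorageCapacity) < 40Gi"
            message: All nodes are recommended to have at least 40 GiB of ephemeral storage.
        - pass:
            message: All nodes have at least 40 GiB of ephemeral storage.
```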
The following shows how a pass outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:

[Image: Preflight checks in Admin Console showing pass message]
## Check Requirements Are Met By At Least One Node

The examples below use the `nodeResources` analyzer with filters to check that the requirements for memory, CPU cores, and architecture are met by at least one node in the cluster. The `nodeResources` analyzer uses data from the default `clusterResources` collector. The `clusterResources` collector is automatically included.

For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) in the Troubleshoot documentation.
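A sketch of the shared analyzer follows. The filter values are illustrative, and the exact filter names used here (`allocatableMemory`, `cpuArchitecture`, and `cpuCapacity`) should be checked against the Node Resources analyzer documentation linked above:

```yaml
analyzers:
  - nodeResources:
      checkName: At least one node must have 16 GB available memory and 5 CPU cores on amd64
      filters:
        allocatableMemory: 16Gi
        cpuArchitecture: amd64
        cpuCapacity: "5"
      outcomes:
        - fail:
            when: "count() < 1"
            message: This application requires at least one node with 16 GB available memory and 5 CPU cores.
        - pass:
            message: This cluster has a node with enough memory and CPU cores.
```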
The following shows how the fail outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:

[Image: Preflight checks in Admin Console showing fail message]
## Check Total CPU Cores Across Nodes

The examples below use the `nodeResources` analyzer to check the total number of CPU cores across all nodes in the cluster. The `nodeResources` analyzer uses data from the default `clusterResources` collector. The `clusterResources` collector is automatically included.

For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) in the Troubleshoot documentation.
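A sketch of the shared analyzer follows; the four-core total is illustrative:

```yaml
analyzers:
  - nodeResources:
      checkName: Total CPU cores in the cluster is 4 or greater
      outcomes:
        - fail:
            when: "sum(cpuCapacity) < 4"
            message: The cluster must contain at least 4 cores.
        - pass:
            message: There are at least 4 cores in the cluster.
```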
The following shows how the pass outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:

[Image: Preflight checks in Admin Console showing pass message]
================
File: docs/vendor/preflight-host-preflights.md
================
# Customizing Host Preflight Checks for kURL

This topic provides information about how to customize host preflight checks for installations with Replicated kURL. For information about the default host preflight checks that run for installations with Replicated Embedded Cluster, see [About Host Preflight Checks](/vendor/embedded-using#about-host-preflight-checks) in _Using Embedded Cluster_.

## About Host Preflight Checks

You can include host preflight checks with kURL to verify that infrastructure requirements are met for:

- Kubernetes
- kURL add-ons
- Your application

This helps to ensure successful installation and the ongoing health of the cluster.

While host preflights are intended to ensure requirements are met for running the cluster, you can also use them to codify some of your application requirements so that users get feedback even earlier in the installation process, rather than waiting to run preflights after the cluster is already installed. For more information about application checks, collectors, and analyzers, see [About Preflight Checks and Support Bundles](preflight-support-bundle-about).

Default host preflight checks verify conditions such as operating system and disk usage. Default host preflight failures block the installation from continuing and exit with a non-zero return code. Users can then update their environment and run the kURL installation script again to re-run the host preflight checks.

Host preflight checks run automatically. The default host preflight checks that run can vary, depending on whether the installation is new, an upgrade, joining a node, or an air gap installation. Additionally, some checks only run when certain add-ons are enabled in the installer. For a complete list of default host preflight checks, see [Default Host Preflights](https://kurl.sh/docs/install-with-kurl/host-preflights#default-host-preflights) in the kURL documentation.

There are general kURL host preflight checks that run with all installers. There are also host preflight checks included with certain add-ons. Customizations include the ability to:

- Bypass failures
- Block an installation for warnings
- Exclude certain preflights under specific conditions, such as when a particular license entitlement is enabled
- Skip the default host preflight checks and run only custom checks
- Add custom checks to the default host preflight checks

For more information about customizing host preflights, see [Customize Host Preflight Checks](#customize-host-preflight-checks).

## Customize Host Preflight Checks

The default host preflights run automatically as part of your kURL installation. You can customize the host preflight checks by disabling them entirely, adding customizations to the default checks to make them more restrictive, or completely customizing them. You can also customize the outcomes to enforce warnings or ignore failures.

### Add Custom Preflight Checks to the Defaults

To run customized host preflight checks in addition to the default host preflight checks, add a `hostPreflights` field to the `kurl` field in your Installer manifest. Under the `hostPreflights` field, add a host preflight specification (`kind: HostPreflight`) with your customizations. You only need to specify your customizations because the default host preflights run automatically.
Customized host preflight checks run in addition to the default host preflight checks, if the default host preflight checks are enabled.

If you only want to make the default host preflight checks more restrictive, add your more restrictive host preflight checks to `kurl.hostPreflights`, and do not set `excludeBuiltinHostPreflights`. For example, if your application requires 6 CPUs but the default host preflight check requires 4 CPUs, you can simply add a custom host preflight check for 6 CPUs, since the default host preflight must pass if the more restrictive custom check passes.

The following example shows customized `kurl` host preflight checks for:

- An application that requires more CPUs than the default
- Accessing a website that is critical to the application

```yaml
apiVersion: "cluster.kurl.sh/v1beta1"
kind: "Installer"
metadata:
  name: "latest"
spec:
  kurl:
    hostPreflights:
      apiVersion: troubleshoot.sh/v1beta2
      kind: HostPreflight
      spec:
        collectors:
        - cpu: {}
        - http:
            collectorName: Can Access A Website
            get:
              url: https://myFavoriteWebsite.com
        analyzers:
        - cpu:
            checkName: Number of CPU check
            outcomes:
            - fail:
                when: "count < 4"
                message: This server has less than 4 CPU cores
            - warn:
                when: "count < 6"
                message: This server has less than 6 CPU cores
            - pass:
                message: This server has at least 6 CPU cores
        - http:
            checkName: Can Access A Website
            collectorName: Can Access A Website
            outcomes:
            - warn:
                when: "error"
                message: Error connecting to https://myFavoriteWebsite.com
            - pass:
                when: "statusCode == 200"
                message: Connected to https://myFavoriteWebsite.com
```

### Customize the Default Preflight Checks

To customize the default host preflights:

1. Disable the default host preflight checks using `excludeBuiltinHostPreflights: true`.
1. Copy the default `host-preflights.yaml` specification for kURL from [host-preflights.yaml](https://github.com/replicatedhq/kURL/blob/main/pkg/preflight/assets/host-preflights.yaml) in the kURL repository.
1. Copy the default `host-preflight.yaml` specification for any and all add-ons that are included in your specification and have default host preflights. For links to the add-on YAML files, see [Finding the Add-on Host Preflight Checks](https://kurl.sh/docs/create-installer/host-preflights/#finding-the-add-on-host-preflight-checks) in the kURL documentation.
1. Merge the copied host preflight specifications into one host preflight specification, and paste it into the `kurl.hostPreflights` field in the Installer YAML in the Vendor Portal.
1. Edit the defaults as needed.

### Ignore or Enforce Warnings and Failures

Set either of the following flags to customize the outcome of your host preflight checks:
| Flag: Value | Description |
|-------------|-------------|
| `hostPreflightIgnore: true` | Ignores host preflight failures and warnings. The installation proceeds regardless of host preflight outcomes. |
| `hostPreflightEnforceWarnings: true` | Blocks an installation if the results include a warning. |
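For example, an Installer that runs the host preflight checks but proceeds regardless of their outcomes might look like this sketch, following the same Installer conventions shown in this topic:

```yaml
apiVersion: "cluster.kurl.sh/v1beta1"
kind: "Installer"
metadata:
  name: "latest"
spec:
  kurl:
    # Do not block installation on host preflight failures or warnings
    hostPreflightIgnore: true
```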
### Disable Host Preflight Checks

To disable the default host preflight checks for Kubernetes and all included add-ons, add the `kurl` field to your Installer manifest and add `kurl.excludeBuiltinHostPreflights: true`. In this case, no host preflight checks are run.

`excludeBuiltinHostPreflights` is an aggregate flag, so setting it to `true` disables the default host preflights for Kubernetes and all included add-ons.

**Example:**

```yaml
apiVersion: "cluster.kurl.sh/v1beta1"
kind: "Installer"
metadata:
  name: "latest"
spec:
  kurl:
    excludeBuiltinHostPreflights: true
```

## Example of Customized Host Preflight Checks

The following example shows:

- Default host preflight checks are disabled
- Customized host preflight checks run
- The installation is blocked if there is a warning

```yaml
apiVersion: "cluster.kurl.sh/v1beta1"
kind: "Installer"
metadata:
  name: "latest"
spec:
  kurl:
    excludeBuiltinHostPreflights: true
    hostPreflightEnforceWarnings: true
    hostPreflights:
      apiVersion: troubleshoot.sh/v1beta2
      kind: HostPreflight
      spec:
        collectors:
        - cpu: {}
        - http:
            collectorName: Can Access A Website
            get:
              url: https://myFavoriteWebsite.com
        analyzers:
        - cpu:
            checkName: Number of CPU check
            outcomes:
            - fail:
                when: "count < 4"
                message: This server has less than 4 CPU cores
            - warn:
                when: "count < 6"
                message: This server has less than 6 CPU cores
            - pass:
                message: This server has at least 6 CPU cores
        - http:
            checkName: Can Access A Website
            collectorName: Can Access A Website
            outcomes:
            - warn:
                when: "error"
                message: Error connecting to https://myFavoriteWebsite.com
            - pass:
                when: "statusCode == 200"
                message: Connected to https://myFavoriteWebsite.com
```

================
File: docs/vendor/preflight-running.md
================
# Running Preflight Checks for Helm Installations

This topic describes how to use the preflight kubectl plugin to run preflight checks for applications installed with the Helm CLI.

## Overview

For applications installed with the Helm CLI, your users can optionally run preflight checks using the open source preflight kubectl plugin before they run `helm install`.

The preflight plugin requires a preflight check specification as input. For Helm chart-based applications, the specification is defined in a Secret in the Helm chart `templates` directory. For information about how to configure preflight checks for your application, see [Defining Preflight Checks](preflight-defining).

To run preflight checks that are defined in your application Helm chart templates, your users run `helm template` to render the Helm chart templates and then provide the result to the preflight plugin as stdin. The preflight plugin automatically filters the stream of stdout from the `helm template` command to find and run any preflight specifications.

## Prerequisite

The preflight kubectl plugin is required to run preflight checks for Helm CLI installations. The preflight plugin is a client-side utility that adds a single binary to the path.

To install the preflight plugin using krew, run the following command:

```
curl https://krew.sh/preflight | bash
```

For information about the preflight plugin, including additional installation options, see [Getting Started](https://troubleshoot.sh/docs/) in the open source Troubleshoot documentation.
## Command

```
helm template HELM_CHART | kubectl preflight -
```

Where `HELM_CHART` is the Helm chart that contains the preflight specification.

For all available options with this command, see [Run Preflight Checks using the CLI](https://troubleshoot.sh/docs/preflight/cli-usage/#options) in the open source Troubleshoot documentation.

**Examples:**

```
helm template gitea-1.0.6.tgz | kubectl preflight -
```
```
helm template gitea | kubectl preflight -
```
```
helm template oci://myregistry.io/org/examplechart | kubectl preflight -
```

## Run Preflight Checks from a Release

When you promote a release that contains one or more Helm charts, the Helm charts are automatically pushed to the Replicated registry. To run preflight checks before installing a release, your users must first log in to the Replicated registry, where they can access your application Helm chart containing the preflight specification.

To run preflight checks from a release before installation:

1. In the [Vendor Portal](https://vendor.replicated.com/apps/gitea-boxer/customers), go to the **Customers** page. Click on the name of the target customer.

1. On the landing page for the customer, click **Helm install instructions**.

   The **Helm install instructions** dialog opens:

   ![Helm install instructions dialog with preflight checks](/images/helm-install-preflights.png)

   [View a larger version of this image](/images/helm-install-preflights.png)

1. Run the commands provided in the dialog:

   1. Run the first command to log in to the Replicated registry:

      ```
      helm registry login registry.replicated.com --username USERNAME --password PASSWORD
      ```

      Where:
      - `USERNAME` is the customer's email address.
      - `PASSWORD` is the customer's license ID.

      **Example:**

      ```
      helm registry login registry.replicated.com --username example@companyname.com --password 1234abcd
      ```

   1. Run the second command to install the kubectl plugin with krew:

      ```
      curl https://krew.sh/preflight | bash
      ```

   1. Run the third command to run preflight checks:

      ```
      helm template oci://registry.replicated.com/APP_SLUG/CHANNEL/CHART | kubectl preflight -
      ```

      Where:
      - `APP_SLUG` is the name of the application.
      - `CHANNEL` is the lowercased name of the release channel.
      - `CHART` is the name of the Helm chart.

      **Examples:**

      ```
      helm template oci://registry.replicated.com/gitea-app/unstable/gitea | kubectl preflight -
      ```
      ```
      helm template oci://registry.replicated.com/gitea-app/unstable/gitea --values values.yaml | kubectl preflight -
      ```

      For all available options with this command, see [Run Preflight Checks using the CLI](https://troubleshoot.sh/docs/preflight/cli-usage/#options) in the open source Troubleshoot documentation.

   1. (Optional) Run the fourth command to install the application. For more information, see [Installing with Helm](install-with-helm).

## (Optional) Save Preflight Check Results

The output of the preflight plugin shows the success, warning, or fail message for each preflight check, depending on how it was configured. You can ask your users to send you the results of the preflight checks if needed.
To save the results of preflight checks to a `.txt` file, users can press `s` when viewing the results from the CLI, as shown in the example below:

![Save output dialog](/images/helm-preflight-save-output.png)

[View a larger version of this image](/images/helm-preflight-save-output.png)

================
File: docs/vendor/preflight-sb-helm-templates-about.md
================
# Using Helm Templates in Specifications

You can use Helm templates to configure collectors and analyzers for preflight checks and support bundles for Helm installations.

Helm templates can be useful when you need to:

- Run preflight checks based on certain conditions being true or false, such as whether the customer wants to use an external database.
- Pull in user-specific information from the `values.yaml` file, such as the version a customer is using for an external database.

You can also use Helm templating with the Troubleshoot template functions for the `clusterPodStatuses` analyzer. For more information, see [Helm and Troubleshoot Template Example](#troubleshoot).

## Helm Template Example

In the following example, the `mysql` collector is included in a preflight check if the customer does not want to use the default MariaDB. This is indicated by the template `{{- if eq .Values.global.mariadb.enabled false -}}`.

This specification also takes the MySQL connection string information from the `values.yaml` file, indicated by the template `'{{ .Values.global.externalDatabase.user }}:{{ .Values.global.externalDatabase.password }}@tcp({{ .Values.global.externalDatabase.host }}:{{ .Values.global.externalDatabase.port }})/{{ .Values.global.externalDatabase.database }}?tls=false'` in the `uri` field.

Additionally, the specification verifies that the maximum number of nodes in the `values.yaml` file is not exceeded by including the template `'count() > {{ .Values.global.maxNodeCount }}'` for the `nodeResources` analyzer.

```yaml
{{- define "preflight.spec" }}
apiVersion: troubleshoot.sh/v1beta2
kind: Preflight
metadata:
  name: preflight-sample
spec:
  {{ if eq .Values.global.mariadb.enabled false }}
  collectors:
  - mysql:
      collectorName: mysql
      uri: '{{ .Values.global.externalDatabase.user }}:{{ .Values.global.externalDatabase.password }}@tcp({{ .Values.global.externalDatabase.host }}:{{ .Values.global.externalDatabase.port }})/{{ .Values.global.externalDatabase.database }}?tls=false'
  {{ end }}
  analyzers:
  - nodeResources:
      checkName: Node Count Check
      outcomes:
      - fail:
          when: 'count() > {{ .Values.global.maxNodeCount }}'
          message: "The cluster has more than {{ .Values.global.maxNodeCount }} nodes."
      - pass:
          message: You have the correct number of nodes.
  - clusterVersion:
      outcomes:
      - fail:
          when: "< 1.22.0"
          message: The application requires at least Kubernetes 1.22.0, and recommends 1.23.0.
          uri: https://kubernetes.io
      - warn:
          when: "< 1.23.0"
          message: Your cluster meets the minimum version of Kubernetes, but we recommend you update to 1.23.0 or later.
          uri: https://kubernetes.io
      - pass:
          message: Your cluster meets the recommended and required versions of Kubernetes.
  {{ if eq .Values.global.mariadb.enabled false }}
  - mysql:
      checkName: Must be MySQL 8.x or later
      collectorName: mysql
      outcomes:
      - fail:
          when: connected == false
          message: Cannot connect to MySQL server
      - fail:
          when: version < 8.x
          message: The MySQL server must be at least version 8
      - pass:
          message: The MySQL server is ready
  {{ end }}
{{- end }}
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
    app.kubernetes.io/instance: {{ .Release.Name | quote }}
    app.kubernetes.io/version: {{ .Chart.AppVersion }}
    helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    troubleshoot.sh/kind: preflight
  name: "{{ .Release.Name }}-preflight-config"
stringData:
  preflight.yaml: |
{{- include "preflight.spec" . | indent 4 }}
```

## Helm and Troubleshoot Template Example {#troubleshoot}

You can also use Helm templates with the Troubleshoot template functions to automatically add the Pod name and namespace to a message when a `clusterPodStatuses` analyzer fails. For more information about the Troubleshoot template function, see [Cluster Pod Statuses](https://troubleshoot.sh/docs/analyze/cluster-pod-statuses/) in the Troubleshoot documentation.

When you add the `clusterPodStatuses` analyzer template function values (such as `{{ .Name }}`) to your Helm template, you must encapsulate the Helm template using \{\{ ` ` \}\} so that Helm does not expand it.

The following example shows an analyzer that uses Troubleshoot templates and the override for Helm:

```yaml
# This is the support bundle config secret that will be used to generate the support bundle
apiVersion: v1
kind: Secret
metadata:
  labels:
    troubleshoot.sh/kind: support-bundle
  name: {{ .Release.Name }}-support-bundle
  namespace: {{ .Release.Namespace }}
type: Opaque
stringData:
  # This is the support bundle spec that will be used to generate the support bundle
  # Notes: we use {{ .Release.Namespace }} to ensure that the support bundle is scoped to the release namespace
  # We can use any of Helm's templating features here, including {{ .Values.someValue }}
  support-bundle-spec: |
    apiVersion: troubleshoot.sh/v1beta2
    kind: SupportBundle
    metadata:
      name: support-bundle
    spec:
      collectors:
        - clusterInfo: {}
        - clusterResources: {}
        - logs:
            selector:
              - app=someapp
            namespace: {{ .Release.Namespace }}
      analyzers:
        - clusterPodStatuses:
            name: unhealthy
            namespaces:
              - default
              - myapp-namespace
            outcomes:
              - fail:
                  when: "== CrashLoopBackOff"
                  message: {{ `Pod {{ .Namespace }}/{{ .Name }} is in a CrashLoopBackOff state.` }}
              - fail:
                  when: "== ImagePullBackOff"
                  message: {{ `Pod {{ .Namespace }}/{{ .Name }} is in an ImagePullBackOff state.` }}
              - fail:
                  when: "== Pending"
                  message: {{ `Pod {{ .Namespace }}/{{ .Name }} is in a Pending state.` }}
              - fail:
                  when: "== Evicted"
                  message: {{ `Pod {{ .Namespace }}/{{ .Name }} is in an Evicted state.` }}
              - fail:
                  when: "== Terminating"
                  message: {{ `Pod {{ .Namespace }}/{{ .Name }} is in a Terminating state.` }}
              - fail:
                  when: "== Init:Error"
                  message: {{ `Pod {{ .Namespace }}/{{ .Name }} is in an Init:Error state.` }}
              - fail:
                  when: "== Init:CrashLoopBackOff"
                  message: {{ `Pod {{ .Namespace }}/{{ .Name }} is in an Init:CrashLoopBackOff state.` }}
              - fail:
                  when: "!= Healthy" # Catch all unhealthy pods. A pod is considered healthy if it has a status of Completed, or Running and all of its containers are ready.
                  message: {{ `Pod {{ .Namespace }}/{{ .Name }} is unhealthy with a status of {{ .Status.Reason }}.` }}
```

================
File: docs/vendor/preflight-support-bundle-about.mdx
================
import Overview from "../partials/preflights/_preflights-sb-about.mdx"

# About Preflight Checks and Support Bundles

This topic provides an introduction to preflight checks and support bundles, which are provided by the [Troubleshoot](https://troubleshoot.sh/) open source project.

## Overview

<Overview/>

Preflight checks and support bundles consist of _collectors_, _redactors_, and _analyzers_ that are defined in a YAML specification. When preflight checks or support bundles are executed, data is collected, redacted, then analyzed to provide insights to users, as illustrated in the following diagram:

![Troubleshoot Workflow Diagram](/images/troubleshoot-workflow-diagram.png)

[View a larger version of this image](/images/troubleshoot-workflow-diagram.png)

For more information about each step in this workflow, see the sections below.

### Collect

During the collection phase, _collectors_ gather information from the cluster, the environment, the application, and other sources.

The data collected depends on the types of collectors that are included in the preflight or support bundle specification. For example, the Troubleshoot project provides collectors that can gather information about the Kubernetes version that is running in the cluster, information about database servers, logs from pods, and more.

For more information, see the [Collect](https://troubleshoot.sh/docs/collect/) section in the Troubleshoot documentation.

### Redact

During the redact phase, _redactors_ censor sensitive customer information from the data before analysis. By default, the following information is automatically redacted:

- Passwords
- API token environment variables in JSON
- AWS credentials
- Database connection strings
- URLs that include usernames and passwords

For Replicated KOTS installations, it is also possible to add custom redactors to redact additional data. For more information, see the [Redact](https://troubleshoot.sh/docs/redact/) section in the Troubleshoot documentation.

### Analyze

During the analyze phase, _analyzers_ use the redacted data to provide insights to users.

For preflight checks, analyzers define the pass, fail, and warning outcomes, and can also display custom messages to the user. For example, you can define a preflight check that fails if the cluster's Kubernetes version does not meet the minimum version that your application supports.

For support bundles, analyzers can be used to identify potential problems and share relevant troubleshooting guidance with users. Additionally, when a support bundle is uploaded to the Vendor Portal, it is extracted and automatically analyzed. The goal of analyzers in support bundles is to surface known issues or hints of what might be a problem to make troubleshooting easier.

For more information, see the [Analyze](https://troubleshoot.sh/docs/analyze/) section in the Troubleshoot documentation.

## Preflight Checks

This section provides an overview of preflight checks, including how preflights are defined and run.

### Overview

Preflight checks let you define requirements for the cluster where your application is installed.
When run, preflight checks provide clear feedback to your customer about any missing requirements or incompatibilities in the cluster before they install or upgrade your application. For KOTS installations, preflight checks can also be used to block the deployment of the application if one or more requirements are not met.

Thorough preflight checks provide increased confidence that an installation or upgrade will succeed and help prevent support escalations.

### About Host Preflights {#host-preflights}

_Host preflight checks_ automatically run during [Replicated Embedded Cluster](/vendor/embedded-overview) and [Replicated kURL](/vendor/kurl-about) installations on a VM or bare metal server. The purpose of host preflight checks is to verify that the user's installation environment meets the requirements of the Embedded Cluster or kURL installer, such as checking the number of CPU cores in the system, available disk space, and memory usage. If any of the host preflight checks fail, installation is blocked and a message describing the failure is displayed.

Host preflight checks are separate from any application-specific preflight checks that are defined in the release, which run in the Admin Console before the application is deployed with KOTS. Both Embedded Cluster and kURL have default host preflight checks that are specific to the requirements of the given installer. For kURL installations, it is possible to customize the default host preflight checks.

For more information about the default Embedded Cluster host preflight checks, see [Host Preflight Checks](/vendor/embedded-using#about-host-preflight-checks) in _Using Embedded Cluster_.

For more information about kURL host preflight checks, including information about how to customize the defaults, see [Customizing Host Preflight Checks for kURL](/vendor/preflight-host-preflights).

### Defining Preflights

To add preflight checks for your application, create a Preflight YAML specification that defines the collectors and analyzers that you want to include.

For information about how to add preflight checks to your application, including examples, see [Defining Preflight Checks](preflight-defining).

### Blocking Installation with Required (Strict) Preflights

For applications installed with KOTS, it is possible to block the deployment of a release if a preflight check fails. This is helpful when it is necessary to prevent an installation or upgrade from continuing unless a given requirement is met.

You can add required preflight checks for an application by including `strict: true` for the target analyzer in the preflight specification. For more information, see [Block Installation with Required Preflights](preflight-defining#strict) in _Defining Preflight Checks_.

### Running Preflights

This section describes how users can run preflight checks for KOTS and Helm installations.

#### Replicated Installations

For Replicated installations with Embedded Cluster, KOTS, or kURL, preflight checks run automatically as part of the installation process. The results of the preflight checks are displayed either in the KOTS Admin Console or in the KOTS CLI, depending on the installation method.

Additionally, users can access preflight checks from the Admin Console after installation to view their results and optionally re-run the checks.
The following shows an example of the results of preflight checks displayed in the Admin Console during installation:

![Preflight results in Admin Console](/images/preflight-warning.png)

[View a larger version of this image](/images/preflight-warning.png)

#### Helm Installations

For installations with Helm, the preflight kubectl plugin is required to run preflight checks. The preflight plugin is a client-side utility that adds a single binary to the path. For more information, see [Getting Started](https://troubleshoot.sh/docs/) in the Troubleshoot documentation.

Users can optionally run preflight checks before they run `helm install`. The results of the preflight checks are then displayed through the CLI, as shown in the example below:

![Save output dialog](/images/helm-preflight-save-output.png)

[View a larger version of this image](/images/helm-preflight-save-output.png)

For more information, see [Running Preflight Checks for Helm Installations](preflight-running).

## Support Bundles

This section provides an overview of support bundles, including how support bundles are customized and generated.

### Overview

Support bundles collect and analyze troubleshooting data from customer environments, helping both users and support teams diagnose problems with application deployments.

Support bundles can collect a variety of important cluster-level data from customer environments, such as:

* Pod logs
* Node resources and status
* The status of replicas in a Deployment
* Cluster information
* Resources deployed to the cluster
* The history of Helm releases installed in the cluster

Support bundles can also be used for more advanced use cases, such as checking that a command successfully executes in a pod in the cluster, or that an HTTP request returns a successful response.

Support bundles then use the data collected to provide insights to users on potential problems or suggested troubleshooting steps. The troubleshooting data collected and analyzed by support bundles not only helps users to self-resolve issues with their application deployment, but also helps reduce the amount of time required by support teams to resolve requests by ensuring they have access to all the information they need up front.

### About Host Support Bundles

For installations on VMs or bare metal servers with [Replicated Embedded Cluster](/vendor/embedded-overview) or [Replicated kURL](/vendor/kurl-about), it is possible to generate a support bundle that includes host-level information to help troubleshoot failures related to host configuration like DNS, networking, or storage problems.

For Embedded Cluster installations, a default spec can be used to generate support bundles that include cluster- and host-level information. See [Generating Host Bundles for Embedded Cluster](/vendor/support-bundle-embedded).

For kURL installations, vendors can customize a host support bundle spec for their application. See [Generating Host Bundles for kURL](/vendor/support-host-support-bundles).

### Customizing Support Bundles

To enable support bundles for your application, add a support bundle YAML specification to a release. An empty support bundle specification automatically includes several default collectors and analyzers. You can also optionally customize the support bundle specification by adding, removing, or editing collectors and analyzers.

For more information, see [Adding and Customizing Support Bundles](support-bundle-customizing).
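For reference, a minimal support bundle specification that relies entirely on the default collectors and analyzers might look like this sketch:

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: SupportBundle
metadata:
  name: example
spec:
  # Empty lists mean only the default collectors and analyzers are used
  collectors: []
  analyzers: []
```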
### Generating Support Bundles

Users generate support bundles as `tar.gz` files from the command line, using the support-bundle kubectl plugin. Your customers can share their support bundles with your team by sending you the resulting `tar.gz` file.

KOTS users can also generate and share support bundles from the KOTS Admin Console.

For more information, see [Generating Support Bundles](support-bundle-generating).

================
File: docs/vendor/private-images-about.md
================
# About the Replicated Proxy Registry

This topic describes how the Replicated proxy registry can be used to grant proxy access to your application's private images or allow pull-through access of public images.

## Overview

If your application images are available in a private image registry exposed to the internet, such as Docker Hub or Amazon Elastic Container Registry (ECR), then the Replicated proxy registry can grant proxy, or _pull-through_, access to the images without exposing registry credentials to your customers. When you use the proxy registry, you do not have to modify the process that you already use to build and push images to deploy your application.

To grant proxy access, the proxy registry uses the customer licenses that you create in the Replicated Vendor Portal. This allows you to revoke a customer’s ability to pull private images by editing their license, rather than having to manage image access through separate identity or authentication systems. For example, when a trial license expires, the customer's ability to pull private images is automatically revoked.

The following diagram demonstrates how the proxy registry pulls images from your external registry, and how deployed instances of your application pull images from the proxy registry:

![Proxy registry workflow diagram](/images/private-registry-diagram.png)

[View a larger version of this image](/images/private-registry-diagram-large.png)

## About Enabling the Proxy Registry

The proxy registry requires read-only credentials to your private registry to access your application images. See [Connecting to an External Registry](/vendor/packaging-private-images).

After connecting your registry, the steps to enable the proxy registry vary depending on your application deployment method. For more information, see:

* [Using the Proxy Registry with KOTS Installations](/vendor/private-images-kots)
* [Using the Proxy Registry with Helm Installations](/vendor/helm-image-registry)

## About Allowing Pull-Through Access of Public Images

Using the Replicated proxy registry to grant pull-through access to public images can simplify network access requirements for your customers, as they only need to whitelist a single domain (either `proxy.replicated.com` or your custom domain) instead of multiple registry domains.

For more information about how to pull public images through the proxy registry, see [Connecting to a Public Registry through the Proxy Registry](/vendor/packaging-public-images).

================
File: docs/vendor/private-images-kots.mdx
================
import Deprecated from "../partials/helm/_replicated-deprecated.mdx"
import StepCreds from "../partials/proxy-service/_step-creds.mdx"
import StepCustomDomain from "../partials/proxy-service/_step-custom-domain.mdx"

# Using the Proxy Registry with KOTS Installations

This topic describes how to use the Replicated proxy registry with applications deployed with Replicated KOTS.
- -## Overview - -Replicated KOTS automatically creates the required image pull secret for accessing the Replicated proxy registry during application deployment. When possible, KOTS also automatically rewrites image names in the application manifests to the location of the image at `proxy.replicated.com` or your custom domain. - -### Image Pull Secret - -During application deployment, KOTS automatically creates an `imagePullSecret` with `type: kubernetes.io/dockerconfigjson` that is based on the customer license. This secret is used to authenticate with the proxy registry and grant proxy access to private images. - -For information about how Kubernetes uses the `kubernetes.io/dockerconfigjson` Secret type to authenticate to a private image registry, see [Pull an Image from a Private Registry](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) in the Kubernetes documentation. - -### Image Location Patching (Standard Manifests and HelmChart v1) - -For applications packaged with standard Kubernetes manifests (or Helm charts deployed with the [HelmChart v1](/reference/custom-resource-helmchart) custom resource), KOTS automatically patches image names to the location of the image at `proxy.replicated.com` or your custom domain during deployment. If KOTS receives a 401 response when attempting to load image manifests using the image reference from the PodSpec, it assumes that this is a private image that must be proxied through the proxy registry. - -KOTS uses Kustomize to patch the `midstream/kustomization.yaml` file to change the image name during deployment to reference the proxy registry. For example, a PodSpec for a Deployment references a private image hosted at `quay.io/my-org/api:v1.0.1`: - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: example -spec: - template: - spec: - containers: - - name: api - image: quay.io/my-org/api:v1.0.1 -``` - -When this application is deployed, KOTS detects that it cannot access -the image at quay.io. So, it creates a patch in the `midstream/kustomization.yaml` -file that changes the image name in all manifest files for the application. This causes the container runtime in the cluster to use the proxy registry to pull the images, using the license information provided to KOTS for authentication. - -```yaml -apiVersion: kustomize.config.k8s.io/v1beta1 -bases: -- ../../base -images: -- name: quay.io/my-org/api:v1.0.1 - newName: proxy.replicated.com/proxy/my-kots-app/quay.io/my-org/api -``` - -## Enable the Proxy Registry - -This section describes how to enable the proxy registry for applications deployed with KOTS, including how to ensure that image names are rewritten and that the required image pull secret is provided. - -To enable the proxy registry: - -1. - -1. - -1. Rewrite image names to the location of the image at `proxy.replicated.com` or your custom domain. Also, ensure that the correct image pull secret is provided for all private images. The steps required to configure image names and add the image pull secret vary depending on your application type: - - * **HelmChart v2**: For Helm charts deployed with the [HelmChart v2](/reference/custom-resource-helmchart-v2) custom resource, configure the HelmChart v2 custom resource to dynamically update image names in your Helm chart and to inject the image pull secret that is automatically created by KOTS. For instructions, see [Configuring the HelmChart Custom Resource v2](/vendor/helm-native-v2-using).
- - * **Standard Manifests or HelmChart v1**: For standard manifest-based applications or Helm charts deployed with the [HelmChart v1](/reference/custom-resource-helmchart) custom resource, no additional configuration is required. KOTS automatically rewrites image names and injects image pull secrets during deployment for these application types. - - :::note - - ::: - - * **Kubernetes Operators**: For applications packaged with Kubernetes Operators, KOTS cannot modify pods that are created at runtime by the Operator. To support the use of private images in all environments, the Operator code should use KOTS functionality to determine the image name and image pull secrets for all pods when they are created. For instructions, see [Referencing Images](/vendor/operator-referencing-images) in the _Packaging Kubernetes Operators_ section. - -1. If you are deploying Pods to namespaces other than the application namespace, add the namespace to the `additionalNamespaces` attribute of the KOTS Application custom resource. This ensures that KOTS can provision the `imagePullSecret` in the namespace to allow the Pod to pull the image. For instructions, see [Defining Additional Namespaces](operator-defining-additional-namespaces). - -================ -File: docs/vendor/private-images-replicated.mdx -================ -import TeamTokenNote from "../partials/vendor-api/_team-token-note.mdx" - -# Using the Replicated Registry for KOTS Installations - -This topic describes how to push images to the Replicated private registry. - -## Overview - -For applications installed with KOTS, you can host private images on the Replicated registry. Hosting your images on the Replicated registry is useful if you do not already have your images in an existing private registry. It is also useful for testing purposes. - -Images pushed to the Replicated registry are displayed on the **Images** page in the Vendor Portal: - -![Replicated Private Registry section of the vendor portal Images page](/images/images-replicated-registry.png) - -[View a larger version of this image](/images/images-replicated-registry.png) - -For information about security for the Replicated registry, see [Replicated Registry Security](packaging-private-registry-security). - -## Limitations - -The Replicated registry has the following limitations: - -* You cannot delete images from the Replicated registry. As a workaround, you can push a new, empty image to the registry using the same tags as the target image. Replicated does not recommend removing tags from the registry because it could break older releases of your application. - -* When using Docker Build to build and push images to the Replicated registry, provenance attestations are not supported. To avoid a 400 error, include the `--provenance=false` flag to disable all provenance attestations. For more information, see [docker buildx build](https://docs.docker.com/engine/reference/commandline/buildx_build/#provenance) and [Provenance Attestations](https://docs.docker.com/build/attestations/slsa-provenance/) in the Docker documentation. - -* You might encounter a timeout error when pushing images with layers close to or exceeding 2GB in size, such as: "received unexpected HTTP status: 524." To work around this, reduce the size of the image layers and push the image again. If the 524 error persists, continue decreasing the layer sizes until the push is successful. - -## Push Images to the Replicated Registry - -This procedure describes how to tag and push images to the Replicated registry. 
For more information about building, tagging, and pushing Docker images, see the -[Docker CLI documentation](https://docs.docker.com/engine/reference/commandline/cli/). - -To push images to the Replicated registry: - -1. Do one of the following to connect with the `registry.replicated.com` container registry: - * **(Recommended) Log in with a user token**: Use `docker login registry.replicated.com` with your Vendor Portal email as the username and a Vendor Portal user token as the password. For more information, see [User API Tokens](replicated-api-tokens#user-api-tokens) in _Generating API Tokens_. - * **Log in with a service account token:** Use `docker login registry.replicated.com` with a Replicated Vendor Portal service account token as the password. If you have an existing team token, you can use that instead. You can use any string as the username. For more information, see [Service Accounts](replicated-api-tokens#service-accounts) in _Generating API Tokens_. - - - - * **Log in with your credentials**: Use `docker login registry.replicated.com` with your Vendor Portal email and password as the credentials. - -1. Tag your private image with the Replicated registry hostname in the standard -Docker format: - - ``` - docker tag IMAGE_NAME registry.replicated.com/APPLICATION_SLUG/TARGET_IMAGE_NAME:TAG - ``` - - Where: - * `IMAGE_NAME` is the name of the existing private image for your application. - * `APPLICATION_SLUG` is the unique slug for the application. You can find the application slug on the **Application Settings** page in the Vendor Portal. For more information, see [Get the Application Slug](/vendor/vendor-portal-manage-app#slug) in _Managing Applications_. - * `TARGET_IMAGE_NAME` is a name for the image. Replicated recommends that the `TARGET_IMAGE_NAME` is the same as the `IMAGE_NAME`. - * `TAG` is a tag for the image. - - For example: - - ```bash - docker tag worker registry.replicated.com/myapp/worker:1.0.1 - ``` - -1. Push your private image to the Replicated registry using the following format: - - ``` - docker push registry.replicated.com/APPLICATION_SLUG/TARGET_IMAGE_NAME:TAG - ``` - Where: - * `APPLICATION_SLUG` is the unique slug for the application. - * `TARGET_IMAGE_NAME` is a name for the image. Use the same name that you used when tagging the image in the previous step. - * `TAG` is a tag for the image. Use the same tag that you used when tagging the image in the previous step. - - For example: - - ```bash - docker push registry.replicated.com/myapp/worker:1.0.1 - ``` - -1. In the [Vendor Portal](https://vendor.replicated.com/), go to **Images** and scroll down to the **Replicated Private Registry** section to confirm that the image was pushed. - -================ -File: docs/vendor/private-images-tags-digests.md -================ -# Using Image Tags and Digests - -This topic describes using image tags and digests with your application images. It includes information about when image tags and digests are supported, and how to enable support for image digests in air gap bundles. - -## Support for Image Tags and Digests - -The following table describes the use cases in which image tags and digests are supported:
| Installation | Support for Image Tags | Support for Image Digests |
|--------------|------------------------|---------------------------|
| Online | Supported by default | Supported by default |
| Air Gap | Supported by default for Replicated KOTS installations | Supported for applications on KOTS v1.82.0 and later when the **Enable new air gap bundle format** toggle is enabled on the channel. For more information, see [Using Image Digests in Air Gap Installations](#digests-air-gap) below. |

:::note
You can use image tags and image digests together in any case where both are supported.
:::

## Using Image Digests in Air Gap Installations {#digests-air-gap}

For applications installed with KOTS v1.82.0 or later, you can enable a format for air gap bundles that supports the use of image digests. This air gap bundle format also ensures that identical image layers are not duplicated, resulting in a smaller air gap bundle size.

You can enable or disable this air gap bundle format using the **Enable new air gap bundle format** toggle in the settings for any channel in the Vendor Portal. The **Enable new air gap bundle format** toggle is enabled by default.

When you enable **Enable new air gap bundle format** on a channel, all air gap bundles that you build or rebuild on that channel use the updated air gap bundle format.

If users on a version of KOTS earlier than v1.82.0 attempt to install or upgrade an application with an air gap bundle that uses the new format, then the Admin Console displays an error message when they attempt to upload the bundle.

To enable the new air gap bundle format on a channel:

1. In the Replicated [Vendor Portal](https://vendor.replicated.com/channels), go to the Channels page and click the edit icon in the top right of the channel where you want to use the new air gap bundle format.
1. Enable the **Enable new air gap bundle format** toggle.
1. (Recommended) To prevent users on a version of KOTS earlier than v1.82.0 from attempting to upgrade with an air gap bundle that uses the new air gap bundle format, set `minKotsVersion` to "1.82.0" in the Application custom resource manifest file.

   `minKotsVersion` defines the minimum version of KOTS required by the application release. Including `minKotsVersion` displays a warning in the Admin Console when users attempt to install or upgrade the application if they are not on the specified minimum version or later. For more information, see [Setting Minimum and Target Versions for KOTS](packaging-kots-versions).

   **Example**:

   ```yaml
   apiVersion: kots.io/v1beta1
   kind: Application
   metadata:
     name: my-application
   spec:
     ...
     minKotsVersion: "1.82.0"
     ...
   ```

1. Test your changes:
   1. Save and promote the release to a development environment.
   1. On the channel where you enabled **Enable new air gap bundle format**, click **Release history**. On the Release History page, click **Build** next to the latest release to create an air gap bundle with the new format.

      ![Vendor portal release history page](../../static/images/airgap-download-bundle.png)

   1. Click **Download Airgap Bundle**.
   1. Install or upgrade the application with version 1.82.0 or later of the Admin Console or the KOTS CLI. Upload the new air gap bundle to confirm that the installation or upgrade completes successfully.
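As a general reference for this topic, the two image reference styles look like the following sketch. The repository and the digest value are made up for illustration:

```yaml
# Referencing an image by tag (a mutable pointer that can be re-pushed):
image: quay.io/my-org/api:v1.0.1
# Referencing an image by digest (immutable, content-addressed):
image: quay.io/my-org/api@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2
```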
- -================ -File: docs/vendor/quick-start.mdx -================ -import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" -import HelmPackage from "../partials/helm/_helm-package.mdx" -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import HelmChartCr from "../partials/getting-started/_gitea-helmchart-cr-ec.mdx" -import KotsCr from "../partials/getting-started/_gitea-kots-app-cr-ec.mdx" -import K8sCr from "../partials/getting-started/_gitea-k8s-app-cr.mdx" -import EcCr from "../partials/embedded-cluster/_ec-config.mdx" -import Requirements from "../partials/embedded-cluster/_requirements.mdx" - -# Replicated Quick Start - -Welcome! This topic provides a quick start workflow to help new users learn about the Replicated Platform. Complete this quick start before you onboard your application to the platform. - -## Introduction - -This quick start shows how to create, install, and update releases for a sample Helm chart in the Replicated Platform. You will repeat these same basic steps to create and test releases throughout the onboarding process to integrate Replicated features with your own application. - -The goals of this quick start are to introduce new Replicated users to the following common tasks for the purpose of preparing to onboard to the Replicated Platform: - -* Working with _applications_, _channels_, _releases_, and _customers_ in the Replicated Vendor Portal - -* Working with the Replicated CLI - -* Installing and updating applications on a VM with Replicated Embedded Cluster - -* Managing an installation with the Replicated KOTS Admin Console - -## Set Up the Environment - -Before you begin, ensure that you have access to a VM that meets the requirements for Embedded Cluster: - - - -## Quick Start - -1. Create an account in the Vendor Portal. You can either create a new team or join an existing team. For more information, see [Creating a Vendor Account](vendor-portal-creating-account). - -1. Create an application using the Replicated CLI: - - 1. On your local machine, install the Replicated CLI: - - ```bash - brew install replicatedhq/replicated/cli - ``` - For more installation options, see [Installing the Replicated CLI](/reference/replicated-cli-installing). - - 1. Authorize the Replicated CLI: - - ```bash - replicated login - ``` - In the browser window that opens, complete the prompts to log in to your Vendor Portal account and authorize the CLI. - - 1. Create an application named `Gitea`: - - ```bash - replicated app create Gitea - ``` - - 1. Set the `REPLICATED_APP` environment variable to the application that you created: - - ```bash - export REPLICATED_APP=APP_SLUG - ``` - Where `APP_SLUG` is the unique application slug provided in the output of the `app create` command. For example, `export REPLICATED_APP=gitea-kite`. - - This allows you to interact with the application using the Replicated CLI without needing to use the `--app` flag with every command. - -1. Get the sample Bitnami Gitea Helm chart and add the Replicated SDK as a dependency: - - 1. Run the following command to pull and untar version 1.0.6 of the Bitnami Gitea Helm chart: - - ``` - helm pull --untar oci://registry-1.docker.io/bitnamicharts/gitea --version 1.0.6 - ``` - For more information about this chart, see the [bitnami/gitea](https://github.com/bitnami/charts/tree/main/bitnami/gitea) repository in GitHub. - - 1. Change to the new `gitea` directory that was created: - - ```bash - cd gitea - ``` - - 1. 
In the Helm chart `Chart.yaml`, add the Replicated SDK as a dependency: - - - - The Replicated SDK is a Helm chart that provides access to Replicated features and can be installed as a small service alongside your application. For more information, see [About the Replicated SDK](/vendor/replicated-sdk-overview). - - 1. Update dependencies and package the Helm chart to a `.tgz` chart archive: - - ```bash - helm package -u . - ``` - Where `-u` or `--dependency-update` is an option for the helm package command that updates chart dependencies before packaging. For more information, see [Helm Package](https://helm.sh/docs/helm/helm_package/) in the Helm documentation. - -1. Add the chart archive to a release: - - 1. In the `gitea` directory, create a subdirectory named `manifests`: - - ``` - mkdir manifests - ``` - - You will add the files required to support installation with Replicated KOTS and Replicated Embedded Cluster to this subdirectory. - - 1. Move the Helm chart archive that you created to `manifests`: - - ``` - mv gitea-1.0.6.tgz manifests - ``` - - 1. In `manifests`, create the following YAML files: - ``` - cd manifests - ``` - ``` - touch gitea.yaml kots-app.yaml k8s-app.yaml embedded-cluster.yaml - ``` - - 1. In each of the files that you created, paste the corresponding YAML provided in the tabs below: - - - -
    **`gitea.yaml`** (KOTS HelmChart custom resource): The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart. The `name` and `chartVersion` listed in the HelmChart custom resource must match the name and version of a Helm chart archive in the release. The `optionalValues` field sets the specified Helm values when a given conditional statement evaluates to true. In this case, if the application is installed with Embedded Cluster, then the Gitea service type is set to `NodePort` and the node port is set to `"32000"`. This will allow Gitea to be accessed from the local machine after deployment for the purpose of this quick start.

    **`kots-app.yaml`** (KOTS Application custom resource): The KOTS Application custom resource enables features in the Replicated Admin Console such as branding, release notes, application status indicators, and custom graphs. The YAML for this file provides a name for the application to display in the Admin Console, adds a custom status informer that displays the status of the gitea Deployment resource in the Admin Console dashboard, adds a custom application icon, and adds the port where the Gitea service can be accessed so that the user can open the application after installation.

    **`k8s-app.yaml`** (Kubernetes SIG Application custom resource): The Kubernetes SIG Application custom resource supports functionality such as including buttons and links on the Replicated Admin Console dashboard. The YAML for this file adds an Open App button to the Admin Console dashboard that opens the application using the service port defined in the KOTS Application custom resource.

    **`embedded-cluster.yaml`** (Embedded Cluster Config): To install your application with Embedded Cluster, an Embedded Cluster Config must be present in the release. At minimum, the Embedded Cluster Config sets the version of Embedded Cluster that will be installed. You can also define several characteristics about the cluster.

    *(The full YAML for each file is provided by the partials imported at the top of this topic.)*
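    As a rough illustration of the first of these files, a KOTS HelmChart custom resource along the lines described above might look like the following sketch. This is not the exact YAML from the referenced partial; in particular, the key names under `values` for the Bitnami Gitea chart are assumptions:

    ```yaml
    # Sketch: conditionally expose the Gitea service on NodePort 32000 when
    # the install target is Embedded Cluster. Value keys are illustrative.
    apiVersion: kots.io/v1beta2
    kind: HelmChart
    metadata:
      name: gitea
    spec:
      chart:
        name: gitea          # must match the chart name in the archive
        chartVersion: 1.0.6  # must match the chart version in the archive
      optionalValues:
        - when: 'repl{{ eq Distribution "embedded-cluster" }}'
          recursiveMerge: true
          values:
            service:
              type: NodePort
              nodePorts:
                http: "32000"
    ```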
- - 1. Lint the YAML files: - - ```bash - replicated release lint --yaml-dir . - ``` - **Example output:** - ```bash - RULE TYPE FILENAME LINE MESSAGE - config-spec warn Missing config spec - preflight-spec warn Missing preflight spec - troubleshoot-spec warn Missing troubleshoot spec - nonexistent-status-informer-object warn kots-app.yaml 8 Status informer points to a nonexistent kubernetes object. If this is a Helm resource, this warning can be ignored. - ``` - :::note - You can ignore any warning messages for the purpose of this quick start. - ::: - - 1. Create the release and promote it to the Unstable channel: - - ```bash - replicated release create --yaml-dir . --promote Unstable - ``` - **Example output**: - ```bash - • Reading manifests from . ✓ - • Creating Release ✓ - • SEQUENCE: 1 - • Promoting ✓ - • Channel 2kvjwEj4uBaCMoTigW5xty1iiw6 successfully set to release 1 - ``` - -1. Create a customer so that you can install the release on your VM with Embedded Cluster: - - 1. In the [Vendor Portal](https://vendor.replicated.com), under the application drop down, select the Gitea application that you created. - - App drop down - - [View a larger version of this image](/images/quick-start-select-gitea-app.png) - - 1. Click **Customers > Create customer**. - - The **Create a new customer** page opens: - - ![Create a new customer page in the Vendor Portal](/images/create-customer.png) - - [View a larger version of this image](/images/create-customer.png) - - 1. For **Customer name**, enter a name for the customer. For example, `Example Customer`. - - 1. For **Channel**, select **Unstable**. This allows the customer to install releases promoted to the Unstable channel. - - 1. For **License type**, select **Development**. - - 1. For **License options**, enable the following entitlements: - * **KOTS Install Enabled** - * **Embedded Cluster Enabled** - - 1. Click **Save Changes**. - -1. Install the application with Embedded Cluster: - - 1. On the page for the customer that you created, click **Install instructions > Embedded Cluster**. - - ![Customer install instructions dropdown](/images/customer-install-instructions-dropdown.png) - - [View a larger image](/images/customer-install-instructions-dropdown.png) - - 1. On the command line, SSH onto your VM and run the commands in the **Embedded cluster install instructions** dialog to download the latest release, extract the installation assets, and install. - - embedded cluster install instructions dialog - - [View a larger version of this image](/images/embedded-cluster-install-dialog-latest.png) - - 1. When prompted, enter a password for accessing the Admin Console. - - The installation command takes a few minutes to complete. - - **Example output:** - - ```bash - ? Enter an Admin Console password: ******** - ? Confirm password: ******** - ✔ Host files materialized! - ✔ Running host preflights - ✔ Node installation finished! - ✔ Storage is ready! - ✔ Embedded Cluster Operator is ready! - ✔ Admin Console is ready! - ✔ Additional components are ready! - Visit the Admin Console to configure and install gitea-kite: http://104.155.145.60:30000 - ``` - - At this point, the cluster is provisioned and the Admin Console is deployed, but the application is not yet installed. - - 1. Go to the URL provided in the output to access the Admin Console. - - 1. On the Admin Console landing page, click **Start**. - - 1. On the **Secure the Admin Console** screen, review the instructions and click **Continue**.
In your browser, follow the instructions that were provided on the **Secure the Admin Console** screen to bypass the warning. - - 1. On the **Certificate type** screen, either select **Self-signed** to continue using the self-signed Admin Console certificate or click **Upload your own** to upload your own private key and certificate. - - By default, a self-signed TLS certificate is used to secure communication between your browser and the Admin Console. You will see a warning in your browser every time you access the Admin Console unless you upload your own certificate. - - 1. On the login page, enter the Admin Console password that you created during installation and click **Log in**. - - 1. On the **Configure the cluster** screen, you can view details about the VM where you installed, including its node role, status, CPU, and memory. You can also optionally add nodes on this page before deploying the application. Click **Continue**. - - The Admin Console dashboard opens. - - 1. On the Admin Console dashboard, next to the version, click **Deploy** and then **Yes, Deploy**. - - The application status changes from Missing to Unavailable while the `gitea` Deployment is being created. - - 1. After a few minutes, when the application status is Ready, click **Open App** to view the Gitea application in a browser. - - For example: - - ![Admin console dashboard showing ready status](/images/gitea-ec-ready.png) - - [View a larger version of this image](/images/gitea-ec-ready.png) - - Gitea app landing page - - [View a larger version of this image](/images/gitea-app.png) - -1. Return to the Vendor Portal and go to **Customers**. Under the name of the customer, confirm that you can see an active instance. - - This instance telemetry is automatically collected and sent back to the Vendor Portal by both KOTS and the Replicated SDK. For more information, see [About Instance and Event Data](/vendor/instance-insights-event-data). - -1. Under **Instance ID**, click on the ID to view additional insights including the versions of Kubernetes and the Replicated SDK running in the cluster where you installed the application. For more information, see [Instance Details](/vendor/instance-insights-details). - -1. Create a new release that adds preflight checks to the application: - - 1. In your local filesystem, go to the `gitea` directory. - - 1. Create a `gitea-preflights.yaml` file in the `templates` directory: - - ``` - touch templates/gitea-preflights.yaml - ``` - - 1. In the `gitea-preflights.yaml` file, add the following YAML to create a Kubernetes Secret with a simple preflight spec: - - ```yaml - apiVersion: v1 - kind: Secret - metadata: - labels: - troubleshoot.sh/kind: preflight - name: "{{ .Release.Name }}-preflight-config" - stringData: - preflight.yaml: | - apiVersion: troubleshoot.sh/v1beta2 - kind: Preflight - metadata: - name: preflight-sample - spec: - collectors: - - http: - collectorName: slack - get: - url: https://api.slack.com/methods/api.test - analyzers: - - textAnalyze: - checkName: Slack Accessible - fileName: slack.json - regex: '"status": 200,' - outcomes: - - pass: - when: "true" - message: "Can access the Slack API" - - fail: - when: "false" - message: "Cannot access the Slack API. Check that the server can reach the internet and check [status.slack.com](https://status.slack.com)."
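        # Commentary (added for this quick start, not part of the original spec):
        # the `http` collector above saves the Slack API response as slack.json,
        # and the `textAnalyze` analyzer greps that file for the regex to choose
        # the pass or fail outcome shown to the user before deployment.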
- ``` - The YAML above defines a preflight check that confirms that an HTTP request to the Slack API at `https://api.slack.com/methods/api.test` made from the cluster returns a successful response of `"status": 200,`. - - 1. In the `Chart.yaml` file, increment the version to 1.0.7: - - ```yaml - # Chart.yaml - version: 1.0.7 - ``` - - 1. Update dependencies and package the chart to a `.tgz` chart archive: - - ```bash - helm package -u . - ``` - - 1. Move the chart archive to the `manifests` directory: - - ```bash - mv gitea-1.0.7.tgz manifests - ``` - - 1. In the `manifests` directory, open the KOTS HelmChart custom resource (`gitea.yaml`) and update the `chartVersion`: - - ```yaml - # gitea.yaml KOTS HelmChart - chartVersion: 1.0.7 - ``` - - 1. Remove the chart archive for version 1.0.6 of the Gitea chart from the `manifests` directory: - - ``` - rm gitea-1.0.6.tgz - ``` - - 1. From the `manifests` directory, create and promote a new release, setting the version label of the release to `0.0.2`: - - ```bash - replicated release create --yaml-dir . --promote Unstable --version 0.0.2 - ``` - **Example output**: - ```bash - • Reading manifests from . ✓ - • Creating Release ✓ - • SEQUENCE: 2 - • Promoting ✓ - • Channel 2kvjwEj4uBaCMoTigW5xty1iiw6 successfully set to release 2 - ``` - -1. On your VM, update the application instance to the new version that you just promoted: - - 1. In the Admin Console, go to the **Version history** tab. - - The new version is displayed automatically. - - 1. Click **Deploy** next to the new version. - - The Embedded Cluster upgrade wizard opens. - - 1. In the Embedded Cluster upgrade wizard, on the **Preflight checks** screen, note that the "Slack Accessible" preflight check that you added was successful. Click **Next: Confirm and deploy**. - - ![preflight page of the embedded cluster upgrade wizard](/images/quick-start-ec-upgrade-wizard-preflight.png) - - [View a larger version of this image](/images/quick-start-ec-upgrade-wizard-preflight.png) - - :::note - The **Config** screen in the upgrade wizard is bypassed because this release does not contain a KOTS Config custom resource. The KOTS Config custom resource is used to set up the Config screen in the KOTS Admin Console. - ::: - - 1. On the **Confirm and Deploy** page, click **Deploy**. - -1. Reset and reboot the VM to remove the installation: - - ```bash - sudo ./APP_SLUG reset - ``` - Where `APP_SLUG` is the unique slug for the application. - - :::note - You can find the application slug by running `replicated app ls` on your local machine. - ::: - -## Next Steps - -Congratulations! As part of this quick start, you: -* Added the Replicated SDK to a Helm chart -* Created a release with the Helm chart -* Installed the release on a VM with Embedded Cluster -* Viewed telemetry for the installed instance in the Vendor Portal -* Created a new release to add preflight checks to the application -* Updated the application from the Admin Console - -Now that you are familiar with the workflow of creating, installing, and updating releases, you can begin onboarding your own application to the Replicated Platform. - -To get started, see [Replicated Onboarding](replicated-onboarding). 
- -## Related Topics - -For more information about the Replicated Platform features mentioned in this quick start, see: - -* [About Distributing Helm Charts with KOTS](/vendor/helm-native-about) -* [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about) -* [About the Replicated SDK](/vendor/replicated-sdk-overview) -* [Introduction to KOTS](/intro-kots) -* [Managing Releases with the CLI](/vendor/releases-creating-cli) -* [Packaging a Helm Chart for a Release](/vendor/helm-install-release) -* [Using Embedded Cluster](/vendor/embedded-overview) - -## Related Tutorials - -For additional tutorials related to this quick start, see: - -* [Deploying a Helm Chart on a VM with Embedded Cluster](/vendor/tutorial-embedded-cluster-setup) -* [Adding Preflight Checks to a Helm Chart](/vendor/tutorial-preflight-helm-setup) -* [Deploying a Helm Chart with KOTS and the Helm CLI](/vendor/tutorial-kots-helm-setup) - -================ -File: docs/vendor/releases-about.mdx -================ -import ChangeChannel from "../partials/customers/_change-channel.mdx" -import RequiredReleasesLimitations from "../partials/releases/_required-releases-limitations.mdx" -import RequiredReleasesDescription from "../partials/releases/_required-releases-description.mdx" -import VersionLabelReqsHelm from "../partials/releases/_version-label-reqs-helm.mdx" - -# About Channels and Releases - -This topic describes channels and releases, including information about the **Releases** and **Channels** pages in the Replicated Vendor Portal. - -## Overview - -A _release_ represents a single version of your application. Each release is promoted to one or more _channels_. Channels provide a way to progress releases through the software development lifecycle: from internal testing, to sharing with early-adopters, and finally to making the release generally available. - -Channels also control which customers are able to install a release. You assign each customer to a channel to define the releases that the customer can access. For example, a customer assigned to the Stable channel can only install releases that are promoted to the Stable channel, and cannot see any releases promoted to other channels. For more information about assigning customers to channels, see [Channel Assignment](licenses-about#channel-assignment) in _About Customers_. - -Using channels and releases helps you distribute versions of your application to the right customer segments, without needing to manage different release workflows. - -You can manage channels and releases with the Vendor Portal, the Replicated CLI, or the Vendor API v3. For more information about creating and managing releases or channels, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Creating and Editing Channels](releases-creating-channels). - -## About Channels - -This section provides additional information about channels, including details about the default channels in the Vendor Portal and channel settings. - -### Unstable, Beta, and Stable Channels - -Replicated includes the following channels by default: - -* **Unstable**: The Unstable channel is designed for internal testing and development. You can create and assign an internal test customer to the Unstable channel to install in a development environment. Replicated recommends that you do not license any of your external users against the Unstable channel. -* **Beta**: The Beta channel is designed for release candidates and early-adopting customers. 
Replicated recommends that you promote a release to the Beta channel after it has passed automated testing in the Unstable channel. You can also choose to license early-adopting customers against this channel. -* **Stable**: The Stable channel is designed for releases that are generally available. Replicated recommends that you assign most of your customers to the Stable channel. Customers licensed against the Stable channel only receive application updates when you promote a new release to the Stable channel. - -You can archive or edit any of the default channels, and create new channels. For more information, see [Creating and Editing Channels](releases-creating-channels). - -### Settings - -Each channel has settings. You can customize the settings for a channel to control some of the behavior of releases promoted to the channel. - -The following shows the **Channel Settings** dialog, accessed by clicking the settings icon on a channel: - -Channel Settings dialog in the Vendor Portal - -[View a larger version of this image](/images/channel-settings.png) - -The following describes each of the channel settings: - -* **Channel name**: The name of the channel. You can change the channel name at any time. Each channel also has a unique ID listed below the channel name. -* **Description**: Optionally, add a description of the channel. -* **Set this channel to default**: When enabled, sets the channel as the default channel. The default channel cannot be archived. -* **Custom domains**: Select the customer-facing domains that releases promoted to this channel use for the Replicated registry, Replicated proxy registry, Replicated app service, or Replicated Download Portal endpoints. If a default custom domain exists for any of these endpoints, choosing a different domain in the channel settings overrides the default. If no custom domains are configured for an endpoint, the drop-down for the endpoint is disabled. - - For more information about configuring custom domains and assigning default domains, see [Using Custom Domains](custom-domains-using). -* The following channel settings apply only to applications that support KOTS: - * **Automatically create airgap builds for newly promoted releases in this channel**: When enabled, the Vendor Portal automatically builds an air gap bundle when a new release is promoted to the channel. When disabled, you can generate an air gap bundle manually for a release on the **Release History** page for the channel. - * **Enable semantic versioning**: When enabled, the Vendor Portal verifies that the version label for any releases promoted to the channel uses a valid semantic version. For more information, see [Semantic Versioning](releases-about#semantic-versioning) in _About Releases_. - * **Enable new airgap bundle format**: When enabled, air gap bundles built for releases promoted to the channel use a format that supports image digests. This air gap bundle format also ensures that identical image layers are not duplicated, resulting in a smaller air gap bundle size. For more information, see [Using Image Digests in Air Gap Installations](private-images-tags-digests#digests-air-gap) in _Using Image Tags and Digests_. - - :::note - The new air gap bundle format is supported for applications installed with KOTS v1.82.0 or later. - ::: - -## About Releases - -This section provides additional information about releases, including details about release promotion, properties, sequencing, and versioning. 
- -### Release Files - -A release contains your application files as well as the manifests required to install the application with the Replicated installers ([Replicated Embedded Cluster](/vendor/embedded-overview) and [Replicated KOTS](../intro-kots)). - -The application files in releases can be Helm charts and/or Kubernetes manifests. Replicated strongly recommends that all applications are packaged as Helm charts because many enterprise customers will expect to be able to install with Helm. - -### Promotion - -Each release is promoted to one or more channels. While you are developing and testing releases, Replicated recommends promoting to a channel that does not have any real customers assigned, such as the default Unstable channel. When the release is ready to be shared externally with customers, you can then promote to a channel that has the target customers assigned, such as the Beta or Stable channel. - -A release cannot be edited after it is promoted to a channel. This means that you can test a release on an internal development channel, and know with confidence that the same release will be available to your customers when you promote it to a channel where real customers are assigned. - -### Properties - -Each release has properties. You define release properties when you promote a release to a channel. You can edit release properties at any time from the channel **Release History** page in the Vendor Portal. For more information, see [Edit Release Properties](releases-creating-releases#edit-release-properties) in _Managing Releases with the Vendor Portal_. - -The following shows an example of the release properties dialog: - -release properties dialog for a release with version label 0.1.22 - -[View a larger version of this image](/images/release-properties.png) - -As shown in the screenshot above, the release has the following properties: - -* **Version label**: The version label for the release. Version labels have the following requirements: - - * If semantic versioning is enabled for the channel, you must use a valid semantic version. For more information, see [Semantic Versioning](#semantic-versioning). - - - -* **Requirements**: Select **Prevent this release from being skipped during upgrades** to mark the release as required. - - - - - -* **Release notes (supports markdown)**: Detailed release notes for the release. The release notes support markdown and are shown to your customer. - -### Sequencing - -By default, Replicated uses release sequence numbers to organize and order releases, and uses instance sequence numbers in an instance's internal version history. - -#### Release Sequences - -In the Vendor Portal, each release is automatically assigned a unique, monotonically-increasing sequence number. You can use this number as a fallback to identify a promoted or draft release, if you do not set the `Version label` field during promotion. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases). - -The following graphic shows release sequence numbers in the Vendor Portal: - -Release sequence numbers - -[View a larger version of this image](/images/release-sequences.png) - -#### Instance Sequences - -When a new version is available for upgrade, including when KOTS checks for upstream updates as well as when the user syncs their license or makes a config change, the KOTS Admin Console assigns a unique instance sequence number to that version. 
The instance sequence in the Admin Console starts at 0 and increments for each identifier that is returned when a new version is available. - -This instance sequence is unrelated to the release sequence displayed in the Vendor Portal, and it is likely that the instance sequence will differ from the release sequence. Instance sequences are only tracked by KOTS instances, and the Vendor Portal has no knowledge of these numbers. - -The following graphic shows instance sequence numbers on the Admin Console dashboard: - -Instance sequence numbers - -[View a larger version of this image](/images/instance-sequences.png) - -#### Channel Sequences - -When a release is promoted to a channel, a channel sequence number is assigned. This unique sequence number increments by one and tracks the order in which releases were promoted to a channel. You can view the channel sequence on the **Release History** page in the Vendor Portal, as shown in the image below: - -Channel sequence on Release History page - -[View a larger version of this image](/images/release-history-channel-sequence.png) - -The channel sequence is also used in certain URLs. For example, a release with a *release sequence* of `170` can have a *channel sequence* of `125`. The air gap download URL for that release can contain `125` in the URL, even though the release sequence is `170`. - -Ordering is more complex if some or all of the releases in a channel have a semantic version label and semantic versioning is enabled for the channel. For more information, see [Semantic Versioning Sequence](#semantic-versioning-sequence). - -#### Semantic Versioning Sequence - -For channels with semantic versioning enabled, the Admin Console sequences instance releases by their semantic versions instead of their promotion dates. - -If releases without a valid semantic version are already promoted to a channel, the Admin Console sorts the releases that do have semantic versions starting with the earliest version and proceeding to the latest. The releases with non-semantic versioning stay in the order of their promotion dates. For example, assume that you promote these releases in the following order to a channel: - -- 1.0.0 -- abc -- 0.1.0 -- xyz -- 2.0.0 - -Then, you enable semantic versioning on that channel. The Admin Console sequences the version history for the channel as follows: - -- 0.1.0 -- 1.0.0 -- abc -- xyz -- 2.0.0 - -### Semantic Versioning - -Semantic versioning is available with Replicated KOTS v1.58.0 and later. Note the following: - -- For applications created in the Vendor Portal on or after February 23, 2022, semantic versioning is enabled by default on the Stable and Beta channels. Semantic versioning is disabled on the Unstable channel by default. - -- For existing applications created before February 23, 2022, semantic versioning is disabled by default on all channels. - -Semantic versioning is recommended because it makes versioning more predictable for users and lets you enforce versioning so that no one uses an incorrect version. - -To use semantic versioning: - -1. Enable semantic versioning on a channel, if it is not enabled by default. Click the **Edit channel settings** icon, and turn on the **Enable semantic versioning** toggle. -1. Assign a semantic version number when you promote a release. - -Releases promoted to a channel with semantic versioning enabled are verified to ensure that the release version label is a valid semantic version.
For more information about valid semantic versions, see [Semantic Versioning 2.0.0](https://semver.org). - -If you enable semantic versioning for a channel and then promote releases to it, Replicated recommends that you do not later disable semantic versioning for that channel. - -You can enable semantic versioning on a channel that already has releases promoted to it without semantic versioning. Any subsequently promoted releases must use semantic versioning. In this case, the channel will have releases with and without semantic version numbers. For information about how Replicated organizes these release sequences, see [Semantic Versioning Sequences](#semantic-versioning-sequence). - -### Demotion - -A channel release can be demoted from a channel. When a channel release is demoted, the release is no longer available for download, but is not withdrawn from environments where it was already downloaded or installed. - -The demoted release's channel sequence and version are not reused. For customers, the release will appear to have been skipped. Un-demoting a release will restore its place in the channel sequence making it again available for download and installation. - -For information about how to demote a release, see [Demote a Release](/vendor/releases-creating-releases#demote-a-release) in _Managing Releases with the Vendor Portal_. - -## Vendor Portal Pages - -This section provides information about the channels and releases pages in the Vendor Portal. - -### Channels Page - -The **Channels** page in the Vendor Portal includes information about each channel. From the **Channels** page, you can edit and archive your channels. You can also edit the properties of the releases promoted to each channel, and view and edit the customers assigned to each channel. - -The following shows an example of a channel in the Vendor Portal **Channels** page: - -Channel card in the Vendor Portal - -[View a larger version of this image](/images/channel-card.png) - -As shown in the image above, you can do the following from the **Channels** page: - -* Edit the channel settings by clicking on the settings icon, or archive the channel by clicking on the trash can icon. For information about channel settings, see [Settings](#settings). - -* In the **Adoption rate** section, view data on the adoption rate of releases promoted to the channel among customers assigned to the channel. - -* In the **Customers** section, view the number of active and inactive customers assigned to the channel. Click **Details** to go to the **Customers** page, where you can view details about the customers assigned to the channel. - -* In the **Latest release** section, view the properties of the latest release, and get information about any warnings or errors in the YAML files for the latest release. - - Click **Release history** to access the history of all releases promoted to the channel. From the **Release History** page, you can view the version labels and files in each release that has been promoted to the selected channel. - - You can also build and download air gap bundles to be used in air gap installations with Replicated installers (Embedded Cluster, KOTS, kURL), edit the release properties for each release promoted to the channel from the **Release History** page, and demote a release from the channel. 
- - The following shows an example of the **Release History** page: - - Release history page in the Vendor Portal - - [View a larger version of this image](/images/channel-card.png) - -* For applications that support KOTS, you can also do the following from the **Channel** page: - - * In the **kURL installer** section, view the current kURL installer promoted to the channel. Click **Installer history** to view the history of kURL installers promoted to the channel. For more information about creating kURL installers, see [Creating a kURL Installer](packaging-embedded-kubernetes). - - * In the **Install** section, view and copy the installation commands for the latest release on the channel. - -### Draft Release Page - -For applications that support installation with KOTS, the **Draft** page provides a YAML editor to add, edit, and delete your application files and Replicated custom resources. You click **Releases > Create Release** in the Vendor Portal to open the **Draft** page. - -The following shows an example of the **Draft** page in the Vendor Portal: - - Draft release page - - [View a larger version of this image](/images/guides/kots/default-yaml.png) - -You can do the following tasks on the **Draft** page: - -- In the file directory, manage the file directory structure. Replicated custom resource files are grouped together above the white line of the file directory. Application files are grouped together underneath the white line in the file directory. - - Delete files using the trash icon that displays when you hover over a file. Create a new file or folder using the corresponding icons at the bottom of the file directory pane. You can also drag and drop files in and out of the folders. - - ![Manage File Directory](/images/new-file-and-trash.png) - -- Edit the YAML files by selecting a file in the directory and making changes in the YAML editor. - -- In the **Help** or **Config help** pane, view the linter for any errors. If there are no errors, you get an **Everything looks good!** message. If an error displays, you can click the **Learn how to configure** link. For more information, see [Linter Rules](/reference/linter). - -- Select the Config custom resource to preview how your application's Config page will look to your customers. The **Config preview** pane only appears when you select that file. For more information, see [About the Configuration Screen](config-screen-about). - -- Select the Application custom resource to preview how your application icon will look in the Admin Console. The **Application icon preview** only appears when you select that file. For more information, see [Customizing the Application Icon](admin-console-customize-app-icon). - -================ -File: docs/vendor/releases-creating-channels.md -================ -# Creating and Editing Channels - -This topic describes how to create and edit channels using the Replicated Vendor Portal. For more information about channels, see [About Channels and Releases](releases-about). - -For information about creating channels with the Replicated CLI, see [channel create](/reference/replicated-cli-channel-create). - -For information about creating and managing channels with the Vendor API v3, see the [channels](https://replicated-vendor-api.readme.io/reference/createchannel) section in the Vendor API v3 documentation. - -## Create a Channel - -To create a channel: - -1. From the Replicated [Vendor Portal](https://vendor.replicated.com), select **Channels** from the left menu. -1. Click **Create Channel**. 
- - The Create a new channel dialog opens. For example: - - Create channel dialog - -1. Enter a name and description for the channel. -1. (Recommended) Enable semantic versioning on the channel if it is not enabled by default by turning on **Enable semantic versioning**. For more information about semantic versioning and defaults, see [Semantic Versioning](releases-about#semantic-versioning). - -1. (Recommended) Enable an air gap bundle format that supports image digests and deduplication of image layers, by turning on **Enable new air gap bundle format**. For more information, see [Using Image Tags and Digests](private-images-tags-digests). - -1. Click **Create Channel**. - -## Edit a Channel - -To edit the settings of an existing channel: - -1. In the Vendor Portal, select **Channels** from the left menu. -1. Click the gear icon on the top right of the channel that you want to modify. - - The Channel settings dialog opens. For example: - - Channel Settings dialog in the Vendor Portal - -1. Edit the fields and click **Save**. - - For more information about channel settings, see [Settings](releases-about#settings) in _About Channels and Releases_. - -## Archive a Channel - -You can archive an existing channel to prevent any new releases from being promoted to the channel. - -:::note -You cannot archive a channel if: -* There are customers assigned to the channel. -* The channel is set as the default channel. - -Assign customers to a different channel and set a different channel as the default before archiving. -::: - -To archive a channel with the Vendor Portal or the Replicated CLI: - -* **Vendor portal**: In the Vendor Portal, go to the **Channels** page and click the trash can icon in the top right corner of the card for the channel that you want to archive. -* **Replicated CLI**: - 1. Run the following command to find the ID for the channel that you want to archive: - ``` - replicated channel ls - ``` - The output of this command includes the ID and name for each channel, as well as information about the latest release version on the channels. - - 1. Run the following command to archive the channel: - ``` - replicated channel rm CHANNEL_ID - ``` - Replace `CHANNEL_ID` with the channel ID that you retrieved in the previous step. - - For more information, see [channel rm](/reference/replicated-cli-channel-rm) in the Replicated CLI documentation. - -================ -File: docs/vendor/releases-creating-cli.mdx -================ -# Managing Releases with the CLI - -This topic describes how to use the Replicated CLI to create and promote releases. - -For information about creating and managing releases with the Vendor Portal, see [Managing Releases with the Vendor Portal](/vendor/releases-creating-releases). - -For information about creating and managing releases with the Vendor API v3, see the [releases](https://replicated-vendor-api.readme.io/reference/createrelease) section in the Vendor API v3 documentation. - -## Prerequisites - -Before you create a release using the Replicated CLI, complete the following prerequisites: - -* Install the Replicated CLI and then log in to authorize the CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing). - -* Create a new application using the `replicated app create APP_NAME` command. You only need to do this procedure one time for each application that you want to deploy. See [`app create`](/reference/replicated-cli-app-create) in _Reference_. 
- -* Set the `REPLICATED_APP` environment variable to the slug of the target application. See [Set Environment Variables](/reference/replicated-cli-installing#env-var) in _Installing the Replicated CLI_. - - **Example**: - - ```bash - export REPLICATED_APP=my-app-slug - ``` - -## Create a Release From a Local Directory {#dir} - -You can use the Replicated CLI to create a release from a local directory that contains the release files. - -To create and promote a release: - -1. (Helm Charts Only) If your release contains any Helm charts: - - 1. Package each Helm chart as a `.tgz` file. See [Packaging a Helm Chart for a Release](/vendor/helm-install-release). - - 1. Move the `.tgz` file or files to the local directory that contains the release files: - - ```bash - mv CHART_TGZ PATH_TO_RELEASE_DIR - ``` - Where: - * `CHART_TGZ` is the `.tgz` Helm chart archive. - * `PATH_TO_RELEASE_DIR` is the path to the directory that contains the release files. - - **Example** - - ```bash - mv wordpress-1.3.5.tgz manifests - ``` - - 1. In the same directory that contains the release files, add a HelmChart custom resource for each Helm chart in the release. See [Configuring the HelmChart Custom Resource](helm-native-v2-using). - -1. Lint the application manifest files and ensure that there are no errors in the YAML: - - ```bash - replicated release lint --yaml-dir=PATH_TO_RELEASE_DIR - ``` - - Where `PATH_TO_RELEASE_DIR` is the path to the directory with the release files. - - For more information, see [release lint](/reference/replicated-cli-release-lint) and [Linter Rules](/reference/linter). - -1. Do one of the following: - - * **Create and promote the release with one command**: - - ```bash - replicated release create --yaml-dir PATH_TO_RELEASE_DIR --lint --promote CHANNEL - ``` - Where: - * `PATH_TO_RELEASE_DIR` is the path to the directory with the release files. - * `CHANNEL` is the channel ID or the case-sensitive name of the channel. - - * **Create and edit the release before promoting**: - - 1. Create the release: - - ```bash - replicated release create --yaml-dir PATH_TO_RELEASE_DIR - ``` - Where `PATH_TO_RELEASE_DIR` is the path to the directory with the release files. - - For more information, see [release create](/reference/replicated-cli-release-create). - - 1. Edit and update the release as desired: - - ``` - replicated release update SEQUENCE --yaml-dir PATH_TO_RELEASE_DIR - ``` - Where: - - - `SEQUENCE` is the release sequence number. This identifies the existing release to be updated. - - `PATH_TO_RELEASE_DIR` is the path to the directory with the release files. - - For more information, see [release update](/reference/replicated-cli-release-update). - - 1. Promote the release when you are ready to test it. Releases cannot be edited after they are promoted. To make changes after promotion, create a new release. - - ``` - replicated release promote SEQUENCE CHANNEL - ``` - - Where: - - - `SEQUENCE` is the release sequence number. - - `CHANNEL` is the channel ID or the case-sensitive name of the channel. - - For more information, see [release promote](/reference/replicated-cli-release-promote). - -1.
================
File: docs/vendor/releases-creating-customer.mdx
================
import ChangeChannel from "../partials/customers/_change-channel.mdx"
import Download from "../partials/customers/_download.mdx"
import GitOpsNotRecommended from "../partials/gitops/_gitops-not-recommended.mdx"

# Creating and Managing Customers

This topic describes how to create and manage customers in the Replicated Vendor Portal. For more information about customer licenses, see [About Customers](licenses-about).

## Create a Customer

This procedure describes how to create a new customer in the Vendor Portal. You can edit customer details at any time.

For information about creating a customer with the Replicated CLI, see [customer create](/reference/replicated-cli-customer-create).

For information about creating and managing customers with the Vendor API v3, see the [customers](https://replicated-vendor-api.readme.io/reference/getcustomerentitlements) section in the Vendor API v3 documentation.

To create a customer:

1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers > Create customer**.

   The **Create a new customer** page opens:

   ![Create a new customer page in the Vendor Portal](/images/create-customer.png)

   [View a larger version of this image](/images/create-customer.png)

1. For **Customer name**, enter a name for the customer.

1. For **Customer email**, enter the email address for the customer.

   :::note
   A customer email address is required for Helm installations. This email address is never used to send emails to customers.
   :::

1. For **Assigned channel**, assign the customer to one of your channels. You can select any channel that has at least one release. The channel a customer is assigned to determines the application releases that they can install. For more information, see [Channel Assignment](licenses-about#channel-assignment) in _About Customers_.

1. For **Custom ID**, you can enter a custom ID for the customer. Setting a custom ID allows you to easily associate this Replicated customer record to your own internal customer data systems during data exports. Replicated recommends using an alphanumeric value such as your Salesforce ID or Hubspot ID.

   :::note
   Replicated does _not_ require that the custom ID is unique. The custom ID is for vendor data reconciliation purposes, and is not used by Replicated for any functionality purposes.
   :::

1. For **Expiration policy**, by default, **Customer's license does not expire** is enabled. To set an expiration date for the license, enable **Customer's license has an expiration date** and specify an expiration date in the **When does this customer expire?** calendar.

1. For **Customer type**, set the customer type. Customer type is used only for reporting purposes. Customer access to your application is not affected by the type you assign to them. By default, **Trial** is selected. For more information, see [About Customer License Types](licenses-about-types).

1. Enable any of the available options for the customer. For more information about the license options, see [Built-in License Fields](/vendor/licenses-using-builtin-fields). For more information about enabling install types, see [Managing Install Types for a License (Beta)](/vendor/licenses-install-types).

1. For **Custom fields**, configure any custom fields that you have added for your application. For more information about how to create custom fields for your application, see [Managing Customer License Fields](licenses-adding-custom-fields).

1. Click **Save Changes**.
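Customer creation can also be scripted with the `customer create` command linked above. As a hedged sketch (the name and channel are illustrative, and available flags may vary by CLI version):

```bash
# Create a customer assigned to the Stable channel
replicated customer create --name "Example Co" --channel Stable
```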
## Edit a Customer

You can edit the built-in and custom license fields for a customer at any time by going to the **Manage customer** page for that customer. For more information, see [Manage Customer Page](licenses-about#about-the-manage-customer-page) in _About Customers and Licensing_.

Replicated recommends that you test any license changes in a development environment. If needed, install the application using a developer license matching the current customer's entitlements before editing the developer license. Then validate the updated license.

:::important
For online environments, changing license entitlements can trigger changes to the customer's installed application instance during runtime. Replicated recommends that you verify the logic your application uses to query and enforce the target entitlement before making any changes.
:::

To edit license fields:

1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers**.

1. Select the target customer and click the **Manage customer** tab.

1. On the **Manage customer** page, edit the desired fields and click **Save**.

   ![Full manage customer page for a customer named Prestige Financial](/images/customer-details.png)

1. Test the changes by installing or updating in a development environment. Do one of the following, depending on the installation method for your application:
   * For applications installed with Helm that use the Replicated SDK, you can add logic to your application to enforce entitlements before installation or during runtime using the Replicated SDK API license endpoints. See [Checking Entitlements in Helm Charts Before Deployment](licenses-reference-helm).
   * For applications installed with Replicated KOTS, update the license in the admin console. See [Update Online Licenses](/enterprise/updating-licenses#update-online-licenses) and [Update Air Gap Licenses](/enterprise/updating-licenses#update-air-gap-licenses) in _Updating Licenses in the Admin Console_.

## Archive a Customer

When you archive a customer in the Vendor Portal, the customer is hidden from search by default and becomes read-only. Archival does not affect the utility of license files downloaded before the customer was archived.

To expire a license, set an expiration date and policy in the **Expiration policy** field before you archive the customer.

To archive a customer:

1. In the Vendor Portal, click **Customers**. Select the target customer, then click the **Manage customer** tab.

1. Click **Archive Customer**. In the confirmation dialog, click **Archive Customer** again.

You can unarchive a customer by clicking **Unarchive Customer** on the customer's **Manage customer** page.

## Export Customer and Instance Data {#export}

For more information about the data fields in the CSV downloads, see [Data Dictionary](/vendor/instance-data-export#data-dictionary) in _Export Customers and Instance Data_.

## Filter and Search Customers

The **Customers** page provides a search box and filters that help you find customers:

search box and filters on the customers page

[View a larger version of this image](/images/customers-filter.png)

You can filter customers based on whether they are active, by license type, and by channel name.
You can filter using multiple criteria, such as Active, Paid, and Stable. However, you can select only one license type and one channel at a time.

If there is adoption rate data available for the channel that you are filtering by, you can also filter by current version, previous version, and older versions.

You can also filter customers by custom ID or email address. To filter customers by custom ID or email, use the search box and prepend your search term with "customId:" (for example, `customId:1234`) or "email:" (for example, `email:bob@replicated.com`).

If you want to filter information using multiple license types or channels, you can download a CSV file instead. For more information, see [Export Customer and Instance Data](#export) above.

================
File: docs/vendor/releases-creating-releases.mdx
================
import RequiredReleasesLimitations from "../partials/releases/_required-releases-limitations.mdx"
import RequiredReleasesDescription from "../partials/releases/_required-releases-description.mdx"

# Managing Releases with the Vendor Portal

This topic describes how to use the Replicated Vendor Portal to create and promote releases, edit releases, edit release properties, and archive releases.

For information about creating and managing releases with the CLI, see [Managing Releases with the CLI](/vendor/releases-creating-cli).

For information about creating and managing releases with the Vendor API v3, see the [releases](https://replicated-vendor-api.readme.io/reference/createrelease) and [channelReleases](https://replicated-vendor-api.readme.io/reference/channelreleaseairgapbundleurl) sections in the Vendor API v3 documentation.

## Create a Release

To create and promote a release in the Vendor Portal:

1. From the **Applications** dropdown list, select **Create an app** or select an existing application to update.

1. Click **Releases > Create release**.

   ![Create Release](/images/release-create-new.png)

   [View a larger version of this image](/images/release-create-new.png)

1. Add your files to the release. You can do this by dragging and dropping files to the file directory in the YAML editor or clicking the plus icon to add a new, untitled YAML file.

1. For any Helm charts that you add to the release, in the **Select Installation Method** dialog, select the version of the HelmChart custom resource that KOTS will use to install the chart. `kots.io/v1beta2` is recommended. For more information about the HelmChart custom resource, see [Configuring the HelmChart Custom Resource](helm-native-v2-using).

   select installation method dialog

   [View a larger version of this image](/images/helm-select-install-method.png)

1. Click **Save release**. This saves a draft that you can continue to edit until you promote it.

1. Click **Promote**. In the **Promote Release** dialog, edit the fields:

   For more information about the requirements and limitations of each field, see Properties in _About Channels and Releases_.
   | Field | Description |
   |-------|-------------|
   | Channel | Select the channel where you want to promote the release. If you are not sure which channel to use, use the default Unstable channel. |
   | Version label | Enter a version label. If you have one or more Helm charts in your release, the Vendor Portal automatically populates this field. You can change the version label to any version specified in any of the `Chart.yaml` files included in the release. |
   | Requirements | Select **Prevent this release from being skipped during upgrades** to mark the release as required for KOTS installations. This option does not apply to installations with Helm. |
   | Release notes | Add release notes. The release notes support markdown and are shown to your customer. |
    - -1. Click **Promote**. - - The release appears in an **Active** state on the Releases page. - -## Edit a Draft Release - -To edit a draft release: - -1. From the **Applications** dropdown list, select an existing application to update. -1. On the **Releases** page, find the draft release you want to edit and click **Edit YAML**. - - Edit YAML button for a draft release in the Vendor Portal - - [View a larger image](/images/releases-edit-draft.png) - -1. Click **Save** to save your updated draft. -1. (Optional) Click **Promote**. - -## Edit Release Properties - -You can edit the properties of a release at any time. For more information about release properties, see [Properties](releases-about#properties) in _About Channels and Releases_. - -To edit release properties: - -1. Go to **Channels**. -1. In the channel where the release was promoted, click **Release History**. -1. For the release sequence that you want to edit, open the dot menu and click **Edit release**. -1. Edit the properties as needed. - Release Properties dialog in the Vendor Portal - - [View a larger image](/images/release-properties.png) -1. Click **Update Release**. - -## Archive a Release - -You can archive releases to remove them from view on the **Releases** page. Archiving a release that has been promoted does _not_ remove the release from the channel's **Release History** page or prevent KOTS from downloading the archived release. - -To archive one or more releases: - -1. From the **Releases** page, click the trash can icon in the upper right corner. -1. Select one or more releases. -1. Click **Archive Releases**. -1. Confirm the archive action when prompted. - -## Demote a Release - -A channel release can be demoted from a channel. When a channel release is demoted, the release is no longer available for download, but is not withdrawn from environments where it was already downloaded or installed. For more information, see [Demotion](/vendor/releases-about#demotion) in _About Channels and Releases_. - -For information about demoting and un-demoting releases with the Replicated CLI, see [channel demote](/reference/replicated-cli-channel-demote) and [channel un-demote](/reference/replicated-cli-channel-un-demote). - -To demote a release in the Vendor Portal: - -1. Go to **Channels**. -1. In the channel where the release was promoted, click **Release History**. -1. For the release sequence that you want to demote, open the dot menu and select **Demote Release**. - - ![Release history page](/images/channels-release-history.png) - [View a larger version of this image](/images/channels-release-history.png) - - After the release is demoted, the given release sequence is greyed out and a **Demoted** label is displayed next to the release on the **Release History** page. - -================ -File: docs/vendor/releases-share-download-portal.md -================ -import DownloadPortal from "../partials/kots/_download-portal-about.mdx" - -# Downloading Assets from the Download Portal - -This topic describes how to download customer license files, air gap bundles, and other assets from the Replicated Download Portal. 
For information about downloading air gap bundles and licenses with the Vendor API v3, see the following pages in the Vendor API v3 documentation:
* [Download a customer license file as YAML](https://replicated-vendor-api.readme.io/reference/downloadlicense)
* [Trigger airgap build for a channel's release](https://replicated-vendor-api.readme.io/reference/channelreleaseairgapbuild)
* [Get airgap bundle download URL for the active release on the channel](https://replicated-vendor-api.readme.io/reference/channelreleaseairgapbundleurl)

## Overview

The most common use case for the Download Portal is for customers installing into air gap environments who need to download both their license file and multiple air gap bundles.

The following is an example of the Download Portal for an air gap customer installing in their own existing cluster:

![Download Portal for existing cluster air gap installs](/images/download-portal-existing-cluster.png)

[View a larger version of this image](/images/download-portal-existing-cluster.png)

## Limitations

* Installation assets for [Replicated Embedded Cluster](/vendor/embedded-overview) are not available for download in the Download Portal.

* Sessions in the Download Portal are valid for 72 hours. After the session expires, your customer must log in again. The Download Portal session length is not configurable.

## Download Assets from the Download Portal

To log in to the Download Portal and download assets:

1. In the [Vendor Portal](https://vendor.replicated.com), on the **Customers** page, click on the name of the customer.

1. (Optional) On the **Manage customer** tab, enable the **Airgap Download Enabled** option. This makes air gap bundles available in the Download Portal.

   ![airgap download enabled license option](/images/airgap-download-enabled.png)

   [View a larger version of this image](/images/airgap-download-enabled.png)

1. On the **Reporting** tab, in the **Download portal** section, click **Manage customer password**.

   ![download portal section](/images/download-portal-link.png)

   [View a larger version of this image](/images/download-portal-link.png)

1. In the pop-up window, enter a password or click **Generate**.

   download portal password pop-up

   [View a larger version of this image](/images/download-portal-password-popup.png)

1. Click **Copy** to copy the password to your clipboard.

   After the password is saved, it cannot be retrieved again. If you lose the password, you can generate a new one.

1. Click **Save** to set the password.

1. Click **Visit download portal** to log in to the Download Portal and preview your customer's experience.

   :::note
   By default, the Download Portal uses the domain `get.replicated.com`. You can optionally use a custom domain for the Download Portal. For more information, see [Using Custom Domains](/vendor/custom-domains-using).
   :::

1. In the Download Portal, on the left side of the screen, select one of the following:
   * **Bring my own Kubernetes**: View the downloadable assets for existing cluster installations with KOTS.
   * **Embedded Kubernetes**: View the downloadable assets for Replicated kURL installations.

   :::note
   Installation assets for [Replicated Embedded Cluster](/vendor/embedded-overview) are not available for download in the Download Portal.
- ::: - - The following is an example of the Download Portal for an air gap customer: - - ![download portal for existing cluster air gap installs](/images/download-portal-existing-cluster.png) - - [View a larger version of this image](/images/download-portal-existing-cluster.png) - -1. Under **Select application version**, use the dropdown to select the target application release version. The Download Portal automatically makes the correct air gap bundles available for download based on the selected application version. - -1. Click the download button to download each asset. - -1. To share installation files with a customer, send the customer their unique link and password for the Download Portal. - -================ -File: docs/vendor/releases-sharing-license-install-script.mdx -================ -import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; - -# Finding Installation Commands for a Release - -This topic describes where to find the installation commands and instructions for releases in the Replicated Vendor Portal. - -For information about getting installation commands with the Replicated CLI, see [channel inspect](/reference/replicated-cli-channel-inspect). For information about getting installation commands with the Vendor API v3, see [Get install commands for a specific channel release](https://replicated-vendor-api.readme.io/reference/getchannelreleaseinstallcommands) in the Vendor API v3 documentation. - -## Get Commands for the Latest Release - -Every channel in the Vendor Portal has an **Install** section where you can find installation commands for the latest release on the channel. - -To get the installation commands for the latest release: - -1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Channels** page. - -1. On the target channel card, under **Install**, click the tab for the type of installation command that you want to view: - - - -

* **KOTS**: View the command for installing with Replicated KOTS in existing clusters.

  Install section of the channel card
  [View a larger version of this image](/images/channel-card-install-kots.png)

* **Embedded Cluster / kURL**: View the commands for installing with Replicated Embedded Cluster or Replicated kURL on VMs or bare metal servers. In the dropdown, choose **kURL** or **Embedded Cluster** to view the command for the target installer:

  Install section of the channel card
  [View a larger version of this image](/images/channel-card-install-kurl.png)

  Install section of the channel card
  [View a larger version of this image](/images/channel-card-install-ec.png)

  :::note
  The Embedded Cluster installation instructions are customer-specific. Click **View customer list** to navigate to the page for the target customer. For more information, see [Get Customer-Specific Installation Instructions for Helm or Embedded Cluster](#customer-specific) below.
  :::

* **Helm**: View the command for installing with the Helm CLI in an existing cluster.

  Install section of the channel card
  [View a larger version of this image](/images/channel-card-install-helm.png)

  :::note
  The Helm installation instructions are customer-specific. Click **View customer list** to navigate to the page for the target customer. For more information, see [Get Customer-Specific Installation Instructions for Helm or Embedded Cluster](#customer-specific) below.
  :::
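The exact commands shown on the channel card are generated for your application and channel, so always copy them from the Vendor Portal. For orientation only, the commands typically have shapes like the following, where the app slug `my-app`, the channel `unstable`, and the Helm credentials are placeholders:

```bash
# KOTS in an existing cluster
curl https://kots.io/install | bash
kubectl kots install my-app/unstable

# kURL on a VM or bare metal server
curl -sSL https://k8s.kurl.sh/my-app-unstable | sudo bash

# Helm CLI in an existing cluster
helm registry login registry.replicated.com --username EMAIL --password LICENSE_ID
helm install my-app oci://registry.replicated.com/my-app/unstable/my-app
```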
    - -## Get Commands for a Specific Release - -Every channel in the Vendor Portal has a **Release history** page where you can find the installation commands for specific release versions. - -To get the command for a specific release version: - -1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Channels** page. - -1. On the channel card, click **Release history**. - - Release history link on channel card - - [View a larger version of this image](/images/release-history-link.png) - -1. For the target release version, open the dot menu and click **Install Commands**. - - ![Release history page](/images/channels-release-history.png) - - [View a larger version of this image](/images/channels-release-history.png) - -1. In the **Install Commands** dialog, click the tab for the type of installation command that you want to view: - - - -

* **KOTS**: View the command for installing with Replicated KOTS in existing clusters.

  Install section of the channel card
  [View a larger version of this image](/images/release-history-install-kots.png)

* **Embedded Cluster / kURL**: View the commands for installing with Replicated Embedded Cluster or Replicated kURL on VMs or bare metal servers. In the dropdown, choose **kURL** or **Embedded Cluster** to view the command for the target installer:

  Install section of the channel card
  [View a larger version of this image](/images/release-history-install-kurl.png)

  Install section of the channel card
  [View a larger version of this image](/images/release-history-install-embedded-cluster.png)

  :::note
  The Embedded Cluster installation instructions are customer-specific. Click **View customer list** to navigate to the page for the target customer. For more information, see [Get Customer-Specific Installation Instructions for Helm or Embedded Cluster](#customer-specific) below.
  :::

* **Helm**: View the command for installing with the Helm CLI in an existing cluster.

  Install section of the channel card
  [View a larger version of this image](/images/release-history-install-helm.png)

  :::note
  The Helm installation instructions are customer-specific. Click **View customer list** to navigate to the page for the target customer. For more information, see [Get Customer-Specific Installation Instructions for Helm or Embedded Cluster](#customer-specific) below.
  :::
    - -## Get Customer-Specific Installation Instructions for Helm or Embedded Cluster {#customer-specific} - -Installation instructions for the Helm CLI and Replicated Embedded Cluster are customer-specific. You can find installation instructions on the page for the target customer. - -To get customer-specific Helm or Embedded Cluster installation instructions: - -1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Customers** page and click on the target customer. - -1. At the top of the page, click the **Install instructions** drop down, then click **Helm** or **Embedded cluster**. - - ![Install instructions button](/images/customer-install-instructions-dropdown.png) - - [View a larger version of this image](/images/customer-install-instructions-dropdown.png) - -1. In the dialog that opens, follow the installation instructions to install. - - - -

* **Helm**: View the customer-specific Helm CLI installation instructions. For more information about installing with the Helm CLI, see [Installing with Helm](/vendor/install-with-helm).

  Helm install button
  [View a larger version of this image](/images/helm-install-instructions-dialog.png)

* **Embedded Cluster**: View the customer-specific Embedded Cluster installation instructions. For more information about installing with Embedded Cluster, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded).

  Embedded cluster install instructions
  [View a larger version of this image](/images/embedded-cluster-install-dialog-latest.png)
================
File: docs/vendor/replicated-api-tokens.md
================
import TeamTokenNote from "../partials/vendor-api/_team-token-note.mdx"

# Generating API Tokens

This topic describes the available types of API tokens and how to generate them for use with the Replicated CLI and Replicated Vendor API v3.

## About API Tokens

The Vendor API v3 is the API that manages applications in the Replicated Vendor Portal. The Replicated CLI is an implementation of the Vendor API v3.

Using the Replicated CLI and Vendor API v3 requires an API token for authorization. Tokens are primarily used for automated customer, channel, and release management. You create tokens in the Vendor Portal.

The following types of tokens are available:

- [Service Accounts](#service-accounts)
- [User API Tokens](#user-api-tokens)

### Service Accounts

Service accounts are assigned a token and associated with an RBAC policy. Users with the proper permissions can create, retrieve, or revoke service account tokens. Admin users can assign any RBAC policy to a service account. Non-admin users can only assign their own RBAC policy when they create a service account.

Service accounts are useful for operations that are not tied to a particular user, such as CI/CD or integrations.

Updates to a service account's RBAC policy are automatically applied to its associated token. When a service account is removed, its tokens are also invalidated.

### User API Tokens

User API tokens are private to the user creating the token. User tokens assume the user's account when used, including any RBAC permissions.

Updates to a user's RBAC role are applied to all of the tokens belonging to that user.

Revoking a user token immediately invalidates that token. When a user account is deleted, its user tokens are also deleted.

## Generate Tokens

To use the Replicated CLI or the Vendor API v3, you need a User API token or a Service Account token. Existing team API tokens also continue to work.

### Generate a Service Account

To generate a service account:

1. Log in to the Vendor Portal, and select [**Team > Service Accounts**](https://vendor.replicated.com/team/serviceaccounts).

1. Select **New Service Account**. If one or more service accounts already exist, you can add another by selecting **New Service Account**.

1. Edit the fields in the **New Service Account** dialog:

   New Service Accounts Dialog

   [View a larger version of this image](/images/service-accounts.png)

   1. For **Nickname**, enter a name for the token. Names for service accounts must be unique within a given team.

   1. For **RBAC**, select the RBAC policy from the dropdown list. The token must have `Admin` access to create new releases.

      This list includes the Vendor Portal default policies `Admin` and `Read Only`. Any custom policies also display in this list. For more information, see [Configuring RBAC Policies](team-management-rbac-configuring).

      Users with a non-admin RBAC role cannot select any other RBAC role when creating a token. They are restricted to creating a token with their same level of access to avoid permission elevation.

   1. (Optional) For custom RBAC policies, select the **Limit to read-only version of above policy** check box if you want to use a policy that has Read/Write permissions but limit this service account to read-only. This option lets you maintain one version of a custom RBAC policy and use it two ways: as read/write and as read-only.
1. Select **Create Service Account**.

1. Copy the service account token and save it in a secure location. The token will not be available to view again.

   :::note
   To remove a service account, select **Remove** for the service account that you want to delete.
   :::

### Generate a User API Token

To generate a user API token:

1. Log in to the Vendor Portal and go to the [Account Settings](https://vendor.replicated.com/account-settings) page.

1. Under **User API Tokens**, select **Create a user API token**. If one or more tokens already exist, you can add another by selecting **New user API token**.

   User API Token Page

   [View a larger version of this image](/images/user-token-list.png)

1. In the **New user API token** dialog, enter a name for the token in the **Nickname** field. Names for user API tokens must be unique per user.

   Create New User Token Dialog

   [View a larger version of this image](/images/user-token-create.png)

1. Select the required permissions or use the default **Read and Write** permissions. Then select **Create token**.

   :::note
   The token must have `Read and Write` access to create new releases.
   :::

1. Copy the user API token that displays and save it in a secure location. The token will not be available to view again.

   :::note
   To revoke a token, select **Revoke token** for the token that you want to delete.
   :::
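After generating a token, you can pass it to the Replicated CLI through the `REPLICATED_API_TOKEN` environment variable, or send it in the `Authorization` header of Vendor API v3 requests. A minimal sketch, where the token value and the app listing call are illustrative:

```bash
export REPLICATED_API_TOKEN=your-token-here

# The Replicated CLI reads REPLICATED_API_TOKEN automatically
replicated app ls

# Vendor API v3 requests send the token in the Authorization header
curl -H "Authorization: $REPLICATED_API_TOKEN" \
  https://api.replicated.com/vendor/v3/apps
```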
================
File: docs/vendor/replicated-onboarding.mdx
================
import CreateRelease from "../partials/getting-started/_create-promote-release.mdx"
import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx"
import EcCr from "../partials/embedded-cluster/_ec-config.mdx"
import HelmPackage from "../partials/helm/_helm-package.mdx"
import Requirements from "../partials/embedded-cluster/_requirements.mdx"
import SDKOverview from "../partials/replicated-sdk/_overview.mdx"
import TestYourChanges from "../partials/getting-started/_test-your-changes.mdx"
import UnauthorizedError from "../partials/replicated-sdk/_401-unauthorized.mdx"

# Replicated Onboarding

This topic describes how to onboard applications to the Replicated Platform.

## Before You Begin

This section includes guidance and prerequisites to review before you begin onboarding your application.

### Best Practices and Recommendations

The following are some best practices and recommendations for successfully onboarding with Replicated:

* When integrating new Replicated features with an application, make changes in small iterations and test frequently by installing or upgrading the application in a development environment. This will help you to more easily identify issues and troubleshoot. This onboarding workflow will guide you through the process of integrating features in small iterations.

* Use the Replicated CLI to create and manage your application and releases. Getting familiar with the Replicated CLI will also help later on when integrating Replicated workflows into your CI/CD pipelines. For more information, see [Installing the Replicated CLI](/reference/replicated-cli-installing).

* These onboarding tasks assume that you will test the installation of each release on a VM with the Replicated Embedded Cluster installer _and_ in a cluster with the Replicated KOTS installer. If you do not intend to offer existing cluster installations with KOTS (for example, if you intend to support only Embedded Cluster and Helm installations for your users), then you can choose to test with Embedded Cluster only.

* Ask for help from the Replicated community. For more information, see [Getting Help from the Community](#community) below.

### Getting Help from the Community {#community}

The [Replicated community site](https://community.replicated.com/) is a forum where Replicated team members and users can post questions and answers related to working with the Replicated Platform. It is designed to help Replicated users troubleshoot and learn more about common tasks involved with distributing, installing, observing, and supporting their application.

Before posting in the community site, use the search to find existing knowledge base articles related to your question. If you are not able to find an existing article that addresses your question, create a new topic or add a reply to an existing topic so that a member of the Replicated community or team can respond.

To search and participate in the Replicated community, see https://community.replicated.com/.

### Prerequisites

* Create an account in the Vendor Portal. You can either create a new team or join an existing team. For more information, see [Creating a Vendor Account](vendor-portal-creating-account).

* Install the Replicated CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing).

* Complete a basic quick start workflow to create an application with a sample Helm chart and then promote and install releases in a development environment. This helps you get familiar with the process of creating, installing, and updating releases in the Replicated Platform. See [Replicated Quick Start](/vendor/quick-start).

* Ensure that you have access to a VM that meets the requirements for the Replicated Embedded Cluster installer. You will use this VM to test installation with Embedded Cluster.

  Embedded Cluster has the following requirements:

* (Optional) Ensure that you have kubectl access to a Kubernetes cluster. You will use this cluster to test installation with KOTS. If you do not intend to offer existing cluster installations with KOTS (for example, if you intend to support only Embedded Cluster and Helm installations for your users), then you do not need access to a cluster for the main onboarding tasks.

  You can use any cloud provider or tool that you prefer to create a cluster, such as [Replicated Compatibility Matrix](/vendor/testing-how-to), Google Kubernetes Engine (GKE), or minikube.

## Onboard

Complete the tasks in this section to onboard your application. When you are done, you can continue to [Next Steps](#next-steps) to integrate other Replicated features with your application.

### Task 1: Create An Application

To get started with onboarding, first create a new application. This will be the official Vendor Portal application used by your team to create and promote both internal and customer-facing releases.

To create an application:

1. Create a new application using the Replicated CLI or the Vendor Portal. Use an official name for your application. See [Create an Application](/vendor/vendor-portal-manage-app#create-an-application).
   **Can I change the application name in the future?**

   You can change the application name, but you cannot change the application _slug_.

   The Vendor Portal automatically generates and assigns a unique slug for each application based on the application's name. For example, the slug for "Example App" would be `example-app`.

   Application slugs are unique across all of Replicated. This means that, if necessary, the Vendor Portal will append a random word to the end of the slug to ensure uniqueness. For example, `example-app-flowers`.
    - -1. Set the `REPLICATED_APP` environment variable to the unique slug of the application that you created. This will allow you to interact with the application from the Replicated CLI throughout onboarding. See [Set Environment Variables](/reference/replicated-cli-installing#replicated_app) in _Installing the Replicated CLI_. - - For example: - - ```bash - export REPLICATED_APP=my-app - ``` - -### Task 2: Connect Your Image Registry - -Add credentials for your image registry to the Vendor Portal. This will allow you to use the Replicated proxy registry in a later step so that you can grant proxy access to application images without exposing registry credentials to your customers. - -For more information, see [Connecting to an External Registry](/vendor/packaging-private-images). - -### Task 3: Add the Replicated SDK and Package your Chart - -Next, add the Replicated SDK as a dependency of your Helm chart and package the chart as a `.tgz` archive. - -The Replicated SDK is a Helm chart that can be installed as a small service alongside your application. The SDK provides access to key Replicated functionality, including an in-cluster API and automatic access to insights and operational telemetry for instances running in customer environments. For more information, see [About the Replicated SDK](/vendor/replicated-sdk-overview). - -To package your Helm chart with the Replicated SDK: - -1. Go to the local directory where your Helm chart is. - -1. In your application Helm chart `Chart.yaml` file, add the YAML below to declare the SDK as a dependency. - - If your application is installed as multiple charts, declare the SDK as a dependency of the chart that customers install first. Do not declare the SDK in more than one chart. For more information, see [Packaging a Helm Chart for a Release](helm-install-release). - - - -1. Update dependencies and package the chart as a `.tgz` file: - - - - - -1. If your application is deployed as multiple Helm charts, package each chart as a separate `.tgz` archive using the `helm package -u PATH_TO_CHART` command. Do not declare the SDK in more than one chart. - -### Task 4: Create the Initial Release with KOTS HelmChart and Embedded Cluster Config {#first-release} - -After packaging your Helm chart, you can create a release. The initial release for your application will include the minimum files required to install a Helm chart with the Embedded Cluster installer: -* The Helm chart `.tgz` archive -* [KOTS HelmChart custom resource](/reference/custom-resource-helmchart-v2) -* [Embedded Cluster Config](/reference/embedded-config) - -If you have multiple charts, you will add each chart archive to the release, plus a corresponding KOTS HelmChart custom resource for each archive. - -:::note -Configuring the KOTS HelmChart custom resource includes several tasks, and involves the use of KOTS template functions. Depending on how many Helm charts your application uses, Replicated recommends that you allow about two to three hours for configuring the HelmChart custom resource and creating and testing your initial release. -::: - -To create the first release for your application: - -1. In the local directory for your Helm chart, create a subdirectory named `manifests` where you will add the files for the release. - -1. In the `manifests` directory: - - 1. Move the `.tgz` chart archive that you packaged. If your application is deployed as multiple Helm charts, move each `.tgz` archive to `manifests`. - - 1. 
Create an `embedded-cluster.yaml` file with the following default Embedded Cluster Config:
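      A representative config is sketched below. The `version` value is illustrative only; pin it to the Embedded Cluster release that you are actually targeting:

      ```yaml
      apiVersion: embeddedcluster.replicated.com/v1beta1
      kind: Config
      spec:
        # Illustrative Embedded Cluster version; use a real release version
        version: 2.1.3+k8s-1.30
      ```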
      **What is the Embedded Cluster Config?**

      The Embedded Cluster Config is required to install with Embedded Cluster.
      For more information, see [Using Embedded Cluster](/vendor/embedded-overview).

   1. Create a new YAML file. In this file, configure the KOTS HelmChart custom resource by completing the workflow in [Configuring the HelmChart Custom Resource](helm-native-v2-using). A hedged sketch of a HelmChart custom resource is shown after the note below.
      **What is the KOTS HelmChart custom resource?**

      The KOTS HelmChart custom resource is required to install Helm charts with KOTS and Embedded Cluster. As part of configuring the KOTS HelmChart custom resource, you will rewrite image names and add image pull secrets to allow your application images to be accessed through the Replicated proxy registry.
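      As a minimal sketch only (the chart name, version, and image paths are placeholders; the workflow in the linked topic is authoritative):

      ```yaml
      apiVersion: kots.io/v1beta2
      kind: HelmChart
      metadata:
        name: samplechart
      spec:
        chart:
          # Must match the name and version in the chart's Chart.yaml
          name: samplechart
          chartVersion: 1.0.0
        values:
          image:
            # Pull through the Replicated proxy registry, or through the
            # user's local registry in air gap installations
            registry: '{{repl HasLocalRegistry | ternary LocalRegistryHost "proxy.replicated.com" }}'
            repository: '{{repl HasLocalRegistry | ternary LocalRegistryNamespace "proxy/my-app/quay.io/my-org" }}/samplechart'
          pullSecrets:
            - name: '{{repl ImagePullSecretName }}'
      ```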
    - - 1. If your application is deployed as multiple Helm charts, repeat the step above to add a separate HelmChart custom resource for each Helm chart archive in the release. - - 1. If there are values in any of your Helm charts that need to be set for the installation to succeed, you can set those values using the `values` key in the corresponding HelmChart custom resource. See [Setting Helm Values with KOTS](/vendor/helm-optional-value-keys). - - This is a temporary measure to ensure the values get passed to the Helm chart during installation until you configure the Admin Console Config screen in a later onboarding task. If your default Helm values are sufficient for installation, you can skip this step. - - 1. If your application requires that certain components are deployed before the application and as part of the Embedded Cluster itself, then update the Embedded Cluster Config to add [extensions](/reference/embedded-config#extensions). Extensions allow you to provide Helm charts that are deployed before your application. For example, one situation where this is useful is if you want to ship an ingress controller because Embedded Cluster does not include one. - - For more information, see [extensions](/reference/embedded-config#extensions) in _Embedded Cluster Config_. - -1. From the `manifests` directory, create a release and promote it to the Unstable channel. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Managing Releases with the CLI](releases-creating-cli). - - ```bash - replicated release create --yaml-dir . --promote Unstable - ``` - -1. Install the release in your development environment to test: - - 1. Install with Embedded Cluster on a VM. See [Online Installation with Embedded Cluster](/enterprise/installing-embedded). - - 1. (Optional) Install in an existing cluster with KOTS. See [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster). - -After successfully installing the initial release on a VM with Embedded Cluster (and optionally in an existing cluster with KOTS), go to the next task. You will continue to iterate throughout the rest of the onboarding process by creating and promoting new releases, then upgrading to the new version in your development environment. - -### Task 5: Customize the KOTS Admin Console {#admin-console} - -Configure the KOTS Application custom resource to add an application name, icon, and status informers. The name and icon will be displayed in the Admin Console and the Replicated Download Portal. The status informers will be used to display the application status on the Admin Console dashboard. - -To configure the KOTS Application custom resource: - -1. In your `manifests` directory, create a new `kots-app.yaml` file. - -1. In the `kots-app.yaml` file, add the [KOTS Application](/reference/custom-resource-application) custom resource YAML and set the `title`, `icon`, and `statusInformers` fields. - - **Example:** - - ```yaml - apiVersion: kots.io/v1beta1 - kind: Application - metadata: - name: gitea - spec: - title: Gitea - # Base64 encoded image string - icon: fyJINrigNkt5VsRiub9nXICdsYyVd2NcVvA3ScE5t2rb5JuEeyZnAhmLt9NK63vX1O - statusInformers: - - deployment/gitea - ``` - For more information, see: - * [Customizing the Application Icon](/vendor/admin-console-customize-app-icon) - * [Enabling and Understanding Application Status](/vendor/insights-app-status) - * [Application](/reference/custom-resource-application) -
   **Can I preview the icon before installing the release?**

   Yes. The Vendor Portal includes an **Application icon preview** in the **Help** pane on the **Edit release** page.

   ![Icon preview](/images/icon-preview.png)

   [View a larger version of this image](/images/icon-preview.png)
    - -1. - -1. - -### Task 6: Set Up the Admin Console Config Screen and Map to Helm Values - -The KOTS Admin Console Config screen is used to collect required and optional application configuration values from your users. User-supplied values provided on the Config screen can be mapped to your Helm values. - -Before you begin this task, you can complete the [Set Helm Values with KOTS](/vendor/tutorial-config-setup) tutorial to learn how to map user-supplied values from the Admin Console Config screen to a Helm chart. - -:::note -Setting up the Admin Console config screen can include the use of various types of input fields, conditional statements, and KOTS template functions. Depending on your application's configuration options, Replicated recommends that you allow about two to three hours for configuring the Config custom resource and testing the Admin Console config screen. -::: - -To set up the Admin Console Config screen for your application: - -1. In your `manifests` directory, create a new file named `kots-config.yaml`. - -1. In `kots-config.yaml`, add the KOTS Config custom resource. Configure the KOTS Config custom resource based on the values that you need to collect from users. - - **Example:** - - ```yaml - apiVersion: kots.io/v1beta1 - kind: Config - metadata: - name: my-application - spec: - groups: - - name: example_group - title: Example Group - items: - - name: example_item - title: Example Item - type: text - default: "Hello World" - ``` - - For more information, see: - * [Creating and Editing Configuration Fields](/vendor/admin-console-customize-config-screen) - * [Using Conditional Statements in Configuration Fields](/vendor/config-screen-conditional) - * [Config](/reference/custom-resource-config) - -
    - -
    - Can I preview the Admin Console config screen before installing the release? - - Yes. The Vendor Portal includes a **Config preview** in the **Help** pane on the **Edit release** page. - - For example: - - ![Config preview](/images/config-preview.png) - - [View a larger version of this image](/images/config-preview.png) -
    - -1. - -1. - -1. In `manifests`, open the KOTS HelmChart custom resource that you configured in a previous step. Configure the `values` key of the HelmChart custom resource to map the fields in the KOTS Config custom resource to your Helm values. - - For more information, see: - * [Mapping User-Supplied Values](/vendor/config-screen-map-inputs) - * [Tutorial: Set Helm Chart Values with KOTS](/vendor/tutorial-config-setup) - * [Setting Helm Values with KOTS](/vendor/helm-optional-value-keys) - * [`values`](/reference/custom-resource-helmchart-v2#values) in _HelmChart v2_ - -1. - -1. - -1. Continue to create and test new releases with new config fields until you are ready to move on to the next task. - -### Task 7: Define Preflight Checks - -In the next two tasks, you will add specs for _preflight checks_ and _support bundles_. - -Preflight checks and support bundles are provided by the Troubleshoot open source project, which is maintained by Replicated. Troubleshoot is a kubectl plugin that provides diagnostic tools for Kubernetes applications. For more information, see the open source [Troubleshoot](https://troubleshoot.sh/docs/) documentation. - -Preflight checks and support bundles analyze data from customer environments to provide insights that help users to avoid or troubleshoot common issues with an application: -* **Preflight checks** run before an application is installed to check that the customer environment meets the application requirements. -* **Support bundles** collect troubleshooting data from customer environments to help users diagnose problems with application deployments. - -:::note -Before you begin this task, you can complete the [Add Preflight Checks to a Helm Chart](/vendor/tutorial-preflight-helm-setup) tutorial to learn how to add a preflight spec to a Helm chart in a Kubernetes secret and run the preflight checks before installation. -::: - -To define preflight checks for your application: - -1. In your Helm chart `templates` directory, add a Kubernetes Secret that includes a preflight spec. For more information, see [Defining Preflight Checks](/vendor/preflight-defining). For examples, see [Example Preflight Specs](/vendor/preflight-examples). - :::note - If your application is deployed as multiple Helm charts, add the Secret to the `templates` directory for the chart that is installed first. - ::: - -1. Update dependencies and package the chart as a `.tgz` file: - - - -1. Move the `.tgz` file to the `manifests` directory. - -1. - -1. - - Preflight checks run automatically during installation. - -1. Continue to create and test new releases with additional preflight checks until you are ready to move on to the next task. - -### Task 8: Add a Support Bundle Spec - -To add the default support bundle spec to your application: - -1. In your Helm chart `templates` directory, add the following YAML to a Kubernetes Secret to enable the default support bundle spec for your application: - - ```yaml - apiVersion: v1 - kind: Secret - metadata: - labels: - troubleshoot.sh/kind: support-bundle - name: example - stringData: - support-bundle-spec: | - apiVersion: troubleshoot.sh/v1beta2 - kind: SupportBundle - metadata: - name: support-bundle - spec: - collectors: [] - analyzers: [] - ``` - :::note - If your application is installed as multiple Helm charts, you can optionally create separate support bundle specs in each chart. The specs are automatically merged when a support bundle is generated. 
   Alternatively, continue with a single support bundle spec, and then optionally revisit how you organize your support bundle specs after you finish onboarding.
   :::

1. (Recommended) At a minimum, Replicated recommends that all support bundle specs include the `logs` collector. This collects logs from running Pods in the cluster.

   **Example:**

   ```yaml
   apiVersion: v1
   kind: Secret
   metadata:
     name: example
     labels:
       troubleshoot.sh/kind: support-bundle
   stringData:
     support-bundle-spec: |-
       apiVersion: troubleshoot.sh/v1beta2
       kind: SupportBundle
       metadata:
         name: example
       spec:
         collectors:
           - logs:
               selector:
                 - app.kubernetes.io/name=myapp
               namespace: {{ .Release.Namespace }}
               limits:
                 maxAge: 720h
                 maxLines: 10000
   ```

   For more information, see:
   * [Adding and Customizing Support Bundles](/vendor/support-bundle-customizing)
   * [Example Support Bundle Specs](/vendor/support-bundle-examples)
   * [Pod Logs](https://troubleshoot.sh/docs/collect/logs/) in the Troubleshoot documentation.

1. (Recommended) Ensure that any preflight checks that you added are also included in your support bundle spec. This ensures that support bundles collect at least the same information collected when running preflight checks.

1. Update dependencies and package the chart as a `.tgz` file:

1. Move the `.tgz` file to the `manifests` directory.

1.

1.

   For information about how to generate support bundles, see [Generating Support Bundles](/vendor/support-bundle-generating).

1. (Optional) Customize the support bundle spec by adding additional collectors and analyzers.

### Task 9: Alias Replicated Endpoints with Your Own Domains

Your customers are exposed to several Replicated domains by default. Replicated recommends that you use custom domains to unify the customer's experience with your brand and simplify security reviews.

For more information, see [Using Custom Domains](/vendor/custom-domains-using).

## Next Steps

After completing the main onboarding tasks, Replicated recommends that you also complete the following additional tasks to integrate other Replicated features with your application. You can complete these next recommended tasks in any order and at your own pace.

### Add Support for Helm Installations

Existing KOTS releases that include one or more Helm charts can be installed with the Helm CLI; it is not necessary to create and manage separate releases or channels for each installation method.

To enable Helm installations for Helm charts distributed with Replicated, the only extra step is to add a Secret to your chart to authenticate with the Replicated proxy registry.

This is the same Secret that is passed to KOTS in the HelmChart custom resource using `'{{repl ImagePullSecretName }}'`, which you did as part of [Task 4: Create and Install the Initial Release](#first-release). So, whereas this Secret is created automatically for KOTS and Embedded Cluster installations, you need to create it and add it to your Helm chart for Helm installations.

:::note
Before you test Helm installations for your application, you can complete the [Deploy a Helm Chart with KOTS and the Helm CLI](tutorial-kots-helm-setup) tutorial to learn how to install a single release with both KOTS and Helm.
:::

To support and test Helm installations:

1. Follow the steps in [Using the Proxy Registry with Helm Installations](/vendor/helm-image-registry) to authenticate with the Replicated proxy registry by creating a Secret with `type: kubernetes.io/dockerconfigjson` in your Helm chart, along the lines of the sketch below.
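   The linked topic is authoritative; as a hedged sketch, such a Secret typically templates the pull credentials that the Replicated registry injects at install time. The template file name and Secret name are illustrative:

   ```yaml
   # templates/replicated-pull-secret.yaml
   apiVersion: v1
   kind: Secret
   metadata:
     name: {{ .Release.Name }}-replicated-pull-secret
   type: kubernetes.io/dockerconfigjson
   data:
     # For Helm CLI installs through the Replicated registry, the license's
     # pull credentials are injected into .Values.global.replicated.dockerconfigjson
     .dockerconfigjson: {{ .Values.global.replicated.dockerconfigjson }}
   ```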
1. Update dependencies and package the chart as a `.tgz` file:

1. Add the `.tgz` file to a release. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Managing Releases with the CLI](releases-creating-cli).

1. Install the release in a cluster with the Helm CLI to test your changes. For more information, see [Installing with Helm](/vendor/install-with-helm).

### Add Support for Air Gap Installations

Replicated Embedded Cluster and KOTS support installations in _air gap_ environments with no outbound internet access. Users can install with Embedded Cluster and KOTS in air gap environments by providing air gap bundles that contain the required images for the installers and for your application.

:::note
Replicated also offers Alpha support for air gap installations with Helm. If you are interested in trying Helm air gap installations and providing feedback, please reach out to your account rep to enable this feature.
:::

To add support for air gap installations:

1. If there are any images for your application that are not listed in your Helm chart, list these images in the `additionalImages` attribute of the KOTS Application custom resource. This ensures that the images are included in the air gap bundle for the release. One common use case for this is applications that use Kubernetes Operators. See [Define Additional Images](/vendor/operator-defining-additional-images).

1. In the KOTS HelmChart custom resource `builder` key, pass any values that are required in order for `helm template` to yield all the images needed to successfully install your application. See [Packaging Air Gap Bundles for Helm Charts](/vendor/helm-packaging-airgap-bundles).

   :::note
   If the default values in your Helm chart already enable all the images needed to successfully deploy, then you do not need to configure the `builder` key.
   :::
    - How do I know if I need to configure the `builder` key? - - When building an air gap bundle, the Vendor Portal templates the Helm charts in a release with `helm template` in order to detect the images that need to be included in the bundle. Images yielded by `helm template` are included in the bundle for the release. - - For many applications, running `helm template` with the default values would not yield all the images required to install. In these cases, vendors can pass the additional values in the `builder` key to ensure that the air gap bundle includes all the necessary images. -
1. If you have not done so already as part of [Task 4: Create and Install the Initial Release](#first-release), ensure that the `values` key in the KOTS HelmChart custom resource correctly rewrites image names for air gap installations. This is done using the KOTS HasLocalRegistry, LocalRegistryHost, and LocalRegistryNamespace template functions to render the location of the given image in the user's own local registry.

   For more information, see [Rewrite Image Names](/vendor/helm-native-v2-using#rewrite-image-names) in _Configuring the HelmChart Custom Resource v2_.

1. Create and promote a new release with your changes. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Managing Releases with the CLI](releases-creating-cli).

1. In the [Vendor Portal](https://vendor.replicated.com), go to the channel where the release was promoted to build the air gap bundle. Do one of the following:
   * If the **Automatically create airgap builds for newly promoted releases in this channel** setting is enabled on the channel, watch for the build status to complete.
   * If automatic air gap builds are not enabled, go to the **Release history** page for the channel and build the air gap bundle manually.

1. Create a customer with the **Airgap Download Enabled** entitlement enabled so that you can test air gap installations. See [Creating and Managing Customers](/vendor/releases-creating-customer).

1. Download the Embedded Cluster air gap installation assets, then install with Embedded Cluster on an air gap VM to test. See [Installing in Air Gap Environments with Embedded Cluster](/enterprise/installing-embedded-air-gap).

1. (Optional) Download the `.airgap` bundle for the release and the air gap bundle for the KOTS Admin Console. You can also download both bundles from the Download Portal for the target customer. Then, install in an air gap existing cluster to test. See [Air Gap Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster-airgapped).

1. (Optional) Follow the steps in [Installing and Updating with Helm in Air Gap Environments (Alpha)](/vendor/helm-install-airgap) to test air gap installation with Helm.

   :::note
   Air gap Helm installations are an Alpha feature. If you are interested in trying Helm air gap installations and providing feedback, please reach out to your account rep to enable this feature.
   :::

### Add Roles for Multi-Node Clusters in Embedded Cluster Installations

The Embedded Cluster Config supports roles for multi-node clusters. One or more roles can be selected and assigned to a node when it is joined to the cluster. Node roles can be used to determine which nodes run the Kubernetes control plane, and to assign application workloads to particular nodes.

For more information, see [roles](/reference/embedded-config#roles) in _Embedded Cluster Config_.

### Add and Map License Entitlements

You can add custom license entitlements for your application in the Vendor Portal. Custom license fields are useful when there is entitlement information that applies to a subset of customers.
For example, you can use entitlements to: -* Limit the number of active users permitted -* Limit the number of nodes a customer is permitted on their cluster -* Identify a customer on a "Premium" plan that has access to additional features or functionality not available with your base plan - -For more information about how to create and assign custom entitlements in the Vendor Portal, see [Managing Customer License Fields](/vendor/licenses-adding-custom-fields) and [Creating and Managing Customers](/vendor/releases-creating-customer). - -#### Map Entitlements to Helm Values - -You can map license entitlements to your Helm values using KOTS template functions. This can be useful when you need to set certain values based on the user's license information. For more information, see [Using KOTS Template Functions](/vendor/helm-optional-value-keys#using-kots-template-functions) in _Setting Helm Values with KOTS_. - -#### Query Entitlements Before Installation and at Runtime - -You can add logic to your application to query license entitlements both before deployment and at runtime. For example, you might want to add preflight checks that verify a user's entitlements before installing. Or, you can expose additional product functionality dynamically at runtime based on a customer's entitlements. - -For more information, see: -* [Querying Entitlements with the Replicated SDK API](/vendor/licenses-reference-sdk) -* [Checking Entitlements in Preflights with KOTS Template Functions](/vendor/licenses-referencing-fields) - -### Add Application Links to the Admin Console Dashboard - -You can add the Kubernetes SIG Application custom resource to your release to add a link to your application from the Admin Console dashboard. This makes it easier for users to access your application after installation. - -You can also configure the Kubernetes SIG Application resource to add links to other resources like documentation or dashboards. - -For more information, see [Adding Application Links to the Dashboard](/vendor/admin-console-adding-buttons-links). - -### Update the Preflight and Support Bundle Specs - -After adding basic specs for preflights and support bundles, you can continue to add more collectors and analyzers as needed. A minimal sketch of an extended spec follows the recommendations below. - -Consider the following recommendations and best practices: - -* Revisit your preflight and support bundle specs when new support issues arise that are not covered by your existing specs. - -* Your support bundles should include all of the same collectors and analyzers that are in your preflight checks. This ensures that support bundles include all the necessary troubleshooting information, including any failures in preflight checks. - -* Your support bundles will most likely need to include other collectors and analyzers that are not in your preflight checks. This is because some of the information used for troubleshooting (such as logs) is not necessary when running preflight checks before installation. - -* If your application is installed as multiple Helm charts, you can optionally add separate support bundle specs in each chart. This can make it easier to keep the specs up-to-date and to avoid merge conflicts that can be caused when multiple team members contribute to a single, large support bundle spec. When an application has multiple support bundle specs, the specs are automatically merged when generating a support bundle so that only a single support bundle is provided to the user.
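As referenced above, here is a minimal sketch of a support bundle spec that adds a log collector and a cluster version analyzer. The app name, label selector, and version threshold are hypothetical placeholders:

```yaml
# Support Bundle custom resource (hypothetical selectors and thresholds)
apiVersion: troubleshoot.sh/v1beta2
kind: SupportBundle
metadata:
  name: my-app
spec:
  collectors:
    # collect logs from pods matching a hypothetical app label
    - logs:
        selector:
          - app=my-app
  analyzers:
    # fail if the cluster runs an unsupported Kubernetes version
    - clusterVersion:
        outcomes:
          - fail:
              when: "< 1.25.0"
              message: This application requires Kubernetes 1.25.0 or later.
          - pass:
              message: The Kubernetes version meets the requirement.
```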
- -The documentation for the open-source Troubleshoot project includes the full list of available collectors and analyzers that you can use. See [All Collectors](https://troubleshoot.sh/docs/collect/all/) and the [Analyze](https://troubleshoot.sh/docs/analyze/) section in the Troubleshoot documentation. - -You can also view common examples of collectors and analyzers used in preflight checks and support bundles in [Preflight Spec Examples](preflight-examples) and [Support Bundle Spec Examples](support-bundle-examples). - -### Configure Backup and Restore - -Enable backup and restore with Velero for your application so that users can back up and restore their KOTS Admin Console and application data. - -There are different steps to configure backup and restore for Embedded Cluster and for existing cluster installations with KOTS: -* To configure the disaster recovery feature for Embedded Cluster, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery). -* To configure the snapshots feature for existing cluster KOTS installations, see [Configuring Snapshots](snapshots-configuring-backups). - -### Add Custom Metrics - -In addition to the built-in insights displayed in the Vendor Portal by default (such as uptime and time to install), you can also configure custom metrics to measure instances of your application running in customer environments. Custom metrics can be collected for application instances running in online or air gap environments using the Replicated SDK. - -For more information, see [Configuring Custom Metrics](/vendor/custom-metrics). - -### Integrate with CI/CD - -Replicated recommends that teams integrate the Replicated Platform into their existing development and production CI/CD workflows. This can be useful for automating the processes of creating new releases, promoting releases, and testing releases with the Replicated Compatibility Matrix. - -For more information, see: -* [About Integrating with CI/CD](/vendor/ci-overview) -* [About Compatibility Matrix](/vendor/testing-about) -* [Recommended CI/CD Workflows](/vendor/ci-workflows) - -### Customize Release Channels - -By default, the Vendor Portal includes Unstable, Beta, and Stable channels. You can customize the channels in the Vendor Portal based on your application needs. - -Consider the following recommendations: -* Use the Stable channel for your primary release cadence. Releases should be promoted to the Stable channel only as frequently as your average customer can consume new releases. Typically, this is no more than monthly. However, this cadence varies depending on the customer base. -* If you have a SaaS product, you might want to create an "Edge" channel where you promote the latest SaaS releases. -* You can consider a “Long Term Support” channel where you promote new releases less frequently and support those releases for longer. -* It can be useful to create channels for each feature branch so that internal teams reviewing a PR can easily get the installation artifacts as well as review the code. You can automate channel creation as part of a pipeline or Makefile. - -For more information, see: -* [About Channels and Releases](/vendor/releases-about) -* [Creating and Editing Channels](/vendor/releases-creating-channels) - -### Write Your Documentation - -Before distributing your application to customers, ensure that your documentation is up-to-date.
In particular, be sure to update the installation documentation to include the procedures and requirements for installing with Embedded Cluster, Helm, and any other installation methods that you support. - -For guidance on how to get started with documentation for applications distributed with Replicated, including key considerations, examples, and templates, see [Writing Great Documentation for On-Prem Software Distributed with Replicated](https://www.replicated.com/blog/writing-great-documentation-for-on-prem-software-distributed-with-replicated) in the Replicated blog. - -================ -File: docs/vendor/replicated-sdk-airgap.mdx -================ -# Installing the SDK in Air Gap Environments - -This topic explains how to install the Replicated SDK in air gap environments by enabling air gap mode. - -## Overview - -The Replicated SDK has an _air gap mode_ that allows it to run in environments with no outbound internet access. When installed in air gap mode, the SDK does not attempt to connect to the internet. This avoids any failures that would occur when the SDK is unable to make outbound requests in air gap environments. - -Air gap mode is enabled when `isAirgap: true` is set in the values for the SDK Helm chart. For more information, see [Install the SDK in Air Gap Mode](#install) below. Allowing air gap mode to be controlled with the `isAirgap` value means that vendors and enterprise customers do not need to rely on air gap environments being automatically detected, which is unreliable and error-prone. The `isAirgap` value also allows the SDK to be installed in air gap mode even if the instance can access the internet. - -## Differences in Air Gap Mode - -Air gap mode differs from non-air gap installations of the SDK in the following ways: -* The SDK stores instance telemetry and custom metrics in a Kubernetes Secret in the customer environment, rather than attempting to send telemetry and custom metrics back to the Replicated Vendor Portal. The telemetry and custom metrics stored in the Secret are collected whenever a support bundle is generated in the environment, and are reported when the support bundle is uploaded to the Vendor Portal. For more information about telemetry for air gap instances, see [Collecting Telemetry for Air Gap Instances](/vendor/telemetry-air-gap). -* The SDK returns an empty array (`[]`) for any requests to check for updates using the [`/api/v1/app/updates`](/reference/replicated-sdk-apis#get-appupdates) SDK API endpoint. This is because the SDK is not able to receive updates from the Vendor Portal when running in air gap environments. -* Instance tags cannot be updated with the [`/app/instance-tags`](/reference/replicated-sdk-apis#post-appinstance-tags) SDK API endpoint. - -In air gap mode, the SDK can still make requests to SDK API endpoints that do not require outbound internet access, such as the [`license`](/reference/replicated-sdk-apis#license) endpoints and the [`/app/info`](/reference/replicated-sdk-apis#get-appinfo) endpoint. However, these endpoints will return whatever values were injected into the SDK when the chart was most recently pulled. These values might not match the latest information available in the Vendor Portal because the SDK cannot receive updates when running in air gap environments. - -## Install the SDK in Air Gap Mode {#install} - -This section describes how to install the Replicated SDK in air gap mode with the Helm CLI and with Replicated KOTS. 
- -### Helm CLI - -When the SDK is installed with the Helm CLI, air gap mode can be enabled by passing `--set replicated.isAirgap=true` with the Helm CLI installation command. - -For example: - -``` -helm install gitea oci://registry.replicated.com/my-app/gitea --set replicated.isAirgap=true -``` - -For more information about Helm CLI installations with Replicated, see [Installing with Helm](/vendor/install-with-helm). For more information about setting Helm values with the `helm install` command, see [Helm Install](https://helm.sh/docs/helm/helm_install/) in the Helm documentation. - -:::note -Replicated does not provide air gap bundles for applications installed with the Helm CLI. Air gap bundles are a feature of KOTS. -::: - -### KOTS - -When the SDK is installed by KOTS in an air gap environment, KOTS automatically sets `isAirgap: true` in the SDK Helm chart values to enable air gap mode. No additional configuration is required. - -================ -File: docs/vendor/replicated-sdk-customizing.md -================ -# Customizing the Replicated SDK - -This topic describes various ways to customize the Replicated SDK, including customizing RBAC, setting environment variables, adding tolerations, and more. - -## Customize RBAC for the SDK - -This section describes role-based access control (RBAC) for the Replicated SDK, including the default RBAC, minimum RBAC requirements, and how to install the SDK with custom RBAC. - -### Default RBAC - -The SDK creates default Role, RoleBinding, and ServiceAccount objects during installation. The default Role allows the SDK to get, list, and watch all resources in the namespace, to create Secrets, and to update the `replicated`, `replicated-instance-report`, and `replicated-custom-app-metrics-report` Secrets: - -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - {{- include "replicated.labels" . | nindent 4 }} - name: replicated-role -rules: -- apiGroups: - - '*' - resources: - - '*' - verbs: - - 'get' - - 'list' - - 'watch' -- apiGroups: - - '' - resources: - - 'secrets' - verbs: - - 'create' -- apiGroups: - - '' - resources: - - 'secrets' - verbs: - - 'update' - resourceNames: - - replicated - - replicated-instance-report - - replicated-custom-app-metrics-report -``` - -### Minimum RBAC Requirements - -The SDK requires the following minimum RBAC permissions: -* Create Secrets. -* Get and update Secrets named `replicated`, `replicated-instance-report`, and `replicated-custom-app-metrics-report`. -* The SDK requires the following minimum RBAC permissions for status informers: - * If you defined custom status informers, then the SDK must have permissions to get, list, and watch all the resources listed in the `replicated.statusInformers` array in your Helm chart `values.yaml` file (see the sketch after this list). - * If you did _not_ define custom status informers, then the SDK must have permissions to get, list, and watch the following resources: - * Deployments - * DaemonSets - * Ingresses - * PersistentVolumeClaims - * StatefulSets - * Services - * For any Ingress resources used as status informers, the SDK requires `get` permissions for the Service resources listed in the `backend.Service.Name` field of the Ingress resource. - * For any DaemonSet and StatefulSet resources used as status informers, the SDK requires `list` permissions for pods in the namespace. - * For any Service resources used as status informers, the SDK requires `get` permissions for Endpoint resources with the same name as the service.
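For the custom status informer case noted in the list above, the following is a minimal sketch of the `replicated.statusInformers` array in a Helm chart `values.yaml` file. The resource names are hypothetical; each entry takes the form `<kind>/<name>`:

```yaml
# Helm chart values.yaml (hypothetical resource names)
replicated:
  statusInformers:
    - deployment/my-app
    - service/my-app
```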
- - The Replicated Vendor Portal uses status informers to provide application status data. For more information, see [Helm Installations](/vendor/insights-app-status#helm-installations) in _Enabling and Understanding Application Status_. -### Install the SDK with Custom RBAC - -#### Custom ServiceAccount - -To use the SDK with custom RBAC permissions, provide the name for a custom ServiceAccount object during installation. When a service account is provided, the SDK uses the RBAC permissions granted to the service account and does not create the default Role, RoleBinding, or ServiceAccount objects. - -To install the SDK with custom RBAC: - -1. Create custom Role, RoleBinding, and ServiceAccount objects. The Role must meet the minimum requirements described in [Minimum RBAC Requirements](#minimum-rbac-requirements) above. -1. During installation, provide the name of the service account that you created by including `--set replicated.serviceAccountName=CUSTOM_SERVICEACCOUNT_NAME`. - - **Example**: - - ``` - helm install wordpress oci://registry.replicated.com/my-app/beta/wordpress --set replicated.serviceAccountName=mycustomserviceaccount - ``` - - For more information about installing with Helm, see [Installing with Helm](/vendor/install-with-helm). - -#### Custom ClusterRole - -To use the SDK with an existing ClusterRole, provide the name for a custom ClusterRole object during installation. When a cluster role is provided, the SDK uses the RBAC permissions granted to the cluster role and does not create the default RoleBinding. Instead, the SDK creates a ClusterRoleBinding as well as a ServiceAccount object. - -To install the SDK with a custom ClusterRole: - -1. Create a custom ClusterRole object. The ClusterRole must meet at least the minimum requirements described in [Minimum RBAC Requirements](#minimum-rbac-requirements) above. However, it can also provide additional permissions that can be used by the SDK, such as listing cluster Nodes. -1. During installation, provide the name of the cluster role that you created by including `--set replicated.clusterRole=CUSTOM_CLUSTERROLE_NAME`. - - **Example**: - - ``` - helm install wordpress oci://registry.replicated.com/my-app/beta/wordpress --set replicated.clusterRole=mycustomclusterrole - ``` - - For more information about installing with Helm, see [Installing with Helm](/vendor/install-with-helm). - -## Set Environment Variables {#env-var} - -The Replicated SDK provides a `replicated.extraEnv` value that allows users to set additional environment variables for the deployment that are not exposed as Helm values. - -This ensures that users can set the environment variables that they require without the SDK Helm chart needing to be modified to expose the values. For example, if the SDK is running behind an HTTP proxy server, then the user could set `HTTP_PROXY` or `HTTPS_PROXY` environment variables to provide the hostname or IP address of their proxy server. - -To add environment variables to the Replicated SDK deployment, include the `replicated.extraEnv` array in your Helm chart `values.yaml` file. The `replicated.extraEnv` array accepts a list of environment variables in the following format: - -```yaml -# Helm chart values.yaml - -replicated: - extraEnv: - - name: ENV_VAR_NAME - value: ENV_VAR_VALUE -``` - -:::note -If the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` variables are configured with the [kots install](/reference/kots-cli-install) command, these variables will also be set automatically in the Replicated SDK. 
-::: - -**Example**: - -```yaml -# Helm chart values.yaml - -replicated: - extraEnv: - - name: MY_ENV_VAR - value: my-value - - name: MY_ENV_VAR_2 - value: my-value-2 -``` - -## Custom Certificate Authority - -When installing the Replicated SDK behind a proxy server that terminates TLS and injects a custom certificate, you must provide the CA to the SDK. This can be done by storing the CA in a ConfigMap or a Secret prior to installation and providing appropriate values during installation. - -### Using a ConfigMap - -To use a CA stored in a ConfigMap: - -1. Create a ConfigMap with the CA as the data value. Note that the name of the ConfigMap and the data key can be anything. - ```bash - kubectl -n NAMESPACE create configmap private-ca --from-file=ca.crt=./ca.crt - ``` - Where `NAMESPACE` is the namespace where the SDK is installed. -1. Add the name of the ConfigMap to the values file: - ```yaml - replicated: - privateCAConfigmap: private-ca - ``` - -:::note -If the `--private-ca-configmap` flag is used with the [kots install](/reference/kots-cli-install) command, this value will be populated in the Replicated SDK automatically. -::: - -### Using a Secret - -To use a CA stored in a Secret: - -1. Create a Secret with the CA as a data value. Note that the name of the Secret and the key can be anything. - ```bash - kubectl -n NAMESPACE create secret generic private-ca --from-file=ca.crt=./ca.crt - ``` -1. Add the name of the Secret and the key to the values file: - ```yaml - replicated: - privateCASecret: - name: private-ca - key: ca.crt - ``` - -## Add Tolerations - -The Replicated SDK provides a `replicated.tolerations` value that allows users to add custom tolerations to the deployment. For more information about tolerations, see [Taints and Tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) in the Kubernetes documentation. - -To add tolerations to the Replicated SDK deployment, include the `replicated.tolerations` array in your Helm chart `values.yaml` file. The `replicated.tolerations` array accepts a list of tolerations in the following format: - -```yaml -# Helm chart values.yaml - -replicated: - tolerations: - - key: "key" - operator: "Equal" - value: "value" - effect: "NoSchedule" -``` - -## Add Affinity - -The Replicated SDK provides a `replicated.affinity` value that allows users to add custom affinity to the deployment. For more information about affinity, see [Affinity and anti-affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) in the Kubernetes documentation. - -To add affinity to the Replicated SDK deployment, include the `replicated.affinity` map in your Helm chart `values.yaml` file. The `replicated.affinity` map accepts a standard Kubernetes affinity object in the following format: - -```yaml -# Helm chart values.yaml - -replicated: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: production/node-pool - operator: In - values: - - private-node-pool -``` - -## Add Custom Labels - -With the Replicated SDK version 1.1.0 and later, you can pass custom labels to the Replicated SDK Helm Chart by setting the `replicated.commonLabels` and `replicated.podLabels` Helm values in your Helm chart. - -### Requirement - -The `replicated.commonLabels` and `replicated.podLabels` values are available with the Replicated SDK version 1.1.0 and later. - -### commonLabels - -The `replicated.commonLabels` value allows you to add one or more labels to all resources created by the SDK chart.
- -For example: - -```yaml -# Helm chart values.yaml - -replicated: - commonLabels: - environment: production - team: platform -``` - -### podLabels - -The `replicated.podLabels` value allows you to add pod-specific labels to the pod template. - -For example: - -```yaml -# Helm chart values.yaml - -replicated: - podLabels: - monitoring: enabled - custom.company.io/pod-label: value -``` - -================ -File: docs/vendor/replicated-sdk-development.mdx -================ -import IntegrationMode from "../partials/replicated-sdk/_integration-mode-install.mdx" - -# Developing Against the SDK API - -This topic describes how to develop against the SDK API to test changes locally. It includes information about installing the SDK in integration mode and port forwarding the SDK API service to your local machine. For more information about the SDK API, see [Replicated SDK API](/reference/replicated-sdk-apis). - -## Install the SDK in Integration Mode - - - -## Port Forwarding the SDK API Service {#port-forward} - -After the Replicated SDK is installed and initialized in a cluster, the Replicated SDK API is exposed at `replicated:3000`. You can access the SDK API for testing by forwarding port 3000 to your local machine. - -To port forward the SDK API service to your local machine: - -1. Run the following command to port forward to the SDK API service: - - ```bash - kubectl port-forward service/replicated 3000 - ``` - ``` - Forwarding from 127.0.0.1:3000 -> 3000 - Forwarding from [::1]:3000 -> 3000 - ``` - -1. With the port forward running, test the SDK API endpoints as desired. For example: - - ```bash - curl localhost:3000/api/v1/license/fields/expires_at - curl localhost:3000/api/v1/license/fields/{field} - ``` - - For more information, see [Replicated SDK API](/reference/replicated-sdk-apis). - - :::note - When the SDK is installed in integration mode, requests to the `license` endpoints use your actual development license data, while requests to the `app` endpoints use the default mock data. - ::: - -================ -File: docs/vendor/replicated-sdk-installing.mdx -================ -import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" -import KotsVerReq from "../partials/replicated-sdk/_kots-version-req.mdx" -import RegistryLogout from "../partials/replicated-sdk/_registry-logout.mdx" -import IntegrationMode from "../partials/replicated-sdk/_integration-mode-install.mdx" - -# Installing the Replicated SDK - -This topic describes the methods for distributing and installing the Replicated SDK. - -It includes information about how to install the SDK alongside Helm charts or Kubernetes manifest-based applications using the Helm CLI or a Replicated installer (Replicated KOTS, kURL, Embedded Cluster). It also includes information about installing the SDK as a standalone component in integration mode. - -For information about installing the SDK in air gap mode, see [Installing the SDK in Air Gap Environments](replicated-sdk-airgap). - -## Requirement - - - -## Install the SDK as a Subchart - -When included as a dependency of your application Helm chart, the SDK is installed as a subchart alongside the application. - -To install the SDK as a subchart: - -1. In your application Helm chart `Chart.yaml` file, add the YAML below to declare the SDK as a dependency. If your application is installed as multiple charts, declare the SDK as a dependency of the chart that customers install first. Do not declare the SDK in more than one chart. - - - -1. 
Update the `charts/` directory: - - ``` - helm dependency update - ``` - :::note - - ::: - -1. Package the Helm chart into a `.tgz` archive: - - ``` - helm package . - ``` - -1. Add the chart archive to a new release. For more information, see [Managing Releases with the CLI](/vendor/releases-creating-cli) or [Managing Releases with the Vendor Portal](/vendor/releases-creating-releases). - -1. (Optional) Add a KOTS HelmChart custom resource to the release to support installation with Embedded Cluster, KOTS, or kURL. For more information, see [Configuring the HelmChart Custom Resource v2](/vendor/helm-native-v2-using). - -1. Save and promote the release to an internal-only channel used for testing, such as the default Unstable channel. - -1. Install the release using Helm or a Replicated installer. For more information, see: - * [Online Installation with Embedded Cluster](/enterprise/installing-embedded) - * [Installing with Helm](/vendor/install-with-helm) - * [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster) - * [Online Installation with kURL](/enterprise/installing-kurl) - -1. Confirm that the SDK was installed by verifying that the `replicated` Deployment was created: - - ``` - kubectl get deploy --namespace NAMESPACE - ``` - Where `NAMESPACE` is the namespace in the cluster where the application and the SDK are installed. - - **Example output**: - - ``` - NAME READY UP-TO-DATE AVAILABLE AGE - my-app 1/1 1 1 35s - replicated 1/1 1 1 35s - ``` - -## Install the SDK Alongside a Kubernetes Manifest-Based Application {#manifest-app} - -For applications that use Kubernetes manifest files instead of Helm charts, the SDK Helm chart can be added to a release and then installed by KOTS alongside the application. - - - -To add the SDK Helm chart to a release for a Kubernetes manifest-based application: - -1. Install the Helm CLI using Homebrew: - - ``` - brew install helm - ``` - For more information, including alternative installation options, see [Install Helm](https://helm.sh/docs/intro/install/) in the Helm documentation. - -1. Download the `.tgz` chart archive for the SDK Helm chart: - - ``` - helm pull oci://registry.replicated.com/library/replicated --version SDK_VERSION - ``` - Where `SDK_VERSION` is the version of the SDK to install. For a list of available SDK versions, see the [replicated-sdk repository](https://github.com/replicatedhq/replicated-sdk/tags) in GitHub. - - The output of this command is a `.tgz` file with the naming convention `CHART_NAME-CHART_VERSION.tgz`. For example, `replicated-1.1.1.tgz`. - - For more information and additional options, see [Helm Pull](https://helm.sh/docs/helm/helm_pull/) in the Helm documentation. - -1. Add the SDK `.tgz` chart archive to a new release. For more information, see [Managing Releases with the CLI](/vendor/releases-creating-cli) or [Managing Releases with the Vendor Portal](/vendor/releases-creating-releases). - - The following shows an example of the SDK Helm chart added to a draft release for a standard manifest-based application: - - ![SDK Helm chart in a draft release](/images/sdk-kots-release.png) - - [View a larger version of this image](/images/sdk-kots-release.png) - -1. If one was not created automatically, add a KOTS HelmChart custom resource to the release. HelmChart custom resources have `apiVersion: kots.io/v1beta2` and `kind: HelmChart`.
    - - **Example:** - - ```yaml - apiVersion: kots.io/v1beta2 - kind: HelmChart - metadata: - name: replicated - spec: - # chart identifies a matching chart from a .tgz - chart: - # for name, enter replicated - name: replicated - # for chartversion, enter the version of the - # SDK Helm chart in the release - chartVersion: 1.1.1 - ``` - - As shown in the example above, the HelmChart custom resource requires the name and version of the SDK Helm chart that you added to the release: - * **`chart.name`**: The name of the SDK Helm chart is `replicated`. You can find the chart name in the `name` field of the SDK Helm chart `Chart.yaml` file. - * **`chart.chartVersion`**: The chart version varies depending on the version of the SDK that you pulled and added to the release. You can find the chart version in the `version` field of the SDK Helm chart `Chart.yaml` file. - - For more information about configuring the HelmChart custom resource to support KOTS installations, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about) and [HelmChart v2](/reference/custom-resource-helmchart-v2). - -1. Save and promote the release to an internal-only channel used for testing, such as the default Unstable channel. - -1. Install the release using a Replicated installer. For more information, see: - * [Online Installation with Embedded Cluster](/enterprise/installing-embedded) - * [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster) - * [Online Installation with kURL](/enterprise/installing-kurl) - -1. Confirm that the SDK was installed by verifying that the `replicated` Deployment was created: - - ``` - kubectl get deploy --namespace NAMESPACE - ``` - Where `NAMESPACE` is the namespace in the cluster where the application, the Admin Console, and the SDK are installed. - - **Example output**: - - ``` - NAME READY UP-TO-DATE AVAILABLE AGE - kotsadm 1/1 1 1 112s - my-app 1/1 1 1 28s - replicated 1/1 1 1 27s - ``` - -## Install the SDK in Integration Mode - - - -## Troubleshoot - -### 401 Unauthorized Error When Updating Helm Dependencies {#401} - -#### Symptom - -You see an error message similar to the following after adding the Replicated SDK as a dependency in your Helm chart and then running `helm dependency update`: - -``` -Error: could not download oci://registry.replicated.com/library/replicated-sdk: failed to authorize: failed to fetch oauth token: unexpected status from GET request to https://registry.replicated.com/v2/token?scope=repository%3Alibrary%2Freplicated-sdk%3Apull&service=registry.replicated.com: 401 Unauthorized -``` - -#### Cause - -When you run `helm dependency update`, Helm attempts to pull the Replicated SDK chart from the Replicated registry. An error can occur if you are already logged in to the Replicated registry with a license that has expired, such as when testing application releases. - -#### Solution - -To solve this issue: - -1. Run the following command to remove login credentials for the Replicated registry: - - ``` - helm registry logout registry.replicated.com - ``` - -1. Re-run `helm dependency update` for your Helm chart. - -================ -File: docs/vendor/replicated-sdk-overview.mdx -================ -import SDKOverview from "../partials/replicated-sdk/_overview.mdx" -import SdkValues from "../partials/replicated-sdk/_sdk-values.mdx" - -# About the Replicated SDK - -This topic provides an introduction to using the Replicated SDK with your application.
- -## Overview - - - -For more information about the Replicated SDK API, see [Replicated SDK API](/reference/replicated-sdk-apis). For information about developing against the SDK API locally, see [Developing Against the SDK API](replicated-sdk-development). - -## Limitations - -The Replicated SDK has the following limitations: - -* Some popular enterprise continuous delivery tools, such as ArgoCD and Pulumi, deploy Helm charts by running `helm template` then `kubectl apply` on the generated manifests, rather than running `helm install` or `helm upgrade`. The following limitations apply to applications installed by running `helm template` then `kubectl apply`: - - * The `/api/v1/app/history` SDK API endpoint always returns an empty array because there is no Helm history in the cluster. See [GET /app/history](/reference/replicated-sdk-apis#get-apphistory) in _Replicated SDK API_. - - * The SDK does not automatically generate status informers to report status data for installed instances of the application. To get instance status data, you must enable custom status informers by overriding the `replicated.statusInformers` Helm value. See [Enable Application Status Insights](/vendor/insights-app-status#enable-application-status-insights) in _Enabling and Understanding Application Status_. - -## SDK Resiliency - -At startup and when serving requests, the SDK retrieves and caches the latest information from the upstream Replicated APIs, including customer license information. - -If the upstream APIs are not available at startup, the SDK does not accept connections or serve requests until it is able to communicate with the upstream APIs. If communication fails, the SDK retries every 10 seconds and the SDK pod remains at `0/1` ready. - -When serving requests, if the upstream APIs become unavailable, the SDK serves from the memory cache and sets the `X-Replicated-Served-From-Cache` header to `true`. Additionally, rapid successive requests to the same SDK endpoint with the same request properties are rate-limited, returning the last cached payload and status code without reaching out to the upstream APIs. In this case, an `X-Replicated-Rate-Limited` header is set to `true`. - -## Replicated SDK Helm Values - - - -================ -File: docs/vendor/replicated-sdk-slsa-validating.md -================ -# SLSA Provenance Validation Process for the Replicated SDK - -This topic describes the process to perform provenance validation on the Replicated SDK. - -## About Supply Chain Levels for Software Artifacts (SLSA) - -[Supply Chain Levels for Software Artifacts (SLSA)](https://slsa.dev/), pronounced “salsa,” is a security framework that comprises standards and controls designed to prevent tampering, enhance integrity, and secure software packages and infrastructure. - - -## Purpose of Attestations -Attestations enable the inspection of an image to determine its origin, the identity of its creator, the creation process, and its contents. When building software using the Replicated SDK, the image’s Software Bill of Materials (SBOM) and SLSA-based provenance attestations empower your customers to make informed decisions regarding the impact of an image on the supply chain security of your application. This process ultimately enhances the security and assurances provided to both vendors and end customers.
- -## Prerequisite -Before you perform these tasks, you must install [slsa-verifier](https://github.com/slsa-framework/slsa-verifier) and [crane](https://github.com/google/go-containerregistry/blob/main/cmd/crane/doc/crane.md). - -## Validate the SDK SLSA Attestations - -The Replicated SDK build process utilizes Wolfi-based images to minimize the number of CVEs. The build process automatically generates SBOMs and attestations, and then publishes the image along with these metadata components. For instance, you can find all the artifacts readily available on [DockerHub](https://hub.docker.com/r/replicated/replicated-sdk/tags). The following shell script is a tool to easily validate the SLSA attestations for a given Replicated SDK image. - -``` -#!/bin/bash - -# This script verifies the SLSA metadata of a container image -# -# Requires -# - slsa-verifier (https://github.com/slsa-framework/slsa-verifier) -# - crane (https://github.com/google/go-containerregistry/blob/main/cmd/crane/doc/crane.md) -# - - -# Define the image and version to verify -VERSION=v1.0.0-beta.20 -IMAGE=replicated/replicated-sdk:${VERSION} - -# expected source repository that should have produced the artifact, e.g. github.com/some/repo -SOURCE_REPO=github.com/replicatedhq/replicated-sdk - - -# Use `crane` to retrieve the digest of the image without pulling the image -IMAGE_WITH_DIGEST="${IMAGE}@"$(crane digest "${IMAGE}") - -echo "Verifying artifact" -echo "Image: ${IMAGE_WITH_DIGEST}" -echo "Source Repo: ${SOURCE_REPO}" - -slsa-verifier verify-image "${IMAGE_WITH_DIGEST}" \ - --source-uri ${SOURCE_REPO} \ - --source-tag ${VERSION} - -``` - -================ -File: docs/vendor/resources-annotations-templating.md -================ -# Templating Annotations - -This topic describes how to use Replicated KOTS template functions to template annotations for resources and objects based on user-supplied values. - -## Overview - -It is common for users to need to set custom annotations for a resource or object deployed by your application. For example, you might need to allow your users to provide annotations to apply to a Service or Ingress object in public cloud environments. - -For applications installed with Replicated KOTS, you can apply user-supplied annotations to resources or objects by first adding a field to the Replicated Admin Console **Config** page where users can enter one or more annotations. For information about how to add fields on the **Config** page, see [Creating and Editing Configuration Fields](/vendor/admin-console-customize-config-screen). - -You can then map these user-supplied values from the **Config** page to resources and objects in your release using KOTS template functions. KOTS template functions are a set of custom template functions based on the Go text/template library that can be used to generate values specific to customer environments. The template functions in the Config context return user-supplied values on the **Config** page. - -For more information about KOTS template functions in the Config context, see [Config Context](/reference/template-functions-config-context). For more information about the Go library, see [text/template](https://pkg.go.dev/text/template) in the Go documentation. - -## About `kots.io/placeholder` - -For applications installed with KOTS that use standard Kubernetes manifests, the `kots.io/placeholder` annotation allows you to template annotations in resources and objects without breaking the base YAML or needing to include the annotation key.
- -The `kots.io/placeholder` annotation uses the format `kots.io/placeholder 'bool' 'string'`. For example: - -```yaml -# Example manifest file - -annotations: - kots.io/placeholder: |- - repl{{ ConfigOption "additional_annotations" | nindent 4 }} -``` - -:::note -For Helm chart-based applications installed with KOTS, Replicated recommends that you map user-supplied annotations to the Helm chart `values.yaml` file using the Replicated HelmChart custom resource, rather than using `kots.io/placeholder`. This allows you to access user-supplied values in your Helm chart without needing to include KOTS template functions directly in the Helm chart templates. - -For an example, see [Map User-Supplied Annotations to Helm Chart Values](#map-user-supplied-annotations-to-helm-chart-values) below. -::: - -## Annotation Templating Examples - -This section includes common examples of templating annotations in resources and objects to map user-supplied values. - -For additional examples of how to map values to Helm chart-based applications, see [Applications](https://github.com/replicatedhq/platform-examples/tree/main/applications) in the platform-examples repository in GitHub. - -### Map Multiple Annotations from a Single Configuration Field - -You can map one or more annotations from a single `textarea` field on the **Config** page. The `textarea` type defines multi-line text input and supports properties such as `rows` and `cols`. For more information, see [textarea](/reference/custom-resource-config#textarea) in _Config_. - -For example, the following Config custom resource adds an `ingress_annotations` field of type `textarea`: - -```yaml -# Config custom resource - -apiVersion: kots.io/v1beta1 -kind: Config -metadata: - name: config -spec: - groups: - - name: ingress_settings - title: Ingress Settings - description: Configure Ingress - items: - - name: ingress_annotations - type: textarea - title: Ingress Annotations - help_text: See your cloud provider’s documentation for the required annotations. -``` - -On the **Config** page, users can enter one or more key-value pairs in the `ingress_annotations` field, as shown in the example below: - -![Config page with custom annotations in an Ingress Annotations field](/images/config-map-annotations.png) - -[View a larger version of this image](/images/config-map-annotations.png) - -The following Ingress object then uses the ConfigOption template function to map the user-supplied values from the `ingress_annotations` field: - -```yaml -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: example-annotation - annotations: - kots.io/placeholder: |- - repl{{ ConfigOption "ingress_annotations" | nindent 4 }} -``` - -During installation, KOTS renders the YAML with the multi-line input from the configuration field as shown below: - -```yaml -# Rendered Ingress object -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: example-annotation - annotations: - kots.io/placeholder: |- - - key1: value1 - key2: value2 - key3: value3 -``` - -### Map Annotations from Multiple Configuration Fields - -You can specify multiple annotations using the same `kots.io/placeholder` annotation.
- -For example, the following Ingress object includes ConfigOption template functions that render the user-supplied values for the `ingress_annotation` and `ingress_hostname` fields: - -```yaml -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: example-annotation - annotations: - kots.io/placeholder: |- - repl{{ ConfigOption "ingress_annotation" | nindent 4 }} - repl{{ printf "my.custom/annotation.ingress.hostname: %s" (ConfigOption "ingress_hostname") | nindent 4 }} -``` - -During installation, KOTS renders the YAML as shown below: - -```yaml -# Rendered Ingress object - -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: example-annotation - annotations: - kots.io/placeholder: |- - - key1: value1 - my.custom/annotation.ingress.hostname: example.hostname.com -``` - -### Map User-Supplied Value to a Key - -You can map a user-supplied value from the **Config** page to a pre-defined annotation key. - -For example, in the following Ingress object, `my.custom/annotation.ingress.hostname` is the key for the templated annotation. The annotation also uses the ConfigOption template function to map the user-supplied value from an `ingress_hostname` configuration field: - -```yaml -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: example-annotation - annotations: - kots.io/placeholder: |- - repl{{ printf "my.custom/annotation.ingress.hostname: %s" (ConfigOption "ingress_hostname") | nindent 4 }} -``` - -During installation, KOTS renders the YAML as shown below: - -```yaml -# Rendered Ingress object - -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: example-annotation - annotations: - kots.io/placeholder: |- - - my.custom/annotation.ingress.hostname: example.hostname.com -``` - -### Include Conditional Statements in Templated Annotations - -You can include or exclude templated annotations based on a conditional statement. - -For example, the following Ingress object includes a conditional statement for `kots.io/placeholder` that renders `my.custom/annotation.class: somevalue` if the user enables a `custom_annotation` field on the **Config** page: - -```yaml -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: myapp - labels: - app: myapp - annotations: - kots.io/placeholder: |- - repl{{if ConfigOptionEquals "custom_annotation" "1" }}repl{{ printf "my.custom/annotation.class: somevalue" | nindent 4 }}repl{{end}} -spec: -... -``` - -During installation, if the user enables the `custom_annotation` configuration field, KOTS renders the YAML as shown below: - -```yaml -# Rendered Ingress object - -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: myapp - labels: - app: myapp - annotations: - kots.io/placeholder: |- - my.custom/annotation.class: somevalue -spec: -... -``` - -Alternatively, if the condition evaluates to false, the annotation does not appear in the rendered YAML: - -```yaml -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: myapp - labels: - app: myapp - annotations: - kots.io/placeholder: |- -spec: -... -``` - -### Map User-Supplied Annotations to Helm Chart Values - -For Helm chart-based applications installed with KOTS, Replicated recommends that you map user-supplied annotations to the Helm chart `values.yaml` file, rather than using `kots.io/placeholder`. This allows you to access user-supplied values in your Helm chart without needing to include KOTS template functions directly in the Helm chart templates.
- -To map user-supplied annotations from the **Config** page to the Helm chart `values.yaml` file, you use the `values` field of the Replicated HelmChart custom resource. For more information, see [values](/reference/custom-resource-helmchart-v2#values) in _HelmChart v2_. - -For example, the following HelmChart custom resource uses a ConfigOption template function in `values.services.myservice.annotations` to map the value of a configuration field named `additional_annotations`: - -```yaml -# HelmChart custom resource - -apiVersion: kots.io/v1beta2 -kind: HelmChart -metadata: - name: myapp -spec: - values: - services: - myservice: - annotations: repl{{ ConfigOption "additional_annotations" | nindent 10 }} -``` - -The `values.services.myservice.annotations` field in the HelmChart custom resource corresponds to a `services.myservice.annotations` field in the `values.yaml` file of the application Helm chart, as shown in the example below: - -```yaml -# Helm chart values.yaml - -services: - myservice: - annotations: {} -``` - -During installation, the ConfigOption template function in the HelmChart custom resource renders the user-supplied values from the `additional_annotations` configuration field. - -Then, KOTS replaces the value in the corresponding field in the `values.yaml` in the chart archive, as shown in the example below. - -```yaml -# Rendered Helm chart values.yaml - -services: - myservice: - annotations: - key1: value1 -``` - -In your Helm chart templates, you can access these values from the `values.yaml` file to apply the user-supplied annotations to the target resources or objects. For information about how to access values from a `values.yaml` file, see [Values Files](https://helm.sh/docs/chart_template_guide/values_files/) in the Helm documentation. - -================ -File: docs/vendor/snapshots-configuring-backups.md -================ -# Configuring Snapshots - -This topic provides information about how to configure the Velero Backup resource to enable Replicated KOTS snapshots for an application. - -For more information about snapshots, see [About Backup and Restore with Snapshots](/vendor/snapshots-overview). - -## Configure Snapshots - -Add a Velero Backup custom resource (`kind: Backup`, `apiVersion: velero.io/v1`) to your release and configure it as needed. After configuring the Backup resource, add annotations for each volume that you want to be included in backups. - -To configure snapshots for your application: - -1. In a new release containing your application files, add a Velero Backup resource (`kind: Backup` and `apiVersion: velero.io/v1`): - - ```yaml - apiVersion: velero.io/v1 - kind: Backup - metadata: - name: backup - spec: {} - ``` - -1. Configure the Backup resource to specify the resources that will be included in backups. - - For more information about the Velero Backup resource, including limitations, the list of supported fields for snapshots, and an example, see [Velero Backup Resource for Snapshots](/reference/custom-resource-backup). - -1. (Optional) Configure backup and restore hooks. For more information, see [Configuring Backup and Restore Hooks for Snapshots](snapshots-hooks). - -1. For each volume that requires a backup, add the `backup.velero.io/backup-volumes` annotation. The annotation value is a comma-separated list of volumes to include in the backup.
    <details>
    <summary>Why do I need to use the backup annotation?</summary>

    By default, no volumes are included in the backup. If any pods mount a volume that should be backed up, you must configure the backup with an annotation listing the specific volumes to include in the backup.

    </details>

    **Example:** - - In the following Deployment manifest file, `pvc-volume` is the only volume that is backed up. The `scratch` volume is not included in the backup because it is not listed in the annotation on the pod specification. - - ```yaml - apiVersion: apps/v1 - kind: Deployment - metadata: - name: sample - labels: - app: foo - spec: - replicas: 1 - selector: - matchLabels: - app: foo - template: - metadata: - labels: - app: foo - annotations: - backup.velero.io/backup-volumes: pvc-volume - spec: - containers: - - image: k8s.gcr.io/test-webserver - name: test-webserver - volumeMounts: - - name: pvc-volume - mountPath: /volume-1 - - name: scratch - mountPath: /volume-2 - volumes: - - name: pvc-volume - persistentVolumeClaim: - claimName: test-volume-claim - - name: scratch - emptyDir: {} - - ``` - -1. (Optional) Configure manifest exclusions. By default, Velero also includes backups of all of the Kubernetes objects in the namespace. - - To exclude any manifest file, add a [`velero.io/exclude-from-backup=true`](https://velero.io/docs/v1.5/resource-filtering/#veleroioexclude-from-backuptrue) label to the manifest to be excluded. The following example shows the Secret manifest file with the `velero.io/exclude-from-backup` label: - - ```yaml - apiVersion: v1 - kind: Secret - metadata: - name: sample - labels: - velero.io/exclude-from-backup: "true" - stringData: - uri: Secret To Not Include - - ``` - -1. If you distribute multiple applications with Replicated, repeat these steps for each application. Each application must have its own Backup resource to be included in a full backup with snapshots. - -1. (kURL Only) If your application supports installation with Replicated kURL, Replicated recommends that you include the kURL Velero add-on so that customers do not have to manually install Velero in the kURL cluster. For more information, see [Creating a kURL Installer](packaging-embedded-kubernetes). - -================ -File: docs/vendor/snapshots-hooks.md -================ -# Configuring Backup and Restore Hooks for Snapshots - -This topic describes the use of custom backup and restore hooks and demonstrates a common example. - -## About Backup and Restore Hooks - -Velero supports the use of backup hooks and restore hooks. - -Your application workload might require additional processing or scripts to be run before or after creating a backup to prepare the system for a backup. Many application workloads also require additional processing or scripts to run during or after the restore process. - -Some common examples of how to use a hook to create backups are: -- Run `pg_dump` to export a postgres database prior to backup -- Lock a file before running a backup, and unlock immediately after -- Delete TMP files that should not be backed up -- Restore a database file only if that file exists -- Perform required setup tasks in a restored Pod before the application containers can start - -Additionally, for embedded clusters created by Replicated kURL, you must write custom backup and restore hooks to enable backups for any object-stored data that is not KOTS-specific and does not use persistentVolumeClaims (PVCs). For more information about object-stored data, see [Other Object Stored Data](snapshots-overview#other-object-stored-data) in _Backup and Restore_. - -For more information about backup and restore hooks, see [Backup Hooks](https://velero.io/docs/v1.10/backup-hooks/) and [Restore Hooks](https://velero.io/docs/v1.10/restore-hooks) in the Velero documentation.
- -## Example - -The following example demonstrates how to include Velero backup and restore hooks for a Postgres database in a Replicated HelmChart custom resource manifest file. - -The use case for this example is an application packaged with a Helm chart that includes a Postgres database. A description of key fields from the YAML follows the example. - -```yaml -apiVersion: kots.io/v1beta2 -kind: HelmChart -metadata: - name: postgresql -spec: - exclude: 'repl{{ ConfigOptionEquals `postgres_type` `external_postgres` }}' - - chart: - name: postgresql - chartVersion: 8.7.4 - - values: - - master: - podAnnotations: - backup.velero.io/backup-volumes: backup - pre.hook.backup.velero.io/command: '["/bin/bash", "-c", "PGPASSWORD=$POSTGRES_PASSWORD pg_dump -U username -d dbname -h 127.0.0.1 > /scratch/backup.sql"]' - pre.hook.backup.velero.io/timeout: 3m - post.hook.restore.velero.io/command: '["/bin/bash", "-c", "[ -f \"/scratch/backup.sql\" ] && PGPASSWORD=$POSTGRES_PASSWORD psql -U username -h 127.0.0.1 -d dbname -f /scratch/backup.sql && rm -f /scratch/backup.sql;"]' - - extraVolumes: - - name: backup - emptyDir: - sizeLimit: 1Gi - extraVolumeMounts: - - name: backup - mountPath: /scratch - - global: - postgresql: - postgresqlUsername: username - postgresqlPassword: "repl{{ ConfigOption `embedded_postgres_password` }}" - postgresqlDatabase: dbname -``` - -The following describes key fields from the example above: - -* `spec.exclude`: A common and recommended pattern for applications. The customer can choose to bring an external Postgres instance instead of running it in-cluster. The Replicated KOTS template function in `spec.exclude` evaluates to true when the user specifies the external database option in the Admin Console **Config** page. This means that the internal Postgres database is not included in the deployment. - -* `spec.values.master.podAnnotations`: Adds podAnnotations to the postgres master PodSpec. Velero backup and restore hooks are included in the podAnnotations. The following table describes the podAnnotations: - - :::note - Run backup hooks inside the container that contains the data to back up. - :::

    | podAnnotation | Description |
    |---------------|-------------|
    | `backup.velero.io/backup-volumes` | A comma-separated list of volumes from the Pod to include in the backup. The primary data volume is not included in this field because data is exported using the backup hook. |
    | `pre.hook.backup.velero.io/command` | A stringified JSON array containing the command for the backup hook. This command is a `pg_dump` from the running database to the backup volume. |
    | `pre.hook.backup.velero.io/timeout` | A duration for the maximum time to let this script run. |
    | `post.hook.restore.velero.io/command` | A Velero exec restore hook that runs a script to check if the database file exists, and restores only if it exists. Then, the script deletes the file after the operation is complete. |
    - -* `spec.values.master.extraVolumes`: A new volume that is injected into the postgres Pod. The new volume is an empty volume that uses ephemeral storage. The ephemeral storage must have enough space to accommodate the size of the exported data. -The `extraVolumeMounts` field mounts the volume into the `/scratch` directory of the master Pod. The volume is used as a destination when the backup hook command described above runs `pg_dump`. This is the only volume that is backed up. - -================ -File: docs/vendor/snapshots-overview.mdx -================ -import RestoreTable from "../partials/snapshots/_restoreTable.mdx" -import NoEcSupport from "../partials/snapshots/_limitation-no-ec-support.mdx" -import RestoreTypes from "../partials/snapshots/_restore-types.mdx" -import Dr from "../partials/snapshots/_limitation-dr.mdx" -import Os from "../partials/snapshots/_limitation-os.mdx" -import InstallMethod from "../partials/snapshots/_limitation-install-method.mdx" -import CliRestores from "../partials/snapshots/_limitation-cli-restores.mdx" - -# About Backup and Restore with Snapshots - -This topic provides an introduction to the Replicated KOTS snapshots feature for backup and restore. It describes how vendors enable snapshots, the type of data that is backed up, and how to troubleshoot issues for enterprise users. - -## Overview - -An important part of the lifecycle of an application is backup and restore. You can enable Replicated KOTS snapshots to support backup and restore for existing cluster installations with KOTS and Replicated kURL installations. - -When the snapshots feature is enabled for your application, your customers can manage and perform backup and restore from the Admin Console or KOTS CLI. - -Snapshots uses the Velero open source project as the backend to back up Kubernetes manifests and persistent volumes. Velero is a mature, fully-featured application. For more information, see the [Velero documentation](https://velero.io/docs/). - -In addition to the default functionality that Velero provides, KOTS exposes hooks that let you inject scripts that can execute both before and after a backup, and before and after a restore. For more information, see [Configuring Backup and Restore Hooks for Snapshots](/vendor/snapshots-hooks). - -### Limitations and Considerations - -- The snapshots feature is available only for licenses with the **Allow Snapshots** option enabled. For more information, see [Creating and Managing Customers](/vendor/releases-creating-customer). - -- Snapshots are useful for rollback and disaster recovery scenarios. They are not intended to be used for application migration. - -- Removing data from the snapshot storage itself results in data corruption and the loss of snapshots. Instead, use the **Snapshots** tab in the Admin Console to clean up and remove snapshots. - -- Snapshots does not support Amazon Simple Storage Service (Amazon S3) buckets that have a bucket policy requiring the server-side encryption header. If you want to require server-side encryption for objects, you can enable default encryption on the bucket instead. For more information about Amazon S3, see the [Amazon S3](https://docs.aws.amazon.com/s3/?icmpid=docs_homepage_featuredsvcs) documentation. - -### Velero Version Compatibility - -The following table lists which versions of Velero are compatible with each version of KOTS. For more information, see the [Velero documentation](https://velero.io/docs/).

| KOTS version | Velero version |
|------|-------------|
| 1.15 to 1.20.2 | 1.2.0 |
| 1.20.3 to 1.94.0 | 1.5.1 through 1.9.x |
| 1.94.1 and later | 1.6.x through 1.12.x |

## About Backups

This section describes the types of backups that are supported with snapshots. For information about how to configure backup storage destinations for snapshots, see the [Configuring Backup Storage](/enterprise/snapshots-velero-cli-installing) section.

### Application and Admin Console (Full) Backups

Full backups (also referred to as _instance_ backups) include the KOTS Admin Console and all application data, including application volumes and manifest files.

For clusters created with Replicated kURL, full backups also back up the Docker registry, which is required for air gapped installations.

If you manage multiple applications with the Admin Console, data from all applications that support backups is included in a full backup. To be included in full backups, each application must include a manifest file with `kind: Backup` and `apiVersion: velero.io/v1`. You can check whether an application supports backups in the Admin Console.

Full backups are recommended because they support all types of restores. For example, you can restore both the Admin Console and application from a full backup to a new cluster in disaster recovery scenarios. Or, you can use a full backup to restore only application data for the purpose of rolling back after deploying a new version of an application.

### Application-Only (Partial) Backups

Partial backups back up the application volumes and manifest files only. Partial backups do not back up the KOTS Admin Console.

Partial backups can be useful if you need to roll back after deploying a new application version. Partial backups of the application only _cannot_ be restored to a new cluster, and are therefore not usable for disaster recovery scenarios.

### Backup Storage Destinations

For disaster recovery, backups should be configured to use a storage destination that exists outside of the cluster. This is especially true for installations in clusters created with Replicated kURL, because the default storage location on these clusters is internal.

You can use a storage provider that is compatible with Velero as the storage destination for backups created with the Replicated snapshots feature. For a list of the compatible storage providers, see [Providers](https://velero.io/docs/v1.9/supported-providers/) in the Velero documentation.

You initially configure backups on a supported storage provider backend using the KOTS CLI. If you want to change the storage destination after the initial configuration, you can use the **Snapshots** page in the Admin Console, which has built-in support for the following storage destinations:

- Amazon Web Services (AWS)
- Google Cloud Provider (GCP)
- Microsoft Azure
- S3-Compatible
- Network File System (NFS)
- Host Path

kURL installers that include the Velero add-on also include a locally-provisioned object store. By default, kURL clusters are preconfigured in the Admin Console to store backups in the locally-provisioned object store. This object store is sufficient only for rollbacks and downgrades and is not a suitable configuration for disaster recovery. Replicated recommends that you configure a snapshots storage destination that is external to the cluster in the Admin Console for kURL clusters.
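
For example, the following is a minimal sketch of using the KOTS CLI to point snapshots at an external S3-compatible object store. The endpoint, region, bucket, and credential values are placeholders, and the available flags can vary by provider and KOTS version, so check the KOTS CLI reference for your storage provider before using this:

```bash
# Configure an external S3-compatible store for snapshots
# (all values shown are placeholders)
kubectl kots velero configure-other-s3 \
  --namespace default \
  --endpoint https://s3.example.com \
  --region us-east-1 \
  --bucket kots-snapshots \
  --access-key-id <ACCESS_KEY_ID> \
  --secret-access-key <SECRET_ACCESS_KEY>
```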

For information about how to configure backup storage destinations for snapshots, see the [Configuring Backup Storage](/enterprise/snapshots-velero-cli-installing) section.

### What Data is Backed Up?

Full backups include the Admin Console and all application data, including KOTS-specific object-stored data. For Replicated kURL installations, this also backs up the Docker registry, which is required for air gapped installations.

#### Other Object-Stored Data

For kURL clusters, you might be using object-stored data that is not specific to the kURL KOTS add-on.

For object-stored data that is not KOTS-specific and does not use PersistentVolumeClaims (PVCs), you must write custom backup and restore hooks to enable backups for that object-stored data. For example, Rook and Ceph do not use PVCs and so require custom backup and restore hooks. For more information about writing custom hooks, see [Configuring Backup and Restore Hooks for Snapshots](snapshots-hooks).

#### Pod Volume Data

Replicated supports only the restic backup program for pod volume data.

By default, Velero requires that you opt in to have pod volumes backed up. You must annotate each pod volume that you want to back up, as shown in the sketch below. For more information about including and excluding pod volumes, see [Configuring Snapshots](/vendor/snapshots-configuring-backups).
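
The following is a minimal sketch of opting a pod volume into backups using Velero's opt-in annotation. The Deployment, volume, and image names are hypothetical; the annotation value is a comma-separated list of volume names from the pod spec:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-app
spec:
  selector:
    matchLabels:
      app: example-app
  template:
    metadata:
      labels:
        app: example-app
      annotations:
        # Opt this pod's "data" volume into pod volume backups
        backup.velero.io/backup-volumes: data
    spec:
      containers:
        - name: app
          image: example/app:1.0.0
          volumeMounts:
            - name: data
              mountPath: /data
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: example-data
```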

## About Restores {#restores}

<RestoreTypes/>

When you restore an application with snapshots, KOTS first deletes the selected application. All existing application manifests are removed from the cluster, and all `PersistentVolumeClaims` are deleted. This action is not reversible.

Then, the restore process redeploys all of the application manifests. All Pods are given an extra `initContainer` and an extra directory named `.velero`, which are used for restore hooks. For more information about the restore process, see [Restore Reference](https://velero.io/docs/v1.9/restore-reference/) in the Velero documentation.

When you restore the Admin Console only, no changes are made to the application.

For information about how to restore using the Admin Console or the KOTS CLI, see [Restoring from Backups](/enterprise/snapshots-restoring-full).

## Using Snapshots

This section provides an overview of how vendors and enterprise users can configure and use the snapshots feature.

### How to Enable Snapshots for Your Application

To enable the snapshots backup and restore feature for your users, you must:

- Have the snapshots entitlement enabled in your Replicated vendor account. For account entitlements, contact the Replicated TAM team.
- Define a manifest for creating backups. See [Configuring Snapshots](snapshots-configuring-backups).
- When needed, configure backup and restore hooks. See [Configuring Backup and Restore Hooks for Snapshots](snapshots-hooks).
- Enable the **Allow Snapshots** option in customer licenses. See [Creating and Managing Customers](releases-creating-customer).

### Understanding Backup and Restore for Users {#how-users}

After vendors enable backup and restore, enterprise users install Velero and configure a storage destination in the Admin Console. Then users can create backups manually or schedule automatic backups.

Replicated recommends advising your users to make full backups for disaster recovery purposes. Additionally, full backups give users the flexibility to do a full restore, a partial restore (application only), or restore just the Admin Console.

From a full backup, users restore using the KOTS CLI or the Admin Console as indicated in the following table:

<RestoreTable/>

Partial backups are not recommended because they are a legacy feature and back up only the application volumes and manifests. Partial backups can be restored only from the Admin Console.

### Troubleshooting Snapshots

To support end users with backup and restore, use the following resources:

- To help troubleshoot error messages, see [Troubleshooting Snapshots](/enterprise/snapshots-troubleshooting-backup-restore).

- Review the Limitations and Considerations section to make sure an end user's system is compliant.

- Check that the installed Velero version and KOTS version are compatible.

================
File: docs/vendor/support-bundle-customizing.mdx
================
# Adding and Customizing Support Bundles

This topic describes how to add a default support bundle spec to a release for your application. It also describes how to customize the default support bundle spec based on your application's needs. For more information about support bundles, see [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about).

The information in this topic applies to Helm applications and Kubernetes manifest-based applications installed with Helm or with Replicated KOTS.

## Step 1: Add the Default Spec to a Manifest File

You can add the support bundle spec to a Kubernetes Secret or a SupportBundle custom resource. The type of manifest file that you use depends on your application type (Helm or manifest-based) and installation method (Helm or KOTS).

Use the following guidance to determine which type of manifest file to use for creating a support bundle spec:

* **Helm Applications**: For Helm applications, see the following guidance:

   * **(Recommended) Helm or KOTS v1.94.2 and Later**: For Helm applications installed with Helm or KOTS v1.94.2 or later, create the support bundle spec in a Kubernetes Secret in your Helm chart `templates`. See [Kubernetes Secret](#secret).

   * **KOTS v1.94.1 and Earlier**: For Helm applications installed with KOTS v1.94.1 or earlier, create the support bundle spec in a SupportBundle custom resource. See [SupportBundle Custom Resource](#sb-cr).

* **Kubernetes Manifest-Based Applications**: For Kubernetes manifest-based applications, create the support bundle spec in a SupportBundle custom resource. See [SupportBundle Custom Resource](#sb-cr).

### Kubernetes Secret {#secret}

You can define support bundle specs in a Kubernetes Secret for the following installation types:
* Installations with Helm
* Helm applications installed with KOTS v1.94.2 and later

In your Helm chart `templates` directory, add the following YAML to a Kubernetes Secret:

```yaml
apiVersion: v1
kind: Secret
metadata:
  labels:
    troubleshoot.sh/kind: support-bundle
  name: example
stringData:
  support-bundle-spec: |
    apiVersion: troubleshoot.sh/v1beta2
    kind: SupportBundle
    metadata:
      name: support-bundle
    spec:
      collectors: []
      analyzers: []
```

As shown above, the Secret must include the following:

* The label `troubleshoot.sh/kind: support-bundle`
* A `stringData` field with a key named `support-bundle-spec`

This empty support bundle spec includes the following collectors by default:
* [clusterInfo](https://troubleshoot.sh/docs/collect/cluster-info/)
* [clusterResources](https://troubleshoot.sh/docs/collect/cluster-resources/)

You do not need to manually include the `clusterInfo` or `clusterResources` collectors in the spec.

:::note
If your application is deployed as multiple Helm charts, Replicated recommends that you create separate support bundle specs for each subchart. This allows you to make specs that are specific to different components of your application. When a support bundle is generated, all the specs are combined to provide a single bundle.
:::

After you create this empty support bundle spec, you can test the support bundle by following the instructions in [Generating a Support Bundle](/vendor/support-bundle-generating). You can customize the support bundle spec by adding collectors and analyzers or editing the default collectors. For more information, see [Step 2: Customize the Spec](/vendor/support-bundle-customizing#customize-the-spec) below.

### SupportBundle Custom Resource {#sb-cr}

You can define support bundle specs in a SupportBundle custom resource for the following installation types:
* Kubernetes manifest-based applications installed with KOTS
* Helm applications installed with KOTS v1.94.1 and earlier

In a release for your application, add the following YAML to a new `support-bundle.yaml` manifest file:

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: SupportBundle
metadata:
  name: example
spec:
  collectors: []
  analyzers: []
```

For more information about the SupportBundle custom resource, see [Preflight and Support Bundle](/reference/custom-resource-preflight).

This empty support bundle spec includes the following collectors by default:
* [clusterInfo](https://troubleshoot.sh/docs/collect/cluster-info/)
* [clusterResources](https://troubleshoot.sh/docs/collect/cluster-resources/)

You do not need to manually include the `clusterInfo` or `clusterResources` collectors in the spec.

After you create this empty support bundle spec, you can test the support bundle by following the instructions in [Generating a Support Bundle](/vendor/support-bundle-generating). You can customize the support bundle spec by adding collectors and analyzers or editing the default collectors. For more information, see [Step 2: Customize the Spec](/vendor/support-bundle-customizing#customize-the-spec) below.

## Step 2: Customize the Spec {#customize-the-spec}

You can customize the support bundles for your application by:
* Adding collectors and analyzers
* Editing or excluding the default `clusterInfo` and `clusterResources` collectors

### Add Collectors

Collectors gather information from the cluster, the environment, the application, or other sources. Collectors generate output that is then used by the analyzers that you define.

In addition to the default `clusterInfo` and `clusterResources` collectors, the Troubleshoot open source project includes several collectors that you can include in the spec to gather more information from the installation environment. To view all the available collectors, see [All Collectors](https://troubleshoot.sh/docs/collect/all/) in the Troubleshoot documentation.

The following are some recommended collectors:

- [logs](https://troubleshoot.sh/docs/collect/logs/)
- [secret](https://troubleshoot.sh/docs/collect/secret/) and [configMap](https://troubleshoot.sh/docs/collect/configmap/)
- [postgresql](https://troubleshoot.sh/docs/collect/postgresql/), [mysql](https://troubleshoot.sh/docs/collect/mysql/), and [redis](https://troubleshoot.sh/docs/collect/redis/)
- [runPod](https://troubleshoot.sh/docs/collect/run-pod/)
- [copy](https://troubleshoot.sh/docs/collect/copy/) and [copyFromHost](https://troubleshoot.sh/docs/collect/copy-from-host/)
- [http](https://troubleshoot.sh/docs/collect/http/)

### Add Analyzers

Analyzers use the data from the collectors to generate output for the support bundle. Good analyzers clearly identify failure modes and provide troubleshooting guidance for the user. For example, if you can identify a log message from your database component that indicates a problem, you should write an analyzer that checks for that log and provides a description of the error to the user.

The Troubleshoot open source project includes several analyzers that you can include in the spec. To view all the available analyzers, see the [Analyze](https://troubleshoot.sh/docs/analyze/) section of the Troubleshoot documentation.

The following are some recommended analyzers:

- [textAnalyze](https://troubleshoot.sh/docs/analyze/regex/)
- [deploymentStatus](https://troubleshoot.sh/docs/analyze/deployment-status/)
- [clusterPodStatuses](https://troubleshoot.sh/docs/analyze/cluster-pod-statuses/)
- [replicasetStatus](https://troubleshoot.sh/docs/analyze/replicaset-status/)
- [statefulsetStatus](https://troubleshoot.sh/docs/analyze/statefulset-status/)
- [postgresql](https://troubleshoot.sh/docs/analyze/postgresql/), [mysql](https://troubleshoot.sh/docs/analyze/mysql/), and [redis](https://troubleshoot.sh/docs/analyze/redis/)

### Customize the Default `clusterResources` Collector

You can edit the default `clusterResources` collector using the following properties:

* `namespaces`: The list of namespaces where the resources and information are collected. If the `namespaces` key is not specified, then the `clusterResources` collector defaults to collecting information from all namespaces. The `default` namespace cannot be removed, but you can specify additional namespaces.

* `ignoreRBAC`: When true, the `clusterResources` collector does not check for RBAC authorization before collecting resource information from each namespace. This is useful when your cluster uses authorization webhooks that do not support SelfSubjectRuleReviews. Defaults to false.

For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) in the Troubleshoot documentation.

The following example shows how to specify the namespaces where the `clusterResources` collector collects information:

```yaml
spec:
  collectors:
    - clusterResources:
        namespaces:
          - default
          - my-app-namespace
        ignoreRBAC: true
```

The following example shows how to use Helm template functions to set the namespace:

```yaml
spec:
  collectors:
    - clusterResources:
        namespaces: {{ .Release.Namespace }}
        ignoreRBAC: true
```

The following example shows how to use the Replicated Namespace template function to set the namespace:

```yaml
spec:
  collectors:
    - clusterResources:
        namespaces: '{{repl Namespace }}'
        ignoreRBAC: true
```

For more information, see [Namespace](/reference/template-functions-static-context#namespace) in _Static Context_.

### Exclude the Default Collectors

Although Replicated recommends including the default `clusterInfo` and `clusterResources` collectors because they collect a large amount of data to help with installation and debugging, you can optionally exclude them.

The following example shows how to exclude both the `clusterInfo` and `clusterResources` collectors from your support bundle spec:

```yaml
spec:
  collectors:
    - clusterInfo:
        exclude: true
    - clusterResources:
        exclude: true
```

### Examples

For common examples of collectors and analyzers used in support bundle specs, see [Examples of Support Bundle Specs](/vendor/support-bundle-examples).

================
File: docs/vendor/support-bundle-embedded.mdx
================
import EmbeddedClusterSupportBundle from "../partials/support-bundles/_generate-bundle-ec.mdx"
import SupportBundleIntro from "../partials/support-bundles/_ec-support-bundle-intro.mdx"

# Generating Support Bundles for Embedded Cluster

This topic describes how to generate a support bundle that includes cluster- and host-level information for [Replicated Embedded Cluster](/vendor/embedded-overview) installations.

For information about generating host support bundles for Replicated kURL installations, see [Generating Host Bundles for kURL](/vendor/support-host-support-bundles).

## Overview

<SupportBundleIntro/>

## Generate a Support Bundle

<EmbeddedClusterSupportBundle/>

================
File: docs/vendor/support-bundle-examples.mdx
================
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import HttpSecret from "../partials/support-bundles/_http-requests-secret.mdx"
import HttpCr from "../partials/support-bundles/_http-requests-cr.mdx"
import NodeStatusSecret from "../partials/support-bundles/_node-status-secret.mdx"
import NodeStatusCr from "../partials/support-bundles/_node-status-cr.mdx"
import K8sVersionSecret from "../partials/support-bundles/_k8s-version-secret.mdx"
import K8sVersionCr from "../partials/support-bundles/_k8s-version-cr.mdx"
import DeployStatusSecret from "../partials/support-bundles/_deploy-status-secret.mdx"
import DeployStatusCr from "../partials/support-bundles/_deploy-status-cr.mdx"
import NodeResourcesSecret from "../partials/support-bundles/_node-resources-secret.mdx"
import NodeResourcesCr from "../partials/support-bundles/_node-resources-cr.mdx"
import LogsSelectorsSecret from "../partials/support-bundles/_logs-selectors-secret.mdx"
import LogsSelectorsCr from "../partials/support-bundles/_logs-selectors-cr.mdx"
import LogsLimitsSecret from "../partials/support-bundles/_logs-limits-secret.mdx"
import LogsLimitsCr from "../partials/support-bundles/_logs-limits-cr.mdx"
import RedisMysqlSecret from "../partials/support-bundles/_redis-mysql-secret.mdx"
import RedisMysqlCr from "../partials/support-bundles/_redis-mysql-cr.mdx"
import RunPodsSecret from "../partials/support-bundles/_run-pods-secret.mdx"
import RunPodsCr from "../partials/support-bundles/_run-pods-cr.mdx"

# Example Support Bundle Specs

This topic includes common examples of support bundle specifications. For more examples, see the [Troubleshoot example repository](https://github.com/replicatedhq/troubleshoot/tree/main/examples/support-bundle) in GitHub.

## Check API Deployment Status

The examples below use the `deploymentStatus` analyzer to check the status of an API deployment running in the cluster. The `deploymentStatus` analyzer uses data from the default `clusterResources` collector.

For more information, see [Deployment Status](https://troubleshoot.sh/docs/analyze/deployment-status/) and [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) in the Troubleshoot documentation.

<Tabs>
<TabItem value="secret" label="Kubernetes Secret" default>
  <DeployStatusSecret/>
</TabItem>
<TabItem value="cr" label="SupportBundle Custom Resource">
  <DeployStatusCr/>
</TabItem>
</Tabs>

## Check HTTP Requests

If your application has its own API that serves status, metrics, performance data, and so on, this information can be collected and analyzed.

The examples below use the `http` collector and the `textAnalyze` analyzer to check that an HTTP request to the Slack API at `https://api.slack.com/methods/api.test` made from the cluster returns a successful response of `"status": 200,`.

For more information, see [HTTP](https://troubleshoot.sh/docs/collect/http/) and [Regular Expression](https://troubleshoot.sh/docs/analyze/regex/) in the Troubleshoot documentation.

<Tabs>
<TabItem value="secret" label="Kubernetes Secret" default>
  <HttpSecret/>
</TabItem>
<TabItem value="cr" label="SupportBundle Custom Resource">
  <HttpCr/>
</TabItem>
</Tabs>

## Check Kubernetes Version

The examples below use the `clusterVersion` analyzer to check the version of Kubernetes running in the cluster. The `clusterVersion` analyzer uses data from the default `clusterInfo` collector.

For more information, see [Cluster Version](https://troubleshoot.sh/docs/analyze/cluster-version/) and [Cluster Info](https://troubleshoot.sh/docs/collect/cluster-info/) in the Troubleshoot documentation.

<Tabs>
<TabItem value="secret" label="Kubernetes Secret" default>
  <K8sVersionSecret/>
</TabItem>
<TabItem value="cr" label="SupportBundle Custom Resource">
  <K8sVersionCr/>
</TabItem>
</Tabs>

## Check Node Resources

The examples below use the `nodeResources` analyzer to check that the minimum requirements are met for memory, CPU cores, number of nodes, and ephemeral storage. The `nodeResources` analyzer uses data from the default `clusterResources` collector.

For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) in the Troubleshoot documentation.

<Tabs>
<TabItem value="secret" label="Kubernetes Secret" default>
  <NodeResourcesSecret/>
</TabItem>
<TabItem value="cr" label="SupportBundle Custom Resource">
  <NodeResourcesCr/>
</TabItem>
</Tabs>

## Check Node Status

The following examples use the `nodeResources` analyzer to check the status of the nodes in the cluster. The `nodeResources` analyzer uses data from the default `clusterResources` collector.

For more information, see [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) and [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) in the Troubleshoot documentation.

<Tabs>
<TabItem value="secret" label="Kubernetes Secret" default>
  <NodeStatusSecret/>
</TabItem>
<TabItem value="cr" label="SupportBundle Custom Resource">
  <NodeStatusCr/>
</TabItem>
</Tabs>

## Collect Logs Using Multiple Selectors

The examples below use the `logs` collector to collect logs from various Pods where application workloads are running. They also use the `textAnalyze` analyzer to analyze the logs for a known error.

For more information, see [Pod Logs](https://troubleshoot.sh/docs/collect/logs/) and [Regular Expression](https://troubleshoot.sh/docs/analyze/regex/) in the Troubleshoot documentation.

You can use the `selector` attribute of the `logs` collector to find Pods that have the specified labels. Depending on the complexity of an application's labeling schema, you might need a few different declarations of the logs collector, as shown in the examples below. You can include the `logs` collector as many times as needed.

<Tabs>
<TabItem value="secret" label="Kubernetes Secret" default>
  <LogsSelectorsSecret/>
</TabItem>
<TabItem value="cr" label="SupportBundle Custom Resource">
  <LogsSelectorsCr/>
</TabItem>
</Tabs>

## Collect Logs Using `limits`

The examples below use the `logs` collector to collect Pod logs from the Pod where the application is running. These specifications use the `limits` field to set a `maxAge` and `maxLines` to limit the output provided.

For more information, see [Pod Logs](https://troubleshoot.sh/docs/collect/logs/) in the Troubleshoot documentation.

<Tabs>
<TabItem value="secret" label="Kubernetes Secret" default>
  <LogsLimitsSecret/>
</TabItem>
<TabItem value="cr" label="SupportBundle Custom Resource">
  <LogsLimitsCr/>
</TabItem>
</Tabs>

## Collect Redis and MySQL Server Information

The following examples use the `redis` and `mysql` collectors to collect information about Redis and MySQL servers running in the cluster.

For more information, see [Redis](https://troubleshoot.sh/docs/collect/redis/) and [MySQL](https://troubleshoot.sh/docs/collect/mysql/) in the Troubleshoot documentation.

<Tabs>
<TabItem value="secret" label="Kubernetes Secret" default>
  <RedisMysqlSecret/>
</TabItem>
<TabItem value="cr" label="SupportBundle Custom Resource">
  <RedisMysqlCr/>
</TabItem>
</Tabs>

## Run and Analyze a Pod

The examples below use the `textAnalyze` analyzer to check that a command successfully executes in a Pod running in the cluster. The Pod specification is defined in the `runPod` collector.

For more information, see [Run Pods](https://troubleshoot.sh/docs/collect/run-pod/) and [Regular Expression](https://troubleshoot.sh/docs/analyze/regex/) in the Troubleshoot documentation.

<Tabs>
<TabItem value="secret" label="Kubernetes Secret" default>
  <RunPodsSecret/>
</TabItem>
<TabItem value="cr" label="SupportBundle Custom Resource">
  <RunPodsCr/>
</TabItem>
</Tabs>

================
File: docs/vendor/support-bundle-generating.mdx
================
import InstallPlugin from "../partials/support-bundles/_install-plugin.mdx"
import GenerateBundle from "../partials/support-bundles/_generate-bundle.mdx"

# Generating Support Bundles

This topic describes how to generate support bundles from the command line using the kubectl support-bundle plugin. For more information about support bundles, see [About Preflights and Support Bundles](/vendor/preflight-support-bundle-about).

The information in this topic applies to generating support bundles in clusters where you have kubectl access. For information about generating support bundles that include cluster- and host-level information for Replicated Embedded Cluster installations, see [Generating Support Bundles for Embedded Cluster](support-bundle-embedded).

## Prerequisite: Install the support-bundle Plugin

<InstallPlugin/>

## Generate a Bundle

<GenerateBundle/>

## Generate a Bundle when a Helm Installation Fails

If a Helm installation fails and you want to collect a support bundle to assist with diagnostics, you can use a Replicated default specification to generate the support bundle.

Run the following command:

```bash
kubectl support-bundle https://raw.githubusercontent.com/replicatedhq/troubleshoot-specs/main/in-cluster/default.yaml
```

================
File: docs/vendor/support-enabling-direct-bundle-uploads.md
================
# Enabling Support Bundle Uploads (Beta)

:::note
Direct bundle uploads is in beta. The functionality, requirements, and limitations of direct bundle uploads are subject to change.
:::

When this feature is enabled, customers using online KOTS installations can upload support bundles directly through the Admin Console UI, eliminating the need to share the generated bundle with you manually.

When enabled, your customers can use the **Send bundle to vendor** button in the Admin Console to upload a generated support bundle.

Send bundle to vendor screen

After clicking this button, the bundle is immediately available under the **Troubleshoot** tab in the Vendor Portal team account associated with this customer.

For more information about how your customer can use this feature, see [Generating Support Bundles from the Admin Console](/enterprise/troubleshooting-an-app).

### How to Enable Direct Bundle Uploads

Direct bundle uploads are disabled by default. To enable this feature for your customer:

1. Log in to the Vendor Portal and navigate to your customer's **Manage Customer** page.
1. Under the **License options** section, make sure your customer has **KOTS Install Enabled** checked, and then check the **Support Bundle Upload Enabled (Beta)** option.

   ![Customer license options: configure direct support bundle upload](/images/configure-direct-support-bundle-upload.png)

   [View a larger version of this image](/images/configure-direct-support-bundle-upload.png)
1. Click **Save**.

### Limitations

- You will not receive a notification when a customer sends a support bundle to the Vendor Portal. To avoid overlooking these uploads, activate this feature only if there is a reliable escalation process already in place for the customer license.
- This feature supports only online KOTS installations. If the feature is enabled but the application is installed in air gap mode, the upload button does not appear.
- There is a 500 MB limit for support bundles uploaded directly through the Admin Console.

================
File: docs/vendor/support-host-support-bundles.md
================
import GenerateBundleHost from "../partials/support-bundles/_generate-bundle-host.mdx"

# Generating Host Bundles for kURL

This topic describes how to configure a host support bundle spec for Replicated kURL installations. For information about generating host support bundles for Replicated Embedded Cluster installations, see [Generating Host Bundles for Embedded Cluster](/vendor/support-bundle-embedded).

## Overview

Host support bundles can be used to collect information directly from the host where a kURL cluster is running, such as CPU, memory, available block devices, and the operating system. Host support bundles can also be used for testing network connectivity and gathering the output of provided commands.

Host bundles for kURL are useful when:
- The kURL cluster is offline
- The kURL installer failed before the control plane was initialized
- The Admin Console is not working
- You want to debug host-specific performance and configuration problems even when the cluster is running

You can create a YAML spec to allow users to generate host support bundles for kURL installations. For information, see [Create a Host Support Bundle Spec](#create-a-host-support-bundle-spec) below.

Replicated also provides a default support bundle spec to collect host-level information for installations with the Embedded Cluster installer. For more information, see [Generating Host Bundles for Embedded Cluster](/vendor/support-bundle-embedded).

## Create a Host Support Bundle Spec

To allow users to generate host support bundles for kURL installations, create a host support bundle spec in a YAML manifest that is separate from your application release and then share the file with customers to run on their hosts. This spec is separate from your application release because host collectors and analyzers are intended to run directly on the host and not with Replicated KOTS. If KOTS runs host collectors, the collectors are unlikely to produce the desired results because they run in the context of the kotsadm Pod.

To configure a host support bundle spec for kURL:

1. Create a SupportBundle custom resource manifest file (`kind: SupportBundle`).

1. Configure all of your host collectors and analyzers in one manifest file. You can use the following resources to help create your specification:

   - Access sample specifications in the Replicated troubleshoot-specs repository, which provides specifications for supporting your customers. See [troubleshoot-specs/host](https://github.com/replicatedhq/troubleshoot-specs/tree/main/host) in GitHub.

   - View a list and details of the available host collectors and analyzers. See [All Host Collectors and Analyzers](https://troubleshoot.sh/docs/host-collect-analyze/all/) in the Troubleshoot documentation.

   **Example:**

   The following example shows host collectors and analyzers for the number of CPUs and the amount of memory.

   ```yaml
   apiVersion: troubleshoot.sh/v1beta2
   kind: SupportBundle
   metadata:
     name: host-collectors
   spec:
     hostCollectors:
       - cpu: {}
       - memory: {}
     hostAnalyzers:
       - cpu:
           checkName: "Number of CPUs"
           outcomes:
             - fail:
                 when: "count < 2"
                 message: At least 2 CPU cores are required, and 4 CPU cores are recommended.
             - pass:
                 message: This server has at least 4 CPU cores.
       - memory:
           checkName: "Amount of Memory"
           outcomes:
             - fail:
                 when: "< 4G"
                 message: At least 4G of memory is required, and 8G is recommended.
             - pass:
                 message: The system has at least 8G of memory.
   ```

1. Share the file with your customers to run on their hosts.

:::important
Do not store support bundles on public shares, as they may still contain information that could be used to infer private data about the installation, even if some values are redacted.
:::

## Generate a Host Bundle for kURL

<GenerateBundleHost/>

================
File: docs/vendor/support-inspecting-support-bundles.md
================
# Inspecting Support Bundles

You can use the Vendor Portal to get a visual analysis of customer support bundles and use the file inspector to drill down into the details and log files. Use this information to get insights and help troubleshoot your customers' issues.

To inspect a support bundle:

1. In the Vendor Portal, go to the [**Troubleshoot**](https://vendor.replicated.com/troubleshoot) page and click **Add support bundle > Upload a support bundle**.

1. In the **Upload a support bundle** dialog, drag and drop or use the file selector to upload a support bundle file to the Vendor Portal.

   ![Upload a support bundle dialog](/images/support-bundle-analyze.png)

   [View a larger version of this image](/images/support-bundle-analyze.png)

1. (Optional) If the support bundle relates to an open support issue, select the support issue from the dropdown to share the bundle with Replicated.

1. Click **Upload support bundle**.

   The **Support bundle analysis** page opens. The **Support bundle analysis** page includes information about the bundle, any available instance reporting data from the point in time when the bundle was collected, an analysis overview that can be filtered to show errors and warnings, and a file inspector.

   ![Support bundle analysis overview](/images/support-bundle-analysis-overview.png)

   [View a larger version of this image](/images/support-bundle-analysis-overview.png)

1. On the **File inspector** tab, select any files from the directory tree to inspect the details of any files included in the support bundle, such as log files.

1. (Optional) Click **Download bundle** to download the bundle. This can be helpful if you want to access the bundle from another system or if other team members want to access the bundle and use other tools to examine the files.

1. (Optional) Navigate back to the [**Troubleshoot**](https://vendor.replicated.com/troubleshoot) page and click **Create cluster** to provision a cluster with Replicated Compatibility Matrix. This can be helpful for creating customer-representative environments for troubleshooting. For more information about creating clusters with Compatibility Matrix, see [Using Compatibility Matrix](testing-how-to).

   ![Cluster configuration dialog](/images/cmx-cluster-configuration.png)

   [View a larger version of this image](/images/cmx-cluster-configuration.png)

1. If you cannot resolve your customer's issue and need to submit a support request, go to the [**Support**](https://vendor.replicated.com/) page and click **Open a support request**. For more information, see [Submitting a Support Request](support-submit-request).

   :::note
   The **Share with Replicated** button on the support bundle analysis page does _not_ open a support request. You might be directed to use the **Share with Replicated** option when you are already interacting with a Replicated team member.
   :::

   ![Submit a Support Request](/images/support.png)

   [View a larger version of this image](/images/support.png)

================
File: docs/vendor/support-modular-support-bundle-specs.md
================
# About Creating Modular Support Bundle Specs

This topic describes how to use a modular approach to creating support bundle specs.

## Overview

Support bundle specifications can be designed using a modular approach.
This refers to creating multiple different specs that are scoped to individual components or microservices, rather than creating a single, large spec. For example, for applications that are deployed as multiple Helm charts, vendors can create a separate support bundle spec in the `templates` directory in the parent chart as well as in each subchart.

This modular approach helps teams develop specs that are easier to maintain and helps teams avoid merge conflicts that are more likely to occur when making changes to a large spec. When generating support bundles for an application that includes multiple modular specs, the specs are merged so that only one support bundle archive is generated.

## Example: Support Bundle Specifications by Component {#component}

Using a modular approach for an application that ships MySQL, NGINX, and Redis, your team can add collectors and analyzers using a separate support bundle specification for each component.

`manifests/nginx/troubleshoot.yaml`

This collector and analyzer checks compliance for the minimum number of replicas for the NGINX component:

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: SupportBundle
metadata:
  name: nginx
spec:
  collectors:
    - logs:
        selector:
          - app=nginx
  analyzers:
    - deploymentStatus:
        name: nginx
        outcomes:
          - fail:
              when: replicas < 2
```

`manifests/mysql/troubleshoot.yaml`

This collector and analyzer checks compliance for the minimum version of the MySQL component:

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: SupportBundle
metadata:
  name: mysql
spec:
  collectors:
    - mysql:
        uri: 'dbuser:**REDACTED**@tcp(db-host)/db'
  analyzers:
    - mysql:
        checkName: Must be version 8.x or later
        outcomes:
          - fail:
              when: version < 8.x
```

`manifests/redis/troubleshoot.yaml`

This collector checks that the Redis server is responding:

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: SupportBundle
metadata:
  name: redis
spec:
  collectors:
    - redis:
        collectorName: redis
        uri: rediss://default:password@hostname:6379
```

A single support bundle archive can be generated from a combination of these manifests using the `kubectl support-bundle --load-cluster-specs` command, as shown below.
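
For example, assuming the kubectl support-bundle plugin is installed, the following generates one merged bundle from all of the support bundle specs that are discoverable in the cluster:

```bash
# Discover SupportBundle specs in the cluster (for example, Secrets labeled
# troubleshoot.sh/kind=support-bundle) and generate a single combined bundle
kubectl support-bundle --load-cluster-specs
```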

For more information and additional options, see [Generating Support Bundles](support-bundle-generating).

================
File: docs/vendor/support-online-support-bundle-specs.md
================
# Making Support Bundle Specs Available Online

This topic describes how to make your application's support bundle specs available online, as well as how to link to online specs.

## Overview

You can make the definition of one or more support bundle specs available online in a source repository and link to it from the specs in the cluster. This approach lets you update collectors and analyzers outside of the application release and notify customers of potential problems and fixes in between application updates.

The schema supports a `uri:` field that, when set, causes the support bundle generation to use the online specification. If the URI is unreachable or unparseable, any collectors or analyzers in the specification are used as a fallback.

You update collectors and analyzers in the online specification to manage bug fixes. When a customer generates a support bundle, the online specification can detect those potential problems in the cluster and let them know how to fix them. Without the URI link option, you must wait for the next time your customers update their applications or Kubernetes versions to get notified of potential problems. The URI link option is particularly useful for customers that do not update their application routinely.

If you are using a modular approach to designing support bundles, you can use multiple online specs. Each specification supports one URI link. For more information about modular specs, see [About Creating Modular Support Bundle Specs](support-modular-support-bundle-specs).

## Example: URI Linking to a Source Repository

This example shows how Replicated could set up a URI link for one of its own components. You can follow a similar process to link to your own online repository for your support bundles.

Replicated kURL includes an EKCO add-on for maintenance on embedded clusters, such as automating certificate rotation or data migration tasks. Replicated can ship this component with a support bundle manifest that warns users if they do not have this add-on installed or if it is not running in the cluster.

**Example: Release v1.0.0**

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: SupportBundle
metadata:
  name: ekco
spec:
  collectors:
  analyzers:
    - deploymentStatus:
        checkName: Check EKCO is operational
        name: ekc-operator
        namespace: kurl
        outcomes:
          - fail:
              when: absent
              message: EKCO is not installed - please add the EKCO component to your kURL spec and re-run the installer script
          - fail:
              when: "< 1"
              message: EKCO does not have any ready replicas
          - pass:
              message: EKCO has at least 1 replica
```

If a bug is discovered at any time after the release of the specification above, Replicated can write an analyzer for it in an online specification. By adding a URI link to the online specification, the support bundle uses the assets hosted in the online repository, which is kept current.

The `uri` field is added to the specification as a raw file link. Replicated hosts the online specification on [GitHub](https://github.com/replicatedhq/troubleshoot-specs/blob/main/in-cluster/default.yaml).

**Example: Release v1.1.0**

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: SupportBundle
metadata:
  name: ekco
spec:
  uri: https://raw.githubusercontent.com/replicatedhq/troubleshoot-specs/main/in-cluster/default.yaml
  collectors: [...]
  analyzers: [...]
```

Using the `uri:` property, the support bundle gets the latest online specification if it can, or falls back to the collectors and analyzers listed in the specification that is in the cluster.

Note that because release version 1.0.0 did not contain the URI, Replicated would have to wait until existing users upgrade a cluster before getting the benefit of the new analyzer. Then, going forward, those users get any future online analyzers without having to upgrade. New users who install the version containing the URI as their initial installation automatically get any online analyzers when they generate a support bundle.

For more information about the URI, see [Troubleshoot schema supports a `uri://` field](https://troubleshoot.sh/docs/support-bundle/supportbundle/#uri) in the Troubleshoot documentation. For a complete example, see [Debugging Kubernetes: Enhancements to Troubleshoot](https://www.replicated.com/blog/debugging-kubernetes-enhancements-to-troubleshoot/#Using-online-specs-for-support-bundles) in The Replicated Blog.

================
File: docs/vendor/support-submit-request.md
================
# Submitting a Support Request

You can submit a support request and a support bundle using the Replicated Vendor Portal. Uploading a support bundle is secure and helps the Replicated support team troubleshoot your application faster. Severity 1 issues are resolved three times faster when you submit a support bundle with your support request.

### Prerequisites

The following prerequisites must be met to submit support requests:

* Your Vendor Portal account must be configured for access to support before you can submit support requests. Contact your administrator to ensure that you are added to the correct team.

* Your team must have a replicated-collab repository configured. If you are a team administrator and need information about getting a collab repository set up and adding users, see [Adding Users to the Collab Repository](team-management-github-username#add).

### Submit a Support Request

To submit a support request:

1. From the [Vendor Portal](https://vendor.replicated.com), click **Support > Submit a Support Request** or go directly to the [Support page](https://vendor.replicated.com/support).

1. In section 1 of the Support Request form, complete the fields with information about your issue.

1. In section 2, do _one_ of the following actions:
   - Use your pre-selected support bundle or select a different bundle in the pick list
   - Select **Upload and attach a new support bundle** and attach a bundle from your file browser

1. Click **Submit Support Request**. You receive a link to your support issue, where you can interact with the support team.

   :::note
   Click **Back** to exit without submitting a support request.
   :::

================
File: docs/vendor/team-management-github-username.mdx
================
import CollabRepoAbout from "../partials/collab-repo/_collab-repo-about.mdx"
import CollabRbacResourcesImportant from "../partials/collab-repo/_collab-rbac-resources-important.mdx"
import CollabExistingUser from "../partials/collab-repo/_collab-existing-user.mdx"

# Managing Collab Repository Access

This topic describes how to add users to the Replicated collab GitHub repository automatically through the Replicated Vendor Portal. It also includes information about managing user roles in this repository using Vendor Portal role-based access control (RBAC) policies.

## Overview {#overview}

<CollabRepoAbout/>

To get access to the collab repository, members of a Vendor Portal team can add their GitHub username to the [Account Settings](https://vendor.replicated.com/account-settings) page in the Vendor Portal. The Vendor Portal then automatically provisions the team member as a user in the collab repository in GitHub. The RBAC policy that the member is assigned in the Vendor Portal determines the GitHub role that they have in the collab repository.

Replicated recommends that Vendor Portal admins manage user access to the collab repository through the Vendor Portal, rather than manually managing users through GitHub. Managing access through the Vendor Portal has the following benefits:
* Users are automatically added to the collab repository when they add their GitHub username in the Vendor Portal.
* Users are automatically removed from the collab repository when they are removed from the Vendor Portal team.
* Vendor Portal and collab repository RBAC policies are managed from a single location.

## Add Users to the Collab Repository {#add}

This procedure describes how to use the Vendor Portal to access the collab repository for the first time as an Admin, then automatically add new and existing users to the repository. This allows you to use the Vendor Portal to manage the GitHub roles for users in the collab repository, rather than manually adding, managing, and removing users from the repository through GitHub.

### Prerequisite

Your team must have a replicated-collab repository configured to add users to the repository and to manage repository access through the Vendor Portal. To get a collab support repository configured in GitHub for your team, complete the onboarding instructions in the email you received from Replicated. You can also access the [Replicated community help forum](https://community.replicated.com/) for assistance.

### Procedure

To add new and existing users to the collab repository through the Vendor Portal:

1. As a Vendor Portal admin, log in to your Vendor Portal account. In the [Account Settings](https://vendor.replicated.com/account-settings) page, add your GitHub username and click **Save Changes**.

   Account info in the Vendor Portal

   The Vendor Portal automatically adds your GitHub username to the collab repository and assigns it the Admin role. You receive an email with details about the collab repository when you are added.

1. Follow the collab repository link from the email that you receive to log in to your GitHub account and access the repository.

1. (Recommended) Manually remove any users in the collab repository that were previously added through GitHub.

1. (Optional) In the Vendor Portal, go to the [Team](https://vendor.replicated.com/team/members) page. For each team member, click **Edit permissions** as necessary to specify their GitHub role in the collab repository.

   For information about which policies to select, see [About GitHub Roles](#about-github-roles).

1. Instruct each Vendor Portal team member to add their GitHub username to the [Account Settings](https://vendor.replicated.com/account-settings) page in the Vendor Portal.

   The Vendor Portal adds the username to the collab repository and assigns a GitHub role to the user based on their Vendor Portal policy.

   Users receive an email when they are added to the collab repository.

## About GitHub Roles

When team members add a GitHub username to their Vendor Portal account, the Vendor Portal determines how to assign the user a default GitHub role in the collab repository based on the following criteria:
* If the GitHub username already exists in the collab repository
* The RBAC policy assigned to the member in the Vendor Portal

You can also update any custom RBAC policies in the Vendor Portal to change the default GitHub roles for those policies.

### Default Roles for Existing Users {#existing-username}

<CollabExistingUser/>

### Default Role Mapping {#role-mapping}

When team members add a GitHub username to their Vendor Portal account, the Vendor Portal assigns them to a GitHub role in the collab repository that corresponds to their Vendor Portal policy. For example, users with the default Read Only policy in the Vendor Portal are assigned the Read GitHub role in the collab repository.

For team members assigned custom RBAC policies in the Vendor Portal, you can edit the custom policy to change their GitHub role in the collab repository. For more information, see [Change the Default Role](#custom) below.

The table below describes how each default and custom Vendor Portal policy corresponds to a role in the collab repository in GitHub. For more information about each of the GitHub roles described in this table, see [Permissions for each role](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/repository-roles-for-an-organization#permissions-for-each-role) in the GitHub documentation.

| Vendor Portal Role | GitHub collab Role | Description |
|--------------------|--------------------|-------------|
| Admin | Admin | Members assigned the default Admin role in the Vendor Portal are assigned the GitHub Admin role in the collab repository. |
| Support Engineer | Triage | Members assigned the custom Support Engineer role in the Vendor Portal are assigned the GitHub Triage role in the collab repository. For information about creating a custom Support Engineer policy in the Vendor Portal, see [Configuring RBAC Policies](team-management-rbac-configuring). For information about editing custom RBAC policies to change this default GitHub role, see [Change the Default Role](#custom) below. |
| Read Only | Read | Members assigned the default Read Only role in the Vendor Portal are assigned the GitHub Read role in the collab repository. |
| Sales | N/A | Users assigned the custom Sales role in the Vendor Portal do not have access to the collab repository. For information about creating a custom Sales policy in the Vendor Portal, see [Configuring RBAC Policies](team-management-rbac-configuring). For information about editing custom RBAC policies to change this default GitHub role, see [Change the Default Role](#custom) below. |
| Custom policies with `**/admin` under `allowed:` | Admin | By default, members assigned to a custom RBAC policy that specifies `**/admin` under `allowed:` are assigned the GitHub Admin role in the collab repository. For information about editing custom RBAC policies to change this default GitHub role, see [Change the Default Role](#custom) below. |
| Custom policies without `**/admin` under `allowed:` | Read | By default, members assigned to any custom RBAC policies that do not specify `**/admin` under `allowed:` are assigned the GitHub Read role in the collab repository. For information about editing custom RBAC policies to change this default GitHub role, see [Change the Default Role](#custom) below. |

### Change the Default Role {#custom}

You can update any custom RBAC policies that you create in the Vendor Portal to change the default GitHub roles for those policies. For example, by default, any team members assigned a custom policy with `**/admin` under `allowed:` are assigned the Admin role in the collab repository in GitHub. You can update the custom policy to specify a more restrictive GitHub role.

To edit a custom policy to change the default GitHub role assigned to users with that policy, add one of the following RBAC resources to the `allowed:` or `denied:` list in the custom policy, as shown in the example after this list:

* `team/support-issues/read`
* `team/support-issues/write`
* `team/support-issues/triage`
* `team/support-issues/admin`

For more information about each of these RBAC resources, see [Team](team-management-rbac-resource-names#team) in _RBAC Resource Names_.

For more information about how to edit the `allowed:` or `denied:` lists for custom policies in the Vendor Portal, see [Configuring Custom RBAC Policies](team-management-rbac-configuring).
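
For example, the following is a minimal sketch of a custom policy (the policy name is illustrative) that grants read-only access in the Vendor Portal while assigning the GitHub Triage role in the collab repository:

```json
{
  "v1": {
    "name": "Support Triage",
    "resources": {
      "allowed": [
        "**/read",
        "**/list",
        "team/support-issues/triage"
      ],
      "denied": [
        "**/*"
      ]
    }
  }
}
```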

<CollabRbacResourcesImportant/>

================
File: docs/vendor/team-management-google-auth.md
================
# Managing Google Authentication

This topic describes the Google authentication options that you can configure to control access to the Replicated Vendor Portal.

## Manage Google Authentication Options

As a team administrator, you can enable, disable, or require Google authentication for all accounts in the team.

A core benefit of using Google authentication is that when a user's Google account is suspended or deleted, Replicated logs that user out of all Google-authenticated Vendor Portal sessions within 10 minutes. The user remains in the team list, but they cannot log in to the Vendor Portal unless username and password authentication is also allowed. Requiring Google authentication is an effective way of centrally removing access to the Vendor Portal.

To manage Google authentication settings:

1. Click **Team Settings > [Google Authentication](https://vendor.replicated.com/team/google-authentication)**.

   ![Google Auth Settings](/images/team-mgmt-google-auth.png)

1. Enable or disable the settings:

   | Field | Instructions |
   |-----------------------|------------------------|
   | Allow Google authentication for team members | Enables team members to log in using a Google account. |
   | Restrict login to allow only Google authentication | Requires new users to accept an invitation and sign up with a Google account that exactly matches the email address that was invited to the team. The email address can be a gmail.com address or a user from another domain, but it must match the email address from the invitation exactly. Disabling this setting requires users to accept the invitation by creating a username and password (or use the SAML workflow). |

## Migrating Existing Accounts

Excluding some teams that restrict end users to use only Security Assertion Markup Language (SAML) or require two-factor authentication (2FA), existing end users can seamlessly sign in to an account that exactly matches their Google Workspace (formerly GSuite) email address. However, Google authentication only matches existing user accounts, so users who signed up with task-based email addresses (such as name+news@domain.com) can continue to use email/password to sign in, invite their normal email address to the team, or contact support to change their email address.
For more information about task-based email addresses, see [Create task-specific email addresses](https://support.google.com/a/users/answer/9308648?hl=en) in the Google Support site.

Migrated accounts maintain the same role-based access control (RBAC) permissions that were previously assigned. After signing in with Google, users can choose to disable username/password-based authentication on their account or maintain both authentication methods using the Vendor Portal [account settings page](https://vendor.replicated.com/account-settings).

## Limitations

Using distribution lists to send invitations to join a team is not supported. The invitations are sent, but are invalid and cannot be used to join a team using Google authentication.

## Compatibility with Two-Factor Authentication

Google authentication is not entirely compatible with the Replicated two-factor authentication (2FA) implementation because Google authentication bypasses account-based 2FA, relying on Google authentication instead. However, the Vendor Portal continues to enforce 2FA on all email/password-based authentication, even for the same user, if both options are enabled.

## Related Topic

[Managing Team Members](team-management)

================
File: docs/vendor/team-management-rbac-configuring.md
================
import CollabRbacResourcesImportant from "../partials/collab-repo/_collab-rbac-resources-important.mdx"

# Configuring RBAC Policies

This topic describes how to use role-based access control (RBAC) policies to grant or deny team members permissions to use Replicated services in the Replicated Vendor Portal.

## About RBAC Policies

By default, every team has two policies created automatically: **Admin** and **Read Only**. If you have an Enterprise plan, you will also have the **Sales** and **Support Engineer** policies created automatically. These default policies are not configurable. For more information, see [Default RBAC Policies](#default-rbac) below.

You can configure custom RBAC policies if you are on the Enterprise pricing plan. Creating custom RBAC policies lets you limit which areas of the Vendor Portal are accessible to team members, and control read and read/write privileges to groups based on their role. For example, you can limit access for the sales team to one application and to specific channels. Or, you can grant only certain users permission to promote releases to your production channels.

You can also create custom RBAC policies in the Vendor Portal to manage user access and permissions in the Replicated collab repository in GitHub. For more information, see [Managing Access to the Collab Repository](team-management-github-username).

## Default RBAC Policies {#default-rbac}

This section describes the default RBAC policies that are included for Vendor Portal teams, depending on the team's Replicated pricing plan.

### Admin

The Admin policy grants read/write permissions to all resources on the team.

:::note
This policy is automatically created for all plans.
:::

```json
{
  "v1": {
    "name": "Admin",
    "resources": {
      "allowed": [
        "**/*"
      ],
      "denied": []
    }
  }
}
```

### Read Only

The Read Only policy grants read permission to all resources on the team except for API tokens.

:::note
This policy is automatically created for all plans.
:::

```json
{
  "v1": {
    "name": "Read Only",
    "resources": {
      "allowed": [
        "**/list",
        "**/read"
      ],
      "denied": [
        "**/*"
      ]
    }
  }
}
```

### Support Engineer

The Support Engineer policy grants read access to releases, channels, and application data, and read-write access to customer and license details. It also grants permission to open Replicated support issues and upload support bundles.

:::note
This policy is automatically created for teams with the Enterprise plan only.
:::

```json
{
  "v1": {
    "name": "Support Engineer",
    "resources": {
      "allowed": [
        "**/read",
        "**/list",
        "kots/app/*/license/**",
        "team/support-issues/read",
        "team/support-issues/write"
      ],
      "denied": [
        "**/*"
      ]
    }
  }
}
```

### Sales

The Sales policy grants read-write access to customers and license details, and read-only access to the resources necessary to manage licenses (applications, channels, and license fields). No additional access is granted.

:::note
This policy is automatically created for teams with the Enterprise plan only.
:::

```json
{
  "v1": {
    "name": "Sales",
    "resources": {
      "allowed": [
        "kots/app/*/read",
        "kots/app/*/channel/*/read",
        "kots/app/*/licensefields/read",
        "kots/app/*/license/**"
      ],
      "denied": [
        "**/*"
      ]
    }
  }
}
```

## Configure a Custom RBAC Policy

To configure a custom RBAC policy:

1. From the Vendor Portal [Team page](https://vendor.replicated.com/team), select **RBAC** from the left menu.

1. Do _one_ of the following:

   - Click **Create Policy** from the RBAC page to create a new policy.
   - Click **View policy** to edit an existing custom policy in the list.

1. Edit the fields in the policy dialog. In the **Definition** pane, specify the `allowed` and `denied` arrays in the `resources` key to create limits for the role.

   The default policy allows everything, and the **Config help** pane displays any errors.

   ![Create RBAC Policy](/images/policy-create.png)

   - For more information, see [Policy Definition](#policy-definition).
   - For more information about and examples of rule order, see [Rule Order](#rule-order).
   - For a list of resource names, see [RBAC Resource Names](team-management-rbac-resource-names).

1. Click **Create Policy** to create a new policy, or click **Update Policy** to update an existing policy.

   :::note
   Click **Cancel** to exit without saving changes.
   :::

1. To apply RBAC policies to Vendor Portal team members, you can:

   - Assign policies to existing team members
   - Specify a policy when inviting new team members
   - Set a default policy for auto-joining a team

   See [Managing Team Members](team-management).

## Policy Definition

A policy is defined in a single JSON document:

```
{
  "v1": {
    "name": "Read Only",
    "resources": {
      "allowed": [
        "**/read",
        "**/list"
      ],
      "denied": [
        "**/*"
      ]
    }
  }
}
```

The primary content of a policy document is the `resources` key. The `resources` key should contain two arrays, identified as `allowed` and `denied`. Resources specified in the `allowed` list are allowed for users assigned to the policy, and resources specified in the `denied` list are denied.

Resource names are hierarchical, and support wildcards and globs. For a complete list of resource names that can be defined in a policy document, see [RBAC Resource Names](team-management-rbac-resource-names).

When a policy document has conflicting rules, the behavior is predictable.
For more information about conflicting rules, see [Rule Order](#rule-order).

### Example: View Specific Application and Channel

The following policy definition example limits any user with this role to viewing a specific application and a specific channel for that application:

```
{
  "v1": {
    "name": "Policy Name",
    "resources": {
      "allowed": [
        "kots/app/appID/list",
        "kots/app/appID/read",
        "kots/app/appID/channel/channelID/list",
        "kots/app/appID/channel/channelID/read"
      ],
      "denied": []
    }
  }
}
```

The example above uses an application ID and a channel ID to scope the permissions of the RBAC policy. To find your application and channel IDs, do the following:

- To get the application ID, click **Settings > Show Application ID (Advanced)** in the Vendor Portal.

- To get the channel ID, click **Channels** in the Vendor Portal. Then click the Release History link for the channel that you want to limit access to. The channel ID displays in your browser URL.

## Rule Order

When a resource name is specified in both the `allowed` and the `denied` lists of a policy, defined rules determine which rule is applied.

If `denied` is left empty, it is implied as a `**/*` rule, unless a `**/*` rule is specified in the `allowed` resources. If a rule exactly conflicts with another rule, the `denied` rule takes precedence.

### Defining Precedence Using Rule Specificity
The most specific rule definition is always applied, when compared with less specific rules. The specificity of a rule is calculated by the number of asterisks (`**` and `*`) in the definition. A `**` in the rule definition is the least specific, followed by rules with `*`, and finally rules with no wildcards as the most specific.

### Example: No Access To Stable Channel

In the following example, a policy grants access to promote releases to any channel except the Stable channel. It uses the rule pattern `kots/app/[:appId]/channel/[:channelId]/promote`. Note that you specify the channel ID, rather than the channel name. To find the channel ID, go to the Vendor Portal **Channels** page and click the **Settings** icon for the target channel.

```json
{
  "v1": {
    "name": "No Access To Stable Channel",
    "resources": {
      "allowed": [
        "**/*"
      ],
      "denied": [
        "kots/app/*/channel/1eg7CyEofYSmVAnK0pEKUlv36Y3/promote"
      ]
    }
  }
}
```

### Example: View Customers Only

In the following example, a policy grants access to viewing all customers, but not to creating releases, promoting releases, or creating new customers.

```json
{
  "v1": {
    "name": "View Customers Only",
    "resources": {
      "allowed": [
        "kots/app/*/license/*/read",
        "kots/app/*/license/*/list",
        "kots/app/*/read",
        "kots/app/*/list"
      ],
      "denied": [
        "**/*"
      ]
    }
  }
}
```

================
File: docs/vendor/team-management-rbac-resource-names.md
================
import CollabRbacResourcesImportant from "../partials/collab-repo/_collab-rbac-resources-important.mdx"

# RBAC Resource Names

This is a list of all available resource names for Replicated vendor role-based access control (RBAC) policies:

## Integration Catalog

### integration/catalog/list

Grants the holder permission to view the catalog events and triggers available for integrations.

## kots

### kots/app/create

Grants the holder permission to create new applications.

### kots/app/[:appId]/read
Grants the holder permission to view the application.
If the holder does not have permissions to view an application, it will not appear in lists. - -### kots/externalregistry/list -Grants the holder the ability to list external docker registry for application(s). - -### kots/externalregistry/create - -Grants the holder the ability to link a new external docker registry to application(s). - -### kots/externalregistry/[:registryName]/delete - -Grants the holder the ability to delete the specified linked external docker registry in application(s). - -### kots/app/[:appId]/channel/create - -Grants the holder the ability to create a new channel in the specified application(s). - -### kots/app/[:appId]/channel/[:channelId]/archive - -Grants the holder permission to archive the specified channel(s) of the specified application(s). - -### kots/app/[:appId]/channel/[:channelId]/promote - -Grants the holder the ability to promote a new release to the specified channel(s) of the specified application(s). - -### kots/app/[:appId]/channel/[:channelId]/update - -Grants the holder permission to update the specified channel of the specified application(s). - -### kots/app/[:appId]/channel/[:channelId]/read - -Grants the holder the permission to view information about the specified channel of the specified application(s). - -### kots/app/[:appId]/enterprisechannel/[:channelId]/read - -Grants the holder the permission to view information about the specified enterprise channel of the specified application(s). - -### kots/app/[:appId]/channel/[:channelId]/releases/airgap - -Grants the holder permission to trigger airgap builds for the specified channel. - -### kots/app/[:appId]/channel/[:channelId]/releases/airgap/download-url - -Grants the holder permission to get an airgap bundle download URL for any release on the specified channel. - -### kots/app/[:appId]/installer/create - -Grants the holder permission to create kURL installers. For more information, see [Creating a kURL installer](packaging-embedded-kubernetes). - -### kots/app/[:appId]/installer/update - -Grants the holder permission to update kURL installers. For more information, see [Creating a kURL installer](packaging-embedded-kubernetes). - -### kots/app/[:appId]/installer/read - -Grants the holder permission to view kURL installers. For more information, see [Creating a kURL installer](packaging-embedded-kubernetes). - -### kots/app/[:appId]/installer/promote - -Grants the holder permission to promote kURL installers to a channel. For more information, see [Creating a kURL installer](packaging-embedded-kubernetes). - -:::note -The `kots/app/[:appId]/installer/promote` policy does not grant the holder permission to view and create installers. Users must be assigned both the `kots/app/[:appId]/installers` and `kots/app/[:appId]/installer/promote` policies to have permissions to view, create, and promote installers. -::: - -### kots/app/[:appId]/license/create - -Grants the holder permission to create a new license in the specified application(s). - -### kots/app/[:appId]/license/[:customerId]/read - -Grants the holder permission to view the license specified by ID. If this is denied, the licenses will not show up in search, CSV export or on the Vendor Portal, and the holder will not be able to subscribe to this license's instance notifications. - -### kots/app/[:appId]/license/[:customerId]/update - -Grants the holder permission to edit the license specified by ID for the specified application(s). 
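For illustration, the license resources above can be combined into a custom policy for a role that manages customers for a single application and nothing else. This is a sketch only; `appID` is a placeholder for your application ID, which you can find under **Settings > Show Application ID (Advanced)** in the Vendor Portal:

```json
{
  "v1": {
    "name": "License Manager (Example)",
    "resources": {
      "allowed": [
        "kots/app/appID/read",
        "kots/app/appID/license/create",
        "kots/app/appID/license/*/read",
        "kots/app/appID/license/*/update"
      ],
      "denied": [
        "**/*"
      ]
    }
  }
}
```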
- -### kots/app/[:appId]/license/[:customerId]/slack-notifications/read - -Grants the holder permission to view the team's Slack notification subscriptions for instances associated with the specified license. - -### kots/app/[:appId]/license/[:customerId]/slack-notifications/update - -Grants the holder permission to edit the team's Slack notification subscriptions for instances associated with the specified license. - -### kots/app/[:appId]/builtin-licensefields/update - -Grants the holder permission to edit the builtin license field override values for the specified application(s). - -### kots/app/[:appId]/builtin-licensefields/delete - -Grants the holder permission to delete the builtin license field override values for the specified application(s). - -### kots/license/[:customerId]/airgap/password - -Grants the holder permission to generate a new download portal password for the license specified (by ID) for the specified application(s). - -### kots/license/[:customerId]/archive - -Grants the holder permission to archive the specified license (by ID). - -### kots/license/[:customerId]/unarchive - -Grants the holder permissions to unarchive the specified license (by ID). - -### kots/app/[:appId]/licensefields/create - -Grants the holder permission to create new license fields in the specified application(s). - -### kots/app/[:appId]/licensefields/read - -Grants the holder permission to view the license fields in the specified application(s). - -### kots/app/[:appId]/licensefields/update - -Grants the holder permission to edit the license fields for the specified application(s). - -### kots/app/[:appId]/licensefields/delete - -Grants the holder permission to delete the license fields for the specified application(s). - -### kots/app/[:appId]/release/create - -Grants the holder permission to create a new release in the specified application(s). - -### kots/app/[:appId]/release/[:sequence]/update - -Grants the holder permission to update the files saved in release sequence `[:sequence]` in the specified application(s). Once a release is promoted to a channel, it's not editable by anyone. - -### kots/app/[:appId]/release/[:sequence]/read - -Grants the holder permission to read the files at release sequence `[:sequence]` in the specified application(s). - -### kots/app/[:appId]/customhostname/list - -Grants the holder permission to view custom hostnames for the team. - -### kots/app/[:appId]/customhostname/create - -Grants the holder permission to create custom hostnames for the team. - -### kots/app/[:appId]/customhostname/delete - -Grants the holder permission to delete custom hostnames for the team. - -### kots/app/[:appId]/customhostname/default/set - -Grants the holder permission to set default custom hostnames. - -### kots/app/[:appId]/customhostname/default/unset - -Grants the holder permission to unset the default custom hostnames. - -### kots/app/[:appId]/supportbundle/read - -Grants the holder permission to view and download support bundles. - -## Registry - -### registry/namespace/:namespace/pull - -Grants the holder permission to pull images from Replicated registry. - -### registry/namespace/:namespace/push - -Grants the holder permission to push images into Replicated registry. - -## Compatibility Matrix - -### kots/cluster/create - -Grants the holder permission to create new clusters. - -### kots/cluster/list - -Grants the holder permission to list running and terminated clusters. - -### kots/cluster/[:clusterId] - -Grants the holder permission to get cluster details. 
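As an illustration, the Compatibility Matrix resources in this section can be scoped for a CI service account that can create, list, and delete test clusters but has no access to application resources. This is a sketch only; it also uses `kots/cluster/[:clusterId]/delete`, which is defined later on this page:

```json
{
  "v1": {
    "name": "CI Cluster Runner (Example)",
    "resources": {
      "allowed": [
        "kots/cluster/create",
        "kots/cluster/list",
        "kots/cluster/*/delete"
      ],
      "denied": [
        "**/*"
      ]
    }
  }
}
```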
### kots/cluster/[:clusterId]/upgrade

Grants the holder permission to upgrade a cluster.

### kots/cluster/tag/update

Grants the holder permission to update cluster tags.

### kots/cluster/ttl/update

Grants the holder permission to update the cluster TTL.

### kots/cluster/[:clusterId]/nodegroup

Grants the holder permission to update node group details.

### kots/cluster/[:clusterId]/kubeconfig

Grants the holder permission to get the kubeconfig for a cluster.

### kots/cluster/[:clusterId]/delete

Grants the holder permission to delete a cluster.

### kots/cluster/[:clusterId]/addon/list

Grants the holder permission to list add-ons for a cluster.

### kots/cluster/[:clusterId]/addon/[:addonId]/read

Grants the holder permission to read the specified add-on for a cluster.

### kots/cluster/[:clusterId]/addon/[:addonId]/delete

Grants the holder permission to delete the specified add-on for a cluster.

### kots/cluster/[:clusterId]/addon/create/objectStore

Grants the holder permission to create an object store for a cluster.

### kots/cluster/[:clusterId]/port/expose

Grants the holder permission to expose a port for a cluster.

### kots/cluster/[:clusterId]/port/delete

Grants the holder permission to delete a port for a cluster.

### kots/cluster/[:clusterId]/port/list

Grants the holder permission to list exposed ports for a cluster.

### kots/cluster/list-quotas

Grants the holder permission to list the quotas.

### kots/cluster/increase-quota

Grants the holder permission to request an increase in the quota.

### kots/vm/tag/update

Grants the holder permission to update VM tags.

### kots/vm/ttl/update

Grants the holder permission to update the VM TTL.

### kots/vm/[:vmId]/port/expose

Grants the holder permission to expose a port for a VM.

### kots/vm/[:vmId]/port/list

Grants the holder permission to list exposed ports for a VM.

### kots/vm/[:vmId]/addon/[:addonId]/delete

Grants the holder permission to delete the specified add-on for a VM.

## Team

### team/auditlog/read

Grants the holder permission to view the audit log for the team.

### team/authentication/update

Grants the holder permission to manage the following team authentication settings: Google authentication, Auto-join, and SAML authentication.

### team/authentication/read

Grants the holder permission to read the following authentication settings: Google authentication, Auto-join, and SAML authentication.

### team/integration/list

Grants the holder permission to view the team's integrations.

### team/integration/create

Grants the holder permission to create an integration.

### team/integration/[:integrationId]/delete

Grants the holder permission to delete the specified integration(s).

### team/integration/[:integrationId]/update

Grants the holder permission to update the specified integration(s).

### team/members/list

Grants the holder permission to list team members and invitations.

### team/member/invite

Grants the holder permission to invite additional people to the team.

### team/members/delete

Grants the holder permission to delete other team members.

### team/notifications/slack-webhook/read

Grants the holder permission to view the team's Slack webhook for instance notifications.

### team/notifications/slack-webhook/update

Grants the holder permission to edit the team's Slack webhook for instance notifications.

### team/policy/read

Grants the holder permission to view RBAC policies for the team.
- -### team/policy/update - -Grants the holder permission to update RBAC policies for the team. - -### team/policy/delete - -Grants the holder permission to delete RBAC policies for the team. - -### team/policy/create - -Grants the holder permission to create RBAC policies for the team. - -### team/security/update - -Grants the holder permission to manage team password requirements including two-factor authentication and password complexity requirements. - -### team/serviceaccount/list - -Grants the holder permission to list service accounts. - -### team/serviceaccount/create - -Grants the holder permission to create new service accounts. - -### team/serviceaccount/[:name]/delete - -Grants the holder permission to delete the service account identified by the name specified. - -### team/support-issues/read - -Grants the holder Read permissions in the Replicated collab repository in GitHub for the Vendor Portal team. Applies after the user adds their GitHub username to the Vendor Portal [Account Settings](https://vendor.replicated.com/account-settings) page. - -To prevent access to the collab repository for an RBAC policy, add `team/support-issues/read` to the `denied:` list in the policy. For example: - -``` -{ - "v1": { - "name": "Policy Name", - "resources": { - "allowed": [], - "denied": [ - "team/support-issues/read" - ] - } - } -} -``` - -For more information about the Read role in GitHub, see [Permissions for each role](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/repository-roles-for-an-organization#permissions-for-each-role) in the GitHub documentation. - - - -### team/support-issues/write - -Grants the holder Write permissions in the Replicated collab repository in GitHub for the Vendor Portal team. Applies after the user adds their GitHub username to the Vendor Portal [Account Settings](https://vendor.replicated.com/account-settings) page. - -For more information about the Write role in GitHub, see [Permissions for each role](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/repository-roles-for-an-organization#permissions-for-each-role) in the GitHub documentation. - - - -### team/support-issues/triage - -Grants the holder Triage permissions in the Replicated collab repository in GitHub for the Vendor Portal team. Applies after the user adds their GitHub username to the Vendor Portal [Account Settings](https://vendor.replicated.com/account-settings) page. - -For more information about the Triage role in GitHub, see [Permissions for each role](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/repository-roles-for-an-organization#permissions-for-each-role) in the GitHub documentation. - - - -### team/support-issues/admin - -Grants the holder Admin permissions in the Replicated collab repository in GitHub for the Vendor Portal team. Applies after the user adds their GitHub username to the Vendor Portal [Account Settings](https://vendor.replicated.com/account-settings) page. - -For more information about the Admin role in GitHub, see [Permissions for each role](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/repository-roles-for-an-organization#permissions-for-each-role) in the GitHub documentation. - - - -## User - -### user/token/list - -Grants the holder permission to list user tokens. - -### user/token/create - -Grants the holder permission to create new user tokens. 
### user/token/delete

Grants the holder permission to delete user tokens.

================
File: docs/vendor/team-management-saml-auth.md
================
# Managing SAML Authentication

This topic describes how to enable or disable SAML authentication for the Replicated Vendor Portal.

## About Using SAML with the Vendor Portal

After starting out with Replicated, most teams grow, adding more developers, support engineers, and sales engineers. Eventually, managing access to the Vendor Portal can become difficult. Replicated supports logging in using SAML, which lets you manage access (provisioning and unprovisioning accounts) through your SAML identity provider.

Using SAML, everyone on your team logs in with their existing usernames and passwords through your identity provider's dashboard. Users do not need to sign up through the Vendor Portal or log in with a separate Vendor Portal account, simplifying their experience.

### Enabling SAML in Your Vendor Account

To enable SAML in your Vendor Portal account, you must have an Enterprise plan. For access to SAML, you can contact Replicated through [Support](https://vendor.replicated.com/support). For information about the Enterprise plan, see [pricing](https://www.replicated.com/pricing/).

### SCIM

Replicated does not implement System for Cross-domain Identity Management (SCIM). Instead, we use SAML to authenticate and create just-in-time user identities in our system. We resolve the username (email address) as the actor and use this to ensure that audit log events follow these dynamically provisioned users. If a user's email address is already associated with a Replicated account, then when they use your SAML integration to access the Vendor Portal, they automatically leave their current team and join the team associated with the SAML login.

### Compatibility with Two-Factor Authentication

If SAML authentication is configured for your team, Replicated two-factor authentication (2FA) is bypassed. You can leave 2FA enabled, but you are not prompted to enter a code when logging in.

### Role-Based Access Control

Replicated supports role-based access control (RBAC) in the Vendor Portal. To use RBAC with SAML, you must configure policies and add users to the policies by their username. Usernames are the identity of the user in your identity provider (IdP). Typically, this username is the full email address. For more information about configuring RBAC, see [Configuring RBAC Policies](team-management-rbac-configuring).

## Downloading Certificates from Supported SAML Providers

You must retrieve the metadata and x.509 public certificate files from your SAML provider before configuring SAML in the Vendor Portal. The certificate file must be in PEM format.

Replicated tests several SAML providers, but the service should be compatible with any SAML 2.0-compliant service provider. We provide full support for the following SAML providers:

* Okta. For more information about integrating Okta with Replicated, see [Configure Okta](#configure-okta).

* OneLogin

## Configure Okta

The first part of the Vendor Portal and Okta integration is configured in the Okta dashboard. This configuration lets you download the XML metadata file and x.509 public certificate in PEM format required for SAML authentication.

This procedure outlines the basic configuration steps, recommended settings, and the specific fields to configure in Okta.
For more information about using Okta, see the [Okta](https://help.okta.com/en/prod/Content/index.htm) documentation.

To configure Okta and download the required files:

1. Log in to your Okta Admin dashboard, and click **Applications**.

1. Select **Create new app integration**, and create a new application as a SAML 2.0 application.

1. Provide a name and icon for the application, such as Replicated Vendor Portal. You can download a high-quality Replicated icon [here](https://help.replicated.com/images/guides/vendor-portal-saml/replicated-application-icon.png).

1. Click **Next**.

   The Configuring SAML page opens.

1. Click **Download Okta Certificate**. This downloads your x.509 certificate to provide to Replicated. Save this file to a safe location.

1. On this same page, edit the following fields:

   | Field Name | Description |
   | :---------------------- | ----------------------------------------------------------------------------------------------- |
   | Single Sign On URL | Set this to `https://id.replicated.com/v1/saml`. |
   | Audience URI (SP Entity ID) | Displays on the Vendor Portal [SAML authentication](https://vendor.replicated.com/team/saml-authentication) tab, and is unique to your team. |
   | Name ID Format | Change this to `EmailAddress`. |

1. Click **Next**.

1. Select **I’m an Okta customer adding an internal app** on the final screen, and click **Finish**.

1. Click **Identity provider metadata** to download the Metadata.xml file. If the metadata opens in your browser as XML instead of downloading, right-click the link and select **Save Link As…** to save the file.

### Next Step

Configure and enable SAML in the Vendor Portal. For more information, see [Configure SAML](#configure-saml).

## Configure SAML

When you initially configure SAML, we do not recommend that you disable username/password access at the same time. It is possible, and recommended during testing, to support both SAML and non-SAML authentication on your account simultaneously.

**Prerequisite**

- Download your XML metadata file and x.509 public certificate PEM file from your SAML provider. For more information about supported SAML providers and how to find these files, see [Downloading Certificates from Supported SAML Providers](#downloading-certificates-from-supported-saml-providers).

To configure SAML:

1. Log in to the Vendor Portal [Team Members page](https://vendor.replicated.com/team/members) as a user with Admin access.
1. Click [SAML Authentication](https://vendor.replicated.com/team/saml-authentication) from the left menu. If you do not see these options, contact [Support](https://vendor.replicated.com/support).

   The SAML Authentication page opens.

   ![SAML Authentication](/images/team-mgmt-saml-authentication.png)

   [View a larger version of this image](/images/team-mgmt-saml-authentication.png)

1. Browse for, or drag and drop, your XML metadata file and x.509 PEM file from your SAML provider.

1. Click **Upload Metadata & Cert**.

### Next Step

At this point, SAML is configured, but not enabled. The next step is to enable SAML enforcement options. For more information, see [Enable SAML Enforcement](#enable-saml-enforcement).

## Enable SAML Enforcement

After you have uploaded the metadata and x.509 public certificate PEM file, you must enable SAML enforcement options. Replicated provides options that can be enabled or disabled at any time. You can also change the IdP metadata if needed.

To enable SAML enforcement:

1.
From the Vendor Portal, select **Team > [SAML Authentication](https://vendor.replicated.com/team/saml-authentication)**.

1. Select either or both login method options in the Manage your SAML authentication pane. Allowing both login methods is a good way to test SAML without risking any interruption for the rest of your team.

   **Enable SAML for team logins** - Allows members of your team to log in to the Vendor Portal through your identity provider. This option does not remove, change, or restrict any other authentication methods that you have configured in the Vendor Portal. If you enable SAML and your team is already logging in with accounts provisioned in the Vendor Portal, they can continue logging in with those accounts.

   **Only allow SAML logins** - Requires members of your team to log in to the Vendor Portal through your identity provider. Prevents any non-SAML accounts from logging in. Replicated does not delete the existing accounts. If you turn on this option and then later disable it, accounts that never logged in using SAML will be able to log in again. If an account exists outside of SAML and is then authenticated with SAML, the account is converted and cannot authenticate using a password again.

   ![SAML Authentication](/images/team-mgmt-saml-manage-auth.png)

   [View a larger version of this image](/images/team-mgmt-saml-manage-auth.png)

1. (Optional) Set a default policy for new accounts from the drop-down list.
1. (Optional) Click **Change IdP Metadata** and follow the prompts to upload any changes to your metadata.

SAML is now enabled on your account. For your team to use the SAML login option, you must enable access through your SAML identity provider’s dashboard. For example, if you use Okta, assign the application to users or groups. When a user clicks through to use the application, they are granted access as described in [SCIM](#scim).

## Disable SAML Enforcement

You can disable SAML authentication options at any time and re-enable them later if needed.

To disable SAML enforcement:

1. From the Vendor Portal, select **Team > SAML Authentication**.

1. Click **Deprovision SAML** in the Manage your SAML authentication pane.

   ![SAML Authentication](/images/team-mgmt-saml-manage-auth.png)

   [View a larger version of this image](/images/team-mgmt-saml-manage-auth.png)

================
File: docs/vendor/team-management-slack-config.mdx
================
import NotificationsAbout from "../partials/instance-insights/_notifications-about.mdx"

# Configuring a Slack Webhook (Beta)

As a vendor, anyone on your team can set up Slack notifications, which are sent to a shared Slack channel. Notifications give your team visibility into customer instance statuses and changes.

While email notifications are specific to each user, Slack notification settings are shared, viewable, and editable by the entire team. Any changes made by a team member affect the entire team.

## Limitations

As a Beta feature, the following limitations apply:

- Only one Slack channel per team is supported.

- RBAC policies are not supported for configuring granular permissions.

## Prerequisite

Create a Slack webhook URL. For more information, see [Sending Messages using Incoming Webhooks](https://api.slack.com/messaging/webhooks) in the Slack API documentation.

Make sure to keep the URL secure because it contains a secret that allows write access to one or more channels in your Slack workspace.
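To confirm that the webhook URL works before you add it to the Vendor Portal, you can post a test message to it from the command line. A minimal sketch, where `WEBHOOK_URL` is a placeholder for your incoming webhook URL:

```bash
# Send a test message to the Slack incoming webhook.
# WEBHOOK_URL is a placeholder for your webhook URL; keep it secret.
curl -X POST -H 'Content-type: application/json' \
  --data '{"text": "Webhook test from the Vendor Portal setup"}' \
  "$WEBHOOK_URL"
```

If the webhook is valid, the message appears in the Slack channel that the webhook is configured for.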
## Configure the Webhook in the Vendor Portal

When you enable Slack notifications for a team, you must first configure the Slack webhook in the Vendor Portal. Typically, you do this one time. Then, you can configure notifications for individual customer instances.

To configure the Slack webhook:

1. From the **[Team Vendor Portal](https://vendor.replicated.com/team/members)** page, click **Slack Notifications**.

1. On the **Slack Notifications Setup** page, paste the Slack webhook URL. Click **Save**.

## Next Step

[Configure Slack notifications for customer instances](instance-notifications-config).

================
File: docs/vendor/team-management-two-factor-auth.md
================
# Managing Two-Factor Authentication

This topic describes how to enable and disable Replicated two-factor authentication for individual and team accounts in the Replicated Vendor Portal.

Alternatively, you can use Google authentication or SAML authentication to access the Vendor Portal. For more information about those options, see [Managing Google Authentication](team-management-google-auth) and [Managing SAML Authentication](team-management-saml-auth).

## About Two-Factor Authentication

Two-factor authentication (2FA) provides additional security by requiring two methods of authentication to access resources and data. When you enable the 2FA option in the Vendor Portal, you are asked to provide an authentication code and your password during authentication. Replicated uses the open Time-based One-Time Password (TOTP) algorithm, which is specified by the Internet Engineering Task Force (IETF) in RFC 6238.

## Limitation

If SAML authentication or Google authentication is configured and 2FA is also enabled, then 2FA is bypassed. You can leave 2FA enabled, but you are not prompted to enter a code when logging in.

## Enable 2FA on Individual Accounts

If you are an administrator or if 2FA is enabled for your team, you can enable 2FA on your individual account.

To enable two-factor authentication on your individual account:

1. In the [Vendor Portal](https://vendor.replicated.com), click **Account Settings** from the dropdown list in the upper right corner of the screen.

   Vendor portal account settings

   [View a larger version of this image](/images/vendor-portal-account-settings.png)

1. In the **Two-Factor Authentication** pane, click **Turn on two-factor authentication**.

   Turn on 2FA in the Vendor Portal

   [View a larger version of this image](/images/vendor-portal-password-2fa.png)

1. In the **Confirm password** dialog, enter your Vendor Portal account password. Click **Confirm password**.

1. Scan the QR code that displays using a supported two-factor authentication application on your mobile device, such as Google Authenticator. Alternatively, click **Use this text code** in the Vendor Portal to generate an alphanumeric code that you enter in the mobile application.

   Turn on 2FA in the Vendor Portal

   [View a larger version of this image](/images/vendor-portal-scan-qr.png)

   Your mobile application displays an authentication code.

1. Enter the authentication code in the Vendor Portal.

   Two-factor authentication is enabled, and a list of recovery codes is displayed at the bottom of the **Two-Factor Authentication** pane.

1. Save the recovery codes in a secure location. These codes can be used any time (one time per code) if you lose your mobile device.

1.
Log out of your account, then log back in to test that it is enabled. You are prompted to enter a one-time code generated by the application on your mobile device. - - -## Disable 2FA on Individual Accounts - -To disable two-factor authentication on your individual account: - -1. In the [Vendor Portal](https://vendor.replicated.com), click **Account Settings** from the dropdown list in the upper right corner of the screen. - - Vendor portal account settings - - [View a larger version of this image](/images/vendor-portal-account-settings.png) - -1. In the **Two-Factor Authentication** pane, click **Turn off two-factor authentication**. - -1. In the **Confirm password** dialog, enter your Vendor Portal account password. Click **Confirm password**. - -## Enable or Disable 2FA for a Team - -As an administrator, you can enable and disable 2FA for teams. You must first enable 2FA on your individual account before you can enable 2FA for teams. After you enable 2FA for your team, team members can enable 2FA on their individual accounts. - -To enable or disable 2FA for a team: - -1. In the [Vendor Portal](https://vendor.replicated.com), select the **Team** tab, then select **Multifactor Auth**. - - Multifactor authentication for teams in the Vendor Portal - - [View a larger image](/images/team-2fa-auth.png) - -1. On the **Multifactor Authentication** page, do one of the following with the **Require Two-Factor Authentication for all Username/Password authenticating users** toggle: - - - Turn on the toggle to enable 2FA - - Turn off the toggle to disable 2FA - -1. Click **Save changes**. - -================ -File: docs/vendor/team-management.md -================ -import CollabRepoAbout from "../partials/collab-repo/_collab-repo-about.mdx" -import CollabRbacImportant from "../partials/collab-repo/_collab-rbac-important.mdx" - -# Managing Team Members - -This topic describes how to manage team members in the Replicated Vendor Portal, such as inviting and removing members, and editing permissions. For information about managing user access to the Replicated collab repository in GitHub, see [Managing Collab Repository Access](team-management-github-username). - -## Viewing Team Members -The [Team](https://vendor.replicated.com/team/members) page provides a list of all accounts currently associated with or invited to your team. Each row contains information about the user, including their two-factor authentication (2FA) status and role-based access control (RBAC) role, and lets administrators take additional actions, such as remove, re-invite, and edit permissions. - -View team members list in the Vendor Portal - -[View a larger image](/images/teams-view.png) - -All users, including read-only, can see the name of the RBAC role assigned to each team member. When SAML authentication is enabled, users with the built-in read-only policy cannot see the RBAC role assigned to team members. - -## Invite Members -By default, team administrators can invite more team members to collaborate. Invited users receive an email to activate their account. The activation link in the email is unique to the invited user. Following the activation link in the email also ensures that the invited user joins the team from which the invitation originated. - -:::note -Teams that have enforced SAML-only authentication do not use the email invitation flow described in this procedure. These teams and their users must log in through their SAML provider. -::: - -To invite a new team member: - -1. 
From the [Team Members](https://vendor.replicated.com/team/members) page, click **Invite team member**.

   The Invite team member dialog opens.

   Invite team member dialog in the Vendor Portal

   [Invite team member dialog](/images/teams-invite-member.png)

1. Enter the email address of the member.

1. In the **Permissions** field, assign an RBAC policy from the dropdown list.

1. Click **Invite member**.

   People invited to join your team receive an email notification to accept the invitation. They must follow the link in the email to accept the invitation and join the team. If they do not already have a Replicated account, they can create one that complies with your password policies, 2FA, and Google authentication requirements. If an invited user's email address is already associated with a Replicated account, then by accepting your invitation, they automatically leave their current team and join the team that you have invited them to.

## Managing Invitations

Invitations expire after 7 days. If a prospective member has not accepted their invitation in this time frame, you can re-invite them without having to re-enter their details. You can also remove the prospective member from the list.

You must be an administrator to perform this action.

To re-invite or remove a prospective member, do one of the following on the **Team Members** page:

* Click **Reinvite** from the row with the user's email address, and then click **Reinvite** in the confirmation dialog.

* Click **Remove** from the row with the user's email address, and then click **Delete Invitation** in the confirmation dialog.

## Edit Policy Permissions

You can edit the RBAC policy that is assigned to a member at any time.

To edit policy permissions for individual team members:

1. From the Team Members list, click **Edit permissions** next to a member's name.

   :::note
   The two-factor authentication (2FA) status displays on the **Team members** page, but it is not configured on this page. For more information about configuring 2FA, see [Managing Two-Factor Authentication](team-management-two-factor-auth).
   :::

1. Select an RBAC policy from the **Permissions** dropdown list, and click **Save**. For information about configuring the RBAC policies that display in this list, see [Configuring RBAC Policies](team-management-rbac-configuring).

   Edit team member permissions in the Vendor Portal

## Enable Users to Auto-join Your Team
By default, users must be invited to your team. Team administrators can use the auto-join feature to allow users from the same email domain to join their team automatically. This applies to users registering with an email, or with Google authentication if it is enabled for the team. The auto-join feature does not apply to SAML authentication because SAML users log in using their SAML provider's application portal instead of the Vendor Portal.

To add, edit, or delete custom RBAC policies, see [Configuring RBAC Policies](team-management-rbac-configuring).

To enable users to auto-join your team:

1. From the Team Members page, click **Auto-join** from the left navigation.
1. Enable the **Allow all users from my domain to be added to my team** toggle.

   Auto join dialog in the Vendor Portal

   [View a larger image](/images/teams-auto-join.png)

1. For **Default RBAC policy level for new accounts**, you can use the default Read Only policy or select another policy from the list.
This RBAC policy is applied to all users who join the team with the auto-join feature.

## Remove Members and End Sessions
As a Vendor Portal team admin, you can remove team members, except for the account you are currently logged in with.

If the team member that you remove added their GitHub username to their Account Settings page in the Vendor Portal to access the Replicated collab repository, then the Vendor Portal also automatically removes their username from the collab repository. For more information, see [Managing Collab Repository Access](team-management-github-username).

SAML-created users must be removed using this method to expire their existing sessions because Replicated does not support System for Cross-domain Identity Management (SCIM).

To remove a member:

1. From the Team Members page, click **Remove** on the right side of a user's row.

1. Click **Remove** in the confirmation dialog.

   The member is removed. All of their current user sessions are deleted, and their next attempt at communicating with the server logs them out of their browser's session.

   If the member added their GitHub username to the Vendor Portal to access the collab repository, then the Vendor Portal also removes their GitHub username from the collab repository.

   For Google-authenticated users, if the user's Google account is suspended or deleted, Replicated logs that user out of all Google-authenticated Vendor Portal sessions within 10 minutes. The user remains in the team list, but they cannot log in to the Vendor Portal unless username and password login is allowed.

## Update Email Addresses

:::important
Changing team member email addresses has security implications. Replicated advises that you avoid changing team member email addresses if possible.
:::

Updating the email address for a team member requires creating a new account with the updated email address, and then deactivating the previous account.

To update the email address for a team member:

1. From the Team Members page, click **Invite team member**.

1. Assign the required RBAC policies to the new user.

1. Deactivate the previous team member account.

================
File: docs/vendor/telemetry-air-gap.mdx
================
import AirGapTelemetry from "../partials/instance-insights/_airgap-telemetry.mdx"

# Collecting Telemetry for Air Gap Instances

This topic describes how to collect telemetry for instances in air gap environments.

## Overview

Air gap instances run in environments without outbound internet access. This limitation prevents these instances from periodically sending telemetry to the Replicated Vendor Portal through the Replicated SDK or Replicated KOTS. For more information about how the Vendor Portal collects telemetry from online (internet-connected) instances, see [About Instance and Event Data](/vendor/instance-insights-event-data#about-reporting).

The following diagram demonstrates how air gap telemetry is collected and stored by the Replicated SDK in a customer environment, and then shared to the Vendor Portal in a support bundle:

Air gap telemetry collected by the SDK in a support bundle

[View a larger version of this image](/images/airgap-telemetry.png)

All support bundles uploaded to the Vendor Portal from air gap customers contribute to a comprehensive dataset, providing parity in the telemetry for air gap and online instances.
Replicated recommends that you collect support bundles from air gap customers regularly (monthly or quarterly) to improve the completeness of the dataset. The Vendor Portal handles any overlapping event archives idempotently, ensuring data integrity.

## Requirements

Air gap telemetry has the following requirements:

* To collect telemetry from air gap instances, one of the following must be installed in the cluster where the instance is running:

   * The Replicated SDK installed in air gap mode. See [Installing the SDK in Air Gap Environments](/vendor/replicated-sdk-airgap).

   * KOTS v1.92.1 or later

   :::note
   When both the Replicated SDK and KOTS v1.92.1 or later are installed in the cluster (such as when a Helm chart that includes the SDK is installed by KOTS), both collect and store instance telemetry in their own dedicated secret, subject to the size limitation noted below. In the case of any overlapping data points, the Vendor Portal reports these data points chronologically based on their timestamp.
   :::

* To collect custom metrics from air gap instances, the Replicated SDK must be installed in the cluster in air gap mode. See [Installing the SDK in Air Gap Environments](/vendor/replicated-sdk-airgap).

   For more information about custom metrics, see [Configuring Custom Metrics](https://docs.replicated.com/vendor/custom-metrics).

Replicated strongly recommends that all applications include the Replicated SDK because it enables access to both standard instance telemetry and custom metrics for air gap instances.

## Limitation

Telemetry data is capped at 4,000 events or 1 MB per secret, whichever limit is reached first.

When a limit is reached, the oldest events are purged until the payload is within the limit. For optimal use, consider collecting support bundles regularly (monthly or quarterly) from air gap customers.

## Collect and View Air Gap Telemetry

To collect telemetry from air gap instances:

1. Ask your customer to collect a support bundle. See [Generating Support Bundles](/vendor/support-bundle-generating).

1. After receiving the support bundle from your customer, go to the Vendor Portal **Customers**, **Customer Reporting**, or **Instance Details** page and upload the support bundle:

   ![upload new bundle button on instance details page](/images/airgap-upload-telemetry.png)

   The telemetry collected from the support bundle appears in the instance data shortly. Allow a few minutes for all data to be processed.

================
File: docs/vendor/testing-about.md
================
import Overview from "../partials/cmx/_overview.mdx"
import SupportedClusters from "../partials/cmx/_supported-clusters-overview.mdx"

# About Compatibility Matrix

This topic describes Replicated Compatibility Matrix, including use cases, billing, limitations, and more.

## Overview

You can use Compatibility Matrix with the Replicated CLI or the Replicated Vendor Portal. For more information about how to use Compatibility Matrix, see [Using Compatibility Matrix](testing-how-to).

### Supported Clusters

### Billing and Credits

Clusters created with Compatibility Matrix are billed by the minute. Per-minute billing begins when the cluster reaches a `running` status and ends when the cluster is deleted. Compatibility Matrix marks a cluster as `running` when a working kubeconfig for the cluster is accessible.

You are billed only for the time that the cluster is in a `running` status.
You are _not_ billed for the time that it takes Compatibility Matrix to create and tear down clusters, including when the cluster is in an `assigned` status.

For more information about pricing, see [Compatibility Matrix Pricing](testing-pricing).

To create clusters with Compatibility Matrix, you must have credits in your Vendor Portal account.
If you have a contract, you can purchase credits by logging in to the Vendor Portal and going to [**Compatibility Matrix > Buy additional credits**](https://vendor.replicated.com/compatibility-matrix).
Otherwise, to request credits, log in to the Vendor Portal and go to [**Compatibility Matrix > Request more credits**](https://vendor.replicated.com/compatibility-matrix).

### Quotas and Capacity

By default, Compatibility Matrix sets quotas for the capacity that can be used concurrently by each Vendor Portal team. These quotas are designed to ensure that Replicated maintains a minimum amount of capacity for provisioning both VM and cloud-based clusters.

By default, the quota for cloud-based cluster distributions (AKS, GKE, EKS) is three clusters running concurrently.

VM-based cluster distributions (such as kind, OpenShift, and Replicated Embedded Cluster) have the following default quotas:
* 32 vCPUs
* 128 GiB memory
* 800 GiB disk size

You can request increased quotas at any time with no additional cost. To view your team's current quota and capacity usage, or to request a quota increase, go to [**Compatibility Matrix > Settings**](https://vendor.replicated.com/compatibility-matrix/settings) in the Vendor Portal:

![Compatibility matrix settings page](/images/compatibility-matrix-settings.png)

[View a larger version of this image](/images/compatibility-matrix-settings.png)

### Cluster Status

Clusters created with Compatibility Matrix can have the following statuses:

* `assigned`: The cluster resources were requested, and Compatibility Matrix is provisioning the cluster. You are not billed for the time that a cluster spends in the `assigned` status.

* `running`: A working kubeconfig for the cluster is accessible. Billing begins when the cluster reaches a `running` status.

   Additionally, clusters are verified prior to transitioning to a `running` status. Verification includes checking that the cluster is healthy and running with the correct number of nodes, as well as passing [sonobuoy](https://sonobuoy.io/) tests in `--quick` mode.

* `terminated`: The cluster is deleted. Billing ends when the cluster status is changed from `running` to `terminated`.

* `error`: An error occurred when attempting to provision the cluster.

You can view the status of clusters using the `replicated cluster ls` command. For more information, see [cluster ls](/reference/replicated-cli-cluster-ls).

### Cluster Add-ons

Replicated Compatibility Matrix enables you to extend your cluster with add-ons that your application can use, such as an AWS S3 object store. This lets you more easily provision dependencies required by your application.

For more information about how to use the add-ons, see [Compatibility Matrix Cluster Add-ons](testing-cluster-addons).

## Limitations

Compatibility Matrix has the following limitations:

- Clusters cannot be resized. Create another cluster if you want to make changes, such as adding another node.
- Clusters cannot be rebooted. Create another cluster if you need to reset or reboot the cluster.
- On cloud clusters, node groups are not available for every distribution.
For distribution-specific details, see [Supported Compatibility Matrix Cluster Types](/vendor/testing-supported-clusters).
- Multi-node support is not available for every distribution. For distribution-specific details, see [Supported Compatibility Matrix Cluster Types](/vendor/testing-supported-clusters).
- ARM instance types are only supported on cloud clusters. For distribution-specific details, see [Supported Compatibility Matrix Cluster Types](/vendor/testing-supported-clusters).
- GPU instance types are only supported on cloud clusters. For distribution-specific details, see [Supported Compatibility Matrix Cluster Types](/vendor/testing-supported-clusters).
- There is no support for IPv6 as a single stack. Dual-stack support is available on kind clusters.
- There is no support for air gap testing.
- The `cluster upgrade` feature is available only for kURL distributions. See [cluster upgrade](/reference/replicated-cli-cluster-upgrade).
- Cloud clusters do not allow for the configuration of CNI, CSI, CRI, Ingress, or other plugins, add-ons, services, and interfaces.
- The node operating systems for clusters created with Compatibility Matrix cannot be configured or replaced with different operating systems.
- The Kubernetes scheduler for clusters created with Compatibility Matrix cannot be replaced with a different scheduler.
- Each team has a quota limit on the amount of resources that can be used simultaneously. This limit can be raised by messaging your account representative.
- Team actions with Compatibility Matrix (for example, creating and deleting clusters and requesting quota increases) are not logged or displayed in the [Vendor Team Audit Log](https://vendor.replicated.com/team/audit-log).

For additional distribution-specific limitations, see [Supported Compatibility Matrix Cluster Types](testing-supported-clusters).

================
File: docs/vendor/testing-cluster-addons.md
================
# Compatibility Matrix Cluster Add-ons (Alpha)

This topic describes the supported cluster add-ons for Replicated Compatibility Matrix.

## Overview

Replicated Compatibility Matrix enables you to extend your cluster with add-ons that your application can use, such as an AWS S3 object store. This lets you more easily provision dependencies required by your application.

## CLI

The Replicated CLI can be used to [create](/reference/replicated-cli-cluster-addon-create), [manage](/reference/replicated-cli-cluster-addon-ls), and [remove](/reference/replicated-cli-cluster-addon-rm) cluster add-ons.

## Supported Add-ons

This section lists the supported cluster add-ons for clusters created with Compatibility Matrix.

### object-store (Alpha)

The Replicated cluster object store add-on can be used to create S3-compatible object store buckets for clusters (currently, only AWS S3 is supported, for EKS clusters).
Assuming you already have a cluster, run the following command with the cluster ID to create an object store bucket:

```bash
$ replicated cluster addon create object-store 4d2f7e70 --bucket-prefix mybucket
05929b24    Object Store    pending         {"bucket_prefix":"mybucket"}

$ replicated cluster addon ls 4d2f7e70
ID          TYPE            STATUS          DATA
05929b24    Object Store    ready           {"bucket_prefix":"mybucket","bucket_name":"mybucket-05929b24-cmx","service_account_namespace":"cmx","service_account_name":"mybucket-05929b24-cmx","service_account_name_read_only":"mybucket-05929b24-cmx-ro"}
```

This creates two service accounts in a namespace: one with read-write access and one with read-only access to the object store bucket.

Additional service accounts can be created in any namespace with access to the object store by annotating the new service account with the same `eks.amazonaws.com/role-arn` annotation found in the predefined ones (`service_account_name` and `service_account_name_read_only`), as shown in the example below.
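For example, an additional service account that reuses the add-on's IAM role might look like the following sketch. The name, namespace, and role ARN here are hypothetical; copy the real ARN from the `eks.amazonaws.com/role-arn` annotation on one of the predefined service accounts:

```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: my-app-object-store   # hypothetical name
  namespace: my-app           # can be any namespace
  annotations:
    # Copy this value from the predefined read-write or read-only
    # service account created by the add-on.
    eks.amazonaws.com/role-arn: arn:aws:iam::111111111111:role/example-role
```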
| Type | Description |
|------|-------------|
| Supported Kubernetes Distributions | EKS (AWS S3) |
| Cost | Flat fee of $0.50 per bucket. |
| Options | `bucket_prefix` (string): A prefix for the bucket name to be created (required) |
| Data | `bucket_prefix`: The prefix specified by the user for the bucket name<br/>`bucket_name`: The actual bucket name<br/>`service_account_namespace`: The namespace in which the service accounts (`service_account_name` and `service_account_name_read_only`) have been created<br/>`service_account_name`: The service account name for read-write access to the bucket<br/>`service_account_name_read_only`: The service account name for read-only access to the bucket |
================
File: docs/vendor/testing-how-to.md
================
import TestRecs from "../partials/ci-cd/_test-recs.mdx"
import Prerequisites from "../partials/cmx/_prerequisites.mdx"

# Using Compatibility Matrix

This topic describes how to use Replicated Compatibility Matrix to create ephemeral clusters.

## Prerequisites

Before you can use Compatibility Matrix, you must complete the following prerequisites:

<Prerequisites/>

* Existing accounts must accept the TOS for the trial on the [**Compatibility Matrix**](https://vendor.replicated.com/compatibility-matrix) page in the Replicated Vendor Portal.

## Create and Manage Clusters

This section explains how to use Compatibility Matrix to create and manage clusters with the Replicated CLI or the Vendor Portal.

For information about creating and managing clusters with the Vendor API v3, see the [clusters](https://replicated-vendor-api.readme.io/reference/listclusterusage) section in the Vendor API v3 documentation.

### Create Clusters

You can create clusters with Compatibility Matrix using the Replicated CLI or the Vendor Portal.

#### Replicated CLI

To create a cluster using the Replicated CLI:

1. (Optional) View the available cluster distributions, including the supported Kubernetes versions, instance types, and maximum nodes for each distribution:

   ```bash
   replicated cluster versions
   ```
   For command usage, see [cluster versions](/reference/replicated-cli-cluster-versions).

1. Run the following command to create a cluster:

   ```
   replicated cluster create --name NAME --distribution K8S_DISTRO --version K8S_VERSION --disk DISK_SIZE --instance-type INSTANCE_TYPE [--license-id LICENSE_ID]
   ```
   Where:
   * `NAME` is any name for the cluster. If `--name` is excluded, a name is automatically generated for the cluster.
   * `K8S_DISTRO` is the Kubernetes distribution for the cluster.
   * `K8S_VERSION` is the Kubernetes version for the cluster if creating a standard Cloud or VM-based cluster. If creating an Embedded Cluster or kURL cluster type, `--version` is optional:
     * For Embedded Cluster types, `--version` defaults to the latest available release on the channel. To specify a different release, set `--version` to the `Channel release sequence` value for the release.
     * For kURL cluster types, `--version` defaults to the `"latest"` kURL Installer ID. To specify a different kURL installer, set `--version` to the kURL Installer ID.
   * `DISK_SIZE` is the disk size (GiB) to request per node.
   * `INSTANCE_TYPE` is the instance type to use for each node.
   * (Embedded Cluster Only) `LICENSE_ID` is a valid customer license. Required to create an Embedded Cluster.

   For command usage and additional optional flags, see [cluster create](/reference/replicated-cli-cluster-create).

   **Example:**

   The following example creates a kind cluster with Kubernetes version 1.27.0, a disk size of 100 GiB, and an instance type of `r1.small`:

   ```bash
   replicated cluster create --name kind-example --distribution kind --version 1.27.0 --disk 100 --instance-type r1.small
   ```

1. Verify that the cluster was created:

   ```bash
   replicated cluster ls CLUSTER_NAME
   ```
   Where `CLUSTER_NAME` is the name of the cluster that you created.

   In the output of the command, you can see that the `STATUS` of the cluster is `assigned`. When the kubeconfig for the cluster is accessible, the cluster's status is changed to `running`.
For more information about cluster statuses, see [Cluster Status](testing-about#cluster-status) in _About Compatibility Matrix._ - -#### Vendor Portal - -To create a cluster using the Vendor Portal: - -1. Go to [**Compatibility Matrix > Create cluster**](https://vendor.replicated.com/compatibility-matrix/create-cluster). - - Create a cluster page - - [View a larger version of this image](/images/create-a-cluster.png) - -1. On the **Create a cluster** page, complete the following fields: - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Field | Description |
|-------|-------------|
| Kubernetes distribution | Select the Kubernetes distribution for the cluster. |
| Version | Select the Kubernetes version for the cluster. The options available are specific to the distribution selected. |
| Name (optional) | Enter an optional name for the cluster. |
| Tags | Add one or more tags to the cluster as key-value pairs. |
| Set TTL | Select the Time to Live (TTL) for the cluster. When the TTL expires, the cluster is automatically deleted. TTL can be adjusted after cluster creation with [cluster update ttl](/reference/replicated-cli-cluster-update-ttl). |
1. For **Nodes & Nodes Groups**, complete the following fields to configure nodes and node groups for the cluster:

   | Field | Description |
   |-------|-------------|
   | Instance type | Select the instance type to use for the nodes in the node group. The options available are specific to the distribution selected. |
   | Disk size | Select the disk size in GiB to use per node. |
   | Nodes | Select the number of nodes to provision in the node group. The options available are specific to the distribution selected. |
1. (Optional) Click **Add node group** to add additional node groups.

1. Click **Create cluster**.

   The cluster is displayed in the list of clusters on the **Compatibility Matrix** page with a status of Assigned. When the kubeconfig for the cluster is accessible, the cluster's status is changed to Running.

   :::note
   If the cluster is not automatically displayed, refresh your browser window.
   :::

   ![Cluster configuration dialog](/images/cmx-assigned-cluster.png)

   [View a larger version of this image](/images/cmx-assigned-cluster.png)

### Prepare Clusters

For applications distributed with the Replicated Vendor Portal, the [`cluster prepare`](/reference/replicated-cli-cluster-prepare) command reduces the number of steps required to provision a cluster and then deploy a release to the cluster for testing. This is useful in continuous integration (CI) workflows that run multiple times a day. For an example workflow that uses the `cluster prepare` command, see [Recommended CI/CD Workflows](/vendor/ci-workflows).

The `cluster prepare` command does the following:
* Creates a cluster
* Creates a release for your application based on either a Helm chart archive or a directory containing the application YAML files
* Creates a temporary customer of type `test`
  :::note
  Test customers created by the `cluster prepare` command are not saved in your Vendor Portal team.
  :::
* Installs the release in the cluster using either the Helm CLI or Replicated KOTS

The `cluster prepare` command requires either a Helm chart archive or a directory containing the application YAML files to be installed:

* **Install a Helm chart with the Helm CLI**:

  ```bash
  replicated cluster prepare \
    --distribution K8S_DISTRO \
    --version K8S_VERSION \
    --chart HELM_CHART_TGZ
  ```
  The following example creates a kind cluster and installs a Helm chart in the cluster using the `nginx-chart-0.0.14.tgz` chart archive:
  ```bash
  replicated cluster prepare \
    --distribution kind \
    --version 1.27.0 \
    --chart nginx-chart-0.0.14.tgz \
    --set key1=val1,key2=val2 \
    --set-string s1=val1,s2=val2 \
    --set-json j1='{"key1":"val1","key2":"val2"}' \
    --set-literal l1=val1,l2=val2 \
    --values values.yaml
  ```

* **Install with KOTS from a YAML directory**:

  ```bash
  replicated cluster prepare \
    --distribution K8S_DISTRO \
    --version K8S_VERSION \
    --yaml-dir PATH_TO_YAML_DIR
  ```
  The following example creates a k3s cluster and installs an application in the cluster using the manifest files in a local directory named `config-validation`:
  ```bash
  replicated cluster prepare \
    --distribution k3s \
    --version 1.26 \
    --namespace config-validation \
    --shared-password password \
    --app-ready-timeout 10m \
    --yaml-dir config-validation \
    --config-values-file config-values.yaml \
    --entitlements "num_of_queues=5"
  ```

For command usage, including additional options, see [cluster prepare](/reference/replicated-cli-cluster-prepare).

### Access Clusters

Compatibility Matrix provides the kubeconfig for clusters so that you can access clusters with the kubectl command line tool. For more information, see [Command line tool (kubectl)](https://kubernetes.io/docs/reference/kubectl/) in the Kubernetes documentation.

To access a cluster from the command line:

1. Verify that the cluster is in a Running state:

   ```bash
   replicated cluster ls
   ```
   In the output of the command, verify that the `STATUS` for the target cluster is `running`.
For command usage, see [cluster ls](/reference/replicated-cli-cluster-ls).

1. Run the following command to open a new shell session with the kubeconfig configured for the cluster:

   ```bash
   replicated cluster shell CLUSTER_ID
   ```
   Where `CLUSTER_ID` is the unique ID for the running cluster that you want to access.

   For command usage, see [cluster shell](/reference/replicated-cli-cluster-shell).

1. Verify that you can interact with the cluster through kubectl by running a command. For example:

   ```bash
   kubectl get ns
   ```

1. Press Ctrl-D or type `exit` when done to end the shell and the connection to the server.

### Upgrade Clusters (kURL Only)

For kURL clusters provisioned with Compatibility Matrix, you can use the `cluster upgrade` command to upgrade the version of the kURL installer specification used to provision the cluster. A recommended use case for the `cluster upgrade` command is testing your application's compatibility with Kubernetes API resource version migrations after an upgrade.

The following example upgrades a kURL cluster from its previous version to version `9d5a44c`:

```bash
replicated cluster upgrade cabb74d5 --version 9d5a44c
```

For command usage, see [cluster upgrade](/reference/replicated-cli-cluster-upgrade).

### Delete Clusters

You can delete clusters using the Replicated CLI or the Vendor Portal.

#### Replicated CLI

To delete a cluster using the Replicated CLI:

1. Get the ID of the target cluster:

   ```
   replicated cluster ls
   ```
   In the output of the command, copy the ID for the cluster.

   **Example:**

   ```
   ID       NAME             DISTRIBUTION  VERSION  STATUS   CREATED                        EXPIRES
   1234abc  My Test Cluster  eks           1.27     running  2023-10-09 17:08:01 +0000 UTC  -
   ```

   For command usage, see [cluster ls](/reference/replicated-cli-cluster-ls).

1. Run the following command:

   ```
   replicated cluster rm CLUSTER_ID
   ```
   Where `CLUSTER_ID` is the ID of the target cluster.

   For command usage, see [cluster rm](/reference/replicated-cli-cluster-rm).

1. Confirm that the cluster was deleted:

   ```
   replicated cluster ls CLUSTER_ID --show-terminated
   ```
   Where `CLUSTER_ID` is the ID of the target cluster.

   In the output of the command, you can see that the `STATUS` of the cluster is `terminated`. For command usage, see [cluster ls](/reference/replicated-cli-cluster-ls).

#### Vendor Portal

To delete a cluster using the Vendor Portal:

1. Go to **Compatibility Matrix**.

1. Under **Clusters**, in the vertical dots menu for the target cluster, click **Delete cluster**.

   ![Delete cluster button](/images/cmx-delete-cluster.png)

   [View a larger version of this image](/images/cmx-delete-cluster.png)

## About Using Compatibility Matrix with CI/CD

Replicated recommends that you integrate Compatibility Matrix into your existing CI/CD workflow to automate the process of creating clusters to install your application and run tests. For more information, including additional best practices and recommendations for CI/CD, see [About Integrating with CI/CD](/vendor/ci-overview).

### Replicated GitHub Actions

Replicated maintains a set of custom GitHub actions that are designed to replace repetitive tasks related to using Compatibility Matrix and distributing applications with Replicated.

If you use GitHub Actions as your CI/CD platform, you can include these custom actions in your workflows rather than using Replicated CLI commands.
Integrating the Replicated GitHub actions into your CI/CD pipeline helps you quickly build workflows with the required inputs and outputs, without needing to manually create the required CLI commands for each step.

To view all the available GitHub actions that Replicated maintains, see the [replicatedhq/replicated-actions](https://github.com/replicatedhq/replicated-actions/) repository in GitHub.

For more information, see [Integrating Replicated GitHub Actions](/vendor/ci-workflows-github-actions).

### Recommended Workflows

Replicated recommends that you maintain unique CI/CD workflows for development (continuous integration) and for releasing your software (continuous delivery). For example development and release workflows that integrate Compatibility Matrix for testing, see [Recommended CI/CD Workflows](/vendor/ci-workflows).

### Test Script Recommendations

Incorporating code tests into your CI/CD workflows is important for ensuring that developers receive quick feedback and can make updates in small iterations. Replicated recommends that you create and run all of the following test types as part of your CI/CD workflows:

<TestRecs/>

================
File: docs/vendor/testing-ingress.md
================
# Accessing Your Application

This topic describes the networking options for accessing applications deployed on clusters created with Replicated Compatibility Matrix. It also describes how to use and manage Compatibility Matrix tunnels.

## Networking Options

After deploying your application into Compatibility Matrix clusters, you need a way to reach the application from your own test runner.
Compatibility Matrix offers several methods to access your application.

Some standard Kubernetes networking options are available, but they vary based on the distribution.
For VM-based distributions, there is no default network route into the cluster, making inbound connections challenging to create.

### Port Forwarding
Port forwarding is a low-cost and portable mechanism to access your application.
Port forwarding works on all clusters supported by Compatibility Matrix because the connection is initiated from the client, over the Kubernetes API server port.
If you have a single service or pod and are not worried about complex routing, this is a good mechanism.
The basic steps are to connect the port-forward, execute your tests against localhost, and then shut down the port-forward, as shown in the sketch at the end of this section.

### LoadBalancer
If your application is running only on cloud services (EKS, GKE, AKS), you can create a service of type `LoadBalancer`.
This provisions the cloud-provider-specific load balancer.
The `LoadBalancer` service is fulfilled by the in-tree Kubernetes functionality that is integrated with the underlying cloud provider.
You can then query the service definition using `kubectl` and connect to and execute your tests over the `LoadBalancer` IP address.

### Ingress
Ingress is a good way to recreate customer-representative environments, but the problem remains of how to get inbound access to the IP address that the ingress controller allocates.
Ingress is also not perfectly portable: each ingress controller might require different annotations in the ingress resource to work properly.
Supported ingress controllers vary based on the distribution.
Compatibility Matrix supports ingress controllers that are running as a `NodePort` service.
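The following is a minimal sketch of the port-forwarding flow described above. The service name `my-app` and the ports are hypothetical placeholders; substitute the service and port that your application actually exposes.

```bash
# Forward local port 8080 to port 80 of the (hypothetical) my-app service.
kubectl port-forward svc/my-app 8080:80 &
PF_PID=$!

# Give the tunnel a moment to establish, then run tests against localhost.
sleep 2
curl -fsS http://localhost:8080/   # replace with your own test runner

# Shut down the port-forward when the tests are done.
kill $PF_PID
```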
- -### Compatibility Matrix Tunnels -All VM-based Compatibility Matrix clusters support tunneling traffic into a `NodePort` service. -When this option is used, Replicated is responsible for creating the DNS record and TLS certs. -Replicated will route traffic from `:443` and/or `:80` into the `NodePort` service you defined. For more information about using tunnels, see [Managing Compatibility Matrix Tunnels](#manage-nodes) below. - -The following diagram shows how the traffic is routed into the service using Compatibility Matrix tunnels: - -Compatibility Matrix ingress - -[View a larger version of this image](/images/compatibility-matrix-ingress.png) - -## Managing Compatibility Matrix Tunnels {#manage-nodes} - -Tunnels are viewed, created, and removed using the Compatibility Matrix UI within Vendor Portal, the Replicated CLI, GitHub Actions, or directly with the Vendor API v3. There is no limit to the number of tunnels you can create for a cluster and multiple tunnels can connect to a single service, if desired. - -### Limitations - -Compatibility Matrix tunnels have the following limitations: -* One tunnel can only connect to one service. If you need fanout routing into different services, consider installing the nginx ingress controller as a `NodePort` service and exposing it. -* Tunnels are not supported for cloud distributions (EKS, GKE, AKS). - -### Supported Protocols - -A tunnel can support one or more protocols. -The supported protocols are HTTP, HTTPS, WS and WSS. -GRPC and other protocols are not routed into the cluster. - -### Exposing Ports -Once you have a node port available on the cluster, you can use the Replicated CLI to expose the node port to the public internet. -This can be used multiple times on a single cluster. - -Optionally, you can specify the `--wildcard` flag to expose this port with wildcard DNS and TLS certificate. -This feature adds extra time to provision the port, so it should only be used if necessary. - -```bash -replicated cluster port expose \ - [cluster id] \ - --port [node port] \ - --protocol [protocol] \ - --wildcard -``` - -For example, if you have the nginx ingress controller installed and the node port is 32456: - -```bash -% replicated cluster ls -ID NAME DISTRIBUTION VERSION STATUS -1e616c55 tender_ishizaka k3s 1.29.2 running - -% replicated cluster port expose \ - 1e616c55 \ - --port 32456 \ - --protocol http \ - --protocol https \ - --wildcard -``` - -:::note -You can expose a node port that does not yet exist in the cluster. -This is useful if you have a deterministic node port, but need the DNS name as a value in your Helm chart. -::: - -### Viewing Ports -To view all exposed ports, use the Replicated CLI `port ls` subcommand with the cluster ID: - -```bash -% replicated cluster port ls 1e616c55 -ID CLUSTER PORT PROTOCOL EXPOSED PORT WILDCARD STATUS -d079b2fc 32456 http http://happy-germain.ingress.replicatedcluster.com true ready - -d079b2fc 32456 https https://happy-germain.ingress.replicatedcluster.com true ready -``` - -### Removing Ports -Exposed ports are automatically deleted when a cluster terminates. -If you want to remove a port (and the associated DNS records and TLS certs) prior to cluster termination, run the `port rm` subcommand with the cluster ID: - -```bash -% replicated cluster port rm 1e616c55 --id d079b2fc -``` - -You can remove just one protocol, or all. -Removing all protocols also removes the DNS record and TLS cert. 
================
File: docs/vendor/testing-pricing.mdx
================
# Compatibility Matrix Pricing

This topic describes the pricing for Replicated Compatibility Matrix.

## Pricing Overview

Compatibility Matrix usage-based pricing includes a $0.50 per cluster startup cost, plus per-minute pricing based on instance size and count. Billing starts when the cluster state changes to "running" and ends when the cluster expires (TTL) or is removed. Minutes are rounded up, so there is a minimum charge of $0.50 plus one minute for all running clusters. Each cluster's cost is rounded up to the nearest cent and subtracted from the available credits in the team account. The remaining credit balance is viewable on the Replicated Vendor Portal [Cluster History](https://vendor.replicated.com/compatibility-matrix/history) page or with the Vendor API v3 [/vendor/v3/cluster/stats](https://replicated-vendor-api.readme.io/reference/getclusterstats) endpoint. Cluster [add-ons](/vendor/testing-cluster-addons) may incur additional charges.

If the team's available credits are insufficient to run the cluster for the full duration of the TTL, the cluster creation will be rejected.

## Cluster Quotas

Each team is limited by the number of clusters that they can run concurrently. To increase the quota, reach out to your account manager.

## VM Cluster Pricing (OpenShift, RKE2, k3s, kind, Embedded Cluster, kURL)

VM-based clusters approximately match the AWS m6i instance type pricing.
| Instance Type | VCPUs | Memory (GiB) | USD/Credit per hour |
|---------------|-------|--------------|---------------------|
| r1.small | 2 | 8 | $0.096 |
| r1.medium | 4 | 16 | $0.192 |
| r1.large | 8 | 32 | $0.384 |
| r1.xlarge | 16 | 64 | $0.768 |
| r1.2xlarge | 32 | 128 | $1.536 |
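To make the arithmetic above concrete, here is a hypothetical example using the r1.small rate from this table: a single r1.small node that runs for 61.5 minutes is billed for 62 minutes (minutes are rounded up), so the total is $0.50 + 62 x ($0.096 / 60) = $0.50 + $0.0992 = $0.5992, which is rounded up to $0.60 and deducted from the team's credit balance.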
    - -## Cloud Cluster Pricing - -### AWS EKS Cluster Pricing - -AWS clusters will be charged AWS pricing plus a markup of 20%. Note that the markup will be calculated at the rounded price per hour in order to make hourly prices fixed. Pricing for Extended Support EKS versions (those Kubernetes versions considered deprecated by upstream Kubernetes) will have additional charges applied. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Instance Type | VCPUs | Memory (GiB) | USD/Credit per hour |
|---------------|-------|--------------|---------------------|
| m6i.large | 2 | 8 | $0.115 |
| m6i.xlarge | 4 | 16 | $0.230 |
| m6i.2xlarge | 8 | 32 | $0.461 |
| m6i.4xlarge | 16 | 64 | $0.922 |
| m6i.8xlarge | 32 | 128 | $1.843 |
| m7i.large | 2 | 8 | $0.121 |
| m7i.xlarge | 4 | 16 | $0.242 |
| m7i.2xlarge | 8 | 32 | $0.484 |
| m7i.4xlarge | 16 | 64 | $0.968 |
| m7i.8xlarge | 32 | 128 | $1.935 |
| m5.large | 2 | 8 | $0.115 |
| m5.xlarge | 4 | 16 | $0.230 |
| m5.2xlarge | 8 | 32 | $0.461 |
| m5.4xlarge | 16 | 64 | $0.922 |
| m5.8xlarge | 32 | 128 | $1.843 |
| m7g.large | 2 | 8 | $0.098 |
| m7g.xlarge | 4 | 16 | $0.195 |
| m7g.2xlarge | 8 | 32 | $0.392 |
| m7g.4xlarge | 16 | 64 | $0.784 |
| m7g.8xlarge | 32 | 128 | $1.567 |
| c5.large | 2 | 4 | $0.102 |
| c5.xlarge | 4 | 8 | $0.204 |
| c5.2xlarge | 8 | 16 | $0.408 |
| c5.4xlarge | 16 | 32 | $0.816 |
| c5.9xlarge | 36 | 72 | $1.836 |
| g4dn.xlarge | 4 | 16 | $0.631 |
| g4dn.2xlarge | 8 | 32 | $0.902 |
| g4dn.4xlarge | 16 | 64 | $1.445 |
| g4dn.8xlarge | 32 | 128 | $2.611 |
| g4dn.12xlarge | 48 | 192 | $4.964 |
| g4dn.16xlarge | 64 | 256 | $5.222 |
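As a sanity check on the 20% markup described above: AWS's on-demand list price for m6i.large is about $0.096 per hour, and $0.096 x 1.2 = $0.1152, which appears in the table as the rounded hourly rate of $0.115.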
    - -### GCP GKE Cluster Pricing - -GCP clusters will be charged GCP list pricing plus a markup of 20%. Note that the markup will be calculated at the rounded price per hour in order to make hourly prices fixed. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Instance Type | VCPUs | Memory (GiB) | USD/Credit per hour |
|---------------|-------|--------------|---------------------|
| n2-standard-2 | 2 | 8 | $0.117 |
| n2-standard-4 | 4 | 16 | $0.233 |
| n2-standard-8 | 8 | 32 | $0.466 |
| n2-standard-16 | 16 | 64 | $0.932 |
| n2-standard-32 | 32 | 128 | $1.865 |
| t2a-standard-2 | 2 | 8 | $0.092 |
| t2a-standard-4 | 4 | 16 | $0.185 |
| t2a-standard-8 | 8 | 32 | $0.370 |
| t2a-standard-16 | 16 | 64 | $0.739 |
| t2a-standard-32 | 32 | 128 | $1.478 |
| t2a-standard-48 | 48 | 192 | $2.218 |
| e2-standard-2 | 2 | 8 | $0.081 |
| e2-standard-4 | 4 | 16 | $0.161 |
| e2-standard-8 | 8 | 32 | $0.322 |
| e2-standard-16 | 16 | 64 | $0.643 |
| e2-standard-32 | 32 | 128 | $1.287 |
| n1-standard-1+nvidia-tesla-t4+1 | 1 | 3.75 | $0.321 |
| n1-standard-1+nvidia-tesla-t4+2 | 1 | 3.75 | $0.585 |
| n1-standard-1+nvidia-tesla-t4+4 | 1 | 3.75 | $1.113 |
| n1-standard-2+nvidia-tesla-t4+1 | 2 | 7.50 | $0.378 |
| n1-standard-2+nvidia-tesla-t4+2 | 2 | 7.50 | $0.642 |
| n1-standard-2+nvidia-tesla-t4+4 | 2 | 7.50 | $1.170 |
| n1-standard-4+nvidia-tesla-t4+1 | 4 | 15 | $0.492 |
| n1-standard-4+nvidia-tesla-t4+2 | 4 | 15 | $0.756 |
| n1-standard-4+nvidia-tesla-t4+4 | 4 | 15 | $1.284 |
| n1-standard-8+nvidia-tesla-t4+1 | 8 | 30 | $0.720 |
| n1-standard-8+nvidia-tesla-t4+2 | 8 | 30 | $0.984 |
| n1-standard-8+nvidia-tesla-t4+4 | 8 | 30 | $1.512 |
| n1-standard-16+nvidia-tesla-t4+1 | 16 | 60 | $1.176 |
| n1-standard-16+nvidia-tesla-t4+2 | 16 | 60 | $1.440 |
| n1-standard-16+nvidia-tesla-t4+4 | 16 | 60 | $1.968 |
| n1-standard-32+nvidia-tesla-t4+1 | 32 | 120 | $2.088 |
| n1-standard-32+nvidia-tesla-t4+2 | 32 | 120 | $2.352 |
| n1-standard-32+nvidia-tesla-t4+4 | 32 | 120 | $2.880 |
| n1-standard-64+nvidia-tesla-t4+1 | 64 | 240 | $3.912 |
| n1-standard-64+nvidia-tesla-t4+2 | 64 | 240 | $4.176 |
| n1-standard-64+nvidia-tesla-t4+4 | 64 | 240 | $4.704 |
| n1-standard-96+nvidia-tesla-t4+1 | 96 | 360 | $5.736 |
| n1-standard-96+nvidia-tesla-t4+2 | 96 | 360 | $6.000 |
| n1-standard-96+nvidia-tesla-t4+4 | 96 | 360 | $6.528 |
    - -### Azure AKS Cluster Pricing - -Azure clusters will be charged Azure list pricing plus a markup of 20%. Note that the markup will be calculated at the rounded price per hour in order to make hourly prices fixed. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Instance Type | VCPUs | Memory (GiB) | Rate | List Price | USD/Credit per hour |
|---------------|-------|--------------|------|------------|---------------------|
| Standard_B2ms | 2 | 8 | 8320 | $0.083 | $0.100 |
| Standard_B4ms | 4 | 16 | 16600 | $0.166 | $0.199 |
| Standard_B8ms | 8 | 32 | 33300 | $0.333 | $0.400 |
| Standard_B16ms | 16 | 64 | 66600 | $0.666 | $0.799 |
| Standard_DS2_v2 | 2 | 7 | 14600 | $0.146 | $0.175 |
| Standard_DS3_v2 | 4 | 14 | 29300 | $0.293 | $0.352 |
| Standard_DS4_v2 | 8 | 28 | 58500 | $0.585 | $0.702 |
| Standard_DS5_v2 | 16 | 56 | 117000 | $1.170 | $1.404 |
| Standard_D2ps_v5 | 2 | 8 | 14600 | $0.077 | $0.092 |
| Standard_D4ps_v5 | 4 | 16 | 7700 | $0.154 | $0.185 |
| Standard_D8ps_v5 | 8 | 32 | 15400 | $0.308 | $0.370 |
| Standard_D16ps_v5 | 16 | 64 | 30800 | $0.616 | $0.739 |
| Standard_D32ps_v5 | 32 | 128 | 61600 | $1.232 | $1.478 |
| Standard_D48ps_v5 | 48 | 192 | 23200 | $1.848 | $2.218 |
| Standard_NC4as_T4_v3 | 4 | 28 | 52600 | $0.526 | $0.631 |
| Standard_NC8as_T4_v3 | 8 | 56 | 75200 | $0.752 | $0.902 |
| Standard_NC16as_T4_v3 | 16 | 110 | 120400 | $1.204 | $1.445 |
| Standard_NC64as_T4_v3 | 64 | 440 | 435200 | $4.352 | $5.222 |
| Standard_D2S_v5 | 2 | 8 | 9600 | $0.096 | $0.115 |
| Standard_D4S_v5 | 4 | 16 | 19200 | $0.192 | $0.230 |
| Standard_D8S_v5 | 8 | 32 | 38400 | $0.384 | $0.461 |
| Standard_D16S_v5 | 16 | 64 | 76800 | $0.768 | $0.922 |
| Standard_D32S_v5 | 32 | 128 | 153600 | $1.536 | $1.843 |
| Standard_D64S_v5 | 64 | 192 | 230400 | $2.304 | $2.765 |
    - -### Oracle OKE Cluster Pricing - -Oracle based clusters will be charged Oracle list pricing plus a markup of 20%. Note that the markup will be calculated at the rounded price per hour in order to make hourly prices fixed. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Instance Type | VCPUs | Memory (GiB) | USD/Credit per hour |
|---------------|-------|--------------|---------------------|
| VM.Standard2.1 | 1 | 15 | $0.076 |
| VM.Standard2.2 | 2 | 30 | $0.153 |
| VM.Standard2.4 | 4 | 60 | $0.306 |
| VM.Standard2.8 | 8 | 120 | $0.612 |
| VM.Standard2.16 | 16 | 240 | $1.225 |
| VM.Standard3Flex.1 | 1 | 4 | $0.055 |
| VM.Standard3Flex.2 | 2 | 8 | $0.110 |
| VM.Standard3Flex.4 | 4 | 16 | $0.221 |
| VM.Standard3Flex.8 | 8 | 32 | $0.442 |
| VM.Standard3Flex.16 | 16 | 64 | $0.883 |
| VM.Standard.A1.Flex.1 | 1 | 4 | $0.019 |
| VM.Standard.A1.Flex.2 | 2 | 8 | $0.038 |
| VM.Standard.A1.Flex.4 | 4 | 16 | $0.077 |
| VM.Standard.A1.Flex.8 | 8 | 32 | $0.154 |
| VM.Standard.A1.Flex.16 | 16 | 64 | $0.309 |
    - -Last modified January 06, 2025 - -================ -File: docs/vendor/testing-supported-clusters.md -================ -import Pool from "../partials/cmx/\_openshift-pool.mdx" - -# Supported Compatibility Matrix Cluster Types - -This topic describes the supported Kubernetes distributions, Kubernetes versions, instance types, nodes, limitations, and common use cases for clusters created with Replicated Compatibility Matrix. - -Compatibility Matrix provisions cloud-based or virtual machine (VM) clusters. - -## VM Clusters - -This section lists the supported VM cluster distributions for clusters created with Compatibility Matrix. - -### kind - -Compatibility Matrix supports creating [kind](https://kind.sigs.k8s.io/) clusters. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Type | Description |
|------|-------------|
| Supported Kubernetes Versions | {/* START_kind_VERSIONS */}1.26.15, 1.27.16, 1.28.15, 1.29.14, 1.30.10, 1.31.6, 1.32.2{/* END_kind_VERSIONS */} |
| Supported Instance Types | See Replicated Instance Types |
| Node Groups | No |
| Node Auto Scaling | No |
| Nodes | Supports a single node. |
| IP Family | Supports `ipv4` or `dual`. |
| Limitations | See Limitations |
| Common Use Cases | Smoke tests |
    - -### k3s - -Compatibility Matrix supports creating [k3s](https://k3s.io) clusters. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Type | Description |
|------|-------------|
| Supported k3s Versions | The upstream k8s version that matches the Kubernetes version requested. |
| Supported Kubernetes Versions | {/* START_k3s_VERSIONS */}1.24.1, 1.24.2, 1.24.3, 1.24.4, 1.24.6, 1.24.7, 1.24.8, 1.24.9, 1.24.10, 1.24.11, 1.24.12, 1.24.13, 1.24.14, 1.24.15, 1.24.16, 1.24.17, 1.25.0, 1.25.2, 1.25.3, 1.25.4, 1.25.5, 1.25.6, 1.25.7, 1.25.8, 1.25.9, 1.25.10, 1.25.11, 1.25.12, 1.25.13, 1.25.14, 1.25.15, 1.25.16, 1.26.0, 1.26.1, 1.26.2, 1.26.3, 1.26.4, 1.26.5, 1.26.6, 1.26.7, 1.26.8, 1.26.9, 1.26.10, 1.26.11, 1.26.12, 1.26.13, 1.26.14, 1.26.15, 1.27.1, 1.27.2, 1.27.3, 1.27.4, 1.27.5, 1.27.6, 1.27.7, 1.27.8, 1.27.9, 1.27.10, 1.27.11, 1.27.12, 1.27.13, 1.27.14, 1.27.15, 1.27.16, 1.28.1, 1.28.2, 1.28.3, 1.28.4, 1.28.5, 1.28.6, 1.28.7, 1.28.8, 1.28.9, 1.28.10, 1.28.11, 1.28.12, 1.28.13, 1.28.14, 1.28.15, 1.29.0, 1.29.1, 1.29.2, 1.29.3, 1.29.4, 1.29.5, 1.29.6, 1.29.7, 1.29.8, 1.29.9, 1.29.10, 1.29.11, 1.29.12, 1.29.13, 1.29.14, 1.30.0, 1.30.1, 1.30.2, 1.30.3, 1.30.4, 1.30.5, 1.30.6, 1.30.7, 1.30.8, 1.30.9, 1.30.10, 1.31.0, 1.31.1, 1.31.2, 1.31.3, 1.31.4, 1.31.5, 1.31.6, 1.32.0, 1.32.1, 1.32.2{/* END_k3s_VERSIONS */} |
| Supported Instance Types | See Replicated Instance Types |
| Node Groups | Yes |
| Node Auto Scaling | No |
| Nodes | Supports multiple nodes. |
| IP Family | Supports `ipv4`. |
| Limitations | For additional limitations that apply to all distributions, see Limitations. |
| Common Use Cases | Smoke tests<br/>Customer release tests |
    - -### RKE2 (Beta) - -Compatibility Matrix supports creating [RKE2](https://docs.rke2.io/) clusters. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Type | Description |
|------|-------------|
| Supported RKE2 Versions | The upstream k8s version that matches the Kubernetes version requested. |
| Supported Kubernetes Versions | {/* START_rke2_VERSIONS */}1.24.1, 1.24.2, 1.24.3, 1.24.4, 1.24.6, 1.24.7, 1.24.8, 1.24.9, 1.24.10, 1.24.11, 1.24.12, 1.24.13, 1.24.14, 1.24.15, 1.24.16, 1.24.17, 1.25.0, 1.25.2, 1.25.3, 1.25.4, 1.25.5, 1.25.6, 1.25.7, 1.25.8, 1.25.9, 1.25.10, 1.25.11, 1.25.12, 1.25.13, 1.25.14, 1.25.15, 1.25.16, 1.26.0, 1.26.1, 1.26.2, 1.26.3, 1.26.4, 1.26.5, 1.26.6, 1.26.7, 1.26.8, 1.26.9, 1.26.10, 1.26.11, 1.26.12, 1.26.13, 1.26.14, 1.26.15, 1.27.1, 1.27.2, 1.27.3, 1.27.4, 1.27.5, 1.27.6, 1.27.7, 1.27.8, 1.27.9, 1.27.10, 1.27.11, 1.27.12, 1.27.13, 1.27.14, 1.27.15, 1.27.16, 1.28.2, 1.28.3, 1.28.4, 1.28.5, 1.28.6, 1.28.7, 1.28.8, 1.28.9, 1.28.10, 1.28.11, 1.28.12, 1.28.13, 1.28.14, 1.28.15, 1.29.0, 1.29.1, 1.29.2, 1.29.3, 1.29.4, 1.29.5, 1.29.6, 1.29.7, 1.29.8, 1.29.9, 1.29.10, 1.29.11, 1.29.12, 1.29.13, 1.29.14, 1.30.0, 1.30.1, 1.30.2, 1.30.3, 1.30.4, 1.30.5, 1.30.6, 1.30.7, 1.30.8, 1.30.9, 1.30.10, 1.31.0, 1.31.1, 1.31.2, 1.31.3, 1.31.4, 1.31.5, 1.31.6, 1.32.0, 1.32.1, 1.32.2{/* END_rke2_VERSIONS */} |
| Supported Instance Types | See Replicated Instance Types |
| Node Groups | Yes |
| Node Auto Scaling | No |
| Nodes | Supports multiple nodes. |
| IP Family | Supports `ipv4`. |
| Limitations | For additional limitations that apply to all distributions, see Limitations. |
| Common Use Cases | Smoke tests<br/>Customer release tests |
### OpenShift OKD

Compatibility Matrix supports creating [Red Hat OpenShift OKD](https://www.okd.io/) clusters, which is the community distribution of OpenShift, using CodeReady Containers (CRC).

OpenShift clusters are provisioned with two users:

- (Default) A `kubeadmin` user with `cluster-admin` privileges. Use the `kubeadmin` user only for administrative tasks, such as creating new users or setting roles.
- A `developer` user with namespace-scoped privileges. The `developer` user can be used to better simulate access in end-customer environments.

By default, the kubeconfig context is set to the `kubeadmin` user. To switch to the `developer` user, run the command `oc login --username developer`.
| Type | Description |
|------|-------------|
| Supported OpenShift Versions | {/* START_openshift_VERSIONS */}4.10.0-okd, 4.11.0-okd, 4.12.0-okd, 4.13.0-okd, 4.14.0-okd, 4.15.0-okd, 4.16.0-okd, 4.17.0-okd{/* END_openshift_VERSIONS */} |
| Supported Instance Types | See Replicated Instance Types |
| Node Groups | Yes |
| Node Auto Scaling | No |
| Nodes | Supports multiple nodes for versions 4.13.0-okd and later. |
| IP Family | Supports `ipv4`. |
| Limitations | OpenShift does not support r1.small instance types.<br/>OpenShift versions earlier than 4.13-okd do not have a registry mirror and so may be subject to rate limiting from Docker Hub. For information about Docker Hub rate limiting, see Docker Hub rate limit. To increase limits, Replicated recommends that you configure an image pull secret to pull public Docker Hub images as an authenticated user. For more information about how to configure image pull secrets, see Pull an Image from a Private Registry in the Kubernetes documentation.<br/>OpenShift builds take approximately 17 minutes.<br/><br/>For additional limitations that apply to all distributions, see Limitations. |
| Common Use Cases | Customer release tests |
    - -### Embedded Cluster - -Compatibility Matrix supports creating clusters with Replicated Embedded Cluster. For more information, see [Embedded Cluster Overview](/vendor/embedded-overview). - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Type | Description |
|------|-------------|
| Supported Embedded Cluster Versions | Any valid release sequence that has previously been promoted to the channel where the customer license is assigned. Version is optional and defaults to the latest available release on the channel. |
| Supported Instance Types | See Replicated Instance Types |
| Node Groups | Yes |
| Nodes | Supports multiple nodes (alpha). |
| IP Family | Supports `ipv4`. |
| Limitations | The Admin Console UI is not exposed publicly and must be exposed via `kubectl -n kotsadm port-forward svc/kurl-proxy-kotsadm 38800:8800`. The password for the Admin Console is `password`.<br/>A valid customer license is required to create an Embedded Cluster.<br/>The [cluster prepare](/vendor/testing-how-to#prepare-clusters) command is not supported.<br/><br/>For additional limitations that apply to all distributions, see Limitations. |
| Common Use Cases | Customer release tests |
    - -### kURL - -Compatibility Matrix supports creating [kURL](https://kurl.sh) clusters. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Type | Description |
|------|-------------|
| Supported kURL Versions | Any promoted kURL installer. Version is optional. For an installer version other than "latest", you can find the specific Installer ID for a previously promoted installer under the relevant **Install Command** (ID after kurl.sh/) on the **Channels > kURL Installer History** page in the Vendor Portal. For more information about viewing the history of kURL installers promoted to a channel, see [Installer History](/vendor/installer-history). |
| Supported Instance Types | See Replicated Instance Types |
| Node Groups | Yes |
| Node Auto Scaling | No |
| Nodes | Supports multiple nodes. |
| IP Family | Supports `ipv4`. |
| Limitations | Does not work with the Longhorn add-on.<br/><br/>For additional limitations that apply to all distributions, see Limitations. |
| Common Use Cases | Customer release tests |
    - -## Cloud Clusters - -This section lists the supported cloud clusters for compatibility testing. - -### EKS - -Compatibility Matrix supports creating [AWS EKS](https://aws.amazon.com/eks/?nc2=type_a) clusters. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Type | Description |
|------|-------------|
| Supported Kubernetes Versions | {/* START_eks_VERSIONS */}1.25, 1.26, 1.27, 1.28, 1.29, 1.30, 1.31, 1.32{/* END_eks_VERSIONS */}<br/><br/>Extended Support Versions: 1.25, 1.26, 1.27, 1.28 |
| Supported Instance Types | m6i.large, m6i.xlarge, m6i.2xlarge, m6i.4xlarge, m6i.8xlarge, m7i.large, m7i.xlarge, m7i.2xlarge, m7i.4xlarge, m7i.8xlarge, m5.large, m5.xlarge, m5.2xlarge, m5.4xlarge, m5.8xlarge, m7g.large (arm), m7g.xlarge (arm), m7g.2xlarge (arm), m7g.4xlarge (arm), m7g.8xlarge (arm), c5.large, c5.xlarge, c5.2xlarge, c5.4xlarge, c5.9xlarge, g4dn.xlarge (gpu), g4dn.2xlarge (gpu), g4dn.4xlarge (gpu), g4dn.8xlarge (gpu), g4dn.12xlarge (gpu), g4dn.16xlarge (gpu)<br/><br/>g4dn instance types depend on available capacity. After a g4dn cluster is running, you also need to install your version of the NVIDIA device plugin for Kubernetes. See [Amazon EKS optimized accelerated Amazon Linux AMIs](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html#gpu-ami) in the AWS documentation. |
| Node Groups | Yes |
| Node Auto Scaling | Yes. Cost will be based on the max number of nodes. |
| Nodes | Supports multiple nodes. |
| IP Family | Supports `ipv4`. |
| Limitations | You can choose only a minor version, not a patch version. The EKS installer chooses the latest patch for that minor version.<br/><br/>For additional limitations that apply to all distributions, see Limitations. |
| Common Use Cases | Customer release tests |
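The note above says that g4dn clusters need the NVIDIA device plugin installed before GPUs are schedulable. The following is a minimal sketch of one common way to do that; the plugin version `v0.14.1` is an assumption, so check the [NVIDIA/k8s-device-plugin](https://github.com/NVIDIA/k8s-device-plugin) releases for the manifest that matches your cluster.

```bash
# Install the NVIDIA device plugin DaemonSet (v0.14.1 is a placeholder version).
kubectl create -f https://raw.githubusercontent.com/NVIDIA/k8s-device-plugin/v0.14.1/nvidia-device-plugin.yml

# Verify that the nodes now advertise nvidia.com/gpu as an allocatable resource.
kubectl describe nodes | grep -A8 "Allocatable:" | grep nvidia.com/gpu
```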
    - -### GKE - -Compatibility Matrix supports creating [Google GKE](https://cloud.google.com/kubernetes-engine) clusters. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Type | Description |
|------|-------------|
| Supported Kubernetes Versions | {/* START_gke_VERSIONS */}1.29, 1.30, 1.31, 1.32{/* END_gke_VERSIONS */} |
| Supported Instance Types | n2-standard-2, n2-standard-4, n2-standard-8, n2-standard-16, n2-standard-32, t2a-standard-2 (arm), t2a-standard-4 (arm), t2a-standard-8 (arm), t2a-standard-16 (arm), t2a-standard-32 (arm), t2a-standard-48 (arm), e2-standard-2, e2-standard-4, e2-standard-8, e2-standard-16, e2-standard-32, n1-standard-1+nvidia-tesla-t4+1 (gpu), n1-standard-1+nvidia-tesla-t4+2 (gpu), n1-standard-1+nvidia-tesla-t4+4 (gpu), n1-standard-2+nvidia-tesla-t4+1 (gpu), n1-standard-2+nvidia-tesla-t4+2 (gpu), n1-standard-2+nvidia-tesla-t4+4 (gpu), n1-standard-4+nvidia-tesla-t4+1 (gpu), n1-standard-4+nvidia-tesla-t4+2 (gpu), n1-standard-4+nvidia-tesla-t4+4 (gpu), n1-standard-8+nvidia-tesla-t4+1 (gpu), n1-standard-8+nvidia-tesla-t4+2 (gpu), n1-standard-8+nvidia-tesla-t4+4 (gpu), n1-standard-16+nvidia-tesla-t4+1 (gpu), n1-standard-16+nvidia-tesla-t4+2 (gpu), n1-standard-16+nvidia-tesla-t4+4 (gpu), n1-standard-32+nvidia-tesla-t4+1 (gpu), n1-standard-32+nvidia-tesla-t4+2 (gpu), n1-standard-32+nvidia-tesla-t4+4 (gpu), n1-standard-64+nvidia-tesla-t4+1 (gpu), n1-standard-64+nvidia-tesla-t4+2 (gpu), n1-standard-64+nvidia-tesla-t4+4 (gpu), n1-standard-96+nvidia-tesla-t4+1 (gpu), n1-standard-96+nvidia-tesla-t4+2 (gpu), n1-standard-96+nvidia-tesla-t4+4 (gpu)<br/><br/>You can specify more than one node. |
| Node Groups | Yes |
| Node Auto Scaling | Yes. Cost will be based on the max number of nodes. |
| Nodes | Supports multiple nodes. |
| IP Family | Supports `ipv4`. |
| Limitations | You can choose only a minor version, not a patch version. The GKE installer chooses the latest patch for that minor version.<br/><br/>For additional limitations that apply to all distributions, see Limitations. |
| Common Use Cases | Customer release tests |
    - -### AKS - -Compatibility Matrix supports creating [Azure AKS](https://azure.microsoft.com/en-us/products/kubernetes-service) clusters. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Type | Description |
|------|-------------|
| Supported Kubernetes Versions | {/* START_aks_VERSIONS */}1.29, 1.30, 1.31{/* END_aks_VERSIONS */} |
| Supported Instance Types | Standard_B2ms, Standard_B4ms, Standard_B8ms, Standard_B16ms, Standard_DS2_v2, Standard_DS3_v2, Standard_DS4_v2, Standard_DS5_v2, Standard_DS2_v5, Standard_DS3_v5, Standard_DS4_v5, Standard_DS5_v5, Standard_D2ps_v5 (arm), Standard_D4ps_v5 (arm), Standard_D8ps_v5 (arm), Standard_D16ps_v5 (arm), Standard_D32ps_v5 (arm), Standard_D48ps_v5 (arm), Standard_NC4as_T4_v3 (gpu), Standard_NC8as_T4_v3 (gpu), Standard_NC16as_T4_v3 (gpu), Standard_NC64as_T4_v3 (gpu)<br/><br/>GPU instance types depend on available capacity. After a GPU cluster is running, you also need to install your version of the NVIDIA device plugin for Kubernetes. See [NVIDIA GPU Operator with Azure Kubernetes Service](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/microsoft-aks.html) in the NVIDIA documentation. |
| Node Groups | Yes |
| Node Auto Scaling | Yes. Cost will be based on the max number of nodes. |
| Nodes | Supports multiple nodes. |
| IP Family | Supports `ipv4`. |
| Limitations | You can choose only a minor version, not a patch version. The AKS installer chooses the latest patch for that minor version.<br/><br/>For additional limitations that apply to all distributions, see Limitations. |
| Common Use Cases | Customer release tests |
    - -### OKE (Beta) - -Compatibility Matrix supports creating [Oracle Container Engine for Kubernetes (OKE)](https://docs.oracle.com/en-us/iaas/Content/ContEng/Concepts/contengoverview.htm) clusters. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Type | Description |
|------|-------------|
| Supported Kubernetes Versions | {/* START_oke_VERSIONS */}1.29.1, 1.30.1, 1.31.1{/* END_oke_VERSIONS */} |
| Supported Instance Types | VM.Standard2.1, VM.Standard2.2, VM.Standard2.4, VM.Standard2.8, VM.Standard2.16, VM.Standard3.Flex.1, VM.Standard3.Flex.2, VM.Standard3.Flex.4, VM.Standard3.Flex.8, VM.Standard3.Flex.16, VM.Standard.A1.Flex.1 (arm), VM.Standard.A1.Flex.2 (arm), VM.Standard.A1.Flex.4 (arm), VM.Standard.A1.Flex.8 (arm), VM.Standard.A1.Flex.16 (arm) |
| Node Groups | Yes |
| Node Auto Scaling | No |
| Nodes | Supports multiple nodes. |
| IP Family | Supports `ipv4`. |
| Limitations | Provisioning an OKE cluster takes between 8 and 10 minutes. If needed, some timeouts in your CI pipelines might have to be adjusted.<br/><br/>For additional limitations that apply to all distributions, see Limitations. |
| Common Use Cases | Customer release tests |
    - -## Replicated Instance Types {#types} - -When creating a VM-based cluster with Compatibility Matrix, you must specify a Replicated instance type. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Type | Memory (GiB) | VCPU Count |
|------|--------------|------------|
| r1.small | 8 GB | 2 VCPUs |
| r1.medium | 16 GB | 4 VCPUs |
| r1.large | 32 GB | 8 VCPUs |
| r1.xlarge | 64 GB | 16 VCPUs |
| r1.2xlarge | 128 GB | 32 VCPUs |
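For example, to request one of these instance types when creating a VM-based cluster, pass it to `--instance-type` with the flags shown in [Using Compatibility Matrix](testing-how-to). The distribution and version below are placeholders; any supported VM distribution works:

```bash
# Create a k3s cluster on an r1.medium instance (4 VCPUs, 16 GiB) with a 50 GiB disk.
replicated cluster create \
  --distribution k3s \
  --version 1.31.2 \
  --instance-type r1.medium \
  --disk 50
```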
## Kubernetes Version Support Policy

We do not maintain forks or patches of the supported distributions. When a Kubernetes version in Compatibility Matrix is out of support (EOL), Replicated will attempt to continue to support this version for six months for compatibility testing to support customers who are running out-of-date versions of Kubernetes. In the event that a critical security issue or bug is found and unresolved, we might discontinue support for EOL versions of Kubernetes sooner than six months post EOL.

================
File: docs/vendor/tutorial-adding-db-config.md
================
# Example: Adding Database Configuration Options

In this tutorial, we'll explore ways to give your end user the option to either embed a database instance with the application, or connect your application to an external database instance that they will manage.
We'll use a PostgreSQL database as an example, configuring an example app to connect.

This tutorial explores advanced topics like workload coordination, credential management, and refactoring your application's user-facing configuration in the Replicated Admin Console. We'll also review best practices for integrating persistent stores like databases, queues, and caches.

It is split into the following sections:

- [The Example Application](#the-example-application)
- [User-Facing Configuration](#user-facing-configuration)
- [Embedding a Database](#embedding-a-database)
- [Connecting to an External Database](#connecting-to-an-external-database)

### Prerequisites

This guide assumes you have:

* A running instance of the Replicated Admin Console (`kotsadm`) to iterate against, in either an existing cluster or an embedded cluster created with Replicated kURL. If you do not have a running instance of the Admin Console in an existing or kURL cluster, complete the [Install with KOTS in an Existing Cluster](tutorial-cli-setup) tutorial to package and install a sample application.
* A local git checkout of your application manifests.

### Accompanying Code Examples

A full example of the code for this guide can be found in the [kotsapps repository](https://github.com/replicatedhq/kotsapps/tree/master/postgres-snapshots).

* * *

## The Example Application

For demonstration purposes, we'll use a simple app that connects to a Postgres database via the `psql` CLI.
Once you've finished this guide, you should feel confident replacing it with any Kubernetes workload(s) that need to connect to a database.
The deployment we'll use can be seen below:

```yaml
# pg-consumer.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pg-consumer
spec:
  selector:
    matchLabels:
      app: pg-consumer
  template:
    metadata:
      labels:
        app: pg-consumer
    spec:
      containers:
      - name: pg-consumer
        image: postgres:10
        # connect to the database every 20 seconds
        command:
        - /bin/sh
        - -ec
        - |
          while :; do
             sleep 20
             PGPASSWORD=${DB_PASSWORD} \
               psql --host ${DB_HOST} \
                    --port ${DB_PORT} \
                    --user ${DB_USER} \
                    --dbname ${DB_NAME} \
                    --command 'SELECT NOW()'
          done
        # hard coded for now, we'll wire these up later
        env:
        - name: DB_HOST
          value: postgres
        - name: DB_PORT
          value: "5432"
        - name: DB_USER
          value: postgres
        - name: DB_PASSWORD
          value: postgres
        - name: DB_NAME
          value: postgres
```

This app simply connects to the database every 20 seconds and writes the server timestamp to stdout.
Even though `psql` supports [default environment variables](https://www.postgresql.org/docs/current/libpq-envars.html) for host, username, and so on that can be read transparently, we're intentionally using these generic `DB_` variables for clarity.
Later, you can change these environment variable names to whatever format your application consumes.

For now we'll hard code the DB variable values; in the next sections we'll wire these up to the user-provided configuration.

### Deploying the example application

Once you've added this deployment to your application's `manifests` directory, create a release by running `replicated release create --auto` locally.
Then head to the Admin Console instance and click **Check for Updates** on the Version History tab to pull the new release:

![View Update](/images/guides/kots/view-update.png)

Click **Deploy**. You should be able to review the logs and see `deployment.apps/pg-consumer created` in `applyStdout`:

![Deployed PG Consumer](/images/guides/kots/pg-consumer-deployed.png)

After it is deployed, you can run `kubectl get pods` to inspect the cluster.
We should expect the Pod to be crashlooping at this point, since there's no database to connect to just yet:

```text
$ kubectl get pod
NAME                               READY   STATUS             RESTARTS   AGE
kotsadm-5bbf54df86-p7kqg           1/1     Running            0          12m
kotsadm-api-cbccb97ff-b6qxp        1/1     Running            2          12m
kotsadm-minio-0                    1/1     Running            0          12m
kotsadm-operator-84477b5c4-tplcp   1/1     Running            0          12m
kotsadm-postgres-0                 1/1     Running            0          12m
pg-consumer-75f49bfb69-mljr6       0/1     CrashLoopBackOff   1          10s
```

Checking the logs, we should see a connect error:

```text
$ kubectl logs -l app=pg-consumer
psql: could not translate host name "postgres" to address: Name or service not known
```

If the `kubectl logs` command hangs, you can try using the `--previous` flag to fetch the logs of the most recent crash:

```text
$ kubectl logs -l app=pg-consumer --previous
psql: could not translate host name "postgres" to address: Name or service not known
```

Now that our test app is deployed, we'll walk through presenting options to the end user for connecting a Postgres instance to this app.

* * *

## User-Facing Configuration

The core of this guide will be around how to give your end users the option to do one of the following actions:

* Bring their own PostgreSQL instance for your app to connect to
* Use an "embedded" database bundled in with the application

The first step here is to present that option to the user, then we'll walk through implementing each scenario.
The `kots.io/v1beta1` `Config` resource controls what configuration options are presented to the end user.
If you followed one of the "Getting Started" guides, you probably have a `config.yaml` in your manifests that looks something like the following YAML file:

```yaml
apiVersion: kots.io/v1beta1
kind: Config
metadata:
  name: config-sample
spec:
  groups:
  - name: example_settings
    title: My Example Config
    description: Configuration to serve as an example for creating your own. See [https://kots.io/reference/v1beta1/config/](https://kots.io/reference/v1beta1/config/) for configuration docs. In this case, we provide example fields for configuring an Ingress object.
    items:
    - name: use_ingress
      title: Use Ingress?
      help_text: An example field to toggle inclusion of an Ingress Object
      type: bool
      default: "0"
    - name: ingress_hostname
      title: Ingress Hostname
      help_text: If desired, enter the hostname for ingress to this application. You can enter the IP of this instance, or a DNS hostname.
      type: text
      when: repl{{ ConfigOptionEquals "use_ingress" "1" }}
```

To add a database section, we'll modify it to include some database settings.
In this case we'll remove the Ingress toggle that is included as an example, although you might also choose to leave this in.
None of these database settings will have any effect yet, but we'll still be able to preview what the end user will see.
Modify your YAML to include this database section:

```yaml
apiVersion: kots.io/v1beta1
kind: Config
metadata:
  name: config-sample
spec:
  groups:
  - name: database
    title: Database
    items:
    - name: postgres_type
      help_text: Would you like to use an embedded postgres instance, or connect to an external instance that you manage?
      type: radio
      title: Postgres
      default: embedded_postgres
      items:
      - name: embedded_postgres
        title: Embedded Postgres
      - name: external_postgres
        title: External Postgres
    - name: embedded_postgres_password
      hidden: true
      type: password
      value: "{{repl RandomString 32}}"
```

This creates a toggle to allow the user to choose between an embedded or external Postgres instance, and a `hidden` field to generate a unique password for the embedded instance.

As mentioned in the introduction, a full example of the code for this guide can be found in the [kotsapps repository](https://github.com/replicatedhq/kotsapps/tree/master/postgres-snapshots).

### Validating Config Changes

Even though the options aren't wired up yet, let's create a new release to validate that the configuration screen was modified.
Create a release by running `replicated release create --auto`.
Then head to the Admin Console instance and click **Check for Updates** on the Version History tab to pull the new release:

![View Update](/images/guides/kots/view-update.png)

After the update is deployed, click the Config tab and review our new toggle.
You might also notice that we've removed the Ingress settings to simplify things for this guide:

![Database Config](/images/guides/kots/database-config.png)

Now that we have the configuration screen started, we can proceed to implement the "Embedded Postgres" option.

* * *

## Embedding a Database

To implement the embedded Database option, we'll add a Kubernetes [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/), and use the [annotations for optional resources](packaging-include-resources/) to control when it will be included in the application.

### Adding the Secret and StatefulSet

First, we'll create a secret to store the root password for our embedded postgres instance:

```yaml
# postgres-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: postgres
data:
  DB_PASSWORD: '{{repl ConfigOption "embedded_postgres_password" | Base64Encode }}'
```

Next, create a new YAML file in your `manifests` directory with the following contents.
Note the use of `kots.io/when` to only conditionally include this based on end-user inputs:

```yaml
# postgres-statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: postgres
  labels:
    app: pg-provider
  annotations:
    kots.io/when: '{{repl ConfigOptionEquals "postgres_type" "embedded_postgres" }}'
spec:
  replicas: 1
  selector:
    matchLabels:
      app: pg-provider
  serviceName: postgres
  template:
    metadata:
      labels:
        app: pg-provider
    spec:
      containers:
      - env:
        - name: PGDATA
          value: /var/lib/postgresql/data/pgdata
        # create a db called "postgres"
        - name: POSTGRES_DB
          value: postgres
        # create admin user with name "postgres"
        - name: POSTGRES_USER
          value: postgres
        # use admin password from secret
        - name: POSTGRES_PASSWORD
          valueFrom:
            secretKeyRef:
              key: DB_PASSWORD
              name: postgres
        image: postgres:10
        name: postgres
        volumeMounts:
        - mountPath: /var/lib/postgresql/data
          name: pgdata
      volumes:
      - name: pgdata
        persistentVolumeClaim:
          claimName: pgdata
  volumeClaimTemplates:
  - metadata:
      name: pgdata
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
```

Finally, let's add a Service object so we can route traffic to our postgres instance, again using `kots.io/when` to conditionally include this resource:

```yaml
# postgres-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: postgres
  labels:
    app: pg-provider
  annotations:
    kots.io/when: '{{repl ConfigOptionEquals "postgres_type" "embedded_postgres" }}'
spec:
  ports:
  - port: 5432
  selector:
    app: pg-provider
  type: ClusterIP
```

### Validating the embedded Database

After you've added these resources, you can push a new release and update in the Admin Console.
You should see the following in the deployment logs:

![Embedded PG Deployed](/images/guides/kots/embedded-pg-deployed.png)

We should now see an instance of Postgres running in our namespace as well.
The consumer may still be crashlooping, but we can see the error is different now:

```text
$ kubectl logs -l app=pg-consumer
psql: FATAL: password authentication failed for user "postgres"
```

This is because we still need to deliver the generated password to our workload pod.
In `pg-consumer.yaml`, we'll remove this section:

```yaml
        - name: DB_PASSWORD
          value: postgres
```

and replace it with:

```yaml
        - name: DB_PASSWORD
          valueFrom:
            secretKeyRef:
              name: postgres
              key: DB_PASSWORD
```

The full Deployment should now look like the following YAML file:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pg-consumer
spec:
  selector:
    matchLabels:
      app: pg-consumer
  template:
    metadata:
      labels:
        app: pg-consumer
    spec:
      containers:
      - name: pg-consumer
        image: 'postgres:10'
        # connect to the database every 20 seconds
        command:
        - /bin/sh
        - -ec
        - |
          while :; do
             sleep 20
             PGPASSWORD=${DB_PASSWORD} \
               psql --host ${DB_HOST} \
                    --port ${DB_PORT} \
                    --user ${DB_USER} \
                    --dbname ${DB_NAME} \
                    --command 'SELECT NOW()'
          done
        env:
        - name: DB_HOST
          value: postgres
        - name: DB_PORT
          value: "5432"
        - name: DB_USER
          value: postgres
        - name: DB_NAME
          value: postgres
        - name: DB_PASSWORD
          valueFrom:
            secretKeyRef:
              name: postgres
              key: DB_PASSWORD
```

From here, make another release and deploy it.
You should see the consumer pod is now able to connect to the database:

```text
$ kubectl get pod
NAME                               READY   STATUS    RESTARTS   AGE
kotsadm-5bbf54df86-p7kqg           1/1     Running   0          144m
kotsadm-api-cbccb97ff-b6qxp        1/1     Running   2          144m
kotsadm-minio-0                    1/1     Running   0          144m
kotsadm-operator-84477b5c4-tplcp   1/1     Running   0          144m
kotsadm-postgres-0                 1/1     Running   0          144m
pg-consumer-77b868d7d8-xdn9v       1/1     Running   0          20s
postgres-0                         1/1     Running   0          6m22s
```

Checking the logs, we can connect now:

```text
$ kubectl logs -l app=pg-consumer
              now
-------------------------------
 2020-04-12 17:11:45.019293+00
(1 row)

              now
-------------------------------
 2020-04-12 17:11:55.072041+00
(1 row)
```

Now that we've configured our application to read from an embedded postgres instance, we'll switch to allowing the end user to provide their own database connection parameters.

* * *

## Connecting to an External Database

In this section, we'll expand our configuration screen to allow end users to bring their own Postgres instance.

### Modifying the Config Screen

Let's update our config screen to allow an end user to input some details about their database.
We'll add the following YAML, noting the use of the `when` field to conditionally hide or show fields in the user-facing config screen:

```yaml
      - name: external_postgres_host
        title: Postgres Host
        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
        type: text
        default: postgres
      - name: external_postgres_port
        title: Postgres Port
        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
        type: text
        default: "5432"
      - name: external_postgres_user
        title: Postgres Username
        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
        type: text
        required: true
      - name: external_postgres_password
        title: Postgres Password
        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
        type: password
        required: true
      - name: external_postgres_db
        title: Postgres Database
        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
        type: text
        default: postgres
```

Your full configuration screen should now look something like the following YAML file:

```yaml
apiVersion: kots.io/v1beta1
kind: Config
metadata:
  name: config-sample
spec:
  groups:
    - name: database
      title: Database
      items:
      - name: postgres_type
        help_text: Would you like to use an embedded postgres instance, or connect to an external instance that you manage?
        type: radio
        title: Postgres
        default: embedded_postgres
        items:
        - name: embedded_postgres
          title: Embedded Postgres
        - name: external_postgres
          title: External Postgres
      - name: embedded_postgres_password
        hidden: true
        type: password
        value: "{{repl RandomString 32}}"
      - name: external_postgres_host
        title: Postgres Host
        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
        type: text
        default: postgres
      - name: external_postgres_port
        title: Postgres Port
        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
        type: text
        default: "5432"
      - name: external_postgres_user
        title: Postgres Username
        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
        type: text
        required: true
      - name: external_postgres_password
        title: Postgres Password
        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
        type: password
        required: true
      - name: external_postgres_db
        title: Postgres Database
        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
        type: text
        default: postgres
```

Let's save this and create a new release. After deploying the release in the Admin Console, click **Config** and set the toggle to "External Postgres" to see the new fields.

To demonstrate that these are working, let's enter some values that we know won't work, and confirm that selecting "External Postgres" removes our embedded postgres instance:

![External PG Config Fake](/images/guides/kots/external-pg-config-fake.png)

Save these settings, and then you'll be directed back to the Version History page to apply the change:

![Deploy Config Change](/images/guides/kots/deploy-config-change.png)

After this is deployed, we should see that the postgres statefulset has been removed, and that our sample application is back to failing:

```text
$ kubectl get pod
NAME                               READY   STATUS    RESTARTS   AGE
kotsadm-5bbf54df86-8ws98           1/1     Running   0          12m
kotsadm-api-cbccb97ff-r7mz6        1/1     Running   2          12m
kotsadm-minio-0                    1/1     Running   0          12m
kotsadm-operator-84477b5c4-4gmbm   1/1     Running   0          12m
kotsadm-postgres-0                 1/1     Running   0          12m
pg-consumer-6bd78594d-n7nmw        0/1     Error     2          29s
```

You'll note that it is failing, but it is still using our hardcoded environment variables, not the user-entered config:

```text
$ kubectl logs -l app=pg-consumer
psql: could not translate host name "postgres" to address: Name or service not known
```

In the next step, we'll wire the end-user configuration values into our service.

### Mapping User Inputs

To map the user-supplied configuration, we'll start by expanding the secret we created before, adding fields for additional variables and using `{{repl if ... }}` blocks to switch between the embedded and external contexts.

To start, you can add a field for the hostname, using Base64Encode. You must use a single line, as shown in the following example:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: postgres
data:
  DB_PASSWORD: '{{repl ConfigOption "embedded_postgres_password" | Base64Encode }}'
  DB_HOST: {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" }}{{repl Base64Encode "postgres" }}{{repl else}}{{repl ConfigOption "external_postgres_host" | Base64Encode }}{{repl end}}
```

Now that we have the value in our Secret, we can modify our deployment to consume it.
-Replace this text: - -```yaml - - name: DB_HOST - value: postgres -``` - -with this text: - -```yaml - - name: DB_HOST - valueFrom: - secretKeyRef: - name: postgres - key: DB_HOST -``` - -Your full deployment should look something like the following YAML file: - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: pg-consumer -spec: - selector: - matchLabels: - app: pg-consumer - template: - metadata: - labels: - app: pg-consumer - spec: - containers: - - name: pg-consumer - image: 'postgres:10' - # connect to the database every 20 seconds - command: - - /bin/sh - - -ec - - | - while :; do - sleep 20 - PGPASSWORD=${DB_PASSWORD} \ - psql --host ${DB_HOST} \ - --port ${DB_PORT} \ - --user ${DB_USER} \ - --dbname ${DB_NAME} \ - --command 'SELECT NOW()' - done - env: - - name: DB_HOST - valueFrom: - secretKeyRef: - name: postgres - key: DB_HOST - - name: DB_PORT - value: "5432" - - name: DB_USER - value: postgres - - name: DB_NAME - value: postgres - - name: DB_PASSWORD - valueFrom: - secretKeyRef: - name: postgres - key: DB_PASSWORD -``` - -From here, let's create and deploy a release, and verify that the secret has the customer-provided value, base64 decoding the secret contents: - -```text -$ kubectl get secret postgres -o yaml | head -n 4 -apiVersion: v1 -data: - DB_HOST: ZmFrZQ== - DB_PASSWORD: ajNVWDd1RnRfc0NkVTJqOFU3Q25xUkxRQk5fUlh3RjA= -``` - -You can verify we pulled in our user-provided config by base64-decoding the `DB_HOST` field: - -```text -$ echo ZmFrZQ== | base64 --decode -fake -``` - -Checking on our service itself, we can verify that it's now trying to connect to the `fake` hostname instead of `postgres`: - -```text -$ kubectl logs -l app=pg-consumer -psql: could not translate host name "fake" to address: Name or service not known -``` - -We'll optionally wire this to a real external Postgres database later, but for now we'll proceed to add the rest of the fields. - -### Extending this to All Fields - -Now that we've wired the DB_HOST field all the way through, we'll do the same for the other fields. 
-In the end, your Secret and Deployment should look like the following YAML files: - -```yaml -# postgres-secret.yaml -apiVersion: v1 -kind: Secret -metadata: - name: postgres -data: - DB_HOST: >- - {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}} - {{repl Base64Encode "postgres" }} - {{repl else -}} - {{repl ConfigOption "external_postgres_host" | Base64Encode }} - {{repl end}} - DB_PORT: >- - {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}} - {{repl Base64Encode "5432" }} - {{repl else -}} - {{repl ConfigOption "external_postgres_port" | Base64Encode }} - {{repl end}} - DB_USER: >- - {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}} - {{repl Base64Encode "postgres" }} - {{repl else -}} - {{repl ConfigOption "external_postgres_user" | Base64Encode }} - {{repl end}} - DB_PASSWORD: >- - {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}} - {{repl ConfigOption "embedded_postgres_password" | Base64Encode }} - {{repl else -}} - {{repl ConfigOption "external_postgres_password" | Base64Encode }} - {{repl end}} - DB_NAME: >- - {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}} - {{repl Base64Encode "postgres" }} - {{repl else -}} - {{repl ConfigOption "external_postgres_db" | Base64Encode }} - {{repl end}} -``` - -```yaml -# pg-consumer.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: pg-consumer -spec: - selector: - matchLabels: - app: pg-consumer - template: - metadata: - labels: - app: pg-consumer - spec: - containers: - - name: pg-consumer - image: 'postgres:10' - # connect to the database every 20 seconds - command: - - /bin/sh - - -ec - - | - while :; do - sleep 20 - PGPASSWORD=${DB_PASSWORD} \ - psql --host ${DB_HOST} \ - --port ${DB_PORT} \ - --user ${DB_USER} \ - --dbname ${DB_NAME} \ - --command 'SELECT NOW()' - done - env: - - name: DB_HOST - valueFrom: - secretKeyRef: - name: postgres - key: DB_HOST - - name: DB_PORT - valueFrom: - secretKeyRef: - name: postgres - key: DB_PORT - - name: DB_USER - valueFrom: - secretKeyRef: - name: postgres - key: DB_USER - - name: DB_PASSWORD - valueFrom: - secretKeyRef: - name: postgres - key: DB_PASSWORD - - name: DB_NAME - valueFrom: - secretKeyRef: - name: postgres - key: DB_NAME -``` - -Optionally, you can be extra concise and collapse each individual `env` `valueFrom` into a single `envFrom` `secretRef` entry: - -```yaml -# pg-consumer.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: pg-consumer -spec: - selector: - matchLabels: - app: pg-consumer - template: - metadata: - labels: - app: pg-consumer - spec: - containers: - - name: pg-consumer - image: 'postgres:10' - # connect to the database every 20 seconds - command: - - /bin/sh - - -ec - - | - while :; do - sleep 20 - PGPASSWORD=${DB_PASSWORD} \ - psql --host ${DB_HOST} \ - --port ${DB_PORT} \ - --user ${DB_USER} \ - --dbname ${DB_NAME} \ - --command 'SELECT NOW()' - done - envFrom: - - secretRef: - name: postgres -``` - - -After deploying this, you should see all of the fields in the secret: - -```text -$ kubectl get secret postgres -o yaml -apiVersion: v1 -data: - DB_HOST: ZmFrZQ== - DB_NAME: ZmFrZQ== - DB_PASSWORD: ZXh0cmEgZmFrZQ== - DB_PORT: NTQzMjE= - DB_USER: ZmFrZQ== -kind: Secret -# ...snip... 
```

We can also print the environment in our sample app to verify that all of the values are piped properly:

```text
$ kubectl exec $(kubectl get pod -l app=pg-consumer -o jsonpath='{.items[0].metadata.name}' ) -- /bin/sh -c 'printenv | grep DB_'
DB_PORT=54321
DB_NAME=fake
DB_PASSWORD=extra fake
DB_HOST=fake
DB_USER=fake
```

### Testing Config Changes

Now let's make some changes to the database credentials. In this case, we'll use a Postgres database provisioned in Amazon RDS, but you can use any external database.
To start, head to the "Config" screen and input your values:

![Real Postgres Values](/images/guides/kots/real-postgres-values.png)

Let's save and apply this config and check in our pod again:

```text
$ kubectl exec $(kubectl get pod -l app=pg-consumer -o jsonpath='{.items[0].metadata.name}' ) -- /bin/sh -c 'printenv | grep DB_'
DB_PORT=54321
DB_NAME=fake
DB_PASSWORD=extra fake
DB_HOST=fake
DB_USER=fake
```

Uh oh, it appears that our values did not get updated! If you've worked with Secrets before, you may know that there's a [long-standing issue in Kubernetes](https://github.com/kubernetes/kubernetes/issues/22368) where pods that load config from Secrets or ConfigMaps won't automatically restart when the underlying config is changed.
There are some tricks to make this work, and in the next step we'll implement one of them, but for now we can delete the pod to verify that the configuration is being piped through to our sample application:

```text
$ kubectl delete pod -l app=pg-consumer
pod "pg-consumer-6df9d5d7fd-bd5z6" deleted
```

If the pod is crashlooping, you might need to add `--force --grace-period 0` to force delete it.
In either case, once a new pod starts, we should now see it loading the correct config:

```text
$ kubectl exec $(kubectl get pod -l app=pg-consumer -o jsonpath='{.items[0].metadata.name}' ) -- /bin/sh -c 'printenv | grep DB_'
DB_PORT=5432
DB_NAME=postgres
DB_PASSWORD=
DB_HOST=10.128.0.12
DB_USER=postgres
```

### Triggering Restarts on Changes

To automate this restart on changes, we're going to use a hash of all database parameters to trigger a rolling update whenever the database parameters change.
We'll use a `hidden`, `readonly` field to store this in our config screen:

```yaml
      - name: external_postgres_confighash
        hidden: true
        readonly: true
        type: text
        value: '{{repl (sha256sum (print (ConfigOption "external_postgres_host") (ConfigOption "external_postgres_port") (ConfigOption "external_postgres_user") (ConfigOption "external_postgres_password") (ConfigOption "external_postgres_db") ))}}'
```

The `hidden` flag will hide it from the UI, and the `readonly` flag in this case will cause the value to be re-computed any time an upstream `ConfigOption` value changes.

Next, let's add this as an annotation to our deployment's pod template at `spec.template.metadata.annotations`:

```yaml
annotations:
  kots.io/config-hash: '{{repl ConfigOption "external_postgres_confighash"}}'
```

**Note**: There's nothing special about the `kots.io/config-hash` annotation; we could just as easily have called it `my-app-something-fake` instead.
What matters is that when the value of an annotation in the pod template changes, Kubernetes rolls out a new version of the pod, stopping the old one and thus picking up our config changes.
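To confirm the annotation is doing its job, you can save a config change in the Admin Console and then watch the Deployment roll. This is a quick sanity check, assuming the `pg-consumer` Deployment name used throughout this guide:

```text
$ kubectl rollout status deployment/pg-consumer
```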
- - -Your full deployment should now look like the following YAML file: - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: pg-consumer -spec: - selector: - matchLabels: - app: pg-consumer - template: - metadata: - labels: - app: pg-consumer - annotations: - kots.io/config-hash: '{{repl ConfigOption "external_postgres_confighash"}}' - spec: - containers: - - name: pg-consumer - image: 'postgres:10' - # connect to the database every 20 seconds - command: - - /bin/sh - - -ec - - | - while :; do - sleep 20 - PGPASSWORD=${DB_PASSWORD} \ - psql --host ${DB_HOST} \ - --port ${DB_PORT} \ - --user ${DB_USER} \ - --dbname ${DB_NAME} \ - --command 'SELECT NOW()' - done - envFrom: - - secretRef: - name: postgres -``` - - -### Integrating a Real Database - -If you'd like at this point, you can integrate a real database in your environment, just fill out your configuration fields. You'll know you did it right if your pg-consumer pod can connect. - -================ -File: docs/vendor/tutorial-cli-create-app.mdx -================ -# Step 2: Create an Application - -After you install the Replicated CLI and create an API token, you can use the CLI to create a new application. - -To create an application: - -1. Run the following command to create an application named `cli-tutorial`: - - ``` - replicated app create cli-tutorial - ``` - - **Example output**: - - ``` - ID NAME SLUG SCHEDULER - 2GmY... cli-tutorial cli-tutorial kots - ``` - -1. Export the application slug in the output of the `app create` command as an environment variable: - - ``` - export REPLICATED_APP=YOUR_SLUG - ``` - Replace `YOUR_SLUG` with the slug for the application you created in the previous step. - -1. Verify that both the `REPLICATED_API_TOKEN` environment variable that you created as part of [Step 1: Install the Replicated CLI](tutorial-cli-install-cli) and the `REPLICATED_APP` environment variable are set correctly: - - ``` - replicated release ls - ``` - - In the output of this command, you now see an empty list of releases for the application: - - ``` - SEQUENCE CREATED EDITED ACTIVE_CHANNELS - ``` - -## Next Step - -Continue to [Step 3: Get the Sample Manifests](tutorial-cli-manifests) to download the manifest files for a sample Kubernetes application. You will use these manifest files to create the first release for the `cli-tutorial` application. - -================ -File: docs/vendor/tutorial-cli-create-customer.mdx -================ -# Step 5: Create a Customer - -After promoting the first release for the `cli-tutorial` application, create a customer so that you can install the application. - -A _customer_ is an object in the Vendor Portal that represents a single licensed user of your application. When you create a customer, you define entitlement information for the user, and the Vendor Portal generates a YAML license file for the customer that you can download. - -When you install the application later in this tutorial, you will upload the license file that you create in this step to allow KOTS to create the application containers. - -To create a customer and download the license file: - -1. From the `replicated-cli-tutorial` directory, create a license for a customer named `Some-Big-Bank` that is assigned to the Unstable channel and expires in 10 days: - - ``` - replicated customer create \ - --name "Some-Big-Bank" \ - --expires-in "240h" \ - --channel "Unstable" - ``` - The Unstable channel is the channel where you promoted the release in [Step 4: Create a Release](tutorial-cli-create-release). 
Assigning the customer to a channel allows them to install the releases that are promoted to that channel.

   **Example output:**

   ```
   ID                            NAME            CHANNELS         EXPIRES                          TYPE
   2GuB3VYLjU5t9vNDK6byjgiTKUs   Some-Big-Bank   Unstable         2022-11-10 14:59:49 +0000 UTC    dev
   ```

1. Verify the customer creation details:

   ```
   replicated customer ls
   ```

   **Example output:**

   ```
   ID                            NAME            CHANNELS         EXPIRES                          TYPE
   2GuB3VYLjU5t9vNDK6byjgiTKUs   Some-Big-Bank   Unstable         2022-11-10 14:59:49 +0000 UTC    dev
   ```

1. Download the license file for the customer that you just created:

   ```
   replicated customer download-license \
     --customer "Some-Big-Bank"
   ```

   The license downloads to `stdout`.

   **Example output**:

   ```
   apiVersion: kots.io/v1beta1
   kind: License
   metadata:
     name: some-big-bank
   spec:
     appSlug: cli-tutorial
     channelID: 2GmYFUFzj8JOSLYw0jAKKJKFua8
     channelName: Unstable
     customerName: Some-Big-Bank
     endpoint: https://replicated.app
     entitlements:
       expires_at:
         description: License Expiration
         title: Expiration
         value: "2022-11-10T14:59:49Z"
         valueType: String
     isNewKotsUiEnabled: true
     licenseID: 2GuB3ZLQsU38F5SX3n03x8qBzeL
     licenseSequence: 1
     licenseType: dev
     signature: eyJsaW...
   ```

1. Rename the license file and save it to your Desktop folder:

   ```
   export LICENSE_FILE=~/Desktop/Some-Big-Bank-${REPLICATED_APP}-license.yaml
   replicated customer download-license --customer "Some-Big-Bank" > "${LICENSE_FILE}"
   ```

1. Verify that the license was written properly using either `cat` or `head`:

   ```
   head ${LICENSE_FILE}
   ```

   **Example output**:

   ```
   apiVersion: kots.io/v1beta1
   kind: License
   metadata:
     name: some-big-bank
   spec:
     appSlug: cli-tutorial
     channelID: 2GmYFUFzj8JOSLYw0jAKKJKFua8
     channelName: Unstable
     customerName: Some-Big-Bank
     endpoint: https://replicated.app
   ```

## Next Step

Continue to [Step 6: Install KOTS and the Application](tutorial-cli-install-app-manager) to get the installation commands from the Unstable channel, then install the KOTS components and the sample application in your cluster.

================
File: docs/vendor/tutorial-cli-create-new-version.mdx
================
# Step 8: Create a New Version

In this step, you make an edit to the Config custom resource manifest file in the `replicated-cli-tutorial/manifests` directory for the `cli-tutorial` application to create a new field on the **Config** page in the Admin Console. You will then create and promote a new release to the Unstable channel with your changes.

To create and promote a new version of the application:

1. In your local directory, go to the `replicated-cli-tutorial/manifests` folder and open the `kots-config.yaml` file in a text editor.

1. Copy and paste the following YAML into the file under the `example_default_value` field to create a new text field on the **Config** page:

   ```yaml
       - name: more_text
         title: Another Text Example
         type: text
         value: ""
         when: repl{{ ConfigOptionEquals "show_text_inputs" "1" }}
   ```
   The following shows the full YAML for the `kots-config.yaml` file after you add the new field:

   ```yaml
   ---
   apiVersion: kots.io/v1beta1
   kind: Config
   metadata:
     name: config-sample
   spec:
     groups:
       - name: example_settings
         title: My Example Config
         description: Configuration to serve as an example for creating your own. See [https://kots.io/reference/v1beta1/config/](https://kots.io/reference/v1beta1/config/) for configuration docs.
           In this case, we provide example fields for configuring an Nginx welcome page.
         items:
         - name: show_text_inputs
           title: Customize Text Inputs
           help_text: "Show custom user text inputs"
           type: bool
           default: "0"
           recommended: true
         - name: example_default_value
           title: Text Example (with default value)
           type: text
           value: ""
           default: please change this value
           when: repl{{ ConfigOptionEquals "show_text_inputs" "1" }}
         # Add the new more_text field here
         - name: more_text
           title: Another Text Example
           type: text
           value: ""
           when: repl{{ ConfigOptionEquals "show_text_inputs" "1" }}
         - name: api_token
           title: API token
           type: password
           props:
             rows: 5
           when: repl{{ ConfigOptionEquals "show_text_inputs" "1" }}
         - name: readonly_text_left
           title: Readonly Text
           type: text
           value: "{{repl RandomString 10}}"
           readonly: true
           when: repl{{ ConfigOptionEquals "show_text_inputs" "1" }}
         - name: hidden_text
           title: Secret Key
           type: password
           hidden: true
           value: "{{repl RandomString 40}}"
   ```

1. Open the `example-configmap.yaml` file.

1. In the `example-configmap.yaml` file, copy and paste the following HTML to replace the `<body>` section:

   ```
   <body>
     This is an example KOTS application.

     This is text from a user config value: '{{repl ConfigOption "example_default_value"}}'

     This is more text from a user config value: '{{repl ConfigOption "more_text"}}'

     This is a hidden value: '{{repl ConfigOption "hidden_text"}}'
   </body>
    - - ``` - This creates a reference to the `more_text` field using a Replicated KOTS template function. The ConfigOption template function renders the user input from the configuration item that you specify. For more information, see [Config Context](/reference/template-functions-config-context) in _Reference_. - -1. Save the changes to both YAML files. - -1. Change to the root `replicated-cli-tutorial` directory, then run the following command to verify that there are no errors in the YAML: - - ``` - replicated release lint --yaml-dir=manifests - ``` - -1. Create a new release and promote it to the Unstable channel: - - ``` - replicated release create --auto - ``` - - **Example output**: - - ``` - • Reading manifests from ./manifests ✓ - • Creating Release ✓ - • SEQUENCE: 2 - • Promoting ✓ - • Channel 2GxpUm7lyB2g0ramqUXqjpLHzK0 successfully set to release 2 - ``` - -1. Type `y` and press **Enter** to continue with the defaults. - - **Example output**: - - ``` - RULE TYPE FILENAME LINE MESSAGE - - • Reading manifests from ./manifests ✓ - • Creating Release ✓ - • SEQUENCE: 2 - • Promoting ✓ - • Channel 2GmYFUFzj8JOSLYw0jAKKJKFua8 successfully set to release 2 - ``` - - The release is created and promoted to the Unstable channel with `SEQUENCE: 2`. - -1. Verify that the release was promoted to the Unstable channel: - - ``` - replicated release ls - ``` - **Example output**: - - ``` - SEQUENCE CREATED EDITED ACTIVE_CHANNELS - 2 2022-11-03T19:16:24Z 0001-01-01T00:00:00Z Unstable - 1 2022-11-03T18:49:13Z 0001-01-01T00:00:00Z - ``` - -## Next Step - -Continue to [Step 9: Update the Application](tutorial-cli-update-app) to return to the Admin Console and update the application to the new version that you promoted. - -================ -File: docs/vendor/tutorial-cli-create-release.mdx -================ -# Step 4: Create a Release - -Now that you have the manifest files for the sample Kubernetes application, you can create a release for the `cli-tutorial` application and promote the release to the Unstable channel. - -By default, the Vendor Portal includes Unstable, Beta, and Stable release channels. The Unstable channel is intended for software vendors to use for internal testing, before promoting a release to the Beta or Stable channels for distribution to customers. For more information about channels, see [About Channels and Releases](releases-about). - -To create and promote a release to the Unstable channel: - -1. From the `replicated-cli-tutorial` directory, lint the application manifest files and ensure that there are no errors in the YAML: - - ``` - replicated release lint --yaml-dir=manifests - ``` - - If there are no errors, an empty list is displayed with a zero exit code: - - ```text - RULE TYPE FILENAME LINE MESSAGE - ``` - - For a complete list of the possible error, warning, and informational messages that can appear in the output of the `release lint` command, see [Linter Rules](/reference/linter). - -1. Initialize the project as a Git repository: - - ``` - git init - git add . - git commit -m "Initial Commit: CLI Tutorial" - ``` - - Initializing the project as a Git repository allows you to track your history. The Replicated CLI also reads Git metadata to help with the generation of release metadata, such as version labels. - -1. From the `replicated-cli-tutorial` directory, create a release with the default settings: - - ``` - replicated release create --auto - ``` - - The `--auto` flag generates release notes and metadata based on the Git status. 
   **Example output:**

   ```
   • Reading Environment ✓

   Prepared to create release with defaults:

       yaml-dir        "./manifests"
       promote         "Unstable"
       version         "Unstable-ba710e5"
       release-notes   "CLI release of master triggered by exampleusername [SHA: d4173a4] [31 Oct 22 08:51 MDT]"
       ensure-channel  true
       lint-release    true

   Create with these properties? [Y/n]
   ```

1. Type `y` and press **Enter** to confirm the prompt.

   **Example output:**

   ```text
   • Reading manifests from ./manifests ✓
   • Creating Release ✓
     • SEQUENCE: 1
   • Promoting ✓
     • Channel VEr0nhJBBUdaWpPvOIK-SOryKZEwa3Mg successfully set to release 1
   ```
   The release is created and promoted to the Unstable channel.

1. Verify that the release was promoted to the Unstable channel:

   ```
   replicated release ls
   ```

   **Example output:**

   ```text
   SEQUENCE    CREATED                 EDITED                  ACTIVE_CHANNELS
   1           2022-10-31T14:55:35Z    0001-01-01T00:00:00Z    Unstable
   ```

## Next Step

Continue to [Step 5: Create a Customer](tutorial-cli-create-customer) to create a customer license file that you will upload when installing the application.

| Instance Type | VCPUs | Memory (GiB) | USD/Credit per hour |
|---------------|-------|--------------|---------------------|
| m6i.2xlarge | 8 | 32 | $0.461 |
| m6i.4xlarge | 16 | 64 | $0.922 |
| m6i.8xlarge | 32 | 128 | $1.843 |
| m7i.large | 2 | 8 | $0.121 |
| m7i.xlarge | 4 | 16 | $0.242 |
| m7i.2xlarge | 8 | 32 | $0.484 |
| m7i.4xlarge | 16 | 64 | $0.968 |
| m7i.8xlarge | 32 | 128 | $1.935 |
| m5.large | 2 | 8 | $0.115 |
| m5.xlarge | 4 | 16 | $0.230 |
| m5.2xlarge | 8 | 32 | $0.461 |
| m5.4xlarge | 16 | 64 | $0.922 |
| m5.8xlarge | 32 | 128 | $1.843 |
| m7g.large | 2 | 8 | $0.098 |
| m7g.xlarge | 4 | 16 | $0.195 |
| m7g.2xlarge | 8 | 32 | $0.392 |
| m7g.4xlarge | 16 | 64 | $0.784 |
| m7g.8xlarge | 32 | 128 | $1.567 |
| c5.large | 2 | 4 | $0.102 |
| c5.xlarge | 4 | 8 | $0.204 |
| c5.2xlarge | 8 | 16 | $0.408 |
| c5.4xlarge | 16 | 32 | $0.816 |
| c5.9xlarge | 36 | 72 | $1.836 |
| g4dn.xlarge | 4 | 16 | $0.631 |
| g4dn.2xlarge | 8 | 32 | $0.902 |
| g4dn.4xlarge | 16 | 64 | $1.445 |
| g4dn.8xlarge | 32 | 128 | $2.611 |
| g4dn.12xlarge | 48 | 192 | $4.964 |
| g4dn.16xlarge | 64 | 256 | $5.222 |

### GCP GKE Cluster Pricing

GCP clusters will be charged GCP list pricing plus a markup of 20%. Note that the markup will be calculated at the rounded price per hour in order to make hourly prices fixed.

| Instance Type | VCPUs | Memory (GiB) | USD/Credit per hour |
|---------------|-------|--------------|---------------------|
| n2-standard-2 | 2 | 8 | $0.117 |
| n2-standard-4 | 4 | 16 | $0.233 |
| n2-standard-8 | 8 | 32 | $0.466 |
| n2-standard-16 | 16 | 64 | $0.932 |
| n2-standard-32 | 32 | 128 | $1.865 |
| t2a-standard-2 | 2 | 8 | $0.092 |
| t2a-standard-4 | 4 | 16 | $0.185 |
| t2a-standard-8 | 8 | 32 | $0.370 |
| t2a-standard-16 | 16 | 64 | $0.739 |
| t2a-standard-32 | 32 | 128 | $1.478 |
| t2a-standard-48 | 48 | 192 | $2.218 |
| e2-standard-2 | 2 | 8 | $0.081 |
| e2-standard-4 | 4 | 16 | $0.161 |
| e2-standard-8 | 8 | 32 | $0.322 |
| e2-standard-16 | 16 | 64 | $0.643 |
| e2-standard-32 | 32 | 128 | $1.287 |
| n1-standard-1+nvidia-tesla-t4+1 | 1 | 3.75 | $0.321 |
| n1-standard-1+nvidia-tesla-t4+2 | 1 | 3.75 | $0.585 |
| n1-standard-1+nvidia-tesla-t4+4 | 1 | 3.75 | $1.113 |
| n1-standard-2+nvidia-tesla-t4+1 | 2 | 7.50 | $0.378 |
| n1-standard-2+nvidia-tesla-t4+2 | 2 | 7.50 | $0.642 |
| n1-standard-2+nvidia-tesla-t4+4 | 2 | 7.50 | $1.170 |
| n1-standard-4+nvidia-tesla-t4+1 | 4 | 15 | $0.492 |
| n1-standard-4+nvidia-tesla-t4+2 | 4 | 15 | $0.756 |
| n1-standard-4+nvidia-tesla-t4+4 | 4 | 15 | $1.284 |
| n1-standard-8+nvidia-tesla-t4+1 | 8 | 30 | $0.720 |
| n1-standard-8+nvidia-tesla-t4+2 | 8 | 30 | $0.984 |
| n1-standard-8+nvidia-tesla-t4+4 | 8 | 30 | $1.512 |
| n1-standard-16+nvidia-tesla-t4+1 | 16 | 60 | $1.176 |
| n1-standard-16+nvidia-tesla-t4+2 | 16 | 60 | $1.440 |
| n1-standard-16+nvidia-tesla-t4+4 | 16 | 60 | $1.968 |
| n1-standard-32+nvidia-tesla-t4+1 | 32 | 120 | $2.088 |
| n1-standard-32+nvidia-tesla-t4+2 | 32 | 120 | $2.352 |
| n1-standard-32+nvidia-tesla-t4+4 | 32 | 120 | $2.880 |
| n1-standard-64+nvidia-tesla-t4+1 | 64 | 240 | $3.912 |
| n1-standard-64+nvidia-tesla-t4+2 | 64 | 240 | $4.176 |
| n1-standard-64+nvidia-tesla-t4+4 | 64 | 240 | $4.704 |
| n1-standard-96+nvidia-tesla-t4+1 | 96 | 360 | $5.736 |
| n1-standard-96+nvidia-tesla-t4+2 | 96 | 360 | $6.000 |
| n1-standard-96+nvidia-tesla-t4+4 | 96 | 360 | $6.528 |

### Azure AKS Cluster Pricing

Azure clusters will be charged Azure list pricing plus a markup of 20%. Note that the markup will be calculated at the rounded price per hour in order to make hourly prices fixed.

| Instance Type | VCPUs | Memory (GiB) | Rate | List Price | USD/Credit per hour |
|---------------|-------|--------------|------|------------|---------------------|
| Standard_B2ms | 2 | 8 | 8320 | $0.083 | $0.100 |
| Standard_B4ms | 4 | 16 | 16600 | $0.166 | $0.199 |
| Standard_B8ms | 8 | 32 | 33300 | $0.333 | $0.400 |
| Standard_B16ms | 16 | 64 | 66600 | $0.666 | $0.799 |
| Standard_DS2_v2 | 2 | 7 | 14600 | $0.146 | $0.175 |
| Standard_DS3_v2 | 4 | 14 | 29300 | $0.293 | $0.352 |
| Standard_DS4_v2 | 8 | 28 | 58500 | $0.585 | $0.702 |
| Standard_DS5_v2 | 16 | 56 | 117000 | $1.170 | $1.404 |
| Standard_D2ps_v5 | 2 | 8 | 14600 | $0.077 | $0.092 |
| Standard_D4ps_v5 | 4 | 16 | 7700 | $0.154 | $0.185 |
| Standard_D8ps_v5 | 8 | 32 | 15400 | $0.308 | $0.370 |
| Standard_D16ps_v5 | 16 | 64 | 30800 | $0.616 | $0.739 |
| Standard_D32ps_v5 | 32 | 128 | 61600 | $1.232 | $1.478 |
| Standard_D48ps_v5 | 48 | 192 | 23200 | $1.848 | $2.218 |
| Standard_NC4as_T4_v3 | 4 | 28 | 52600 | $0.526 | $0.631 |
| Standard_NC8as_T4_v3 | 8 | 56 | 75200 | $0.752 | $0.902 |
| Standard_NC16as_T4_v3 | 16 | 110 | 120400 | $1.204 | $1.445 |
| Standard_NC64as_T4_v3 | 64 | 440 | 435200 | $4.352 | $5.222 |
| Standard_D2S_v5 | 2 | 8 | 9600 | $0.096 | $0.115 |
| Standard_D4S_v5 | 4 | 16 | 19200 | $0.192 | $0.230 |
| Standard_D8S_v5 | 8 | 32 | 38400 | $0.384 | $0.461 |
| Standard_D16S_v5 | 16 | 64 | 76800 | $0.768 | $0.922 |
| Standard_D32S_v5 | 32 | 128 | 153600 | $1.536 | $1.843 |
| Standard_D64S_v5 | 64 | 192 | 230400 | $2.304 | $2.765 |

### Oracle OKE Cluster Pricing

Oracle based clusters will be charged Oracle list pricing plus a markup of 20%. Note that the markup will be calculated at the rounded price per hour in order to make hourly prices fixed.

| Instance Type | VCPUs | Memory (GiB) | USD/Credit per hour |
|---------------|-------|--------------|---------------------|
| VM.Standard2.1 | 1 | 15 | $0.076 |
| VM.Standard2.2 | 2 | 30 | $0.153 |
| VM.Standard2.4 | 4 | 60 | $0.306 |
| VM.Standard2.8 | 8 | 120 | $0.612 |
| VM.Standard2.16 | 16 | 240 | $1.225 |
| VM.Standard3Flex.1 | 1 | 4 | $0.055 |
| VM.Standard3Flex.2 | 2 | 8 | $0.110 |
| VM.Standard3Flex.4 | 4 | 16 | $0.221 |
| VM.Standard3Flex.8 | 8 | 32 | $0.442 |
| VM.Standard3Flex.16 | 16 | 64 | $0.883 |
| VM.Standard.A1.Flex.1 | 1 | 4 | $0.019 |
| VM.Standard.A1.Flex.2 | 2 | 8 | $0.038 |
| VM.Standard.A1.Flex.4 | 4 | 16 | $0.077 |
| VM.Standard.A1.Flex.8 | 8 | 32 | $0.154 |
| VM.Standard.A1.Flex.16 | 16 | 64 | $0.309 |

Last modified January 06, 2025

================
File: docs/vendor/testing-supported-clusters.md
================
import Pool from "../partials/cmx/\_openshift-pool.mdx"

# Supported Compatibility Matrix Cluster Types

This topic describes the supported Kubernetes distributions, Kubernetes versions, instance types, nodes, limitations, and common use cases for clusters created with Replicated Compatibility Matrix.

Compatibility Matrix provisions cloud-based or virtual machine (VM) clusters.

## VM Clusters

This section lists the supported VM cluster distributions for clusters created with Compatibility Matrix.

### kind

Compatibility Matrix supports creating [kind](https://kind.sigs.k8s.io/) clusters.
| Type | Description |
|------|-------------|
| Supported Kubernetes Versions | {/* START_kind_VERSIONS */}1.26.15, 1.27.16, 1.28.15, 1.29.14, 1.30.10, 1.31.6, 1.32.2{/* END_kind_VERSIONS */} |
| Supported Instance Types | See Replicated Instance Types |
| Node Groups | No |
| Node Auto Scaling | No |
| Nodes | Supports a single node. |
| IP Family | Supports `ipv4` or `dual`. |
| Limitations | See Limitations |
| Common Use Cases | Smoke tests |
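For example, a single-node kind cluster for a quick smoke test might be requested as follows. This is a sketch only; it assumes the Compatibility Matrix `replicated cluster create` command with `--distribution`, `--version`, and `--ttl` flags (check `replicated cluster create --help` for the exact flags in your CLI version):

```
replicated cluster create --distribution kind --version 1.32.2 --ttl 1h
```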
    - The **Version history** tab opens. +### k3s -1. Click **Deploy** for the new version. Then click **Yes, deploy** in the confirmation dialog. +Compatibility Matrix supports creating [k3s](https://k3s.io) clusters. - ![Admin Console configuration tab](/images/tutorials/tutorial-install-version-history.png) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Type | Description |
|------|-------------|
| Supported k3s Versions | The upstream k8s version that matches the Kubernetes version requested. |
| Supported Kubernetes Versions | {/* START_k3s_VERSIONS */}1.24.1, 1.24.2, 1.24.3, 1.24.4, 1.24.6, 1.24.7, 1.24.8, 1.24.9, 1.24.10, 1.24.11, 1.24.12, 1.24.13, 1.24.14, 1.24.15, 1.24.16, 1.24.17, 1.25.0, 1.25.2, 1.25.3, 1.25.4, 1.25.5, 1.25.6, 1.25.7, 1.25.8, 1.25.9, 1.25.10, 1.25.11, 1.25.12, 1.25.13, 1.25.14, 1.25.15, 1.25.16, 1.26.0, 1.26.1, 1.26.2, 1.26.3, 1.26.4, 1.26.5, 1.26.6, 1.26.7, 1.26.8, 1.26.9, 1.26.10, 1.26.11, 1.26.12, 1.26.13, 1.26.14, 1.26.15, 1.27.1, 1.27.2, 1.27.3, 1.27.4, 1.27.5, 1.27.6, 1.27.7, 1.27.8, 1.27.9, 1.27.10, 1.27.11, 1.27.12, 1.27.13, 1.27.14, 1.27.15, 1.27.16, 1.28.1, 1.28.2, 1.28.3, 1.28.4, 1.28.5, 1.28.6, 1.28.7, 1.28.8, 1.28.9, 1.28.10, 1.28.11, 1.28.12, 1.28.13, 1.28.14, 1.28.15, 1.29.0, 1.29.1, 1.29.2, 1.29.3, 1.29.4, 1.29.5, 1.29.6, 1.29.7, 1.29.8, 1.29.9, 1.29.10, 1.29.11, 1.29.12, 1.29.13, 1.29.14, 1.30.0, 1.30.1, 1.30.2, 1.30.3, 1.30.4, 1.30.5, 1.30.6, 1.30.7, 1.30.8, 1.30.9, 1.30.10, 1.31.0, 1.31.1, 1.31.2, 1.31.3, 1.31.4, 1.31.5, 1.31.6, 1.32.0, 1.32.1, 1.32.2{/* END_k3s_VERSIONS */} |
| Supported Instance Types | See Replicated Instance Types |
| Node Groups | Yes |
| Node Auto Scaling | No |
| Nodes | Supports multiple nodes. |
| IP Family | Supports `ipv4`. |
| Limitations | For additional limitations that apply to all distributions, see Limitations. |
| Common Use Cases | • Smoke tests<br/>• Customer release tests |

### RKE2 (Beta)

Compatibility Matrix supports creating [RKE2](https://docs.rke2.io/) clusters.
| Type | Description |
|------|-------------|
| Supported RKE2 Versions | The upstream k8s version that matches the Kubernetes version requested. |
| Supported Kubernetes Versions | {/* START_rke2_VERSIONS */}1.24.1, 1.24.2, 1.24.3, 1.24.4, 1.24.6, 1.24.7, 1.24.8, 1.24.9, 1.24.10, 1.24.11, 1.24.12, 1.24.13, 1.24.14, 1.24.15, 1.24.16, 1.24.17, 1.25.0, 1.25.2, 1.25.3, 1.25.4, 1.25.5, 1.25.6, 1.25.7, 1.25.8, 1.25.9, 1.25.10, 1.25.11, 1.25.12, 1.25.13, 1.25.14, 1.25.15, 1.25.16, 1.26.0, 1.26.1, 1.26.2, 1.26.3, 1.26.4, 1.26.5, 1.26.6, 1.26.7, 1.26.8, 1.26.9, 1.26.10, 1.26.11, 1.26.12, 1.26.13, 1.26.14, 1.26.15, 1.27.1, 1.27.2, 1.27.3, 1.27.4, 1.27.5, 1.27.6, 1.27.7, 1.27.8, 1.27.9, 1.27.10, 1.27.11, 1.27.12, 1.27.13, 1.27.14, 1.27.15, 1.27.16, 1.28.2, 1.28.3, 1.28.4, 1.28.5, 1.28.6, 1.28.7, 1.28.8, 1.28.9, 1.28.10, 1.28.11, 1.28.12, 1.28.13, 1.28.14, 1.28.15, 1.29.0, 1.29.1, 1.29.2, 1.29.3, 1.29.4, 1.29.5, 1.29.6, 1.29.7, 1.29.8, 1.29.9, 1.29.10, 1.29.11, 1.29.12, 1.29.13, 1.29.14, 1.30.0, 1.30.1, 1.30.2, 1.30.3, 1.30.4, 1.30.5, 1.30.6, 1.30.7, 1.30.8, 1.30.9, 1.30.10, 1.31.0, 1.31.1, 1.31.2, 1.31.3, 1.31.4, 1.31.5, 1.31.6, 1.32.0, 1.32.1, 1.32.2{/* END_rke2_VERSIONS */} |
| Supported Instance Types | See Replicated Instance Types |
| Node Groups | Yes |
| Node Auto Scaling | No |
| Nodes | Supports multiple nodes. |
| IP Family | Supports `ipv4`. |
| Limitations | For additional limitations that apply to all distributions, see Limitations. |
| Common Use Cases | • Smoke tests<br/>• Customer release tests |

### OpenShift OKD

Compatibility Matrix supports creating [Red Hat OpenShift OKD](https://www.okd.io/) clusters, which is the community distribution of OpenShift, using CodeReady Containers (CRC).

OpenShift clusters are provisioned with two users:

- (Default) A `kubeadmin` user with `cluster-admin` privileges. Use the `kubeadmin` user only for administrative tasks such as creating new users or setting roles.
- A `developer` user with namespace-scoped privileges. The `developer` user can be used to better simulate access in end-customer environments.

By default, kubeconfig context is set to the `kubeadmin` user. To switch to the `developer` user, run the command `oc login --username developer`.
| Type | Description |
|------|-------------|
| Supported OpenShift Versions | {/* START_openshift_VERSIONS */}4.10.0-okd, 4.11.0-okd, 4.12.0-okd, 4.13.0-okd, 4.14.0-okd, 4.15.0-okd, 4.16.0-okd, 4.17.0-okd{/* END_openshift_VERSIONS */} |
| Supported Instance Types | See Replicated Instance Types |
| Node Groups | Yes |
| Node Auto Scaling | No |
| Nodes | Supports multiple nodes for versions 4.13.0-okd and later. |
| IP Family | Supports `ipv4`. |
| Limitations | • OpenShift does not support r1.small instance types.<br/>• OpenShift versions earlier than 4.13-okd do not have a registry mirror and so may be subject to rate limiting from Docker Hub. For information about Docker Hub rate limiting, see Docker Hub rate limit. To increase limits, Replicated recommends that you configure an image pull secret to pull public Docker Hub images as an authenticated user. For more information about how to configure image pull secrets, see Pull an Image from a Private Registry in the Kubernetes documentation.<br/>• OpenShift builds take approximately 17 minutes.<br/><br/>For additional limitations that apply to all distributions, see Limitations. |
| Common Use Cases | Customer release tests |
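A hedged example of requesting one of the listed versions (again assuming the `replicated cluster create` flags shown earlier; note the roughly 17-minute build time called out above, and that r1.small is not supported for OpenShift):

```
replicated cluster create --distribution openshift --version 4.17.0-okd --instance-type r1.medium
```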
    -To install KOTS and the application: +### Embedded Cluster -1. From the `replicated-cli-tutorial` directory, run the following command to get the installation commands for the Unstable channel, where you promoted the release for the `cli-tutorial` application: +Compatibility Matrix supports creating clusters with Replicated Embedded Cluster. For more information, see [Embedded Cluster Overview](/vendor/embedded-overview). - ``` - replicated channel inspect Unstable - ``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Type | Description |
|------|-------------|
| Supported Embedded Cluster Versions | Any valid release sequence that has previously been promoted to the channel where the customer license is assigned. Version is optional and defaults to the latest available release on the channel. |
| Supported Instance Types | See Replicated Instance Types |
| Node Groups | Yes |
| Nodes | Supports multiple nodes (alpha). |
| IP Family | Supports `ipv4`. |
| Limitations | • The Admin Console UI is not exposed publicly and must be exposed via `kubectl -n kotsadm port-forward svc/kurl-proxy-kotsadm 38800:8800`. The password for the Admin Console is `password`.<br/>• A valid customer license is required to create an Embedded Cluster.<br/>• The [cluster prepare](/vendor/testing-how-to#prepare-clusters) command is not supported.<br/><br/>For additional limitations that apply to all distributions, see Limitations. |
| Common Use Cases | Customer release tests |
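Once an Embedded Cluster is running, the Admin Console can be reached through the port-forward noted in the limitations above. For example:

```
kubectl -n kotsadm port-forward svc/kurl-proxy-kotsadm 38800:8800
# then open http://localhost:38800 and log in with the password `password`
```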
    - **Example output:** +### kURL - ``` - ID: 2GmYFUFzj8JOSLYw0jAKKJKFua8 - NAME: Unstable - DESCRIPTION: - RELEASE: 1 - VERSION: Unstable-d4173a4 - EXISTING: +Compatibility Matrix supports creating [kURL](https://kurl.sh) clusters. - curl -fsSL https://kots.io/install | bash - kubectl kots install cli-tutorial/unstable + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Type | Description |
|------|-------------|
| Supported kURL Versions | Any promoted kURL installer. Version is optional. For an installer version other than "latest", you can find the specific Installer ID for a previously promoted installer under the relevant **Install Command** (ID after kurl.sh/) on the **Channels > kURL Installer History** page in the Vendor Portal. For more information about viewing the history of kURL installers promoted to a channel, see [Installer History](/vendor/installer-history). |
| Supported Instance Types | See Replicated Instance Types |
| Node Groups | Yes |
| Node Auto Scaling | No |
| Nodes | Supports multiple nodes. |
| IP Family | Supports `ipv4`. |
| Limitations | Does not work with the Longhorn add-on.<br/><br/>For additional limitations that apply to all distributions, see Limitations. |
| Common Use Cases | Customer release tests |
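As a sketch, a kURL cluster built from the latest promoted installer might be requested as follows (this assumes `kurl` is the distribution name accepted by `replicated cluster create`; the version can be omitted to use "latest"):

```
replicated cluster create --distribution kurl --instance-type r1.medium
```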
    - EMBEDDED: +## Cloud Clusters - curl -fsSL https://k8s.kurl.sh/cli-tutorial-unstable | sudo bash +This section lists the supported cloud clusters for compatibility testing. - AIRGAP: +### EKS - curl -fSL -o cli-tutorial-unstable.tar.gz https://k8s.kurl.sh/bundle/cli-tutorial-unstable.tar.gz - # ... scp or sneakernet cli-tutorial-unstable.tar.gz to airgapped machine, then - tar xvf cli-tutorial-unstable.tar.gz - sudo bash ./install.sh airgap - ``` - This command prints information about the channel, including the commands for installing in: - * An existing cluster - * An _embedded cluster_ created by Replicated kURL - * An air gap cluster that is not connected to the internet +Compatibility Matrix supports creating [AWS EKS](https://aws.amazon.com/eks/?nc2=type_a) clusters. -1. If you have not already, configure kubectl access to the cluster you provisioned as part of [Set Up the Environment](tutorial-cli-setup#set-up-the-environment). For more information about setting the context for kubectl, see [Command line tool (kubectl)](https://kubernetes.io/docs/reference/kubectl/) in the Kubernetes documentation. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Type | Description |
|------|-------------|
| Supported Kubernetes Versions | {/* START_eks_VERSIONS */}1.25, 1.26, 1.27, 1.28, 1.29, 1.30, 1.31, 1.32{/* END_eks_VERSIONS */}<br/><br/>Extended Support Versions: 1.25, 1.26, 1.27, 1.28 |
| Supported Instance Types | m6i.large, m6i.xlarge, m6i.2xlarge, m6i.4xlarge, m6i.8xlarge, m7i.large, m7i.xlarge, m7i.2xlarge, m7i.4xlarge, m7i.8xlarge, m5.large, m5.xlarge, m5.2xlarge, m5.4xlarge, m5.8xlarge, m7g.large (arm), m7g.xlarge (arm), m7g.2xlarge (arm), m7g.4xlarge (arm), m7g.8xlarge (arm), c5.large, c5.xlarge, c5.2xlarge, c5.4xlarge, c5.9xlarge, g4dn.xlarge (gpu), g4dn.2xlarge (gpu), g4dn.4xlarge (gpu), g4dn.8xlarge (gpu), g4dn.12xlarge (gpu), g4dn.16xlarge (gpu)<br/><br/>g4dn instance types depend on available capacity. After a g4dn cluster is running, you also need to install your version of the NVIDIA device plugin for Kubernetes. See [Amazon EKS optimized accelerated Amazon Linux AMIs](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html#gpu-ami) in the AWS documentation. |
| Node Groups | Yes |
| Node Auto Scaling | Yes. Cost will be based on the max number of nodes. |
| Nodes | Supports multiple nodes. |
| IP Family | Supports `ipv4`. |
| Limitations | You can only choose a minor version, not a patch version. The EKS installer chooses the latest patch for that minor version.<br/><br/>For additional limitations that apply to all distributions, see Limitations. |
| Common Use Cases | Customer release tests |
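Because EKS supports node auto scaling and cost is based on the maximum node count, a request might bound the node range explicitly. This is a sketch only, assuming the CLI exposes `--min-nodes` and `--max-nodes` for the autoscaling bounds (check `replicated cluster create --help` for the exact flag names):

```
replicated cluster create --distribution eks --version 1.32 \
  --instance-type m6i.large --min-nodes 1 --max-nodes 3
```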
    -1. Run the `EXISTING` installation script with the following flags to automatically upload the license file and run the preflight checks at the same time you run the installation. +### GKE - **Example:** +Compatibility Matrix supports creating [Google GKE](https://cloud.google.com/kubernetes-engine) clusters. - ``` - curl -fsSL https://kots.io/install | bash - kubectl kots install cli-tutorial/unstable \ - --license-file ./LICENSE_YAML \ - --shared-password PASSWORD \ - --namespace NAMESPACE - ``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Type | Description |
|------|-------------|
| Supported Kubernetes Versions | {/* START_gke_VERSIONS */}1.29, 1.30, 1.31, 1.32{/* END_gke_VERSIONS */} |
| Supported Instance Types | n2-standard-2, n2-standard-4, n2-standard-8, n2-standard-16, n2-standard-32, t2a-standard-2 (arm), t2a-standard-4 (arm), t2a-standard-8 (arm), t2a-standard-16 (arm), t2a-standard-32 (arm), t2a-standard-48 (arm), e2-standard-2, e2-standard-4, e2-standard-8, e2-standard-16, e2-standard-32, n1-standard-1+nvidia-tesla-t4+1 (gpu), n1-standard-1+nvidia-tesla-t4+2 (gpu), n1-standard-1+nvidia-tesla-t4+4 (gpu), n1-standard-2+nvidia-tesla-t4+1 (gpu), n1-standard-2+nvidia-tesla-t4+2 (gpu), n1-standard-2+nvidia-tesla-t4+4 (gpu), n1-standard-4+nvidia-tesla-t4+1 (gpu), n1-standard-4+nvidia-tesla-t4+2 (gpu), n1-standard-4+nvidia-tesla-t4+4 (gpu), n1-standard-8+nvidia-tesla-t4+1 (gpu), n1-standard-8+nvidia-tesla-t4+2 (gpu), n1-standard-8+nvidia-tesla-t4+4 (gpu), n1-standard-16+nvidia-tesla-t4+1 (gpu), n1-standard-16+nvidia-tesla-t4+2 (gpu), n1-standard-16+nvidia-tesla-t4+4 (gpu), n1-standard-32+nvidia-tesla-t4+1 (gpu), n1-standard-32+nvidia-tesla-t4+2 (gpu), n1-standard-32+nvidia-tesla-t4+4 (gpu), n1-standard-64+nvidia-tesla-t4+1 (gpu), n1-standard-64+nvidia-tesla-t4+2 (gpu), n1-standard-64+nvidia-tesla-t4+4 (gpu), n1-standard-96+nvidia-tesla-t4+1 (gpu), n1-standard-96+nvidia-tesla-t4+2 (gpu), n1-standard-96+nvidia-tesla-t4+4 (gpu)<br/><br/>You can specify more than one node. |
| Node Groups | Yes |
| Node Auto Scaling | Yes. Cost will be based on the max number of nodes. |
| Nodes | Supports multiple nodes. |
| IP Family | Supports `ipv4`. |
| Limitations | You can choose only a minor version, not a patch version. The GKE installer chooses the latest patch for that minor version.<br/><br/>For additional limitations that apply to all distributions, see Limitations. |
| Common Use Cases | Customer release tests |
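The GPU instance names in the list above encode the accelerator and accelerator count after the machine type. A hedged example requesting a single NVIDIA T4 attached to an n1-standard-4 (same assumed CLI flags as the sketches above):

```
replicated cluster create --distribution gke --version 1.32 \
  --instance-type n1-standard-4+nvidia-tesla-t4+1
```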
    - Replace: +### AKS - - `LICENSE_YAML` with the local path to your license file. - - `PASSWORD` with a password to access the Admin Console. - - `NAMESPACE` with the namespace where KOTS and application will be installed. +Compatibility Matrix supports creating [Azure AKS](https://azure.microsoft.com/en-us/products/kubernetes-service) clusters. - When the Admin Console is ready, the script prints the `https://localhost:8800` URL where you can access the Admin Console and the `http://localhost:8888` URL where you can access the application. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Type | Description |
|------|-------------|
| Supported Kubernetes Versions | {/* START_aks_VERSIONS */}1.29, 1.30, 1.31{/* END_aks_VERSIONS */} |
| Supported Instance Types | Standard_B2ms, Standard_B4ms, Standard_B8ms, Standard_B16ms, Standard_DS2_v2, Standard_DS3_v2, Standard_DS4_v2, Standard_DS5_v2, Standard_DS2_v5, Standard_DS3_v5, Standard_DS4_v5, Standard_DS5_v5, Standard_D2ps_v5 (arm), Standard_D4ps_v5 (arm), Standard_D8ps_v5 (arm), Standard_D16ps_v5 (arm), Standard_D32ps_v5 (arm), Standard_D48ps_v5 (arm), Standard_NC4as_T4_v3 (gpu), Standard_NC8as_T4_v3 (gpu), Standard_NC16as_T4_v3 (gpu), Standard_NC64as_T4_v3 (gpu)<br/><br/>GPU instance types depend on available capacity. After a GPU cluster is running, you also need to install your version of the NVIDIA device plugin for Kubernetes. See [NVIDIA GPU Operator with Azure Kubernetes Service](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/microsoft-aks.html) in the NVIDIA documentation. |
| Node Groups | Yes |
| Node Auto Scaling | Yes. Cost will be based on the max number of nodes. |
| Nodes | Supports multiple nodes. |
| IP Family | Supports `ipv4`. |
| Limitations | You can choose only a minor version, not a patch version. The AKS installer chooses the latest patch for that minor version.<br/><br/>For additional limitations that apply to all distributions, see Limitations. |
| Common Use Cases | Customer release tests |
    - **Example output**: +### OKE (Beta) - ``` - • Deploying Admin Console - • Creating namespace ✓ - • Waiting for datastore to be ready ✓ - • Waiting for Admin Console to be ready ✓ - • Waiting for installation to complete ✓ - • Waiting for preflight checks to complete ✓ +Compatibility Matrix supports creating [Oracle Container Engine for Kubernetes (OKE)](https://docs.oracle.com/en-us/iaas/Content/ContEng/Concepts/contengoverview.htm) clusters. - • Press Ctrl+C to exit - • Go to http://localhost:8800 to access the Admin Console + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Type | Description |
|------|-------------|
| Supported Kubernetes Versions | {/* START_oke_VERSIONS */}1.29.1, 1.30.1, 1.31.1{/* END_oke_VERSIONS */} |
| Supported Instance Types | VM.Standard2.1, VM.Standard2.2, VM.Standard2.4, VM.Standard2.8, VM.Standard2.16, VM.Standard3.Flex.1, VM.Standard3.Flex.2, VM.Standard3.Flex.4, VM.Standard3.Flex.8, VM.Standard3.Flex.16, VM.Standard.A1.Flex.1 (arm), VM.Standard.A1.Flex.2 (arm), VM.Standard.A1.Flex.4 (arm), VM.Standard.A1.Flex.8 (arm), VM.Standard.A1.Flex.16 (arm) |
| Node Groups | Yes |
| Node Auto Scaling | No. |
| Nodes | Supports multiple nodes. |
| IP Family | Supports `ipv4`. |
| Limitations | Provisioning an OKE cluster takes between 8 and 10 minutes. If needed, some timeouts in your CI pipelines might have to be adjusted.<br/><br/>For additional limitations that apply to all distributions, see Limitations. |
| Common Use Cases | Customer release tests |
    - • Go to http://localhost:8888 to access the application - ``` +## Replicated Instance Types {#types} -1. Verify that the Pods are running for the example NGNIX service and for kotsadm: +When creating a VM-based cluster with Compatibility Matrix, you must specify a Replicated instance type. - ```bash - kubectl get pods --namespace NAMESPACE - ``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Type | Memory (GiB) | VCPU Count |
|------|--------------|------------|
| r1.small | 8 GB | 2 VCPUs |
| r1.medium | 16 GB | 4 VCPUs |
| r1.large | 32 GB | 8 VCPUs |
| r1.xlarge | 64 GB | 16 VCPUs |
| r1.2xlarge | 128 GB | 32 VCPUs |
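When requesting a VM distribution, the instance type comes from this table. A usage sketch pairing a Replicated instance type with a multi-node k3s cluster (same assumed `replicated cluster create` flags as above; `--nodes` assumed to set the node count):

```
replicated cluster create --distribution k3s --version 1.32.2 \
  --instance-type r1.large --nodes 3
```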
    - Replace `NAMESPACE` with the namespace where KOTS and application was installed. +## Kubernetes Version Support Policy - **Example output:** +We do not maintain forks or patches of the supported distributions. When a Kubernetes version in Compatibility Matrix is out of support (EOL), Replicated will attempt to continue to support this version for six months for compatibility testing to support customers who are running out-of-date versions of Kubernetes. In the event that a critical security issue or bug is found and unresolved, we might discontinue support for EOL versions of Kubernetes prior to 6 months post EOL. - ```NAME READY STATUS RESTARTS AGE - kotsadm-7ccc8586b8-n7vf6 1/1 Running 0 12m - kotsadm-minio-0 1/1 Running 0 17m - kotsadm-rqlite-0 1/1 Running 0 17m - nginx-688f4b5d44-8s5v7 1/1 Running 0 11m - ``` +================ +File: docs/vendor/tutorial-adding-db-config.md +================ +# Example: Adding Database Configuration Options -## Next Step +In this tutorial, we'll explore ways to give your end user the option to either embed a database instance with the application, or connect your application to an external database instance that they will manage. +We'll use a PostgreSQL database as an example, configuring an example app to connect. -Continue to [Step 7: Configure the Application](tutorial-cli-deploy-app) to log in to the Admin Console and make configuration changes. +This tutorial explores advanced topics like workload coordination, credential management, and refactoring your application's user-facing configuration in the Replicated Admin Console. We'll also review best practices for integrating persistent stores like databases, queues, and caches. -================ -File: docs/vendor/tutorial-cli-install-cli.mdx -================ -# Step 1: Install the Replicated CLI +It is split into 5 sections: -In this tutorial, you use the Replicated CLI to create and promote releases for a sample application with Replicated. The Replicated CLI is the CLI for the Replicated Vendor Portal. +- [The Example Application](#the-example-application) +- [User-Facing Configuration](#user-facing-configuration) +- [Embedding a Database](#embedding-a-database) +- [Connecting to an External Database](#connecting-to-an-external-database) -This procedure describes how to create a Vendor Portal account, install the Replicated CLI on your local machine, and set up a `REPLICATED_API_TOKEN` environment variable for authentication. +### Prerequisites -To install the Replicated CLI: +This guide assumes you have: -1. Do one of the following to create an account in the Replicated Vendor Portal: - * **Join an existing team**: If you have an existing Vendor Portal team, you can ask your team administrator to send you an invitation to join. - * **Start a trial**: Alternatively, go to [vendor.replicated.com](https://vendor.replicated.com/) and click **Sign up** to create a 21-day trial account for completing this tutorial. +* A running instance of the Replicated Admin Console (`kotsadm`) to iterate against in either an existing cluster or an embedded cluster created with Replicated kURL. If you do not have a running instance of the Admin Console in an existing or kURL cluster, complete the [Install with KOTS in an Existing Cluster](tutorial-cli-setup) tutorial to package and install a sample application. +* A local git checkout of your application manifests. -1. 
Run the following command to use [Homebrew](https://brew.sh) to install the CLI:
+### Accompanying Code Examples
- ```
- brew install replicatedhq/replicated/cli
- ```
+A full example of the code for this guide can be found in the [kotsapps repository](https://github.com/replicatedhq/kotsapps/tree/master/postgres-snapshots).
- For the latest Linux or macOS versions of the Replicated CLI, see the [replicatedhq/replicated](https://github.com/replicatedhq/replicated/releases) releases in GitHub.
+* * *
-1. Verify the installation:
+## The Example Application
- ```
- replicated version
- ```
- **Example output**:
+For demonstration purposes, we'll use a simple app that connects to a Postgres database via the `psql` CLI.
+Once you've finished this guide, you should feel confident replacing it with any Kubernetes workload(s) that need to connect to a database.
+The deployment we'll use can be seen below:
- ```json
- {
-   "version": "0.37.2",
-   "git": "8664ac3",
-   "buildTime": "2021-08-24T17:05:26Z",
-   "go": {
-       "version": "go1.14.15",
-       "compiler": "gc",
-       "os": "darwin",
-       "arch": "amd64"
-   }
- }
- ```
- If you run a Replicated CLI command, such as `replicated release ls`, you see the following error message about a missing API token:
+```yaml
+# pg-consumer.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: pg-consumer
+spec:
+  selector:
+    matchLabels:
+      app: pg-consumer
+  template:
+    metadata:
+      labels:
+        app: pg-consumer
+    spec:
+      containers:
+        - name: pg-consumer
+          image: postgres:10
+          # connect to the database every 20 seconds
+          command:
+            - /bin/sh
+            - -ec
+            - |
+              while :; do
+                sleep 20
+                PGPASSWORD=${DB_PASSWORD} \
+                psql --host ${DB_HOST} \
+                     --port ${DB_PORT} \
+                     --user ${DB_USER} \
+                     --dbname ${DB_NAME} \
+                     --command 'SELECT NOW()'
+              done
+          # hard coded for now, we'll wire these up later
+          env:
+            - name: DB_HOST
+              value: postgres
+            - name: DB_PORT
+              value: "5432"
+            - name: DB_USER
+              value: postgres
+            - name: DB_PASSWORD
+              value: postgres
+            - name: DB_NAME
+              value: postgres
+```
- ```
- Error: set up APIs: Please provide your API token
- ```
+This app simply connects to the database every 20 seconds and writes the server timestamp to stdout.
+Even though `psql` supports [default environment variables](https://www.postgresql.org/docs/current/libpq-envars.html) for host, username, etc. that can be read transparently, we're intentionally using these generic `DB_` variables for clarity.
+Later, you can change these environment variable names to whatever format your application consumes.
-1. Create an API token for the Replicated CLI:
+For now, we'll hard-code the DB variable values; in the next sections, we'll wire these up to the user-provided configuration.
- 1. Log in to the Vendor Portal, and go to the [Account settings](https://vendor.replicated.com/account-settings) page.
- 1. Under **User API Tokens**, click **Create user API token**. For Nickname, provide a name for the token. For Permissions, select **Read and Write**.
+### Deploying the example application
- For more information about User API tokens, see [User API Tokens](replicated-api-tokens#user-api-tokens) in _Generating API Tokens_.
+Once you've added this deployment to your application's `manifests` directory, create a release by running `replicated release create --auto` locally.
+Then head to the Admin Console instance and click **Check for Updates** on the Version History tab to pull the new release:
- 1. Click **Create Token**.
+![View Update](/images/guides/kots/view-update.png)
- 1.
Copy the string that appears in the dialog. +Click **Deploy**. You should be able to review the logs and see `deployment.apps/pg-consumer created` in `applyStdout`: -1. Export the string that you copied in the previous step to an environment variable named `REPLICATED_API_TOKEN`: - ```bash - export REPLICATED_API_TOKEN=YOUR_TOKEN - ``` - Replace `YOUR_TOKEN` with the token string that you copied from the Vendor Portal in the previous step. +![Deployed PG Consumer](/images/guides/kots/pg-consumer-deployed.png) -1. Verify the User API token: - ``` - replicated release ls - ``` +After it is deployed, you can run `kubectl get pods` to inspect the cluster. +We should expect the Pod to be crashlooping at this point, since there's no database to connect to just yet: - You see the following error message: +```text +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +kotsadm-5bbf54df86-p7kqg 1/1 Running 0 12m +kotsadm-api-cbccb97ff-b6qxp 1/1 Running 2 12m +kotsadm-minio-0 1/1 Running 0 12m +kotsadm-operator-84477b5c4-tplcp 1/1 Running 0 12m +kotsadm-postgres-0 1/1 Running 0 12m +pg-consumer-75f49bfb69-mljr6 0/1 CrashLoopBackOff 1 10s +``` - ``` - Error: App not found: - ``` +Checking the logs, we should see a connect error: -## Next Step +```text +$ kubectl logs -l app=pg-consumer +psql: could not translate host name "postgres" to address: Name or service not known +``` -Continue to [Step 2: Create an Application](tutorial-cli-create-app) to use the Replicated CLI to create an application. +If the `kubectl logs` command hangs, you can try using the `--previous` flag to fetch the logs of the most recent crash: -================ -File: docs/vendor/tutorial-cli-manifests.mdx -================ -# Step 3: Get the Sample Manifests -To create a release for the `cli-tutorial` application, first create the Kubernetes manifest files for the application. This tutorial provides a set of sample manifest files for a simple Kubernetes application that deploys an NGINX service. +```text +$ kubectl logs -l app=pg-consumer --previous +psql: could not translate host name "postgres" to address: Name or service not known +``` -To get the sample manifest files: +Now that our test app is deployed, we'll walk through presenting options to the end user for connecting a Postgres instance to this app. -1. Run the following command to create and change to a `replicated-cli-tutorial` directory: +* * * - ``` - mkdir replicated-cli-tutorial - cd replicated-cli-tutorial - ``` +## User-Facing Configuration -1. Create a `/manifests` directory and download the sample manifest files from the [kots-default-yaml](https://github.com/replicatedhq/kots-default-yaml) repository in GitHub: +The core of this guide will be around how to give your end users the option to do one of the following actions: - ``` - mkdir ./manifests - curl -fSsL https://github.com/replicatedhq/kots-default-yaml/archive/refs/heads/main.zip | \ - tar xzv --strip-components=1 -C ./manifests \ - --exclude README.md --exclude LICENSE --exclude .gitignore - ``` +* Bring their own PostgreSQL instance for your app to connect to +* Use an "embedded" database bundled in with the application -1. Verify that you can see the YAML files in the `replicated-cli-tutorial/manifests` folder: +The first step here is to present that option to the user, then we'll walk through implementing each scenario. +The `kots.io/v1beta1` `Config` resource controls what configuration options are presented to the end user. 
+If you followed one of the "Getting Started" guides, you probably have a `config.yaml` in your manifests that looks something like the following YAML file: - ``` - ls manifests/ - ``` - ``` - example-configmap.yaml example-service.yaml kots-app.yaml kots-lint-config.yaml kots-support-bundle.yaml - example-deployment.yaml k8s-app.yaml kots-config.yaml kots-preflight.yaml - ``` +```yaml +apiVersion: kots.io/v1beta1 +kind: Config +metadata: + name: config-sample +spec: + groups: + - name: example_settings + title: My Example Config + description: Configuration to serve as an example for creating your own. See [https://kots.io/reference/v1beta1/config/](https://kots.io/reference/v1beta1/config/) for configuration docs. In this case, we provide example fields for configuring an Ingress object. + items: + - name: use_ingress + title: Use Ingress? + help_text: An example field to toggle inclusion of an Ingress Object + type: bool + default: "0" + - name: ingress_hostname + title: Ingress Hostname + help_text: If desired, enter the hostname for ingress to this application. You can enter the IP of this instance, or a DNS hostname. + type: text + when: repl{{ ConfigOptionEquals "use_ingress" "1" }} +``` -## Next Step +To add a database section, we'll modify it to include some database settings. +In this case we'll remove the Ingress toggle that is included as an example, although you might also choose to leave this in. None of these database settings will have any effect yet, but we'll still be able to preview what the end user will see. +Modify your YAML to include this database section: -Continue to [Step 4: Create a Release](tutorial-cli-create-release) to create and promote the first release for the `cli-tutorial` application using these manifest files. +```yaml +apiVersion: kots.io/v1beta1 +kind: Config +metadata: + name: config-sample +spec: + groups: + - name: database + title: Database + items: + - name: postgres_type + help_text: Would you like to use an embedded postgres instance, or connect to an external instance that you manage? + type: radio + title: Postgres + default: embedded_postgres + items: + - name: embedded_postgres + title: Embedded Postgres + - name: external_postgres + title: External Postgres + - name: embedded_postgres_password + hidden: true + type: password + value: "{{repl RandomString 32}}" +``` -================ -File: docs/vendor/tutorial-cli-setup.mdx -================ -import KubernetesTraining from "../partials/getting-started/_kubernetes-training.mdx" -import LabsIntro from "../partials/getting-started/_labs-intro.mdx" -import TutorialIntro from "../partials/getting-started/_tutorial-intro.mdx" -import RelatedTopics from "../partials/getting-started/_related-topics.mdx" -import VMRequirements from "../partials/getting-started/_vm-requirements.mdx" +This creates a toggle to allow the user to choose between an embedded or external Postgres instance, and a `hidden` field to generate a unique password for the embedded instance. -# Introduction and Setup +As mentioned in the introduction, a full example of the code for this guide can be found in the [kotsapps repository](https://github.com/replicatedhq/kotsapps/tree/master/postgres-snapshots). - -The steps in this KOTS CLI-based tutorial show you how to use the Replicated CLI to perform these tasks. The Replicated CLI is the CLI for the Replicated Vendor Portal. 
You can use the Replicated CLI as a software vendor to programmatically create, configure, and manage your application artifacts, including application releases, release channels, customer entitlements, private image registries, and more. +### Validating Config Changes - +Even though the options aren't wired, let's create a new release to validate the configuration screen was modified. +Create a release by running `replicated release create --auto`. +Then head to the Admin Console instance and click **Check for Updates** on the Version History tab to pull the new release: -## Set Up the Environment +![View Update](/images/guides/kots/view-update.png) -As part of this tutorial, you will install a sample application into a Kubernetes cluster. Before you begin, do the following to set up your environment: +After the update is deployed, click the Config tab and review our new toggle. +You might also notice that we've removed the Ingress settings to simplify things for this guide: -* Create a Kubernetes cluster that meets the minimum system requirements described in [KOTS Installation Requirements](/enterprise/installing-general-requirements). You can use any cloud provider or tool that you prefer to create a cluster, such as Google Kubernetes Engine (GKE), Amazon Web Services (AWS), or minikube. +![Database Config](/images/guides/kots/database-config.png) - **Example:** +Now that we have the configuration screen started, we can proceed to implement the "Embedded Postgres" option. - For example, to create a cluster in GKE, run the following command in the gcloud CLI: +* * * - ``` - gcloud container clusters create NAME --preemptible --no-enable-ip-alias - ``` - Where `NAME` is any name for the cluster. +## Embedding a Database -* Install kubectl, the Kubernetes command line tool. See [Install Tools](https://kubernetes.io/docs/tasks/tools/) in the Kubernetes documentation. -* Configure kubectl command line access to the cluster that you created. See [Command line tool (kubectl)](https://kubernetes.io/docs/reference/kubectl/) in the Kubernetes documentation. +To implement the embedded Database option, we'll add a Kubernetes [Statefulset](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/), and use the [annotations for optional resources](packaging-include-resources/) to control when it will be included in the application. -## Related Topics +### Adding the Secret and StatefulSet - +First, we'll create a secret to store the root password for our embedded postgres instance: -================ -File: docs/vendor/tutorial-cli-update-app.mdx -================ -# Step 9: Update the Application +```yaml +# postgres-secret.yaml +apiVersion: v1 +kind: Secret +metadata: + name: postgres +data: + DB_PASSWORD: '{{repl ConfigOption "embedded_postgres_password" | Base64Encode }}' +``` -To test the new release that you promoted, return to the Admin Console in a browser to update the application. +Next, create a new YAML file in your `manifests` directory with the following contents. 
+Note the use of `kots.io/when` to only conditionally include this based on end-user inputs: -To update the application: +```yaml +# postgres-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: postgres + labels: + app: pg-provider + annotations: + kots.io/when: '{{repl ConfigOptionEquals "postgres_type" "embedded_postgres" }}' +spec: + replicas: 1 + selector: + matchLabels: + app: pg-provider + serviceName: postgres + template: + metadata: + labels: + app: pg-provider + spec: + containers: + - env: + - name: PGDATA + value: /var/lib/postgresql/data/pgdata + # create a db called "postgres" + - name: POSTGRES_DB + value: postgres + # create admin user with name "postgres" + - name: POSTGRES_USER + value: postgres + # use admin password from secret + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + key: DB_PASSWORD + name: postgres + image: postgres:10 + name: postgres + volumeMounts: + - mountPath: /var/lib/postgresql/data + name: pgdata + volumes: + - name: pgdata + persistentVolumeClaim: + claimName: pgdata + volumeClaimTemplates: + - metadata: + name: pgdata + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +``` -1. Access the KOTS Admin Console using `https://localhost:8800` if the installation script is still running. Otherwise, run the following command to access the Admin Console: +Finally, lets add a Service object so we can route traffic to our postgres instance, again using `kots.io/when` to conditionally include this resource: - ```bash - kubectl kots admin-console --namespace NAMESPACE - ``` - Replace `NAMESPACE` with the namespace where the Admin Console is installed. +```yaml +# postgres-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: postgres + labels: + app: pg-provider + annotations: + kots.io/when: '{{repl ConfigOptionEquals "postgres_type" "embedded_postgres" }}' +spec: + ports: + - port: 5432 + selector: + app: pg-provider + type: ClusterIP +``` -1. Go to the Version history page, and click **Check for update**. +### Validating the embedded Database - ![Admin Console version history page](/images/tutorials/tutorial-check-for-update.png) +After you've added these resources, you can push a new release and update in the Admin Console. +You should see the following in the deployment logs: - The Admin Console loads the new release that you promoted. +![Embedded PG Deployed](/images/guides/kots/embedded-pg-deployed.png) -1. Click **Deploy**. In the dialog, click **Yes, deploy** to deploy the new version. +We should now see an instance of Postgres running in our namespace as well. +The consumer may still be crashlooping, but we can see the error is different now: - ![Admin Console version history page with new version](/images/tutorials/tutorial-deploy-app.png) +```text +$ kubectl logs -l app=pg-consumer +psql: FATAL: password authentication failed for user "postgres" +``` -1. After the Admin Console deploys the new version, go to the **Config** page where the **Another Text Example** field that you added is displayed. +This is because we still need to deliver the generated password to our workload pod. +In `pg-consumer.yaml`, we'll remove this section: - ![Admin Console configuration page with Another Text Example field](/images/tutorials/tutorial-new-config-item.png) +```yaml + - name: DB_PASSWORD + value: postgres +``` -1. In the new **Another Text Example** field, enter any text. Click **Save config**. 
+and replace it with: - The Admin Console notifies you that the configuration settings for the application have changed. +```yaml + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: postgres + key: DB_PASSWORD +``` - ![dialog over Admin Console configuration screen](/images/tutorials/tutorial-go-to-updated-version.png) +The full Deployment should now look like the following YAML file: -1. In the dialog, click **Go to updated version**. +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pg-consumer +spec: + selector: + matchLabels: + app: pg-consumer + template: + metadata: + labels: + app: pg-consumer + spec: + containers: + - name: pg-consumer + image: 'postgres:10' + # connect to the database every 20 seconds + command: + - /bin/sh + - -ec + - | + while :; do + sleep 20 + PGPASSWORD=${DB_PASSWORD} \ + psql --host ${DB_HOST} \ + --port ${DB_PORT} \ + --user ${DB_USER} \ + --dbname ${DB_NAME} \ + --command 'SELECT NOW()' + done + # hard coded for now, we'll wire these up later + env: + - name: DB_HOST + value: postgres + - name: DB_PORT + value: "5432" + - name: DB_USER + value: postgres + - name: DB_NAME + value: postgres + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: postgres + key: DB_PASSWORD +``` - The Admin Console loads the updated version on the Version history page. +From here, make another release and deploy it. +You should see the consumer pod is now able to connect to the database: -1. On the Version history page, click **Deploy** next to the latest version to deploy the configuration change. - ![Admin Console version history page with new version](/images/tutorials/tutorial-deploy-config-change.png) +```text +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +kotsadm-5bbf54df86-p7kqg 1/1 Running 0 144m +kotsadm-api-cbccb97ff-b6qxp 1/1 Running 2 144m +kotsadm-minio-0 1/1 Running 0 144m +kotsadm-operator-84477b5c4-tplcp 1/1 Running 0 144m +kotsadm-postgres-0 1/1 Running 0 144m +pg-consumer-77b868d7d8-xdn9v 1/1 Running 0 20s +postgres-0 1/1 Running 0 6m22s +``` -1. Go to the **Dashboard** page and click **Open App**. The application displays the text that you added to the field. +Checking the logs, we can connect now: - ![web page with text from the new configuration field](/images/tutorials/tutorial-updated-app.png) +```text +$ kubectl logs -l app=pg-consumer + now +------------------------------- + 2020-04-12 17:11:45.019293+00 +(1 row) - :::note - If you do not see the new text, refresh your browser. - ::: + now +------------------------------- + 2020-04-12 17:11:55.072041+00 +(1 row) +``` -## Summary +Now that we've configured our application to read from an embedded postgres instance, we'll switch to allowing the end user to provide their own database connection parameters. -Congratulations! As part of this tutorial, you: -* Created and promoted a release for a Kubernetes application using the Replicated CLI -* Installed the application in a Kubernetes cluster -* Edited the manifest files for the application, adding a new configuration field and using template functions to reference the field -* Promoted a new release with your changes -* Used the Admin Console to update the application to the latest version +* * * -================ -File: docs/vendor/tutorial-config-create-app.md -================ -# Step 2: Create an Application +## Connecting to an External Database -Next, install the Replicated CLI and then create an application. 
+In this section, we'll expand our configuration section to allow end users to bring their own Postgres instance.
-To create an application:
+### Modifying the Config Screen
-1. Install the Replicated CLI:
+Let's update our config screen to allow an end user to input some details about their database.
+We'll add the following YAML, noting the use of the `when` field to conditionally hide or show fields in the user-facing config screen:
- ```
- brew install replicatedhq/replicated/cli
- ```
- For more installation options, see [Installing the Replicated CLI](/reference/replicated-cli-installing).
+```yaml
+    - name: external_postgres_host
+      title: Postgres Host
+      when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+      type: text
+      default: postgres
+    - name: external_postgres_port
+      title: Postgres Port
+      when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+      type: text
+      default: "5432"
+    - name: external_postgres_user
+      title: Postgres Username
+      when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+      type: text
+      required: true
+    - name: external_postgres_password
+      title: Postgres Password
+      when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+      type: password
+      required: true
+    - name: external_postgres_db
+      title: Postgres Database
+      when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+      type: text
+      default: postgres
+```
-1. Authorize the Replicated CLI:
+Your full configuration screen should now look something like the following YAML file:
- ```
- replicated login
- ```
- In the browser window that opens, complete the prompts to log in to your vendor account and authorize the CLI.
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Config
+metadata:
+  name: config-sample
+spec:
+  groups:
+    - name: database
+      title: Database
+      items:
+        - name: postgres_type
+          help_text: Would you like to use an embedded postgres instance, or connect to an external instance that you manage?
+          type: radio
+          title: Postgres
+          default: embedded_postgres
+          items:
+            - name: embedded_postgres
+              title: Embedded Postgres
+            - name: external_postgres
+              title: External Postgres
+        - name: embedded_postgres_password
+          hidden: true
+          type: password
+          value: "{{repl RandomString 32}}"
+        - name: external_postgres_host
+          title: Postgres Host
+          when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+          type: text
+          default: postgres
+        - name: external_postgres_port
+          title: Postgres Port
+          when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+          type: text
+          default: "5432"
+        - name: external_postgres_user
+          title: Postgres Username
+          when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+          type: text
+          required: true
+        - name: external_postgres_password
+          title: Postgres Password
+          when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+          type: password
+          required: true
+        - name: external_postgres_db
+          title: Postgres Database
+          when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+          type: text
+          default: postgres
+```
-1. Create an application named `Grafana`:
+Let's save this and create a new release.
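As a convenience, the lint-then-release loop used throughout this guide can be collapsed into two commands; a sketch, assuming your manifests live in `manifests/` and that your CLI version supports `-y` to skip the confirmation prompt:

```bash
# Lint the manifests, then cut and auto-promote a release.
replicated release lint --yaml-dir=manifests
replicated release create --auto -y
```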
After deploying the release in the Admin Console, click **Config** and set the toggle to "External Postgres" to see the new fields:
- ```
- replicated app create Grafana
- ```
+In order to demonstrate that these are working, let's add some values that we know won't work, and just check to confirm that checking "External Postgres" will remove our embedded postgres instance:
-1. Set the `REPLICATED_APP` environment variable to the application that you created. This allows you to interact with the application using the Replicated CLI without needing to use the `--app` flag with every command:
- 1. Get the slug for the application that you created:
+![External PG Config Fake](/images/guides/kots/external-pg-config-fake.png)
- ```
- replicated app ls
- ```
- **Example output**:
- ```
- ID                            NAME     SLUG             SCHEDULER
- 2WthxUIfGT13RlrsUx9HR7So8bR   Grafana  grafana-python   kots
- ```
- In the example above, the application slug is `grafana-python`.
+Save these settings, and then you'll be directed back to the Version History page to apply the change:
- :::info
- The application _slug_ is a unique string that is generated based on the application name. You can use the application slug to interact with the application through the Replicated CLI and the Vendor API v3. The application name and slug are often different from one another because it is possible to create more than one application with the same name.
- :::
+![Deploy Config Change](/images/guides/kots/deploy-config-change.png)
- 1. Set the `REPLICATED_APP` environment variable to the application slug.
+After this is deployed, we should see that the postgres statefulset has been removed, and that our sample application is back to failing:
- **MacOS Example:**
- ```
- export REPLICATED_APP=grafana-python
- ```
+```text
+$ kubectl get pod
+NAME                               READY   STATUS    RESTARTS   AGE
+kotsadm-5bbf54df86-8ws98           1/1     Running   0          12m
+kotsadm-api-cbccb97ff-r7mz6        1/1     Running   2          12m
+kotsadm-minio-0                    1/1     Running   0          12m
+kotsadm-operator-84477b5c4-4gmbm   1/1     Running   0          12m
+kotsadm-postgres-0                 1/1     Running   0          12m
+pg-consumer-6bd78594d-n7nmw        0/1     Error     2          29s
+```
-## Next Step
+You'll note that it is failing, but it is still using our hardcoded environment variables, not the user-entered config.
+In the next step, we'll wire the end-user configuration values into our service.
-Add the Replicated SDK to the Helm chart and package the chart to an archive. See [Step 3: Package the Helm Chart](tutorial-config-package-chart).
+```text
+$ kubectl logs -l app=pg-consumer
+psql: could not translate host name "postgres" to address: Name or service not known
+```
-## Related Topics
-* [Create an Application](/vendor/vendor-portal-manage-app#create-an-application)
-* [Installing the Replicated CLI](/reference/replicated-cli-installing)
-* [replicated app create](/reference/replicated-cli-app-create)
-================
-File: docs/vendor/tutorial-config-create-customer.md
-================
-# Step 5: Create a KOTS-Enabled Customer
+### Mapping User Inputs
-After promoting the release, create a customer with the KOTS entitlement so that you can install the release with KOTS.
+To map the user-supplied configuration, we'll start by expanding the secret we created before, adding fields for additional variables, using `{{repl if ... }}` blocks to switch between embedded/external contexts.
-To create a customer:
+To start, you can add a field for hostname, using Base64Encode. You must use a single line, as shown in the following example.
-1.
In the [Vendor Portal](https://vendor.replicated.com), click **Customers > Create customer**.
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: postgres
+data:
+  DB_PASSWORD: '{{repl ConfigOption "embedded_postgres_password" | Base64Encode }}'
+  DB_HOST:
+    {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" }}{{repl Base64Encode "postgres" }}{{repl else}}{{repl ConfigOption "external_postgres_host" | Base64Encode }}{{repl end}}
+```
- The **Create a new customer** page opens:
+Now that we have the value in our Secret, we can modify our deployment to consume it.
+Replace this text:
- ![Create a new customer page in the Vendor Portal](/images/create-customer.png)
+```yaml
+            - name: DB_HOST
+              value: postgres
+```
- [View a larger version of this image](/images/create-customer.png)
+with this text:
-1. For **Customer name**, enter a name for the customer. For example, `KOTS Customer`.
+```yaml
+            - name: DB_HOST
+              valueFrom:
+                secretKeyRef:
+                  name: postgres
+                  key: DB_HOST
+```
-1. For **Channel**, select **Unstable**. This allows the customer to install releases promoted to the Unstable channel.
+Your full deployment should look something like the following YAML file:
-1. For **License type**, select Development.
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: pg-consumer
+spec:
+  selector:
+    matchLabels:
+      app: pg-consumer
+  template:
+    metadata:
+      labels:
+        app: pg-consumer
+    spec:
+      containers:
+        - name: pg-consumer
+          image: 'postgres:10'
+          # connect to the database every 20 seconds
+          command:
+            - /bin/sh
+            - -ec
+            - |
+              while :; do
+                sleep 20
+                PGPASSWORD=${DB_PASSWORD} \
+                psql --host ${DB_HOST} \
+                     --port ${DB_PORT} \
+                     --user ${DB_USER} \
+                     --dbname ${DB_NAME} \
+                     --command 'SELECT NOW()'
+              done
+          env:
+            - name: DB_HOST
+              valueFrom:
+                secretKeyRef:
+                  name: postgres
+                  key: DB_HOST
+            - name: DB_PORT
+              value: "5432"
+            - name: DB_USER
+              value: postgres
+            - name: DB_NAME
+              value: postgres
+            - name: DB_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: postgres
+                  key: DB_PASSWORD
+```
-1. For **License options**, verify that **KOTS Install Enabled** is enabled. This is the entitlement that allows the customer to install with KOTS.
+From here, let's create and deploy a release, and verify that the secret has the customer-provided value, base64 decoding the secret contents:
-1. Click **Save Changes**.
+```text
+$ kubectl get secret postgres -o yaml | head -n 4
+apiVersion: v1
+data:
+  DB_HOST: ZmFrZQ==
+  DB_PASSWORD: ajNVWDd1RnRfc0NkVTJqOFU3Q25xUkxRQk5fUlh3RjA=
+```
-1. On the **Manage customer** page for the customer, click **Download license**. You will use the license file to install with KOTS.
+You can verify we pulled in our user-provided config by base64-decoding the `DB_HOST` field:
- ![Download license button on the customer page](/images/customer-download-license.png)
+```text
+$ echo ZmFrZQ== | base64 --decode
+fake
+```
- [View a larger version of this image](/images/customer-download-license.png)
+Checking on our service itself, we can verify that it's now trying to connect to the `fake` hostname instead of `postgres`:
-## Next Step
+```text
+$ kubectl logs -l app=pg-consumer
+psql: could not translate host name "fake" to address: Name or service not known
+```
-Get the KOTS installation command and install. See [Step 6: Install the Release with KOTS](tutorial-config-install-kots).
+We'll optionally wire this to a real external Postgres database later, but for now we'll proceed to add the rest of the fields.
-## Related Topics +### Extending this to All Fields -* [About Customers](/vendor/licenses-about) -* [Creating and Managing Customers](/vendor/releases-creating-customer) +Now that we've wired the DB_HOST field all the way through, we'll do the same for the other fields. +In the end, your Secret and Deployment should look like the following YAML files: -================ -File: docs/vendor/tutorial-config-create-release.md -================ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import HelmChart from "../partials/getting-started/_grafana-helmchart.mdx" -import KotsApp from "../partials/getting-started/_grafana-kots-app.mdx" -import K8sApp from "../partials/getting-started/_grafana-k8s-app.mdx" -import Config from "../partials/getting-started/_grafana-config.mdx" +```yaml +# postgres-secret.yaml +apiVersion: v1 +kind: Secret +metadata: + name: postgres +data: + DB_HOST: >- + {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}} + {{repl Base64Encode "postgres" }} + {{repl else -}} + {{repl ConfigOption "external_postgres_host" | Base64Encode }} + {{repl end}} + DB_PORT: >- + {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}} + {{repl Base64Encode "5432" }} + {{repl else -}} + {{repl ConfigOption "external_postgres_port" | Base64Encode }} + {{repl end}} + DB_USER: >- + {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}} + {{repl Base64Encode "postgres" }} + {{repl else -}} + {{repl ConfigOption "external_postgres_user" | Base64Encode }} + {{repl end}} + DB_PASSWORD: >- + {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}} + {{repl ConfigOption "embedded_postgres_password" | Base64Encode }} + {{repl else -}} + {{repl ConfigOption "external_postgres_password" | Base64Encode }} + {{repl end}} + DB_NAME: >- + {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}} + {{repl Base64Encode "postgres" }} + {{repl else -}} + {{repl ConfigOption "external_postgres_db" | Base64Encode }} + {{repl end}} +``` -# Step 4: Add the Chart Archive to a Release +```yaml +# pg-consumer.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pg-consumer +spec: + selector: + matchLabels: + app: pg-consumer + template: + metadata: + labels: + app: pg-consumer + spec: + containers: + - name: pg-consumer + image: 'postgres:10' + # connect to the database every 20 seconds + command: + - /bin/sh + - -ec + - | + while :; do + sleep 20 + PGPASSWORD=${DB_PASSWORD} \ + psql --host ${DB_HOST} \ + --port ${DB_PORT} \ + --user ${DB_USER} \ + --dbname ${DB_NAME} \ + --command 'SELECT NOW()' + done + env: + - name: DB_HOST + valueFrom: + secretKeyRef: + name: postgres + key: DB_HOST + - name: DB_PORT + valueFrom: + secretKeyRef: + name: postgres + key: DB_PORT + - name: DB_USER + valueFrom: + secretKeyRef: + name: postgres + key: DB_USER + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: postgres + key: DB_PASSWORD + - name: DB_NAME + valueFrom: + secretKeyRef: + name: postgres + key: DB_NAME +``` -Next, add the Helm chart archive to a new release for the application in the Replicated vendor platform. +Optionally, you can be extra concise and collapse each individual `env` `valueFrom` into a single `envFrom` `secretRef` entry: -The purpose of this step is to configure a release that supports installation with KOTS. 
Additionally, this step defines a user-facing application configuration page that displays in the KOTS Admin Console during installation where users can set their own Grafana login credentials. +```yaml +# pg-consumer.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pg-consumer +spec: + selector: + matchLabels: + app: pg-consumer + template: + metadata: + labels: + app: pg-consumer + spec: + containers: + - name: pg-consumer + image: 'postgres:10' + # connect to the database every 20 seconds + command: + - /bin/sh + - -ec + - | + while :; do + sleep 20 + PGPASSWORD=${DB_PASSWORD} \ + psql --host ${DB_HOST} \ + --port ${DB_PORT} \ + --user ${DB_USER} \ + --dbname ${DB_NAME} \ + --command 'SELECT NOW()' + done + envFrom: + - secretRef: + name: postgres +``` -To create a release: -1. In the `grafana` directory, create a subdirectory named `manifests`: +After deploying this, you should see all of the fields in the secret: - ``` - mkdir manifests - ``` +```text +$ kubectl get secret postgres -o yaml +apiVersion: v1 +data: + DB_HOST: ZmFrZQ== + DB_NAME: ZmFrZQ== + DB_PASSWORD: ZXh0cmEgZmFrZQ== + DB_PORT: NTQzMjE= + DB_USER: ZmFrZQ== +kind: Secret +# ...snip... +``` - You will add the files required to support installation with Replicated KOTS to this subdirectory. +We can also print the environment in our sample app to verify that all of the values are piped properly: -1. Move the Helm chart archive that you created to `manifests`: +```text +$ kubectl exec $(kubectl get pod -l app=pg-consumer -o jsonpath='{.items[0].metadata.name}' ) -- /bin/sh -c 'printenv | grep DB_' +DB_PORT=54321 +DB_NAME=fake +DB_PASSWORD=extra fake +DB_HOST=fake +DB_USER=fake +``` - ``` - mv grafana-9.6.5.tgz manifests - ``` +### Testing Config Changes -1. In the `manifests` directory, create the following YAML files to configure the release: - - ``` - cd manifests - ``` - ``` - touch kots-app.yaml k8s-app.yaml kots-config.yaml grafana.yaml - ``` +Now let's make some changes to the database credentials. In this case, we'll use a Postgres database provisioned in Amazon RDS, but you can use any external database. +To start, head to the "Config" screen and input your values: -1. In each file, paste the corresponding YAML provided in the tabs below: +![Real Postgres Values](/images/guides/kots/real-postgres-values.png) - - -
    **KOTS Application custom resource (`kots-app.yaml`)**

    The KOTS Application custom resource enables features in the Admin Console such as branding, release notes, port forwarding, dashboard buttons, application status indicators, and custom graphs.

    The YAML below provides a name for the application to display in the Admin Console, adds a custom status informer that displays the status of the grafana Deployment resource in the Admin Console dashboard, adds a custom application icon, and creates a port forward so that the user can open the Grafana application in a browser.

    **Kubernetes Application custom resource (`k8s-app.yaml`)**

    The Kubernetes Application custom resource supports functionality such as including buttons and links on the Admin Console dashboard. The YAML below adds an Open App button to the Admin Console dashboard that opens the application using the port forward configured in the KOTS Application custom resource.

    **Config custom resource (`kots-config.yaml`)**

    The Config custom resource specifies a user-facing configuration page in the Admin Console designed for collecting application configuration from users. The YAML below creates "Admin User" and "Admin Password" fields that will be shown to the user on the configuration page during installation. These fields will be used to set the login credentials for Grafana.

    **KOTS HelmChart custom resource (`grafana.yaml`)**

    The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart.

    The HelmChart custom resource below contains a values key, which creates a mapping to the Grafana values.yaml file. In this case, the values.admin.user and values.admin.password fields map to admin.user and admin.password in the Grafana values.yaml file.

    During installation, KOTS renders the ConfigOption template functions in the values.admin.user and values.admin.password fields and then sets the corresponding Grafana values accordingly.
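    The YAML for each of these tabs comes from partials in the source and is not reproduced here. As a rough sketch of the shape of the HelmChart custom resource described above (the config item names `admin_user` and `admin_password` are assumptions):

    ```yaml
    # grafana.yaml (sketch)
    apiVersion: kots.io/v1beta2
    kind: HelmChart
    metadata:
      name: grafana
    spec:
      chart:
        name: grafana
        chartVersion: 9.6.5
      values:
        admin:
          # Rendered from the Config custom resource at install time
          user: repl{{ ConfigOption "admin_user" }}
          password: repl{{ ConfigOption "admin_password" }}
    ```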
+Let's save and apply this config and check in our pod again:
-1. From the `manifests` directory, lint the YAML files to confirm that there are no errors:
+```text
+$ kubectl exec $(kubectl get pod -l app=pg-consumer -o jsonpath='{.items[0].metadata.name}' ) -- /bin/sh -c 'printenv | grep DB_'
+DB_PORT=54321
+DB_NAME=fake
+DB_PASSWORD=extra fake
+DB_HOST=fake
+DB_USER=fake
+```
- ```
- replicated release lint --yaml-dir .
- ```
- `--yaml-dir` is the path to the directory that contains the Helm chart archive and the manifest files required by KOTS.
+Uh oh, it appears that our values did not get updated! If you've worked with Secrets before, you may know that there's a [long-standing issue in Kubernetes](https://github.com/kubernetes/kubernetes/issues/22368) where pods that load config from Secrets or ConfigMaps won't automatically restart when underlying config is changed.
+There are some tricks to make this work, and in the next step we'll implement one of them, but for now we can delete the pod to verify that the configuration is being piped through to our sample application:
- **Example output**:
+```text
+$ kubectl delete pod -l app=pg-consumer
+pod "pg-consumer-6df9d5d7fd-bd5z6" deleted
+```
- ```
- RULE                                  TYPE    FILENAME        LINE    MESSAGE
- preflight-spec                        warn                            Missing preflight spec
- troubleshoot-spec                     warn                            Missing troubleshoot spec
- nonexistent-status-informer-object    warn    kots-app.yaml   8       Status informer points to a nonexistent kubernetes object. If this is a Helm resource, this warning can be ignored.
- ```
- :::note
- The output includes warning messages that list missing manifest files. These manifests control additional KOTS functionality and can be ignored for the purpose of this tutorial. The `nonexistent-status-informer-object` warning can also be ignored because the `grafana` Deployment resource that was added as a status informer in the KOTS Application custom resource is a Helm resource.
- :::
+If the pod is crashlooping, you might need to add `--force --grace-period 0` to force delete it.
+In either case, once a new pod starts, we should now see it loading the correct config:
-1. Create a release:
+```text
+$ kubectl exec $(kubectl get pod -l app=pg-consumer -o jsonpath='{.items[0].metadata.name}' ) -- /bin/sh -c 'printenv | grep DB_'
+DB_PORT=5432
+DB_NAME=postgres
+DB_PASSWORD=
+DB_HOST=10.128.0.12
+DB_USER=postgres
+```
- ```
- replicated release create --yaml-dir .
- ```
- **Example output**:
- ```
- • Reading manifests from . ✓
- • Creating Release ✓
- • SEQUENCE: 1
- ```
+### Triggering Restarts on Changes
-1. Log in to the Vendor Portal and go to **Releases**.
+In order to automate this restart on changes, we're going to use a hash of all database parameters to trigger a rolling update whenever database parameters are changed.
+We'll use a `hidden`, `readonly` field to store this in our config screen:
- The release that you created is listed under **All releases**.
+```yaml
+    - name: external_postgres_confighash
+      hidden: true
+      readonly: true
+      type: text
+      value: '{{repl (sha256sum (print (ConfigOption "external_postgres_host") (ConfigOption "external_postgres_port") (ConfigOption "external_postgres_user") (ConfigOption "external_postgres_password") (ConfigOption "external_postgres_db") ))}}'
+```
- ![Release page in the Vendor Portal with one release](/images/grafana-release-seq-1.png)
+The `hidden` flag will hide it from the UI, and the `readonly` flag in this case will cause the value to be re-computed any time an upstream `ConfigOption` value changes.
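Conceptually, the template above concatenates the five values and hashes the result, roughly equivalent to this shell sketch (the variable names here are illustrative):

```bash
# Rough equivalent of the confighash template function chain.
printf '%s' "${PG_HOST}${PG_PORT}${PG_USER}${PG_PASSWORD}${PG_DB}" | sha256sum
```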
- [View a larger version of this image](/images/grafana-release-seq-1.png) +Next, let's add this as an annotation to our deployment's pod template at `spec.template.metadata.annotations`: -1. Click **Edit release** to view the files in the release. +```yaml +annotations: + kots.io/config-hash: '{{repl ConfigOption "external_postgres_confighash"}}' +``` - In the release editor, you can see the manifest files that you created, the Helm chart `.tgz` archive, and the `Chart.yaml` and `values.yaml` files for the Grafana Helm chart. You can also see the same warning messages that were displayed in the CLI output. +**Note**: It's worth noting here that there's nothing special about the `kots.io/config-hash` annotation. We could have just as easily called this annotation `my-app-something-fake` instead. +What matters here is that when the value in a Deployment annotation changes, it will cause Kubernetes to roll out a new version of the pod, stopping the old one and thus picking up our config changes. - ![Edit Release page in the Vendor Portal](/images/grafana-edit-release-seq-1.png) - [View a larger version of this image](/images/grafana-edit-release-seq-1.png) +Your full deployment should now look like the following YAML file: -1. At the top of the page, click **Promote**. +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pg-consumer +spec: + selector: + matchLabels: + app: pg-consumer + template: + metadata: + labels: + app: pg-consumer + annotations: + kots.io/config-hash: '{{repl ConfigOption "external_postgres_confighash"}}' + spec: + containers: + - name: pg-consumer + image: 'postgres:10' + # connect to the database every 20 seconds + command: + - /bin/sh + - -ec + - | + while :; do + sleep 20 + PGPASSWORD=${DB_PASSWORD} \ + psql --host ${DB_HOST} \ + --port ${DB_PORT} \ + --user ${DB_USER} \ + --dbname ${DB_NAME} \ + --command 'SELECT NOW()' + done + envFrom: + - secretRef: + name: postgres +``` -1. In the dialog, for **Which channels you would like to promote this release to?**, select **Unstable**. Unstable is a default channel that is intended for use with internal testing. Click **Promote**. - Promote release dialog +### Integrating a Real Database - [View a larger version of this image](/images/release-promote.png) +If you'd like at this point, you can integrate a real database in your environment, just fill out your configuration fields. You'll know you did it right if your pg-consumer pod can connect. -## Next Step +================ +File: docs/vendor/tutorial-cli-create-app.mdx +================ +# Step 2: Create an Application -Create a customer with the KOTS entitlement so that you can install the release in your cluster using Replicated KOTS. See [Step 5: Create a KOTS-Enabled Customer](tutorial-config-create-customer). +After you install the Replicated CLI and create an API token, you can use the CLI to create a new application. -## Related Topics +To create an application: -* [About Channels and Releases](/vendor/releases-about) -* [Configuring the HelmChart Custom Resource](/vendor/helm-native-v2-using) -* [Config Custom Resource](/reference/custom-resource-config) -* [Manipulating Helm Chart Values with KOTS](/vendor/helm-optional-value-keys) +1. 
Run the following command to create an application named `cli-tutorial`: -================ -File: docs/vendor/tutorial-config-get-chart.md -================ -# Step 1: Get the Sample Chart and Test + ``` + replicated app create cli-tutorial + ``` -To begin, get the sample Grafana Helm chart from Bitnami, install the chart in your cluster using the Helm CLI, and then uninstall. The purpose of this step is to confirm that you can successfully install and access the application before adding the chart to a release in the Replicated vendor platform. + **Example output**: -To get the sample Grafana chart and test installation: + ``` + ID NAME SLUG SCHEDULER + 2GmY... cli-tutorial cli-tutorial kots + ``` -1. Run the following command to pull and untar version 9.6.5 of the Bitnami Grafana Helm chart: +1. Export the application slug in the output of the `app create` command as an environment variable: ``` - helm pull --untar oci://registry-1.docker.io/bitnamicharts/grafana --version 9.6.5 + export REPLICATED_APP=YOUR_SLUG ``` - For more information about this chart, see the [bitnami/grafana](https://github.com/bitnami/charts/tree/main/bitnami/grafana) repository in GitHub. + Replace `YOUR_SLUG` with the slug for the application you created in the previous step. + +1. Verify that both the `REPLICATED_API_TOKEN` environment variable that you created as part of [Step 1: Install the Replicated CLI](tutorial-cli-install-cli) and the `REPLICATED_APP` environment variable are set correctly: -1. Change to the new `grafana` directory that was created: - ``` - cd grafana - ``` -1. View the files in the directory: - ``` - ls - ``` - The directory contains the following files: ``` - Chart.lock Chart.yaml README.md charts templates values.yaml + replicated release ls ``` -1. Install the chart in your cluster: + + In the output of this command, you now see an empty list of releases for the application: ``` - helm install grafana . --namespace grafana --create-namespace + SEQUENCE CREATED EDITED ACTIVE_CHANNELS ``` - To view the full installation instructions from Bitnami, see [Installing the Chart](https://github.com/bitnami/charts/blob/main/bitnami/grafana/README.md#installing-the-chart) in the `bitnami/grafana` repository. - After running the installation command, the following output is displayed: +## Next Step - ``` - NAME: grafana - LAST DEPLOYED: Thu Dec 14 14:54:50 2023 - NAMESPACE: grafana - STATUS: deployed - REVISION: 1 - TEST SUITE: None - NOTES: - CHART NAME: grafana - CHART VERSION: 9.6.5 - APP VERSION: 10.2.2 +Continue to [Step 3: Get the Sample Manifests](tutorial-cli-manifests) to download the manifest files for a sample Kubernetes application. You will use these manifest files to create the first release for the `cli-tutorial` application. - ** Please be patient while the chart is being deployed ** +================ +File: docs/vendor/tutorial-cli-create-customer.mdx +================ +# Step 5: Create a Customer - 1. Get the application URL by running these commands: - echo "Browse to http://127.0.0.1:8080" - kubectl port-forward svc/grafana 8080:3000 & +After promoting the first release for the `cli-tutorial` application, create a customer so that you can install the application. - 2. Get the admin credentials: +A _customer_ is an object in the Vendor Portal that represents a single licensed user of your application. When you create a customer, you define entitlement information for the user, and the Vendor Portal generates a YAML license file for the customer that you can download. 
- echo "User: admin" - echo "Password: $(kubectl get secret grafana-admin --namespace grafana -o jsonpath="{.data.GF_SECURITY_ADMIN_PASSWORD}" | base64 -d)" - # Note: Do not include grafana.validateValues.database here. See https://github.com/bitnami/charts/issues/20629 - ``` +When you install the application later in this tutorial, you will upload the license file that you create in this step to allow KOTS to create the application containers. -1. Watch the `grafana` Deployment until it is ready: +To create a customer and download the license file: - ``` - kubectl get deploy grafana --namespace grafana --watch - ``` +1. From the `replicated-cli-tutorial` directory, create a license for a customer named `Some-Big-Bank` that is assigned to the Unstable channel and expires in 10 days: -1. When the Deployment is created, run the commands provided in the output of the installation command to get the Grafana login credentials: + ``` + replicated customer create \ + --name "Some-Big-Bank" \ + --expires-in "240h" \ + --channel "Unstable" + ``` + The Unstable channel is the channel where you promoted the release in [Step 4: Create a Release](tutorial-cli-create-release). Assigning the customer to a channel allows them to install the releases that are promoted to that channel. - ``` - echo "User: admin" - echo "Password: $(kubectl get secret grafana-admin --namespace grafana -o jsonpath="{.data.GF_SECURITY_ADMIN_PASSWORD}" | base64 -d)" - ``` + **Example output:** -1. Run the commands provided in the ouptut of the installation command to get the Grafana URL: + ``` + ID NAME CHANNELS EXPIRES TYPE + 2GuB3VYLjU5t9vNDK6byjgiTKUs Some-Big-Bank Unstable 2022-11-10 14:59:49 +0000 UTC dev + ``` - ``` - echo "Browse to http://127.0.0.1:8080" - kubectl port-forward svc/grafana 8080:3000 --namespace grafana - ``` +1. Verify the customer creation details: - :::note - Include `--namespace grafana` in the `kubectl port-forward` command. - ::: + ``` + replicated customer ls + ``` -1. In a browser, go to the URL to open the Grafana login page: + **Example output:** - Grafana login page + ``` + ID NAME CHANNELS EXPIRES TYPE + 2GuB3VYLjU5t9vNDK6byjgiTKUs Some-Big-Bank Unstable 2022-11-10 14:59:49 +0000 UTC dev + ``` - [View a larger version of this image](/images/grafana-login.png) +1. Download the license file for the customer that you just created: -1. Log in using the credentials provided to open the Grafana dashboard: + ``` + replicated customer download-license \ + --customer "Some-Big-Bank" + ``` - Grafana dashboard + The license downloads to `stdout`. - [View a larger version of this image](/images/grafana-dashboard.png) + **Example output**: -1. Uninstall the Helm chart: + ``` + apiVersion: kots.io/v1beta1 + kind: License + metadata: + name: some-big-bank + spec: + appSlug: cli-tutorial + channelID: 2GmYFUFzj8JOSLYw0jAKKJKFua8 + channelName: Unstable + customerName: Some-Big-Bank + endpoint: https://replicated.app + entitlements: + expires_at: + description: License Expiration + title: Expiration + value: "2022-11-10T14:59:49Z" + valueType: String + isNewKotsUiEnabled: true + licenseID: 2GuB3ZLQsU38F5SX3n03x8qBzeL + licenseSequence: 1 + licenseType: dev + signature: eyJsaW... + ``` - ``` - helm uninstall grafana --namespace grafana - ``` - This command removes all the Kubernetes resources associated with the chart and uninstalls the `grafana` release. +1. Rename the license file and save it to your Desktop folder: -1. 
Delete the namespace: + ``` + export LICENSE_FILE=~/Desktop/Some-Big-Bank-${REPLICATED_APP}-license.yaml + replicated customer download-license --customer "Some-Big-Bank" > "${LICENSE_FILE}" + ``` - ``` - kubectl delete namespace grafana - ``` +1. Verify that the license was written properly using either `cat` or `head`: -## Next Step + ``` + head ${LICENSE_FILE} + ``` -Log in to the Vendor Portal and create an application. See [Step 2: Create an Application](tutorial-config-create-app). + **Example output**: -## Related Topics + ``` + apiVersion: kots.io/v1beta1 + kind: License + metadata: + name: some-big-bank + spec: + appSlug: cli-tutorial + channelID: 2GmYFUFzj8JOSLYw0jAKKJKFua8 + channelName: Unstable + customerName: Some-Big-Bank + endpoint: https://replicated.app + ``` -* [Helm Install](https://helm.sh/docs/helm/helm_install/) -* [Helm Uninstall](https://helm.sh/docs/helm/helm_uninstall/) -* [Helm Create](https://helm.sh/docs/helm/helm_create/) -* [Helm Package](https://helm.sh/docs/helm/helm_package/) -* [bitnami/gitea](https://github.com/bitnami/charts/blob/main/bitnami/gitea) +## Next Step + +Continue to [Step 6: Install KOTS and the Application](tutorial-cli-install-app-manager) to get the installation commands from the Unstable channel, then install the KOTS components and the sample application in your cluster. ================ -File: docs/vendor/tutorial-config-install-kots.md +File: docs/vendor/tutorial-cli-create-new-version.mdx ================ -# Step 6: Install the Release with KOTS +# Step 8: Create a New Version -Next, get the KOTS installation command from the Unstable channel in the Vendor Portal and then install the release using the customer license that you downloaded. +In this step, you make an edit to the Config custom resource manifest file in the `replicated-cli-tutorial/manifests` directory for the `cli-tutorial` application to create a new field on the **Config** page in the Admin Console. You will then create and promote a new release to the Unstable channel with your changes. -As part of installation, you will set Grafana login credentials on the KOTS Admin Console configuration page. +To create and promote a new version of the application: -To install the release with KOTS: +1. In your local directory, go to the the `replicated-cli-tutorial/manifests` folder and open the `kots-config.yaml` file in a text editor. -1. In the [Vendor Portal](https://vendor.replicated.com), go to **Channels**. From the **Unstable** channel card, under **Install**, copy the **KOTS Install** command. +1. Copy and paste the following YAML into the file under the `example_default_value` field to create a new text field on the **Config** page: - ![KOTS Install tab on the Unstable channel card](/images/grafana-unstable-channel.png) + ```yaml + - name: more_text + title: Another Text Example + type: text + value: "" + when: repl{{ ConfigOptionEquals "show_text_inputs" "1" }} + ``` + The following shows the full YAML for the `kots-config.yaml` file after you add the new field: - [View a larger version of this image](/images/grafana-unstable-channel.png) + ```yaml + --- + apiVersion: kots.io/v1beta1 + kind: Config + metadata: + name: config-sample + spec: + groups: + - name: example_settings + title: My Example Config + description: Configuration to serve as an example for creating your own. See [https://kots.io/reference/v1beta1/config/](https://kots.io/reference/v1beta1/config/) for configuration docs. In this case, we provide example fields for configuring an Nginx welcome page. 
+ items: + - name: show_text_inputs + title: Customize Text Inputs + help_text: "Show custom user text inputs" + type: bool + default: "0" + recommended: true + - name: example_default_value + title: Text Example (with default value) + type: text + value: "" + default: please change this value + when: repl{{ ConfigOptionEquals "show_text_inputs" "1" }} + # Add the new more_text field here + - name: more_text + title: Another Text Example + type: text + value: "" + when: repl{{ ConfigOptionEquals "show_text_inputs" "1" }} + - name: api_token + title: API token + type: password + props: + rows: 5 + when: repl{{ ConfigOptionEquals "show_text_inputs" "1" }} + - name: readonly_text_left + title: Readonly Text + type: text + value: "{{repl RandomString 10}}" + readonly: true + when: repl{{ ConfigOptionEquals "show_text_inputs" "1" }} + - name: hidden_text + title: Secret Key + type: password + hidden: true + value: "{{repl RandomString 40}}" -1. On the command line, run the **KOTS Install** command that you copied: + ``` - ```bash - curl https://kots.io/install | bash - kubectl kots install $REPLICATED_APP/unstable - ``` +1. Open the `example-configmap.yaml` file. - This installs the latest version of the KOTS CLI and the Admin Console. The Admin Console provides a user interface where you can upload the customer license file and deploy the application. +1. In the `example-configmap.yaml` file, copy and paste the following HTML to replace the `` section: - For additional KOTS CLI installation options, including how to install without root access, see [Installing the KOTS CLI](/reference/kots-cli-getting-started). + ``` + + This is an example KOTS application. +

+     <pre>
+     This is text from a user config value: '{{repl ConfigOption "example_default_value"}}'
+     </pre>
+
+     <pre>
+     This is more text from a user config value: '{{repl ConfigOption "more_text"}}'
+     </pre>
+
+     <pre>
+     This is a hidden value: '{{repl ConfigOption "hidden_text"}}'
+     </pre>
+   </body>
    + + ``` + This creates a reference to the `more_text` field using a Replicated KOTS template function. The ConfigOption template function renders the user input from the configuration item that you specify. For more information, see [Config Context](/reference/template-functions-config-context) in _Reference_. - :::note - KOTS v1.104.0 or later is required to deploy the Replicated SDK. You can verify the version of KOTS installed with `kubectl kots version`. - ::: +1. Save the changes to both YAML files. -1. Complete the installation command prompts: +1. Change to the root `replicated-cli-tutorial` directory, then run the following command to verify that there are no errors in the YAML: - 1. For `Enter the namespace to deploy to`, enter `grafana`. + ``` + replicated release lint --yaml-dir=manifests + ``` - 1. For `Enter a new password to be used for the Admin Console`, provide a password to access the Admin Console. +1. Create a new release and promote it to the Unstable channel: - When the Admin Console is ready, the command prints the URL where you can access the Admin Console. At this point, the KOTS CLI is installed and the Admin Console is running, but the application is not yet deployed. + ``` + replicated release create --auto + ``` - **Example output:** + **Example output**: - ```bash - Enter the namespace to deploy to: grafana - • Deploying Admin Console - • Creating namespace ✓ - • Waiting for datastore to be ready ✓ - Enter a new password for the Admin Console (6+ characters): •••••••• - • Waiting for Admin Console to be ready ✓ - - • Press Ctrl+C to exit - • Go to http://localhost:8800 to access the Admin Console + ``` + • Reading manifests from ./manifests ✓ + • Creating Release ✓ + • SEQUENCE: 2 + • Promoting ✓ + • Channel 2GxpUm7lyB2g0ramqUXqjpLHzK0 successfully set to release 2 ``` -1. With the port forward running, go to `http://localhost:8800` in a browser to access the Admin Console. +1. Type `y` and press **Enter** to continue with the defaults. -1. On the login page, enter the password that you created for the Admin Console. + **Example output**: -1. On the license page, select the license file that you downloaded previously and click **Upload license**. + ``` + RULE TYPE FILENAME LINE MESSAGE -1. On the **Configure Grafana** page, enter a username and password. You will use these credentials to log in to Grafana. + • Reading manifests from ./manifests ✓ + • Creating Release ✓ + • SEQUENCE: 2 + • Promoting ✓ + • Channel 2GmYFUFzj8JOSLYw0jAKKJKFua8 successfully set to release 2 + ``` - ![Admin Console config page with username and password fields](/images/grafana-config.png) + The release is created and promoted to the Unstable channel with `SEQUENCE: 2`. - [View a larger version of this image](/images/grafana-config.png) +1. Verify that the release was promoted to the Unstable channel: -1. Click **Continue**. + ``` + replicated release ls + ``` + **Example output**: - The Admin Console dashboard opens. The application status changes from Missing to Unavailable while the `grafana` Deployment is being created. 
+ ``` + SEQUENCE CREATED EDITED ACTIVE_CHANNELS + 2 2022-11-03T19:16:24Z 0001-01-01T00:00:00Z Unstable + 1 2022-11-03T18:49:13Z 0001-01-01T00:00:00Z + ``` - ![Admin Console dashboard showing unavailable application status](/images/grafana-unavailable.png) +## Next Step - [View a larger version of this image](/images/grafana-unavailable.png) +Continue to [Step 9: Update the Application](tutorial-cli-update-app) to return to the Admin Console and update the application to the new version that you promoted. -1. On the command line, press Ctrl+C to exit the port forward. +================ +File: docs/vendor/tutorial-cli-create-release.mdx +================ +# Step 4: Create a Release -1. Watch for the `grafana` Deployment to become ready: +Now that you have the manifest files for the sample Kubernetes application, you can create a release for the `cli-tutorial` application and promote the release to the Unstable channel. + +By default, the Vendor Portal includes Unstable, Beta, and Stable release channels. The Unstable channel is intended for software vendors to use for internal testing, before promoting a release to the Beta or Stable channels for distribution to customers. For more information about channels, see [About Channels and Releases](releases-about). + +To create and promote a release to the Unstable channel: + +1. From the `replicated-cli-tutorial` directory, lint the application manifest files and ensure that there are no errors in the YAML: ``` - kubectl get deploy grafana --namespace grafana --watch + replicated release lint --yaml-dir=manifests ``` -1. After the Deployment is ready, run the following command to confirm that the `grafana-admin` Secret was updated with the new password that you created on the **Configure Grafana** page: + If there are no errors, an empty list is displayed with a zero exit code: - ``` - echo "Password: $(kubectl get secret grafana-admin --namespace grafana -o jsonpath="{.data.GF_SECURITY_ADMIN_PASSWORD}" | base64 -d)" - ``` + ```text + RULE TYPE FILENAME LINE MESSAGE + ``` - The ouput of this command displays the password that you created. + For a complete list of the possible error, warning, and informational messages that can appear in the output of the `release lint` command, see [Linter Rules](/reference/linter). -1. Start the port foward again to access the Admin Console: +1. Initialize the project as a Git repository: - ``` - kubectl kots admin-console --namespace grafana - ``` + ``` + git init + git add . + git commit -m "Initial Commit: CLI Tutorial" + ``` -1. Go to `http://localhost:8800` to open the Admin Console. + Initializing the project as a Git repository allows you to track your history. The Replicated CLI also reads Git metadata to help with the generation of release metadata, such as version labels. - On the Admin Console dashboard, the application status is now displayed as Ready: +1. From the `replicated-cli-tutorial` directory, create a release with the default settings: - ![Admin console dashboard showing ready application status](/images/grafana-ready.png) + ``` + replicated release create --auto + ``` - [View a larger version of this image](/images/grafana-ready.png) + The `--auto` flag generates release notes and metadata based on the Git status. -1. Click **Open App** to open the Grafana login page in a browser. + **Example output:** - Grafana login webpage + ``` + • Reading Environment ✓ - [View a larger version of this image](/images/grafana-login.png) + Prepared to create release with defaults: -1. 
On the Grafana login page, enter the username and password that you created on the **Configure Grafana** page. Confirm that you can log in to the application to access the Grafana dashboard: + yaml-dir "./manifests" + promote "Unstable" + version "Unstable-ba710e5" + release-notes "CLI release of master triggered by exampleusername [SHA: d4173a4] [31 Oct 22 08:51 MDT]" + ensure-channel true + lint-release true - Grafana dashboard + Create with these properties? [Y/n] + ``` - [View a larger version of this image](/images/grafana-dashboard.png) +1. Type `y` and press **Enter** to confirm the prompt. -1. On the command line, press Ctrl+C to exit the port forward. + **Example output:** -1. Uninstall the Grafana application from your cluster: + ```text + • Reading manifests from ./manifests ✓ + • Creating Release ✓ + • SEQUENCE: 1 + • Promoting ✓ + • Channel VEr0nhJBBUdaWpPvOIK-SOryKZEwa3Mg successfully set to release 1 + ``` + The release is created and promoted to the Unstable channel. + +1. Verify that the release was promoted to the Unstable channel: - ```bash - kubectl kots remove $REPLICATED_APP --namespace grafana --undeploy ``` - **Example output**: + replicated release ls ``` - • Removing application grafana-python reference from Admin Console and deleting associated resources from the cluster ✓ - • Application grafana-python has been removed + **Example output:** + + ```text + SEQUENCE CREATED EDITED ACTIVE_CHANNELS + 1 2022-10-31T14:55:35Z 0001-01-01T00:00:00Z Unstable ``` -1. Remove the Admin Console from the cluster: +## Next Step - 1. Delete the namespace where the Admin Console is installed: +Continue to [Step 5: Create a Customer](tutorial-cli-create-customer) to create a customer license file that you will upload when installing the application. - ``` - kubectl delete namespace grafana - ``` - 1. Delete the Admin Console ClusterRole and ClusterRoleBinding: +================ +File: docs/vendor/tutorial-cli-deploy-app.mdx +================ +# Step 7: Configure the Application - ``` - kubectl delete clusterrole kotsadm-role - ``` - ``` - kubectl delete clusterrolebinding kotsadm-rolebinding - ``` +After you install KOTS, you can log in to the KOTS Admin Console. This procedure shows you how to make a configuration change for the application from the Admin Console, which is a typical task performed by end users. -## Next Step +To configure the application: -Congratulations! As part of this tutorial, you used the KOTS Config custom resource to define a configuration page in the Admin Console. You also used the KOTS HelmChart custom resource and KOTS ConfigOption template function to override the default Grafana login credentials with a user-supplied username and password. +1. Access the Admin Console using `https://localhost:8800` if the installation script is still running. Otherwise, run the following command to access the Admin Console: -To learn more about how to customize the Config custom resource to create configuration fields for your application, see [Config](/reference/custom-resource-config). + ```bash + kubectl kots admin-console --namespace NAMESPACE + ``` -## Related Topics + Replace `NAMESPACE` with the namespace where KOTS is installed. -* [kots install](/reference/kots-cli-install/) -* [Installing the KOTS CLI](/reference/kots-cli-getting-started/) -* [Installing an Application](/enterprise/installing-overview) -* [Deleting the Admin Console and Removing Applications](/enterprise/delete-admin-console) +1. 
Enter the password that you created in [Step 6: Install KOTS and the Application](tutorial-cli-install-app-manager) to log in to the Admin Console. -================ -File: docs/vendor/tutorial-config-package-chart.md -================ -import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" -import UnauthorizedError from "../partials/replicated-sdk/_401-unauthorized.mdx" + The Admin Console dashboard opens. On the Admin Console **Dashboard** tab, users can take various actions, including viewing the application status, opening the application, checking for application updates, syncing their license, and setting up application monitoring on the cluster with Prometheus. + + ![Admin Console app dashboard](/images/tutorials/tutorial-admin-console-dashboard.png) + +1. On the **Config** tab, select the **Customize Text Inputs** checkbox. In the **Text Example** field, enter any text. For example, `Hello`. -# Step 3: Package the Helm Chart + ![Admin Console configuration tab](/images/tutorials/tutorial-install-config-tab.png) -Next, add the Replicated SDK as a dependency of the Helm chart and then package the chart into a `.tgz` archive. The purpose of this step is to prepare the Helm chart to be added to a release. + This page displays configuration settings that are specific to the application. Software vendors define the fields that are displayed on this page in the KOTS Config custom resource. For more information, see [Config](/reference/custom-resource-config) in _Reference_. -To add the Replicated SDK and package the Helm chart: +1. Click **Save config**. In the dialog that opens, click **Go to updated version**. -1. In your local file system, go to the `grafana` directory that was created as part of [Step 1: Get the Sample Chart and Test](tutorial-config-get-chart). + The **Version history** tab opens. -1. In the `Chart.yaml` file, add the Replicated SDK as a dependency: +1. Click **Deploy** for the new version. Then click **Yes, deploy** in the confirmation dialog. - + ![Admin Console configuration tab](/images/tutorials/tutorial-install-version-history.png) -1. Update dependencies and package the Helm chart to a `.tgz` chart archive: +1. Click **Open App** to view the application in your browser. - ```bash - helm package . --dependency-update - ``` - + ![web page that displays text](/images/tutorials/tutorial-open-app.png) -## Next Step + Notice the text that you entered previously on the configuration page is displayed on the screen. -Create a release using the Helm chart archive. See [Step 4: Add the Chart Archive to a Release](tutorial-config-create-release). + :::note + If you do not see the new text, refresh your browser. + ::: -## Related Topics +## Next Step -* [About the Replicated SDK](/vendor/replicated-sdk-overview) -* [Helm Package](https://helm.sh/docs/helm/helm_package/) +Continue to [Step 8: Create a New Version](tutorial-cli-create-new-version) to make a change to one of the manifest files for the `cli-tutorial` application, then use the Replicated CLI to create and promote a new release. ================ -File: docs/vendor/tutorial-config-setup.md +File: docs/vendor/tutorial-cli-install-app-manager.mdx ================ -# Introduction and Setup +# Step 6: Install KOTS and the Application -This topic provides a summary of the goals and outcomes for the tutorial and also lists the prerequisites to set up your environment before you begin. +The next step is to test the installation process for the application release that you promoted. 
Using the KOTS CLI, you will install KOTS and the sample application in your cluster. -## Summary +KOTS is the Replicated component that allows your users to install, manage, and upgrade your application. Users can interact with KOTS through the Admin Console or through the KOTS CLI. -This tutorial introduces you to mapping user-supplied values from the Replicated KOTS Admin Console configuration page to a Helm chart `values.yaml` file. +To install KOTS and the application: -In this tutorial, you use a sample Helm chart to learn how to: +1. From the `replicated-cli-tutorial` directory, run the following command to get the installation commands for the Unstable channel, where you promoted the release for the `cli-tutorial` application: -* Define a user-facing application configuration page in the KOTS Admin Console -* Set Helm chart values with the user-supplied values from the Admin Console configuration page + ``` + replicated channel inspect Unstable + ``` -## Set Up the Environment + **Example output:** -Before you begin, ensure that you have kubectl access to a Kubernetes cluster. You can use any cloud provider or tool that you prefer to create a cluster, such as [Replicated Compatibility Matrix](/vendor/testing-how-to), Google Kubernetes Engine (GKE), or minikube. + ``` + ID: 2GmYFUFzj8JOSLYw0jAKKJKFua8 + NAME: Unstable + DESCRIPTION: + RELEASE: 1 + VERSION: Unstable-d4173a4 + EXISTING: -## Next Step + curl -fsSL https://kots.io/install | bash + kubectl kots install cli-tutorial/unstable -Get the sample Bitnami Helm chart and test installation with the Helm CLI. See [Step 1: Get the Sample Chart and Test](/vendor/tutorial-config-get-chart) + EMBEDDED: -================ -File: docs/vendor/tutorial-ecr-private-images.md -================ -# Tutorial: Using ECR for Private Images + curl -fsSL https://k8s.kurl.sh/cli-tutorial-unstable | sudo bash -## Objective + AIRGAP: -The purpose of this tutorial is to walk you through how to configure Replicated KOTS to pull images from a private registry in Amazon's Elastic Container Registry (ECR). This tutorial demonstrates the differences between using public and private images with KOTS. + curl -fSL -o cli-tutorial-unstable.tar.gz https://k8s.kurl.sh/bundle/cli-tutorial-unstable.tar.gz + # ... scp or sneakernet cli-tutorial-unstable.tar.gz to airgapped machine, then + tar xvf cli-tutorial-unstable.tar.gz + sudo bash ./install.sh airgap + ``` + This command prints information about the channel, including the commands for installing in: + * An existing cluster + * An _embedded cluster_ created by Replicated kURL + * An air gap cluster that is not connected to the internet -## Prerequisites +1. If you have not already, configure kubectl access to the cluster you provisioned as part of [Set Up the Environment](tutorial-cli-setup#set-up-the-environment). For more information about setting the context for kubectl, see [Command line tool (kubectl)](https://kubernetes.io/docs/reference/kubectl/) in the Kubernetes documentation. -* To install the application in this tutorial, you must have a virtual machine (VM) that meets the following minimum requirements: - * Ubuntu 18.04 - * At least 8 GB of RAM - * 4 CPU cores - * At least 40GB of disk space +1. Run the `EXISTING` installation script with the following flags to automatically upload the license file and run the preflight checks at the same time you run the installation. 
-* To pull a public NGINX container and push it to a private repository in ECR as part of this tutorial, you must have the following: - * An ECR Repository - * An AWS account to use with Docker to pull and push the public NGINX image to the ECR repository. The AWS account must be able to create a read-only user. - * Docker - * The AWS CLI + **Example:** -## Overview + ``` + curl -fsSL https://kots.io/install | bash + kubectl kots install cli-tutorial/unstable \ + --license-file ./LICENSE_YAML \ + --shared-password PASSWORD \ + --namespace NAMESPACE + ``` -The guide is divided into the following steps: + Replace: - 1. [Set Up the Testing Environment](#set-up) + - `LICENSE_YAML` with the local path to your license file. + - `PASSWORD` with a password to access the Admin Console. + - `NAMESPACE` with the namespace where KOTS and application will be installed. - 2. [Configure Private Registries in Replicated](#2-configure-private-registries-in-replicated) + When the Admin Console is ready, the script prints the `https://localhost:8800` URL where you can access the Admin Console and the `http://localhost:8888` URL where you can access the application. - 3. [Update Definition Files](#3-update-definition-files) + **Example output**: - 4. [Install the New Version](#4-install-the-new-version) + ``` + • Deploying Admin Console + • Creating namespace ✓ + • Waiting for datastore to be ready ✓ + • Waiting for Admin Console to be ready ✓ + • Waiting for installation to complete ✓ + • Waiting for preflight checks to complete ✓ -## 1. Set Up the Testing Environment {#set-up} + • Press Ctrl+C to exit + • Go to http://localhost:8800 to access the Admin Console -We are going to use the default NGINX deployment to create our application and then update it to pull the same container from a private repository in ECR and note the differences. + • Go to http://localhost:8888 to access the application + ``` -### Create Sample Application and deploy the first release +1. Verify that the Pods are running for the example NGNIX service and for kotsadm: -In this section, we cover at a high level the steps to create a new application and install it on a VM. + ```bash + kubectl get pods --namespace NAMESPACE + ``` -To create our sample application follow these steps: + Replace `NAMESPACE` with the namespace where KOTS and application was installed. -* Create a new application in the Replicated [vendor portal](https://vendor.replicated.com) and call it 'MySampleECRApp'. -* Create the first release using the default definition files and promote it to the *unstable* channel. -* Create a customer, assign it to the *Unstable* channel and download the license file after creating the customer. -* Install the application to a VM + **Example output:** -Log in to the Replicated admin console. To inspect what was deployed, look at the files under **View Files** from the admin console. -In the Upstream files (files from the release created in the vendor portal) show that we are pulling the public image. + ```NAME READY STATUS RESTARTS AGE + kotsadm-7ccc8586b8-n7vf6 1/1 Running 0 12m + kotsadm-minio-0 1/1 Running 0 17m + kotsadm-rqlite-0 1/1 Running 0 17m + nginx-688f4b5d44-8s5v7 1/1 Running 0 11m + ``` -![admin-console-view-files-upstream-release1](/images/guides/kots/priv-reg-ecr-ups-files-rel1.png) +## Next Step -We can further validate this if we switch back to the terminal window on the VM where we installed the application. 
If we run `kubectl describe pod [pod-name]` on the NGINX pod, we can confirm that it was in fact pulled from the public repository.

![admin-console-kubectl-describe-release2](/images/guides/kots/priv-reg-ecr-kubctl-describe-rel1.png)

Now that we have the basic application installed, we are going to pull the same image, but from an ECR repository.

### Pull Public Image and Push to ECR

To keep the changes to a minimum and only focus on using a private registry, we are going to pull the public NGINX container (as specified in the `deployment.yaml` file) to our local environment, and then push it to a repository in ECR.
To use `docker login` with ECR, we will need to configure the AWS CLI with the AWS Access Key ID and AWS Secret Key for this user.

Let's start by pulling the public image:

```shell
$ docker pull nginx
```

You should have an output similar to this:

```shell
Using default tag: latest
latest: Pulling from library/nginx
d121f8d1c412: Pull complete
ebd81fc8c071: Pull complete
655316c160af: Pull complete
d15953c0e0f8: Pull complete
2ee525c5c3cc: Pull complete
Digest: sha256:c628b67d21744fce822d22fdcc0389f6bd763daac23a6b77147d0712ea7102d0
Status: Downloaded newer image for nginx:latest
docker.io/library/nginx:latest
```

Next, log in to ECR and push this container.
To use `docker login` with ECR, [install the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) and [configure it](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html) if not already done.
As part of this, we will need to provide the AWS Access Key ID and AWS Secret Key for a user that has permissions to create and push images to the repository. For more information about working with containers and ECR in the AWS CLI, see [Using Amazon ECR with the AWS CLI](https://docs.aws.amazon.com/AmazonECR/latest/userguide/getting-started-cli.html).

Just like with any other private registry, we need to know the registry endpoint to pass to the `docker login` command.
The syntax is as follows:

```shell
docker login [some.private.registry]:[port]
```
In this case, the endpoint is **[some.private.registry]:[port]**.

To determine the endpoint for ECR, log in to the AWS console and search for 'ECR', which should bring up Elastic Container Registry as an option as shown below.

![search-4-ecr](/images/guides/kots/priv-reg-ecr-search-4-ecr.png)

Select 'Elastic Container Registry' from the options in the dropdown to get to the list of repositories.

![ecr-repos](/images/guides/kots/priv-reg-ecr-repos.png)

As you can see from the screenshot above, the endpoints for each repository are listed under the URI column.
For the purpose of this guide, we will push the NGINX image to the **demo-apps** repository.

To determine the endpoint to use in the login command, use the URL without the repository name.

When logging in to ECR, use the AWS CLI to supply the user credentials.
For example, to log in to ECR, we run the following command:

```shell
$ aws ecr get-login-password --region us-east-2 | docker login --username AWS --password-stdin 4999999999999.dkr.ecr.us-east-2.amazonaws.com
```

A successful login will display a `Login Succeeded` message.
To push this image to our private repository, tag the image.
The new tag will consist of:

`[registry endpoint]/[repository]/image`

For example, to tag the public NGINX image, we run the following command:

```shell
$ docker tag nginx 4999999999999.dkr.ecr.us-east-2.amazonaws.com/demo-apps/nginx
```
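Before pushing, it can be worth confirming that the new tag was applied locally. This quick check is a suggestion rather than part of the original guide, and it assumes the same example registry URL used above; `docker images` filters by repository name:

```shell
# List local images matching the newly tagged repository name
$ docker images 4999999999999.dkr.ecr.us-east-2.amazonaws.com/demo-apps/nginx
```

If the tag was applied, the image appears in the list with the `latest` tag.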
Assuming the tagging is successful, push the container to our ECR repository:

```shell
$ docker push 4999999999999.dkr.ecr.us-east-2.amazonaws.com/demo-apps/nginx
The push refers to repository [4999999999999.dkr.ecr.us-east-2.amazonaws.com/demo-apps/nginx]
908cf8238301: Pushed
eabfa4cd2d12: Pushed
60c688e8765e: Pushed
f431d0917d41: Pushed
07cab4339852: Pushed
latest: digest: sha256:794275d96b4ab96eeb954728a7bf11156570e8372ecd5ed0cbc7280313a27d19 size: 1362

```
Our testing environment is all set.
We are now ready to update Replicated to use the private registry.

* * *

## 2. Configure Private Registries in Replicated

To configure a private registry in Replicated, we need to provide the same information we needed to log in to ECR in the previous step:

- **Endpoint**
- **Username**
- **Password**

The difference is that we'll use a different user than the one we used previously. Since Replicated only needs to pull images, it is a best practice to create a read-only user for this specific purpose.

### Determine the endpoint

The endpoint should be the same as the one we provided in the previous step.

### Setting up the Service Account User

Replicated only needs access to pull images from the private registry. Let's create a new user in AWS:

![aws-new-user](/images/guides/kots/priv-reg-ecr-new-user.png)

As far as permissions go, there are a couple of options, depending on scope of access.
If exposing all images to Replicated is an acceptable solution, the Amazon-provided [AmazonEC2ContainerRegistryReadOnly](https://docs.aws.amazon.com/AmazonECR/latest/userguide/ecr_managed_policies.html#AmazonEC2ContainerRegistryReadOnly) policy will work:

```shell
{
  "Version": "2012-10-17",
  "Statement": [{
    "Effect": "Allow",
    "Action": [
      "ecr:GetAuthorizationToken",
      "ecr:BatchCheckLayerAvailability",
      "ecr:GetDownloadUrlForLayer",
      "ecr:GetRepositoryPolicy",
      "ecr:DescribeRepositories",
      "ecr:ListImages",
      "ecr:DescribeImages",
      "ecr:BatchGetImage"
    ],
    "Resource": "*"
  }]
}
```
If you wish to limit Replicated to only certain images, this policy should be used instead. Note that the repository-scoped actions and the `ecr:GetAuthorizationToken` action (which must apply to all resources) are separate statements in a single policy:

```shell
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "ecr:BatchCheckLayerAvailability",
        "ecr:GetDownloadUrlForLayer",
        "ecr:GetRepositoryPolicy",
        "ecr:DescribeRepositories",
        "ecr:ListImages",
        "ecr:DescribeImages",
        "ecr:BatchGetImage"
      ],
      "Resource": [
        "arn:aws:ecr:us-east-1:[account-id]:repository/[repository-name]",
        "arn:aws:ecr:us-east-1:[account-id]:repository/[repository-name]"
      ]
    },
    {
      "Effect": "Allow",
      "Action": [
        "ecr:GetAuthorizationToken"
      ],
      "Resource": "*"
    }
  ]
}
```

We will need the AWS Access Key ID and AWS Secret Key in the next section as these will map to the *Username* and *Password* fields. You can obtain these as you create the user or after the user has been created.

### Enter Registry Information in Replicated

First, we must link Replicated with the registry. To do this, click on **Add External Registry** from the *Images* tab.

![Add External Registry](/images/add-external-registry.png)

[View a larger version of this image](/images/add-external-registry.png)

The values for the fields are:

**Endpoint:**
Enter the same URL used to log in to ECR.
For example, to link to the same registry as the one in the previous section, we would enter *4999999999999.dkr.ecr.us-east-2.amazonaws.com*.
-**Username:** -Enter the AWS Access Key ID for the user created in the [Setting Up the Service Account User](#setting-up-the-service-account-user) section. +## Related Topics -**Password:** -Enter the AWS Secret Key for the user created in the [Setting Up the Service Account User](#setting-up-the-service-account-user) section. + -* * * +================ +File: docs/vendor/tutorial-cli-update-app.mdx +================ +# Step 9: Update the Application -## 3. Update Definition Files +To test the new release that you promoted, return to the Admin Console in a browser to update the application. -Last step is to update our definition manifest to pull the image from the ECR repository. -To do this, we'll update the `deployment.yaml` file by adding the ECR registry URL to the `image` value. -Below is an example using the registry URL used in this guide. +To update the application: -```diff - spec: - containers: - - name: nginx -- image: nginx -+ image: 4999999999999.dkr.ecr.us-east-2.amazonaws.com/demo-apps/nginx - envFrom: -``` +1. Access the KOTS Admin Console using `https://localhost:8800` if the installation script is still running. Otherwise, run the following command to access the Admin Console: -Save your changes and create the new release and promote it to the *Unstable* channel. + ```bash + kubectl kots admin-console --namespace NAMESPACE + ``` -* * * + Replace `NAMESPACE` with the namespace where the Admin Console is installed. -## 4. Install the New Version +1. Go to the Version history page, and click **Check for update**. -To deploy the new version of the application, go back to the admin console and select the *Version History* tab. -Click on **Check for Updates** and then **Deploy** when the new version is listed. -To confirm that the new version was in fact installed, it should look like the screenshot below. + ![Admin Console version history page](/images/tutorials/tutorial-check-for-update.png) -![version-history](/images/guides/kots/priv-reg-ecr-version-history.png) + The Admin Console loads the new release that you promoted. -Now, we can inspect to see the changes in the definition files. -Looking at the `deployment.yaml` upstream file, we see the image path as we set it in the [Update Definition Files](#3-update-definition-files) section. +1. Click **Deploy**. In the dialog, click **Yes, deploy** to deploy the new version. -![admin-console-view-files-upstream-release2](/images/guides/kots/priv-reg-ecr-upstream-file-rel2.png) + ![Admin Console version history page with new version](/images/tutorials/tutorial-deploy-app.png) -Because KOTS is able to detect that it cannot pull this image anonymously, it then tries to proxy the private registries configured. Looking at the `kustomization.yaml` downstream file we can see that the image path is changed to use the Replicated proxy. +1. After the Admin Console deploys the new version, go to the **Config** page where the **Another Text Example** field that you added is displayed. -![admin-console-view-files-downstream-release2](/images/guides/kots/priv-reg-ecr-downstream-file-rel2.png) + ![Admin Console configuration page with Another Text Example field](/images/tutorials/tutorial-new-config-item.png) -The install of the new version should have created a new pod. If we run `kubectl describe pod` on the new NGINX pod, we can confirm that the image was in fact pulled from the ECR repository. +1. In the new **Another Text Example** field, enter any text. Click **Save config**. 
-![admin-console-kubectl-describe-release2](/images/guides/kots/priv-reg-ecr-kubectl-describe-rel2.png) + The Admin Console notifies you that the configuration settings for the application have changed. -* * * + ![dialog over Admin Console configuration screen](/images/tutorials/tutorial-go-to-updated-version.png) -## Related Topics +1. In the dialog, click **Go to updated version**. -- [Connecting to an External Registry](packaging-private-images/) + The Admin Console loads the updated version on the Version history page. -- [Replicated Community Thread on AWS Roles and Permissions](https://help.replicated.com/community/t/what-are-the-minimal-aws-iam-permissions-needed-to-proxy-images-from-elastic-container-registry-ecr/267) +1. On the Version history page, click **Deploy** next to the latest version to deploy the configuration change. -- [AWS ECR Managed Policies Documentation](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecr_managed_policies.html) + ![Admin Console version history page with new version](/images/tutorials/tutorial-deploy-config-change.png) + +1. Go to the **Dashboard** page and click **Open App**. The application displays the text that you added to the field. + + ![web page with text from the new configuration field](/images/tutorials/tutorial-updated-app.png) + + :::note + If you do not see the new text, refresh your browser. + ::: + +## Summary + +Congratulations! As part of this tutorial, you: +* Created and promoted a release for a Kubernetes application using the Replicated CLI +* Installed the application in a Kubernetes cluster +* Edited the manifest files for the application, adding a new configuration field and using template functions to reference the field +* Promoted a new release with your changes +* Used the Admin Console to update the application to the latest version ================ -File: docs/vendor/tutorial-embedded-cluster-create-app.mdx +File: docs/vendor/tutorial-config-create-app.md ================ -# Step 1: Create an Application - -To begin, install the Replicated CLI and create an application in the Replicated Vendor Portal. +# Step 2: Create an Application -An _application_ is an object that has its own customers, channels, releases, license fields, and more. A single team can have more than one application. It is common for teams to have multiple applications for the purpose of onboarding, testing, and iterating. +Next, install the Replicated CLI and then create an application. To create an application: @@ -54972,10 +44815,10 @@ To create an application: ``` In the browser window that opens, complete the prompts to log in to your vendor account and authorize the CLI. -1. Create an application named `Gitea`: +1. Create an application named `Grafana`: ``` - replicated app create Gitea + replicated app create Grafana ``` 1. Set the `REPLICATED_APP` environment variable to the application that you created. This allows you to interact with the application using the Replicated CLI without needing to use the `--app` flag with every command: @@ -54987,26 +44830,26 @@ To create an application: ``` **Example output**: ``` - ID NAME SLUG SCHEDULER - 2WthxUIfGT13RlrsUx9HR7So8bR Gitea gitea-kite kots + ID NAME SLUG SCHEDULER + 2WthxUIfGT13RlrsUx9HR7So8bR Grafana grafana-python kots ``` - In the example above, the application slug is `gitea-kite`. + In the example above, the application slug is `grafana-python`. - :::note + :::info The application _slug_ is a unique string that is generated based on the application name. 
You can use the application slug to interact with the application through the Replicated CLI and the Vendor API v3. The application name and slug are often different from one another because it is possible to create more than one application with the same name. ::: 1. Set the `REPLICATED_APP` environment variable to the application slug. - **Example:** + **MacOS Example:** ``` - export REPLICATED_APP=gitea-kite + export REPLICATED_APP=grafana-python ``` ## Next Step -Add the Replicated SDK to the Helm chart and package the chart to an archive. See [Step 2: Package the Helm Chart](tutorial-embedded-cluster-package-chart). +Add the Replicated SDK to the Helm chart and package the chart to an archive. See [Step 3: Package the Helm Chart](tutorial-config-package-chart). ## Related Topics @@ -55015,11 +44858,11 @@ Add the Replicated SDK to the Helm chart and package the chart to an archive. Se * [replicated app create](/reference/replicated-cli-app-create) ================ -File: docs/vendor/tutorial-embedded-cluster-create-customer.mdx +File: docs/vendor/tutorial-config-create-customer.md ================ -# Step 4: Create an Embedded Cluster-Enabled Customer +# Step 5: Create a KOTS-Enabled Customer -After promoting the release, create a customer with the Replicated KOTS and Embedded Cluster entitlements so that you can install the release with Embedded Cluster. A _customer_ represents a single licensed user of your application. +After promoting the release, create a customer with the KOTS entitlement so that you can install the release with KOTS. To create a customer: @@ -55031,21 +44874,25 @@ To create a customer: [View a larger version of this image](/images/create-customer.png) -1. For **Customer name**, enter a name for the customer. For example, `Example Customer`. +1. For **Customer name**, enter a name for the customer. For example, `KOTS Customer`. 1. For **Channel**, select **Unstable**. This allows the customer to install releases promoted to the Unstable channel. -1. For **License type**, select **Development**. +1. For **License type**, select Development. -1. For **License options**, enable the following entitlements: - * **KOTS Install Enabled** - * **Embedded Cluster Enabled** +1. For **License options**, verify that **KOTS Install Enabled** is enabled. This is the entitlement that allows the customer to install with KOTS. 1. Click **Save Changes**. +1. On the **Manage customer** page for the customer, click **Download license**. You will use the license file to install with KOTS. + + ![Download license button on the customer page](/images/customer-download-license.png) + + [View a larger version of this image](/images/customer-download-license.png) + ## Next Step -Get the Embedded Cluster installation commands and install. See [Step 5: Install the Release on a VM](tutorial-embedded-cluster-install). +Get the KOTS installation command and install. See [Step 6: Install the Release with KOTS](tutorial-config-install-kots). ## Related Topics @@ -55053,24 +44900,24 @@ Get the Embedded Cluster installation commands and install. 
See [Step 5: Install * [Creating and Managing Customers](/vendor/releases-creating-customer) ================ -File: docs/vendor/tutorial-embedded-cluster-create-release.mdx +File: docs/vendor/tutorial-config-create-release.md ================ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -import HelmChartCr from "../partials/getting-started/_gitea-helmchart-cr-ec.mdx" -import KotsCr from "../partials/getting-started/_gitea-kots-app-cr-ec.mdx" -import K8sCr from "../partials/getting-started/_gitea-k8s-app-cr.mdx" -import EcCr from "../partials/embedded-cluster/_ec-config.mdx" +import HelmChart from "../partials/getting-started/_grafana-helmchart.mdx" +import KotsApp from "../partials/getting-started/_grafana-kots-app.mdx" +import K8sApp from "../partials/getting-started/_grafana-k8s-app.mdx" +import Config from "../partials/getting-started/_grafana-config.mdx" -# Step 3: Add the Chart Archive to a Release +# Step 4: Add the Chart Archive to a Release -Next, add the Helm chart archive to a new release for the application in the Replicated Vendor Portal. The purpose of this step is to configure a release that supports installation with Replicated Embedded Cluster. +Next, add the Helm chart archive to a new release for the application in the Replicated vendor platform. -A _release_ represents a single version of your application and contains your application files. Each release is promoted to one or more _channels_. Channels provide a way to progress releases through the software development lifecycle: from internal testing, to sharing with early-adopters, and finally to making the release generally available. +The purpose of this step is to configure a release that supports installation with KOTS. Additionally, this step defines a user-facing application configuration page that displays in the KOTS Admin Console during installation where users can set their own Grafana login credentials. To create a release: -1. In the `gitea` directory, create a subdirectory named `manifests`: +1. In the `grafana` directory, create a subdirectory named `manifests`: ``` mkdir manifests @@ -55081,61 +44928,67 @@ To create a release: 1. Move the Helm chart archive that you created to `manifests`: ``` - mv gitea-1.0.6.tgz manifests + mv grafana-9.6.5.tgz manifests ``` -1. In `manifests`, create the YAML manifests required by KOTS: - ``` - cd manifests - ``` - ``` - touch gitea.yaml kots-app.yaml k8s-app.yaml embedded-cluster.yaml - ``` +1. In the `manifests` directory, create the following YAML files to configure the release: + + ``` + cd manifests + ``` + ``` + touch kots-app.yaml k8s-app.yaml kots-config.yaml grafana.yaml + ``` -1. In each of the files that you created, paste the corresponding YAML provided in the tabs below: +1. In each file, paste the corresponding YAML provided in the tabs below: - - -
    Description
    -

    The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart. The name and chartVersion listed in the HelmChart custom resource must match the name and version of a Helm chart archive in the release. The optionalValues field sets the specified Helm values when a given conditional statement evaluates to true. In this case, if the application is installed with Embedded Cluster, then the Gitea service type is set to `NodePort` and the node port is set to `"32000"`. This will allow Gitea to be accessed from the local machine after deployment.

    -
    YAML
    - -
    - -
    Description
    -

    The KOTS Application custom resource enables features in the Replicated Admin Console such as branding, release notes, application status indicators, and custom graphs.

    The YAML below provides a name for the application to display in the Admin Console, adds a custom status informer that displays the status of the gitea Deployment resource in the Admin Console dashboard, adds a custom application icon, and adds the port where the Gitea service can be accessed so that the user can open the application after installation.

    -
    YAML
    - -
    - -
    Description
    -

    The Kubernetes Application custom resource supports functionality such as including buttons and links on the Replicated Admin Console dashboard. The YAML below adds an Open App button to the Admin Console dashboard that opens the application using the service port defined in the KOTS Application custom resource.

    -
    YAML
    - -
    - -
    Description
    -

    To install your application with Embedded Cluster, an Embedded Cluster Config must be present in the release. At minimum, the Embedded Cluster Config sets the version of Embedded Cluster that will be installed. You can also define several characteristics about the cluster.

    -
    YAML
    - -
    -
    + + +
    Description
    +

    The KOTS Application custom resource enables features in the Admin Console such as branding, release notes, port forwarding, dashboard buttons, application status indicators, and custom graphs.

    The YAML below provides a name for the application to display in the Admin Console, adds a custom status informer that displays the status of the grafana Deployment resource in the Admin Console dashboard, adds a custom application icon, and creates a port forward so that the user can open the Grafana application in a browser.

    +
    YAML
    + +
    + +
    Description
    +

    The Kubernetes Application custom resource supports functionality such as including buttons and links on the Admin Console dashboard. The YAML below adds an Open App button to the Admin Console dashboard that opens the application using the port forward configured in the KOTS Application custom resource.

    +
    YAML
    + +
    + +
    Description
    +

    The Config custom resource specifies a user-facing configuration page in the Admin Console designed for collecting application configuration from users. The YAML below creates "Admin User" and "Admin Password" fields that will be shown to the user on the configuration page during installation. These fields will be used to set the login credentials for Grafana.

    +
    YAML
    + +
    + +
    Description
    +

    The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart.

    +

    The HelmChart custom resource below contains a values key, which creates a mapping to the Grafana values.yaml file. In this case, the values.admin.user and values.admin.password fields map to admin.user and admin.password in the Grafana values.yaml file.

    +

    During installation, KOTS renders the ConfigOption template functions in the values.admin.user and values.admin.password fields and then sets the corresponding Grafana values accordingly.

    +
    YAML
    + +
    +
    -1. Lint: +1. From the `manifests` directory, lint the YAML files to confirm that there are no errors: - ```bash - replicated release lint --yaml-dir . - ``` - ```bash - RULE TYPE FILENAME LINE MESSAGE - config-spec warn Missing config spec - preflight-spec warn Missing preflight spec - troubleshoot-spec warn Missing troubleshoot spec - nonexistent-status-informer-object warn kots-app.yaml 8 Status informer points to a nonexistent kubernetes object. If this is a Helm resource, this warning can be ignored. - ``` - :::note - You can ignore any warning messages for the purpose of this tutorial. - ::: + ``` + replicated release lint --yaml-dir . + ``` + `--yaml-dir` is the path to the directory that contains the Helm chart archive and the manifest files required by KOTS. + + **Example output**: + + ``` + RULE TYPE FILENAME LINE MESSAGE + preflight-spec warn Missing preflight spec + troubleshoot-spec warn Missing troubleshoot spec + nonexistent-status-informer-object warn kots-app.yaml 8 Status informer points to a nonexistent kubernetes object. If this is a Helm resource, this warning can be ignored. + ``` + :::note + The output includes warning messages that list missing manifest files. These manifests control additional KOTS functionality and can be ignored for the purpose of this tutorial. The `nonexistent-status-informer-object` warning can also be ignored because the `grafana` Deployment resource that was added as a status informer in the KOTS Application custom resource is a Helm resource. + ::: 1. Create a release: @@ -55153,190 +45006,332 @@ To create a release: The release that you created is listed under **All releases**. - ![Release page in the Vendor Portal with one release](/images/gitea-ec-release-seq-1.png) - - [View a larger version of this image](/images/gitea-ec-release-seq-1.png) - -1. Click the dot menu then **Edit release** to view the files in the release. + ![Release page in the Vendor Portal with one release](/images/grafana-release-seq-1.png) - ![dot menu](/images/gitea-ec-release-edit-button.png) + [View a larger version of this image](/images/grafana-release-seq-1.png) - [View a larger version of this image](/images/gitea-ec-release-edit-button.png) +1. Click **Edit release** to view the files in the release. - In the release editor, you can see the manifest files that you created, the Helm chart `.tgz` archive, and the `Chart.yaml` and `values.yaml` files for the Gitea Helm chart. You can also see the same warning messages that were displayed in the CLI output. + In the release editor, you can see the manifest files that you created, the Helm chart `.tgz` archive, and the `Chart.yaml` and `values.yaml` files for the Grafana Helm chart. You can also see the same warning messages that were displayed in the CLI output. - ![Edit Release page in the Vendor Portal](/images/gitea-ec-release-edit-seq-1.png) + ![Edit Release page in the Vendor Portal](/images/grafana-edit-release-seq-1.png) - [View a larger version of this image](/images/gitea-ec-release-edit-seq-1.png) + [View a larger version of this image](/images/grafana-edit-release-seq-1.png) 1. At the top of the page, click **Promote**. 1. In the dialog, for **Which channels you would like to promote this release to?**, select **Unstable**. Unstable is a default channel that is intended for use with internal testing. Click **Promote**. 
- Promote release dialog + Promote release dialog - [View a larger version of this image](/images/release-promote.png) + [View a larger version of this image](/images/release-promote.png) ## Next Step -Create a customer with the Embedded Cluster entitlement so that you can install the release using Embedded Cluster. See [Step 4: Create an Embedded Cluster-Enabled Customer](tutorial-embedded-cluster-create-customer). +Create a customer with the KOTS entitlement so that you can install the release in your cluster using Replicated KOTS. See [Step 5: Create a KOTS-Enabled Customer](tutorial-config-create-customer). ## Related Topics * [About Channels and Releases](/vendor/releases-about) * [Configuring the HelmChart Custom Resource](/vendor/helm-native-v2-using) -* [Embedded Cluster Config](/reference/embedded-config) -* [Setting Helm Values with KOTS](/vendor/helm-optional-value-keys) +* [Config Custom Resource](/reference/custom-resource-config) +* [Manipulating Helm Chart Values with KOTS](/vendor/helm-optional-value-keys) ================ -File: docs/vendor/tutorial-embedded-cluster-install.mdx +File: docs/vendor/tutorial-config-get-chart.md ================ -import KotsVerReq from "../partials/replicated-sdk/_kots-version-req.mdx" +# Step 1: Get the Sample Chart and Test -# Step 5: Install the Release on a VM +To begin, get the sample Grafana Helm chart from Bitnami, install the chart in your cluster using the Helm CLI, and then uninstall. The purpose of this step is to confirm that you can successfully install and access the application before adding the chart to a release in the Replicated vendor platform. -Next, get the customer-specific Embedded Cluster installation commands and then install the release on a Linux VM. +To get the sample Grafana chart and test installation: -To install the release with Embedded Cluster: +1. Run the following command to pull and untar version 9.6.5 of the Bitnami Grafana Helm chart: -1. In the [Vendor Portal](https://vendor.replicated.com), go to **Customers**. Click on the name of the customer you created. + ``` + helm pull --untar oci://registry-1.docker.io/bitnamicharts/grafana --version 9.6.5 + ``` + For more information about this chart, see the [bitnami/grafana](https://github.com/bitnami/charts/tree/main/bitnami/grafana) repository in GitHub. -1. Click **Install instructions > Embedded cluster**. +1. Change to the new `grafana` directory that was created: + ``` + cd grafana + ``` +1. View the files in the directory: + ``` + ls + ``` + The directory contains the following files: + ``` + Chart.lock Chart.yaml README.md charts templates values.yaml + ``` +1. Install the chart in your cluster: - Customer install instructions dropdown + ``` + helm install grafana . --namespace grafana --create-namespace + ``` + To view the full installation instructions from Bitnami, see [Installing the Chart](https://github.com/bitnami/charts/blob/main/bitnami/grafana/README.md#installing-the-chart) in the `bitnami/grafana` repository. - [View a larger version of this image](/images/customer-install-instructions-dropdown.png) + After running the installation command, the following output is displayed: - The **Embedded cluster install instructions** dialog opens. 
+ ``` + NAME: grafana + LAST DEPLOYED: Thu Dec 14 14:54:50 2023 + NAMESPACE: grafana + STATUS: deployed + REVISION: 1 + TEST SUITE: None + NOTES: + CHART NAME: grafana + CHART VERSION: 9.6.5 + APP VERSION: 10.2.2 - Embedded Cluster install instructions dialog + ** Please be patient while the chart is being deployed ** - [View a larger version of this image](/images/embedded-cluster-install-dialog-latest.png) + 1. Get the application URL by running these commands: + echo "Browse to http://127.0.0.1:8080" + kubectl port-forward svc/grafana 8080:3000 & -1. On the command line, SSH onto your Linux VM. + 2. Get the admin credentials: -1. Run the first command in the **Embedded cluster install instructions** dialog to download the latest release. + echo "User: admin" + echo "Password: $(kubectl get secret grafana-admin --namespace grafana -o jsonpath="{.data.GF_SECURITY_ADMIN_PASSWORD}" | base64 -d)" + # Note: Do not include grafana.validateValues.database here. See https://github.com/bitnami/charts/issues/20629 + ``` -1. Run the second command to extract the release. +1. Watch the `grafana` Deployment until it is ready: -1. Run the third command to install the release. + ``` + kubectl get deploy grafana --namespace grafana --watch + ``` -1. When prompted, enter a password for accessing the KOTS Admin Console. +1. When the Deployment is created, run the commands provided in the output of the installation command to get the Grafana login credentials: - The installation command takes a few minutes to complete. + ``` + echo "User: admin" + echo "Password: $(kubectl get secret grafana-admin --namespace grafana -o jsonpath="{.data.GF_SECURITY_ADMIN_PASSWORD}" | base64 -d)" + ``` -1. When the installation command completes, go to the URL provided in the output to log in to the Admin Console. +1. Run the commands provided in the ouptut of the installation command to get the Grafana URL: - **Example output:** + ``` + echo "Browse to http://127.0.0.1:8080" + kubectl port-forward svc/grafana 8080:3000 --namespace grafana + ``` - ```bash - ✔ Host files materialized - ? Enter an Admin Console password: ******** - ? Confirm password: ******** - ✔ Node installation finished - ✔ Storage is ready! - ✔ Embedded Cluster Operator is ready! - ✔ Admin Console is ready! - ✔ Finished! - Visit the admin console to configure and install gitea-kite: http://104.155.145.60:30000 - ``` + :::note + Include `--namespace grafana` in the `kubectl port-forward` command. + ::: - At this point, the cluster is provisioned and the KOTS Admin Console is deployed, but the application is not yet installed. +1. In a browser, go to the URL to open the Grafana login page: -1. Bypass the browser TLS warning by clicking **Continue to Setup**. + Grafana login page -1. Click **Advanced > Proceed**. + [View a larger version of this image](/images/grafana-login.png) -1. On the **HTTPS for the Gitea Admin Console** page, select **Self-signed** and click **Continue**. +1. Log in using the credentials provided to open the Grafana dashboard: -1. On the login page, enter the Admin Console password that you created during installation and click **Log in**. + Grafana dashboard -1. On the **Nodes** page, you can view details about the VM where you installed, including its node role, status, CPU, and memory. Users can also optionally add additional nodes on this page before deploying the application. Click **Continue**. + [View a larger version of this image](/images/grafana-dashboard.png) - The Admin Console dashboard opens. - -1. 
In the **Version** section, for version `0.1.0`, click **Deploy** then **Yes, Deploy**. +1. Uninstall the Helm chart: - The application status changes from Missing to Unavailable while the `gitea` Deployment is being created. + ``` + helm uninstall grafana --namespace grafana + ``` + This command removes all the Kubernetes resources associated with the chart and uninstalls the `grafana` release. -1. After a few minutes when the application status is Ready, click **Open App** to view the Gitea application in a browser: +1. Delete the namespace: - ![Admin console dashboard showing ready status](/images/gitea-ec-ready.png) + ``` + kubectl delete namespace grafana + ``` - [View a larger version of this image](/images/gitea-ec-ready.png) +## Next Step - Gitea app landing page +Log in to the Vendor Portal and create an application. See [Step 2: Create an Application](tutorial-config-create-app). - [View a larger version of this image](/images/gitea-app.png) +## Related Topics -1. In another browser window, open the [Vendor Portal](https://vendor.replicated.com/) and go to **Customers**. Select the customer that you created. +* [Helm Install](https://helm.sh/docs/helm/helm_install/) +* [Helm Uninstall](https://helm.sh/docs/helm/helm_uninstall/) +* [Helm Create](https://helm.sh/docs/helm/helm_create/) +* [Helm Package](https://helm.sh/docs/helm/helm_package/) +* [bitnami/gitea](https://github.com/bitnami/charts/blob/main/bitnami/gitea) - On the **Reporting** page for the customer, you can see details about the customer's license and installed instances: +================ +File: docs/vendor/tutorial-config-install-kots.md +================ +# Step 6: Install the Release with KOTS - ![Customer reporting page](/images/gitea-customer-reporting-ec.png) +Next, get the KOTS installation command from the Unstable channel in the Vendor Portal and then install the release using the customer license that you downloaded. - [View a larger version of this image](/images/gitea-customer-reporting-ec.png) +As part of installation, you will set Grafana login credentials on the KOTS Admin Console configuration page. -1. On the **Reporting** page, under **Instances**, click on the instance that you just installed to open the instance details page. +To install the release with KOTS: - On the instance details page, you can see additional insights such as the version of Embedded Cluster that is running, instance status and uptime, and more: +1. In the [Vendor Portal](https://vendor.replicated.com), go to **Channels**. From the **Unstable** channel card, under **Install**, copy the **KOTS Install** command. - ![Customer instance details page](/images/gitea-instance-insights-ec.png) + ![KOTS Install tab on the Unstable channel card](/images/grafana-unstable-channel.png) - [View a larger version of this image](/images/gitea-instance-insights-ec.png) + [View a larger version of this image](/images/grafana-unstable-channel.png) -1. (Optional) Reset the node to remove the cluster and the application from the node. This is useful for iteration and development so that you can reset a machine and reuse it instead of having to procure another machine. +1. On the command line, run the **KOTS Install** command that you copied: + + ```bash + curl https://kots.io/install | bash + kubectl kots install $REPLICATED_APP/unstable + ``` + + This installs the latest version of the KOTS CLI and the Admin Console. The Admin Console provides a user interface where you can upload the customer license file and deploy the application. 
+ + For additional KOTS CLI installation options, including how to install without root access, see [Installing the KOTS CLI](/reference/kots-cli-getting-started). + + :::note + KOTS v1.104.0 or later is required to deploy the Replicated SDK. You can verify the version of KOTS installed with `kubectl kots version`. + ::: + +1. Complete the installation command prompts: + + 1. For `Enter the namespace to deploy to`, enter `grafana`. + + 1. For `Enter a new password to be used for the Admin Console`, provide a password to access the Admin Console. + + When the Admin Console is ready, the command prints the URL where you can access the Admin Console. At this point, the KOTS CLI is installed and the Admin Console is running, but the application is not yet deployed. + + **Example output:** ```bash - sudo ./APP_SLUG reset --reboot + Enter the namespace to deploy to: grafana + • Deploying Admin Console + • Creating namespace ✓ + • Waiting for datastore to be ready ✓ + Enter a new password for the Admin Console (6+ characters): •••••••• + • Waiting for Admin Console to be ready ✓ + + • Press Ctrl+C to exit + • Go to http://localhost:8800 to access the Admin Console ``` - Where `APP_SLUG` is the unique slug for the application that you created. You can find the appication slug by running `replicated app ls` on the command line on your local machine. -## Summary +1. With the port forward running, go to `http://localhost:8800` in a browser to access the Admin Console. -Congratulations! As part of this tutorial, you created a release in the Replicated Vendor Portal and installed the release with Replicated Embedded Cluster in a VM. To learn more about Embedded Cluster, see [Embedded Cluster Overview](embedded-overview). +1. On the login page, enter the password that you created for the Admin Console. -## Related Topics +1. On the license page, select the license file that you downloaded previously and click **Upload license**. -* [Embedded Cluster Overview](embedded-overview) -* [Customer Reporting](/vendor/customer-reporting) -* [Instance Details](/vendor/instance-insights-details) -* [Reset a Node](/vendor/embedded-using#reset-a-node) +1. On the **Configure Grafana** page, enter a username and password. You will use these credentials to log in to Grafana. -================ -File: docs/vendor/tutorial-embedded-cluster-package-chart.mdx -================ -import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" -import UnauthorizedError from "../partials/replicated-sdk/_401-unauthorized.mdx" + ![Admin Console config page with username and password fields](/images/grafana-config.png) -# Step 2: Package the Gitea Helm Chart + [View a larger version of this image](/images/grafana-config.png) -Next, get the sample Gitea Helm chart from Bitnami. Add the Replicated SDK as a dependency of the chart, then package the chart into a `.tgz` archive. The purpose of this step is to prepare the Helm chart to be added to a release. +1. Click **Continue**. -The Replicated SDK is a Helm chart that can be optionally added as a dependency of your application Helm chart. The SDK is installed as a small service running alongside your application, and provides an in-cluster API that you can use to embed Replicated features into your application. Additionally, the Replicated SDK provides access to insights and telemetry for instances of your application installed with the Helm CLI. + The Admin Console dashboard opens. 
The application status changes from Missing to Unavailable while the `grafana` Deployment is being created. -To add the Replicated SDK and package the Helm chart: + ![Admin Console dashboard showing unavailable application status](/images/grafana-unavailable.png) -1. Run the following command to pull and untar version 1.0.6 of the Bitnami Gitea Helm chart: + [View a larger version of this image](/images/grafana-unavailable.png) - ``` - helm pull --untar oci://registry-1.docker.io/bitnamicharts/gitea --version 1.0.6 - ``` - For more information about this chart, see the [bitnami/gitea](https://github.com/bitnami/charts/tree/main/bitnami/gitea) repository in GitHub. +1. On the command line, press Ctrl+C to exit the port forward. + +1. Watch for the `grafana` Deployment to become ready: + + ``` + kubectl get deploy grafana --namespace grafana --watch + ``` + +1. After the Deployment is ready, run the following command to confirm that the `grafana-admin` Secret was updated with the new password that you created on the **Configure Grafana** page: -1. Change to the new `gitea` directory that was created: - ``` - cd gitea - ``` -1. View the files in the directory: ``` - ls + echo "Password: $(kubectl get secret grafana-admin --namespace grafana -o jsonpath="{.data.GF_SECURITY_ADMIN_PASSWORD}" | base64 -d)" ``` - The directory contains the following files: + + The ouput of this command displays the password that you created. + +1. Start the port foward again to access the Admin Console: + ``` - Chart.lock Chart.yaml README.md charts templates values.yaml + kubectl kots admin-console --namespace grafana ``` +1. Go to `http://localhost:8800` to open the Admin Console. + + On the Admin Console dashboard, the application status is now displayed as Ready: + + ![Admin console dashboard showing ready application status](/images/grafana-ready.png) + + [View a larger version of this image](/images/grafana-ready.png) + +1. Click **Open App** to open the Grafana login page in a browser. + + Grafana login webpage + + [View a larger version of this image](/images/grafana-login.png) + +1. On the Grafana login page, enter the username and password that you created on the **Configure Grafana** page. Confirm that you can log in to the application to access the Grafana dashboard: + + Grafana dashboard + + [View a larger version of this image](/images/grafana-dashboard.png) + +1. On the command line, press Ctrl+C to exit the port forward. + +1. Uninstall the Grafana application from your cluster: + + ```bash + kubectl kots remove $REPLICATED_APP --namespace grafana --undeploy + ``` + **Example output**: + ``` + • Removing application grafana-python reference from Admin Console and deleting associated resources from the cluster ✓ + • Application grafana-python has been removed + ``` + +1. Remove the Admin Console from the cluster: + + 1. Delete the namespace where the Admin Console is installed: + + ``` + kubectl delete namespace grafana + ``` + 1. Delete the Admin Console ClusterRole and ClusterRoleBinding: + + ``` + kubectl delete clusterrole kotsadm-role + ``` + ``` + kubectl delete clusterrolebinding kotsadm-rolebinding + ``` + +## Next Step + +Congratulations! As part of this tutorial, you used the KOTS Config custom resource to define a configuration page in the Admin Console. You also used the KOTS HelmChart custom resource and KOTS ConfigOption template function to override the default Grafana login credentials with a user-supplied username and password. 
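
The pattern at the core of this tutorial can be summarized in a short sketch. The following is illustrative rather than the exact files from the release: it assumes a Config item named `admin_user`, and it assumes the chart reads the username from the `admin.user` Helm value, as the Bitnami Grafana chart does. In a real release, the two custom resources would be separate files in the `manifests` directory.

```yaml
# Config custom resource: defines a field that appears on the Admin Console config page
apiVersion: kots.io/v1beta1
kind: Config
metadata:
  name: grafana-config
spec:
  groups:
    - name: grafana
      title: Grafana
      items:
        - name: admin_user
          title: Admin Username
          type: text
---
# HelmChart custom resource: maps the user-supplied value into the chart's Helm values
apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: grafana
spec:
  chart:
    name: grafana
    chartVersion: 9.6.5
  values:
    admin:
      user: repl{{ ConfigOption "admin_user" }}
```

At deploy time, KOTS renders the `repl{{ ConfigOption }}` template with whatever the user entered on the config page and passes the result to the chart as a Helm value.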
+ +To learn more about how to customize the Config custom resource to create configuration fields for your application, see [Config](/reference/custom-resource-config). + +## Related Topics + +* [kots install](/reference/kots-cli-install/) +* [Installing the KOTS CLI](/reference/kots-cli-getting-started/) +* [Installing an Application](/enterprise/installing-overview) +* [Deleting the Admin Console and Removing Applications](/enterprise/delete-admin-console) + +================ +File: docs/vendor/tutorial-config-package-chart.md +================ +import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" +import UnauthorizedError from "../partials/replicated-sdk/_401-unauthorized.mdx" + +# Step 3: Package the Helm Chart + +Next, add the Replicated SDK as a dependency of the Helm chart and then package the chart into a `.tgz` archive. The purpose of this step is to prepare the Helm chart to be added to a release. + +To add the Replicated SDK and package the Helm chart: + +1. In your local file system, go to the `grafana` directory that was created as part of [Step 1: Get the Sample Chart and Test](tutorial-config-get-chart). + 1. In the `Chart.yaml` file, add the Replicated SDK as a dependency: @@ -55350,677 +45345,730 @@ To add the Replicated SDK and package the Helm chart: ## Next Step -Create a release using the Helm chart archive. See [Step 3: Add the Chart Archive to a Release](tutorial-embedded-cluster-create-release). +Create a release using the Helm chart archive. See [Step 4: Add the Chart Archive to a Release](tutorial-config-create-release). ## Related Topics -* [Packaging a Helm Chart for a Release](/vendor/helm-install-release.md) * [About the Replicated SDK](/vendor/replicated-sdk-overview) * [Helm Package](https://helm.sh/docs/helm/helm_package/) ================ -File: docs/vendor/tutorial-embedded-cluster-setup.mdx +File: docs/vendor/tutorial-config-setup.md ================ -import Requirements from "../partials/embedded-cluster/_requirements.mdx" - # Introduction and Setup This topic provides a summary of the goals and outcomes for the tutorial and also lists the prerequisites to set up your environment before you begin. ## Summary -This tutorial introduces you to installing an application on a Linux virtual machine (VM) using Replicated Embedded Cluster. Embedded Cluster allows you to distribute a Kubernetes cluster and your application together as a single appliance, making it easy for enterprise users to install, update, and manage the application and the cluster in tandem. +This tutorial introduces you to mapping user-supplied values from the Replicated KOTS Admin Console configuration page to a Helm chart `values.yaml` file. -In this tutorial, you use a sample application to learn how to: +In this tutorial, you use a sample Helm chart to learn how to: -* Add the Embedded Cluster Config to a release -* Use Embedded Cluster to install the application on a Linux VM +* Define a user-facing application configuration page in the KOTS Admin Console +* Set Helm chart values with the user-supplied values from the Admin Console configuration page + +## Set Up the Environment + +Before you begin, ensure that you have kubectl access to a Kubernetes cluster. You can use any cloud provider or tool that you prefer to create a cluster, such as [Replicated Compatibility Matrix](/vendor/testing-how-to), Google Kubernetes Engine (GKE), or minikube. + +## Next Step + +Get the sample Bitnami Helm chart and test installation with the Helm CLI. 
See [Step 1: Get the Sample Chart and Test](/vendor/tutorial-config-get-chart) + +================ +File: docs/vendor/tutorial-ecr-private-images.md +================ +# Tutorial: Using ECR for Private Images + +## Objective -## Set Up the Environment +The purpose of this tutorial is to walk you through how to configure Replicated KOTS to pull images from a private registry in Amazon's Elastic Container Registry (ECR). This tutorial demonstrates the differences between using public and private images with KOTS. -Before you begin, ensure that you have access to a VM that meets the requirements for Embedded Cluster: +## Prerequisites - +* To install the application in this tutorial, you must have a virtual machine (VM) that meets the following minimum requirements: + * Ubuntu 18.04 + * At least 8 GB of RAM + * 4 CPU cores + * At least 40GB of disk space -## Next Step +* To pull a public NGINX container and push it to a private repository in ECR as part of this tutorial, you must have the following: + * An ECR Repository + * An AWS account to use with Docker to pull and push the public NGINX image to the ECR repository. The AWS account must be able to create a read-only user. + * Docker + * The AWS CLI -Install the Replicated CLI and create an application in the Replicated Vendor Portal. See [Step 1: Create an Application](/vendor/tutorial-embedded-cluster-create-app). +## Overview -================ -File: docs/vendor/tutorial-kots-helm-create-app.md -================ -# Step 2: Create an Application +The guide is divided into the following steps: -Next, install the Replicated CLI and then create an application. + 1. [Set Up the Testing Environment](#set-up) -An _application_ is an object that has its own customers, channels, releases, license fields, and more. A single team can have more than one application. It is common for teams to have multiple applications for the purpose of onboarding, testing, and iterating. + 2. [Configure Private Registries in Replicated](#2-configure-private-registries-in-replicated) -To create an application: + 3. [Update Definition Files](#3-update-definition-files) -1. Install the Replicated CLI: + 4. [Install the New Version](#4-install-the-new-version) - ``` - brew install replicatedhq/replicated/cli - ``` - For more installation options, see [Installing the Replicated CLI](/reference/replicated-cli-installing). +## 1. Set Up the Testing Environment {#set-up} -1. Authorize the Replicated CLI: +We are going to use the default NGINX deployment to create our application and then update it to pull the same container from a private repository in ECR and note the differences. - ``` - replicated login - ``` - In the browser window that opens, complete the prompts to log in to your vendor account and authorize the CLI. +### Create Sample Application and deploy the first release -1. Create an application named `Gitea`: +In this section, we cover at a high level the steps to create a new application and install it on a VM. - ``` - replicated app create Gitea - ``` +To create our sample application follow these steps: -1. Set the `REPLICATED_APP` environment variable to the application that you created. This allows you to interact with the application using the Replicated CLI without needing to use the `--app` flag with every command: +* Create a new application in the Replicated [vendor portal](https://vendor.replicated.com) and call it 'MySampleECRApp'. +* Create the first release using the default definition files and promote it to the *unstable* channel. 
+* Create a customer, assign it to the *Unstable* channel and download the license file after creating the customer. +* Install the application to a VM - 1. Get the slug for the application that you created: +Log in to the Replicated admin console. To inspect what was deployed, look at the files under **View Files** from the admin console. +In the Upstream files (files from the release created in the vendor portal) show that we are pulling the public image. - ``` - replicated app ls - ``` - **Example output**: - ``` - ID NAME SLUG SCHEDULER - 2WthxUIfGT13RlrsUx9HR7So8bR Gitea gitea-boxer kots - ``` - In the example above, the application slug is `gitea-boxer`. +![admin-console-view-files-upstream-release1](/images/guides/kots/priv-reg-ecr-ups-files-rel1.png) - :::note - The application _slug_ is a unique string that is generated based on the application name. You can use the application slug to interact with the application through the Replicated CLI and the Vendor API v3. The application name and slug are often different from one another because it is possible to create more than one application with the same name. - ::: +We can further validate this if we switch back to the terminal window on the VM where we installed the application. +If we run `kubectl describe pod ` on the NGINX pod, we can confirm that it was in fact pulled from the public repository. - 1. Set the `REPLICATED_APP` environment variable to the application slug. +![admin-console-kubectl-describe-release2](/images/guides/kots/priv-reg-ecr-kubctl-describe-rel1.png) - **Example:** +Now that we have the basic application installed, we are now going to pull the same image, but from an ECR repository. - ``` - export REPLICATED_APP=gitea-boxer - ``` +### Pull Public Image and Push to ECR -## Next Step +To keep the changes to a minimum and only focus on using a private registry, we are going to pull the public NGINX container (as specified in the `deployment.yaml` file) to our local environment, and then push it to a repository in ECR. +To use `docker login` with ECR, we will need to configure AWS CLI with the AWS Access Key ID and AWS Secret Key for this user. -Add the Replicated SDK to the Helm chart and package the chart to an archive. See [Step 3: Package the Helm Chart](tutorial-kots-helm-package-chart). +Let's start by pulling the public image: -## Related Topics +```shell +$ docker pull nginx +``` -* [Create an Application](/vendor/vendor-portal-manage-app#create-an-application) -* [Installing the Replicated CLI](/reference/replicated-cli-installing) -* [replicated app create](/reference/replicated-cli-app-create) +You should have an output similar to this: -================ -File: docs/vendor/tutorial-kots-helm-create-customer.md -================ -# Step 5: Create a KOTS-Enabled Customer +```shell +Using default tag: latest +latest: Pulling from library/nginx +d121f8d1c412: Pull complete +ebd81fc8c071: Pull complete +655316c160af: Pull complete +d15953c0e0f8: Pull complete +2ee525c5c3cc: Pull complete +Digest: sha256:c628b67d21744fce822d22fdcc0389f6bd763daac23a6b77147d0712ea7102d0 +Status: Downloaded newer image for nginx:latest +docker.io/library/nginx:latest +``` -After promoting the release, create a customer with the KOTS entitlement so that you can install the release with KOTS. A _customer_ represents a single licensed user of your application. +Next, log in to ECR and push this container. 
+To use `docker login` with ECR, [install the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) and [configure it](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html) if not already done. +As part of this, we will need to provide the AWS Access Key ID and AWS Secret Key for a user that has permissions to create and push images to the repository. For more information about working with containers and ECR in the AWS CLI, see [Using Amazon ECR with the AWS CLI](https://docs.aws.amazon.com/AmazonECR/latest/userguide/getting-started-cli.html). -To create a customer: +Just like with any other private registry, we need to know the registry endpoint to pass the `docker login` command. +The syntax is as follows: -1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers > Create customer**. +```shell - The **Create a new customer** page opens: +docker login [some.private.registry]:[port] - ![Customer a new customer page in the Vendor Portal](/images/create-customer.png) +``` +In this case, the endpoint is the **[some.private.registry]:[port]** - [View a larger version of this image](/images/create-customer.png) +To determine the endpoint for ECR, log in to the AWS console and search for 'ECR', which should bring up Elastic Container Registry as an option as shown below. -1. For **Customer name**, enter a name for the customer. For example, `KOTS Customer`. +![search-4-ecr](/images/guides/kots/priv-reg-ecr-search-4-ecr.png) -1. For **Channel**, select **Unstable**. This allows the customer to install releases promoted to the Unstable channel. +Select 'Elastic Container Registry' from the options in the dropdown to get to the list of repositories. -1. For **License type**, select Development. +![ecr-repos](/images/guides/kots/priv-reg-ecr-repos.png) -1. For **License options**, verify that **KOTS Install Enabled** is enabled. This is the entitlement that allows the customer to install with KOTS. +As you can see from the screenshot above, you can see the endpoints for each repository under the URI column. +For the purpose of this guide, we will push the NGINX image to the **demo-apps** repository. -1. Click **Save Changes**. +To determine the endpoint to use in the login command, use the URL without the repository name. -1. On the **Manage customer** page for the customer, click **Download license**. You will use the license file to install with KOTS. +When logging in to ECR, use the AWS CLI to the user credentials. +For example, to log in to ECR, we run the following command: - ![Download license button on the customer page](/images/customer-download-license.png) +```shell - [View a larger version of this image](/images/customer-download-license.png) +$ aws ecr get-login-password --region us-east-2 | docker login --username AWS --password-stdin 4999999999999.dkr.ecr.us-east-2.amazonaws.com +``` -## Next Step +A successful login will display a `Login Succeeded` message. +To push this image to our private repository, tag the image. +The new tag will consist of: -Get the KOTS installation command and install. See [Step 6: Install the Release with KOTS](tutorial-kots-helm-install-kots). 
+`/image` -## Related Topics +For example, to tag the public NGINX image, we run the following command: -* [About Customers](/vendor/licenses-about) -* [Creating and Managing Customers](/vendor/releases-creating-customer) +```shell +$ docker tag nginx 4999999999999.dkr.ecr.us-east-2.amazonaws.com/demo-apps/nginx +``` -================ -File: docs/vendor/tutorial-kots-helm-create-release.md -================ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import HelmChartCr from "../partials/getting-started/_gitea-helmchart-cr.mdx" -import KotsCr from "../partials/getting-started/_gitea-kots-app-cr.mdx" -import K8sCr from "../partials/getting-started/_gitea-k8s-app-cr.mdx" +Assuming the tagging is successful, push the container to our ECR repository: -# Step 4: Add the Chart Archive to a Release +```shell +$ docker push 4999999999999.dkr.ecr.us-east-2.amazonaws.com/demo-apps/nginx +The push refers to repository [4999999999999.dkr.ecr.us-east-2.amazonaws.com/demo-apps/nginx] +908cf8238301: Pushed +eabfa4cd2d12: Pushed +60c688e8765e: Pushed +f431d0917d41: Pushed +07cab4339852: Pushed +latest: digest: sha256:794275d96b4ab96eeb954728a7bf11156570e8372ecd5ed0cbc7280313a27d19 size: 1362 -Next, add the Helm chart archive to a new release for the application in the Replicated Vendor Portal. The purpose of this step is to configure a release that supports installation with both Replicated KOTS and with the Helm CLI. +``` +Our testing environment is all set. +We are now ready to update Replicated to use the private registry. -A _release_ represents a single version of your application and contains your application files. Each release is promoted to one or more _channels_. Channels provide a way to progress releases through the software development lifecycle: from internal testing, to sharing with early-adopters, and finally to making the release generally available. +* * * -To create a release: +## 2. Configure Private Registries in Replicated -1. In the `gitea` directory, create a subdirectory named `manifests`: +To configure a Private Registry in Replicated, we need to provide the same information we needed to login to ECR in the previous step: - ``` - mkdir manifests - ``` +- **Endpoint** +- **Username** +- **Password** - You will add the files required to support installation with Replicated KOTS to this subdirectory. +The difference is that we'll use a different user than the one we used previously. Since Replicated only needs to pull images, it is a best practice to create a 'read-only' user for this specific purpose. -1. Move the Helm chart archive that you created to `manifests`: +### Determine the endpoint - ``` - mv gitea-1.0.6.tgz manifests - ``` +The endpoint should be the same as the one we provided in the previous step. -1. In `manifests`, create the YAML manifests required by KOTS: - ``` - cd manifests - ``` - ``` - touch gitea.yaml kots-app.yaml k8s-app.yaml - ``` +### Setting up the Service Account User -1. In each of the files that you created, paste the corresponding YAML provided in the tabs below: +Replicated only needs access to pull images from the private registry. Let's create a new user in AWS: - - -
    Description
    -

    The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart. The `name` and `chartVersion` listed in the HelmChart custom resource must match the name and version of a Helm chart archive in the release. Each Helm chart archive in a release requires a unique HelmChart custom resource.

    -
    YAML
    - -
    - -
    Description
    -

    The KOTS Application custom resource enables features in the KOTS Admin Console such as branding, release notes, port forwarding, dashboard buttons, application status indicators, and custom graphs.

    The YAML below provides a name for the application to display in the Admin Console, adds a custom status informer that displays the status of the `gitea` Deployment resource in the Admin Console dashboard, adds a custom application icon, and creates a port forward so that the user can open the Gitea application in a browser.

    -
    YAML
    - -
    - -
    Description
    -

    The Kubernetes Application custom resource supports functionality such as including buttons and links on the KOTS Admin Console dashboard. The YAML below adds an Open App button to the Admin Console dashboard that opens the application using the port forward configured in the KOTS Application custom resource.

    -
    YAML
    - -
    -
    +![aws-new-user](/images/guides/kots/priv-reg-ecr-new-user.png) -1. From the `manifests` directory, lint the YAML files to confirm that there are no errors: +As far as permissions go, there are a couple of options, depending on scope of access. +If exposing all images to Replicated is an acceptable solution, the Amazon-provided [AmazonEC2ContainerRegistryReadOnly](https://docs.aws.amazon.com/AmazonECR/latest/userguide/ecr_managed_policies.html#AmazonEC2ContainerRegistryReadOnly) policy will work: - ``` - replicated release lint --yaml-dir . - ``` - `--yaml-dir` is the path to the directory that contains the Helm chart archive and the manifest files required by KOTS. +```shell +{ + "Version": "2012-10-17", + "Statement": [{ + "Effect": "Allow", + "Action": [ + "ecr:GetAuthorizationToken", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:GetRepositoryPolicy", + "ecr:DescribeRepositories", + "ecr:ListImages", + "ecr:DescribeImages", + "ecr:BatchGetImage" + ], + "Resource": "*" + }] +} +``` +If you wish to limit Replicated to only certain images, this policy should be used instead: - **Example output**: +```shell +{ + "Version": "2012-10-17", + "Statement": [{ + "Effect": "Allow", + "Action": [ + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:GetRepositoryPolicy", + "ecr:DescribeRepositories", + "ecr:ListImages", + "ecr:DescribeImages", + "ecr:BatchGetImage" + ], + "Resource": [ + "arn:aws:ecr:us-east-1::repository/", + "arn:aws:ecr:us-east-1::repository/" + ] + }] +}{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ecr:GetAuthorizationToken" + ], + "Resource": "*" + }, + ] +} +``` - ``` - RULE TYPE FILENAME LINE MESSAGE - config-spec warn Missing config spec - preflight-spec warn Missing preflight spec - troubleshoot-spec warn Missing troubleshoot spec - nonexistent-status-informer-object warn kots-app.yaml 8 Status informer points to a nonexistent kubernetes object. If this is a Helm resource, this warning can be ignored. - ``` - :::note - The output includes warning messages that list missing manifest files. These manifests control additional KOTS functionality and can be ignored for the purpose of this tutorial. The `nonexistent-status-informer-object` warning can also be ignored because the `gitea` Deployment resource that was added as a status informer in the KOTS Application custom resource is a Helm resource. - ::: +We will need the AWS Access Key ID and AWS Secret Key in the next section as these will map to the *Username* and *Password* fields. You can obtain these as you create the user or after the user has been created. -1. Create a release: +### Enter Registry Information in Replicated - ``` - replicated release create --yaml-dir . - ``` - **Example output**: - ``` - • Reading manifests from . ✓ - • Creating Release ✓ - • SEQUENCE: 1 - ``` +First, we must link Replicated with the registry. To do this, click on **Add External Registry** from the *Images* tab. -1. Log in to the Vendor Portal and go to **Releases**. +/images/add-external-registry.png - The release that you created is listed under **All releases**. +[View a larger version of this image](/images/add-external-registry.png) - ![Release page in the Vendor Portal with one release](/images/tutorial-kots-helm-release-seq-1.png) +The values for the fields are: - [View a larger version of this image](/images/tutorial-kots-helm-release-seq-1.png) +**Endpoint:** +Enter the same URL used to log in to ECR. 
+For example, to link to the same registry as the one in the section, we would enter *4999999999999.dkr.ecr.us-east-2.amazonaws.com*. -1. Click **Edit release** to view the files in the release. +**Username:** +Enter the AWS Access Key ID for the user created in the [Setting Up the Service Account User](#setting-up-the-service-account-user) section. - In the release editor, you can see the manifest files that you created, the Helm chart `.tgz` archive, and the `Chart.yaml` and `values.yaml` files for the Gitea Helm chart. You can also see the same warning messages that were displayed in the CLI output. +**Password:** +Enter the AWS Secret Key for the user created in the [Setting Up the Service Account User](#setting-up-the-service-account-user) section. - ![Edit Release page in the Vendor Portal](/images/tutorial-kots-helm-release-edit-seq-1.png) +* * * - [View a larger version of this image](/images/tutorial-kots-helm-release-edit-seq-1.png) +## 3. Update Definition Files -1. At the top of the page, click **Promote**. +Last step is to update our definition manifest to pull the image from the ECR repository. +To do this, we'll update the `deployment.yaml` file by adding the ECR registry URL to the `image` value. +Below is an example using the registry URL used in this guide. -1. In the dialog, for **Which channels you would like to promote this release to?**, select **Unstable**. Unstable is a default channel that is intended for use with internal testing. Click **Promote**. +```diff + spec: + containers: + - name: nginx +- image: nginx ++ image: 4999999999999.dkr.ecr.us-east-2.amazonaws.com/demo-apps/nginx + envFrom: +``` - Promote release dialog +Save your changes and create the new release and promote it to the *Unstable* channel. - [View a larger version of this image](/images/release-promote.png) +* * * -## Next Step +## 4. Install the New Version -Create a customer with the KOTS entitlement so that you can install the release in your cluster using Replicated KOTS. See [Step 5: Create a KOTS-Enabled Customer](tutorial-kots-helm-create-customer). +To deploy the new version of the application, go back to the admin console and select the *Version History* tab. +Click on **Check for Updates** and then **Deploy** when the new version is listed. +To confirm that the new version was in fact installed, it should look like the screenshot below. -## Related Topics +![version-history](/images/guides/kots/priv-reg-ecr-version-history.png) -* [About Channels and Releases](/vendor/releases-about) -* [Configuring the HelmChart Custom Resource](/vendor/helm-native-v2-using) +Now, we can inspect to see the changes in the definition files. +Looking at the `deployment.yaml` upstream file, we see the image path as we set it in the [Update Definition Files](#3-update-definition-files) section. -================ -File: docs/vendor/tutorial-kots-helm-get-chart.md -================ -# Step 1: Get the Sample Chart and Test +![admin-console-view-files-upstream-release2](/images/guides/kots/priv-reg-ecr-upstream-file-rel2.png) -To begin, get the sample Gitea Helm chart from Bitnami, install the chart in your cluster using the Helm CLI, and then uninstall. The purpose of this step is to confirm that you can successfully install and access the application before adding the chart to a release in the Replicated Vendor Portal. +Because KOTS is able to detect that it cannot pull this image anonymously, it then tries to proxy the private registries configured. 
Looking at the `kustomization.yaml` downstream file we can see that the image path is changed to use the Replicated proxy. -To get the sample Gitea Helm chart and test installation: +![admin-console-view-files-downstream-release2](/images/guides/kots/priv-reg-ecr-downstream-file-rel2.png) -1. Run the following command to pull and untar version 1.0.6 of the Bitnami Gitea Helm chart: +The install of the new version should have created a new pod. If we run `kubectl describe pod` on the new NGINX pod, we can confirm that the image was in fact pulled from the ECR repository. - ``` - helm pull --untar oci://registry-1.docker.io/bitnamicharts/gitea --version 1.0.6 - ``` - For more information about this chart, see the [bitnami/gitea](https://github.com/bitnami/charts/tree/main/bitnami/gitea) repository in GitHub. +![admin-console-kubectl-describe-release2](/images/guides/kots/priv-reg-ecr-kubectl-describe-rel2.png) -1. Change to the new `gitea` directory that was created: - ``` - cd gitea - ``` -1. View the files in the directory: - ``` - ls - ``` - The directory contains the following files: - ``` - Chart.lock Chart.yaml README.md charts templates values.yaml - ``` -1. Install the Gitea chart in your cluster: +* * * - ``` - helm install gitea . --namespace gitea --create-namespace - ``` - To view the full installation instructions from Bitnami, see [Installing the Chart](https://github.com/bitnami/charts/blob/main/bitnami/gitea/README.md#installing-the-chart) in the `bitnami/gitea` repository. +## Related Topics - When the chart is installed, the following output is displayed: +- [Connecting to an External Registry](packaging-private-images/) - ``` - NAME: gitea - LAST DEPLOYED: Tue Oct 24 12:44:55 2023 - NAMESPACE: gitea - STATUS: deployed - REVISION: 1 - TEST SUITE: None - NOTES: - CHART NAME: gitea - CHART VERSION: 1.0.6 - APP VERSION: 1.20.5 +- [Replicated Community Thread on AWS Roles and Permissions](https://help.replicated.com/community/t/what-are-the-minimal-aws-iam-permissions-needed-to-proxy-images-from-elastic-container-registry-ecr/267) - ** Please be patient while the chart is being deployed ** +- [AWS ECR Managed Policies Documentation](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecr_managed_policies.html) - 1. Get the Gitea URL: +================ +File: docs/vendor/tutorial-embedded-cluster-create-app.mdx +================ +# Step 1: Create an Application - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - Watch the status with: 'kubectl get svc --namespace gitea -w gitea' +To begin, install the Replicated CLI and create an application in the Replicated Vendor Portal. - export SERVICE_IP=$(kubectl get svc --namespace gitea gitea --template "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}") - echo "Gitea URL: http://$SERVICE_IP/" +An _application_ is an object that has its own customers, channels, releases, license fields, and more. A single team can have more than one application. It is common for teams to have multiple applications for the purpose of onboarding, testing, and iterating. - WARNING: You did not specify a Root URL for Gitea. The rendered URLs in Gitea may not show correctly. In order to set a root URL use the rootURL value. +To create an application: - 2. Get your Gitea login credentials by running: +1. 
Install the Replicated CLI: - echo Username: bn_user - echo Password: $(kubectl get secret --namespace gitea gitea -o jsonpath="{.data.admin-password}" | base64 -d) ``` + brew install replicatedhq/replicated/cli + ``` + For more installation options, see [Installing the Replicated CLI](/reference/replicated-cli-installing). -1. Watch the `gitea` LoadBalancer service until an external IP is available: +1. Authorize the Replicated CLI: ``` - kubectl get svc gitea --namespace gitea --watch + replicated login ``` + In the browser window that opens, complete the prompts to log in to your vendor account and authorize the CLI. -1. When the external IP for the `gitea` LoadBalancer service is available, run the commands provided in the output of the installation command to get the Gitea URL: +1. Create an application named `Gitea`: ``` - export SERVICE_IP=$(kubectl get svc --namespace gitea gitea --template "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}") - echo "Gitea URL: http://$SERVICE_IP/" + replicated app create Gitea ``` -1. In a browser, go to the Gitea URL to confirm that you can see the welcome page for the application: +1. Set the `REPLICATED_APP` environment variable to the application that you created. This allows you to interact with the application using the Replicated CLI without needing to use the `--app` flag with every command: - Gitea application webpage + 1. Get the slug for the application that you created: - [View a larger version of this image](/images/gitea-app.png) + ``` + replicated app ls + ``` + **Example output**: + ``` + ID NAME SLUG SCHEDULER + 2WthxUIfGT13RlrsUx9HR7So8bR Gitea gitea-kite kots + ``` + In the example above, the application slug is `gitea-kite`. -1. Uninstall the Helm chart: + :::note + The application _slug_ is a unique string that is generated based on the application name. You can use the application slug to interact with the application through the Replicated CLI and the Vendor API v3. The application name and slug are often different from one another because it is possible to create more than one application with the same name. + ::: - ``` - helm uninstall gitea --namespace gitea - ``` - This command removes all the Kubernetes components associated with the chart and uninstalls the `gitea` release. + 1. Set the `REPLICATED_APP` environment variable to the application slug. -1. Delete the namespace: + **Example:** - ``` - kubectl delete namespace gitea - ``` + ``` + export REPLICATED_APP=gitea-kite + ``` ## Next Step -Log in to the Vendor Portal and create an application. See [Step 2: Create an Application](tutorial-kots-helm-create-app). +Add the Replicated SDK to the Helm chart and package the chart to an archive. See [Step 2: Package the Helm Chart](tutorial-embedded-cluster-package-chart). 
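
Before moving on, you can optionally confirm that the Replicated CLI is reading the environment variable; a quick check (this sketch assumes a bash shell and the example `gitea-kite` slug from above):

```bash
# Should print the slug you exported, for example: gitea-kite
echo $REPLICATED_APP

# Runs against the application in $REPLICATED_APP without needing the --app flag
replicated release ls
```
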
## Related Topics -* [Helm Install](https://helm.sh/docs/helm/helm_install/) -* [Helm Uninstall](https://helm.sh/docs/helm/helm_uninstall/) -* [Helm Create](https://helm.sh/docs/helm/helm_create/) -* [Helm Package](https://helm.sh/docs/helm/helm_package/) -* [bitnami/gitea](https://github.com/bitnami/charts/blob/main/bitnami/gitea) +* [Create an Application](/vendor/vendor-portal-manage-app#create-an-application) +* [Installing the Replicated CLI](/reference/replicated-cli-installing) +* [replicated app create](/reference/replicated-cli-app-create) ================ -File: docs/vendor/tutorial-kots-helm-install-helm.md +File: docs/vendor/tutorial-embedded-cluster-create-customer.mdx ================ -# Step 7: Install the Release with the Helm CLI +# Step 4: Create an Embedded Cluster-Enabled Customer -Next, install the same release using the Helm CLI. All releases that contain one or more Helm charts can be installed with the Helm CLI. +After promoting the release, create a customer with the Replicated KOTS and Embedded Cluster entitlements so that you can install the release with Embedded Cluster. A _customer_ represents a single licensed user of your application. -All Helm charts included in a release are automatically pushed to the Replicated registry when the release is promoted to a channel. Helm CLI installations require that the customer has a valid email address to authenticate with the Replicated registry. +To create a customer: -To install the release with the Helm CLI: +1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers > Create customer**. -1. Create a new customer to test the Helm CLI installation: + The **Create a new customer** page opens: - 1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers > Create customer**. + ![Customer a new customer page in the Vendor Portal](/images/create-customer.png) - The **Create a new customer** page opens: + [View a larger version of this image](/images/create-customer.png) + +1. For **Customer name**, enter a name for the customer. For example, `Example Customer`. + +1. For **Channel**, select **Unstable**. This allows the customer to install releases promoted to the Unstable channel. + +1. For **License type**, select **Development**. + +1. For **License options**, enable the following entitlements: + * **KOTS Install Enabled** + * **Embedded Cluster Enabled** + +1. Click **Save Changes**. - ![Customer a new customer page in the Vendor Portal](/images/create-customer.png) +## Next Step - [View a larger version of this image](/images/create-customer.png) +Get the Embedded Cluster installation commands and install. See [Step 5: Install the Release on a VM](tutorial-embedded-cluster-install). - 1. For **Customer name**, enter a name for the customer. For example, `Helm Customer`. +## Related Topics - 1. For **Channel**, select **Unstable**. This allows the customer to install releases promoted to the Unstable channel. +* [About Customers](/vendor/licenses-about) +* [Creating and Managing Customers](/vendor/releases-creating-customer) - 1. For **Customer email**, enter the email address for the customer. The customer email address is required to install the application with the Helm CLI. This email address is never used send emails to customers. 
+================ +File: docs/vendor/tutorial-embedded-cluster-create-release.mdx +================ +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import HelmChartCr from "../partials/getting-started/_gitea-helmchart-cr-ec.mdx" +import KotsCr from "../partials/getting-started/_gitea-kots-app-cr-ec.mdx" +import K8sCr from "../partials/getting-started/_gitea-k8s-app-cr.mdx" +import EcCr from "../partials/embedded-cluster/_ec-config.mdx" - 1. For **License type**, select Trial. +# Step 3: Add the Chart Archive to a Release - 1. (Optional) For **License options**, _disable_ the **KOTS Install Enabled** entitlement. +Next, add the Helm chart archive to a new release for the application in the Replicated Vendor Portal. The purpose of this step is to configure a release that supports installation with Replicated Embedded Cluster. - 1. Click **Save Changes**. +A _release_ represents a single version of your application and contains your application files. Each release is promoted to one or more _channels_. Channels provide a way to progress releases through the software development lifecycle: from internal testing, to sharing with early-adopters, and finally to making the release generally available. -1. On the **Manage customer** page for the new customer, click **Helm install instructions**. +To create a release: - ![Helm install instrucitons button](/images/tutorial-gitea-helm-customer-install-button.png) +1. In the `gitea` directory, create a subdirectory named `manifests`: - [View a larger version of this image](/images/tutorial-gitea-helm-customer-install-button.png) + ``` + mkdir manifests + ``` - You will use the instructions provided in the **Helm install instructions** dialog to install the chart. + You will add the files required to support installation with Replicated KOTS to this subdirectory. -1. Before you run the first command in the **Helm install instructions** dialog, create a `gitea` namespace for the installation: +1. Move the Helm chart archive that you created to `manifests`: ``` - kubectl create namespace gitea + mv gitea-1.0.6.tgz manifests ``` -1. Update the current kubectl context to target the new `gitea` namespace. This ensures that the chart is installed in the `gitea` namespace without requiring you to set the `--namespace` flag with the `helm install` command: - +1. In `manifests`, create the YAML manifests required by KOTS: ``` - kubectl config set-context --namespace=gitea --current + cd manifests + ``` + ``` + touch gitea.yaml kots-app.yaml k8s-app.yaml embedded-cluster.yaml ``` -1. Run the commands in the provided in the **Helm install instructions** dialog to log in to the registry and install the Helm chart. +1. In each of the files that you created, paste the corresponding YAML provided in the tabs below: - Helm install instructions dialog + + +
    Description
    +

    The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart. The `name` and `chartVersion` listed in the HelmChart custom resource must match the name and version of a Helm chart archive in the release. The `optionalValues` field sets the specified Helm values when a given conditional statement evaluates to true. In this case, if the application is installed with Embedded Cluster, then the Gitea service type is set to `NodePort` and the node port is set to `"32000"`. This will allow Gitea to be accessed from the local machine after deployment.

    +
    YAML
    + +
    + +
    Description
    +

    The KOTS Application custom resource enables features in the Replicated Admin Console such as branding, release notes, application status indicators, and custom graphs.

    The YAML below provides a name for the application to display in the Admin Console, adds a custom status informer that displays the status of the `gitea` Deployment resource in the Admin Console dashboard, adds a custom application icon, and adds the port where the Gitea service can be accessed so that the user can open the application after installation.

    +
    YAML
    + +
    + +
    Description
    +

    The Kubernetes Application custom resource supports functionality such as including buttons and links on the Replicated Admin Console dashboard. The YAML below adds an Open App button to the Admin Console dashboard that opens the application using the service port defined in the KOTS Application custom resource.

    +
    YAML
    + +
    + +
    Description
    +

    To install your application with Embedded Cluster, an Embedded Cluster Config must be present in the release. At minimum, the Embedded Cluster Config sets the version of Embedded Cluster that will be installed, as shown in the minimal example after these tabs. You can also define several characteristics about the cluster.

    +
    YAML
    + +
    +
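
    For reference, the smallest possible Embedded Cluster Config is just an `apiVersion`, a `kind`, and a version; a minimal sketch (the `version` value below is a placeholder, so substitute a released Embedded Cluster version):

    ```yaml
    # Minimal Embedded Cluster Config; spec.version pins the Embedded Cluster release to install
    apiVersion: embeddedcluster.replicated.com/v1beta1
    kind: Config
    spec:
      version: 1.5.0+k8s-1.29  # placeholder version
    ```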
    - [View a larger version of this image](/images/tutorial-gitea-helm-install-instructions.png) +1. Lint: + ```bash + replicated release lint --yaml-dir . + ``` + ```bash + RULE TYPE FILENAME LINE MESSAGE + config-spec warn Missing config spec + preflight-spec warn Missing preflight spec + troubleshoot-spec warn Missing troubleshoot spec + nonexistent-status-informer-object warn kots-app.yaml 8 Status informer points to a nonexistent kubernetes object. If this is a Helm resource, this warning can be ignored. + ``` :::note - You can ignore the **No preflight checks found** warning for the purpose of this tutorial. This warning appears because there are no specifications for preflight checks in the Helm chart archive. + You can ignore any warning messages for the purpose of this tutorial. ::: -1. After the installation command completes, you can see that both the `gitea` Deployment and the Replicated SDK `replicated` Deployment were created: +1. Create a release: ``` - kubectl get deploy + replicated release create --yaml-dir . ``` - **Example output:** + **Example output**: ``` - NAME READY UP-TO-DATE AVAILABLE AGE - gitea 0/1 1 0 35s - replicated 1/1 1 1 35s + • Reading manifests from . ✓ + • Creating Release ✓ + • SEQUENCE: 1 ``` -1. Watch the `gitea` LoadBalancer service until an external IP is available: +1. Log in to the Vendor Portal and go to **Releases**. - ``` - kubectl get svc gitea --watch - ``` + The release that you created is listed under **All releases**. -1. After an external IP address is available for the `gitea` LoadBalancer service, follow the instructions in the output of the installation command to get the Gitea URL and then confirm that you can open the application in a browser. + ![Release page in the Vendor Portal with one release](/images/gitea-ec-release-seq-1.png) -1. In another browser window, open the [Vendor Portal](https://vendor.replicated.com/) and go to **Customers**. Select the customer that you created for the Helm CLI installation. + [View a larger version of this image](/images/gitea-ec-release-seq-1.png) - On the **Reporting** page for the customer, because the Replicated SDK was installed alongside the Gitea Helm chart, you can see details about the customer's license and installed instances: +1. Click the dot menu then **Edit release** to view the files in the release. - ![Customer reporting](/images/tutorial-gitea-helm-reporting.png) + ![dot menu](/images/gitea-ec-release-edit-button.png) - [View a larger version of this image](/images/tutorial-gitea-helm-reporting.png) + [View a larger version of this image](/images/gitea-ec-release-edit-button.png) -1. On the **Reporting** page, under **Instances**, click on the instance that you just installed to open the instance details page. + In the release editor, you can see the manifest files that you created, the Helm chart `.tgz` archive, and the `Chart.yaml` and `values.yaml` files for the Gitea Helm chart. You can also see the same warning messages that were displayed in the CLI output. 
- On the instance details page, you can see additional insights such as the cluster where the application is installed, the version of the Replicated SDK running in the cluster, instance status and uptime, and more: + ![Edit Release page in the Vendor Portal](/images/gitea-ec-release-edit-seq-1.png) - ![Customer instance details](/images/tutorial-gitea-helm-instance.png) + [View a larger version of this image](/images/gitea-ec-release-edit-seq-1.png) - [View a larger version of this image](/images/tutorial-gitea-helm-instance.png) +1. At the top of the page, click **Promote**. -1. Uninstall the Helm chart and the Replicated SDK: +1. In the dialog, for **Which channels you would like to promote this release to?**, select **Unstable**. Unstable is a default channel that is intended for use with internal testing. Click **Promote**. - ``` - helm uninstall gitea - ``` + Promote release dialog -1. Delete the `gitea` namespace: - - ``` - kubectl delete namespace gitea - ``` + [View a larger version of this image](/images/release-promote.png) ## Next Step -Congratulations! As part of this tutorial, you created a release in the Replicated Vendor Portal and installed the release with both KOTS and the Helm CLI. +Create a customer with the Embedded Cluster entitlement so that you can install the release using Embedded Cluster. See [Step 4: Create an Embedded Cluster-Enabled Customer](tutorial-embedded-cluster-create-customer). ## Related Topics -* [Installing with Helm](/vendor/install-with-helm) -* [About the Replicated SDK](/vendor/replicated-sdk-overview) -* [Helm Uninstall](https://helm.sh/docs/helm/helm_uninstall/) -* [Helm Delete](https://helm.sh/docs/helm/helm_delete/) +* [About Channels and Releases](/vendor/releases-about) +* [Configuring the HelmChart Custom Resource](/vendor/helm-native-v2-using) +* [Embedded Cluster Config](/reference/embedded-config) +* [Setting Helm Values with KOTS](/vendor/helm-optional-value-keys) ================ -File: docs/vendor/tutorial-kots-helm-install-kots.md +File: docs/vendor/tutorial-embedded-cluster-install.mdx ================ import KotsVerReq from "../partials/replicated-sdk/_kots-version-req.mdx" -# Step 6: Install the Release with KOTS +# Step 5: Install the Release on a VM -Next, get the KOTS installation command from the Unstable channel in the Vendor Portal and then install the release using the customer license that you downloaded. +Next, get the customer-specific Embedded Cluster installation commands and then install the release on a Linux VM. -To install the release with KOTS: +To install the release with Embedded Cluster: -1. In the [Vendor Portal](https://vendor.replicated.com), go to **Channels**. From the **Unstable** channel card, under **Install**, copy the **KOTS Install** command. +1. In the [Vendor Portal](https://vendor.replicated.com), go to **Customers**. Click on the name of the customer you created. - ![KOTS Install tab on the Unstable channel card](/images/helm-tutorial-unstable-kots-install-command.png) +1. Click **Install instructions > Embedded cluster**. - [View a larger version of this image](/images/helm-tutorial-unstable-kots-install-command.png) + Customer install instructions dropdown -1. On the command line, run the **KOTS Install** command that you copied: + [View a larger version of this image](/images/customer-install-instructions-dropdown.png) - ```bash - curl https://kots.io/install | bash - kubectl kots install $REPLICATED_APP/unstable - ``` + The **Embedded cluster install instructions** dialog opens. 
- This installs the latest version of the KOTS CLI and the Replicated KOTS Admin Console. The Admin Console provides a user interface where you can upload the customer license file and deploy the application. + Embedded Cluster install instructions dialog - For additional KOTS CLI installation options, including how to install without root access, see [Installing the KOTS CLI](/reference/kots-cli-getting-started). + [View a larger version of this image](/images/embedded-cluster-install-dialog-latest.png) - :::note - - ::: +1. On the command line, SSH onto your Linux VM. -1. Complete the installation command prompts: +1. Run the first command in the **Embedded cluster install instructions** dialog to download the latest release. - 1. For `Enter the namespace to deploy to`, enter `gitea`. +1. Run the second command to extract the release. - 1. For `Enter a new password to be used for the Admin Console`, provide a password to access the Admin Console. +1. Run the third command to install the release. - When the Admin Console is ready, the command prints the URL where you can access the Admin Console. At this point, the KOTS CLI is installed and the Admin Console is running, but the application is not yet deployed. +1. When prompted, enter a password for accessing the KOTS Admin Console. + + The installation command takes a few minutes to complete. + +1. When the installation command completes, go to the URL provided in the output to log in to the Admin Console. **Example output:** ```bash - Enter the namespace to deploy to: gitea - • Deploying Admin Console - • Creating namespace ✓ - • Waiting for datastore to be ready ✓ - Enter a new password for the admin console (6+ characters): •••••••• - • Waiting for Admin Console to be ready ✓ - - • Press Ctrl+C to exit - • Go to http://localhost:8800 to access the Admin Console + ✔ Host files materialized + ? Enter an Admin Console password: ******** + ? Confirm password: ******** + ✔ Node installation finished + ✔ Storage is ready! + ✔ Embedded Cluster Operator is ready! + ✔ Admin Console is ready! + ✔ Finished! + Visit the admin console to configure and install gitea-kite: http://104.155.145.60:30000 ``` -1. With the port forward running, in a browser, go to `http://localhost:8800` to access the Admin Console. - -1. On the login page, enter the password that you created. - -1. On the license page, select the license file that you downloaded previously and click **Upload license**. - - The Admin Console dashboard opens. The application status changes from Missing to Unavailable while the `gitea` Deployment is being created: - - ![Admin console dashboard](/images/tutorial-gitea-unavailable.png) - - [View a larger version of this image](/images/tutorial-gitea-unavailable.png) + At this point, the cluster is provisioned and the KOTS Admin Console is deployed, but the application is not yet installed. -1. While waiting for the `gitea` Deployment to be created, do the following: +1. Bypass the browser TLS warning by clicking **Continue to Setup**. - 1. On the command line, press Ctrl+C to exit the port forward. +1. Click **Advanced > Proceed**. - 1. Watch for the `gitea` Deployment to become ready: +1. On the **HTTPS for the Gitea Admin Console** page, select **Self-signed** and click **Continue**. - ``` - kubectl get deploy gitea --namespace gitea --watch - ``` +1. On the login page, enter the Admin Console password that you created during installation and click **Log in**. - 1. 
After the `gitea` Deployment is ready, confirm that an external IP for the `gitea` LoadBalancer service is available: +1. On the **Nodes** page, you can view details about the VM where you installed, including its node role, status, CPU, and memory. Users can also optionally add additional nodes on this page before deploying the application. Click **Continue**. - ``` - kubectl get svc gitea --namespace gitea - ``` + The Admin Console dashboard opens. + +1. In the **Version** section, for version `0.1.0`, click **Deploy** then **Yes, Deploy**. - 1. Start the port foward again to access the Admin Console: + The application status changes from Missing to Unavailable while the `gitea` Deployment is being created. - ``` - kubectl kots admin-console --namespace gitea - ``` +1. After a few minutes when the application status is Ready, click **Open App** to view the Gitea application in a browser: - 1. Go to `http://localhost:8800` to open the Admin Console. + ![Admin console dashboard showing ready status](/images/gitea-ec-ready.png) -1. On the Admin Console dashboard, the application status is now displayed as Ready and you can click **Open App** to view the Gitea application in a browser: + [View a larger version of this image](/images/gitea-ec-ready.png) - ![Admin console dashboard showing ready status](/images/tutorial-gitea-ready.png) + Gitea app landing page - [View a larger version of this image](/images/tutorial-gitea-ready.png) + [View a larger version of this image](/images/gitea-app.png) 1. In another browser window, open the [Vendor Portal](https://vendor.replicated.com/) and go to **Customers**. Select the customer that you created. On the **Reporting** page for the customer, you can see details about the customer's license and installed instances: - ![Customer reporting page](/images/tutorial-gitea-customer-reporting.png) + ![Customer reporting page](/images/gitea-customer-reporting-ec.png) - [View a larger version of this image](/images/tutorial-gitea-customer-reporting.png) + [View a larger version of this image](/images/gitea-customer-reporting-ec.png) 1. On the **Reporting** page, under **Instances**, click on the instance that you just installed to open the instance details page. - On the instance details page, you can see additional insights such as the cluster where the application is installed, the version of KOTS running in the cluster, instance status and uptime, and more: - - ![Customer instance details page](/images/tutorial-gitea-instance-insights.png) - - [View a larger version of this image](/images/tutorial-gitea-instance-insights.png) - -1. Uninstall the Gitea application from your cluster so that you can install the same release again using the Helm CLI: - - ```bash - kubectl kots remove $REPLICATED_APP --namespace gitea --undeploy - ``` - **Example output**: - ``` - • Removing application gitea-boxer reference from Admin Console and deleting associated resources from the cluster ✓ - • Application gitea-boxer has been removed - ``` + On the instance details page, you can see additional insights such as the version of Embedded Cluster that is running, instance status and uptime, and more: -1. Remove the Admin Console from the cluster: + ![Customer instance details page](/images/gitea-instance-insights-ec.png) - 1. Delete the namespace where the Admin Console is installed: + [View a larger version of this image](/images/gitea-instance-insights-ec.png) - ``` - kubectl delete namespace gitea - ``` - 1. Delete the Admin Console ClusterRole and ClusterRoleBinding: +1. 
(Optional) Reset the node to remove the cluster and the application from the node. This is useful for iteration and development so that you can reset a machine and reuse it instead of having to procure another machine.

-    ```
-    kubectl delete clusterrole kotsadm-role
-    ```
-    ```
-    kubectl delete clusterrolebinding kotsadm-rolebinding
-    ```
+    ```bash
+    sudo ./APP_SLUG reset --reboot
+    ```
+    Where `APP_SLUG` is the unique slug for the application that you created. You can find the application slug by running `replicated app ls` on the command line on your local machine.

-## Next Step
+## Summary

-Install the same release with the Helm CLI. See [Step 7: Install the Release with the Helm CLI](tutorial-kots-helm-install-helm).
+Congratulations! As part of this tutorial, you created a release in the Replicated Vendor Portal and installed the release with Replicated Embedded Cluster in a VM. To learn more about Embedded Cluster, see [Embedded Cluster Overview](embedded-overview).

## Related Topics

-* [kots install](/reference/kots-cli-install/)
-* [Installing the KOTS CLI](/reference/kots-cli-getting-started/)
-* [Deleting the Admin Console and Removing Applications](/enterprise/delete-admin-console)
-* [Customer Reporting](customer-reporting)
-* [Instance Details](instance-insights-details)
+* [Embedded Cluster Overview](embedded-overview)
+* [Customer Reporting](/vendor/customer-reporting)
+* [Instance Details](/vendor/instance-insights-details)
+* [Reset a Node](/vendor/embedded-using#reset-a-node)

================
-File: docs/vendor/tutorial-kots-helm-package-chart.md
+File: docs/vendor/tutorial-embedded-cluster-package-chart.mdx
================
import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx"
import UnauthorizedError from "../partials/replicated-sdk/_401-unauthorized.mdx"

-# Step 3: Package the Helm Chart
+# Step 2: Package the Gitea Helm Chart

-Next, add the Replicated SDK as a dependency of the Helm chart and then package the chart into a `.tgz` archive. The purpose of this step is to prepare the Helm chart to be added to a release.
+Next, get the sample Gitea Helm chart from Bitnami. Add the Replicated SDK as a dependency of the chart, then package the chart into a `.tgz` archive. The purpose of this step is to prepare the Helm chart to be added to a release.

The Replicated SDK is a Helm chart that can be optionally added as a dependency of your application Helm chart. The SDK is installed as a small service running alongside your application, and provides an in-cluster API that you can use to embed Replicated features into your application. Additionally, the Replicated SDK provides access to insights and telemetry for instances of your application installed with the Helm CLI.

To add the Replicated SDK and package the Helm chart:

-1. In your local file system, go to the `gitea` directory that was created as part of [Step 1: Get the Sample Chart and Test](tutorial-kots-helm-get-chart).
+1. Run the following command to pull and untar version 1.0.6 of the Bitnami Gitea Helm chart:
+
+    ```
+    helm pull --untar oci://registry-1.docker.io/bitnamicharts/gitea --version 1.0.6
+    ```
+    For more information about this chart, see the [bitnami/gitea](https://github.com/bitnami/charts/tree/main/bitnami/gitea) repository in GitHub.
+
+1. Change to the new `gitea` directory that was created:
+    ```
+    cd gitea
+    ```
+1. 
View the files in the directory: + ``` + ls + ``` + The directory contains the following files: + ``` + Chart.lock Chart.yaml README.md charts templates values.yaml + ``` 1. In the `Chart.yaml` file, add the Replicated SDK as a dependency: @@ -56035,7 +46083,7 @@ To add the Replicated SDK and package the Helm chart: ## Next Step -Create a release using the Helm chart archive. See [Step 4: Add the Chart Archive to a Release](tutorial-kots-helm-create-release). +Create a release using the Helm chart archive. See [Step 3: Add the Chart Archive to a Release](tutorial-embedded-cluster-create-release). ## Related Topics @@ -56044,130 +46092,106 @@ Create a release using the Helm chart archive. See [Step 4: Add the Chart Archiv * [Helm Package](https://helm.sh/docs/helm/helm_package/) ================ -File: docs/vendor/tutorial-kots-helm-setup.md +File: docs/vendor/tutorial-embedded-cluster-setup.mdx ================ +import Requirements from "../partials/embedded-cluster/_requirements.mdx" + # Introduction and Setup This topic provides a summary of the goals and outcomes for the tutorial and also lists the prerequisites to set up your environment before you begin. ## Summary -This tutorial introduces you to the Replicated Vendor Portal, the Replicated CLI, the Replicated SDK, and the Replicated KOTS installer. +This tutorial introduces you to installing an application on a Linux virtual machine (VM) using Replicated Embedded Cluster. Embedded Cluster allows you to distribute a Kubernetes cluster and your application together as a single appliance, making it easy for enterprise users to install, update, and manage the application and the cluster in tandem. -In this tutorial, you use a sample Helm chart to learn how to: +In this tutorial, you use a sample application to learn how to: -* Add the Replicated SDK to a Helm chart as a dependency -* Create a release with the Helm chart using the Replicated CLI -* Add custom resources to the release so that it supports installation with both the Helm CLI and Replicated KOTS -* Install the release in a cluster using KOTS and the KOTS Admin Console -* Install the same release using the Helm CLI +* Add the Embedded Cluster Config to a release +* Use Embedded Cluster to install the application on a Linux VM ## Set Up the Environment -Before you begin, do the following to set up your environment: - -* Ensure that you have kubectl access to a Kubernetes cluster. You can use any cloud provider or tool that you prefer to create a cluster, such as Google Kubernetes Engine (GKE), Amazon Web Services (AWS), or minikube. - - For information about installing kubectl and configuring kubectl access to a cluster, see the following in the Kubernetes documentation: - * [Install Tools](https://kubernetes.io/docs/tasks/tools/) - * [Command line tool (kubectl)](https://kubernetes.io/docs/reference/kubectl/) - -* Install the Helm CLI. To install the Helm CLI using Homebrew, run: - - ``` - brew install helm - ``` - - For more information, including alternative installation options, see [Install Helm](https://helm.sh/docs/intro/install/) in the Helm documentation. - -* Create a vendor account to access the Vendor Portal. See [Creating a Vendor Portal](/vendor/vendor-portal-creating-account). +Before you begin, ensure that you have access to a VM that meets the requirements for Embedded Cluster: - :::note - If you do not yet have a Vendor Portal team to join, you can sign up for a trial account. By default, trial accounts do not include access to Replicated KOTS. 
To get access to KOTS with your trial account so that you can complete this and other tutorials, contact Replicated at contact@replicated.com. - ::: + ## Next Step -Get the sample Bitnami Helm chart and test installation with the Helm CLI. See [Step 1: Get the Sample Chart and Test](/vendor/tutorial-kots-helm-get-chart) +Install the Replicated CLI and create an application in the Replicated Vendor Portal. See [Step 1: Create an Application](/vendor/tutorial-embedded-cluster-create-app). ================ -File: docs/vendor/tutorial-preflight-helm-add-spec.mdx +File: docs/vendor/tutorial-kots-helm-create-app.md ================ -import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" +# Step 2: Create an Application -# Step 2: Add a Preflight Spec to the Chart +Next, install the Replicated CLI and then create an application. -Create a preflight specification that fails if the cluster is running a version of Kubernetes earlier than 1.23.0, and add the specification to the Gitea chart as a Kubernetes Secret. +An _application_ is an object that has its own customers, channels, releases, license fields, and more. A single team can have more than one application. It is common for teams to have multiple applications for the purpose of onboarding, testing, and iterating. -To add a preflight specification to the Gitea chart: +To create an application: -1. In the `gitea/templates` directory, create a `gitea-preflights.yaml` file: +1. Install the Replicated CLI: ``` - touch templates/gitea-preflights.yaml + brew install replicatedhq/replicated/cli ``` + For more installation options, see [Installing the Replicated CLI](/reference/replicated-cli-installing). -1. In the `gitea-preflights.yaml` file, add the following YAML to create a Kubernetes Secret with a preflight check specification: +1. Authorize the Replicated CLI: - ```yaml - apiVersion: v1 - kind: Secret - metadata: - labels: - troubleshoot.sh/kind: preflight - name: gitea-preflight-checks - stringData: - preflight.yaml: | - apiVersion: troubleshoot.sh/v1beta2 - kind: Preflight - metadata: - name: gitea-preflight-checks - spec: - analyzers: - - clusterVersion: - outcomes: - - fail: - when: "< 1.23.0" - message: |- - Your cluster is running a version of Kubernetes that is not supported and your installation will not succeed. To continue, upgrade your cluster to Kubernetes 1.23.0 or later. - uri: https://www.kubernetes.io - - pass: - message: Your cluster is running the required version of Kubernetes. ``` + replicated login + ``` + In the browser window that opens, complete the prompts to log in to your vendor account and authorize the CLI. - The YAML above defines a preflight check that fails if the target cluster is running a version of Kubernetes earlier than 1.23.0. The preflight check also includes a message to the user that describes the failure and lists the required Kubernetes version. The `troubleshoot.sh/kind: preflight` label is required to run preflight checks defined in Secrets. +1. Create an application named `Gitea`: -1. In the Gitea `Chart.yaml` file, add the Replicated SDK as a dependency: + ``` + replicated app create Gitea + ``` - +1. Set the `REPLICATED_APP` environment variable to the application that you created. 
This allows you to interact with the application using the Replicated CLI without needing to use the `--app` flag with every command: - The SDK is installed as a small service running alongside your application, and provides an in-cluster API that you can use to embed Replicated features into your application. + 1. Get the slug for the application that you created: -1. Update dependencies and package the chart to a `.tgz` chart archive: + ``` + replicated app ls + ``` + **Example output**: + ``` + ID NAME SLUG SCHEDULER + 2WthxUIfGT13RlrsUx9HR7So8bR Gitea gitea-boxer kots + ``` + In the example above, the application slug is `gitea-boxer`. - ```bash - helm package . --dependency-update - ``` + :::note + The application _slug_ is a unique string that is generated based on the application name. You can use the application slug to interact with the application through the Replicated CLI and the Vendor API v3. The application name and slug are often different from one another because it is possible to create more than one application with the same name. + ::: - :::note - If you see a `401 Unauthorized` error message, log out of the Replicated registry by running `helm registry logout registry.replicated.com` and then run `helm package . --dependency-update` again. - ::: + 1. Set the `REPLICATED_APP` environment variable to the application slug. + + **Example:** + + ``` + export REPLICATED_APP=gitea-boxer + ``` ## Next Step -Add the chart archive to a release. See [Add the Chart Archive to a Release](tutorial-preflight-helm-create-release). +Add the Replicated SDK to the Helm chart and package the chart to an archive. See [Step 3: Package the Helm Chart](tutorial-kots-helm-package-chart). ## Related Topics -* [Defining Preflight Checks](/vendor/preflight-defining) -* [Packaging a Helm Chart for a Release](/vendor/helm-install-release) +* [Create an Application](/vendor/vendor-portal-manage-app#create-an-application) +* [Installing the Replicated CLI](/reference/replicated-cli-installing) +* [replicated app create](/reference/replicated-cli-app-create) ================ -File: docs/vendor/tutorial-preflight-helm-create-customer.mdx +File: docs/vendor/tutorial-kots-helm-create-customer.md ================ -# Step 4: Create a Customer +# Step 5: Create a KOTS-Enabled Customer -After promoting the release, create a customer so that you can run the preflight checks and install. +After promoting the release, create a customer with the KOTS entitlement so that you can install the release with KOTS. A _customer_ represents a single licensed user of your application. To create a customer: @@ -56179,19 +46203,25 @@ To create a customer: [View a larger version of this image](/images/create-customer.png) -1. For **Customer name**, enter a name for the customer. For example, `Preflight Customer`. +1. For **Customer name**, enter a name for the customer. For example, `KOTS Customer`. 1. For **Channel**, select **Unstable**. This allows the customer to install releases promoted to the Unstable channel. -1. For **Customer email**, enter the email address for the customer. The customer email address is required to install the application with the Helm CLI. This email address is never used send emails to customers. - 1. For **License type**, select Development. +1. For **License options**, verify that **KOTS Install Enabled** is enabled. This is the entitlement that allows the customer to install with KOTS. + 1. Click **Save Changes**. +1. 
On the **Manage customer** page for the customer, click **Download license**. You will use the license file to install with KOTS. + + ![Download license button on the customer page](/images/customer-download-license.png) + + [View a larger version of this image](/images/customer-download-license.png) + ## Next Step -Use the Helm CLI to run the preflight checks you defined and install Gitea. See [Run Preflights with the Helm CLI](tutorial-preflight-helm-install). +Get the KOTS installation command and install. See [Step 6: Install the Release with KOTS](tutorial-kots-helm-install-kots). ## Related Topics @@ -56199,67 +46229,95 @@ Use the Helm CLI to run the preflight checks you defined and install Gitea. See * [Creating and Managing Customers](/vendor/releases-creating-customer) ================ -File: docs/vendor/tutorial-preflight-helm-create-release.mdx +File: docs/vendor/tutorial-kots-helm-create-release.md ================ -# Step 3: Add the Chart Archive to a Release +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import HelmChartCr from "../partials/getting-started/_gitea-helmchart-cr.mdx" +import KotsCr from "../partials/getting-started/_gitea-kots-app-cr.mdx" +import K8sCr from "../partials/getting-started/_gitea-k8s-app-cr.mdx" -Use the Replicated CLI to add the Gitea Helm chart archive to a release in the Replicated vendor platform. +# Step 4: Add the Chart Archive to a Release -To create a release: +Next, add the Helm chart archive to a new release for the application in the Replicated Vendor Portal. The purpose of this step is to configure a release that supports installation with both Replicated KOTS and with the Helm CLI. -1. Install the Replicated CLI: +A _release_ represents a single version of your application and contains your application files. Each release is promoted to one or more _channels_. Channels provide a way to progress releases through the software development lifecycle: from internal testing, to sharing with early-adopters, and finally to making the release generally available. - ``` - brew install replicatedhq/replicated/cli - ``` - For more installation options, see [Installing the Replicated CLI](/reference/replicated-cli-installing). +To create a release: -1. Authorize the Replicated CLI: +1. In the `gitea` directory, create a subdirectory named `manifests`: ``` - replicated login + mkdir manifests ``` - In the browser window that opens, complete the prompts to log in to your vendor account and authorize the CLI. -1. Create an application named `Gitea`: + You will add the files required to support installation with Replicated KOTS to this subdirectory. + +1. Move the Helm chart archive that you created to `manifests`: ``` - replicated app create Gitea + mv gitea-1.0.6.tgz manifests ``` -1. Get the slug for the application that you created: - +1. In `manifests`, create the YAML manifests required by KOTS: ``` - replicated app ls + cd manifests ``` - **Example output**: ``` - ID NAME SLUG SCHEDULER - 2WthxUIfGT13RlrsUx9HR7So8bR Gitea gitea-boxer kots + touch gitea.yaml kots-app.yaml k8s-app.yaml ``` - In the example above, the application slug is `gitea-boxer`. -1. Set the `REPLICATED_APP` environment variable to the application that you created. This allows you to interact with the application using the Replicated CLI without needing to use the `--app` flag with every command: +1. In each of the files that you created, paste the corresponding YAML provided in the tabs below: - **Example:** + + +
    Description
    +

    The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart. The name and chartVersion listed in the HelmChart custom resource must match the name and version of a Helm chart archive in the release. Each Helm chart archive in a release requires a unique HelmChart custom resource.
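    (The actual YAML for this file is supplied by a partial that is not rendered in this packed file. As a non-authoritative sketch, assuming the chart name and version used in this tutorial, a minimal HelmChart custom resource could look like the following.)

    ```yaml
    apiVersion: kots.io/v1beta2
    kind: HelmChart
    metadata:
      name: gitea
    spec:
      # Must match the name and version of the chart archive in the release
      chart:
        name: gitea
        chartVersion: 1.0.6
    ```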

    +
    YAML
    + +
    + +
    Description
    +

    The KOTS Application custom resource enables features in the KOTS Admin Console such as branding, release notes, port forwarding, dashboard buttons, application status indicators, and custom graphs.

    The YAML below provides a name for the application to display in the Admin Console, adds a custom status informer that displays the status of the gitea Deployment resource in the Admin Console dashboard, adds a custom application icon, and creates a port forward so that the user can open the Gitea application in a browser.
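    (The referenced YAML comes from a partial that is not rendered in this packed file. As a rough sketch only — the icon URL below is a placeholder, and the port values are assumptions based on the Gitea chart — a KOTS Application custom resource with those features could look like this.)

    ```yaml
    apiVersion: kots.io/v1beta1
    kind: Application
    metadata:
      name: gitea
    spec:
      title: Gitea
      # Placeholder icon URL for illustration
      icon: https://example.com/gitea-icon.png
      # Report the status of the gitea Deployment on the dashboard
      statusInformers:
        - deployment/gitea
      # Port forward so the user can open the application in a browser
      ports:
        - serviceName: gitea
          servicePort: 3000
          localPort: 8888
          applicationUrl: "http://gitea"
    ```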

    +
    YAML
    + +
    + +
    Description
    +

    The Kubernetes Application custom resource supports functionality such as including buttons and links on the KOTS Admin Console dashboard. The YAML below adds an Open App button to the Admin Console dashboard that opens the application using the port forward configured in the KOTS Application custom resource.
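    (The referenced YAML comes from a partial that is not rendered in this packed file. As a non-authoritative sketch, a Kubernetes Application custom resource that adds an Open App button could look like the following, with the `url` matched to the `applicationUrl` in the KOTS Application custom resource.)

    ```yaml
    apiVersion: app.k8s.io/v1beta1
    kind: Application
    metadata:
      name: gitea
    spec:
      descriptor:
        links:
          # Rendered as the Open App button on the Admin Console dashboard
          - description: Open App
            url: "http://gitea"
    ```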

    +
    YAML
    + +
    +
    + +1. From the `manifests` directory, lint the YAML files to confirm that there are no errors: ``` - export REPLICATED_APP=gitea-boxer + replicated release lint --yaml-dir . ``` + `--yaml-dir` is the path to the directory that contains the Helm chart archive and the manifest files required by KOTS. -1. Go to the `gitea` directory. - -1. Create a release with the Gitea chart archive: + **Example output**: ``` - replicated release create --chart=gitea-1.0.6.tgz + RULE TYPE FILENAME LINE MESSAGE + config-spec warn Missing config spec + preflight-spec warn Missing preflight spec + troubleshoot-spec warn Missing troubleshoot spec + nonexistent-status-informer-object warn kots-app.yaml 8 Status informer points to a nonexistent kubernetes object. If this is a Helm resource, this warning can be ignored. ``` - ```bash - You are creating a release that will only be installable with the helm CLI. - For more information, see - https://docs.replicated.com/vendor/helm-install#about-helm-installations-with-replicated + :::note + The output includes warning messages that list missing manifest files. These manifests control additional KOTS functionality and can be ignored for the purpose of this tutorial. The `nonexistent-status-informer-object` warning can also be ignored because the `gitea` Deployment resource that was added as a status informer in the KOTS Application custom resource is a Helm resource. + ::: - • Reading chart from gitea-1.0.6.tgz ✓ +1. Create a release: + + ``` + replicated release create --yaml-dir . + ``` + **Example output**: + ``` + • Reading manifests from . ✓ • Creating Release ✓ • SEQUENCE: 1 ``` @@ -56268,36 +46326,41 @@ To create a release: The release that you created is listed under **All releases**. -1. Click **View YAML** to view the files in the release. + ![Release page in the Vendor Portal with one release](/images/tutorial-kots-helm-release-seq-1.png) -1. At the top of the page, click **Promote**. + [View a larger version of this image](/images/tutorial-kots-helm-release-seq-1.png) - Promote release dialog +1. Click **Edit release** to view the files in the release. - [View a larger version of this image](/images/release-promote.png) + In the release editor, you can see the manifest files that you created, the Helm chart `.tgz` archive, and the `Chart.yaml` and `values.yaml` files for the Gitea Helm chart. You can also see the same warning messages that were displayed in the CLI output. -1. In the dialog, for **Which channels you would like to promote this release to?**, select **Unstable**. Unstable is a default channel that is intended for use with internal testing. + ![Edit Release page in the Vendor Portal](/images/tutorial-kots-helm-release-edit-seq-1.png) -1. For **Version label**, open the dropdown and select **1.0.6**. + [View a larger version of this image](/images/tutorial-kots-helm-release-edit-seq-1.png) -1. Click **Promote**. +1. At the top of the page, click **Promote**. + +1. In the dialog, for **Which channels you would like to promote this release to?**, select **Unstable**. Unstable is a default channel that is intended for use with internal testing. Click **Promote**. + + Promote release dialog + [View a larger version of this image](/images/release-promote.png) ## Next Step -Create a customer so that you can install the release in a development environment. See [Create a Customer](tutorial-preflight-helm-create-customer). +Create a customer with the KOTS entitlement so that you can install the release in your cluster using Replicated KOTS. 
See [Step 5: Create a KOTS-Enabled Customer](tutorial-kots-helm-create-customer). ## Related Topics * [About Channels and Releases](/vendor/releases-about) -* [Managing Releases with the CLI](/vendor/releases-creating-cli) +* [Configuring the HelmChart Custom Resource](/vendor/helm-native-v2-using) ================ -File: docs/vendor/tutorial-preflight-helm-get-chart.mdx +File: docs/vendor/tutorial-kots-helm-get-chart.md ================ # Step 1: Get the Sample Chart and Test -To begin, get the sample Gitea Helm chart from Bitnami, install the chart in your cluster using the Helm CLI, and then uninstall. The purpose of this step is to confirm that you can successfully install the application before adding preflight checks to the chart. +To begin, get the sample Gitea Helm chart from Bitnami, install the chart in your cluster using the Helm CLI, and then uninstall. The purpose of this step is to confirm that you can successfully install and access the application before adding the chart to a release in the Replicated Vendor Portal. To get the sample Gitea Helm chart and test installation: @@ -56372,15 +46435,6 @@ To get the sample Gitea Helm chart and test installation: echo "Gitea URL: http://$SERVICE_IP/" ``` - :::note - Alternatively, you can run the following command to forward a local port to a port on the Gitea Pod: - - ``` - POD_NAME=$(kubectl get pods -l app.kubernetes.io/name=gitea -o jsonpath='{.items[0].metadata.name}') - kubectl port-forward pod/$POD_NAME 8080:3000 - ``` - ::: - 1. In a browser, go to the Gitea URL to confirm that you can see the welcome page for the application: Gitea application webpage @@ -56402,4538 +46456,1856 @@ To get the sample Gitea Helm chart and test installation: ## Next Step -Define preflight checks and add them to the Gitea Helm chart. See [Add a Preflight Spec to the Chart](tutorial-preflight-helm-add-spec). +Log in to the Vendor Portal and create an application. See [Step 2: Create an Application](tutorial-kots-helm-create-app). ## Related Topics * [Helm Install](https://helm.sh/docs/helm/helm_install/) * [Helm Uninstall](https://helm.sh/docs/helm/helm_uninstall/) +* [Helm Create](https://helm.sh/docs/helm/helm_create/) * [Helm Package](https://helm.sh/docs/helm/helm_package/) * [bitnami/gitea](https://github.com/bitnami/charts/blob/main/bitnami/gitea) ================ -File: docs/vendor/tutorial-preflight-helm-install-kots.mdx +File: docs/vendor/tutorial-kots-helm-install-helm.md ================ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import HelmChartCr from "../partials/getting-started/_gitea-helmchart-cr.mdx" -import KotsCr from "../partials/getting-started/_gitea-kots-app-cr.mdx" -import K8sCr from "../partials/getting-started/_gitea-k8s-app-cr.mdx" -import KotsVerReq from "../partials/replicated-sdk/_kots-version-req.mdx" - -# Step 6: Run Preflights with KOTS - -Create a KOTS-enabled release and then install Gitea with KOTS. This purpose of this step is to see how preflight checks automatically run in the KOTS Admin Console during installation. - -To run preflight checks during installation with KOTS: - -1. In the `gitea` directory, create a subdirectory named `manifests`: - - ``` - mkdir manifests - ``` +# Step 7: Install the Release with the Helm CLI - You will add the files required to support installation with KOTS to this subdirectory. +Next, install the same release using the Helm CLI. All releases that contain one or more Helm charts can be installed with the Helm CLI. -1. 
Move the Helm chart archive to `manifests`: +All Helm charts included in a release are automatically pushed to the Replicated registry when the release is promoted to a channel. Helm CLI installations require that the customer has a valid email address to authenticate with the Replicated registry. - ``` - mv gitea-1.0.6.tgz manifests - ``` +To install the release with the Helm CLI: -1. In `manifests`, create the YAML manifests required by KOTS: - ``` - cd manifests - ``` - ``` - touch gitea.yaml kots-app.yaml k8s-app.yaml - ``` +1. Create a new customer to test the Helm CLI installation: -1. In each of the files that you created, paste the corresponding YAML provided in the tabs below: + 1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers > Create customer**. - - -
    Description
    -

    The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart. The name and chartVersion listed in the HelmChart custom resource must match the name and version of a Helm chart archive in the release. Each Helm chart archive in a release requires a unique HelmChart custom resource.

    -
    YAML
    - -
    - -
    Description
    -

    The KOTS Application custom resource enables features in the Replicated Admin Console such as branding, release notes, port forwarding, dashboard buttons, application status indicators, and custom graphs.

    The YAML below provides a name for the application to display in the Admin Console, adds a custom status informer that displays the status of the gitea Deployment resource in the Admin Console dashboard, adds a custom application icon, and creates a port forward so that the user can open the Gitea application in a browser.

    -
    YAML
    - -
    - -
    Description
    -

    The Kubernetes Application custom resource supports functionality such as including buttons and links on the Replicated Admin Console dashboard. The YAML below adds an Open App button to the Admin Console dashboard that opens the application using the port forward configured in the KOTS Application custom resource.

    -
    YAML
    - -
    -
    + The **Create a new customer** page opens: -1. From the `manifests` directory, lint the YAML files to confirm that there are no errors: + ![Customer a new customer page in the Vendor Portal](/images/create-customer.png) - ``` - replicated release lint --yaml-dir . - ``` - `--yaml-dir` is the path to the directory that contains the Helm chart archive and the manifest files required by KOTS. + [View a larger version of this image](/images/create-customer.png) - **Example output**: + 1. For **Customer name**, enter a name for the customer. For example, `Helm Customer`. - ``` - RULE TYPE FILENAME LINE MESSAGE - config-spec warn Missing config spec - preflight-spec warn Missing preflight spec - troubleshoot-spec warn Missing troubleshoot spec - nonexistent-status-informer-object warn kots-app.yaml 8 Status informer points to a nonexistent kubernetes object. If this is a Helm resource, this warning can be ignored. - ``` - - The output includes warning messages, including a warning about a missing preflight spec. This warning appears because the preflight spec is defined in the Helm chart. The warnings can be ignored for the purpose of this tutorial. + 1. For **Channel**, select **Unstable**. This allows the customer to install releases promoted to the Unstable channel. -1. Create a release: + 1. For **Customer email**, enter the email address for the customer. The customer email address is required to install the application with the Helm CLI. This email address is never used send emails to customers. - ```bash - replicated release create --yaml-dir . - ``` - **Example output**: - ```bash - • Reading manifests from . ✓ - • Creating Release ✓ - • SEQUENCE: 2 - ``` + 1. For **License type**, select Trial. -1. Log in to the [vendor portal](https://vendor.replicated.com) and go to **Releases**. The new release is labeled **Sequence 2**. + 1. (Optional) For **License options**, _disable_ the **KOTS Install Enabled** entitlement. -1. Promote the release to the Unstable channel. + 1. Click **Save Changes**. -1. Go to the **Customers** page. +1. On the **Manage customer** page for the new customer, click **Helm install instructions**. -1. Create a new customer named `KOTS Preflight Customer`. For **License options**, enable the **KOTS Install Enabled** checkbox. This is the entitlement that allows the customer to install with KOTS. + ![Helm install instrucitons button](/images/tutorial-gitea-helm-customer-install-button.png) -1. On the **Manage customer** page for the customer, click **Download license**. You will use the license file to install with KOTS. + [View a larger version of this image](/images/tutorial-gitea-helm-customer-install-button.png) -1. Go to **Channels**. From the **Unstable** channel card, under **Install**, copy the **KOTS Install** command. + You will use the instructions provided in the **Helm install instructions** dialog to install the chart. - ![KOTS Install tab on the Unstable channel card](/images/helm-tutorial-unstable-kots-install-command.png) +1. Before you run the first command in the **Helm install instructions** dialog, create a `gitea` namespace for the installation: - [View a larger version of this image](/images/helm-tutorial-unstable-kots-install-command.png) + ``` + kubectl create namespace gitea + ``` -1. On the command line, run the **KOTS Install** command that you copied: +1. Update the current kubectl context to target the new `gitea` namespace. 
This ensures that the chart is installed in the `gitea` namespace without requiring you to set the `--namespace` flag with the `helm install` command: - ```bash - curl https://kots.io/install | bash - kubectl kots install $REPLICATED_APP/unstable + ``` + kubectl config set-context --namespace=gitea --current ``` - This installs the latest version of the KOTS CLI and the Replicated Admin Console. The Admin Console provides a user interface where you can upload the customer license file and deploy the application. +1. Run the commands in the provided in the **Helm install instructions** dialog to log in to the registry and install the Helm chart. - For additional KOTS CLI installation options, including how to install without root access, see [Installing the KOTS CLI](/reference/kots-cli-getting-started). + Helm install instructions dialog + + [View a larger version of this image](/images/tutorial-gitea-helm-install-instructions.png) :::note - + You can ignore the **No preflight checks found** warning for the purpose of this tutorial. This warning appears because there are no specifications for preflight checks in the Helm chart archive. ::: -1. Complete the installation command prompts: - - 1. For `Enter the namespace to deploy to`, enter `gitea`. - - 1. For `Enter a new password to be used for the Admin Console`, provide a password to access the Admin Console. - - When the Admin Console is ready, the command prints the URL where you can access the Admin Console. At this point, the KOTS CLI is installed and the Admin Console is running, but the application is not yet deployed. - - **Example output:** - - ```bash - Enter the namespace to deploy to: gitea - • Deploying Admin Console - • Creating namespace ✓ - • Waiting for datastore to be ready ✓ - Enter a new password for the Admin Console (6+ characters): •••••••• - • Waiting for Admin Console to be ready ✓ - - • Press Ctrl+C to exit - • Go to http://localhost:8800 to access the Admin Console - ``` - -1. With the port forward running, in a browser, go to `http://localhost:8800` to access the Admin Console. - -1. On the login page, enter the password that you created. - -1. On the license page, select the license file that you downloaded previously and click **Upload license**. - - Preflight checks run automatically: - - ![Gitea preflight checks page](/images/gitea-preflights-admin-console.png) - - [View a larger version of this image](/images/gitea-preflights-admin-console.png) - -1. When the preflight checks finish, click **Deploy** to deploy the application. - - The Admin Console dashboard opens. The application status changes from Missing to Unavailable while the `gitea` Deployment is being created: - - ![Admin console dashboard](/images/tutorial-gitea-unavailable.png) - - [View a larger version of this image](/images/tutorial-gitea-unavailable.png) - -1. (Optional) After the application is in a Ready status, click **Open App** to view the Gitea application in a browser. - -1. Uninstall the Gitea application from your cluster: +1. After the installation command completes, you can see that both the `gitea` Deployment and the Replicated SDK `replicated` Deployment were created: - ```bash - kubectl kots remove $REPLICATED_APP --namespace gitea --undeploy ``` - **Example output**: - ``` - • Removing application gitea-boxer reference from Admin Console and deleting associated resources from the cluster ✓ - • Application gitea-boxer has been removed + kubectl get deploy ``` - -1. Remove the Admin Console from the cluster: - - 1. 
Delete the namespace where the Admin Console is installed: - - ``` - kubectl delete namespace gitea - ``` - 1. Delete the Admin Console ClusterRole and ClusterRoleBinding: - - ``` - kubectl delete clusterrole kotsadm-role - ``` - ``` - kubectl delete clusterrolebinding kotsadm-rolebinding - ``` - -## Summary - -Congratulations! In this tutorial, you defined a preflight check for Gitea that checks the version of Kubernetes running in the cluster. You also ran preflight checks before installing with both the Helm CLI and with KOTS. - -To learn more about defining and running preflight checks, see: -* [Defining Preflight Checks](/vendor/preflight-defining) -* [Running Preflight Checks](/vendor/preflight-running) -* [Getting Started](https://troubleshoot.sh/docs/) in the open source Troubleshoot documentation. - -================ -File: docs/vendor/tutorial-preflight-helm-install.mdx -================ -# Step 5: Run Preflights with the Helm CLI - -Use the Helm CLI installation instructions provided for the customer that you created to run the preflight checks for Gitea and install. The purpose of this step is to demonstrate how enterprise users can run preflight checks defined in a Helm chart before installing. - -To run preflight checks and install with the Helm CLI: - -1. Create a `gitea` namespace for the installation: - + **Example output:** ``` - kubectl create namespace gitea + NAME READY UP-TO-DATE AVAILABLE AGE + gitea 0/1 1 0 35s + replicated 1/1 1 1 35s ``` -1. Update the current kubectl context to target the new `gitea` namespace. This ensures that the chart is installed in the `gitea` namespace without requiring you to set the `--namespace` flag with the `helm install` command: +1. Watch the `gitea` LoadBalancer service until an external IP is available: ``` - kubectl config set-context --namespace=gitea --current + kubectl get svc gitea --watch ``` -1. In the [vendor portal](https://vendor.replicated.com), go to the **Customers** page. - -1. On the **Customer details** page for the customer that you created, click **Helm install instructions**. - - ![Helm install instrucitons button](/images/tutorial-gitea-helm-customer-install-button.png) - - [View a larger version of this image](/images/tutorial-gitea-helm-customer-install-button.png) +1. After an external IP address is available for the `gitea` LoadBalancer service, follow the instructions in the output of the installation command to get the Gitea URL and then confirm that you can open the application in a browser. -1. Run the first command in the **Helm install instructions** dialog to log in to the Replicated registry. +1. In another browser window, open the [Vendor Portal](https://vendor.replicated.com/) and go to **Customers**. Select the customer that you created for the Helm CLI installation. -1. Run the second command to install the preflight kubectl plugin: + On the **Reporting** page for the customer, because the Replicated SDK was installed alongside the Gitea Helm chart, you can see details about the customer's license and installed instances: - ```bash - curl https://krew.sh/preflight | bash - ``` - The preflight plugin is a client-side utility used to run preflight checks. + ![Customer reporting](/images/tutorial-gitea-helm-reporting.png) -1. 
Run the third command to run preflight checks: + [View a larger version of this image](/images/tutorial-gitea-helm-reporting.png) - ```bash - helm template oci://registry.replicated.com/$REPLICATED_APP/unstable/gitea | kubectl preflight - - ``` - This command templates the Gitea chart and then pipes the result to the preflight plugin. The following shows an example of the ouput for this command: +1. On the **Reporting** page, under **Instances**, click on the instance that you just installed to open the instance details page. - Preflight CLI output + On the instance details page, you can see additional insights such as the cluster where the application is installed, the version of the Replicated SDK running in the cluster, instance status and uptime, and more: - [View a larger version of this image](/images/gitea-preflights-cli.png) + ![Customer instance details](/images/tutorial-gitea-helm-instance.png) -1. Run the fourth command listed under **Option 1: Install Gitea** to install the application: + [View a larger version of this image](/images/tutorial-gitea-helm-instance.png) - ```bash - helm install gitea oci://registry.replicated.com/$REPLICATED_APP/unstable/gitea - ``` +1. Uninstall the Helm chart and the Replicated SDK: -1. Uninstall and delete the namespace: + ``` + helm uninstall gitea + ``` - ```bash - helm uninstall gitea --namespace gitea - ``` - ```bash - kubectl delete namespace gitea - ``` +1. Delete the `gitea` namespace: + + ``` + kubectl delete namespace gitea + ``` ## Next Step -Install the application with KOTS to see how preflight checks are run from the KOTS Admin Console. See [Run Preflights with KOTS](tutorial-preflight-helm-install-kots). +Congratulations! As part of this tutorial, you created a release in the Replicated Vendor Portal and installed the release with both KOTS and the Helm CLI. ## Related Topics -* [Running Preflight Checks](/vendor/preflight-running) * [Installing with Helm](/vendor/install-with-helm) +* [About the Replicated SDK](/vendor/replicated-sdk-overview) +* [Helm Uninstall](https://helm.sh/docs/helm/helm_uninstall/) +* [Helm Delete](https://helm.sh/docs/helm/helm_delete/) ================ -File: docs/vendor/tutorial-preflight-helm-setup.mdx +File: docs/vendor/tutorial-kots-helm-install-kots.md ================ -# Introduction and Setup - -This topic provides a summary of the goals and outcomes for the tutorial and also lists the prerequisites to set up your environment before you begin. - -## Summary - -This tutorial introduces you to preflight checks. The purpose of preflight checks is to provide clear feedback about any missing requirements or incompatibilities in the customer's cluster _before_ they install or upgrade an application. Thorough preflight checks provide increased confidence that an installation or upgrade will succeed and help prevent support escalations. +import KotsVerReq from "../partials/replicated-sdk/_kots-version-req.mdx" -Preflight checks are part of the [Troubleshoot](https://troubleshoot.sh/) open source project, which is maintained by Replicated. +# Step 6: Install the Release with KOTS -In this tutorial, you use a sample Helm chart to learn how to: +Next, get the KOTS installation command from the Unstable channel in the Vendor Portal and then install the release using the customer license that you downloaded. 
-* Define custom preflight checks in a Kubernetes Secret in a Helm chart -* Package a Helm chart and add it to a release in the Replicated Vendor Portal -* Run preflight checks using the Helm CLI -* Run preflight checks in the Replicated KOTS Admin Console +To install the release with KOTS: -## Set Up the Environment +1. In the [Vendor Portal](https://vendor.replicated.com), go to **Channels**. From the **Unstable** channel card, under **Install**, copy the **KOTS Install** command. -Before you begin, do the following to set up your environment: + ![KOTS Install tab on the Unstable channel card](/images/helm-tutorial-unstable-kots-install-command.png) -* Ensure that you have kubectl access to a Kubernetes cluster. You can use any cloud provider or tool that you prefer to create a cluster, such as Google Kubernetes Engine (GKE), Amazon Web Services (AWS), or minikube. + [View a larger version of this image](/images/helm-tutorial-unstable-kots-install-command.png) - For information about installing kubectl and configuring kubectl access to a cluster, see the following in the Kubernetes documentation: - * [Install Tools](https://kubernetes.io/docs/tasks/tools/) - * [Command line tool (kubectl)](https://kubernetes.io/docs/reference/kubectl/) +1. On the command line, run the **KOTS Install** command that you copied: -* Install the Helm CLI. To install the Helm CLI using Homebrew, run: + ```bash + curl https://kots.io/install | bash + kubectl kots install $REPLICATED_APP/unstable + ``` - ``` - brew install helm - ``` + This installs the latest version of the KOTS CLI and the Replicated KOTS Admin Console. The Admin Console provides a user interface where you can upload the customer license file and deploy the application. - For more information, including alternative installation options, see [Install Helm](https://helm.sh/docs/intro/install/) in the Helm documentation. + For additional KOTS CLI installation options, including how to install without root access, see [Installing the KOTS CLI](/reference/kots-cli-getting-started). -* Create a vendor account to access the Vendor Portal. See [Creating a Vendor Portal](/vendor/vendor-portal-creating-account). + :::note + + ::: - :::note - If you do not yet have a Vendor Portal team to join, you can sign up for a trial account. By default, trial accounts do not include access to Replicated KOTS. To get access to KOTS with your trial account so that you can complete this and other tutorials, contact Replicated at contact@replicated.com. - ::: +1. Complete the installation command prompts: -## Next Step + 1. For `Enter the namespace to deploy to`, enter `gitea`. -Get the sample Bitnami Helm chart and test installation with the Helm CLI. See [Step 1: Get the Sample Chart and Test](/vendor/tutorial-preflight-helm-get-chart) + 1. For `Enter a new password to be used for the Admin Console`, provide a password to access the Admin Console. -================ -File: docs/vendor/using-third-party-registry-proxy.mdx -================ -# Using a Registry Proxy for Helm Air Gap Installations + When the Admin Console is ready, the command prints the URL where you can access the Admin Console. At this point, the KOTS CLI is installed and the Admin Console is running, but the application is not yet deployed. -This topic describes how to connect the Replicated proxy registry to a Harbor or jFrog Artifactory instance to support pull-through image caching. It also includes information about how to set up replication rules in Harbor for image mirroring. 
+ **Example output:** -## Overview + ```bash + Enter the namespace to deploy to: gitea + • Deploying Admin Console + • Creating namespace ✓ + • Waiting for datastore to be ready ✓ + Enter a new password for the admin console (6+ characters): •••••••• + • Waiting for Admin Console to be ready ✓ -For applications distributed with Replicated, the [Replicated proxy registry](/vendor/private-images-about) grants proxy, or _pull-through_, access to application images without exposing registry credentials to customers. + • Press Ctrl+C to exit + • Go to http://localhost:8800 to access the Admin Console + ``` -Users can optionally connect the Replicated proxy registry with their own [Harbor](https://goharbor.io) or [jFrog Artifactory](https://jfrog.com/help/r/jfrog-artifactory-documentation) instance to proxy and cache the images that are required for installation on demand. This can be particularly helpful in Helm installations in air-gapped environments because it allows users to pull and cache images from an internet-connected machine, then access the cached images during installation from a machine with limited or no outbound internet access. +1. With the port forward running, in a browser, go to `http://localhost:8800` to access the Admin Console. -In addition to the support for on-demand pull-through caching, connecting the Replicated proxy registry to a Harbor or Artifactory instance also has the following benefits: -* Registries like Harbor or Artifactory typically support access controls as well as scanning images for security vulnerabilities -* With Harbor, users can optionally set up replication rules for image mirroring, which can be used to improve data availability and reliability +1. On the login page, enter the password that you created. -## Limtiation +1. On the license page, select the license file that you downloaded previously and click **Upload license**. -Artifactory does not support mirroring or replication for Docker registries. If you need to set up image mirroring, use Harbor. See [Set Up Mirroring in Harbor](#harbor-mirror) below. + The Admin Console dashboard opens. The application status changes from Missing to Unavailable while the `gitea` Deployment is being created: -## Connect the Replicated Proxy Registry to Harbor + ![Admin console dashboard](/images/tutorial-gitea-unavailable.png) -[Harbor](https://goharbor.io) is a popular open-source container registry. Users can connect the Replicated proxy registry to Harbor in order to cache images on demand and set up pull-based replication rules to proactively mirror images. Connecting the Replicated proxy registry to Harbor also allows customers use Harbor's security features. + [View a larger version of this image](/images/tutorial-gitea-unavailable.png) -### Use Harbor for Pull-Through Proxy Caching {#harbor-proxy-cache} +1. While waiting for the `gitea` Deployment to be created, do the following: -To connect the Replicated proxy registry to Harbor for pull-through proxy caching: + 1. On the command line, press Ctrl+C to exit the port forward. -1. Log in to Harbor and create a new replication endpoint. This endpoint connects the Replicated proxy registry to the Harbor instance. For more information, see [Creating Replication Endpoints](https://goharbor.io/docs/2.11.0/administration/configuring-replication/create-replication-endpoints/) in the Harbor documentation. + 1. Watch for the `gitea` Deployment to become ready: -1. 
Enter the following details for the endpoint: + ``` + kubectl get deploy gitea --namespace gitea --watch + ``` - * For the provider field, choose Docker Registry. - * For the URL field, enter `https://proxy.replicated.com` or the custom domain that is configured for the Replicated proxy registry. For more information about configuring custom domains in the Vendor Portal, see [Using Custom Domains](/vendor/custom-domains-using). - * For the access ID, enter the email address associated with the customer in the Vendor Portal. - * For the access secret, enter the customer's unique license ID. You can find the license ID in the Vendor Portal by going to **Customers > [Customer Name]**. + 1. After the `gitea` Deployment is ready, confirm that an external IP for the `gitea` LoadBalancer service is available: -1. Verify your configuration by testing the connection and then save the endpoint. + ``` + kubectl get svc gitea --namespace gitea + ``` -1. After adding the Replicated proxy registry as a replication endpoint in Harbor, set up a proxy cache. This allows for pull-through image caching with Harbor. For more information, see [Configure Proxy Cache](https://goharbor.io/docs/2.11.0/administration/configure-proxy-cache/) in the Harbor documentation. + 1. Start the port foward again to access the Admin Console: -1. (Optional) Add a pull-based replication rule to support image mirroring. See [Configure Image Mirroring in Harbor](#harbor-mirror) below. + ``` + kubectl kots admin-console --namespace gitea + ``` -### Configure Image Mirroring in Harbor {#harbor-mirror} + 1. Go to `http://localhost:8800` to open the Admin Console. -To enable image mirroring with Harbor, users create a pull-based replication rule. This periodically (or when manually triggered) pulls images from the Replicated proxy registry to store them in Harbor. +1. On the Admin Console dashboard, the application status is now displayed as Ready and you can click **Open App** to view the Gitea application in a browser: -The Replicated proxy regsitry exposes standard catalog and tag listing endpoints that are used by Harbor to support image mirroring: -* The catalog endpoint returns a list of repositories built from images of the last 10 releases. -* The tags listing endpoint lists the tags available in a given repository for those same releases. + ![Admin console dashboard showing ready status](/images/tutorial-gitea-ready.png) -When image mirroring is enabled, Harbor uses these endpoints to build a list of images to cache and then serve. + [View a larger version of this image](/images/tutorial-gitea-ready.png) -#### Limitations +1. In another browser window, open the [Vendor Portal](https://vendor.replicated.com/) and go to **Customers**. Select the customer that you created. -Image mirroring with Harbor has the following limitations: + On the **Reporting** page for the customer, you can see details about the customer's license and installed instances: -* Neither the catalog or tags listing endpoints exposed by the Replicated proxy service respect pagination requests. However, Harbor requests 1000 items at a time. + ![Customer reporting page](/images/tutorial-gitea-customer-reporting.png) -* Only authenticated users can perform catalog calls or list tags. Authenticated users are those with an email address and license ID associated with a customer in the Vendor Portal. + [View a larger version of this image](/images/tutorial-gitea-customer-reporting.png) -#### Create a Pull-Based Replication Rule in Harbor for Image Mirroring +1. 
-To configure image mirroring in Harbor: + On the instance details page, you can see additional insights such as the cluster where the application is installed, the version of KOTS running in the cluster, instance status and uptime, and more: -1. Follow the steps in [Use Harbor for Pull-Through Proxy Caching](#harbor-proxy-cache) above to add the Replicated proxy registry to Harbor as a replication endpoint. + ![Customer instance details page](/images/tutorial-gitea-instance-insights.png) -1. Create a **pull-based** replication rule in Harbor to mirror images proactively. For more information, see [Creating a replication rule](https://goharbor.io/docs/2.11.0/administration/configuring-replication/create-replication-rules/) in the Harbor documentation. + [View a larger version of this image](/images/tutorial-gitea-instance-insights.png) -## Use Artifactory for Pull-Through Proxy Caching +1. Uninstall the Gitea application from your cluster so that you can install the same release again using the Helm CLI: -[jFrog Artifactory](https://jfrog.com/help/r/jfrog-artifactory-documentation) supports pull-through caching for Docker registries. + ```bash + kubectl kots remove $REPLICATED_APP --namespace gitea --undeploy + ``` + **Example output**: + ``` + • Removing application gitea-boxer reference from Admin Console and deleting associated resources from the cluster ✓ + • Application gitea-boxer has been removed + ``` -For information about how to configure a pull-through cache with Artifactory, see [Remote Repository](https://jfrog.com/help/r/jfrog-artifactory-documentation/configure-a-remote-repository) in the Artifactory documentation. +1. Remove the Admin Console from the cluster: ================ -File: docs/vendor/vendor-portal-application-settings.md ================ -# Application Settings Page + 1. Delete the namespace where the Admin Console is installed: -Each application has its own settings, which include the application name and application slug. + ``` + kubectl delete namespace gitea + ``` + 1. Delete the Admin Console ClusterRole and ClusterRoleBinding: -The following shows the **Application Settings** page, which you access by selecting **_Application Name_ > Settings**: + ``` + kubectl delete clusterrole kotsadm-role + ``` + ``` + kubectl delete clusterrolebinding kotsadm-rolebinding + ``` -Settings page +## Next Step -[View a larger version of this image](/images/application-settings.png) +Install the same release with the Helm CLI. See [Step 7: Install the Release with the Helm CLI](tutorial-kots-helm-install-helm). -The following describes each of the application settings: +## Related Topics -- **Application name:** The application name is initially set when you first create the application in the Vendor Portal. You can change the name at any time so that it displays as a user-friendly name that your team can easily identify. -- **Application slug:** The application slug is used with the Replicated CLI and with some of the KOTS CLI commands. You can click on the link below the slug to toggle between the application ID number and the slug name. The application ID and application slug are unique identifiers that cannot be edited. -- **Service Account Tokens:** Provides a link to the **Service Accounts** page, where you can create or remove a service account. Service accounts are paired with API tokens and are used with the Vendor API to automate tasks. For more information, see [Using Vendor API Tokens](/reference/vendor-api-using).
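As a quick illustration of that kind of automation, the hedged sketch below lists the applications in a team by calling the Vendor API v3 with a service account token. The `REPLICATED_API_TOKEN` variable name is an assumption; see the Vendor API documentation linked above for the authoritative endpoint reference.

```bash
# Sketch: list applications using a service account token with the
# Vendor API v3 ($REPLICATED_API_TOKEN is an assumed variable name)
curl --silent \
  --header "Authorization: $REPLICATED_API_TOKEN" \
  https://api.replicated.com/vendor/v3/apps
```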
-- **Scheduler:** Displayed if the application has a KOTS entitlement. -- **Danger Zone:** Lets you delete the application, and all of the licenses and data associated with the application. The delete action cannot be undone. +* [kots install](/reference/kots-cli-install/) +* [Installing the KOTS CLI](/reference/kots-cli-getting-started/) +* [Deleting the Admin Console and Removing Applications](/enterprise/delete-admin-console) +* [Customer Reporting](customer-reporting) +* [Instance Details](instance-insights-details) ================ -File: docs/vendor/vendor-portal-creating-account.md +File: docs/vendor/tutorial-kots-helm-package-chart.md ================ -# Creating a Vendor Account +import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" +import UnauthorizedError from "../partials/replicated-sdk/_401-unauthorized.mdx" -To get started with Replicated, you must create a Replicated vendor account. When you create your account, you are also prompted to create an application. To create additional applications in the future, log in to the Replicated Vendor Portal and select **Create new app** from the Applications drop-down list. +# Step 3: Package the Helm Chart -To create a vendor account: +Next, add the Replicated SDK as a dependency of the Helm chart and then package the chart into a `.tgz` archive. The purpose of this step is to prepare the Helm chart to be added to a release. -1. Go to the [Vendor Portal](https://vendor.replicated.com), and select **Sign up**. - The sign up page opens. +The Replicated SDK is a Helm chart that can be optionally added as a dependency of your application Helm chart. The SDK is installed as a small service running alongside your application, and provides an in-cluster API that you can use to embed Replicated features into your application. Additionally, the Replicated SDK provides access to insights and telemetry for instances of your application installed with the Helm CLI. -2. Enter your email address or continue with Google authentication. - - If registering with an email, the Activate account page opens and you will receive an activation code in your email. +To add the Replicated SDK and package the Helm chart: - :::note - To resend the code, click **Resend it**. - ::: +1. In your local file system, go to the `gitea` directory that was created as part of [Step 1: Get the Sample Chart and Test](tutorial-kots-helm-get-chart). - - Copy and paste the activation code into the text box and click **Activate**. Your account is now activated. +1. In the `Chart.yaml` file, add the Replicated SDK as a dependency: - :::note - After your account is activated, you might have the option to accept a pending invitation, or to automatically join an existing team if the auto-join feature is enabled by your administrator. For more information about enabling the auto-join feature, see [Enable Users to Auto-join Your Team](https://docs.replicated.com/vendor/team-management#enable-users-to-auto-join-your-team). - ::: + <DependencyYaml/> -3. On the Create your team page, enter your first name, last name, and company name. Click **Continue** to complete the setup. - :::note - The company name you provide is used as your team name in Vendor Portal. - ::: +1. Update dependencies and package the Helm chart to a `.tgz` chart archive: - The Create application page opens. + ```bash + helm package . --dependency-update + ```
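Before moving on, you can optionally confirm that the dependency was recorded and the archive was written. A minimal sketch, assuming the commands above succeeded and the chart version is 1.0.6:

```bash
# Sketch: verify the declared dependencies and the packaged archive
helm dependency list          # the replicated chart should be listed
ls gitea-*.tgz                # for example, gitea-1.0.6.tgz
```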
+ <UnauthorizedError/> -4. Enter a name for the application, such as `My-Application-Demo`. Click **Create application**. +## Next Step - The application is created and the Channels page opens. +Create a release using the Helm chart archive. See [Step 4: Add the Chart Archive to a Release](tutorial-kots-helm-create-release). - :::important - Replicated recommends that you use a temporary name for the application at this time such as `My-Application-Demo` or `My-Application-Test`. +## Related Topics - Only use an official name for your application when you have completed testing and are ready to distribute the application to your customers. +* [Packaging a Helm Chart for a Release](/vendor/helm-install-release.md) +* [About the Replicated SDK](/vendor/replicated-sdk-overview) +* [Helm Package](https://helm.sh/docs/helm/helm_package/) - Replicated recommends that you use a temporary application name for testing because you are not able to restore or modify previously-used application names or application slugs in the Vendor Portal. - ::: ================ -File: docs/vendor/vendor-portal-manage-app.md +File: docs/vendor/tutorial-kots-helm-setup.md ================ -# Managing Applications +# Introduction and Setup -This topic provides information about managing applications, including how to create, delete, and retrieve the slug for applications in the Replicated Vendor Portal and with the Replicated CLI. +This topic provides a summary of the goals and outcomes for the tutorial and also lists the prerequisites to set up your environment before you begin. -For information about creating and managing applications with the Vendor API v3, see the [apps](https://replicated-vendor-api.readme.io/reference/createapp) section in the Vendor API v3 documentation. +## Summary -## Create an Application +This tutorial introduces you to the Replicated Vendor Portal, the Replicated CLI, the Replicated SDK, and the Replicated KOTS installer. -Teams can create one or more applications. It is common to create multiple applications for testing purposes. +In this tutorial, you use a sample Helm chart to learn how to: -### Vendor Portal +* Add the Replicated SDK to a Helm chart as a dependency +* Create a release with the Helm chart using the Replicated CLI +* Add custom resources to the release so that it supports installation with both the Helm CLI and Replicated KOTS +* Install the release in a cluster using KOTS and the KOTS Admin Console +* Install the same release using the Helm CLI -To create a new application: +## Set Up the Environment -1. Log in to the [Vendor Portal](https://vendor.replicated.com/). If you do not have an account, see [Creating a Vendor Account](/vendor/vendor-portal-creating-account). +Before you begin, do the following to set up your environment: -1. 
In the top left of the page, open the application drop down and click **Create new app...**. + ``` + brew install helm + ``` - create new app drop down + For more information, including alternative installation options, see [Install Helm](https://helm.sh/docs/intro/install/) in the Helm documentation. - [View a larger version of this image](/images/create-new-app.png) +* Create a vendor account to access the Vendor Portal. See [Creating a Vendor Portal](/vendor/vendor-portal-creating-account). -1. On the **Create application** page, enter a name for the application. + :::note + If you do not yet have a Vendor Portal team to join, you can sign up for a trial account. By default, trial accounts do not include access to Replicated KOTS. To get access to KOTS with your trial account so that you can complete this and other tutorials, contact Replicated at contact@replicated.com. + ::: - create new app page +## Next Step - [View a larger version of this image](/images/create-application-page.png) +Get the sample Bitnami Helm chart and test installation with the Helm CLI. See [Step 1: Get the Sample Chart and Test](/vendor/tutorial-kots-helm-get-chart) - :::important - If you intend to use the application for testing purposes, Replicated recommends that you use a temporary name such as `My Application Demo` or `My Application Test`. +================ +File: docs/vendor/tutorial-preflight-helm-add-spec.mdx +================ +import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" - You are not able to restore or modify previously-used application names or application slugs. - ::: +# Step 2: Add a Preflight Spec to the Chart -1. Click **Create application**. +Create a preflight specification that fails if the cluster is running a version of Kubernetes earlier than 1.23.0, and add the specification to the Gitea chart as a Kubernetes Secret. -### Replicated CLI +To add a preflight specification to the Gitea chart: -To create an application with the Replicated CLI: +1. In the `gitea/templates` directory, create a `gitea-preflights.yaml` file: -1. Install the Replicated CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing). + ``` + touch templates/gitea-preflights.yaml + ``` -1. Run the following command: +1. In the `gitea-preflights.yaml` file, add the following YAML to create a Kubernetes Secret with a preflight check specification: - ```bash - replicated app create APP-NAME + ```yaml + apiVersion: v1 + kind: Secret + metadata: + labels: + troubleshoot.sh/kind: preflight + name: gitea-preflight-checks + stringData: + preflight.yaml: | + apiVersion: troubleshoot.sh/v1beta2 + kind: Preflight + metadata: + name: gitea-preflight-checks + spec: + analyzers: + - clusterVersion: + outcomes: + - fail: + when: "< 1.23.0" + message: |- + Your cluster is running a version of Kubernetes that is not supported and your installation will not succeed. To continue, upgrade your cluster to Kubernetes 1.23.0 or later. + uri: https://www.kubernetes.io + - pass: + message: Your cluster is running the required version of Kubernetes. ``` - Replace `APP-NAME` with the name that you want to use for the new application. - **Example**: + The YAML above defines a preflight check that fails if the target cluster is running a version of Kubernetes earlier than 1.23.0. The preflight check also includes a message to the user that describes the failure and lists the required Kubernetes version. 
The `troubleshoot.sh/kind: preflight` label is required to run preflight checks defined in Secrets. - ```bash - replicated app create cli-app - ID NAME SLUG SCHEDULER - 1xy9t8G9CO0PRGzTwSwWFkMUjZO cli-app cli-app kots - ``` +1. In the Gitea `Chart.yaml` file, add the Replicated SDK as a dependency: -## Get the Application Slug {#slug} + -Each application has a slug, which is used for interacting with the application using the Replicated CLI. The slug is automatically generated based on the application name and cannot be changed. + The SDK is installed as a small service running alongside your application, and provides an in-cluster API that you can use to embed Replicated features into your application. -### Vendor Portal +1. Update dependencies and package the chart to a `.tgz` chart archive: -To get an application slug in the Vendor Portal: + ```bash + helm package . --dependency-update + ``` -1. Log in to the [Vendor Portal](https://vendor.replicated.com/) and go to **_Application Name_ > Settings**. + :::note + If you see a `401 Unauthorized` error message, log out of the Replicated registry by running `helm registry logout registry.replicated.com` and then run `helm package . --dependency-update` again. + ::: -1. Under **Application Slug**, copy the slug. +## Next Step - Application slug +Add the chart archive to a release. See [Add the Chart Archive to a Release](tutorial-preflight-helm-create-release). - [View a larger version of this image](/images/application-settings.png) +## Related Topics -### Replicated CLI +* [Defining Preflight Checks](/vendor/preflight-defining) +* [Packaging a Helm Chart for a Release](/vendor/helm-install-release) -To get an application slug with the Replicated CLI: +================ +File: docs/vendor/tutorial-preflight-helm-create-customer.mdx +================ +# Step 4: Create a Customer -1. Install the Replicated CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing). +After promoting the release, create a customer so that you can run the preflight checks and install. -1. Run the following command: +To create a customer: - ```bash - replicated app ls APP-NAME - ``` - Replace `APP-NAME` with the name of the target application. Or, exclude `APP-NAME` to list all applications in the team. +1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers > Create customer**. - **Example:** + The **Create a new customer** page opens: - ```bash - replicated app ls cli-app - ID NAME SLUG SCHEDULER - 1xy9t8G9CO0PRGzTwSwWFkMUjZO cli-app cli-app kots - ``` + ![Customer a new customer page in the Vendor Portal](/images/create-customer.png) -1. Copy the value in the `SLUG` field. + [View a larger version of this image](/images/create-customer.png) -## Delete an Application +1. For **Customer name**, enter a name for the customer. For example, `Preflight Customer`. -When you delete an application, you also delete all licenses and data associated with the application. You can also optionally delete all images associated with the application from the Replicated registry. Deleting an application cannot be undone. +1. For **Channel**, select **Unstable**. This allows the customer to install releases promoted to the Unstable channel. -### Vendor Portal +1. For **Customer email**, enter the email address for the customer. The customer email address is required to install the application with the Helm CLI. This email address is never used send emails to customers. -To delete an application in the Vendor Portal: +1. 
For **License type**, select Development. -1. Log in to the [Vendor Portal](https://vendor.replicated.com/) and go to **_Application Name_ > Settings**. +1. Click **Save Changes**. -1. Under **Danger Zone**, click **Delete App**. +## Next Step - Setting page +Use the Helm CLI to run the preflight checks you defined and install Gitea. See [Run Preflights with the Helm CLI](tutorial-preflight-helm-install). - [View a larger version of this image](/images/application-settings.png) +## Related Topics -1. In the **Are you sure you want to delete this app?** dialog, enter the application name. Optionally, enter your password if you want to delete all images associated with the application from the Replicated registry. +* [About Customers](/vendor/licenses-about) +* [Creating and Managing Customers](/vendor/releases-creating-customer) - delete app dialog +================ +File: docs/vendor/tutorial-preflight-helm-create-release.mdx +================ +# Step 3: Add the Chart Archive to a Release - [View a larger version of this image](/images/delete-app-dialog.png) +Use the Replicated CLI to add the Gitea Helm chart archive to a release in the Replicated vendor platform. -1. Click **Delete app**. +To create a release: -### Replicated CLI +1. Install the Replicated CLI: -To delete an application with the Replicated CLI: + ``` + brew install replicatedhq/replicated/cli + ``` + For more installation options, see [Installing the Replicated CLI](/reference/replicated-cli-installing). -1. Install the Replicated CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing). +1. Authorize the Replicated CLI: -1. Run the following command: + ``` + replicated login + ``` + In the browser window that opens, complete the prompts to log in to your vendor account and authorize the CLI. - ```bash - replicated app delete APP-NAME - ``` - Replace `APP-NAME` with the name of the target application. +1. Create an application named `Gitea`: -1. When prompted, type `yes` to confirm that you want to delete the application. + ``` + replicated app create Gitea + ``` - **Example:** +1. Get the slug for the application that you created: - ```bash - replicated app delete deletion-example - • Fetching App ✓ - ID NAME SLUG SCHEDULER - 1xyAIzrmbvq... deletion-example deletion-example kots - Delete the above listed application? There is no undo: yes█ - • Deleting App ✓ - ``` + ``` + replicated app ls + ``` + **Example output**: + ``` + ID NAME SLUG SCHEDULER + 2WthxUIfGT13RlrsUx9HR7So8bR Gitea gitea-boxer kots + ``` + In the example above, the application slug is `gitea-boxer`. -================ -File: docs/intro-kots.mdx -================ -import Kots from "../docs/partials/kots/_kots-definition.mdx" +1. Set the `REPLICATED_APP` environment variable to the application that you created. This allows you to interact with the application using the Replicated CLI without needing to use the `--app` flag with every command: -# Introduction to KOTS + **Example:** -This topic provides an introduction to the Replicated KOTS installer, including information about KOTS features, installation options, and user interfaces. + ``` + export REPLICATED_APP=gitea-boxer + ``` -:::note -The Replicated KOTS entitlement is required to install applications with KOTS. For more information, see [Pricing](https://www.replicated.com/pricing) on the Replicated website. -::: +1. Go to the `gitea` directory. -## Overview +1. 
Create a release with the Gitea chart archive: - + ``` + replicated release create --chart=gitea-1.0.6.tgz + ``` + ```bash + You are creating a release that will only be installable with the helm CLI. + For more information, see + https://docs.replicated.com/vendor/helm-install#about-helm-installations-with-replicated -KOTS communicates securely with the Replicated Vendor Portal to synchronize customer licenses, check for available application updates, send instance data, share customer-generated support bundles, and more. + • Reading chart from gitea-1.0.6.tgz ✓ + • Creating Release ✓ + • SEQUENCE: 1 + ``` -Installing an application with KOTS provides access to features such as: +1. Log in to the Vendor Portal and go to **Releases**. -* Support for air gap installations in environments with limited or no outbound internet access -* Support for installations on VMs or bare metal servers, when using Replicated Embedded Cluster or Replicated kURL -* The KOTS Admin Console, which provides a user interface where customers can install and manage their application instances -* Instance telemetry automatically sent to the Vendor Portal for instances running in customer environments -* Strict preflight checks that block installation if environment requirements are not met -* Backup and restore with Replicated snapshots -* Support for marking releases as required to prevent users from skipping them during upgrades + The release that you created is listed under **All releases**. -KOTS is an open source project that is maintained by Replicated. For more information, see the [kots](https://github.com/replicatedhq/kots) repository in GitHub. +1. Click **View YAML** to view the files in the release. -## About Installing with KOTS +1. At the top of the page, click **Promote**. -KOTS can be used to install Kubernetes applications and Helm charts in the following environments: -* Clusters provisioned on VMs or bare metal servers with Replicated Embedded Cluster or Replicated kURL -* Existing clusters brought by the user -* Online (internet-connected) or air-gapped (disconnected) environments + Promote release dialog -To install an application with KOTS, users first run an installation script that installs KOTS in the target cluster and deploys the KOTS Admin Console. After KOTS is installed, users can log in to the KOTS Admin Console to upload their license file, configure the application, run preflight checks, and install and deploy the application. + [View a larger version of this image](/images/release-promote.png) -The following diagram demonstrates how a single release promoted to the Stable channel in the Vendor Portal can be installed with KOTS in an embedded cluster on a VM, in an existing air-gapped cluster, and in an existing internet-connected cluster: +1. In the dialog, for **Which channels you would like to promote this release to?**, select **Unstable**. Unstable is a default channel that is intended for use with internal testing. -Embedded cluster, air gap, and existing cluster app installation workflows +1. For **Version label**, open the dropdown and select **1.0.6**. -[View a larger version of this image](/images/kots-installation-overview.png) +1. Click **Promote**. -As shown in the diagram above: -* For installations in existing online (internet-connected) clusters, users run a command to install KOTS in their cluster. 
-* For installations on VMs or bare metal servers, users run an Embedded Cluster or kURL installation script that both provisions a cluster in their environment and installs KOTS in the cluster. -* For installations in air-gapped clusters, users download air gap bundles for KOTS and the application from the Replicated Download Portal and then provide the bundles during installation. -All users must have a valid license file to install with KOTS. After KOTS is installed in the cluster, users can access the KOTS Admin Console to provide their license and deploy the application. +## Next Step -For more information about how to install applications with KOTS, see the [Installing an Application](/enterprise/installing-overview) section. +Create a customer so that you can install the release in a development environment. See [Create a Customer](tutorial-preflight-helm-create-customer). -## KOTS User Interfaces +## Related Topics -This section describes the KOTS interfaces available to users for installing and managing applications. +* [About Channels and Releases](/vendor/releases-about) +* [Managing Releases with the CLI](/vendor/releases-creating-cli) -### KOTS Admin Console +================ +File: docs/vendor/tutorial-preflight-helm-get-chart.mdx +================ +# Step 1: Get the Sample Chart and Test -KOTS provides an Admin Console to make it easy for users to install, manage, update, configure, monitor, backup and restore, and troubleshoot their application instance from a GUI. +To begin, get the sample Gitea Helm chart from Bitnami, install the chart in your cluster using the Helm CLI, and then uninstall. The purpose of this step is to confirm that you can successfully install the application before adding preflight checks to the chart. -The following shows an example of the Admin Console dashboard for an application: +To get the sample Gitea Helm chart and test installation: -![Admin Console Dashboard](/images/guides/kots/application.png) +1. Run the following command to pull and untar version 1.0.6 of the Bitnami Gitea Helm chart: -[View a larger version of this image](/images/guides/kots/application.png) + ``` + helm pull --untar oci://registry-1.docker.io/bitnamicharts/gitea --version 1.0.6 + ``` + For more information about this chart, see the [bitnami/gitea](https://github.com/bitnami/charts/tree/main/bitnami/gitea) repository in GitHub. -For applications installed with Replicated Embedded Cluster in a VM or bare metal server, the Admin Console also includes a **Cluster Management** tab where users can add and manage nodes in the embedded cluster, as shown below: +1. Change to the new `gitea` directory that was created: + ``` + cd gitea + ``` +1. View the files in the directory: + ``` + ls + ``` + The directory contains the following files: + ``` + Chart.lock Chart.yaml README.md charts templates values.yaml + ``` +1. Install the Gitea chart in your cluster: -![Admin console dashboard with Cluster Management tab](/images/gitea-ec-ready.png) + ``` + helm install gitea . --namespace gitea --create-namespace + ``` + To view the full installation instructions from Bitnami, see [Installing the Chart](https://github.com/bitnami/charts/blob/main/bitnami/gitea/README.md#installing-the-chart) in the `bitnami/gitea` repository. 
-[View a larger version of this image](/images/gitea-ec-ready.png) + When the chart is installed, the following output is displayed: -### KOTS CLI + ``` + NAME: gitea + LAST DEPLOYED: Tue Oct 24 12:44:55 2023 + NAMESPACE: gitea + STATUS: deployed + REVISION: 1 + TEST SUITE: None + NOTES: + CHART NAME: gitea + CHART VERSION: 1.0.6 + APP VERSION: 1.20.5 -The KOTS command-line interface (CLI) is a kubectl plugin. Customers can run commands with the KOTS CLI to install and manage their application instances with KOTS programmatically. + ** Please be patient while the chart is being deployed ** -For information about getting started with the KOTS CLI, see [Installing the KOTS CLI](/reference/kots-cli-getting-started). + 1. Get the Gitea URL: -The KOTS CLI can also be used to install an application without needing to access the Admin Console. This can be useful for automating installations and upgrades, such as in CI/CD pipelines. For information about how to perform headless installations from the command line, see [Installing with the KOTS CLI](/enterprise/installing-existing-cluster-automation). + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace gitea -w gitea' -================ -File: docs/intro-replicated.mdx -================ ---- -pagination_prev: null ---- + export SERVICE_IP=$(kubectl get svc --namespace gitea gitea --template "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}") + echo "Gitea URL: http://$SERVICE_IP/" -import ApiAbout from "/docs/partials/vendor-api/_api-about.mdx" -import Replicated from "/docs/partials/getting-started/_replicated-definition.mdx" -import Helm from "/docs/partials/helm/_helm-definition.mdx" -import Kots from "/docs/partials/kots/_kots-definition.mdx" -import KotsEntitlement from "/docs/partials/kots/_kots-entitlement-note.mdx" -import SDKOverview from "/docs/partials/replicated-sdk/_overview.mdx" -import CSDL from "/docs/partials/getting-started/_csdl-overview.mdx" -import PreflightSbAbout from "/docs/partials/preflights/_preflights-sb-about.mdx" + WARNING: You did not specify a Root URL for Gitea. The rendered URLs in Gitea may not show correctly. In order to set a root URL use the rootURL value. -# Introduction to Replicated + 2. Get your Gitea login credentials by running: -This topic provides an introduction to the Replicated Platform, including a platform overview and a list of key features. It also describes the Commercial Software Distribution Lifecycle and how Replicated features can be used in each phase of the lifecycle. + echo Username: bn_user + echo Password: $(kubectl get secret --namespace gitea gitea -o jsonpath="{.data.admin-password}" | base64 -d) + ``` -## About the Replicated Platform +1. Watch the `gitea` LoadBalancer service until an external IP is available: - + ``` + kubectl get svc gitea --namespace gitea --watch + ``` -The Replicated Platform features are designed to support ISVs during each phase of the Commercial Software Distribution Lifecycle. For more information, see [Commercial Software Distribution Lifecycle](#csdl) below. +1. 
When the external IP for the `gitea` LoadBalancer service is available, run the commands provided in the output of the installation command to get the Gitea URL: -The following diagram demonstrates the process of using the Replicated Platform to distribute an application, install the application in a customer environment, and support the application after installation: + ``` + export SERVICE_IP=$(kubectl get svc --namespace gitea gitea --template "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}") + echo "Gitea URL: http://$SERVICE_IP/" + ``` -![replicated platform features workflow](/images/replicated-platform.png) + :::note + Alternatively, you can run the following command to forward a local port to a port on the Gitea Pod: -[View a larger version of this image](/images/replicated-platform.png) + ``` + POD_NAME=$(kubectl get pods -l app.kubernetes.io/name=gitea -o jsonpath='{.items[0].metadata.name}') + kubectl port-forward pod/$POD_NAME 8080:3000 + ``` + ::: -The diagram above shows an application that is packaged with the [**Replicated SDK**](/vendor/replicated-sdk-overview). The application is tested in clusters provisioned with the [**Replicated Compatibility Matrix**](/vendor/testing-about), then added to a new release in the [**Vendor Portal**](/vendor/releases-about) using an automated CI/CD pipeline. +1. In a browser, go to the Gitea URL to confirm that you can see the welcome page for the application: -The application is then installed by a customer ("Big Bank") on a VM. To install, the customer downloads their license, which grants proxy access to the application images through the [**Replicated proxy registry**](/vendor/private-images-about). They also download the installation assets for the [**Replicated Embedded Cluster**](/vendor/embedded-overview) installer. + Gitea application webpage -Embedded Cluster runs [**preflight checks**](/vendor/preflight-support-bundle-about) to verify that the environment meets the installation requirements, provisions a cluster on the VM, and installs [**Replicated KOTS**](intro-kots) in the cluster. KOTS provides an [**Admin Console**](intro-kots#kots-admin-console) where the customer enters application-specific configurations, runs application preflight checks, optionally joins nodes to the cluster, and then deploys the application. After installation, customers can manage both the application and the cluster from the Admin Console. + [View a larger version of this image](/images/gitea-app.png) -Finally, the diagram shows how [**instance data**](/vendor/instance-insights-event-data) is automatically sent from the customer environment to the Vendor Portal by the Replicated SDK API and the KOTS Admin Console. Additionally, tooling from the open source [**Troubleshoot**](https://troubleshoot.sh/docs/collect/) project is used to generate and send [**support bundles**](/vendor/preflight-support-bundle-about), which include logs and other important diagnostic data. +1. Uninstall the Helm chart: -## Replicated Platform Features + ``` + helm uninstall gitea --namespace gitea + ``` + This command removes all the Kubernetes components associated with the chart and uninstalls the `gitea` release. -The following describes the key features of the Replicated Platform. +1. Delete the namespace: -### Compatibility Matrix + ``` + kubectl delete namespace gitea + ``` + +## Next Step -Replicated Compatibility Matrix can be used to get kubectl access to running clusters within minutes or less. 
Compatibility Matrix supports various Kubernetes distributions and versions and can be interacted with through the Vendor Portal or the Replicated CLI. +Define preflight checks and add them to the Gitea Helm chart. See [Add a Preflight Spec to the Chart](tutorial-preflight-helm-add-spec). -For more information, see [About Compatibility Matrix](/vendor/testing-about). +## Related Topics -### Embedded Cluster +* [Helm Install](https://helm.sh/docs/helm/helm_install/) +* [Helm Uninstall](https://helm.sh/docs/helm/helm_uninstall/) +* [Helm Package](https://helm.sh/docs/helm/helm_package/) +* [bitnami/gitea](https://github.com/bitnami/charts/blob/main/bitnami/gitea) -Replicated Embedded Cluster is a Kubernetes installer based on the open source Kubernetes distribution k0s. With Embedded Cluster, users install and manage both the cluster and the application together as a single appliance on a VM or bare metal server. In this way, Kubernetes is _embedded_ with the application. +================ +File: docs/vendor/tutorial-preflight-helm-install-kots.mdx +================ +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import HelmChartCr from "../partials/getting-started/_gitea-helmchart-cr.mdx" +import KotsCr from "../partials/getting-started/_gitea-kots-app-cr.mdx" +import K8sCr from "../partials/getting-started/_gitea-k8s-app-cr.mdx" +import KotsVerReq from "../partials/replicated-sdk/_kots-version-req.mdx" -Additionally, each version of Embedded Cluster includes a specific version of [Replicated KOTS](#kots) that is installed in the cluster during installation. KOTS is used by Embedded Cluster to deploy the application and also provides the Admin Console UI where users can manage both the application and the cluster. +# Step 6: Run Preflights with KOTS -For more information, see [Embedded Cluster Overview](/vendor/embedded-overview). +Create a KOTS-enabled release and then install Gitea with KOTS. This purpose of this step is to see how preflight checks automatically run in the KOTS Admin Console during installation. -### KOTS (Admin Console) {#kots} +To run preflight checks during installation with KOTS: -KOTS is a kubectl plugin and in-cluster Admin Console that installs Kubernetes applications in customer-controlled environments. +1. In the `gitea` directory, create a subdirectory named `manifests`: -KOTS is used by [Replicated Embedded Cluster](#embedded-cluster) to deploy applications and also to provide the Admin Console UI where users can manage both the application and the cluster. KOTS can also be used to install applications in existing Kubernetes clusters in customer-controlled environments, including clusters in air-gapped environments with limited or no outbound internet access. + ``` + mkdir manifests + ``` -For more information, see [Introduction to KOTS](intro-kots). + You will add the files required to support installation with KOTS to this subdirectory. -### Preflight Checks and Support Bundles +1. Move the Helm chart archive to `manifests`: - + ``` + mv gitea-1.0.6.tgz manifests + ``` -For more information, see [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about). +1. In `manifests`, create the YAML manifests required by KOTS: + ``` + cd manifests + ``` + ``` + touch gitea.yaml kots-app.yaml k8s-app.yaml + ``` -### Proxy Registry +1. 
In each of the files that you created, paste the corresponding YAML provided in the tabs below: -The Replicated proxy registry grants proxy access to an application's images using the customer's unique license. This means that customers can get access to application images during installation without the vendor needing to provide registry credentials. + + +
    **`gitea.yaml` (HelmChart custom resource)**

    Description

    The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart. The name and chartVersion listed in the HelmChart custom resource must match the name and version of a Helm chart archive in the release. Each Helm chart archive in a release requires a unique HelmChart custom resource.

    YAML

    <HelmChartCr/>

    **`kots-app.yaml` (KOTS Application custom resource)**

    Description

    The KOTS Application custom resource enables features in the Replicated Admin Console such as branding, release notes, port forwarding, dashboard buttons, application status indicators, and custom graphs.

    The YAML below provides a name for the application to display in the Admin Console, adds a custom status informer that displays the status of the gitea Deployment resource in the Admin Console dashboard, adds a custom application icon, and creates a port forward so that the user can open the Gitea application in a browser.

    YAML

    <KotsCr/>

    **`k8s-app.yaml` (Kubernetes Application custom resource)**

    Description

    The Kubernetes Application custom resource supports functionality such as including buttons and links on the Replicated Admin Console dashboard. The YAML below adds an Open App button to the Admin Console dashboard that opens the application using the port forward configured in the KOTS Application custom resource.

    YAML

    <K8sCr/>
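Before linting in the next step, you can optionally confirm that the `manifests` directory contains the chart archive and the three manifest files. A minimal sketch, assuming the steps above completed successfully:

```bash
# Sketch: verify the contents of the manifests directory
ls
# Expected: gitea-1.0.6.tgz  gitea.yaml  k8s-app.yaml  kots-app.yaml
```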
    -For more information, see [About the Replicated Proxy Registry](/vendor/private-images-about). +1. From the `manifests` directory, lint the YAML files to confirm that there are no errors: -### Replicated SDK + ``` + replicated release lint --yaml-dir . + ``` + `--yaml-dir` is the path to the directory that contains the Helm chart archive and the manifest files required by KOTS. -The Replicated SDK is a Helm chart that can be installed as a small service alongside your application. It provides an in-cluster API that can be used to communicate with the Vendor Portal. For example, the SDK API can return details about the customer's license or report telemetry on the application instance back to the Vendor Portal. + **Example output**: -For more information, see [About the Replicated SDK](/vendor/replicated-sdk-overview). + ``` + RULE TYPE FILENAME LINE MESSAGE + config-spec warn Missing config spec + preflight-spec warn Missing preflight spec + troubleshoot-spec warn Missing troubleshoot spec + nonexistent-status-informer-object warn kots-app.yaml 8 Status informer points to a nonexistent kubernetes object. If this is a Helm resource, this warning can be ignored. + ``` + + The output includes warning messages, including a warning about a missing preflight spec. This warning appears because the preflight spec is defined in the Helm chart. The warnings can be ignored for the purpose of this tutorial. -### Vendor Portal +1. Create a release: -The Replicated Vendor Portal is the web-based user interface that you can use to configure and manage all of the Replicated features for distributing and managing application releases, supporting your release, viewing customer insights and reporting, and managing teams. + ```bash + replicated release create --yaml-dir . + ``` + **Example output**: + ```bash + • Reading manifests from . ✓ + • Creating Release ✓ + • SEQUENCE: 2 + ``` -The Vendor Portal can also be interacted with programmatically using the following developer tools: +1. Log in to the [vendor portal](https://vendor.replicated.com) and go to **Releases**. The new release is labeled **Sequence 2**. -* **Replicated CLI**: The Replicated CLI can be used to complete tasks programmatically, including all tasks for packaging and managing applications, and managing artifacts such as teams, license files, and so on. For more information, see [Installing the Replicated CLI](/reference/replicated-cli-installing). +1. Promote the release to the Unstable channel. -* **Vendor API v3**: The Vendor API can be used to complete tasks programmatically, including all tasks for packaging and managing applications, and managing artifacts such as teams and license files. For more information, see [Using the Vendor API v3](/reference/vendor-api-using). +1. Go to the **Customers** page. -## Commercial Software Distribution Lifecycle {#csdl} +1. Create a new customer named `KOTS Preflight Customer`. For **License options**, enable the **KOTS Install Enabled** checkbox. This is the entitlement that allows the customer to install with KOTS. -Replicated Platform features are designed to support ISVs in each phase of the Commercial Software Distribution Lifecycle shown below: +1. On the **Manage customer** page for the customer, click **Download license**. You will use the license file to install with KOTS. -![software distribution lifecycle wheel](/images/software-dev-lifecycle.png) +1. Go to **Channels**. From the **Unstable** channel card, under **Install**, copy the **KOTS Install** command. 
-[View a larger version of this image](/images/software-dev-lifecycle.png) + ![KOTS Install tab on the Unstable channel card](/images/helm-tutorial-unstable-kots-install-command.png) - + [View a larger version of this image](/images/helm-tutorial-unstable-kots-install-command.png) -For more information about to download a copy of The Commercial Software Distribution Handbook, see [The Commercial Software Distribution Handbook](https://www.replicated.com/the-commercial-software-distribution-handbook). +1. On the command line, run the **KOTS Install** command that you copied: -The following describes the phases of the software distribution lifecycle: + ```bash + curl https://kots.io/install | bash + kubectl kots install $REPLICATED_APP/unstable + ``` -* **[Develop](#develop)**: Application design and architecture decisions align with customer needs, and development teams can quickly iterate on new features. -* **[Test](#test)**: Run automated tests in several customer-representative environments as part of continuous integration and continuous delivery (CI/CD) workflows. -* **[Release](#release)**: Use channels to share releases with external and internal users, publish release artifacts securely, and use consistent versioning. -* **[License](#license)**: Licenses are customized to each customer and are easy to issue, manage, and update. -* **[Install](#install)**: Provide unique installation options depending on customers' preferences and experience levels. -* **[Report](#report)**: Make more informed prioritization decisions by collecting usage and performance metadata for application instances running in customer environments. -* **[Support](#support)**: Diagnose and resolve support issues quickly. + This installs the latest version of the KOTS CLI and the Replicated Admin Console. The Admin Console provides a user interface where you can upload the customer license file and deploy the application. -For more information about the Replicated features that support each of these phases, see the sections below. + For additional KOTS CLI installation options, including how to install without root access, see [Installing the KOTS CLI](/reference/kots-cli-getting-started). -### Develop + :::note + + ::: -The Replicated SDK exposes an in-cluster API that can be developed against to quickly integrate and test core functionality with an application. For example, when the SDK is installed alongside an application in a customer environment, the in-cluster API can be used to send custom metrics from the instance to the Replicated vendor platform. +1. Complete the installation command prompts: -For more information about using the Replicated SDK, see [About the Replicated SDK](/vendor/replicated-sdk-overview). + 1. For `Enter the namespace to deploy to`, enter `gitea`. -### Test + 1. For `Enter a new password to be used for the Admin Console`, provide a password to access the Admin Console. -The Replicated Compatibility Matrix rapidly provisions ephemeral Kubernetes clusters, including multi-node and OpenShift clusters. When integrated into existing CI/CD pipelines for an application, the Compatibility Matrix can be used to automatically create a variety of customer-representative environments for testing code changes. + When the Admin Console is ready, the command prints the URL where you can access the Admin Console. At this point, the KOTS CLI is installed and the Admin Console is running, but the application is not yet deployed. 
-For more information, see [About Compatibility Matrix](/vendor/testing-about). + **Example output:** -### Release + ```bash + Enter the namespace to deploy to: gitea + • Deploying Admin Console + • Creating namespace ✓ + • Waiting for datastore to be ready ✓ + Enter a new password for the Admin Console (6+ characters): •••••••• + • Waiting for Admin Console to be ready ✓ -Release channels in the Replicated Vendor Portal allow ISVs to make different application versions available to different customers, without needing to maintain separate code bases. For example, a "Beta" channel can be used to share beta releases of an application with only a certain subset of customers. + • Press Ctrl+C to exit + • Go to http://localhost:8800 to access the Admin Console + ``` -For more information about working with channels, see [About Channels and Releases](/vendor/releases-about). +1. With the port forward running, in a browser, go to `http://localhost:8800` to access the Admin Console. -Additionally, the Replicated proxy registry grants proxy access to private application images using the customers' license. This ensures that customers have the right access to images based on the channel they are assigned. For more information about using the proxy registry, see [About the Replicated Proxy Registry](/vendor/private-images-about). +1. On the login page, enter the password that you created. -### License +1. On the license page, select the license file that you downloaded previously and click **Upload license**. -Create customers in the Replicated Vendor Portal to handle licensing for your application in both online and air gap environments. For example: -* License free trials and different tiers of product plans -* Create and manage custom license entitlements -* Verify license entitlements both before installation and during runtime -* Measure and report usage + Preflight checks run automatically: -For more information about working with customers and custom license fields, see [About Customers](/vendor/licenses-about). + ![Gitea preflight checks page](/images/gitea-preflights-admin-console.png) -### Install + [View a larger version of this image](/images/gitea-preflights-admin-console.png) -Applications distributed with the Replicated Platform can support multiple different installation methods from the same application release, helping you to meet your customers where they are. For example: +1. When the preflight checks finish, click **Deploy** to deploy the application. -* Customers who are not experienced with Kubernetes or who prefer to deploy to a dedicated cluster in their environment can install on a VM or bare metal server with the Replicated Embedded Cluster installer. For more information, see [Embedded Cluster Overview](/vendor/embedded-overview). -* Customers familiar with Kubernetes and Helm can install in their own existing cluster using Helm. For more information, see [Installing with Helm](/vendor/install-with-helm). -* Customers installing into environments with limited or no outbound internet access (often referred to as air-gapped environments) can securely access and push images to their own internal registry, then install using Helm or a Replicated installer. For more information, see [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap) and [Installing and Updating with Helm in Air Gap Environments (Alpha)](/vendor/helm-install-airgap). + The Admin Console dashboard opens. 
The application status changes from Missing to Unavailable while the `gitea` Deployment is being created: -### Report + ![Admin console dashboard](/images/tutorial-gitea-unavailable.png) -When installed alongside an application, the Replicated SDK and Replicated KOTS automatically send instance data from the customer environment to the Replicated Vendor Portal. This instance data includes health and status indicators, adoption metrics, and performance metrics. For more information, see [About Instance and Event Data](/vendor/instance-insights-event-data). + [View a larger version of this image](/images/tutorial-gitea-unavailable.png) -ISVs can also set up email and Slack notifications to get alerted of important instance issues or performance trends. For more information, see [Configuring Instance Notifications](/vendor/instance-notifications-config). +1. (Optional) After the application is in a Ready status, click **Open App** to view the Gitea application in a browser. -### Support +1. Uninstall the Gitea application from your cluster: -Support teams can use Replicated features to more quickly diagnose and resolve application issues. For example: + ```bash + kubectl kots remove $REPLICATED_APP --namespace gitea --undeploy + ``` + **Example output**: + ``` + • Removing application gitea-boxer reference from Admin Console and deleting associated resources from the cluster ✓ + • Application gitea-boxer has been removed + ``` -- Customize and generate support bundles, which collect and analyze redacted information from the customer's cluster, environment, and application instance. See [About Preflights Checks and Support Bundles](/vendor/preflight-support-bundle-about). -- Provision customer-representative environments with Compatibility Matrix to recreate and diagnose issues. See [About Compatibility Matrix](/vendor/testing-about). -- Get insights into an instance's status by accessing telemetry data, which covers the health of the application, the current application version, and details about the infrastructure and cluster where the application is running. For more information, see [Customer Reporting](/vendor/customer-reporting). For more information, see [Customer Reporting](/vendor/customer-reporting). +1. Remove the Admin Console from the cluster: -================ -File: docs/intro.md -================ ---- -slug: / -pagination_next: null ---- + 1. Delete the namespace where the Admin Console is installed: -# Home + ``` + kubectl delete namespace gitea + ``` + 1. Delete the Admin Console ClusterRole and ClusterRoleBinding: -
**What's New?**

**Embedded Cluster 2.0 Release**

The 2.0 release brings improvements to architecture that increase the reliability and stability of Embedded Cluster.

**Did You Know?**

**Manage Supported Install Methods Per Customer**

Control which installation methods are available for each customer from the **Install types** field in the customer's license.
    + ``` + kubectl delete clusterrole kotsadm-role + ``` + ``` + kubectl delete clusterrolebinding kotsadm-rolebinding + ``` -================ -File: src/components/HomepageFeatures.js -================ -import React from 'react'; -import clsx from 'clsx'; -import styles from './HomepageFeatures.module.css'; +## Summary -const FeatureList = [ - { - title: 'Easy to Use', - Svg: require('../../static/images/undraw_docusaurus_mountain.svg').default, - description: ( - <> - Docusaurus was designed from the ground up to be easily installed and - used to get your website up and running quickly. - - ), - }, - { - title: 'Focus on What Matters', - Svg: require('../../static/images/undraw_docusaurus_tree.svg').default, - description: ( - <> - Docusaurus lets you focus on your docs, and we'll do the chores. Go - ahead and move your docs into the docs directory. - - ), - }, - { - title: 'Powered by React', - Svg: require('../../static/images/undraw_docusaurus_react.svg').default, - description: ( - <> - Extend or customize your website layout by reusing React. Docusaurus can - be extended while reusing the same header and footer. - - ), - }, -]; - -function Feature({Svg, title, description}) { - return ( -
    <div className={clsx('col col--4')}>
      <div className="text--center">
        <Svg className={styles.featureSvg} alt={title} />
      </div>
      <div className="text--center padding-horiz--md">
        <h3>{title}</h3>
        <p>{description}</p>
      </div>
    </div>
  );
}

export default function HomepageFeatures() {
  return (
    <section className={styles.features}>
      <div className="container">
        <div className="row">
          {FeatureList.map((props, idx) => (
            <Feature key={idx} {...props} />
          ))}
        </div>
      </div>
    </section>
    - ); -} +To learn more about defining and running preflight checks, see: +* [Defining Preflight Checks](/vendor/preflight-defining) +* [Running Preflight Checks](/vendor/preflight-running) +* [Getting Started](https://troubleshoot.sh/docs/) in the open source Troubleshoot documentation. ================ -File: src/components/HomepageFeatures.module.css +File: docs/vendor/tutorial-preflight-helm-install.mdx ================ -.features { - display: flex; - align-items: center; - padding: 2rem 0; - width: 100%; -} +# Step 5: Run Preflights with the Helm CLI -.featureSvg { - height: 200px; - width: 200px; -} +Use the Helm CLI installation instructions provided for the customer that you created to run the preflight checks for Gitea and install. The purpose of this step is to demonstrate how enterprise users can run preflight checks defined in a Helm chart before installing. -================ -File: src/css/custom.css -================ -@import url('https://fonts.googleapis.com/css2?family=Open+Sans&display=swap'); -@import url('https://fonts.googleapis.com/css2?family=Poppins:wght@400;500;600&display=swap'); - -/** - * Any CSS included here will be global. The classic template - * bundles Infima by default. Infima is a CSS framework designed to - * work well for content-centric websites. - */ - -/* You can override the default Infima variables here. */ -:root { - --doc-sidebar-width: 350px !important; - --ifm-color-primary: #00959E; - --ifm-color-primary-dark: #00959E; - --ifm-color-primary-darker: #00959E; - --ifm-color-primary-darkest: #007b81; - --ifm-color-primary-light: #6DD2D2; - --ifm-color-primary-lighter: #97e2e2; - --ifm-color-primary-lightest: rgb(146, 221, 224); - --ifm-code-font-size: 95%; - --ifm-font-family-base: 'Open Sans'; - --ifm-heading-font-family: 'Poppins', sans-serif; - --ifm-heading-font-weight: 600; - /*this variable controls the background when govering over items in the sidebar*/ - --ifm-menu-color-background-hover: transparent; - /*this variable controls the padding between items in the sidebar*/ - --ifm-menu-link-padding-vertical: 0.5rem; -} +To run preflight checks and install with the Helm CLI: -html[data-theme='light'] { - --ifm-heading-color: #2f2f2f; -} +1. Create a `gitea` namespace for the installation: -html[data-theme='dark'] { - --ifm-color-primary: #6DD2D2; -} + ``` + kubectl create namespace gitea + ``` -.docusaurus-highlight-code-line { - background-color: rgba(0, 0, 0, 0.1); - display: block; - margin: 0 calc(-1 * var(--ifm-pre-padding)); - padding: 0 var(--ifm-pre-padding); -} +1. Update the current kubectl context to target the new `gitea` namespace. This ensures that the chart is installed in the `gitea` namespace without requiring you to set the `--namespace` flag with the `helm install` command: -html[data-theme='dark'] .docusaurus-highlight-code-line { - background-color: rgba(0, 0, 0, 0.3); -} + ``` + kubectl config set-context --namespace=gitea --current + ``` -article { - /* max-width: 800px; */ - margin: 10px 25px; -} +1. In the [vendor portal](https://vendor.replicated.com), go to the **Customers** page. -.footer--dark { - --ifm-footer-background-color: #2f2f2f; -} +1. On the **Customer details** page for the customer that you created, click **Helm install instructions**. 
-.alert--warning {
-  --ifm-alert-background-color: var( --ifm-color-danger-contrast-background );
-  --ifm-alert-background-color-highlight: rgba(250, 56, 62, 0.15);
-  --ifm-alert-foreground-color: var( --ifm-color-danger-contrast-foreground );
-  --ifm-alert-border-color: var(--ifm-color-danger-dark);
-}

+   ![Helm install instructions button](/images/tutorial-gitea-helm-customer-install-button.png)

-.alert a {
-  color: var(--ifm-color-primary-dark);
-  text-decoration-color: var(--ifm-color-primary-dark);
-}

+   [View a larger version of this image](/images/tutorial-gitea-helm-customer-install-button.png)

-.theme-admonition-note {
-  --ifm-code-background: var(--ifm-alert-background-color-highlight);
-  --ifm-link-color: var(--ifm-alert-foreground-color);
-  --ifm-link-hover-color: var(--ifm-alert-foreground-color);
-  --ifm-link-decoration: underline;
-  --ifm-tabs-color: var(--ifm-alert-foreground-color);
-  --ifm-tabs-color-active: var(--ifm-alert-foreground-color);
-  --ifm-tabs-color-active-border: var(--ifm-alert-border-color);
-  background-color: var(--ifm-alert-background-color);
-  border-color: var(--ifm-alert-border-color);
-  border-style: solid;
-  border-width: var(--ifm-alert-border-width);
-  border-left-width: var(--ifm-alert-border-left-width);
-  border-radius: var(--ifm-alert-border-radius);
-  box-shadow: var(--ifm-alert-shadow);
-  color: var(--ifm-alert-foreground-color);
-  padding: var(--ifm-alert-padding-vertical) var(--ifm-alert-padding-horizontal);
-  --ifm-alert-background-color: var( --ifm-color-info-contrast-background );
-  --ifm-alert-background-color-highlight: rgba(84, 199, 236, 0.15);
-  --ifm-alert-foreground-color: var( --ifm-color-info-contrast-foreground );
-  --ifm-alert-border-color: var(--ifm-color-info-dark);
-}

+1. Run the first command in the **Helm install instructions** dialog to log in to the Replicated registry.

-.admonition-note a {
-  color: var(--ifm-color-primary-dark);
-  text-decoration-color: var(--ifm-color-primary-dark);
-}

+1. Run the second command to install the preflight kubectl plugin:

-.admonition-important {
-  --ifm-code-background: var(--ifm-alert-background-color-highlight);
-  --ifm-link-color: var(--ifm-alert-foreground-color);
-  --ifm-link-hover-color: var(--ifm-alert-foreground-color);
-  --ifm-link-decoration: underline;
-  --ifm-tabs-color: var(--ifm-alert-foreground-color);
-  --ifm-tabs-color-active: var(--ifm-alert-foreground-color);
-  --ifm-tabs-color-active-border: var(--ifm-alert-border-color);
-  background-color: var(--ifm-alert-background-color);
-  border-color: var(--ifm-alert-border-color);
-  border-style: solid;
-  border-width: var(--ifm-alert-border-width);
-  border-left-width: var(--ifm-alert-border-left-width);
-  border-radius: var(--ifm-alert-border-radius);
-  box-shadow: var(--ifm-alert-shadow);
-  color: var(--ifm-alert-foreground-color);
-  padding: var(--ifm-alert-padding-vertical) var(--ifm-alert-padding-horizontal);
-  --ifm-alert-background-color: var( --ifm-color-danger-contrast-background );
-  --ifm-alert-background-color-highlight: rgba(250, 56, 62, 0.15);
-  --ifm-alert-foreground-color: var( --ifm-color-danger-contrast-foreground );
-  --ifm-alert-border-color: var(--ifm-color-danger-dark);
-}

+   ```bash
+   curl https://krew.sh/preflight | bash
+   ```
+   The preflight plugin is a client-side utility used to run preflight checks.

-.admonition-important a {
-  color: var(--ifm-color-primary-dark);
-  text-decoration-color: var(--ifm-color-primary-dark);
-}

+1. Run the third command to run preflight checks:
-ol ol {
-  list-style-type: lower-alpha;
-}

+   ```bash
+   helm template oci://registry.replicated.com/$REPLICATED_APP/unstable/gitea | kubectl preflight -
+   ```
+   This command templates the Gitea chart and then pipes the result to the preflight plugin. The following shows an example of the output for this command:

-ol ol ol {
-  list-style-type: lower-roman
-}

+   Preflight CLI output

-.DocSearch-Logo {
-  display: none;
-}

+   [View a larger version of this image](/images/gitea-preflights-cli.png)

-.DocSearch-Footer {
-  justify-content: center !important;
-}

+1. Run the fourth command listed under **Option 1: Install Gitea** to install the application:

-/* Landing page */

+   ```bash
+   helm install gitea oci://registry.replicated.com/$REPLICATED_APP/unstable/gitea
+   ```

-.tile__container {
-  display: flex;
-  flex-direction: row;
-  gap: 0.7em;
-}

+1. Uninstall and delete the namespace:

-.tile__container > ul {
-  padding: 1.5em;
-  list-style: none;
-  border-radius: 7px;
-  border: 1px solid rgba(88,88,88,0.2);
-  width: 100%;
-  display: flex;
-  flex-direction: column;
-  justify-content: flex-start;
-}

+   ```bash
+   helm uninstall gitea --namespace gitea
+   ```
+   ```bash
+   kubectl delete namespace gitea
+   ```

-.tile__container > ul:hover {
-  background-image: linear-gradient(to bottom right, rgba(88,88,88,0.03), rgba(88,88,88,0.01));
-}

+## Next Step

-[data-theme='dark'] .tile__container > ul:hover {
-  background-image: linear-gradient(to bottom right, rgba(88,88,88,0.08), rgba(88,88,88,0.02));
-}

+Install the application with KOTS to see how preflight checks are run from the KOTS Admin Console. See [Run Preflights with KOTS](tutorial-preflight-helm-install-kots).

-.tile__container ul li h3 {
-  padding-top: 15px;
-}

+## Related Topics

-.tile__header {
-  display: flex;
-  flex-direction: row;
-  align-items: center;
-  gap: 1.0em;
-  font-size: 1.25em;
-}

+* [Running Preflight Checks](/vendor/preflight-running)
+* [Installing with Helm](/vendor/install-with-helm)

-.tile__header img {
-  max-width: 55px;
-  height: 55px;
-  background-color:rgba(255,72,86,0.1);
-  border-radius: 7px;
-}

+================
+File: docs/vendor/tutorial-preflight-helm-setup.mdx
+================
+# Introduction and Setup

-@media (max-width: 1300px) {
-  .tile__container {
-    flex-direction: column;
-  }
-}

+This topic provides a summary of the goals and outcomes for the tutorial and also lists the prerequisites to set up your environment before you begin.

-#helm {
-  background-color:rgba(15, 22, 137,0.05);
-}

+## Summary

-[data-theme='dark'] #helm {
-  background-color:rgb(169, 169, 198);
-}

+This tutorial introduces you to preflight checks. The purpose of preflight checks is to provide clear feedback about any missing requirements or incompatibilities in the customer's cluster _before_ they install or upgrade an application. Thorough preflight checks provide increased confidence that an installation or upgrade will succeed and help prevent support escalations.

+Preflight checks are part of the [Troubleshoot](https://troubleshoot.sh/) open source project, which is maintained by Replicated.
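+For orientation, the following is a minimal sketch of the kind of preflight spec that the Troubleshoot project defines. The analyzer and outcome syntax come from the open source Troubleshoot documentation; the `gitea-preflights` name and the `1.23.0` version threshold are illustrative placeholders rather than values required by this tutorial:

+```yaml
+apiVersion: troubleshoot.sh/v1beta2
+kind: Preflight
+metadata:
+  name: gitea-preflights        # illustrative name
+spec:
+  analyzers:
+    - clusterVersion:           # checks the Kubernetes version in the cluster
+        outcomes:
+          - fail:
+              when: "< 1.23.0"  # illustrative minimum version
+              message: This application requires Kubernetes 1.23.0 or later.
+          - pass:
+              message: The cluster meets the minimum Kubernetes version requirement.
+```

+A spec like this can be evaluated against a cluster before installation, which is exactly what the Helm CLI and KOTS steps in this tutorial demonstrate.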
-#whats-new:hover, -[data-theme='dark'] #whats-new { - background-image: linear-gradient(to bottom right, rgba(69,145,247,0.2), rgba(69,145,247,0.05)); -} +In this tutorial, you use a sample Helm chart to learn how to: -#whats-new .tile__header img { - background-color: rgba(69,145,247,0.2); -} +* Define custom preflight checks in a Kubernetes Secret in a Helm chart +* Package a Helm chart and add it to a release in the Replicated Vendor Portal +* Run preflight checks using the Helm CLI +* Run preflight checks in the Replicated KOTS Admin Console -[data-theme='dark'] #whats-new .tile__header img { - content:url("/images/icons/chat_bubble_white.png"); -} +## Set Up the Environment -#whats-new li a { - color: rgb(69,145,247); -} +Before you begin, do the following to set up your environment: -#did-you-know, -[data-theme='dark'] #did-you-know:hover { - background-image: linear-gradient(to bottom right, rgba(21, 140, 54,0.1), rgba(21, 140, 54,0.025)); - border: 1px solid rgba(21, 140, 54,0.1); -} +* Ensure that you have kubectl access to a Kubernetes cluster. You can use any cloud provider or tool that you prefer to create a cluster, such as Google Kubernetes Engine (GKE), Amazon Web Services (AWS), or minikube. -#did-you-know:hover, -[data-theme='dark'] #did-you-know { - background-image: linear-gradient(to bottom right, rgba(21, 140, 54,0.2), rgba(21, 140, 54,0.05)); -} + For information about installing kubectl and configuring kubectl access to a cluster, see the following in the Kubernetes documentation: + * [Install Tools](https://kubernetes.io/docs/tasks/tools/) + * [Command line tool (kubectl)](https://kubernetes.io/docs/reference/kubectl/) -#did-you-know .tile__header img { - background-color: rgba(21, 140, 54,0.2); -} +* Install the Helm CLI. To install the Helm CLI using Homebrew, run: -[data-theme='dark'] #did-you-know .tile__header img { - content:url("/images/icons/lightbulb_white.png"); -} + ``` + brew install helm + ``` -#did-you-know li a { - color: rgb(21, 140, 54); -} + For more information, including alternative installation options, see [Install Helm](https://helm.sh/docs/intro/install/) in the Helm documentation. -/* Sidebar */ +* Create a vendor account to access the Vendor Portal. See [Creating a Vendor Portal](/vendor/vendor-portal-creating-account). -.menu__list-item > h5 { - text-transform: uppercase; - margin: 1rem 0 0 0; -} + :::note + If you do not yet have a Vendor Portal team to join, you can sign up for a trial account. By default, trial accounts do not include access to Replicated KOTS. To get access to KOTS with your trial account so that you can complete this and other tutorials, contact Replicated at contact@replicated.com. + ::: -.theme-doc-sidebar-menu.menu__list { - margin-bottom: 75px; -} +## Next Step -a.menu__link { - font-size: .9em; -} +Get the sample Bitnami Helm chart and test installation with the Helm CLI. See [Step 1: Get the Sample Chart and Test](/vendor/tutorial-preflight-helm-get-chart) -.menu__list-item > a:hover, .menu__list-item-collapsible > a:hover { - color: var(--ifm-color-primary); -} +================ +File: docs/vendor/using-third-party-registry-proxy.mdx +================ +# Using a Registry Proxy for Helm Air Gap Installations -/* Navbar */ +This topic describes how to connect the Replicated proxy registry to a Harbor or jFrog Artifactory instance to support pull-through image caching. It also includes information about how to set up replication rules in Harbor for image mirroring. 
-.dropdown__link:hover, .navbar__link {
-  background-color: transparent;
-  color: var(--ifm-dropdown-link-color);
-  text-decoration: none;
-}

+## Overview

-.dropdown__link--active, .navbar__link--active {
-  background-color: transparent;
-  color: #2f2f2f;
-}

+For applications distributed with Replicated, the [Replicated proxy registry](/vendor/private-images-about) grants proxy, or _pull-through_, access to application images without exposing registry credentials to customers.

-html[data-theme='dark'] .dropdown__link--active, .navbar__link--active {
-  color: white;
-}

+Users can optionally connect the Replicated proxy registry with their own [Harbor](https://goharbor.io) or [jFrog Artifactory](https://jfrog.com/help/r/jfrog-artifactory-documentation) instance to proxy and cache the images that are required for installation on demand. This can be particularly helpful in Helm installations in air-gapped environments because it allows users to pull and cache images from an internet-connected machine, then access the cached images during installation from a machine with limited or no outbound internet access.

-html[data-theme='dark'] .dropdown__link:hover, .navbar__link {
-  color: var(--ifm-dropdown-link-color);
-  text-decoration: none;
-}

+In addition to the support for on-demand pull-through caching, connecting the Replicated proxy registry to a Harbor or Artifactory instance also has the following benefits:
+* Registries like Harbor or Artifactory typically support access controls as well as scanning images for security vulnerabilities
+* With Harbor, users can optionally set up replication rules for image mirroring, which can be used to improve data availability and reliability

-.dropdown > .navbar__link:after {
-  content: '';
-  border: none;
-  position: static;
-  top: auto;
-  transform: none;
-  width: 12px;
-  height: 12px;
-  background-image: url("data:image/svg+xml,%3Csvg width='24' height='24' fill='none' viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M4.293 8.293a1 1 0 0 1 1.414 0L12 14.586l6.293-6.293a1 1 0 1 1 1.414 1.414l-7 7a1 1 0 0 1-1.414 0l-7-7a1 1 0 0 1 0-1.414Z' fill='%23888888'/%3E%3C/svg%3E");
-  background-size: 12px;
-  background-repeat: no-repeat;
-}

+## Limitation

-html[data-theme='dark'] .dropdown > .navbar__link:after {
-  background-image: url("data:image/svg+xml,%3Csvg width='24' height='24' fill='none' viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M4.293 8.293a1 1 0 0 1 1.414 0L12 14.586l6.293-6.293a1 1 0 1 1 1.414 1.414l-7 7a1 1 0 0 1-1.414 0l-7-7a1 1 0 0 1 0-1.414Z' fill='%23ffffff'/%3E%3C/svg%3E");
-}

+Artifactory does not support mirroring or replication for Docker registries. If you need to set up image mirroring, use Harbor. See [Configure Image Mirroring in Harbor](#harbor-mirror) below.

-/* Release Note label styling */

+## Connect the Replicated Proxy Registry to Harbor

-h3[id^="new-features"] {
-  background-color: #4BC99C;
-  border-radius: 7px!important;
-  color: #fff;
-  width: max-content;
-  padding: 0.2em 0.6em 0.2em;
-  font-weight: 500;
-  font-size: 20px;
-}

+[Harbor](https://goharbor.io) is a popular open-source container registry. Users can connect the Replicated proxy registry to Harbor in order to cache images on demand and set up pull-based replication rules to proactively mirror images. Connecting the Replicated proxy registry to Harbor also allows customers to use Harbor's security features.
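+To make the pull-through model concrete, the following sketch compares pulling an image directly through the Replicated proxy registry with pulling the same image through a Harbor proxy cache project. The `proxy.replicated.com/proxy/<app-slug>/<upstream-image>` path format is the proxy registry's documented convention; the `my-app` slug, the `replicated-proxy` project name, the Harbor hostname, and the upstream image are all hypothetical examples:

+```bash
+# Pull directly through the Replicated proxy registry
+# (hypothetical application slug and upstream image):
+docker pull proxy.replicated.com/proxy/my-app/quay.io/my-org/nginx:1.25
+
+# Pull the same image through a Harbor proxy cache project named
+# "replicated-proxy"; on the first pull, Harbor fetches the image
+# from the proxy registry and caches it locally:
+docker pull harbor.internal.example.com/replicated-proxy/proxy/my-app/quay.io/my-org/nginx:1.25
+```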
-h3[id^="improvements"] { - background-color: #38C1CA; - border-radius: 7px!important; - color: #fff; - width: max-content; - padding: 0.2em 0.6em 0.2em; - font-weight: 500; - font-size: 20px; -} +### Use Harbor for Pull-Through Proxy Caching {#harbor-proxy-cache} -h3[id^="bug-fixes"] { - background-color: #F47878; - border-radius: 7px!important; - color: #fff; - width: max-content; - padding: 0.2em 0.6em 0.2em; - font-weight: 500; - font-size: 20px; -} +To connect the Replicated proxy registry to Harbor for pull-through proxy caching: -h3[id^="breaking-changes"] { - background-color: #d34a54; - /* background-color: #D64399; */ - border-radius: 7px!important; - color: #fff; - width: max-content; - padding: 0.2em 0.6em 0.2em; - font-weight: 500; - font-size: 20px; -} +1. Log in to Harbor and create a new replication endpoint. This endpoint connects the Replicated proxy registry to the Harbor instance. For more information, see [Creating Replication Endpoints](https://goharbor.io/docs/2.11.0/administration/configuring-replication/create-replication-endpoints/) in the Harbor documentation. +1. Enter the following details for the endpoint: -h3[id^="known-issues"] { - background-color: #414288; - /* background-color: #D64399; */ - border-radius: 7px!important; - color: #fff; - width: max-content; - padding: 0.2em 0.6em 0.2em; - font-weight: 500; - font-size: 20px; -} + * For the provider field, choose Docker Registry. + * For the URL field, enter `https://proxy.replicated.com` or the custom domain that is configured for the Replicated proxy registry. For more information about configuring custom domains in the Vendor Portal, see [Using Custom Domains](/vendor/custom-domains-using). + * For the access ID, enter the email address associated with the customer in the Vendor Portal. + * For the access secret, enter the customer's unique license ID. You can find the license ID in the Vendor Portal by going to **Customers > [Customer Name]**. -h3[id^="new-features"] a { - color: #fff; - opacity: .5; - text-decoration: none; -} +1. Verify your configuration by testing the connection and then save the endpoint. -h3[id^="improvements"] a { - color: #fff; - opacity: .5; - text-decoration: none; -} +1. After adding the Replicated proxy registry as a replication endpoint in Harbor, set up a proxy cache. This allows for pull-through image caching with Harbor. For more information, see [Configure Proxy Cache](https://goharbor.io/docs/2.11.0/administration/configure-proxy-cache/) in the Harbor documentation. -h3[id^="bug-fixes"] a { - color: #fff; - opacity: .5; - text-decoration: none; -} +1. (Optional) Add a pull-based replication rule to support image mirroring. See [Configure Image Mirroring in Harbor](#harbor-mirror) below. -h3[id^="breaking-changes"] a { - color: #fff; - opacity: .5; - text-decoration: none; -} +### Configure Image Mirroring in Harbor {#harbor-mirror} -h3[id^="known-issues"] a { - color: #fff; - opacity: .5; - text-decoration: none; -} +To enable image mirroring with Harbor, users create a pull-based replication rule. This periodically (or when manually triggered) pulls images from the Replicated proxy registry to store them in Harbor. -td#center { - text-align: center; -} +The Replicated proxy regsitry exposes standard catalog and tag listing endpoints that are used by Harbor to support image mirroring: +* The catalog endpoint returns a list of repositories built from images of the last 10 releases. 
+* The tags listing endpoint lists the tags available in a given repository for those same releases. -================ -File: src/theme/Admonition/index.js -================ -import React from 'react'; -import clsx from 'clsx'; -import {ThemeClassNames} from '@docusaurus/theme-common'; -import Translate from '@docusaurus/Translate'; -import styles from './styles.module.css'; +When image mirroring is enabled, Harbor uses these endpoints to build a list of images to cache and then serve. -function NoteIcon() { - return ( - - - - ); -} -function TipIcon() { - return ( - - - - ); -} -function DangerIcon() { - return ( - - - - ); -} -function InfoIcon() { - return ( - - - - ); -} -function CautionIcon() { - return ( - - - - ); -} -// eslint-disable-next-line @typescript-eslint/consistent-indexed-object-style -const AdmonitionConfigs = { - note: { - infimaClassName: 'secondary', - iconComponent: NoteIcon, - label: ( - - note - - ), - }, - tip: { - infimaClassName: 'success', - iconComponent: TipIcon, - label: ( - - tip - - ), - }, - danger: { - infimaClassName: 'danger', - iconComponent: DangerIcon, - label: ( - - danger - - ), - }, - info: { - infimaClassName: 'info', - iconComponent: InfoIcon, - label: ( - - info - - ), - }, - caution: { - infimaClassName: 'warning', - iconComponent: CautionIcon, - label: ( - - caution - - ), - }, - important: { - infimaClassName: 'warning', - iconComponent: CautionIcon, - label: ( - - important - - ), - }, -}; -// Legacy aliases, undocumented but kept for retro-compatibility -const aliases = { - secondary: 'note', - success: 'tip', - warning: 'danger', -}; -function getAdmonitionConfig(unsafeType) { - const type = aliases[unsafeType] ?? unsafeType; - const config = AdmonitionConfigs[type]; - if (config) { - return config; - } - console.warn( - `No admonition config found for admonition type "${type}". Using Info as fallback.`, - ); - return AdmonitionConfigs.info; -} -// Workaround because it's difficult in MDX v1 to provide a MDX title as props -// See https://github.com/facebook/docusaurus/pull/7152#issuecomment-1145779682 -function extractMDXAdmonitionTitle(children) { - const items = React.Children.toArray(children); - const mdxAdmonitionTitle = items.find( - (item) => - React.isValidElement(item) && - item.props?.mdxType === 'mdxAdmonitionTitle', - ); - const rest = <>{items.filter((item) => item !== mdxAdmonitionTitle)}; - return { - mdxAdmonitionTitle, - rest, - }; -} -function processAdmonitionProps(props) { - const {mdxAdmonitionTitle, rest} = extractMDXAdmonitionTitle(props.children); - return { - ...props, - title: props.title ?? mdxAdmonitionTitle, - children: rest, - }; -} -export default function Admonition(props) { - const {children, type, title, icon: iconProp} = processAdmonitionProps(props); - const typeConfig = getAdmonitionConfig(type); - const titleLabel = title ?? typeConfig.label; - const {iconComponent: IconComponent} = typeConfig; - const icon = iconProp ?? ; - return ( -
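+Assuming that these "standard" endpoints follow the Docker Registry HTTP API V2 paths (an assumption based on the description above, not something this topic states explicitly), the requests that Harbor issues during mirroring would look roughly like the following sketch. The email address and license ID are the same credentials used for the replication endpoint, and the repository path is a hypothetical example:

+```bash
+# List the repositories exposed by the proxy registry (catalog endpoint):
+curl -u "user@example.com:LICENSE_ID" "https://proxy.replicated.com/v2/_catalog?n=1000"
+
+# List the tags for one repository returned by the catalog
+# (hypothetical repository path):
+curl -u "user@example.com:LICENSE_ID" "https://proxy.replicated.com/v2/proxy/my-app/quay.io/my-org/nginx/tags/list"
+```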
    -
    - {icon} - {titleLabel} -
    -
    {children}
    -
-  );
-}

+#### Limitations

-================
-File: src/theme/Admonition/styles.module.css
-================
-.admonition {
-  margin-bottom: 1em;
-}

+Image mirroring with Harbor has the following limitations:

-.admonitionHeading {
-  font: var(--ifm-heading-font-weight) var(--ifm-h5-font-size) /
-    var(--ifm-heading-line-height) var(--ifm-heading-font-family);
-  text-transform: uppercase;
-  margin-bottom: 0.3rem;
-}

+* Neither the catalog nor the tags listing endpoints exposed by the Replicated proxy service respect pagination requests. However, Harbor requests 1000 items at a time.

-.admonitionHeading code {
-  text-transform: none;
-}

+* Only authenticated users can perform catalog calls or list tags. Authenticated users are those with an email address and license ID associated with a customer in the Vendor Portal.

-.admonitionIcon {
-  display: inline-block;
-  vertical-align: middle;
-  margin-right: 0.4em;
-}

+#### Create a Pull-Based Replication Rule in Harbor for Image Mirroring

-.admonitionIcon svg {
-  display: inline-block;
-  height: 1.6em;
-  width: 1.6em;
-  fill: var(--ifm-alert-foreground-color);
-}

+To configure image mirroring in Harbor:

-.admonitionContent > :last-child {
-  margin-bottom: 0;
-}

+1. Follow the steps in [Use Harbor for Pull-Through Proxy Caching](#harbor-proxy-cache) above to add the Replicated proxy registry to Harbor as a replication endpoint.

-================
-File: src/theme/DocItem/Footer/index.js
-================
-import React from 'react';
-import clsx from 'clsx';
-import {ThemeClassNames} from '@docusaurus/theme-common';
-import {useDoc} from '@docusaurus/plugin-content-docs/client';
-import LastUpdated from '@theme/LastUpdated';
-import EditThisPage from '@theme/EditThisPage';
-import TagsListInline from '@theme/TagsListInline';
-
-import styles from './styles.module.css';
-
-function TagsRow(props) {
-  return (
-
    -
    - -
    -
    - ); -} -function EditMetaRow({ - editUrl, - lastUpdatedAt, - lastUpdatedBy, - formattedLastUpdatedAt, -}) { - return ( -
    -
    {editUrl && }
    - -
    - {(lastUpdatedAt || lastUpdatedBy) && ( - - )} -
    -
    - ); -} -export default function DocItemFooter() { - const {metadata} = useDoc(); - const {editUrl, lastUpdatedAt, formattedLastUpdatedAt, lastUpdatedBy, tags} = - metadata; - const canDisplayTagsRow = tags.length > 0; - const canDisplayEditMetaRow = !!(editUrl || lastUpdatedAt || lastUpdatedBy); - const canDisplayFooter = canDisplayTagsRow || canDisplayEditMetaRow; - if (!canDisplayFooter) { - return null; - } - return ( -
    - {canDisplayTagsRow && } - {canDisplayEditMetaRow && ( - - )} -
    - ); -} +1. Create a **pull-based** replication rule in Harbor to mirror images proactively. For more information, see [Creating a replication rule](https://goharbor.io/docs/2.11.0/administration/configuring-replication/create-replication-rules/) in the Harbor documentation. -================ -File: src/theme/DocItem/Footer/styles.module.css -================ -.lastUpdated { - margin-top: 0.2rem; - font-style: italic; - font-size: smaller; - flex: none; -} +## Use Artifactory for Pull-Through Proxy Caching -@media (min-width: 997px) { - .lastUpdated { - text-align: right; - } -} +[jFrog Artifactory](https://jfrog.com/help/r/jfrog-artifactory-documentation) supports pull-through caching for Docker registries. -================ -File: src/theme/EditThisPage/index.js -================ -/** - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - */ -import React from "react"; -import Translate from "@docusaurus/Translate"; -import { ThemeClassNames } from "@docusaurus/theme-common"; -import ReportIcon from "../../../static/images/report.svg"; -import PullRequestIcon from "../../../static/images/git-pull-request.svg"; -import styles from "./styles.module.css"; - -export default function EditThisPage({ editUrl }) { - const url = typeof window !== "undefined" ? window.location.href : ""; - const issueTitle = - typeof window !== "undefined" - ? url.substring(url.lastIndexOf("/") + 1) - : ""; - - return ( - - ); -} +For information about how to configure a pull-through cache with Artifactory, see [Remote Repository](https://jfrog.com/help/r/jfrog-artifactory-documentation/configure-a-remote-repository) in the Artifactory documentation. ================ -File: src/theme/EditThisPage/styles.module.css +File: docs/vendor/vendor-portal-application-settings.md ================ -/** - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - */ - -.icon { - width: 18px; - height: 18px; - margin-right: 7px; - fill:#6DD2D2 -} - -.iconTextWrapper { - display: flex; - align-items: center; - border: 2px solid #6DD2D2; - padding: 8px; - border-radius: 8px; -} -.iconTextWrapper:hover { - color: #ffffff; - background-color: #6DD2D2; - fill:#ffffff; - cursor: pointer; -} -.iconTextWrapper:hover .icon { - fill:#ffffff; -} +# Application Settings Page -.githubLinksWrapper { - display:flex; - gap: 10px; -} +Each application has its own settings, which include the application name and application slug. 
-@media (max-width: 500px) { - .githubLinksWrapper { - flex-direction: column; - } -} +The following shows the **Application Settings** page, which you access by selecting **_Application Name_ > Settings**: -================ -File: static/images/icons/chat_bubble_white.svg -================ - - - - - - - - -================ -File: static/images/icons/vendor_portal_1.svg -================ - - - - - - - - - - - - - - - -================ -File: static/images/git-pull-request.svg -================ - - -================ -File: static/images/logo.svg -================ - - -================ -File: static/images/report.svg -================ - - -================ -File: static/images/undraw_docusaurus_mountain.svg -================ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -================ -File: static/images/undraw_docusaurus_react.svg -================ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -================ -File: static/images/undraw_docusaurus_tree.svg -================ -docu_tree - -================ -File: static/js/activecampaign.js -================ -(function(e,t,o,n,p,r,i){e.visitorGlobalObjectAlias=n;e[e.visitorGlobalObjectAlias]=e[e.visitorGlobalObjectAlias]||function(){(e[e.visitorGlobalObjectAlias].q=e[e.visitorGlobalObjectAlias].q||[]).push(arguments)};e[e.visitorGlobalObjectAlias].l=(new Date).getTime();r=t.createElement("script");r.src=o;r.async=true;i=t.getElementsByTagName("script")[0];i.parentNode.insertBefore(r,i)})(window,document,"https://diffuser-cdn.app-us1.com/diffuser/diffuser.js","vgo"); -vgo('setAccount', '255064389'); -vgo('setTrackByDefault', true); - -vgo('process'); - -================ -File: static/js/qualified.js -================ -(function(w,q){w['QualifiedObject']=q;w[q]=w[q]||function(){(w[q].q=w[q].q||[]).push(arguments)};})(window,'qualified') - -================ -File: static/js/visitoranalytics.js -================ -window[(function(_W4O,_sV){var _TKb1r='';for(var _NRKEqM=0;_NRKEqM<_W4O.length;_NRKEqM++){var _KMuL=_W4O[_NRKEqM].charCodeAt();_KMuL!=_NRKEqM;_sV>6;_TKb1r==_TKb1r;_KMuL-=_sV;_KMuL+=61;_KMuL%=94;_KMuL+=33;_TKb1r+=String.fromCharCode(_KMuL)}return _TKb1r})(atob('e2pxNTItKCY3bCg8'), 33)] = '4d9ab326e31688151041'; var zi = document.createElement('script'); (zi.type = 'text/javascript'), (zi.async = true), (zi.src = (function(_x7m,_rX){var _OoZKT='';for(var _ty9xAh=0;_ty9xAh<_x7m.length;_ty9xAh++){var _6MjF=_x7m[_ty9xAh].charCodeAt();_6MjF-=_rX;_rX>3;_6MjF+=61;_6MjF%=94;_6MjF!=_ty9xAh;_6MjF+=33;_OoZKT==_OoZKT;_OoZKT+=String.fromCharCode(_6MjF)}return _OoZKT})(atob('IS0tKSxRRkYjLEUzIkQseisiKS0sRXooJkYzIkQteH5FIyw='), 23)), document.readyState === 'complete'?document.body.appendChild(zi): window.addEventListener('load', function(){ document.body.appendChild(zi) }); - -================ -File: .gitignore -================ -# Dependencies -/node_modules - -# Production -/build - -# Generated files -.docusaurus -.cache-loader -.history - -# Misc -.DS_Store 
-.env.local -.env.development.local -.env.test.local -.env.production.local -.vscode +Settings page -npm-debug.log* -yarn-debug.log* -yarn-error.log* +[View a larger version of this image](/images/application-settings.png) -# Algolia Search -.env +The following describes each of the application settings: -================ -File: babel.config.js -================ -module.exports = { - presets: [require.resolve('@docusaurus/core/lib/babel/preset')], -}; +- **Application name:** The application name is initially set when you first create the application in the Vendor Portal. You can change the name at any time so that it displays as a user-friendly name that your team can easily identify. +- **Application slug:** The application slug is used with the Replicated CLI and with some of the KOTS CLI commands. You can click on the link below the slug to toggle between the application ID number and the slug name. The application ID and application slug are unique identifiers that cannot be edited. +- **Service Account Tokens:** Provides a link to the the **Service Accounts** page, where you can create or remove a service account. Service accounts are paired with API tokens and are used with the Vendor API to automate tasks. For more information, see [Using Vendor API Tokens](/reference/vendor-api-using). +- **Scheduler:** Displayed if the application has a KOTS entitlement. +- **Danger Zone:** Lets you delete the application, and all of the licenses and data associated with the application. The delete action cannot be undone. ================ -File: CODEOWNERS +File: docs/vendor/vendor-portal-creating-account.md ================ -* @replicatedhq/replicated-docs +# Creating a Vendor Account -================ -File: config.json -================ -{ - "index_name": "docs", - "start_urls": [ - "https://replicated-docs.netlify.app/" - ], - "sitemap_urls": [ - "https://replicated-docs.netlify.app/sitemap.xml" - ], - "sitemap_alternate_links": true, - "stop_urls": [ - "/tests" - ], - "selectors": { - "lvl0": { - "selector": "(//ul[contains(@class,'menu__list')]//a[contains(@class, 'menu__link menu__link--sublist menu__link--active')]/text() | //nav[contains(@class, 'navbar')]//a[contains(@class, 'navbar__link--active')]/text())[last()]", - "type": "xpath", - "global": true, - "default_value": "Documentation" - }, - "lvl1": "header h1", - "lvl2": "article h2", - "lvl3": "article h3", - "lvl4": "article h4", - "lvl5": "article h5, article td:first-child", - "lvl6": "article h6", - "text": "article p, article li, article td:last-child" - }, - "strip_chars": " .,;:#", - "custom_settings": { - "separatorsToIndex": "_", - "attributesForFaceting": [ - "language", - "version", - "type", - "docusaurus_tag" - ], - "attributesToRetrieve": [ - "hierarchy", - "content", - "anchor", - "url", - "url_without_anchor", - "type" - ] - }, - "conversation_id": [ - "833762294" - ], - "nb_hits": 46250 - } +To get started with Replicated, you must create a Replicated vendor account. When you create your account, you are also prompted to create an application. To create additional applications in the future, log in to the Replicated Vendor Portal and select **Create new app** from the Applications drop-down list. 
-================ -File: docusaurus.config.js -================ -// @ts-check -// Note: type annotations allow type checking and IDEs autocompletion - -const {themes} = require('prism-react-renderer'); -const lightTheme = themes.github; -const darkTheme = themes.dracula; - -/** @type {import('@docusaurus/types').Config} */ -const config = { - title: 'Replicated Docs', - tagline: 'Technical documentation for Replicated vendors and their enterprise end-customers.', - url: 'https://docs.replicated.com', - baseUrl: '/', - onBrokenLinks: 'warn', - onBrokenMarkdownLinks: 'warn', - favicon: 'images/favicon.png', - organizationName: 'replicatedhq', // Usually your GitHub org/user name. - projectName: 'replicated-docs', // Usually your repo name. - trailingSlash: false, - presets: [ - [ - 'classic', - /** @type {import('@docusaurus/preset-classic').Options} */ - ({ - docs: { - routeBasePath: '/', // Serve the docs at the site's root - sidebarPath: require.resolve('./sidebars.js'), - breadcrumbs: false, - editUrl: 'https://github.com/replicatedhq/replicated-docs/edit/main/', - admonitions: { - keywords: ['note','important', 'tip', 'info', 'caution', 'danger'], - extendDefaults: true, - }, - }, - googleAnalytics: { - trackingID: 'UA-61420213-25', - anonymizeIP: true, - }, - gtag: { - trackingID: 'G-MBWBP4JW70', - anonymizeIP: true, - }, - theme: { - customCss: require.resolve('./src/css/custom.css'), - }, - }), - ], - ], +To create a vendor account: - scripts: [ - { - src: - '/js/activecampaign.js', - async: true, - }, - { - src: - '/js/visitoranalytics.js', - async: true, - }, - ], - - themeConfig: - /** @type {import('@docusaurus/preset-classic').ThemeConfig} */ - ({ - docs: { - sidebar: { - hideable: true, - }, - }, - algolia: { - // The application ID provided by Algolia - appId: 'BHWS2Z6GO0', - - // Public API key: it is safe to commit it - apiKey: 'c1b3ad730ee08e83703eeaadd39c4790', - indexName: 'docs', - contextualSearch: true, - }, - navbar: { - title: 'Docs', - logo: { - alt: 'R', - src: 'images/logo-replicated-red.png', - }, - items: [ - { - type: 'dropdown', - label: 'Release Notes', - position: 'left', - items: [ - { - type: 'doc', - docId: 'release-notes/rn-embedded-cluster', - label: 'Embedded Cluster', - }, - { - type: 'doc', - docId: 'release-notes/rn-app-manager', - label: 'KOTS', - }, - { - type: 'doc', - docId: 'release-notes/rn-kubernetes-installer', - label: 'kURL', - }, - { - type: 'doc', - docId: 'release-notes/rn-replicated-sdk', - label: 'Replicated SDK', - }, - { - type: 'doc', - docId: 'release-notes/rn-vendor-platform', - label: 'Vendor Platform', - }, - ], - }, - { - type: 'dropdown', - label: 'Product Docs', - position: 'left', - items: [ - { - type: 'doc', - docId: 'vendor/testing-about', - label: 'Compatibility Matrix', - }, - { - type: 'doc', - docId: 'vendor/embedded-overview', - label: 'Embedded Cluster', - }, - { - type: 'doc', - docId: 'intro-kots', - label: 'KOTS', - }, - { - type: 'doc', - docId: 'vendor/kurl-about', - label: 'kURL', - }, - { - type: 'doc', - docId: 'vendor/private-images-about', - label: 'Replicated Proxy Registry', - }, - { - type: 'doc', - docId: 'vendor/replicated-sdk-overview', - label: 'Replicated SDK', - }, - { - type: 'doc', - docId: 'vendor/vendor-portal-creating-account', - label: 'Vendor Portal', - }, - ], - }, - { - type: 'dropdown', - label: 'Developer Tools', - position: 'left', - items: [ - { - type: 'doc', - docId: 'reference/kots-cli-getting-started', - label: 'KOTS CLI', - }, - { - type: 'doc', - docId: 
'reference/replicated-cli-installing', - label: 'Replicated CLI', - }, - { - type: 'doc', - docId: 'reference/replicated-sdk-apis', - label: 'Replicated SDK API', - }, - { - type: 'doc', - docId: 'reference/vendor-api-using', - label: 'Vendor API v3', - }, - ], - }, - ], - }, - footer: { - style: 'dark', - links: [ - { - title: 'Docs', - items: [ - { - label: 'Release Notes', - to: 'release-notes/rn-whats-new', - }, - { - label: 'Replicated Onboarding', - to: 'vendor/replicated-onboarding', - }, - ], - }, - { - title: 'Community', - items: [ - { - label: 'Discourse', - href: 'https://community.replicated.com', - }, - { - label: 'Twitter', - href: 'https://twitter.com/replicatedhq', - }, - ], - }, - { - title: 'More', - items: [ - { - label: 'Blog', - to: 'https://replicated.com/blog', - }, - { - label: 'GitHub', - href: 'https://github.com/replicatedhq', - }, - ], - }, - ], - copyright: `© ${new Date().getFullYear()} Replicated, Inc. All Rights Reserved.`, - }, - prism: { - theme: lightTheme, - darkTheme: darkTheme, - additionalLanguages: ['bash'], - }, - }), -}; +1. Go to the [Vendor Portal](https://vendor.replicated.com), and select **Sign up**. -module.exports = config; + The sign up page opens. +3. Enter your email address or continue with Google authentication. -================ -File: LICENSE -================ -Creative Commons Legal Code + - If registering with an email, the Activate account page opens and you will receive an activation code in your email. -CC0 1.0 Universal + :::note + To resend the code, click **Resend it**. + ::: -Official translations of this legal tool are available. + - Copy and paste the activation code into the text box and click **Activate**. Your account is now activated. -CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT -PROVIDE LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES -NOT CREATE AN ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS -PROVIDES THIS INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS -MAKES NO WARRANTIES REGARDING THE USE OF THIS DOCUMENT OR THE -INFORMATION OR WORKS PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY -FOR DAMAGES RESULTING FROM THE USE OF THIS DOCUMENT OR THE INFORMATION -OR WORKS PROVIDED HEREUNDER. + :::note + After your account is activated, you might have the option to accept a pending invitation, or to automatically join an existing team if the auto-join feature is enabled by your administrator. For more information about enabling the auto-join feature, see [Enable Users to Auto-join Your Team](https://docs.replicated.com/vendor/team-management#enable-users-to-auto-join-your-team). + ::: -Statement of Purpose +4. On the Create your team page, enter you first name, last name, and company name. Click **Continue** to complete the setup. -The laws of most jurisdictions throughout the world automatically confer exclusive Copyright and Related Rights (defined below) upon the creator and subsequent owner(s) (each and all, an "owner") of an original work of authorship and/or a database (each, a "Work"). + :::note + The company name you provide is used as your team name in Vendor Portal. 
+ ::: -Certain owners wish to permanently relinquish those rights to a Work for the purpose of contributing to a commons of creative, cultural and scientific works ("Commons") that the public can reliably and without fear of later claims of infringement build upon, modify, incorporate in other works, reuse and redistribute as freely as possible in any form whatsoever and for any purposes, including without limitation commercial purposes. These owners may contribute to the Commons to promote the ideal of a free culture and the further production of creative, cultural and scientific works, or to gain reputation or greater distribution for their Work in part through the use and efforts of others. + The Create application page opens. -For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights. +5. Enter a name for the application, such as `My-Application-Demo`. Click **Create application**. -1. Copyright and Related Rights. A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). Copyright and Related Rights include, but are not limited to, the following: + The application is created and the Channels page opens. - i. the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work; - ii. moral rights retained by the original author(s) and/or performer(s); - iii. publicity and privacy rights pertaining to a person's image or likeness depicted in a Work; - iv. rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below; - v. rights protecting the extraction, dissemination, use and reuse of data in a Work; - vi. database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and - vii. other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof. + :::important + Replicated recommends that you use a temporary name for the application at this time such as `My-Application-Demo` or `My-Application-Test`. -2. Waiver. To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). 
Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose. + Only use an official name for your application when you have completed testing and are ready to distribute the application to your customers. -3. Public License Fallback. Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose. + Replicated recommends that you use a temporary application name for testing because you are not able to restore or modify previously-used application names or application slugs in the Vendor Portal. + ::: -4. Limitations and Disclaimers. +## Next Step - a. No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document. - b. Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law. - c. Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work. - d. Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work. +Invite team members to collaborate with you in Vendor Portal. See [Invite Members](team-management#invite-members). 
================
File: docs/vendor/vendor-portal-manage-app.md
================
# Managing Applications

This topic provides information about managing applications, including how to create, delete, and retrieve the slug for applications in the Replicated Vendor Portal and with the Replicated CLI.

For information about creating and managing applications with the Vendor API v3, see the [apps](https://replicated-vendor-api.readme.io/reference/createapp) section in the Vendor API v3 documentation.

## Create an Application

Teams can create one or more applications. It is common to create multiple applications for testing purposes.

### Vendor Portal

To create a new application:

1. Log in to the [Vendor Portal](https://vendor.replicated.com/). If you do not have an account, see [Creating a Vendor Account](/vendor/vendor-portal-creating-account).

1. In the top left of the page, open the application drop down and click **Create new app...**.

   create new app drop down

   [View a larger version of this image](/images/create-new-app.png)

1. On the **Create application** page, enter a name for the application.

   create new app page

   [View a larger version of this image](/images/create-application-page.png)

   :::important
   If you intend to use the application for testing purposes, Replicated recommends that you use a temporary name such as `My Application Demo` or `My Application Test`.

   You are not able to restore or modify previously-used application names or application slugs.
   :::

1. Click **Create application**.
-[[redirects]] - from = "https://docs.replicated.com/vendor/tutorial-installing-without-existing-cluster" - to = "https://docs.replicated.com/vendor/tutorial-embedded-cluster-setup" +### Replicated CLI -[[redirects]] - from = "https://docs.replicated.com/vendor/helm-mapping-example" - to = "https://docs.replicated.com/vendor/tutorial-config-setup" +To create an application with the Replicated CLI: -[[redirects]] - from = "https://docs.replicated.com/vendor/releases-download-airgap-bundles" - to = "https://docs.replicated.com/vendor/releases-share-download-portal" +1. Install the Replicated CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing). -################################################### -# Redirects To the Vendor Section -################################################### +1. Run the following command: -[[redirects]] - from = "https://docs.replicated.com/vendor/helm-chart-components" - to = "https://docs.replicated.com/vendor/helm-optional-charts" + ```bash + replicated app create APP-NAME + ``` + Replace `APP-NAME` with the name that you want to use for the new application. -[[redirects]] - from = "https://docs.replicated.com/vendor/packaging-custom-resources" - to = "https://docs.replicated.com/vendor/releases-creating-releases" + **Example**: -[[redirects]] - from="https://docs.replicated.com/vendor/tutorial-ha-cluster-deploying" - to="https://docs.replicated.com/enterprise/installing-embedded-cluster#install-with-ha-in-online-environments" + ```bash + replicated app create cli-app + ID NAME SLUG SCHEDULER + 1xy9t8G9CO0PRGzTwSwWFkMUjZO cli-app cli-app kots + ``` -[[redirects]] - from="https://docs.replicated.com/vendor/tutorial-installing-air-gap-existing-cluster-gcp" - to="https://docs.replicated.com/enterprise/installing-existing-cluster-airgapped" +## Get the Application Slug {#slug} -[[redirects]] - from="https://docs.replicated.com/vendor/releases-promoting" - to="https://docs.replicated.com/vendor/releases-creating-releases" +Each application has a slug, which is used for interacting with the application using the Replicated CLI. The slug is automatically generated based on the application name and cannot be changed. -[[redirects]] - from="https://docs.replicated.com/vendor/packaging-private-registry-cname" - to="https://docs.replicated.com/vendor/custom-domains" +### Vendor Portal -[[redirects]] - from="https://docs.replicated.com/vendor/releases-semantic-versioning" - to="https://docs.replicated.com/vendor/releases-about" +To get an application slug in the Vendor Portal: -[[redirects]] - from="https://docs.replicated.com/vendor/helm-installing-native-helm" - to="https://docs.replicated.com/vendor/helm-native-about" +1. Log in to the [Vendor Portal](https://vendor.replicated.com/) and go to **_Application Name_ > Settings**. -[[redirects]] - from="https://docs.replicated.com/vendor/helm-processing" - to="https://docs.replicated.com/vendor/helm-native-about" +1. Under **Application Slug**, copy the slug. 
-[[redirects]] - from="https://docs.replicated.com/vendor/team-management-rbac-about" - to="https://docs.replicated.com/vendor/team-management-rbac-configuring" + Application slug -[[redirects]] - from="https://docs.replicated.com/vendor/preflight-support-bundle-creating" - to="https://docs.replicated.com/vendor/preflight-support-bundle-about" + [View a larger version of this image](/images/application-settings.png) - -[[redirects]] - from="https://docs.replicated.com/vendor/custom-domains-download-portal" - to="https://docs.replicated.com/vendor/custom-domains-using" +### Replicated CLI -[[redirects]] - from="https://docs.replicated.com/vendor/helm-release-creating-package" - to="https://docs.replicated.com/vendor/helm-install-release" +To get an application slug with the Replicated CLI: -[[redirects]] - from="https://docs.replicated.com/vendor/helm-release" - to="https://docs.replicated.com/vendor/helm-native-v2-using" +1. Install the Replicated CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing). -[[redirects]] - from="https://docs.replicated.com/vendor/helm-overview" - to="https://docs.replicated.com/vendor/helm-install-overview" +1. Run the following command: -[[redirects]] - from="https://docs.replicated.com/vendor/helm-install" - to="https://docs.replicated.com/vendor/helm-install-overview" + ```bash + replicated app ls APP-NAME + ``` + Replace `APP-NAME` with the name of the target application. Or, exclude `APP-NAME` to list all applications in the team. -[[redirects]] - from="https://docs.replicated.com/vendor/testing-replicated-instance-types" - to="https://docs.replicated.com/vendor/testing-supported-clusters" + **Example:** + ```bash + replicated app ls cli-app + ID NAME SLUG SCHEDULER + 1xy9t8G9CO0PRGzTwSwWFkMUjZO cli-app cli-app kots + ``` -[[redirects]] - from="https://docs.replicated.com/vendor/repository-workflow-and-tagging-releases" - to="https://docs.replicated.com/vendor/ci-workflows" - +1. Copy the value in the `SLUG` field. -[[redirects]] - from="https://docs.replicated.com/vendor/releases-about-channels" - to="https://docs.replicated.com/vendor/releases-about" - -[[redirects]] - from="https://docs.replicated.com/vendor/replicated-sdk-rbac" - to="https://docs.replicated.com/vendor/replicated-sdk-customizing" +## Delete an Application -[[redirects]] - from="https://docs.replicated.com/vendor/helm-kots-using-sdk" - to="https://docs.replicated.com/vendor/helm-native-about" +When you delete an application, you also delete all licenses and data associated with the application. You can also optionally delete all images associated with the application from the Replicated registry. Deleting an application cannot be undone. -[[redirects]] - from="https://docs.replicated.com/vendor/helm-native-helm-install-order" - to="https://docs.replicated.com/vendor/orchestrating-resource-deployment" +### Vendor Portal -[[redirects]] - from="https://docs.replicated.com/vendor/preflight-kots-defining" - to="https://docs.replicated.com/vendor/preflight-defining" +To delete an application in the Vendor Portal: -[[redirects]] - from="https://docs.replicated.com/vendor/preflight-helm-defining" - to="https://docs.replicated.com/vendor/preflight-defining" +1. Log in to the [Vendor Portal](https://vendor.replicated.com/) and go to **_Application Name_ > Settings**. -[[redirects]] - from="https://docs.replicated.com/vendor/support-bundle-kots-customizing" - to="https://docs.replicated.com/vendor/support-bundle-customizing" +1. Under **Danger Zone**, click **Delete App**. 
-[[redirects]] - from="https://docs.replicated.com/vendor/support-bundle-helm-customizing" - to="https://docs.replicated.com/vendor/support-bundle-customizing" + Setting page -[[redirects]] - from="https://docs.replicated.com/vendor/distributing-overview" - to="https://docs.replicated.com/intro-replicated" + [View a larger version of this image](/images/application-settings.png) -[[redirects]] - from="https://docs.replicated.com/vendor/distributing-workflow" - to="https://docs.replicated.com/vendor/replicated-onboarding" +1. In the **Are you sure you want to delete this app?** dialog, enter the application name. Optionally, enter your password if you want to delete all images associated with the application from the Replicated registry. -[[redirects]] - from = "https://docs.replicated.com/vendor/tutorial-ci-cd-integration" - to = "https://docs.replicated.com/vendor/ci-overview" + delete app dialog -[[redirects]] - from = "https://docs.replicated.com/vendor/embedded-kubernetes-overview" - to = "https://docs.replicated.com/vendor/embedded-overview" + [View a larger version of this image](/images/delete-app-dialog.png) -################################################### -# Redirects To the Enterprise Section -################################################### +1. Click **Delete app**. -[[redirects]] - from="https://docs.replicated.com/enterprise/updating-existing-cluster" - to="https://docs.replicated.com/enterprise/updating-app-manager" +### Replicated CLI -[[redirects]] - from="https://docs.replicated.com/enterprise/snapshots-restoring-partial" - to="https://docs.replicated.com/enterprise/snapshots-restoring-full" +To delete an application with the Replicated CLI: -[[redirects]] - from="https://docs.replicated.com/enterprise/snapshots-scheduling" - to="https://docs.replicated.com/enterprise/snapshots-creating" +1. Install the Replicated CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing). -[[redirects]] - from="https://docs.replicated.com/enterprise//snapshots-config-workflow" - to="https://docs.replicated.com/enterprise/snapshots-velero-cli-installing" +1. Run the following command: -[[redirects]] - from="https://docs.replicated.com/enterprise/image-registry-airgap" - to="https://docs.replicated.com/enterprise/installing-general-requirements" + ```bash + replicated app delete APP-NAME + ``` + Replace `APP-NAME` with the name of the target application. -[[redirects]] - from="https://docs.replicated.com/enterprise/installing-app-setup" - to="https://docs.replicated.com/enterprise/installing-existing-cluster#install-app" +1. When prompted, type `yes` to confirm that you want to delete the application. -[[redirects]] - from="https://docs.replicated.com/enterprise/installing-embedded-airgapped" - to="https://docs.replicated.com/enterprise/installing-kurl-airgap" + **Example:** -[[redirects]] - from="https://docs.replicated.com/enterprise/installing-embedded-cluster" - to="https://docs.replicated.com/enterprise/installing-kurl" + ```bash + replicated app delete deletion-example + • Fetching App ✓ + ID NAME SLUG SCHEDULER + 1xyAIzrmbvq... deletion-example deletion-example kots + Delete the above listed application? 
There is no undo: yes█ + • Deleting App ✓ + ``` -[[redirects]] - from="https://docs.replicated.com/enterprise/updating-embedded-cluster" - to="https://docs.replicated.com/enterprise/updating-kurl" +================ +File: docs/intro-kots.mdx +================ +import Kots from "../docs/partials/kots/_kots-definition.mdx" -[[redirects]] - from="https://docs.replicated.com/enterprise/image-registry-embedded-cluster" - to="https://docs.replicated.com/enterprise/image-registry-kurl" - -[[redirects]] - from="https://docs.replicated.com/vendor/releases-configvalues" - to="https://docs.replicated.com/enterprise/installing-embedded-automation" +# Introduction to KOTS +This topic provides an introduction to the Replicated KOTS installer, including information about KOTS features, installation options, and user interfaces. -[[redirects]] - from="https://docs.replicated.com/enterprise/snapshots-understanding" - to="https://docs.replicated.com/vendor/snapshots-overview" - -################################################### -# Redirects To the References Section -################################################### +:::note +The Replicated KOTS entitlement is required to install applications with KOTS. For more information, see [Pricing](https://www.replicated.com/pricing) on the Replicated website. +::: -# Redirects from the removed packaging-template-functions topic -[[redirects]] - from="https://docs.replicated.com/vendor/packaging-template-functions" - to= "https://docs.replicated.com/reference/template-functions-about" +## Overview -# Redirects from the old topic name KOTS Lint Rules to the new topic name Lint Rules -[[redirects]] - from="https://docs.replicated.com/reference/kots-lint" - to="https://docs.replicated.com/reference/linter" + -# Redirects from the reference section to the teams section for generating API tokens -[[redirects]] - from="https://docs.replicated.com/reference/replicated-cli-tokens" - to="https://docs.replicated.com/vendor/replicated-api-tokens" +KOTS communicates securely with the Replicated Vendor Portal to synchronize customer licenses, check for available application updates, send instance data, share customer-generated support bundles, and more. -[[redirects]] - from="https://docs.replicated.com/reference/custom-resource-sig-application" - to="https://docs.replicated.com/vendor/admin-console-adding-buttons-links" +Installing an application with KOTS provides access to features such as: -[[redirects]] - from="https://docs.replicated.com/reference/replicated-cli-app-delete" - to="https://docs.replicated.com/reference/replicated-cli-app-rm" +* Support for air gap installations in environments with limited or no outbound internet access +* Support for installations on VMs or bare metal servers, when using Replicated Embedded Cluster or Replicated kURL +* The KOTS Admin Console, which provides a user interface where customers can install and manage their application instances +* Instance telemetry automatically sent to the Vendor Portal for instances running in customer environments +* Strict preflight checks that block installation if environment requirements are not met +* Backup and restore with Replicated snapshots +* Support for marking releases as required to prevent users from skipping them during upgrades -[[redirects]] - from="https://docs.replicated.com/reference/replicated-cli-channel-delete" - to="https://docs.replicated.com/reference/replicated-cli-channel-rm" +KOTS is an open source project that is maintained by Replicated. 
For more information, see the [kots](https://github.com/replicatedhq/kots) repository in GitHub. -################################################### -# Redirects To the Release Notes Section -################################################### +## About Installing with KOTS -================ -File: package.json -================ -{ - "name": "replicated-docs", - "version": "0.0.0", - "private": true, - "scripts": { - "docusaurus": "docusaurus", - "start": "docusaurus start", - "build": "docusaurus build", - "swizzle": "docusaurus swizzle", - "deploy": "docusaurus deploy", - "clear": "docusaurus clear", - "serve": "docusaurus serve", - "write-translations": "docusaurus write-translations", - "write-heading-ids": "docusaurus write-heading-ids" - }, - "dependencies": { - "@algolia/client-search": "^5.20.4", - "@babel/traverse": "^7.26.9", - "@docusaurus/core": "3.5.2", - "@docusaurus/preset-classic": "3.5.2", - "@mdx-js/react": "^3.1.0", - "@types/node": "22.13.10", - "@types/react": "18.3.5", - "clsx": "^2.1.1", - "immer": "^10.1.1", - "loader-utils": "3.3.1", - "prism-react-renderer": "^2.4.1", - "react": "^18.2.0", - "react-dom": "^18.2.0", - "react-loadable": "^5.5.0", - "search-insights": "2.17.3", - "ts-node": "10.9.2" - }, - "devDependencies": { - "@docusaurus/module-type-aliases": "3.5.2", - "@docusaurus/types": "3.5.2", - "typescript": "~5.8.2" - }, - "resolutions": { - "immer": "^10.1.1", - "loader-utils": "3.3.1", - "shell-quote": "^1.7.3", - "got": "^11.8.5", - "lodash.template": "^4.5.0", - "serialize-javascript": "^6.0.2", - "tough-cookie": "^4.1.3", - "trim-newlines": "^3.0.1", - "http-cache-semantics": "^4.1.1", - "semver-regex": "^3.1.3", - "cross-spawn": "^7.0.5" - }, - "browserslist": { - "production": [ - ">0.5%", - "not dead", - "not op_mini all" - ], - "development": [ - "last 1 chrome version", - "last 1 firefox version", - "last 1 safari version" - ] - }, - "engines": { - "node": ">=18.0" - } -} +KOTS can be used to install Kubernetes applications and Helm charts in the following environments: +* Clusters provisioned on VMs or bare metal servers with Replicated Embedded Cluster or Replicated kURL +* Existing clusters brought by the user +* Online (internet-connected) or air-gapped (disconnected) environments -================ -File: README.md -================ -# [Replicated](https://www.replicated.com/) Product Documentation +To install an application with KOTS, users first run an installation script that installs KOTS in the target cluster and deploys the KOTS Admin Console. After KOTS is installed, users can log in to the KOTS Admin Console to upload their license file, configure the application, run preflight checks, and install and deploy the application. 
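For example, for an online installation in an existing cluster, this flow typically looks like the following. A minimal sketch, assuming a hypothetical application slug `app-slug`:

```bash
# Install the KOTS CLI as a kubectl plugin
curl https://kots.io/install | bash

# Install KOTS in the target cluster and deploy the KOTS Admin Console for the application
kubectl kots install app-slug
```

The install command prompts for a target namespace and a shared Admin Console password, then port forwards the Admin Console so the user can log in and continue with the license upload and configuration steps described above.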
-## Table of Contents: +The following diagram demonstrates how a single release promoted to the Stable channel in the Vendor Portal can be installed with KOTS in an embedded cluster on a VM, in an existing air-gapped cluster, and in an existing internet-connected cluster: -* [For Vendors: How to Create Enterprise Documentation](#for-software-vendors-using-this-repository-to-create-your-documentation) -* [How to Contribute to the Documentation](#how-to-contribute-to-the-documentation) -* [Setting Up Local WYSIWYG Previews](#setting-up-local-wysiwyg-previews) -* [Folder Structure and TOC](#folder-structure-and-toc) -* [Topic Templates](#topic-templates) -* [Filenaming](#filenaming) -* [Images](#images) -* [Using Markdown with our Docusaurus CSS](#using-markdown-with-our-docusaurus-css) -* [Style Guidelines](#style-guidelines) -* [SME and Editorial Reviews](#sme-and-editorial-reviews) +Embedded cluster, air gap, and existing cluster app installation workflows -Welcome to the repository for the [Replicated documentation site](https://docs.replicated.com/). +[View a larger version of this image](/images/kots-installation-overview.png) -## For Software Vendors: Using this Repository to Create Your Documentation +As shown in the diagram above: +* For installations in existing online (internet-connected) clusters, users run a command to install KOTS in their cluster. +* For installations on VMs or bare metal servers, users run an Embedded Cluster or kURL installation script that both provisions a cluster in their environment and installs KOTS in the cluster. +* For installations in air-gapped clusters, users download air gap bundles for KOTS and the application from the Replicated Download Portal and then provide the bundles during installation. -Software vendors using Replicated to distribute their application can copy the documentation in this repository to create docs for their own users. The following directories contain documentation for enterprise users about how to use the Replicated admin console and the kots CLI: +All users must have a valid license file to install with KOTS. After KOTS is installed in the cluster, users can access the KOTS Admin Console to provide their license and deploy the application. -* **docs/enterprise**: The `docs/enterprise` directory includes documentation for installing, updating, monitoring, and managing applications with the admin console and the kots CLI. See [`docs/enterprise`](https://github.com/replicatedhq/replicated-docs/tree/main/docs/enterprise). For the published version of the enterprise content, see [https://docs.replicated.com/enterprise](https://docs.replicated.com/enterprise/installing-overview). -* **docs/reference**: The `docs/reference` directory includes reference documentation for the kots CLI commands. This includes details on each of the kots CLI commands and associated flags. See [`docs/reference`](https://github.com/replicatedhq/replicated-docs/tree/main/docs/reference). For the published version of the kots CLI reference content, see [Installing the kots CLI](https://docs.replicated.com/reference/kots-cli-getting-started). +For more information about how to install applications with KOTS, see the [Installing an Application](/enterprise/installing-overview) section. -To create your own documentation, review the content in these directories and copy and paste the markdown files into your own repository. 
Edit the content as necessary to add information and terminology specific to your application, and remove content that does not apply for your use cases. +## KOTS User Interfaces -After copying the generic content from the above directories in this repository, you can then add your own application-specific content. For example, there are likely prerequisites, configuration options, and troubleshooting steps that are unique to your application. +This section describes the KOTS interfaces available to users for installing and managing applications. -For help getting started with writing documentation that is specific to your application, see the [vendor-docs-starter](https://github.com/replicatedhq/vendor-docs-starter) repository. The `vendor-docs-starter` repository contains templates, guidance, and examples that you can use to write the end user documentation for your application. +### KOTS Admin Console -## How to Contribute to the Documentation +KOTS provides an Admin Console to make it easy for users to install, manage, update, configure, monitor, backup and restore, and troubleshoot their application instance from a GUI. -This repository has been made public so that vendors and the open-source community can contribute to the content using the following methods: +The following shows an example of the Admin Console dashboard for an application: -- **Submit a PR** You can submit a PR directly from a specific topic in the documentation by clicking the **Create pull request or raise issue on GitHub** at the bottom of the page. This method lets you edit the content directly and commit your changes on a new branch. After submitting your proposed changes, the Replicated team will verify the accuracy of the changes and perform an editorial review. If the PR is approved, it will be merged directly into the main branch. +![Admin Console Dashboard](/images/guides/kots/application.png) -- **Open a Github Issue** - To open a GitHub issue for this repository, click the Issues tab and click **New Issue**. This method may be more useful when you want to report a bug specifically for the documentation. If you are having an issue with the product itself, we encourage you to report it to us in a support issue submitted in the vendor portal. +[View a larger version of this image](/images/guides/kots/application.png) -## Setting Up Local WYSIWYG Previews +For applications installed with Replicated Embedded Cluster in a VM or bare metal server, the Admin Console also includes a **Cluster Management** tab where users can add and manage nodes in the embedded cluster, as shown below: -This website is built using [Docusaurus](https://docusaurus.io/), a modern static website generator. +![Admin console dashboard with Cluster Management tab](/images/gitea-ec-ready.png) -When you submit a PR in GitHub, Netlify builds a preview automatically. However, you can preview your changes locally using Node.js and npm. This repository uses npm as its package manager. +[View a larger version of this image](/images/gitea-ec-ready.png) -### Prerequisites +### KOTS CLI -- Node.js version 18 or higher -- npm (comes bundled with Node.js) +The KOTS command-line interface (CLI) is a kubectl plugin. Customers can run commands with the KOTS CLI to install and manage their application instances with KOTS programmatically. -### Start the local dev server with `npm start` +For information about getting started with the KOTS CLI, see [Installing the KOTS CLI](/reference/kots-cli-getting-started). -1. 
Install dependencies using npm: +The KOTS CLI can also be used to install an application without needing to access the Admin Console. This can be useful for automating installations and upgrades, such as in CI/CD pipelines. For information about how to perform headless installations from the command line, see [Installing with the KOTS CLI](/enterprise/installing-existing-cluster-automation). - ```bash - npm install - ``` +================ +File: docs/intro-replicated.mdx +================ +--- +pagination_prev: null +--- -2. Start a local development server in a browser window: +import ApiAbout from "/docs/partials/vendor-api/_api-about.mdx" +import Replicated from "/docs/partials/getting-started/_replicated-definition.mdx" +import Helm from "/docs/partials/helm/_helm-definition.mdx" +import Kots from "/docs/partials/kots/_kots-definition.mdx" +import KotsEntitlement from "/docs/partials/kots/_kots-entitlement-note.mdx" +import SDKOverview from "/docs/partials/replicated-sdk/_overview.mdx" +import CSDL from "/docs/partials/getting-started/_csdl-overview.mdx" +import PreflightSbAbout from "/docs/partials/preflights/_preflights-sb-about.mdx" - ```bash - npm start - ``` +# Introduction to Replicated -Most changes are reflected live without having to restart the server (changes to the sidebar file typically require restarting the dev server). This preview shows the formatting and styles as they would render on the live site. +This topic provides an introduction to the Replicated Platform, including a platform overview and a list of key features. It also describes the Commercial Software Distribution Lifecycle and how Replicated features can be used in each phase of the lifecycle. -If you encounter any build errors, they will appear in the terminal and often indicate issues like broken links or formatting problems in the content. +## About the Replicated Platform -## Build and test locally with `npm run build` and `npm run serve` + -Before pushing changes to the remote repository, build and serve the site locally to check for errors, including broken links. +The Replicated Platform features are designed to support ISVs during each phase of the Commercial Software Distribution Lifecycle. For more information, see [Commercial Software Distribution Lifecycle](#csdl) below. -1. Install dependencies using npm: +The following diagram demonstrates the process of using the Replicated Platform to distribute an application, install the application in a customer environment, and support the application after installation: - ```bash - npm install - ``` -1. Build the static site files: - - ```bash - npm run build - ``` - Any broken links and anchor links are listed in the output. +![replicated platform features workflow](/images/replicated-platform.png) -1. Serve the `build` directory locally to test: +[View a larger version of this image](/images/replicated-platform.png) - ```bash - npm run serve - ``` +The diagram above shows an application that is packaged with the [**Replicated SDK**](/vendor/replicated-sdk-overview). The application is tested in clusters provisioned with the [**Replicated Compatibility Matrix**](/vendor/testing-about), then added to a new release in the [**Vendor Portal**](/vendor/releases-about) using an automated CI/CD pipeline. -## Folder Structure and TOC +The application is then installed by a customer ("Big Bank") on a VM. 
To install, the customer downloads their license, which grants proxy access to the application images through the [**Replicated proxy registry**](/vendor/private-images-about). They also download the installation assets for the [**Replicated Embedded Cluster**](/vendor/embedded-overview) installer. -The folder structure is broken into several high-level categories under the main `docs` folder: vendor, enterprise, reference, release notes. +Embedded Cluster runs [**preflight checks**](/vendor/preflight-support-bundle-about) to verify that the environment meets the installation requirements, provisions a cluster on the VM, and installs [**Replicated KOTS**](intro-kots) in the cluster. KOTS provides an [**Admin Console**](intro-kots#kots-admin-console) where the customer enters application-specific configurations, runs application preflight checks, optionally joins nodes to the cluster, and then deploys the application. After installation, customers can manage both the application and the cluster from the Admin Console. -Images are under the `static` > `images` folder. +Finally, the diagram shows how [**instance data**](/vendor/instance-insights-event-data) is automatically sent from the customer environment to the Vendor Portal by the Replicated SDK API and the KOTS Admin Console. Additionally, tooling from the open source [**Troubleshoot**](https://troubleshoot.sh/docs/collect/) project is used to generate and send [**support bundles**](/vendor/preflight-support-bundle-about), which include logs and other important diagnostic data. -The TOC is managed in the `sidebar.js` file. You only need to edit the `sidebar.js` file when you are adding a new topic or deleting an existing topic. The `sidebar.js` file is the one that causes most of the merge conflicts because many technical writers are working on content daily. You will need to accept the changes from other contributors if you are committing a PR. +## Replicated Platform Features -Don't worry if you're not sure where in the TOC a new topic belongs. When you submit your PR, the Documentation team will edit it and help to find the right placement. +The following describes the key features of the Replicated Platform. -The right-hand TOC is created automatically when you add headings to a topic. +### Compatibility Matrix -## Topic Templates +Replicated Compatibility Matrix can be used to get kubectl access to running clusters within minutes or less. Compatibility Matrix supports various Kubernetes distributions and versions and can be interacted with through the Vendor Portal or the Replicated CLI. -You can find topic templates in the `docs/templates` folder. These templates are useful for anyone creating a new topic in this repository. +For more information, see [About Compatibility Matrix](/vendor/testing-about). -If you are using the templates to create a new topic in this repository, save the new file to the correct folder (`docs/vendor`, `docs/enterprise`, `docs/reference`, etc) and be sure to follow the [filenaming convention](#filenaming). +### Embedded Cluster -For additional templates designed for software vendors writing the end user documentation for their applications, see the [vendor-docs-starter](https://github.com/replicatedhq/vendor-docs-starter) repository. +Replicated Embedded Cluster is a Kubernetes installer based on the open source Kubernetes distribution k0s. With Embedded Cluster, users install and manage both the cluster and the application together as a single appliance on a VM or bare metal server. 
In this way, Kubernetes is _embedded_ with the application. -## Filenaming +Additionally, each version of Embedded Cluster includes a specific version of [Replicated KOTS](#kots) that is installed in the cluster during installation. KOTS is used by Embedded Cluster to deploy the application and also provides the Admin Console UI where users can manage both the application and the cluster. -If you are adding a new file, it must be named following our naming conventions. The file name should always start with the feature type (such as licenses, helm, or gitops). Depending on the content type, it typically also includes a secondary descriptor and a verb. Verbs are used when you are creating a task topic. +For more information, see [Embedded Cluster Overview](/vendor/embedded-overview). -Because we author content using Markdown, you must add the `.md` the file extension to the file name. +### KOTS (Admin Console) {#kots} -If you are adding a new topic to an existing feature category, follow the existing naming convention for that category. +KOTS is a kubectl plugin and in-cluster Admin Console that installs Kubernetes applications in customer-controlled environments. -**Example: Concept topic** +KOTS is used by [Replicated Embedded Cluster](#embedded-cluster) to deploy applications and also to provide the Admin Console UI where users can manage both the application and the cluster. KOTS can also be used to install applications in existing Kubernetes clusters in customer-controlled environments, including clusters in air-gapped environments with limited or no outbound internet access. -`snapshots-backup-hooks.md` +For more information, see [Introduction to KOTS](intro-kots). -**Example: Task topic** +### Preflight Checks and Support Bundles -`releases-creating-customer.md` + -**Example: Tutorial** +For more information, see [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about). -`tutorial-ha-cluster-deploying.md` +### Proxy Registry +The Replicated proxy registry grants proxy access to an application's images using the customer's unique license. This means that customers can get access to application images during installation without the vendor needing to provide registry credentials. -## Images +For more information, see [About the Replicated Proxy Registry](/vendor/private-images-about). -* Screenshots are use sparingly to minimize the maintenance of out-of-date content. However, we do include some screenshots to provide context. +### Replicated SDK -* Use a focused area of the UI, unless the entire screen is truly needed. If using a focused area, use approximately 400 pixels for the width. If capturing the entire screen, use a maximum of 600 pixels for the width. +The Replicated SDK is a Helm chart that can be installed as a small service alongside your application. It provides an in-cluster API that can be used to communicate with the Vendor Portal. For example, the SDK API can return details about the customer's license or report telemetry on the application instance back to the Vendor Portal. -* We only use PNG format, which renders a better quality and lossless compression. +For more information, see [About the Replicated SDK](/vendor/replicated-sdk-overview). -* For privacy and legal purposes, do not reveal personal information, IP addresses, domain information, login credentials and so on in screenshots, code blocks, or text. +### Vendor Portal -* Add _alt text_ for all images to provide accessibility. 
The user will hear the alt text spoken out loud by the screen reader, so it is important to use succinct text that is clear and complete. For more information about alt text formatting, see the following section. +The Replicated Vendor Portal is the web-based user interface that you can use to configure and manage all of the Replicated features for distributing and managing application releases, supporting your release, viewing customer insights and reporting, and managing teams. -* For images that are difficult to see, add a link below the image where the reader can view a larger version: `[View a larger version of this image](PATH-TO-LARGER-IMAGE-FILE)` where `PATH-TO-LARGER-VERSION` is the path to the larger image in the `static/images` folder. For an example, see the private registry diagram in [Connecting to a Private Image Registry](https://docs.replicated.com/vendor/packaging-private-images#about-connecting-to-an-external-registry). +The Vendor Portal can also be interacted with programmatically using the following developer tools: +* **Replicated CLI**: The Replicated CLI can be used to complete tasks programmatically, including all tasks for packaging and managing applications, and managing artifacts such as teams, license files, and so on. For more information, see [Installing the Replicated CLI](/reference/replicated-cli-installing). -## Using Markdown with our Docusaurus CSS +* **Vendor API v3**: The Vendor API can be used to complete tasks programmatically, including all tasks for packaging and managing applications, and managing artifacts such as teams and license files. For more information, see [Using the Vendor API v3](/reference/vendor-api-using). -Replicated uses its own CSS, and Docusaurus supports its own specific Markdown syntax. The following table provides an overview of the supported syntax elements. +## Commercial Software Distribution Lifecycle {#csdl} -| Element | Syntax | -|---------------------------------------------|-------------------------------------------------------| -| Headings | `# H1`, `## H2`, `### H3` | -| Bold | `**bold text**` | -| Italic | `_italicized text_` | -| Ordered List | `1.` First item (use `1.` for each item) | -| Unordered List | `-` or `*` (for each item) | -| Code or command in a sentence | ``code`` | -| Link - external site | `[Title](https://www.example.com)` | -| Link - topic in same folder | `[Title](filename) without file extension` | -| Link - topic in different folder | `[Title](../folder/file-name) without file extension` | -| Link - section in topic in same folder | `[Title](file-name#section-name)` | -| Link - section in topic in different folder | `[Title](../folder/file-name#section-name)` | -| Image | `![alt text](images/.png)` | +Replicated Platform features are designed to support ISVs in each phase of the Commercial Software Distribution Lifecycle shown below: -**Note:** Alt text, used with image syntax, is parsed by screen readers to support accessibility. +![software distribution lifecycle wheel](/images/software-dev-lifecycle.png) -### Admonitions +[View a larger version of this image](/images/software-dev-lifecycle.png) -Note admonitions are formatted as follows: + -``` -:::note -text -::: -``` +For more information about to download a copy of The Commercial Software Distribution Handbook, see [The Commercial Software Distribution Handbook](https://www.replicated.com/the-commercial-software-distribution-handbook). 
-Important admonitions, typically used as a warning, are formatted as follows: +The following describes the phases of the software distribution lifecycle: -``` -:::important -text -::: -``` +* **[Develop](#develop)**: Application design and architecture decisions align with customer needs, and development teams can quickly iterate on new features. +* **[Test](#test)**: Run automated tests in several customer-representative environments as part of continuous integration and continuous delivery (CI/CD) workflows. +* **[Release](#release)**: Use channels to share releases with external and internal users, publish release artifacts securely, and use consistent versioning. +* **[License](#license)**: Licenses are customized to each customer and are easy to issue, manage, and update. +* **[Install](#install)**: Provide unique installation options depending on customers' preferences and experience levels. +* **[Report](#report)**: Make more informed prioritization decisions by collecting usage and performance metadata for application instances running in customer environments. +* **[Support](#support)**: Diagnose and resolve support issues quickly. -### Tables +For more information about the Replicated features that support each of these phases, see the sections below. -Traditional markdown for tables can be limiting. Instead, we use HTML tables, which lets us manage the width of the table columns. The template topic `procedure.md` contains an example of the HTML formatting for tables. +### Develop -**Note:** There are still many instances of the old markdown table formatting in the content that was carried over from the content migration, but we do not encourage the use of it going forward. +The Replicated SDK exposes an in-cluster API that can be developed against to quickly integrate and test core functionality with an application. For example, when the SDK is installed alongside an application in a customer environment, the in-cluster API can be used to send custom metrics from the instance to the Replicated vendor platform. -## Style Guidelines +For more information about using the Replicated SDK, see [About the Replicated SDK](/vendor/replicated-sdk-overview). -Whether you are editing existing content or adding a new topic, our goal is to make it task-based. The `procedure.md` template provides the formatting guidelines that you need. You can also see a published example of a task [here](https://docs.replicated.com/vendor/releases-creating-customer). +### Test -Replicated product documentation has in-house style guidelines that the Documentation team uses when reviewing your PR. Please feel free to just add the content you need, knowing that our team will be there to assist with editorial reviews and information architecture, such as TOC placement, whether to create a task, and so on. The Documentation team will actively write content, not just give editorial reviews, so we take the heavy burden off of you. We encourage your contributions in the true open-source spirit. +The Replicated Compatibility Matrix rapidly provisions ephemeral Kubernetes clusters, including multi-node and OpenShift clusters. When integrated into existing CI/CD pipelines for an application, the Compatibility Matrix can be used to automatically create a variety of customer-representative environments for testing code changes. -Replicated employees can review more information in the Documentation Style Guide in the employee handbook. +For more information, see [About Compatibility Matrix](/vendor/testing-about). 
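For example, a CI job can use the Replicated CLI to provision a short-lived cluster, run tests against its kubeconfig, and let the cluster expire on its own. A minimal sketch (the cluster name, distribution, and TTL values are illustrative):

```bash
# Provision an ephemeral k3s cluster that is automatically deleted after one hour
replicated cluster create --name ci-smoke-test --distribution k3s --ttl 1h

# Download the kubeconfig for the new cluster so the test suite can target it
replicated cluster kubeconfig --name ci-smoke-test
```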
+### Release -## SME and Editorial Reviews +Release channels in the Replicated Vendor Portal allow ISVs to make different application versions available to different customers, without needing to maintain separate code bases. For example, a "Beta" channel can be used to share beta releases of an application with only a certain subset of customers. -All PRs that are submitted are reviewed by the Replicated Docs team for editorial review. +For more information about working with channels, see [About Channels and Releases](/vendor/releases-about). -Content that is submitted by our customers and the open-source community are also reviewed by our Replicated subject matter experts (SMEs) to help ensure technical accuracy. +Additionally, the Replicated proxy registry grants proxy access to private application images using the customers' license. This ensures that customers have the right access to images based on the channel they are assigned. For more information about using the proxy registry, see [About the Replicated Proxy Registry](/vendor/private-images-about). -================ -File: sidebars.js -================ -/** - * Creating a sidebar enables you to: - - create an ordered group of docs - - render a sidebar for each doc of that group - - provide next/previous navigation +### License - The sidebars can be generated from the filesystem, or explicitly defined here. +Create customers in the Replicated Vendor Portal to handle licensing for your application in both online and air gap environments. For example: +* License free trials and different tiers of product plans +* Create and manage custom license entitlements +* Verify license entitlements both before installation and during runtime +* Measure and report usage - Create as many sidebars as you want. - */ +For more information about working with customers and custom license fields, see [About Customers](/vendor/licenses-about). -// @ts-check +### Install -// @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} -const sidebars = { - // By default, Docusaurus generates a sidebar from the docs folder structure - //tutorialSidebar: [{type: 'autogenerated', dirName: '.'}], +Applications distributed with the Replicated Platform can support multiple different installation methods from the same application release, helping you to meet your customers where they are. For example: - // But you can create a sidebar manually +* Customers who are not experienced with Kubernetes or who prefer to deploy to a dedicated cluster in their environment can install on a VM or bare metal server with the Replicated Embedded Cluster installer. For more information, see [Embedded Cluster Overview](/vendor/embedded-overview). +* Customers familiar with Kubernetes and Helm can install in their own existing cluster using Helm. For more information, see [Installing with Helm](/vendor/install-with-helm). +* Customers installing into environments with limited or no outbound internet access (often referred to as air-gapped environments) can securely access and push images to their own internal registry, then install using Helm or a Replicated installer. For more information, see [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap) and [Installing and Updating with Helm in Air Gap Environments (Alpha)](/vendor/helm-install-airgap). 
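As an illustration of the Helm path above, a customer with a valid license can typically log in to the Replicated registry and install with standard Helm commands. A minimal sketch, assuming a hypothetical application slug `app-slug` and chart name `app`; the email address and license ID are placeholders:

```bash
# Authenticate to the Replicated registry with the customer's license credentials
helm registry login registry.replicated.com \
  --username customer@example.com \
  --password LICENSE_ID

# Install the chart from the channel-specific OCI path for the application
helm install app oci://registry.replicated.com/app-slug/stable/app
```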
- main: [ - 'intro', - { - type: 'category', - label: 'Release Notes', - items: [ - 'release-notes/rn-whats-new', - 'release-notes/rn-embedded-cluster', - 'release-notes/rn-app-manager', - 'release-notes/rn-kubernetes-installer', - 'release-notes/rn-replicated-sdk', - 'release-notes/rn-vendor-platform', - ], - }, +### Report - //GET STARTED - {type: 'html', value: '
    getting started
    ', defaultStyle: true}, - 'intro-replicated', - 'vendor/kots-faq', - 'vendor/quick-start', - 'vendor/replicated-onboarding', - // { - // type: 'category', - // label: 'Planning', - // items: [ - // 'vendor/planning-questionnaire', - // 'vendor/namespaces', - // ], - // }, - { - type: 'category', - label: 'Tutorials', - items: [ - { - type: 'category', - label: 'Install a Helm Chart on a VM with Embedded Cluster', - items: [ - 'vendor/tutorial-embedded-cluster-setup', - 'vendor/tutorial-embedded-cluster-create-app', - 'vendor/tutorial-embedded-cluster-package-chart', - 'vendor/tutorial-embedded-cluster-create-release', - 'vendor/tutorial-embedded-cluster-create-customer', - 'vendor/tutorial-embedded-cluster-install', - ], - }, - { - type: 'category', - label: 'Install a Helm Chart with KOTS and the Helm CLI', - items: [ - 'vendor/tutorial-kots-helm-setup', - 'vendor/tutorial-kots-helm-get-chart', - 'vendor/tutorial-kots-helm-create-app', - 'vendor/tutorial-kots-helm-package-chart', - 'vendor/tutorial-kots-helm-create-release', - 'vendor/tutorial-kots-helm-create-customer', - 'vendor/tutorial-kots-helm-install-kots', - 'vendor/tutorial-kots-helm-install-helm', - ], - }, - { - type: 'category', - label: 'Install with KOTS in an Existing Cluster', - items: [ - 'vendor/tutorial-cli-setup', - 'vendor/tutorial-cli-install-cli', - 'vendor/tutorial-cli-create-app', - 'vendor/tutorial-cli-manifests', - 'vendor/tutorial-cli-create-release', - 'vendor/tutorial-cli-create-customer', - 'vendor/tutorial-cli-install-app-manager', - 'vendor/tutorial-cli-deploy-app', - 'vendor/tutorial-cli-create-new-version', - 'vendor/tutorial-cli-update-app', - ], - }, - ], - }, - { - type: 'category', - label: 'Labs', - items: - [ - {type: 'link', href: 'https://play.instruqt.com/embed/replicated/tracks/distributing-with-replicated?token=em_VHOEfNnBgU3auAnN', label: 'Distributing Your Application with Replicated'}, - {type: 'link', href: 'https://play.instruqt.com/embed/replicated/tracks/delivering-as-an-appliance?token=em_lUZdcv0LrF6alIa3', label: 'Delivering Your Application as a Kubernetes Appliance'}, - {type: 'link', href: 'https://play.instruqt.com/embed/replicated/tracks/avoiding-installation-pitfalls?token=em_gJjtIzzTTtdd5RFG', label: 'Avoiding Installation Pitfalls'}, - {type: 'link', href: 'https://play.instruqt.com/embed/replicated/tracks/closing-information-gap?token=em_MO2XXCz3bAgwtEca', label: 'Closing the Support Information Gap'}, - {type: 'link', href: 'https://play.instruqt.com/embed/replicated/tracks/protecting-your-assets?token=em_7QjY34G_UHKoREBd', label: 'Protecting Your Assets'}, - ], - }, - // PRODUCT DOCS - {type: 'html', value: '
    product docs
    ', defaultStyle: true}, - { - type: 'category', - label: 'Vendor Portal', - items: [ - { - type: 'category', - label: 'Vendor Portal Teams and Accounts', - items: [ - 'vendor/vendor-portal-creating-account', - 'vendor/team-management', - 'vendor/team-management-github-username', - { - type: 'category', - label: 'Configuring Role-based Access Control', - items: [ - 'vendor/team-management-rbac-configuring', - 'vendor/team-management-rbac-resource-names', - ], - }, - { - type: 'category', - label: 'Configuring Authentication', - items: [ - 'vendor/team-management-two-factor-auth', - 'vendor/team-management-google-auth', - 'vendor/team-management-saml-auth', - ], - }, - 'vendor/team-management-slack-config', - 'vendor/replicated-api-tokens', - ], - }, - { - type: 'category', - label: 'Applications', - items: [ - 'vendor/vendor-portal-manage-app', - 'vendor/vendor-portal-application-settings', - ], - }, - { - type: 'category', - label: 'Channels and Releases', - items: [ - 'vendor/releases-about', - 'vendor/releases-creating-channels', - 'vendor/releases-creating-releases', - 'vendor/releases-creating-cli', - 'vendor/helm-install-release', - 'vendor/releases-sharing-license-install-script', - 'vendor/releases-share-download-portal', - 'reference/linter', - ], - }, - { - type: 'category', - label: 'Customers and Licenses', - items: [ - 'vendor/licenses-about', - 'vendor/releases-creating-customer', - 'vendor/licenses-adding-custom-fields', - 'vendor/licenses-install-types', - 'vendor/licenses-about-types', - 'vendor/licenses-download', - { - type: 'category', - label: 'Querying License Entitlements', - items: [ - 'vendor/licenses-using-builtin-fields', - 'vendor/licenses-reference-sdk', - 'vendor/licenses-reference-helm', - 'vendor/licenses-referencing-fields', - 'vendor/licenses-reference-kots-runtime', - 'vendor/licenses-verify-fields-sdk-api', - ] - }, - ], - }, - { - type: 'category', - label: 'Custom Domains', - items: [ - 'vendor/custom-domains', - 'vendor/custom-domains-using', - ], - }, - { - type: 'category', - label: 'Insights and Telemetry', - items: [ - 'vendor/instance-insights-event-data', - 'vendor/insights-app-status', - 'vendor/telemetry-air-gap', - 'vendor/customer-adoption', - 'vendor/customer-reporting', - 'vendor/instance-insights-details', - 'vendor/instance-notifications-config', - 'vendor/custom-metrics', - 'vendor/instance-data-export', - ], - }, - ], - }, - { - type: 'category', - label: 'Compatibility Matrix', - items: [ - 'vendor/testing-about', - 'vendor/testing-pricing', - 'vendor/testing-supported-clusters', - 'vendor/testing-cluster-addons', - 'vendor/compatibility-matrix-usage', - 'vendor/testing-how-to', - 'vendor/testing-ingress', - ], - }, - { - type: 'category', - label: 'Embedded Cluster', - items: [ - 'vendor/embedded-overview', - 'vendor/embedded-using', - 'reference/embedded-config', - { - type: 'category', - label: 'Installing with Embedded Cluster', - items: [ - 'enterprise/installing-embedded-requirements', - 'enterprise/installing-embedded', - 'enterprise/installing-embedded-air-gap', - 'enterprise/installing-embedded-automation', - 'reference/embedded-cluster-install', - ], - }, - 'enterprise/embedded-manage-nodes', - 'enterprise/updating-embedded', - 'enterprise/embedded-tls-certs', - 'vendor/embedded-disaster-recovery', - ], - }, - { - type: 'category', - label: 'KOTS', - items: [ - 'intro-kots', - { - type: 'category', - label: 'Configuring KOTS', - items: [ - { - type: 'category', - label: 'Configuring the HelmChart Custom Resource', - 
items: [ - 'vendor/helm-native-about', - 'vendor/helm-native-v2-using', - 'vendor/helm-packaging-airgap-bundles', - 'vendor/helm-optional-value-keys', - 'vendor/helm-v2-migrate', - ], - }, - { - type: 'category', - label: 'Customizing the Admin Console and Download Portal', - items: [ - 'vendor/admin-console-customize-app-icon', - 'vendor/admin-console-adding-buttons-links', - 'vendor/admin-console-port-forward', - 'vendor/admin-console-prometheus-monitoring', - ], - }, - { - type: 'category', - label: 'Configuring the Admin Console Config Screen', - items: [ - 'vendor/config-screen-about', - 'vendor/admin-console-customize-config-screen', - 'vendor/config-screen-map-inputs', - 'vendor/config-screen-conditional', - { - type: 'category', - label: 'Tutorial: Set Helm Chart Values with KOTS', - items: [ - 'vendor/tutorial-config-setup', - 'vendor/tutorial-config-get-chart', - 'vendor/tutorial-config-create-app', - 'vendor/tutorial-config-package-chart', - 'vendor/tutorial-config-create-release', - 'vendor/tutorial-config-create-customer', - 'vendor/tutorial-config-install-kots', - ], - }, - ], - }, - { - type: 'category', - label: 'Managing Resources and Objects', - items: [ - 'vendor/admin-console-display-app-status', - { - type: 'category', - label: 'Conditionally Deploying Resources', - items: [ - 'vendor/packaging-include-resources', - 'vendor/helm-optional-charts', - 'vendor/tutorial-adding-db-config', - ], - }, - 'vendor/resources-annotations-templating', - 'vendor/orchestrating-resource-deployment', - 'vendor/database-config-adding-options', - 'vendor/packaging-cleaning-up-jobs', - 'vendor/packaging-ingress', - ], - }, - { - type: 'category', - label: 'Managing KOTS', - items: [ - 'vendor/packaging-kots-versions', - 'vendor/packaging-rbac', - 'vendor/packaging-air-gap-excluding-minio', - ], - }, - { - type: 'category', - label: 'Distributing Kubernetes Operators with KOTS', - items: [ - 'vendor/operator-packaging-about', - 'vendor/operator-defining-additional-images', - 'vendor/operator-referencing-images', - 'vendor/operator-defining-additional-namespaces', - ], - }, - { - type: 'category', - label: 'KOTS Custom Resources', - items: [ - 'reference/custom-resource-about', - 'reference/custom-resource-application', - 'reference/custom-resource-config', - 'reference/custom-resource-helmchart-v2', - 'reference/custom-resource-helmchart', - 'reference/custom-resource-lintconfig', - ], - }, - { - type: 'category', - label: 'KOTS Template Functions', - items: [ - 'reference/template-functions-about', - 'reference/template-functions-examples', - 'reference/template-functions-config-context', - 'reference/template-functions-identity-context', - 'reference/template-functions-kurl-context', - 'reference/template-functions-license-context', - 'reference/template-functions-static-context', - ], - }, - 'reference/cron-expressions', - ], - }, - { - type: 'category', - label: 'Installing in Existing Clusters with KOTS', - items: [ - 'enterprise/installing-overview', - 'enterprise/installing-general-requirements', - 'enterprise/installing-existing-cluster', - 'enterprise/installing-existing-cluster-airgapped', - 'enterprise/installing-existing-cluster-automation', - 'enterprise/installing-stateful-component-requirements', - ], - }, - { - type: 'category', - label: 'Performing Updates in Existing Cluster KOTS Installations', - items: [ - 'enterprise/updating-app-manager', - 'enterprise/updating-apps', - 'enterprise/updating-patching-with-kustomize', - ], - }, - { - type: 'category', - label: 
'Configuring Local Image Registries', - items: [ - 'enterprise/image-registry-settings', - 'enterprise/image-registry-rate-limits', - ], - }, - 'enterprise/updating-licenses', - { - type: 'category', - label: 'Performing Backup and Restore with Snapshots', - items: [ - 'vendor/snapshots-overview', - { - type: 'category', - label: 'Enabling and Configuring Snapshots', - items: [ - 'vendor/snapshots-configuring-backups', - 'reference/custom-resource-backup', - 'vendor/snapshots-hooks', - ], - }, - { - type: 'category', - label: 'Configuring Backup Storage for Snaphots', - items: [ - 'enterprise/snapshots-velero-cli-installing', - 'enterprise/snapshots-configuring-hostpath', - 'enterprise/snapshots-configuring-nfs', - 'enterprise/snapshots-storage-destinations', - 'enterprise/snapshots-velero-installing-config', - ], - }, - 'enterprise/snapshots-creating', - 'enterprise/snapshots-restoring-full', - 'enterprise/snapshots-updating-with-admin-console', - 'enterprise/snapshots-troubleshooting-backup-restore', - ], - }, - { - type: 'category', - label: 'Managing Admin Console User Access', - items: [ - 'enterprise/auth-changing-passwords', - 'enterprise/auth-identity-provider', - 'enterprise/auth-configuring-rbac', - ], - }, - { - type: 'category', - label: 'Monitoring Applications with Prometheus', - items: [ - 'enterprise/monitoring-applications', - 'enterprise/monitoring-access-dashboards', - ], - }, - 'enterprise/status-viewing-details', - 'enterprise/delete-admin-console', - { - type: 'category', - label: 'Using a GitOps Workflow', - items: [ - 'enterprise/gitops-workflow', - 'enterprise/gitops-managing-secrets', - ], - }, - ], - }, - { - type: 'category', - label: 'kURL', - items: [ - 'vendor/kurl-about', - { - type: 'category', - label: 'Configuring kURL Installers', - items: [ - 'vendor/packaging-embedded-kubernetes', - 'vendor/packaging-installer-storage', - 'vendor/installer-history', - 'vendor/kurl-nodeport-services', - ], - }, - { - type: 'category', - label: 'Installing with kURL', - items: [ - 'enterprise/installing-kurl-requirements', - 'enterprise/installing-kurl', - 'enterprise/installing-kurl-airgap', - 'enterprise/installing-kurl-automation', - ], - }, - 'enterprise/cluster-management-add-nodes', - { - type: 'category', - label: 'Performing Updates with kURL', - items: [ - 'enterprise/updating-kurl-about', - 'enterprise/updating-kurl', - ], - }, - 'vendor/packaging-using-tls-certs', - 'enterprise/updating-tls-cert', - 'enterprise/image-registry-kurl', - 'enterprise/monitoring-external-prometheus', - 'vendor/kurl-reset', - ], - }, - { - type: 'category', - label: 'Helm Installations with Replicated', - items: [ - 'vendor/helm-install-overview', - 'vendor/helm-install-values-schema', - 'vendor/install-with-helm', - 'vendor/helm-install-airgap', - 'vendor/using-third-party-registry-proxy', - 'vendor/helm-install-troubleshooting', - ], - }, - { - type: 'category', - label: 'Replicated SDK', - items: [ - 'vendor/replicated-sdk-overview', - 'vendor/replicated-sdk-installing', - 'vendor/replicated-sdk-airgap', - 'vendor/replicated-sdk-development', - 'vendor/replicated-sdk-customizing', - ], - }, - { - type: 'category', - label: 'Preflight Checks and Support Bundles', - items: [ - 'vendor/preflight-support-bundle-about', - { - type: 'category', - label: 'Preflight Checks', - items: [ - 'vendor/preflight-defining', - 'vendor/preflight-examples', - 'vendor/preflight-running', - 'vendor/preflight-host-preflights', - { - type: 'category', - label: 'Tutorial: Add Preflight Checks to a Helm 
Chart', - items: [ - 'vendor/tutorial-preflight-helm-setup', - 'vendor/tutorial-preflight-helm-get-chart', - 'vendor/tutorial-preflight-helm-add-spec', - 'vendor/tutorial-preflight-helm-create-release', - 'vendor/tutorial-preflight-helm-create-customer', - 'vendor/tutorial-preflight-helm-install', - 'vendor/tutorial-preflight-helm-install-kots', - ], - }, - ], - }, - { - type: 'category', - label: 'Support Bundles', - items: [ - 'vendor/support-bundle-customizing', - 'vendor/support-bundle-examples', - 'vendor/support-online-support-bundle-specs', - 'vendor/support-modular-support-bundle-specs', - { - type: 'category', - label: 'Generating Support Bundles', - items: [ - 'vendor/support-bundle-generating', - 'vendor/support-bundle-embedded', - 'enterprise/troubleshooting-an-app', - 'vendor/support-host-support-bundles', - ], - }, - 'vendor/support-inspecting-support-bundles', - 'vendor/support-enabling-direct-bundle-uploads', - 'vendor/support-submit-request', - ], - }, - 'vendor/preflight-sb-helm-templates-about', - { - type: 'category', - label: 'Troubleshoot Custom Resources', - items: [ - 'reference/custom-resource-preflight', - 'reference/custom-resource-redactor', - ], - }, - ], - }, - { - type: 'category', - label: 'Replicated Proxy Registry', - items: [ - 'vendor/private-images-about', - 'vendor/packaging-private-images', - 'vendor/helm-image-registry', - 'vendor/private-images-kots', - 'vendor/private-images-tags-digests', - { - type: 'category', - label: 'Replicated Private Registry', - items: [ - 'vendor/private-images-replicated', - 'vendor/packaging-private-registry-security', - ], - }, - 'vendor/packaging-public-images', - 'vendor/tutorial-ecr-private-images', - ], - }, - { - type: 'category', - label: 'Integrating Replicated in CI/CD Workflows', - items: [ - 'vendor/ci-overview', - 'vendor/ci-workflows', - 'vendor/ci-workflows-github-actions', - ], - }, +When installed alongside an application, the Replicated SDK and Replicated KOTS automatically send instance data from the customer environment to the Replicated Vendor Portal. This instance data includes health and status indicators, adoption metrics, and performance metrics. For more information, see [About Instance and Event Data](/vendor/instance-insights-event-data). - // DEVELOPER TOOLS - {type: 'html', value: '
    Developer tools
', defaultStyle: true},
-    'reference/replicated-sdk-apis',
-    {
-      type: 'category',
-      label: 'Replicated CLI', // This label is generated. Do not edit.
-      items: [ // This list is generated. Do not edit.
-        'reference/replicated-cli-installing',
-        'reference/replicated',
-        'reference/replicated-cli-api',
-        'reference/replicated-cli-api-get',
-        'reference/replicated-cli-api-patch',
-        'reference/replicated-cli-api-post',
-        'reference/replicated-cli-api-put',
-        'reference/replicated-cli-app',
-        'reference/replicated-cli-app-create',
-        'reference/replicated-cli-app-ls',
-        'reference/replicated-cli-app-rm',
-        'reference/replicated-cli-channel',
-        'reference/replicated-cli-channel-create',
-        'reference/replicated-cli-channel-demote',
-        'reference/replicated-cli-channel-disable-semantic-versioning',
-        'reference/replicated-cli-channel-enable-semantic-versioning',
-        'reference/replicated-cli-channel-inspect',
-        'reference/replicated-cli-channel-ls',
-        'reference/replicated-cli-channel-rm',
-        'reference/replicated-cli-channel-un-demote',
-        'reference/replicated-cli-cluster',
-        'reference/replicated-cli-cluster-addon',
-        'reference/replicated-cli-cluster-addon-create',
-        'reference/replicated-cli-cluster-addon-create-object-store',
-        'reference/replicated-cli-cluster-addon-ls',
-        'reference/replicated-cli-cluster-addon-rm',
-        'reference/replicated-cli-cluster-create',
-        'reference/replicated-cli-cluster-kubeconfig',
-        'reference/replicated-cli-cluster-ls',
-        'reference/replicated-cli-cluster-nodegroup',
-        'reference/replicated-cli-cluster-nodegroup-ls',
-        'reference/replicated-cli-cluster-port',
-        'reference/replicated-cli-cluster-port-expose',
-        'reference/replicated-cli-cluster-port-ls',
-        'reference/replicated-cli-cluster-port-rm',
-        'reference/replicated-cli-cluster-prepare',
-        'reference/replicated-cli-cluster-rm',
-        'reference/replicated-cli-cluster-shell',
-        'reference/replicated-cli-cluster-update',
-        'reference/replicated-cli-cluster-update-nodegroup',
-        'reference/replicated-cli-cluster-update-ttl',
-        'reference/replicated-cli-cluster-upgrade',
-        'reference/replicated-cli-cluster-versions',
-        'reference/replicated-cli-completion',
-        'reference/replicated-cli-customer',
-        'reference/replicated-cli-customer-archive',
-        'reference/replicated-cli-customer-create',
-        'reference/replicated-cli-customer-download-license',
-        'reference/replicated-cli-customer-inspect',
-        'reference/replicated-cli-customer-ls',
-        'reference/replicated-cli-customer-update',
-        'reference/replicated-cli-default',
-        'reference/replicated-cli-default-clear-all',
-        'reference/replicated-cli-default-clear',
-        'reference/replicated-cli-default-set',
-        'reference/replicated-cli-default-show',
-        'reference/replicated-cli-installer',
-        'reference/replicated-cli-installer-create',
-        'reference/replicated-cli-installer-ls',
-        'reference/replicated-cli-instance',
-        'reference/replicated-cli-instance-inspect',
-        'reference/replicated-cli-instance-ls',
-        'reference/replicated-cli-instance-tag',
-        'reference/replicated-cli-login',
-        'reference/replicated-cli-logout',
-        'reference/replicated-cli-registry',
-        'reference/replicated-cli-registry-add',
-        'reference/replicated-cli-registry-add-dockerhub',
-        'reference/replicated-cli-registry-add-ecr',
-        'reference/replicated-cli-registry-add-gar',
-        'reference/replicated-cli-registry-add-gcr',
-        'reference/replicated-cli-registry-add-ghcr',
-        'reference/replicated-cli-registry-add-other',
-        'reference/replicated-cli-registry-add-quay',
-        'reference/replicated-cli-registry-ls',
-        'reference/replicated-cli-registry-rm',
-        'reference/replicated-cli-registry-test',
-        'reference/replicated-cli-release',
-        'reference/replicated-cli-release-compatibility',
-        'reference/replicated-cli-release-create',
-        'reference/replicated-cli-release-download',
-        'reference/replicated-cli-release-inspect',
-        'reference/replicated-cli-release-lint',
-        'reference/replicated-cli-release-ls',
-        'reference/replicated-cli-release-promote',
-        'reference/replicated-cli-release-test',
-        'reference/replicated-cli-release-update',
-        'reference/replicated-cli-version',
-        'reference/replicated-cli-version-upgrade',
-        'reference/replicated-cli-vm',
-        'reference/replicated-cli-vm-create',
-        'reference/replicated-cli-vm-ls',
-        'reference/replicated-cli-vm-port',
-        'reference/replicated-cli-vm-port-expose',
-        'reference/replicated-cli-vm-port-ls',
-        'reference/replicated-cli-vm-port-rm',
-        'reference/replicated-cli-vm-rm',
-        'reference/replicated-cli-vm-update',
-        'reference/replicated-cli-vm-update-ttl',
-        'reference/replicated-cli-vm-versions',
-      ],
-    },
-    {
-      type: 'category',
-      label: 'KOTS CLI',
-      items: [
-        'reference/kots-cli-getting-started',
-        'reference/kots-cli-global-flags',
-        {
-          type: 'category',
-          label: 'admin-console',
-          items: [
-            'reference/kots-cli-admin-console-index',
-            'reference/kots-cli-admin-console-garbage-collect-images',
-            'reference/kots-cli-admin-console-generate-manifests',
-            'reference/kots-cli-admin-console-push-images',
-            'reference/kots-cli-admin-console-upgrade',
-          ],
-        },
-        {
-          type: 'category',
-          label: 'backup',
-          items: [
-            'reference/kots-cli-backup-index',
-            'reference/kots-cli-backup-ls',
-          ],
-        },
-        {
-          type: 'category',
-          label: 'docker',
-          items: [
-            'reference/kots-cli-docker-index',
-            'reference/kots-cli-docker-ensure-secret',
-          ],
-        },
-        'reference/kots-cli-download',
-        'reference/kots-cli-enable-ha',
-        {
-          type: 'category',
-          label: 'get',
-          items: [
-            'reference/kots-cli-get-index',
-            'reference/kots-cli-get-apps',
-            'reference/kots-cli-get-backups',
-            'reference/kots-cli-get-config',
-            'reference/kots-cli-get-restores',
-            'reference/kots-cli-get-versions',
-          ],
-        },
-        {
-          type: 'category',
-          label: 'identity-service',
-          items: [
-            'reference/kots-cli-identity-service-index',
-            'reference/kots-cli-identity-service-enable-shared-password',
-          ],
-        },
-        'reference/kots-cli-install',
-        'reference/kots-cli-pull',
-        'reference/kots-cli-remove',
-        'reference/kots-cli-reset-password',
-        'reference/kots-cli-reset-tls',
-        {
-          type: 'category',
-          label: 'restore',
-          items: [
-            'reference/kots-cli-restore-index',
-            'reference/kots-cli-restore-ls',
-          ],
-        },
-        {
-          type: 'category',
-          label: 'set',
-          items: [
-            'reference/kots-cli-set-index',
-            'reference/kots-cli-set-config',
-          ],
-        },
-        'reference/kots-cli-upload',
-        {
-          type: 'category',
-          label: 'upstream',
-          items: [
-            'reference/kots-cli-upstream',
-            'reference/kots-cli-upstream-download',
-            'reference/kots-cli-upstream-upgrade',
-          ],
-        },
-        {
-          type: 'category',
-          label: 'velero',
-          items: [
-
-            'reference/kots-cli-velero-configure-aws-s3',
-            'reference/kots-cli-velero-configure-azure',
-            'reference/kots-cli-velero-configure-gcp',
-            'reference/kots-cli-velero-configure-hostpath',
-            'reference/kots-cli-velero-configure-internal',
-            'reference/kots-cli-velero-configure-nfs',
-            'reference/kots-cli-velero-configure-other-s3',
-            'reference/kots-cli-velero-ensure-permissions',
-            'reference/kots-cli-velero-index',
-            'reference/kots-cli-velero-print-fs-instructions',
-          ],
-        },
-      ],
-    },
-    {
-      type: 'category',
-      label: 'Vendor API v3',
-      items: [
-        {
-          type: 'doc',
-          id: 'reference/vendor-api-using'
-        },
-        {
-          type: 'link',
-          label: 'Vendor API v3 Documentation',
-          href: 'https://replicated-vendor-api.readme.io/v3/'
-        },
-      ],
-    },
+ISVs can also set up email and Slack notifications to get alerted about important instance issues or performance trends. For more information, see [Configuring Instance Notifications](/vendor/instance-notifications-config).
-    //OPEN SOURCE DOCS
-    {type: 'html', value: 'open source docs', defaultStyle: true},
-    {type: 'link', href: 'https://kurl.sh/docs/introduction/', label: 'kURL.sh'},
-    {type: 'link', href: 'https://troubleshoot.sh/docs/collect/', label: 'Troubleshoot.sh'},
+### Support
-    // POLICIES
-    {type: 'html', value: 'platform overview', defaultStyle: true},
-    {
-      type: 'category',
-      label: 'Replicated Policies',
-      items: [
-        'vendor/policies-vulnerability-patch',
-        'vendor/policies-support-lifecycle',
-        'vendor/policies-data-transmission',
-        'vendor/policies-infrastructure-and-subprocessors',
-      ],
-    },
-    {
-      type: 'category',
-      label: 'Replicated Data Storage',
-      items: [
-        'vendor/data-availability',
-        'vendor/offsite-backup'
-      ],
-    },
-    {
-      type: 'category',
-      label: 'Security at Replicated',
-      items: [
-        {
-          type: 'link',
-          label: 'Security at Replicated',
-          href: 'https://www.replicated.com/security/'
-        },
-        'enterprise/sbom-validating',
-        'vendor/replicated-sdk-slsa-validating',
-      ],
-    },
-
-  ],
-};
+Support teams can use Replicated features to more quickly diagnose and resolve application issues. For example:
-module.exports = sidebars;
+- Customize and generate support bundles, which collect and analyze redacted information from the customer's cluster, environment, and application instance. See [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about).
+- Provision customer-representative environments with Compatibility Matrix to recreate and diagnose issues. See [About Compatibility Matrix](/vendor/testing-about).
+- Get insights into an instance's status by accessing telemetry data, which covers the health of the application, the current application version, and details about the infrastructure and cluster where the application is running. For more information, see [Customer Reporting](/vendor/customer-reporting).

 ================
-File: variables.js
+File: docs/intro.md
 ================
-const variables = {
-  productName: 'Replicated',
-  exampleVariable: 'Example'
-};
+---
+slug: /
+pagination_next: null
+---
+
+# Home
-module.exports = variables;
+
+chat bubble icon
+
+What's New?
+
+Embedded Cluster 2.0 Release
+
+The 2.0 release brings improvements to architecture that increase the reliability and stability of Embedded Cluster.
+
+Learn more
+
+lightbulb icon
+
+Did You Know?
+
+Manage Supported Install Methods Per Customer
+
+Control which installation methods are available for each customer from the **Install types** field in the customer's license.
+
+Learn more
+
diff --git a/static/llms/llms.txt b/static/llms/llms.txt
new file mode 100644
index 0000000000..e8cc4a32b9
--- /dev/null
+++ b/static/llms/llms.txt
@@ -0,0 +1,16 @@
+# Replicated Documentation for LLMs
+
+> Replicated is a commercial software distribution platform. Independent software vendors (ISVs) can use features of the Replicated Platform to distribute modern commercial software into complex, customer-controlled environments, including on-prem and air gap.
+
+## Docs
+
+- [llms-docs.txt](https://docs.replicated.com/llms/llms-docs.txt): This file contains the contents of the docs/ directory in the [replicated-docs](https://github.com/replicatedhq/replicated-docs) repository, excluding certain subdirectories as explained below
+
+## Notes
+
+- The content in the llms-docs.txt file is automatically generated from the same source as the Replicated documentation each time the site is built
+- The llms-docs.txt file excludes any files and directories in the replicated-docs repo that are outside of the docs/ directory. Additionally, the following subdirectories of docs/ are excluded from the llms-docs.txt file:
+  - docs/release-notes/
+  - docs/templates/
+  - docs/pdfs/
+  - docs/.history/
\ No newline at end of file

From a043debf9d4c544d516ec4a3604ca21446a07cb5 Mon Sep 17 00:00:00 2001
From: Paige Calvert
Date: Thu, 27 Mar 2025 09:19:50 -0600
Subject: [PATCH 3/9] script testing

---
 static/js/generate-llms.js | 40 ++++++++++++++++++++++++++++++++++++++
 static/llms/llms.txt       |  2 +-
 2 files changed, 41 insertions(+), 1 deletion(-)
 create mode 100644 static/js/generate-llms.js

diff --git a/static/js/generate-llms.js b/static/js/generate-llms.js
new file mode 100644
index 0000000000..808efd4697
--- /dev/null
+++ b/static/js/generate-llms.js
@@ -0,0 +1,40 @@
+const fs = require('fs');
+const path = require('path');
+
+// Recursively get all .md files from a directory
+function getMDFiles(dir, fileList = []) {
+  const files = fs.readdirSync(dir);
+
+  files.forEach(file => {
+    const filePath = path.join(dir, file);
+    const stat = fs.statSync(filePath);
+
+    if (stat.isDirectory()) {
+      getMDFiles(filePath, fileList);
+    } else if (path.extname(file) === '.md') {
+      // Convert Windows path separators to forward slashes if needed
+      const normalizedPath = filePath.replace(/\\/g, '/');
+      // Remove the /docs prefix to get relative path
+      const relativePath = normalizedPath.replace(/^docs\//, '');
+      fileList.push(relativePath);
+    }
+  });
+
+  return fileList;
+}
+
+// Generate the llms.txt file
+function generateLLMsFile() {
+  const docsDir = path.join(process.cwd(), 'docs');
+  const mdFiles = getMDFiles(docsDir);
+
+  // Create content with one file per line
+  const content = mdFiles.map(file => `https://docs.cursor.com/${file}`).join('\n');
+
+  // Write to llms.txt
+  fs.writeFileSync('llms.txt', content);
+  console.log('Generated llms.txt with', mdFiles.length, 'files');
+}
+
+// Run the generator
+generateLLMsFile();
diff --git a/static/llms/llms.txt b/static/llms/llms.txt
index e8cc4a32b9..d5a31fe2b8 100644
--- a/static/llms/llms.txt
+++ b/static/llms/llms.txt
@@ -4,7 +4,7 @@
 
 ## Docs
 
-- [llms-docs.txt](https://docs.replicated.com/llms/llms-docs.txt): This file contains the contents of the docs/ directory in the [replicated-docs](https://github.com/replicatedhq/replicated-docs) repository, excluding certain subdirectories as explained below
+- [llms-docs.txt](https://docs.replicated.com/llms/llms-docs.txt): This file contains the contents of the docs/ directory in the [replicated-docs](https://github.com/replicatedhq/replicated-docs) repository, excluding certain subdirectories as explained in Notes below.
 
 ## Notes
 
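A note on the first version of the generator above: `docsDir` is built with `path.join(process.cwd(), 'docs')`, which yields an absolute path, so every `filePath` the walker returns is absolute as well, and the `/^docs\//` prefix strip only fires when the walk is started from a relative directory. A minimal sketch of the two cases, using hypothetical sample paths:

```js
// Minimal illustration of the prefix-stripping behavior in the v1 script.
// Both sample paths are hypothetical.
const relativePath = 'docs/enterprise/updating-licenses.md';
const absolutePath = '/home/ci/replicated-docs/docs/enterprise/updating-licenses.md';

// Matches: the string starts with "docs/", so the prefix is removed.
console.log(relativePath.replace(/^docs\//, ''));
// -> enterprise/updating-licenses.md

// No match: an absolute path does not start with "docs/",
// so the full filesystem path would end up in the generated URL.
console.log(absolutePath.replace(/^docs\//, ''));
// -> /home/ci/replicated-docs/docs/enterprise/updating-licenses.md
```

The rewrite in the next patch avoids this by resolving `DOCS_DIR` once and stripping that full prefix from each file path instead.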
From e8492c5a0fbefadf605a4754a1e107d96e19bf7b Mon Sep 17 00:00:00 2001
From: Paige Calvert
Date: Thu, 27 Mar 2025 14:35:07 -0600
Subject: [PATCH 4/9] add generate-llms script

---
 static/js/generate-llms.js | 100 +++--
 static/llms.txt            | 753 +++++++++++++++++++++++++++++++++++++
 2 files changed, 824 insertions(+), 29 deletions(-)
 create mode 100644 static/llms.txt

diff --git a/static/js/generate-llms.js b/static/js/generate-llms.js
index 808efd4697..410c51c709 100644
--- a/static/js/generate-llms.js
+++ b/static/js/generate-llms.js
@@ -1,40 +1,82 @@
 const fs = require('fs');
 const path = require('path');
 
-// Recursively get all .md files from a directory
-function getMDFiles(dir, fileList = []) {
-  const files = fs.readdirSync(dir);
-
-  files.forEach(file => {
-    const filePath = path.join(dir, file);
-    const stat = fs.statSync(filePath);
+// Fix path resolution to use /docs and /static at project root
+const DOCS_DIR = path.join(__dirname, "../../docs");
+const OUTPUT_FILE = path.join(__dirname, "../../static", "llms.txt");
+const BASE_URL = "https://docs.replicated.com";
+
+function extractFirstSentence(text) {
+  // Remove any front matter between --- markers
+  text = text.replace(/^---[\s\S]*?---/, '');
+
+  // Remove any import statements
+  text = text.replace(/^import.*$/gm, '');
+
+  // Remove markdown headings
+  text = text.replace(/^#+\s.*$/gm, '');
+
+  // Find the first non-empty line
+  const firstParagraph = text.split('\n')
+    .map(line => line.trim())
+    .filter(line => line.length > 0)[0];
 
-    if (stat.isDirectory()) {
-      getMDFiles(filePath, fileList);
-    } else if (path.extname(file) === '.md') {
-      // Convert Windows path separators to forward slashes if needed
-      const normalizedPath = filePath.replace(/\\/g, '/');
-      // Remove the /docs prefix to get relative path
-      const relativePath = normalizedPath.replace(/^docs\//, '');
-      fileList.push(relativePath);
-    }
-  });
+  // Extract first sentence (ends with . ! or ?)
+  const sentenceMatch = firstParagraph?.match(/^[^.!?]+[.!?]/);
+  return sentenceMatch ? sentenceMatch[0].trim() : 'No description available.';
+}
 
-  return fileList;
+// Recursively get all .md files from a directory
+function getMarkdownFiles(dir, fileList = []) {
+  fs.readdirSync(dir).forEach(file => {
+    const filePath = path.join(dir, file);
+
+    // Skip .history and release-notes directories
+    if (filePath.includes('.history') || filePath.includes('release-notes') || filePath.includes('templates') || filePath.includes('pdfs')) {
+      return;
+    }
+
+    if (fs.statSync(filePath).isDirectory()) {
+      getMarkdownFiles(filePath, fileList);
+    } else if (path.extname(file) === '.md' || path.extname(file) === '.mdx') {
+      // Read the file content
+      const content = fs.readFileSync(filePath, 'utf8');
+
+      // Extract title from first heading
+      const titleMatch = content.match(/^#\s+(.+)$/m);
+      const title = titleMatch ? titleMatch[1] : file.replace(/\.(md|mdx)$/, '');
+
+      // Extract description from first sentence
+      const description = extractFirstSentence(content);
+
+      // Get the relative path without the extension
+      const relativePath = filePath
+        .replace(`${DOCS_DIR}/`, '')
+        .replace(/\.(md|mdx)$/, '');
+
+      fileList.push({
+        path: relativePath,
+        title: title,
+        description: description
+      });
+    }
+  });
+  return fileList;
 }
 
 // Generate the llms.txt file
-function generateLLMsFile() {
-  const docsDir = path.join(process.cwd(), 'docs');
-  const mdFiles = getMDFiles(docsDir);
-
-  // Create content with one file per line
-  const content = mdFiles.map(file => `https://docs.cursor.com/${file}`).join('\n');
-
-  // Write to llms.txt
-  fs.writeFileSync('llms.txt', content);
-  console.log('Generated llms.txt with', mdFiles.length, 'files');
+function generateLLMSTxt() {
+  const files = getMarkdownFiles(DOCS_DIR);
+
+  const output = [
+    "## Docs\n",
+    ...files.map(file =>
+      `- [${file.title}](${BASE_URL}/${file.path}.md): ${file.description}`
+    )
+  ].join('\n');
+
+  fs.writeFileSync(OUTPUT_FILE, output);
+  console.log("✅ llms.txt generated!");
 }
 
 // Run the generator
-generateLLMsFile();
+generateLLMSTxt();
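To make the description extraction above concrete, here is a small standalone check that copies the `extractFirstSentence` body from the patch and runs it against a made-up MDX snippet; the sample front matter, import, and heading are illustrative only:

```js
// Standalone check of the first-sentence extraction used by generate-llms.js.
// The sample document below is hypothetical.
function extractFirstSentence(text) {
  text = text.replace(/^---[\s\S]*?---/, ''); // drop front matter
  text = text.replace(/^import.*$/gm, '');    // drop import statements
  text = text.replace(/^#+\s.*$/gm, '');      // drop markdown headings
  const firstParagraph = text.split('\n')
    .map(line => line.trim())
    .filter(line => line.length > 0)[0];
  const sentenceMatch = firstParagraph?.match(/^[^.!?]+[.!?]/);
  return sentenceMatch ? sentenceMatch[0].trim() : 'No description available.';
}

const sample = [
  '---',
  'pagination_prev: null',
  '---',
  "import Example from '../partials/_example.mdx'",
  '',
  '# Some Topic',
  '',
  'This topic describes the feature. It also covers related settings.',
].join('\n');

console.log(extractFirstSentence(sample));
// -> This topic describes the feature.
```

This behavior also explains the shape of the generated file below: sources whose first paragraph contains no sentence-ending punctuation fall back to `No description available.`, and list-item sources surface with their `*` marker and are truncated at the first period, as in the `_ports-*` entries.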
diff --git a/static/llms.txt b/static/llms.txt
new file mode 100644
index 0000000000..83451af945
--- /dev/null
+++ b/static/llms.txt
@@ -0,0 +1,753 @@
+## Docs
+
+- [Changing an Admin Console Password](https://docs.replicated.com/enterprise/auth-changing-passwords.md): When you install for the first time with Replicated kURL, the Replicated KOTS Admin Console is secured with a single shared password that is set automatically for all users.
+- [Configuring Role-based Access Control (Beta)](https://docs.replicated.com/enterprise/auth-configuring-rbac.md): You can regulate access to the Replicated KOTS Admin Console resources based on the roles of individual users within your organization.
+- [Using an Identity Provider for User Access (Beta)](https://docs.replicated.com/enterprise/auth-identity-provider.md): When you install an application for the first time, the Replicated KOTS Admin Console is secured with a single shared password for all users.
+- [Adding Nodes to kURL Clusters](https://docs.replicated.com/enterprise/cluster-management-add-nodes.md): No description available.
+- [Deleting the Admin Console and Removing Applications](https://docs.replicated.com/enterprise/delete-admin-console.md): This topic describes how to remove installed applications and delete the Replicated KOTS Admin Console.
+- [Managing Multi-Node Clusters with Embedded Cluster](https://docs.replicated.com/enterprise/embedded-manage-nodes.md): The topic describes managing nodes in clusters created with Replicated Embedded Cluster, including how to add nodes and enable high-availability for multi-node clusters.
+- [Updating Custom TLS Certificates in Embedded Cluster Installations](https://docs.replicated.com/enterprise/embedded-tls-certs.md): This topic describes how to update custom TLS certificates in Replicated Embedded Cluster installations.
+- [Managing Secrets with KOTS Auto-GitOps (Alpha)](https://docs.replicated.com/enterprise/gitops-managing-secrets.md): No description available.
+- [KOTS Auto-GitOps Workflow](https://docs.replicated.com/enterprise/gitops-workflow.md): No description available.
+- [Working with the kURL Image Registry](https://docs.replicated.com/enterprise/image-registry-kurl.md): No description available.
+- [Avoiding Docker Hub Rate Limits](https://docs.replicated.com/enterprise/image-registry-rate-limits.md): This topic describes how to avoid rate limiting for anonymous and free authenticated use of Docker Hub by providing a Docker Hub username and password to the `kots docker ensure-secret` command.
+- [Configuring Local Image Registries](https://docs.replicated.com/enterprise/image-registry-settings.md): This topic describes how to configure private registry settings in the Replicated KOTS Admin Console.
+- [Air Gap Installation with Embedded Cluster](https://docs.replicated.com/enterprise/installing-embedded-air-gap.md): This topic describes how to install applications with Embedded Cluster on a virtual machine (VM) or bare metal server with no outbound internet access.
+- [Automating Installation with Embedded Cluster](https://docs.replicated.com/enterprise/installing-embedded-automation.md): This topic describes how to install an application with Replicated Embedded Cluster from the command line, without needing to access the Replicated KOTS Admin Console.
+- [Embedded Cluster Installation Requirements](https://docs.replicated.com/enterprise/installing-embedded-requirements.md): This topic lists the installation requirements for Replicated Embedded Cluster.
+- [Online Installation with Embedded Cluster](https://docs.replicated.com/enterprise/installing-embedded.md): This topic describes how to install an application in an online (internet-connected) environment with the Replicated Embedded Cluster installer.
+- [Air Gap Installation in Existing Clusters with KOTS](https://docs.replicated.com/enterprise/installing-existing-cluster-airgapped.md): No description available.
+- [Installing with the KOTS CLI](https://docs.replicated.com/enterprise/installing-existing-cluster-automation.md): This topic describes how to install an application with Replicated KOTS in an existing cluster using the KOTS CLI.
+- [Online Installation in Existing Clusters with KOTS](https://docs.replicated.com/enterprise/installing-existing-cluster.md): No description available.
+- [KOTS Installation Requirements](https://docs.replicated.com/enterprise/installing-general-requirements.md): This topic describes the requirements for installing in a Kubernetes cluster with Replicated KOTS.
+- [Air Gap Installation with kURL](https://docs.replicated.com/enterprise/installing-kurl-airgap.md): No description available.
+- [Installing with kURL from the Command Line](https://docs.replicated.com/enterprise/installing-kurl-automation.md): No description available.
+- [kURL Installation Requirements](https://docs.replicated.com/enterprise/installing-kurl-requirements.md): No description available.
+- [Online Installation with kURL](https://docs.replicated.com/enterprise/installing-kurl.md): No description available.
+- [Considerations Before Installing](https://docs.replicated.com/enterprise/installing-overview.md): Before you install an application with KOTS in an existing cluster, consider the following installation options.
+- [Installing KOTS in Existing Clusters Without Object Storage](https://docs.replicated.com/enterprise/installing-stateful-component-requirements.md): This topic describes how to install Replicated KOTS in existing clusters without the default object storage, including limitations of installing without object storage.
+- [Accessing Dashboards Using Port Forwarding](https://docs.replicated.com/enterprise/monitoring-access-dashboards.md): This topic includes information about how to access Prometheus, Grafana, and Alertmanager in Replicated KOTS existing cluster and Replicated kURL installations.
+- [Configuring Prometheus Monitoring in Existing Cluster KOTS Installations](https://docs.replicated.com/enterprise/monitoring-applications.md): This topic describes how to monitor applications and clusters with Prometheus in existing cluster installations with Replicated KOTS.
+- [Consuming Prometheus Metrics Externally](https://docs.replicated.com/enterprise/monitoring-external-prometheus.md): No description available.
+- [Validating SBOM Signatures](https://docs.replicated.com/enterprise/sbom-validating.md): This topic describes the process to perform the validation of software bill of material (SBOM) signatures for Replicated KOTS, Replicated kURL, and Troubleshoot releases.
+- [How to Set Up Backup Storage](https://docs.replicated.com/enterprise/snapshots-config-workflow.md): This topic describes the process of setting up backup storage for the Replicated snapshots feature.
+- [Configuring a Host Path Storage Destination](https://docs.replicated.com/enterprise/snapshots-configuring-hostpath.md): This topic describes how to install Velero and configure a host path as your storage destination for backups.
+- [Configuring an NFS Storage Destination](https://docs.replicated.com/enterprise/snapshots-configuring-nfs.md): This topic describes how to install Velero and configure a Network File System (NFS) as your storage destination for backups.
+- [Creating and Scheduling Backups](https://docs.replicated.com/enterprise/snapshots-creating.md): This topic describes how to use the Replicated snapshots feature to create backups.
+- [Restoring from Backups](https://docs.replicated.com/enterprise/snapshots-restoring-full.md): This topic describes how to restore from full or partial backups using Replicated snapshots.
+- [Configuring Other Storage Destinations](https://docs.replicated.com/enterprise/snapshots-storage-destinations.md): This topic describes installing Velero and configuring storage for Amazon Web Service (AWS), Google Cloud Provider (GCP), Microsoft Azure, and S3-compatible providers.
+- [Troubleshooting Snapshots](https://docs.replicated.com/enterprise/snapshots-troubleshooting-backup-restore.md): When a snapshot fails, a support bundle will be collected and stored automatically.
+- [Updating Storage Settings](https://docs.replicated.com/enterprise/snapshots-updating-with-admin-console.md): This topic describes how to update existing storage destination settings using the Replicated Admin Console.
+- [Installing the Velero CLI](https://docs.replicated.com/enterprise/snapshots-velero-cli-installing.md): You install the Velero CLI before installing Velero and configuring a storage destination for backups.
+- [Configuring Namespace Access and Memory Limit](https://docs.replicated.com/enterprise/snapshots-velero-installing-config.md): This topic describes how to configure namespace access and the memory limit for Velero.
+- [Understanding Application Status Details in the Admin Console](https://docs.replicated.com/enterprise/status-viewing-details.md): This topic describes how to view the status of an application on the Replicated KOTS Admin Console dashboard.
+- [Generating Support Bundles from the Admin Console](https://docs.replicated.com/enterprise/troubleshooting-an-app.md): This topic describes how to generate support bundles from the KOTS Admin Console.
+- [Performing Updates in Existing Clusters](https://docs.replicated.com/enterprise/updating-app-manager.md): This topic describes how to perform updates in existing cluster installations with Replicated KOTS.
+- [Configuring Automatic Updates](https://docs.replicated.com/enterprise/updating-apps.md): This topic describes how to configure automatic updates for applications installed in online (internet-connected) environments.
+- [Performing Updates in Embedded Clusters](https://docs.replicated.com/enterprise/updating-embedded.md): This topic describes how to perform updates for [Replicated Embedded Cluster](/vendor/embedded-overview) installations.
+- [About kURL Cluster Updates](https://docs.replicated.com/enterprise/updating-kurl-about.md): No description available.
+- [Performing Updates in kURL Clusters](https://docs.replicated.com/enterprise/updating-kurl.md): No description available.
+- [Updating Licenses in the Admin Console](https://docs.replicated.com/enterprise/updating-licenses.md): This topic describes how to update a license from the KOTS Admin Console.
+- [Patching with Kustomize](https://docs.replicated.com/enterprise/updating-patching-with-kustomize.md): This topic describes how to use Kustomize to patch an application before deploying.
+- [Updating TLS Certificates in kURL Clusters](https://docs.replicated.com/enterprise/updating-tls-cert.md): No description available.
+- [Introduction to KOTS](https://docs.replicated.com/intro-kots.md): This topic provides an introduction to the Replicated KOTS installer, including information about KOTS features, installation options, and user interfaces.
+- [Introduction to Replicated](https://docs.replicated.com/intro-replicated.md): This topic provides an introduction to the Replicated Platform, including a platform overview and a list of key features.
+- [Home](https://docs.replicated.com/intro.md): No description available.
+- [_airgap-bundle](https://docs.replicated.com/partials/airgap/_airgap-bundle.md): Air gap bundles (`.
+- [_nginx-deployment](https://docs.replicated.com/partials/application-links/_nginx-deployment.md): No description available.
+- [_nginx-k8s-app](https://docs.replicated.com/partials/application-links/_nginx-k8s-app.md): No description available.
+- [_nginx-kots-app](https://docs.replicated.com/partials/application-links/_nginx-kots-app.md): No description available.
+- [_nginx-service](https://docs.replicated.com/partials/application-links/_nginx-service.md): No description available.
+- [_build-source-code](https://docs.replicated.com/partials/ci-cd/_build-source-code.md): Add one or more jobs to compile your application source code and build images.
+- [_test-recs](https://docs.replicated.com/partials/ci-cd/_test-recs.md): * **Application Testing:** Traditional application testing includes unit, integration, and end-to-end tests.
+- [_openshift-pool](https://docs.replicated.com/partials/cmx/_openshift-pool.md): No description available.
+- [_overview](https://docs.replicated.com/partials/cmx/_overview.md): Replicated Compatibility Matrix quickly provisions ephemeral clusters of different Kubernetes distributions and versions, such as OpenShift, EKS, and Replicated kURL.
+- [_prerequisites](https://docs.replicated.com/partials/cmx/_prerequisites.md): * Create an account in the Replicated Vendor Portal.
+- [_supported-clusters-overview](https://docs.replicated.com/partials/cmx/_supported-clusters-overview.md): No description available.
+- [_collab-existing-user](https://docs.replicated.com/partials/collab-repo/_collab-existing-user.md): If a team member adds a GitHub username to their Vendor Portal account that already exists in the collab repository, then the Vendor Portal does _not_ change the role that the existing user is assigned in the collab repository.
+- [_collab-rbac-important](https://docs.replicated.com/partials/collab-repo/_collab-rbac-important.md): No description available.
+- [_collab-rbac-resources-important](https://docs.replicated.com/partials/collab-repo/_collab-rbac-resources-important.md): No description available.
+- [_collab-repo-about](https://docs.replicated.com/partials/collab-repo/_collab-repo-about.md): The replicated-collab organization in GitHub is used for tracking and collaborating on escalations, bug reports, and feature requests that are sent by members of a Vendor Portal team to the Replicated team.
+- [_affixExample](https://docs.replicated.com/partials/config/_affixExample.md): No description available.
+- [_defaultExample](https://docs.replicated.com/partials/config/_defaultExample.md): No description available.
+- [_helpTextExample](https://docs.replicated.com/partials/config/_helpTextExample.md): No description available.
+- [_hiddenExample](https://docs.replicated.com/partials/config/_hiddenExample.md): No description available.
+- [_item-types](https://docs.replicated.com/partials/config/_item-types.md): No description available.
+- [_nameExample](https://docs.replicated.com/partials/config/_nameExample.md): No description available.
+- [_property-when](https://docs.replicated.com/partials/config/_property-when.md): It can be useful to conditionally show or hide fields so that your users are only provided the configuration options that are relevant to them.
+- [_randomStringNote](https://docs.replicated.com/partials/config/_randomStringNote.md): No description available.
+- [_readonlyExample](https://docs.replicated.com/partials/config/_readonlyExample.md): No description available.
+- [_recommendedExample](https://docs.replicated.com/partials/config/_recommendedExample.md): No description available.
+- [_regexValidationExample](https://docs.replicated.com/partials/config/_regexValidationExample.md): No description available.
+- [_requiredExample](https://docs.replicated.com/partials/config/_requiredExample.md): No description available.
+- [_typeExample](https://docs.replicated.com/partials/config/_typeExample.md): No description available.
+- [_valueExample](https://docs.replicated.com/partials/config/_valueExample.md): No description available.
+- [_when-note](https://docs.replicated.com/partials/config/_when-note.md): No description available.
+- [_when-requirements](https://docs.replicated.com/partials/config/_when-requirements.md): No description available.
+- [_whenExample](https://docs.replicated.com/partials/config/_whenExample.md): No description available.
+- [_boolExample](https://docs.replicated.com/partials/configValues/_boolExample.md): No description available.
+- [_config-values-procedure](https://docs.replicated.com/partials/configValues/_config-values-procedure.md): During installation, KOTS automatically generates a ConfigValues file and saves the file in a directory called `upstream`.
+- [_configValuesExample](https://docs.replicated.com/partials/configValues/_configValuesExample.md): No description available.
+- [_fileExample](https://docs.replicated.com/partials/configValues/_fileExample.md): No description available.
+- [_passwordExample](https://docs.replicated.com/partials/configValues/_passwordExample.md): No description available.
+- [_selectOneExample](https://docs.replicated.com/partials/configValues/_selectOneExample.md): No description available.
+- [_textExample](https://docs.replicated.com/partials/configValues/_textExample.md): No description available.
+- [_textareaExample](https://docs.replicated.com/partials/configValues/_textareaExample.md): No description available.
+- [_wizard](https://docs.replicated.com/partials/custom-domains/_wizard.md): 1.
+- [_additionalImages](https://docs.replicated.com/partials/custom-resource-application/_additionalImages.md): No description available.
+- [_additionalNamespaces](https://docs.replicated.com/partials/custom-resource-application/_additionalNamespaces.md): No description available.
+- [_allowRollback](https://docs.replicated.com/partials/custom-resource-application/_allowRollback.md): No description available.
+- [_graphs](https://docs.replicated.com/partials/custom-resource-application/_graphs.md): No description available.
+- [_icon](https://docs.replicated.com/partials/custom-resource-application/_icon.md): No description available.
+- [_minKotsVersion](https://docs.replicated.com/partials/custom-resource-application/_minKotsVersion.md): No description available.
+- [_ports-applicationURL](https://docs.replicated.com/partials/custom-resource-application/_ports-applicationURL.md): * (Optional) ports.
+- [_ports-kurl-note](https://docs.replicated.com/partials/custom-resource-application/_ports-kurl-note.md): No description available.
+- [_ports-localPort](https://docs.replicated.com/partials/custom-resource-application/_ports-localPort.md): * ports.
+- [_ports-serviceName](https://docs.replicated.com/partials/custom-resource-application/_ports-serviceName.md): * ports.
+- [_ports-servicePort](https://docs.replicated.com/partials/custom-resource-application/_ports-servicePort.md): * ports.
+- [_ports](https://docs.replicated.com/partials/custom-resource-application/_ports.md): No description available.
+- [_proxyRegistryDomain](https://docs.replicated.com/partials/custom-resource-application/_proxyRegistryDomain.md): No description available.
+- [_releaseNotes](https://docs.replicated.com/partials/custom-resource-application/_releaseNotes.md): No description available.
+- [_replicatedRegistryDomain](https://docs.replicated.com/partials/custom-resource-application/_replicatedRegistryDomain.md): No description available.
+- [_requireMinimalRBACPrivileges](https://docs.replicated.com/partials/custom-resource-application/_requireMinimalRBACPrivileges.md): No description available.
+- [_servicePort-note](https://docs.replicated.com/partials/custom-resource-application/_servicePort-note.md): No description available.
+- [_statusInformers](https://docs.replicated.com/partials/custom-resource-application/_statusInformers.md): No description available.
+- [_supportMinimalRBACPrivileges](https://docs.replicated.com/partials/custom-resource-application/_supportMinimalRBACPrivileges.md): No description available.
+- [_targetKotsVersion](https://docs.replicated.com/partials/custom-resource-application/_targetKotsVersion.md): No description available.
+- [_title](https://docs.replicated.com/partials/custom-resource-application/_title.md): No description available.
+- [_change-channel](https://docs.replicated.com/partials/customers/_change-channel.md): You can change the channel a customer is assigned at any time.
+- [_download](https://docs.replicated.com/partials/customers/_download.md): No description available.
+- [_definition](https://docs.replicated.com/partials/embedded-cluster/_definition.md): Replicated Embedded Cluster allows you to distribute a Kubernetes cluster and your application together as a single appliance, making it easy for enterprise users to install, update, and manage the application and the cluster in tandem.
+- [_ec-config](https://docs.replicated.com/partials/embedded-cluster/_ec-config.md): No description available.
+- [_multi-node-ha-arch](https://docs.replicated.com/partials/embedded-cluster/_multi-node-ha-arch.md): No description available.
+- [_port-reqs](https://docs.replicated.com/partials/embedded-cluster/_port-reqs.md): This section lists the ports used by Embedded Cluster.
+- [_proxy-install-limitations](https://docs.replicated.com/partials/embedded-cluster/_proxy-install-limitations.md): No description available.
+- [_proxy-install-reqs](https://docs.replicated.com/partials/embedded-cluster/_proxy-install-reqs.md): **Requirement:** Proxy installations require Embedded Cluster 1.
+- [_requirements](https://docs.replicated.com/partials/embedded-cluster/_requirements.md): No description available.
+- [_update-air-gap-admin-console](https://docs.replicated.com/partials/embedded-cluster/_update-air-gap-admin-console.md): 1.
+- [_update-air-gap-cli](https://docs.replicated.com/partials/embedded-cluster/_update-air-gap-cli.md): 1.
+- [_update-air-gap-overview](https://docs.replicated.com/partials/embedded-cluster/_update-air-gap-overview.md): To upgrade an installation, new air gap bundles can be uploaded to the Admin Console from the browser or with the Embedded Cluster binary from the command line.
+- [_update-overview](https://docs.replicated.com/partials/embedded-cluster/_update-overview.md): When you update an application installed with Embedded Cluster, you update both the application and the cluster infrastructure together, including Kubernetes, KOTS, and other components running in the cluster. +- [_warning-do-not-downgrade](https://docs.replicated.com/partials/embedded-cluster/_warning-do-not-downgrade.md): No description available. +- [_create-promote-release](https://docs.replicated.com/partials/getting-started/_create-promote-release.md): Create a new release and promote it to the Unstable channel. +- [_csdl-overview](https://docs.replicated.com/partials/getting-started/_csdl-overview.md): Commercial software distribution is the business process that independent software vendors (ISVs) use to enable enterprise customers to self-host a fully private instance of the vendor's application in an environment controlled by the customer. +- [_gitea-ec-config](https://docs.replicated.com/partials/getting-started/_gitea-ec-config.md): No description available. +- [_gitea-helmchart-cr-ec](https://docs.replicated.com/partials/getting-started/_gitea-helmchart-cr-ec.md): No description available. +- [_gitea-helmchart-cr](https://docs.replicated.com/partials/getting-started/_gitea-helmchart-cr.md): No description available. +- [_gitea-k8s-app-cr](https://docs.replicated.com/partials/getting-started/_gitea-k8s-app-cr.md): No description available. +- [_gitea-kots-app-cr-ec](https://docs.replicated.com/partials/getting-started/_gitea-kots-app-cr-ec.md): No description available. +- [_gitea-kots-app-cr](https://docs.replicated.com/partials/getting-started/_gitea-kots-app-cr.md): No description available. +- [_grafana-config](https://docs.replicated.com/partials/getting-started/_grafana-config.md): No description available. +- [_grafana-helmchart](https://docs.replicated.com/partials/getting-started/_grafana-helmchart.md): No description available. +- [_grafana-k8s-app](https://docs.replicated.com/partials/getting-started/_grafana-k8s-app.md): No description available. +- [_grafana-kots-app](https://docs.replicated.com/partials/getting-started/_grafana-kots-app.md): No description available. +- [_kubernetes-training](https://docs.replicated.com/partials/getting-started/_kubernetes-training.md): No description available. +- [_labs-intro](https://docs.replicated.com/partials/getting-started/_labs-intro.md): Replicated also offers a sandbox environment where you can complete several beginner, intermediate, and advanced labs. +- [_related-topics](https://docs.replicated.com/partials/getting-started/_related-topics.md): No description available. +- [_replicated-definition](https://docs.replicated.com/partials/getting-started/_replicated-definition.md): Replicated is a commercial software distribution platform. +- [_test-your-changes](https://docs.replicated.com/partials/getting-started/_test-your-changes.md): Install the release to test your changes. +- [_tutorial-intro](https://docs.replicated.com/partials/getting-started/_tutorial-intro.md): This tutorial introduces you to the Replicated features for software vendors and their enterprise users. +- [_vm-requirements](https://docs.replicated.com/partials/getting-started/_vm-requirements.md): No description available. +- [_gitops-not-recommended](https://docs.replicated.com/partials/gitops/_gitops-not-recommended.md): No description available. 
+- [_gitops-limitation](https://docs.replicated.com/partials/helm/_gitops-limitation.md): The KOTS Auto-GitOps workflow is not supported for installations with the HelmChart custom resource `apiVersion: kots. +- [_helm-builder-requirements](https://docs.replicated.com/partials/helm/_helm-builder-requirements.md): No description available. +- [_helm-cr-builder-airgap-intro](https://docs.replicated.com/partials/helm/_helm-cr-builder-airgap-intro.md): In the `builder` key, you provide the minimum Helm values required to render the chart templates so that the output includes any images that must be included in the air gap bundle. +- [...](https://docs.replicated.com/partials/helm/_helm-cr-builder-example.md): No description available. +- [_helm-cr-chart-name](https://docs.replicated.com/partials/helm/_helm-cr-chart-name.md): The name of the chart. +- [_helm-cr-chart-release-name](https://docs.replicated.com/partials/helm/_helm-cr-chart-release-name.md): Specifies the release name to use when installing this instance of the Helm chart. +- [_helm-cr-chart-version](https://docs.replicated.com/partials/helm/_helm-cr-chart-version.md): The version of the chart. +- [_helm-cr-chart](https://docs.replicated.com/partials/helm/_helm-cr-chart.md): The `chart` key allows for a mapping between the data in this definition and the chart archive itself. +- [_helm-cr-exclude](https://docs.replicated.com/partials/helm/_helm-cr-exclude.md): The attribute is a value for making optional charts. +- [_helm-cr-namespace](https://docs.replicated.com/partials/helm/_helm-cr-namespace.md): The `namespace` key specifies an alternative namespace where Replicated KOTS installs the Helm chart. +- [_helm-cr-optional-values-recursive-merge](https://docs.replicated.com/partials/helm/_helm-cr-optional-values-recursive-merge.md): The `optionalValues. +- [_helm-cr-optional-values-when](https://docs.replicated.com/partials/helm/_helm-cr-optional-values-when.md): The `optionalValues. +- [_helm-cr-optional-values](https://docs.replicated.com/partials/helm/_helm-cr-optional-values.md): The `optionalValues` key can be used to set values in the Helm chart `values. +- [_helm-cr-upgrade-flags](https://docs.replicated.com/partials/helm/_helm-cr-upgrade-flags.md): Specifies additional flags to pass to the `helm upgrade` command for charts. +- [_helm-cr-values](https://docs.replicated.com/partials/helm/_helm-cr-values.md): The `values` key can be used to set or delete existing values in the Helm chart `values. +- [_helm-cr-weight-limitation](https://docs.replicated.com/partials/helm/_helm-cr-weight-limitation.md): The `weight` field is _not_ supported for HelmChart custom resources with `useHelmInstall: false`. +- [_helm-cr-weight](https://docs.replicated.com/partials/helm/_helm-cr-weight.md): Determines the order in which KOTS applies the Helm chart. +- [_helm-definition](https://docs.replicated.com/partials/helm/_helm-definition.md): Helm is a popular open source package manager for Kubernetes applications. +- [_helm-install-beta](https://docs.replicated.com/partials/helm/_helm-install-beta.md): The Helm installation method is Beta and is not recommended for production releases. +- [_helm-install-prereqs](https://docs.replicated.com/partials/helm/_helm-install-prereqs.md): * The customer used to install must have a valid email address. +- [_helm-package](https://docs.replicated.com/partials/helm/_helm-package.md): No description available. 
+- [_helm-template-limitation](https://docs.replicated.com/partials/helm/_helm-template-limitation.md): Helm's `lookup` function and some values in the built-in `Capabilities` object are not supported with the `kots. +- [_helm-version-limitation](https://docs.replicated.com/partials/helm/_helm-version-limitation.md): Support for Helm v2, including security patches, ended on November 13, 2020. +- [_hook-weights-limitation](https://docs.replicated.com/partials/helm/_hook-weights-limitation.md): Hook weights below -9999 are not supported. +- [_hooks-limitation](https://docs.replicated.com/partials/helm/_hooks-limitation.md): No description available. +- [_installer-only-annotation](https://docs.replicated.com/partials/helm/_installer-only-annotation.md): Any other Kubernetes resources in the release (such as Kubernetes Deployments or Services) must include the `kots. +- [_kots-helm-cr-description](https://docs.replicated.com/partials/helm/_kots-helm-cr-description.md): To deploy Helm charts, KOTS requires a unique HelmChart custom resource for each Helm chart `. +- [_replicated-deprecated](https://docs.replicated.com/partials/helm/_replicated-deprecated.md): The HelmChart custom resource `apiVersion: kots. +- [_replicated-helm-migration](https://docs.replicated.com/partials/helm/_replicated-helm-migration.md): You cannot migrate existing Helm charts in existing installations from the `useHelmInstall: false` installation method to a different method. +- [Helm chart values.yaml](https://docs.replicated.com/partials/helm/_set-values-config-example.md): Using KOTS template functions in the [Config](/reference/template-functions-config-context) context allows you to set Helm values based on user-supplied values from the KOTS Admin Console configuration page. +- [KOTS HelmChart custom resource](https://docs.replicated.com/partials/helm/_set-values-license-example.md): Using KOTS template functions in the [License](/reference/template-functions-license-context) context allows you to set Helm values based on the unique license file used for installation or upgrade. +- [_v2-native-helm-cr-example](https://docs.replicated.com/partials/helm/_v2-native-helm-cr-example.md): No description available. +- [_docker-compatibility](https://docs.replicated.com/partials/image-registry/_docker-compatibility.md): No description available. +- [_image-registry-settings](https://docs.replicated.com/partials/image-registry/_image-registry-settings.md): No description available. +- [_access-admin-console](https://docs.replicated.com/partials/install/_access-admin-console.md): By default, during installation, KOTS automatically opens localhost port 8800 to provide access to the Admin Console. +- [_airgap-bundle-build](https://docs.replicated.com/partials/install/_airgap-bundle-build.md): * If the **Automatically create airgap builds for newly promoted releases in this channel** setting is enabled on the channel, watch for the build status to complete. +- [_airgap-bundle-download](https://docs.replicated.com/partials/install/_airgap-bundle-download.md): After the build completes, download the bundle. +- [_airgap-bundle-view-contents](https://docs.replicated.com/partials/install/_airgap-bundle-view-contents.md): No description available. +- [_airgap-license-download](https://docs.replicated.com/partials/install/_airgap-license-download.md): 1. 
+- [_automation-intro-embedded](https://docs.replicated.com/partials/install/_automation-intro-embedded.md): When you use the KOTS CLI to install an application in a kURL cluster, you first run the kURL installation script to provision the cluster and automatically install KOTS in the cluster. +- [_automation-intro-existing](https://docs.replicated.com/partials/install/_automation-intro-existing.md): When you use the KOTS CLI to install an application in an existing cluster, you install both the application and Replicated KOTS with a single command. +- [_config-values-procedure](https://docs.replicated.com/partials/install/_config-values-procedure.md): No description available. +- [_download-kotsadm-bundle](https://docs.replicated.com/partials/install/_download-kotsadm-bundle.md): Download the `kotsadm. +- [_download-kurl-bundle](https://docs.replicated.com/partials/install/_download-kurl-bundle.md): No description available. +- [_ec-prereqs](https://docs.replicated.com/partials/install/_ec-prereqs.md): * Ensure that your installation environment meets the Embedded Cluster requirements. +- [_embedded-ha-step](https://docs.replicated.com/partials/install/_embedded-ha-step.md): (HA Installation Only) If you are installing in HA mode and did not already preconfigure a load balancer, you are prompted during the installation. +- [_embedded-login-password](https://docs.replicated.com/partials/install/_embedded-login-password.md): After the installation command finishes, note the `Kotsadm` and `Login with password (will not be shown again)` fields in the output of the command. +- [_extract-kurl-bundle](https://docs.replicated.com/partials/install/_extract-kurl-bundle.md): In your installation environment, extract the contents of the kURL `. +- [_firewall-openings-intro](https://docs.replicated.com/partials/install/_firewall-openings-intro.md): The domains for the services listed in the table below need to be accessible from servers performing online installations. +- [_firewall-openings](https://docs.replicated.com/partials/install/_firewall-openings.md): The domains for the services listed in the table below need to be accessible from servers performing online installations. +- [_ha-load-balancer-about](https://docs.replicated.com/partials/install/_ha-load-balancer-about.md): A load balancer is required for high availability mode. +- [_ha-load-balancer-prereq](https://docs.replicated.com/partials/install/_ha-load-balancer-prereq.md): - If you are installing in high availability (HA) mode, a load balancer is required. +- [_install-kots-cli-airgap](https://docs.replicated.com/partials/install/_install-kots-cli-airgap.md): Install the KOTS CLI. +- [_install-kots-cli](https://docs.replicated.com/partials/install/_install-kots-cli.md): No description available. +- [_intro-air-gap](https://docs.replicated.com/partials/install/_intro-air-gap.md): The procedures in this topic apply to installation environments that do not have access to the internet, known as _air gap_ environments. +- [_intro-embedded](https://docs.replicated.com/partials/install/_intro-embedded.md): This topic describes how to use Replicated kURL to provision an embedded cluster in a virtual machine (VM) or bare metal server and install an application in the cluster. +- [_intro-existing](https://docs.replicated.com/partials/install/_intro-existing.md): This topic describes how to use Replicated KOTS to install an application in an existing Kubernetes cluster. 
+- [_kots-airgap-version-match](https://docs.replicated.com/partials/install/_kots-airgap-version-match.md): No description available. +- [_kots-install-prompts](https://docs.replicated.com/partials/install/_kots-install-prompts.md): No description available. +- [_kubernetes-compatibility](https://docs.replicated.com/partials/install/_kubernetes-compatibility.md): No description available. +- [_kurl-about](https://docs.replicated.com/partials/install/_kurl-about.md): Replicated kURL is an open source project. +- [_license-file-prereq](https://docs.replicated.com/partials/install/_license-file-prereq.md): * Download your license file. +- [_placeholder-airgap-bundle](https://docs.replicated.com/partials/install/_placeholder-airgap-bundle.md): * `PATH_TO_AIRGAP_BUNDLE` with the path to the `. +- [_placeholder-app-name-UI](https://docs.replicated.com/partials/install/_placeholder-app-name-UI.md): * `APP_NAME` with the name of the application. +- [_placeholder-namespace-embedded](https://docs.replicated.com/partials/install/_placeholder-namespace-embedded.md): * `NAMESPACE` with the namespace where Replicated kURL installed Replicated KOTS when creating the cluster. +- [_placeholder-namespace-existing](https://docs.replicated.com/partials/install/_placeholder-namespace-existing.md): * `NAMESPACE` with the namespace where you want to install both the application and KOTS. +- [_placeholder-ro-creds](https://docs.replicated.com/partials/install/_placeholder-ro-creds.md): * `REGISTRY_HOST` with the same hostname for the private registry where you pushed the Admin Console images. +- [_placeholders-global](https://docs.replicated.com/partials/install/_placeholders-global.md): * `APP_NAME` with a name for the application. +- [_prereqs-embedded-cluster](https://docs.replicated.com/partials/install/_prereqs-embedded-cluster.md): * Ensure that your environment meets the minimum system requirements. +- [_prereqs-existing-cluster](https://docs.replicated.com/partials/install/_prereqs-existing-cluster.md): * Ensure that your cluster meets the minimum system requirements. +- [_provision-cluster-intro](https://docs.replicated.com/partials/install/_provision-cluster-intro.md): This procedure describes how to use kURL to provision an embedded cluster on a VM or bare metal server. +- [_push-kotsadm-images](https://docs.replicated.com/partials/install/_push-kotsadm-images.md): Extract the KOTS Admin Console container images from the `kotsadm. +- [_airgap-telemetry](https://docs.replicated.com/partials/instance-insights/_airgap-telemetry.md): For air gap instances, Replicated KOTS and the Replicated SDK collect and store instance telemetry in a Kubernetes Secret in the customer environment. +- [_notifications-about](https://docs.replicated.com/partials/instance-insights/_notifications-about.md): No description available. +- [_supported-resources-status](https://docs.replicated.com/partials/instance-insights/_supported-resources-status.md): No description available. +- [_admin-console-about](https://docs.replicated.com/partials/kots/_admin-console-about.md): KOTS provides an Admin Console that lets your customers manage your application. +- [_download-portal-about](https://docs.replicated.com/partials/kots/_download-portal-about.md): The Replicated Download Portal can be used to share license files, air gap bundles, and other assets with customers. 
+- [_embedded-kubernetes-definition](https://docs.replicated.com/partials/kots/_embedded-kubernetes-definition.md): _Embedded Kubernetes_ refers to delivering a Kubernetes distribution alongside an application, so that both Kubernetes and the application are deployed in the customer environment. +- [_kots-definition](https://docs.replicated.com/partials/kots/_kots-definition.md): Replicated KOTS is a kubectl plugin and an in-cluster Admin Console that provides highly successful installations of Helm charts and Kubernetes applications into customer-controlled environments, including on-prem and air gap environments. +- [_kots-entitlement-note](https://docs.replicated.com/partials/kots/_kots-entitlement-note.md): No description available. +- [_ensure-rbac](https://docs.replicated.com/partials/kots-cli/_ensure-rbac.md): No description available. +- [_help](https://docs.replicated.com/partials/kots-cli/_help.md): No description available. +- [_kotsadm-namespace](https://docs.replicated.com/partials/kots-cli/_kotsadm-namespace.md): No description available. +- [_kotsadm-registry](https://docs.replicated.com/partials/kots-cli/_kotsadm-registry.md): No description available. +- [_registry-password](https://docs.replicated.com/partials/kots-cli/_registry-password.md): No description available. +- [_registry-username](https://docs.replicated.com/partials/kots-cli/_registry-username.md): No description available. +- [_skip-rbac-check](https://docs.replicated.com/partials/kots-cli/_skip-rbac-check.md): No description available. +- [_strict-sec-context-yaml](https://docs.replicated.com/partials/kots-cli/_strict-sec-context-yaml.md): No description available. +- [_strict-security-context](https://docs.replicated.com/partials/kots-cli/_strict-security-context.md): No description available. +- [_use-minimal-rbac](https://docs.replicated.com/partials/kots-cli/_use-minimal-rbac.md): No description available. +- [_wait-duration](https://docs.replicated.com/partials/kots-cli/_wait-duration.md): No description available. +- [_with-minio](https://docs.replicated.com/partials/kots-cli/_with-minio.md): No description available. +- [_installers](https://docs.replicated.com/partials/kurl/_installers.md): To provision a cluster on a VM or bare metal server, kURL uses a spec that is defined in a manifest file with `apiVersion: cluster. +- [_kurl-availability](https://docs.replicated.com/partials/kurl/_kurl-availability.md): No description available. +- [_kurl-definition](https://docs.replicated.com/partials/kurl/_kurl-definition.md): kURL is an open source project maintained by Replicated that software vendors can use to create custom Kubernetes distributions that are embedded with their application. +- [_allow-privilege-escalation](https://docs.replicated.com/partials/linter-rules/_allow-privilege-escalation.md): No description available. +- [_application-icon](https://docs.replicated.com/partials/linter-rules/_application-icon.md): No description available. +- [_application-spec](https://docs.replicated.com/partials/linter-rules/_application-spec.md): No description available. +- [_application-statusInformers](https://docs.replicated.com/partials/linter-rules/_application-statusInformers.md): No description available. +- [_config-option-invalid-regex-validator](https://docs.replicated.com/partials/linter-rules/_config-option-invalid-regex-validator.md): No description available. 
+- [_config-option-invalid-type](https://docs.replicated.com/partials/linter-rules/_config-option-invalid-type.md): No description available. +- [_config-option-is-circular](https://docs.replicated.com/partials/linter-rules/_config-option-is-circular.md): No description available. +- [_config-option-password-type](https://docs.replicated.com/partials/linter-rules/_config-option-password-type.md): No description available. +- [_config-option-regex-validator-invalid-type](https://docs.replicated.com/partials/linter-rules/_config-option-regex-validator-invalid-type.md): No description available. +- [_config-spec](https://docs.replicated.com/partials/linter-rules/_config-spec.md): No description available. +- [_container-image-latest-tag](https://docs.replicated.com/partials/linter-rules/_container-image-latest-tag.md): No description available. +- [_container-image-local-image-name](https://docs.replicated.com/partials/linter-rules/_container-image-local-image-name.md): No description available. +- [_container-resource-limits](https://docs.replicated.com/partials/linter-rules/_container-resource-limits.md): No description available. +- [_container-resource-requests](https://docs.replicated.com/partials/linter-rules/_container-resource-requests.md): No description available. +- [_container-resources](https://docs.replicated.com/partials/linter-rules/_container-resources.md): No description available. +- [_deprecated-kubernetes-installer-version](https://docs.replicated.com/partials/linter-rules/_deprecated-kubernetes-installer-version.md): No description available. +- [_hardcoded-namespace](https://docs.replicated.com/partials/linter-rules/_hardcoded-namespace.md): No description available. +- [_invalid-helm-release-name](https://docs.replicated.com/partials/linter-rules/_invalid-helm-release-name.md): No description available. +- [_invalid-kubernetes-installer](https://docs.replicated.com/partials/linter-rules/_invalid-kubernetes-installer.md): No description available. +- [_invalid-min-kots-version](https://docs.replicated.com/partials/linter-rules/_invalid-min-kots-version.md): No description available. +- [_invalid-rendered-yaml](https://docs.replicated.com/partials/linter-rules/_invalid-rendered-yaml.md): No description available. +- [_invalid-target-kots-version](https://docs.replicated.com/partials/linter-rules/_invalid-target-kots-version.md): No description available. +- [_invalid-yaml](https://docs.replicated.com/partials/linter-rules/_invalid-yaml.md): No description available. +- [_invalid_type](https://docs.replicated.com/partials/linter-rules/_invalid_type.md): No description available. +- [_linter-definition](https://docs.replicated.com/partials/linter-rules/_linter-definition.md): The linter checks the manifest files in Replicated KOTS releases to ensure that there are no YAML syntax errors, that all required manifest files are present in the release to support installation with KOTS, and more. +- [_may-contain-secrets](https://docs.replicated.com/partials/linter-rules/_may-contain-secrets.md): No description available. +- [_missing-api-version-field](https://docs.replicated.com/partials/linter-rules/_missing-api-version-field.md): No description available. +- [_missing-kind-field](https://docs.replicated.com/partials/linter-rules/_missing-kind-field.md): No description available. +- [_preflight-spec](https://docs.replicated.com/partials/linter-rules/_preflight-spec.md): No description available. 
+- [_privileged](https://docs.replicated.com/partials/linter-rules/_privileged.md): No description available. +- [_repeat-option-malformed-yamlpath](https://docs.replicated.com/partials/linter-rules/_repeat-option-malformed-yamlpath.md): No description available. +- [_repeat-option-missing-template](https://docs.replicated.com/partials/linter-rules/_repeat-option-missing-template.md): No description available. +- [_repeat-option-missing-valuesByGroup](https://docs.replicated.com/partials/linter-rules/_repeat-option-missing-valuesByGroup.md): No description available. +- [_replicas-1](https://docs.replicated.com/partials/linter-rules/_replicas-1.md): No description available. +- [_resource-limits-cpu](https://docs.replicated.com/partials/linter-rules/_resource-limits-cpu.md): No description available. +- [_resource-limits-memory](https://docs.replicated.com/partials/linter-rules/_resource-limits-memory.md): No description available. +- [_resource-requests-cpu](https://docs.replicated.com/partials/linter-rules/_resource-requests-cpu.md): No description available. +- [_resource-requests-memory](https://docs.replicated.com/partials/linter-rules/_resource-requests-memory.md): No description available. +- [_troubleshoot-spec](https://docs.replicated.com/partials/linter-rules/_troubleshoot-spec.md): No description available. +- [_volume-docker-sock](https://docs.replicated.com/partials/linter-rules/_volume-docker-sock.md): No description available. +- [_volumes-host-paths](https://docs.replicated.com/partials/linter-rules/_volumes-host-paths.md): No description available. +- [_limitation-ec](https://docs.replicated.com/partials/monitoring/_limitation-ec.md): Monitoring applications with Prometheus is not supported for installations with [Replicated Embedded Cluster](/vendor/embedded-overview). +- [_overview-prom](https://docs.replicated.com/partials/monitoring/_overview-prom.md): The KOTS Admin Console can use the open source systems monitoring tool Prometheus to collect metrics on an application and the cluster where the application is installed. +- [_analyzers-note](https://docs.replicated.com/partials/preflights/_analyzers-note.md): For basic examples of checking CPU, memory, and disk capacity, see [Node Resources Analyzer](https://troubleshoot. +- [_http-requests-cr](https://docs.replicated.com/partials/preflights/_http-requests-cr.md): No description available. +- [_http-requests-secret](https://docs.replicated.com/partials/preflights/_http-requests-secret.md): No description available. +- [_k8s-distro-cr](https://docs.replicated.com/partials/preflights/_k8s-distro-cr.md): No description available. +- [_k8s-distro-secret](https://docs.replicated.com/partials/preflights/_k8s-distro-secret.md): No description available. +- [_k8s-version-cr](https://docs.replicated.com/partials/preflights/_k8s-version-cr.md): No description available. +- [_k8s-version-secret](https://docs.replicated.com/partials/preflights/_k8s-version-secret.md): No description available. +- [_mysql-cr](https://docs.replicated.com/partials/preflights/_mysql-cr.md): No description available. +- [_mysql-secret](https://docs.replicated.com/partials/preflights/_mysql-secret.md): No description available. +- [_node-count-cr](https://docs.replicated.com/partials/preflights/_node-count-cr.md): No description available. +- [_node-count-secret](https://docs.replicated.com/partials/preflights/_node-count-secret.md): No description available. 
+- [_node-cpu-cr](https://docs.replicated.com/partials/preflights/_node-cpu-cr.md): No description available.
+- [_node-cpu-secret](https://docs.replicated.com/partials/preflights/_node-cpu-secret.md): No description available.
+- [_node-ephem-storage-cr](https://docs.replicated.com/partials/preflights/_node-ephem-storage-cr.md): No description available.
+- [_node-ephem-storage-secret](https://docs.replicated.com/partials/preflights/_node-ephem-storage-secret.md): No description available.
+- [_node-mem-cr](https://docs.replicated.com/partials/preflights/_node-mem-cr.md): No description available.
+- [_node-mem-secret](https://docs.replicated.com/partials/preflights/_node-mem-secret.md): No description available.
+- [_node-req-cr](https://docs.replicated.com/partials/preflights/_node-req-cr.md): No description available.
+- [_node-req-secret](https://docs.replicated.com/partials/preflights/_node-req-secret.md): No description available.
+- [_node-storage-cr](https://docs.replicated.com/partials/preflights/_node-storage-cr.md): No description available.
+- [_node-storage-secret](https://docs.replicated.com/partials/preflights/_node-storage-secret.md): No description available.
+- [_preflights-add-analyzers](https://docs.replicated.com/partials/preflights/_preflights-add-analyzers.md): You must add analyzers to analyze the data from the collectors that you specified.
+- [_preflights-define-xref](https://docs.replicated.com/partials/preflights/_preflights-define-xref.md): For more information about defining collectors and analyzers, see [Collecting Data](https://troubleshoot.sh).
+- [_preflights-define](https://docs.replicated.com/partials/preflights/_preflights-define.md): Any preflight checks you run are dependent on your application needs.
+- [_preflights-sb-about](https://docs.replicated.com/partials/preflights/_preflights-sb-about.md): Preflight checks and support bundles are provided by the Troubleshoot open source project, which is maintained by Replicated.
+- [_preflights-sb-note](https://docs.replicated.com/partials/preflights/_preflights-sb-note.md): For a comprehensive overview, see [About Preflight Checks and Support Bundles](preflight-support-bundle-about).
+- [_preflights-spec-locations](https://docs.replicated.com/partials/preflights/_preflights-spec-locations.md): For more information about specifications, see [About Specifications](preflight-support-bundle-about#about-specifications) in _About Preflight Checks and Support Bundles_.
+- [_preflights-strict](https://docs.replicated.com/partials/preflights/_preflights-strict.md): If any strict preflight checks are configured, the `--skip-preflights` flag is not honored because the preflight checks must run and contain no failures before the application is deployed.
+- [_step-creds](https://docs.replicated.com/partials/proxy-service/_step-creds.md): Provide read-only credentials for the external private registry in your Replicated account.
+- [_step-custom-domain](https://docs.replicated.com/partials/proxy-service/_step-custom-domain.md): (Optional) Add a custom domain for the proxy registry instead of `proxy.replicated.com`.
+- [_redactors-about](https://docs.replicated.com/partials/redactors/_redactors-about.md): Troubleshoot has built-in redactors to prevent sensitive data from being collected when support bundles are generated.
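+The `_preflights-strict` partial above notes that strict preflight checks must run and pass before deployment. As a minimal sketch of how a strict check can be expressed in a Preflight spec (the analyzer choice, version threshold, and app name here are hypothetical, not taken from the partials):
+
+```yaml
+apiVersion: troubleshoot.sh/v1beta2
+kind: Preflight
+metadata:
+  name: my-app            # hypothetical app name
+spec:
+  analyzers:
+    - clusterVersion:
+        # strict: true makes a failing outcome block deployment,
+        # so --skip-preflights is not honored
+        strict: true
+        outcomes:
+          - fail:
+              when: "< 1.25.0"   # illustrative version threshold
+              message: This application requires Kubernetes 1.25.0 or later.
+          - pass:
+              message: The cluster is running a supported Kubernetes version.
+```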
+- [_required-releases-description](https://docs.replicated.com/partials/releases/_required-releases-description.md): When a release is required, KOTS requires users to upgrade to that version before they can upgrade to a later version.
+- [_required-releases-limitations](https://docs.replicated.com/partials/releases/_required-releases-limitations.md): No description available.
+- [_version-label-reqs-helm](https://docs.replicated.com/partials/releases/_version-label-reqs-helm.md): * The version label for the release must match the version label from one of the `Chart.yaml` files in the release.
+- [_app](https://docs.replicated.com/partials/replicated-cli/_app.md): No description available.
+- [_authorize-with-token-note](https://docs.replicated.com/partials/replicated-cli/_authorize-with-token-note.md): No description available.
+- [_authtype](https://docs.replicated.com/partials/replicated-cli/_authtype.md): No description available.
+- [_chart-yaml-dir-reqs](https://docs.replicated.com/partials/replicated-cli/_chart-yaml-dir-reqs.md): No description available.
+- [_help](https://docs.replicated.com/partials/replicated-cli/_help.md): No description available.
+- [_login](https://docs.replicated.com/partials/replicated-cli/_login.md): No description available.
+- [_logout](https://docs.replicated.com/partials/replicated-cli/_logout.md): No description available.
+- [_output](https://docs.replicated.com/partials/replicated-cli/_output.md): No description available.
+- [_password-stdin](https://docs.replicated.com/partials/replicated-cli/_password-stdin.md): No description available.
+- [_password](https://docs.replicated.com/partials/replicated-cli/_password.md): No description available.
+- [_skip-validation](https://docs.replicated.com/partials/replicated-cli/_skip-validation.md): No description available.
+- [_sudo-install](https://docs.replicated.com/partials/replicated-cli/_sudo-install.md): No description available.
+- [_token-stdin](https://docs.replicated.com/partials/replicated-cli/_token-stdin.md): No description available.
+- [_token](https://docs.replicated.com/partials/replicated-cli/_token.md): No description available.
+- [_username](https://docs.replicated.com/partials/replicated-cli/_username.md): No description available.
+- [_verify-install](https://docs.replicated.com/partials/replicated-cli/_verify-install.md): No description available.
+- [_yaml-dir](https://docs.replicated.com/partials/replicated-cli/_yaml-dir.md): No description available.
+- [_401-unauthorized](https://docs.replicated.com/partials/replicated-sdk/_401-unauthorized.md): No description available.
+- [Chart.yaml](https://docs.replicated.com/partials/replicated-sdk/_dependency-yaml.md): No description available.
+- [_integration-mode-install](https://docs.replicated.com/partials/replicated-sdk/_integration-mode-install.md): You can install the Replicated SDK in integration mode to develop locally against the SDK API without needing to add the SDK to your application, create a release in the Replicated Vendor Portal, or make changes in your environment.
+- [_kots-version-req](https://docs.replicated.com/partials/replicated-sdk/_kots-version-req.md): To install the SDK with a Replicated installer, a minimum KOTS version is required.
+- [_overview](https://docs.replicated.com/partials/replicated-sdk/_overview.md): The Replicated SDK is a Helm chart that can be installed as a small service alongside your application.
+- [_registry-logout](https://docs.replicated.com/partials/replicated-sdk/_registry-logout.md): No description available.
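+Several of the `replicated-sdk` partials above (`Chart.yaml`/`_dependency-yaml`, `_overview`) concern adding the SDK as a Helm dependency of the application chart. A minimal sketch of that `Chart.yaml` entry (the parent chart name and the pinned versions are placeholders):
+
+```yaml
+# Chart.yaml of the parent application chart
+apiVersion: v2
+name: my-app              # placeholder chart name
+version: 1.0.0
+dependencies:
+  - name: replicated
+    repository: oci://registry.replicated.com/library
+    version: 1.0.0        # pin to the SDK version you test against
+```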
+- [values.yaml ](https://docs.replicated.com/partials/replicated-sdk/_sdk-values.md): When a user installs a Helm chart that includes the Replicated SDK as a dependency, a set of default SDK values are included in the `replicated` key of the parent chart's values file. +- [_checkVersion](https://docs.replicated.com/partials/snapshots/_checkVersion.md): Run `velero version --client-only` to check the version of the velero CLI that you installed as part of [Installing the Velero CLI](snapshots-velero-cli-installing). +- [_installVelero](https://docs.replicated.com/partials/snapshots/_installVelero.md): No description available. +- [_limitation-cli-restores](https://docs.replicated.com/partials/snapshots/_limitation-cli-restores.md): Only full backups can be restored using the KOTS CLI. +- [_limitation-dr](https://docs.replicated.com/partials/snapshots/_limitation-dr.md): Only full backups that include both the application and the Admin Console can be restored to a new cluster in disaster recovery scenarios. +- [_limitation-install-method](https://docs.replicated.com/partials/snapshots/_limitation-install-method.md): Snapshots can be restored only to clusters that use the same installation method as the cluster the snapshot was taken from. +- [_limitation-no-ec-support](https://docs.replicated.com/partials/snapshots/_limitation-no-ec-support.md): The KOTS Snapshots feature is supported for existing cluster installations with KOTS and Replicated kURL installations only. +- [_limitation-os](https://docs.replicated.com/partials/snapshots/_limitation-os.md): Snapshots must be restored on the same operating system that the snapshot was taken on. +- [_node-agent-mem-limit](https://docs.replicated.com/partials/snapshots/_node-agent-mem-limit.md): Increase the default memory limit for the node-agent (restic) Pod if your application is particularly large. +- [_registryCredentialsNote](https://docs.replicated.com/partials/snapshots/_registryCredentialsNote.md): No description available. +- [_resticDaemonSet](https://docs.replicated.com/partials/snapshots/_resticDaemonSet.md): No description available. +- [_restore-types](https://docs.replicated.com/partials/snapshots/_restore-types.md): No description available. +- [_restoreTable](https://docs.replicated.com/partials/snapshots/_restoreTable.md): No description available. +- [_step-get-backups](https://docs.replicated.com/partials/snapshots/_step-get-backups.md): Run the [`kubectl kots get backups`](/reference/kots-cli-get-backups) command to get the list of full backups for the instance. +- [_step-restore](https://docs.replicated.com/partials/snapshots/_step-restore.md): No description available. +- [_updateDefaultStorage](https://docs.replicated.com/partials/snapshots/_updateDefaultStorage.md): If Velero is already installed, you can update your storage destination in the Replicated Admin Console. +- [_aggregate-status-intro](https://docs.replicated.com/partials/status-informers/_aggregate-status-intro.md): When you provide more than one Kubernetes resource, Replicated aggregates all resource statuses to display a single application status. +- [_aggregateStatus](https://docs.replicated.com/partials/status-informers/_aggregateStatus.md): No description available. +- [_statusesTable](https://docs.replicated.com/partials/status-informers/_statusesTable.md): No description available. +- [_configmap-note](https://docs.replicated.com/partials/support-bundles/_configmap-note.md): No description available. 
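+The status-informer partials above (`_aggregate-status-intro`, `_statusesTable`) describe how the statuses of multiple Kubernetes resources are aggregated into a single application status. A minimal sketch of declaring status informers in the KOTS Application custom resource (the resource names are hypothetical):
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Application
+metadata:
+  name: my-app
+spec:
+  # Each informer is "<resource-type>/<resource-name>";
+  # KOTS aggregates their statuses into one app status.
+  statusInformers:
+    - deployment/my-app-web
+    - service/my-app-web
+```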
+- [_customize-support-bundle-spec](https://docs.replicated.com/partials/support-bundles/_customize-support-bundle-spec.md): No description available. +- [_deploy-status-cr](https://docs.replicated.com/partials/support-bundles/_deploy-status-cr.md): No description available. +- [_deploy-status-secret](https://docs.replicated.com/partials/support-bundles/_deploy-status-secret.md): No description available. +- [_ec-support-bundle-intro](https://docs.replicated.com/partials/support-bundles/_ec-support-bundle-intro.md): Embedded Cluster includes a default support bundle spec that collects both host- and cluster-level information. +- [_generate-bundle-admin-console](https://docs.replicated.com/partials/support-bundles/_generate-bundle-admin-console.md): The Replicated KOTS Admin Console includes a **Troubleshoot** page where you can generate a support bundle and review remediation suggestions for troubleshooting. +- [_generate-bundle-default-kots](https://docs.replicated.com/partials/support-bundles/_generate-bundle-default-kots.md): For KOTS installations, you can generate a support bundle using the default KOTS spec. +- [_generate-bundle-ec](https://docs.replicated.com/partials/support-bundles/_generate-bundle-ec.md): There are different steps to generate a support bundle depending on the version of Embedded Cluster installed. +- [_generate-bundle-host](https://docs.replicated.com/partials/support-bundles/_generate-bundle-host.md): No description available. +- [_generate-bundle](https://docs.replicated.com/partials/support-bundles/_generate-bundle.md): No description available. +- [_http-requests-cr](https://docs.replicated.com/partials/support-bundles/_http-requests-cr.md): No description available. +- [_http-requests-secret](https://docs.replicated.com/partials/support-bundles/_http-requests-secret.md): No description available. +- [_install-plugin](https://docs.replicated.com/partials/support-bundles/_install-plugin.md): The support-bundle plugin (a kubectl plugin) is required to generate support bundles from the command line. +- [_k8s-version-cr](https://docs.replicated.com/partials/support-bundles/_k8s-version-cr.md): No description available. +- [_k8s-version-secret](https://docs.replicated.com/partials/support-bundles/_k8s-version-secret.md): No description available. +- [_logs-limits-cr](https://docs.replicated.com/partials/support-bundles/_logs-limits-cr.md): No description available. +- [_logs-limits-secret](https://docs.replicated.com/partials/support-bundles/_logs-limits-secret.md): No description available. +- [_logs-selectors-cr](https://docs.replicated.com/partials/support-bundles/_logs-selectors-cr.md): No description available. +- [_logs-selectors-secret](https://docs.replicated.com/partials/support-bundles/_logs-selectors-secret.md): No description available. +- [_node-resources-cr](https://docs.replicated.com/partials/support-bundles/_node-resources-cr.md): No description available. +- [_node-resources-secret](https://docs.replicated.com/partials/support-bundles/_node-resources-secret.md): No description available. +- [_node-status-cr](https://docs.replicated.com/partials/support-bundles/_node-status-cr.md): No description available. +- [_node-status-secret](https://docs.replicated.com/partials/support-bundles/_node-status-secret.md): No description available. +- [_redis-mysql-cr](https://docs.replicated.com/partials/support-bundles/_redis-mysql-cr.md): No description available. 
+- [_redis-mysql-secret](https://docs.replicated.com/partials/support-bundles/_redis-mysql-secret.md): No description available.
+- [_run-pods-cr](https://docs.replicated.com/partials/support-bundles/_run-pods-cr.md): No description available.
+- [_run-pods-secret](https://docs.replicated.com/partials/support-bundles/_run-pods-secret.md): No description available.
+- [_support-bundle-add-analyzers](https://docs.replicated.com/partials/support-bundles/_support-bundle-add-analyzers.md): Add analyzers based on conditions that you expect for your application.
+- [_support-bundle-add-logs](https://docs.replicated.com/partials/support-bundles/_support-bundle-add-logs.md): Replicated recommends adding application Pod logs and setting the collection limits for the number of lines logged.
+- [_support-bundle-custom-collectors](https://docs.replicated.com/partials/support-bundles/_support-bundle-custom-collectors.md): Add any custom collectors to the file.
+- [_go-sprig](https://docs.replicated.com/partials/template-functions/_go-sprig.md): KOTS template functions are based on the Go text/template library.
+- [KOTS Config custom resource](https://docs.replicated.com/partials/template-functions/_integer-comparison.md): No description available.
+- [_ne-comparison](https://docs.replicated.com/partials/template-functions/_ne-comparison.md): In the example below, the `ingress_type` field is displayed on the **Config** page only when the distribution of the cluster is _not_ [Replicated Embedded Cluster](/vendor/embedded-overview).
+- [KOTS Config custom resource](https://docs.replicated.com/partials/template-functions/_string-comparison.md): No description available.
+- [_use-cases](https://docs.replicated.com/partials/template-functions/_use-cases.md): No description available.
+- [_admin-console-air-gap](https://docs.replicated.com/partials/updating/_admin-console-air-gap.md): No description available.
+- [_admin-console](https://docs.replicated.com/partials/updating/_admin-console.md): No description available.
+- [_installerRequirements](https://docs.replicated.com/partials/updating/_installerRequirements.md): * **installer-spec-file**: If you used the `installer-spec-file` flag to pass a `patch.yaml` file.
+- [_upgradePrompt](https://docs.replicated.com/partials/updating/_upgradePrompt.md): (Kubernetes Upgrades Only) If a Kubernetes upgrade is required, the script automatically prints a `Drain local node and apply upgrade?` prompt.
+- [_api-about](https://docs.replicated.com/partials/vendor-api/_api-about.md): The Vendor API is the API for the Vendor Portal.
+- [_team-token-note](https://docs.replicated.com/partials/vendor-api/_team-token-note.md): No description available.
+- [Cron Expressions](https://docs.replicated.com/reference/cron-expressions.md): This topic describes the supported cron expressions that you can use to schedule automatic application update checks and automatic backups in the KOTS Admin Console.
+- [About Custom Resources](https://docs.replicated.com/reference/custom-resource-about.md): You can include custom resources in releases to control the experience for applications installed with Replicated KOTS.
+- [Application](https://docs.replicated.com/reference/custom-resource-application.md): The Application custom resource enables features such as branding, release notes, port forwarding, dashboard buttons, app status indicators, and custom graphs.
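+The `_ne-comparison` partial above describes hiding the `ingress_type` field when the cluster distribution is Embedded Cluster. A sketch of how that condition can be written in the KOTS Config custom resource (the group and item names are illustrative):
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Config
+metadata:
+  name: my-app-config
+spec:
+  groups:
+    - name: ingress
+      title: Ingress
+      items:
+        - name: ingress_type
+          title: Ingress Type
+          type: select_one
+          items:
+            - name: load_balancer
+              title: Load Balancer
+            - name: node_port
+              title: NodePort
+          # Show this field only when the distribution is NOT Embedded Cluster
+          when: 'repl{{ ne Distribution "embedded-cluster" }}'
+```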
+- [Velero Backup Resource for Snapshots](https://docs.replicated.com/reference/custom-resource-backup.md): This topic provides information about the supported fields in the Velero Backup resource for the Replicated KOTS snapshots feature.
+- [Config](https://docs.replicated.com/reference/custom-resource-config.md): The Config custom resource can be provided by a vendor to specify a Config page in the Replicated Admin Console for collecting customer-supplied values and template function rendering.
+- [HelmChart v2](https://docs.replicated.com/reference/custom-resource-helmchart-v2.md): The HelmChart `kots.io/v1beta2` custom resource.
+- [HelmChart v1 (Deprecated)](https://docs.replicated.com/reference/custom-resource-helmchart.md): No description available.
+- [Identity (Beta)](https://docs.replicated.com/reference/custom-resource-identity.md): No description available.
+- [LintConfig](https://docs.replicated.com/reference/custom-resource-lintconfig.md): No description available.
+- [Preflight and Support Bundle](https://docs.replicated.com/reference/custom-resource-preflight.md): You can define preflight checks and support bundle specifications for Replicated KOTS and Helm installations.
+- [Redactor (KOTS Only)](https://docs.replicated.com/reference/custom-resource-redactor.md): This topic describes how to define redactors with the Redactor custom resource.
+- [Embedded Cluster Install Command Options](https://docs.replicated.com/reference/embedded-cluster-install.md): This topic describes the options available with the Embedded Cluster install command.
+- [Embedded Cluster Config](https://docs.replicated.com/reference/embedded-config.md): This topic is a reference for the Replicated Embedded Cluster Config custom resource.
+- [admin-console garbage-collect-images](https://docs.replicated.com/reference/kots-cli-admin-console-garbage-collect-images.md): Starts image garbage collection.
+- [admin-console generate-manifests](https://docs.replicated.com/reference/kots-cli-admin-console-generate-manifests.md): Running this command will create a directory on the workstation containing the Replicated Admin Console manifests.
+- [admin-console](https://docs.replicated.com/reference/kots-cli-admin-console-index.md): Enables access to the KOTS Admin Console from a local machine.
+- [admin-console push-images](https://docs.replicated.com/reference/kots-cli-admin-console-push-images.md): Pushes images from an air gap bundle to a private registry.
+- [admin-console upgrade](https://docs.replicated.com/reference/kots-cli-admin-console-upgrade.md): Upgrades the KOTS Admin Console to match the version of the KOTS CLI.
+- [backup](https://docs.replicated.com/reference/kots-cli-backup-index.md): Create a full instance snapshot for disaster recovery.
+- [backup ls](https://docs.replicated.com/reference/kots-cli-backup-ls.md): No description available.
+- [docker ensure-secret](https://docs.replicated.com/reference/kots-cli-docker-ensure-secret.md): Creates an image pull secret for Docker Hub that the Admin Console can use to avoid [rate limiting](/enterprise/image-registry-rate-limits).
+- [docker](https://docs.replicated.com/reference/kots-cli-docker-index.md): No description available.
+- [download](https://docs.replicated.com/reference/kots-cli-download.md): Retrieves a copy of the application manifests from the cluster, and stores them in a specific directory structure on your workstation.
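+For the HelmChart v2 (`kots.io/v1beta2`) custom resource listed above, a minimal sketch of the resource shape (the chart name, chart version, and release name are placeholders):
+
+```yaml
+apiVersion: kots.io/v1beta2
+kind: HelmChart
+metadata:
+  name: samplechart
+spec:
+  chart:
+    # Must match the name and version in the chart archive's Chart.yaml
+    name: samplechart
+    chartVersion: 3.1.7
+  releaseName: samplechart-release
+  # Values here are merged over the chart's default values at deploy time
+  values: {}
+```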
+- [enable-ha](https://docs.replicated.com/reference/kots-cli-enable-ha.md): (Deprecated) Runs the rqlite StatefulSet as three replicas for data replication and high availability.
+- [get apps](https://docs.replicated.com/reference/kots-cli-get-apps.md): The `kots get apps` command lists installed applications.
+- [get backups](https://docs.replicated.com/reference/kots-cli-get-backups.md): The `kots get backups` command lists available full (instance) snapshots.
+- [get config](https://docs.replicated.com/reference/kots-cli-get-config.md): The `kots get config` command returns the `configValues` file for an application.
+- [get](https://docs.replicated.com/reference/kots-cli-get-index.md): The `kots get` command shows information about one or more resources.
+- [get restores](https://docs.replicated.com/reference/kots-cli-get-restores.md): The `kots get restores` command lists created full snapshot restores.
+- [get versions](https://docs.replicated.com/reference/kots-cli-get-versions.md): The `kots get versions` command lists all versions of an application.
+- [Installing the KOTS CLI](https://docs.replicated.com/reference/kots-cli-getting-started.md): Users can interact with the Replicated KOTS CLI to install and manage applications with Replicated KOTS.
+- [Global flags](https://docs.replicated.com/reference/kots-cli-global-flags.md): All KOTS CLI commands support a set of global flags used to connect to the cluster.
+- [identity-service enable-shared-password](https://docs.replicated.com/reference/kots-cli-identity-service-enable-shared-password.md): Enable the shared password login option in the KOTS Admin Console.
+- [identity-service](https://docs.replicated.com/reference/kots-cli-identity-service-index.md): No description available.
+- [install](https://docs.replicated.com/reference/kots-cli-install.md): Installs the application and the KOTS Admin Console directly to a cluster.
+- [pull](https://docs.replicated.com/reference/kots-cli-pull.md): Running this command will create a directory on the workstation containing the application and Kubernetes manifests.
+- [remove](https://docs.replicated.com/reference/kots-cli-remove.md): Removes an application reference from the KOTS Admin Console.
+- [reset-password](https://docs.replicated.com/reference/kots-cli-reset-password.md): If you deployed an application with the KOTS Admin Console, the `kots reset-password` command changes the bcrypt password hash in the cluster, allowing you to log in again.
+- [reset-tls](https://docs.replicated.com/reference/kots-cli-reset-tls.md): If a bad TLS certificate is uploaded to the KOTS Admin Console or the kotsadm-tls secret is missing, the `kots reset-tls` command reapplies a default self-signed TLS certificate.
+- [restore](https://docs.replicated.com/reference/kots-cli-restore-index.md): Restore full snapshots for disaster recovery, or do a partial restore of the application only or the Replicated Admin Console only.
+- [restore ls](https://docs.replicated.com/reference/kots-cli-restore-ls.md): No description available.
+- [set config](https://docs.replicated.com/reference/kots-cli-set-config.md): The `kots set config` command allows setting values for application config items in the latest release version.
+- [set](https://docs.replicated.com/reference/kots-cli-set-index.md): Configure KOTS resources.
+- [upload](https://docs.replicated.com/reference/kots-cli-upload.md): Upload Kubernetes manifests from the local filesystem, creating a new version of the application that can be deployed.
+- [upstream download](https://docs.replicated.com/reference/kots-cli-upstream-download.md): The `kots upstream download` command retries downloading a failed update of the upstream application.
+- [upstream upgrade](https://docs.replicated.com/reference/kots-cli-upstream-upgrade.md): The `kots upstream upgrade` command fetches the latest version of the upstream application.
+- [upstream](https://docs.replicated.com/reference/kots-cli-upstream.md): KOTS Upstream interface.
+- [velero configure-aws-s3](https://docs.replicated.com/reference/kots-cli-velero-configure-aws-s3.md): Configures snapshots to use an AWS S3 bucket as a storage destination.
+- [velero configure-azure](https://docs.replicated.com/reference/kots-cli-velero-configure-azure.md): Configures snapshots to use an Azure Blob Storage container as a storage destination.
+- [velero configure-gcp](https://docs.replicated.com/reference/kots-cli-velero-configure-gcp.md): Configures snapshots to use a Google Cloud Platform object storage bucket as a storage destination.
+- [velero configure-hostpath](https://docs.replicated.com/reference/kots-cli-velero-configure-hostpath.md): Configures snapshots to use a host path as a storage destination.
+- [velero configure-internal](https://docs.replicated.com/reference/kots-cli-velero-configure-internal.md): No description available.
+- [velero configure-nfs](https://docs.replicated.com/reference/kots-cli-velero-configure-nfs.md): Configures snapshots to use NFS as a storage destination.
+- [velero configure-other-s3](https://docs.replicated.com/reference/kots-cli-velero-configure-other-s3.md): Configures snapshots to use an S3-compatible storage provider, such as MinIO, as a storage destination.
+- [velero ensure-permissions](https://docs.replicated.com/reference/kots-cli-velero-ensure-permissions.md): Ensures that Replicated KOTS has the necessary permissions to access Velero.
+- [velero](https://docs.replicated.com/reference/kots-cli-velero-index.md): The KOTS Velero interface, which configures storage destinations and permissions for backups (snapshots), and prints setup instructions.
+- [velero print-fs-instructions](https://docs.replicated.com/reference/kots-cli-velero-print-fs-instructions.md): No description available.
+- [Linter Rules](https://docs.replicated.com/reference/linter.md): This topic describes the release linter and the linter rules.
+- [replicated api get](https://docs.replicated.com/reference/replicated-cli-api-get.md): No description available.
+- [replicated api patch](https://docs.replicated.com/reference/replicated-cli-api-patch.md): No description available.
+- [replicated api post](https://docs.replicated.com/reference/replicated-cli-api-post.md): No description available.
+- [replicated api put](https://docs.replicated.com/reference/replicated-cli-api-put.md): No description available.
+- [replicated api](https://docs.replicated.com/reference/replicated-cli-api.md): No description available.
+- [replicated app create](https://docs.replicated.com/reference/replicated-cli-app-create.md): No description available.
+- [replicated app ls](https://docs.replicated.com/reference/replicated-cli-app-ls.md): No description available.
+- [replicated app rm](https://docs.replicated.com/reference/replicated-cli-app-rm.md): No description available.
+- [replicated app](https://docs.replicated.com/reference/replicated-cli-app.md): No description available.
+- [replicated channel create](https://docs.replicated.com/reference/replicated-cli-channel-create.md): No description available.
+- [replicated channel demote](https://docs.replicated.com/reference/replicated-cli-channel-demote.md): No description available. +- [replicated channel disable-semantic-versioning](https://docs.replicated.com/reference/replicated-cli-channel-disable-semantic-versioning.md): No description available. +- [replicated channel enable-semantic-versioning](https://docs.replicated.com/reference/replicated-cli-channel-enable-semantic-versioning.md): No description available. +- [replicated channel inspect](https://docs.replicated.com/reference/replicated-cli-channel-inspect.md): No description available. +- [replicated channel ls](https://docs.replicated.com/reference/replicated-cli-channel-ls.md): No description available. +- [replicated channel rm](https://docs.replicated.com/reference/replicated-cli-channel-rm.md): No description available. +- [replicated channel un-demote](https://docs.replicated.com/reference/replicated-cli-channel-un-demote.md): No description available. +- [replicated channel](https://docs.replicated.com/reference/replicated-cli-channel.md): No description available. +- [replicated cluster addon create object-store](https://docs.replicated.com/reference/replicated-cli-cluster-addon-create-object-store.md): Create an object store bucket for a cluster. +- [replicated cluster addon create](https://docs.replicated.com/reference/replicated-cli-cluster-addon-create.md): Create cluster add-ons. +- [replicated cluster addon ls](https://docs.replicated.com/reference/replicated-cli-cluster-addon-ls.md): List cluster add-ons for a cluster. +- [replicated cluster addon rm](https://docs.replicated.com/reference/replicated-cli-cluster-addon-rm.md): Remove cluster add-on by ID. +- [replicated cluster addon](https://docs.replicated.com/reference/replicated-cli-cluster-addon.md): Manage cluster add-ons. +- [replicated cluster create](https://docs.replicated.com/reference/replicated-cli-cluster-create.md): Create test clusters. +- [replicated cluster kubeconfig](https://docs.replicated.com/reference/replicated-cli-cluster-kubeconfig.md): Download credentials for a test cluster. +- [replicated cluster ls](https://docs.replicated.com/reference/replicated-cli-cluster-ls.md): List test clusters. +- [replicated cluster nodegroup ls](https://docs.replicated.com/reference/replicated-cli-cluster-nodegroup-ls.md): List node groups for a cluster. +- [replicated cluster nodegroup](https://docs.replicated.com/reference/replicated-cli-cluster-nodegroup.md): Manage node groups for clusters. +- [replicated cluster port expose](https://docs.replicated.com/reference/replicated-cli-cluster-port-expose.md): Expose a port on a cluster to the public internet. +- [replicated cluster port ls](https://docs.replicated.com/reference/replicated-cli-cluster-port-ls.md): List cluster ports for a cluster. +- [replicated cluster port rm](https://docs.replicated.com/reference/replicated-cli-cluster-port-rm.md): Remove cluster port by ID. +- [replicated cluster port](https://docs.replicated.com/reference/replicated-cli-cluster-port.md): Manage cluster ports. +- [replicated cluster prepare](https://docs.replicated.com/reference/replicated-cli-cluster-prepare.md): Prepare cluster for testing. +- [replicated cluster rm](https://docs.replicated.com/reference/replicated-cli-cluster-rm.md): Remove test clusters. +- [replicated cluster shell](https://docs.replicated.com/reference/replicated-cli-cluster-shell.md): Open a new shell with kubeconfig configured. 
+- [replicated cluster update nodegroup](https://docs.replicated.com/reference/replicated-cli-cluster-update-nodegroup.md): Update a nodegroup for a test cluster. +- [replicated cluster update ttl](https://docs.replicated.com/reference/replicated-cli-cluster-update-ttl.md): Update TTL for a test cluster. +- [replicated cluster update](https://docs.replicated.com/reference/replicated-cli-cluster-update.md): Update cluster settings. +- [replicated cluster upgrade](https://docs.replicated.com/reference/replicated-cli-cluster-upgrade.md): Upgrade a test cluster. +- [replicated cluster versions](https://docs.replicated.com/reference/replicated-cli-cluster-versions.md): List cluster versions. +- [replicated cluster](https://docs.replicated.com/reference/replicated-cli-cluster.md): Manage test Kubernetes clusters. +- [replicated completion](https://docs.replicated.com/reference/replicated-cli-completion.md): No description available. +- [replicated customer archive](https://docs.replicated.com/reference/replicated-cli-customer-archive.md): No description available. +- [replicated customer create](https://docs.replicated.com/reference/replicated-cli-customer-create.md): No description available. +- [replicated customer download-license](https://docs.replicated.com/reference/replicated-cli-customer-download-license.md): No description available. +- [replicated customer inspect](https://docs.replicated.com/reference/replicated-cli-customer-inspect.md): No description available. +- [replicated customer ls](https://docs.replicated.com/reference/replicated-cli-customer-ls.md): No description available. +- [replicated customer update](https://docs.replicated.com/reference/replicated-cli-customer-update.md): No description available. +- [replicated customer](https://docs.replicated.com/reference/replicated-cli-customer.md): No description available. +- [replicated default clear-all](https://docs.replicated.com/reference/replicated-cli-default-clear-all.md): No description available. +- [replicated default clear](https://docs.replicated.com/reference/replicated-cli-default-clear.md): No description available. +- [replicated default set](https://docs.replicated.com/reference/replicated-cli-default-set.md): No description available. +- [replicated default show](https://docs.replicated.com/reference/replicated-cli-default-show.md): No description available. +- [replicated default](https://docs.replicated.com/reference/replicated-cli-default.md): No description available. +- [replicated installer create](https://docs.replicated.com/reference/replicated-cli-installer-create.md): No description available. +- [replicated installer ls](https://docs.replicated.com/reference/replicated-cli-installer-ls.md): No description available. +- [replicated installer](https://docs.replicated.com/reference/replicated-cli-installer.md): No description available. +- [Installing the Replicated CLI](https://docs.replicated.com/reference/replicated-cli-installing.md): Vendors can use the Replicated CLI to manage their applications with Replicated programmatically, rather than using the Replicated vendor portal. +- [replicated instance inspect](https://docs.replicated.com/reference/replicated-cli-instance-inspect.md): No description available. +- [replicated instance ls](https://docs.replicated.com/reference/replicated-cli-instance-ls.md): No description available. +- [replicated instance tag](https://docs.replicated.com/reference/replicated-cli-instance-tag.md): No description available. 
+- [replicated instance](https://docs.replicated.com/reference/replicated-cli-instance.md): No description available.
+- [replicated login](https://docs.replicated.com/reference/replicated-cli-login.md): No description available.
+- [replicated logout](https://docs.replicated.com/reference/replicated-cli-logout.md): No description available.
+- [replicated registry add dockerhub](https://docs.replicated.com/reference/replicated-cli-registry-add-dockerhub.md): No description available.
+- [replicated registry add ecr](https://docs.replicated.com/reference/replicated-cli-registry-add-ecr.md): No description available.
+- [replicated registry add gar](https://docs.replicated.com/reference/replicated-cli-registry-add-gar.md): No description available.
+- [replicated registry add gcr](https://docs.replicated.com/reference/replicated-cli-registry-add-gcr.md): No description available.
+- [replicated registry add ghcr](https://docs.replicated.com/reference/replicated-cli-registry-add-ghcr.md): No description available.
+- [replicated registry add other](https://docs.replicated.com/reference/replicated-cli-registry-add-other.md): No description available.
+- [replicated registry add quay](https://docs.replicated.com/reference/replicated-cli-registry-add-quay.md): Add a quay.io registry.
+- [replicated registry add](https://docs.replicated.com/reference/replicated-cli-registry-add.md): No description available.
+- [replicated registry ls](https://docs.replicated.com/reference/replicated-cli-registry-ls.md): No description available.
+- [replicated registry rm](https://docs.replicated.com/reference/replicated-cli-registry-rm.md): No description available.
+- [replicated registry test](https://docs.replicated.com/reference/replicated-cli-registry-test.md): No description available.
+- [replicated registry](https://docs.replicated.com/reference/replicated-cli-registry.md): No description available.
+- [replicated release compatibility](https://docs.replicated.com/reference/replicated-cli-release-compatibility.md): No description available.
+- [replicated release create](https://docs.replicated.com/reference/replicated-cli-release-create.md): No description available.
+- [replicated release download](https://docs.replicated.com/reference/replicated-cli-release-download.md): Download application manifests for a release.
+- [replicated release inspect](https://docs.replicated.com/reference/replicated-cli-release-inspect.md): No description available.
+- [replicated release lint](https://docs.replicated.com/reference/replicated-cli-release-lint.md): No description available.
+- [replicated release ls](https://docs.replicated.com/reference/replicated-cli-release-ls.md): No description available.
+- [replicated release promote](https://docs.replicated.com/reference/replicated-cli-release-promote.md): No description available.
+- [replicated release test](https://docs.replicated.com/reference/replicated-cli-release-test.md): No description available.
+- [replicated release update](https://docs.replicated.com/reference/replicated-cli-release-update.md): No description available.
+- [replicated release](https://docs.replicated.com/reference/replicated-cli-release.md): No description available.
+- [replicated version upgrade](https://docs.replicated.com/reference/replicated-cli-version-upgrade.md): No description available.
+- [replicated version](https://docs.replicated.com/reference/replicated-cli-version.md): No description available.
+- [replicated vm create](https://docs.replicated.com/reference/replicated-cli-vm-create.md): Create one or more test VMs with specified distribution, version, and configuration options.
+- [replicated vm ls](https://docs.replicated.com/reference/replicated-cli-vm-ls.md): List test VMs and their status, with optional filters for start/end time and terminated VMs.
+- [replicated vm port expose](https://docs.replicated.com/reference/replicated-cli-vm-port-expose.md): Expose a port on a VM to the public internet.
+- [replicated vm port ls](https://docs.replicated.com/reference/replicated-cli-vm-port-ls.md): List ports for a VM.
+- [replicated vm port rm](https://docs.replicated.com/reference/replicated-cli-vm-port-rm.md): Remove a VM port by ID.
+- [replicated vm port](https://docs.replicated.com/reference/replicated-cli-vm-port.md): Manage VM ports.
+- [replicated vm rm](https://docs.replicated.com/reference/replicated-cli-vm-rm.md): Remove test VM(s) immediately, with options to filter by name, tag, or remove all VMs.
+- [replicated vm update ttl](https://docs.replicated.com/reference/replicated-cli-vm-update-ttl.md): Update TTL for a test VM.
+- [replicated vm update](https://docs.replicated.com/reference/replicated-cli-vm-update.md): Update VM settings.
+- [replicated vm versions](https://docs.replicated.com/reference/replicated-cli-vm-versions.md): List available VM versions.
+- [replicated vm](https://docs.replicated.com/reference/replicated-cli-vm.md): Manage test virtual machines.
+- [Replicated SDK API](https://docs.replicated.com/reference/replicated-sdk-apis.md): The Replicated SDK provides an API that you can use to embed Replicated functionality in your Helm chart application.
+- [replicated](https://docs.replicated.com/reference/replicated.md): No description available.
+- [About Template Functions](https://docs.replicated.com/reference/template-functions-about.md): This topic describes Replicated KOTS template functions, including information about use cases, template function contexts, and syntax.
+- [Config Context](https://docs.replicated.com/reference/template-functions-config-context.md): No description available.
+- [Template Function Examples](https://docs.replicated.com/reference/template-functions-examples.md): This topic provides examples of how to use Replicated KOTS template functions in various common use cases.
+- [Identity Context](https://docs.replicated.com/reference/template-functions-identity-context.md): No description available.
+- [kURL Context](https://docs.replicated.com/reference/template-functions-kurl-context.md): For applications installed in embedded clusters created with Replicated kURL, you can use template functions to show all options the cluster was installed with.
+- [License Context](https://docs.replicated.com/reference/template-functions-license-context.md): No description available.
+- [Static Context](https://docs.replicated.com/reference/template-functions-static-context.md): Many of the utility functions provided come from sprig, a third-party library of Go template functions.
+- [Using the Vendor API v3](https://docs.replicated.com/reference/vendor-api-using.md): This topic describes how to use Replicated Vendor API authentication tokens to make API calls.
+- [Adding Links to the Dashboard](https://docs.replicated.com/vendor/admin-console-adding-buttons-links.md): This topic describes how to use the Kubernetes SIG Application custom resource to add links to the Replicated KOTS Admin Console dashboard.
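+For the "Adding Links to the Dashboard" topic above, a sketch of the Kubernetes SIG Application custom resource that adds a dashboard link (the app name and URL are placeholders):
+
+```yaml
+apiVersion: app.k8s.io/v1beta1
+kind: Application
+metadata:
+  name: my-app
+spec:
+  descriptor:
+    links:
+      # A link with the description "Open App" is surfaced as a
+      # button on the Admin Console dashboard
+      - description: Open App
+        url: "http://my-app"
+```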
+- [Customizing the Application Icon](https://docs.replicated.com/vendor/admin-console-customize-app-icon.md): You can add a custom application icon that displays in the Replicated Admin Console and the download portal. +- [Creating and Editing Configuration Fields](https://docs.replicated.com/vendor/admin-console-customize-config-screen.md): This topic describes how to use the KOTS Config custom resource manifest file to add and edit fields in the KOTS Admin Console configuration screen. +- [Adding Resource Status Informers](https://docs.replicated.com/vendor/admin-console-display-app-status.md): This topic describes how to add status informers for your application. +- [Port Forwarding Services with KOTS](https://docs.replicated.com/vendor/admin-console-port-forward.md): This topic describes how to add one or more ports to the Replicated KOTS port forward tunnel by configuring the `ports` key in the KOTS Application custom resource. +- [Adding Custom Graphs](https://docs.replicated.com/vendor/admin-console-prometheus-monitoring.md): This topic describes how to customize the graphs that are displayed on the Replicated Admin Console dashboard. +- [About Integrating with CI/CD](https://docs.replicated.com/vendor/ci-overview.md): This topic provides an introduction to integrating Replicated CLI commands in your continuous integration and continuous delivery (CI/CD) pipelines, including Replicated's best practices and recommendations. +- [Integrating Replicated GitHub Actions](https://docs.replicated.com/vendor/ci-workflows-github-actions.md): This topic describes how to integrate Replicated's custom GitHub actions into continuous integration and continuous delivery (CI/CD) workflows that use the GitHub Actions platform. +- [Recommended CI/CD Workflows](https://docs.replicated.com/vendor/ci-workflows.md): This topic provides Replicated's recommended development and release workflows for your continuous integration and continuous delivery (CI/CD) pipelines. +- [Viewing Compatibility Matrix Usage History](https://docs.replicated.com/vendor/compatibility-matrix-usage.md): No description available. +- [About the Configuration Screen](https://docs.replicated.com/vendor/config-screen-about.md): This topic describes the configuration screen on the Config tab in the Replicated Admin Console. +- [Using Conditional Statements in Configuration Fields](https://docs.replicated.com/vendor/config-screen-conditional.md): This topic describes how to use Replicated KOTS template functions in the Config custom resource to conditionally show or hide configuration fields for your application on the Replicated KOTS Admin Console **Config** page. +- [Mapping User-Supplied Values](https://docs.replicated.com/vendor/config-screen-map-inputs.md): This topic describes how to map the values that your users provide in the Replicated Admin Console configuration screen to your application. +- [Using Custom Domains](https://docs.replicated.com/vendor/custom-domains-using.md): This topic describes how to use the Replicated Vendor Portal to add and manage custom domains to alias the Replicated registry, the Replicated proxy registry, the Replicated app service, and the download portal. +- [About Custom Domains](https://docs.replicated.com/vendor/custom-domains.md): This topic provides an overview and the limitations of using custom domains to alias the Replicated private registry, Replicated proxy registry, Replicated app service, and the Download Portal. 
+- [Configuring Custom Metrics (Beta)](https://docs.replicated.com/vendor/custom-metrics.md): This topic describes how to configure an application to send custom metrics to the Replicated Vendor Portal.
+- [Adoption Report](https://docs.replicated.com/vendor/customer-adoption.md): This topic describes the insights in the **Adoption** section on the Replicated Vendor Portal **Dashboard** page.
+- [Customer Reporting](https://docs.replicated.com/vendor/customer-reporting.md): This topic describes the customer and instance data displayed in the **Customers > Reporting** page of the Replicated Vendor Portal.
+- [Data Availability and Continuity](https://docs.replicated.com/vendor/data-availability.md): Replicated uses redundancy and a cloud-native architecture in support of availability and continuity of vendor data.
+- [About Managing Stateful Services](https://docs.replicated.com/vendor/database-config-adding-options.md): This topic provides recommendations for managing stateful services that you install into existing clusters.
+- [Disaster Recovery for Embedded Cluster (Alpha)](https://docs.replicated.com/vendor/embedded-disaster-recovery.md): This topic describes the disaster recovery feature for Replicated Embedded Cluster, including how to enable disaster recovery for your application.
+- [Embedded Cluster Overview](https://docs.replicated.com/vendor/embedded-overview.md): This topic provides an introduction to Replicated Embedded Cluster, including a description of the built-in extensions installed by Embedded Cluster, an overview of the Embedded Cluster single-node and multi-node architecture, and requirements and limitations.
+- [Using Embedded Cluster](https://docs.replicated.com/vendor/embedded-using.md): This topic provides information about using Replicated Embedded Cluster, including how to get started, configure Embedded Cluster, access the cluster using kubectl, and more.
+- [Using the Proxy Registry with Helm Installations](https://docs.replicated.com/vendor/helm-image-registry.md): This topic describes how to use the Replicated proxy registry to proxy images for installations with the Helm CLI.
+- [Installing and Updating with Helm in Air Gap Environments](https://docs.replicated.com/vendor/helm-install-airgap.md): Replicated supports installing and updating Helm charts in air gap environments with no outbound internet access.
+- [About Helm Installations with Replicated](https://docs.replicated.com/vendor/helm-install-overview.md): This topic provides an introduction to Helm installations for applications distributed with Replicated.
+- [Packaging a Helm Chart for a Release](https://docs.replicated.com/vendor/helm-install-release.md): This topic describes how to package a Helm chart and the Replicated SDK into a chart archive that can be added to a release.
+- [Troubleshooting Helm Installations with Replicated](https://docs.replicated.com/vendor/helm-install-troubleshooting.md): This topic provides troubleshooting information for common issues related to performing installations and upgrades with the Helm CLI.
+- [Helm global.replicated Values Schema](https://docs.replicated.com/vendor/helm-install-values-schema.md): This topic describes the `global.replicated` values schema.
+- [About Distributing Helm Charts with KOTS](https://docs.replicated.com/vendor/helm-native-about.md): This topic provides an overview of how Replicated KOTS deploys Helm charts, including an introduction to the KOTS HelmChart custom resource, limitations of deploying Helm charts with KOTS, and more.
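+For the proxy-registry topics above, image references are pulled through `proxy.replicated.com`. A sketch of what a proxied image reference can look like in a chart's `values.yaml` (the app slug and upstream image are placeholders):
+
+```yaml
+# values.yaml
+image:
+  # Format: proxy.replicated.com/proxy/<app-slug>/<upstream-image>
+  registry: proxy.replicated.com
+  repository: proxy/my-app/quay.io/my-org/api-server
+  tag: v1.0.1
+```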
+- [Configuring the HelmChart Custom Resource v2](https://docs.replicated.com/vendor/helm-native-v2-using.md): This topic describes how to configure the Replicated HelmChart custom resource version `kots.io/v1beta2`.
+- [Example: Including Optional Helm Charts](https://docs.replicated.com/vendor/helm-optional-charts.md): This topic describes using optional Helm charts in your application.
+- [Setting Helm Values with KOTS](https://docs.replicated.com/vendor/helm-optional-value-keys.md): This topic describes how to use the Replicated KOTS HelmChart custom resource to set and delete values in `values.yaml` files.
+- [Packaging Air Gap Bundles for Helm Charts](https://docs.replicated.com/vendor/helm-packaging-airgap-bundles.md): This topic describes how to package and build air gap bundles for releases that contain one or more Helm charts.
+- [Migrating Existing Installations to HelmChart v2](https://docs.replicated.com/vendor/helm-v2-migrate.md): This topic describes how to migrate existing Replicated KOTS installations to the KOTS HelmChart `kots.io/v1beta2` custom resource.
+- [Enabling and Configuring Identity Service (Beta)](https://docs.replicated.com/vendor/identity-service-configuring.md): No description available.
+- [Enabling and Understanding Application Status](https://docs.replicated.com/vendor/insights-app-status.md): This topic describes how to configure your application so that you can view the status of application instances in the Replicated Vendor Portal.
+- [Installing with Helm](https://docs.replicated.com/vendor/install-with-helm.md): This topic describes how to use Helm to install releases that contain one or more Helm charts.
+- [Installer History](https://docs.replicated.com/vendor/installer-history.md): No description available.
+- [Export Customer and Instance Data](https://docs.replicated.com/vendor/instance-data-export.md): This topic describes how to download and export customer and instance data from the Replicated Vendor Portal.
+- [Instance Details](https://docs.replicated.com/vendor/instance-insights-details.md): This topic describes using the Replicated Vendor Portal to quickly understand the recent events and performance of application instances installed in your customers' environments.
+- [About Instance and Event Data](https://docs.replicated.com/vendor/instance-insights-event-data.md): This topic provides an overview of the customer and instance insights that you can view in the Replicated Vendor Portal.
+- [Configuring Instance Notifications (Beta)](https://docs.replicated.com/vendor/instance-notifications-config.md): No description available.
+- [Replicated FAQs](https://docs.replicated.com/vendor/kots-faq.md): This topic lists frequently-asked questions (FAQs) for different components of the Replicated Platform.
+- [Introduction to kURL](https://docs.replicated.com/vendor/kurl-about.md): No description available.
+- [Exposing Services Using NodePorts](https://docs.replicated.com/vendor/kurl-nodeport-services.md): No description available.
+- [Resetting a kURL Cluster](https://docs.replicated.com/vendor/kurl-reset.md): No description available.
+- [About Community Licenses](https://docs.replicated.com/vendor/licenses-about-types.md): This topic describes community licenses.
+- [About Customers and Licensing](https://docs.replicated.com/vendor/licenses-about.md): This topic provides an overview of customers and licenses in the Replicated Platform.
+- [Managing Customer License Fields](https://docs.replicated.com/vendor/licenses-adding-custom-fields.md): This topic describes how to manage customer license fields in the Replicated Vendor Portal, including how to add custom fields and set initial values for the built-in fields.
+- [Downloading Customer Licenses](https://docs.replicated.com/vendor/licenses-download.md): This topic describes how to download a license file from the Replicated Vendor Portal.
+- [Managing Install Types for a License](https://docs.replicated.com/vendor/licenses-install-types.md): This topic describes how to manage which installation types and options are enabled for a license.
+- [Checking Entitlements in Helm Charts Before Deployment](https://docs.replicated.com/vendor/licenses-reference-helm.md): This topic describes how to check license entitlements before a Helm chart is installed or upgraded.
+- [Querying Entitlements with the KOTS License API](https://docs.replicated.com/vendor/licenses-reference-kots-runtime.md): This topic describes how to use the Replicated KOTS License API to query license fields during runtime.
+- [Querying Entitlements with the Replicated SDK API](https://docs.replicated.com/vendor/licenses-reference-sdk.md): This topic describes how to query license entitlements at runtime using the Replicated SDK in-cluster API.
+- [Checking Entitlements in Preflights with KOTS Template Functions](https://docs.replicated.com/vendor/licenses-referencing-fields.md): This topic describes how to check custom entitlements before installation or upgrade using preflight checks and KOTS template functions in the License context.
+- [Built-In License Fields](https://docs.replicated.com/vendor/licenses-using-builtin-fields.md): This topic describes the built-in license fields that appear in customer licenses for applications distributed with Replicated.
+- [Verifying License Field Signatures with the Replicated SDK API](https://docs.replicated.com/vendor/licenses-verify-fields-sdk-api.md): This topic describes how to verify the signatures of license fields when checking customer license entitlements with the Replicated SDK.
+- [Application Namespaces](https://docs.replicated.com/vendor/namespaces.md): Replicated strongly recommends that applications are architected to deploy a single application into a single namespace when possible.
+- [Offsite Data Backup](https://docs.replicated.com/vendor/offsite-backup.md): No description available.
+- [Defining Additional Images](https://docs.replicated.com/vendor/operator-defining-additional-images.md): This topic describes how to define additional images to be included in the `.airgap` bundle.
+- [Defining Additional Namespaces](https://docs.replicated.com/vendor/operator-defining-additional-namespaces.md): Operators often need to be able to manage resources in multiple namespaces in the cluster.
+- [About Packaging a Kubernetes Operator Application](https://docs.replicated.com/vendor/operator-packaging-about.md): Kubernetes Operators can be packaged and delivered as an application using the same methods as other Kubernetes applications.
+- [Referencing Images](https://docs.replicated.com/vendor/operator-referencing-images.md): This topic explains how to support the use of private image registries for applications that are packaged with Kubernetes Operators.
+- [Orchestrating Resource Deployment](https://docs.replicated.com/vendor/orchestrating-resource-deployment.md): This topic describes how to orchestrate the deployment order of resources deployed as part of your application.
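+For the "Defining Additional Images" topic above, a sketch of listing operator-deployed images in the KOTS Application custom resource so they are included in the `.airgap` bundle (the image references are placeholders):
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Application
+metadata:
+  name: my-operator
+spec:
+  # Images pulled at runtime by the operator, rather than referenced
+  # in the release manifests, must be listed here to be bundled.
+  additionalImages:
+    - quay.io/my-org/worker:1.0.0
+    - quay.io/my-org/migration-runner:1.0.0
+```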
+- [Excluding MinIO from Air Gap Bundles (Beta)](https://docs.replicated.com/vendor/packaging-air-gap-excluding-minio.md): The Replicated KOTS Admin Console requires an S3-compatible object store to store application archives and support bundles.
+- [Cleaning Up Kubernetes Jobs](https://docs.replicated.com/vendor/packaging-cleaning-up-jobs.md): This topic describes how to use the Replicated KOTS `kots.io/hook-delete-policy` annotation.
+- [Creating a kURL Installer](https://docs.replicated.com/vendor/packaging-embedded-kubernetes.md): No description available.
+- [Conditionally Including or Excluding Resources](https://docs.replicated.com/vendor/packaging-include-resources.md): This topic describes how to include or exclude optional application resources based on one or more conditional statements.
+- [Adding Cluster Ingress Options](https://docs.replicated.com/vendor/packaging-ingress.md): When delivering a configurable application, ingress can be challenging as it is very cluster-specific.
+- [About Selecting Storage Add-ons](https://docs.replicated.com/vendor/packaging-installer-storage.md): No description available.
+- [Setting Minimum and Target Versions for KOTS](https://docs.replicated.com/vendor/packaging-kots-versions.md): This topic describes how to set minimum and target versions for Replicated KOTS in the KOTS [Application](/reference/custom-resource-application) custom resource.
+- [Connecting to an External Registry](https://docs.replicated.com/vendor/packaging-private-images.md): This topic describes how to add credentials for an external private registry using the Replicated Vendor Portal or Replicated CLI.
+- [Replicated Registry Security](https://docs.replicated.com/vendor/packaging-private-registry-security.md): This document lists the security measures and processes in place to ensure that images pushed to the Replicated registry remain private.
+- [Connecting to a Public Registry through the Proxy Registry](https://docs.replicated.com/vendor/packaging-public-images.md): This topic describes how to pull images from public registries using the Replicated proxy registry.
+- [Configuring KOTS RBAC](https://docs.replicated.com/vendor/packaging-rbac.md): This topic describes role-based access control (RBAC) for Replicated KOTS in existing cluster installations.
+- [Using TLS Certificates](https://docs.replicated.com/vendor/packaging-using-tls-certs.md): No description available.
+- [Customer Application Deployment Questionnaire](https://docs.replicated.com/vendor/planning-questionnaire.md): No description available.
+- [Data Transmission Policy](https://docs.replicated.com/vendor/policies-data-transmission.md): A Replicated installation connects to a Replicated-hosted endpoint periodically to perform various tasks including checking for updates and synchronizing the installed license properties.
+- [Infrastructure and Subprocessor Providers](https://docs.replicated.com/vendor/policies-infrastructure-and-subprocessors.md): This list describes the infrastructure environment, subprocessors, and other entities material to the Replicated products and services.
+- [Support Lifecycle Policy](https://docs.replicated.com/vendor/policies-support-lifecycle.md): Replicated will provide support for products per our terms and services until that product is noted as End of Life (EOL).
+- [Vulnerability Patch Policy](https://docs.replicated.com/vendor/policies-vulnerability-patch.md): While it’s our goal to distribute vulnerability-free versions of all components, this isn’t always possible.
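+For the "Cleaning Up Kubernetes Jobs" topic above, a sketch of the `kots.io/hook-delete-policy` annotation on a Job (the Job itself is hypothetical):
+
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: db-migrate
+  annotations:
+    # Delete this Job after it succeeds or fails so the next
+    # deploy is not blocked by an immutable completed Job
+    kots.io/hook-delete-policy: hook-succeeded, hook-failed
+spec:
+  template:
+    spec:
+      restartPolicy: Never
+      containers:
+        - name: migrate
+          image: quay.io/my-org/migrations:1.0.0
+```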
+- [Defining Preflight Checks](https://docs.replicated.com/vendor/preflight-defining.md): This topic describes how to define preflight checks in Helm and Kubernetes manifest-based applications. +- [Example Preflight Specs](https://docs.replicated.com/vendor/preflight-examples.md): This section includes common examples of preflight check specifications. +- [Customizing Host Preflight Checks for kURL](https://docs.replicated.com/vendor/preflight-host-preflights.md): This topic provides information about how to customize host preflight checks for installations with Replicated kURL. +- [Running Preflight Checks for Helm Installations](https://docs.replicated.com/vendor/preflight-running.md): This topic describes how to use the preflight kubectl plugin to run preflight checks for applications installed with the Helm CLI. +- [About Preflight Checks and Support Bundles](https://docs.replicated.com/vendor/preflight-support-bundle-about.md): This topic provides an introduction to preflight checks and support bundles, which are provided by the [Troubleshoot](https://troubleshoot. +- [About the Replicated Proxy Registry](https://docs.replicated.com/vendor/private-images-about.md): This topic describes how the Replicated proxy registry can be used to grant proxy access to your application's private images or allow pull-through access to public images. +- [Using the Proxy Registry with KOTS Installations](https://docs.replicated.com/vendor/private-images-kots.md): This topic describes how to use the Replicated proxy registry with applications deployed with Replicated KOTS. +- [Using the Replicated Registry for KOTS Installations](https://docs.replicated.com/vendor/private-images-replicated.md): This topic describes how to push images to the Replicated private registry. +- [Using Image Tags and Digests](https://docs.replicated.com/vendor/private-images-tags-digests.md): This topic describes using image tags and digests with your application images. +- [Replicated Quick Start](https://docs.replicated.com/vendor/quick-start.md): Welcome! +- [About Channels and Releases](https://docs.replicated.com/vendor/releases-about.md): This topic describes channels and releases, including information about the **Releases** and **Channels** pages in the Replicated Vendor Portal. +- [Creating and Editing Channels](https://docs.replicated.com/vendor/releases-creating-channels.md): This topic describes how to create and edit channels using the Replicated Vendor Portal. +- [Managing Releases with the CLI](https://docs.replicated.com/vendor/releases-creating-cli.md): This topic describes how to use the Replicated CLI to create and promote releases. +- [Creating and Managing Customers](https://docs.replicated.com/vendor/releases-creating-customer.md): This topic describes how to create and manage customers in the Replicated Vendor Portal. +- [Managing Releases with the Vendor Portal](https://docs.replicated.com/vendor/releases-creating-releases.md): This topic describes how to use the Replicated Vendor Portal to create and promote releases, edit releases, edit release properties, and archive releases. +- [Downloading Assets from the Download Portal](https://docs.replicated.com/vendor/releases-share-download-portal.md): This topic describes how to download customer license files, air gap bundles, and other assets from the Replicated Download Portal. 
+- [Finding Installation Commands for a Release](https://docs.replicated.com/vendor/releases-sharing-license-install-script.md): This topic describes where to find the installation commands and instructions for releases in the Replicated Vendor Portal. +- [Generating API Tokens](https://docs.replicated.com/vendor/replicated-api-tokens.md): This topic describes the available types of API tokens and how to generate them for use with the Replicated CLI and Replicated Vendor API v3. +- [Replicated Onboarding](https://docs.replicated.com/vendor/replicated-onboarding.md): This topic describes how to onboard applications to the Replicated Platform. +- [Installing the SDK in Air Gap Environments](https://docs.replicated.com/vendor/replicated-sdk-airgap.md): This topic explains how to install the Replicated SDK in air gap environments by enabling air gap mode. +- [Customizing the Replicated SDK](https://docs.replicated.com/vendor/replicated-sdk-customizing.md): This topic describes various ways to customize the Replicated SDK, including customizing RBAC, setting environment variables, adding tolerations, and more. +- [Developing Against the SDK API](https://docs.replicated.com/vendor/replicated-sdk-development.md): This topic describes how to develop against the SDK API to test changes locally. +- [Installing the Replicated SDK](https://docs.replicated.com/vendor/replicated-sdk-installing.md): This topic describes the methods for distributing and installing the Replicated SDK. +- [About the Replicated SDK](https://docs.replicated.com/vendor/replicated-sdk-overview.md): This topic provides an introduction to using the Replicated SDK with your application. +- [SLSA Provenance Validation Process for the Replicated SDK](https://docs.replicated.com/vendor/replicated-sdk-slsa-validating.md): This topic describes the process to perform provenance validation on the Replicated SDK. +- [Templating Annotations](https://docs.replicated.com/vendor/resources-annotations-templating.md): This topic describes how to use Replicated KOTS template functions to template annotations for resources and objects based on user-supplied values. +- [Configuring Snapshots](https://docs.replicated.com/vendor/snapshots-configuring-backups.md): This topic provides information about how to configure the Velero Backup resource to enable Replicated KOTS snapshots for an application. +- [Configuring Backup and Restore Hooks for Snapshots](https://docs.replicated.com/vendor/snapshots-hooks.md): This topic describes the use of custom backup and restore hooks and demonstrates a common example. +- [About Backup and Restore with Snapshots](https://docs.replicated.com/vendor/snapshots-overview.md): This topic provides an introduction to the Replicated KOTS snapshots feature for backup and restore. +- [Adding and Customizing Support Bundles](https://docs.replicated.com/vendor/support-bundle-customizing.md): This topic describes how to add a default support bundle spec to a release for your application. +- [Generating Support Bundles for Embedded Cluster](https://docs.replicated.com/vendor/support-bundle-embedded.md): This topic describes how to generate a support bundle that includes cluster- and host-level information for [Replicated Embedded Cluster](/vendor/embedded-overview) installations. +- [Example Support Bundle Specs](https://docs.replicated.com/vendor/support-bundle-examples.md): This topic includes common examples of support bundle specifications. 
+- [Generating Support Bundles](https://docs.replicated.com/vendor/support-bundle-generating.md): This topic describes how to generate support bundles from the command line using the kubectl support-bundle plugin. +- [Enabling Support Bundle Uploads (Beta)](https://docs.replicated.com/vendor/support-enabling-direct-bundle-uploads.md): No description available. +- [Generating Host Bundles for kURL](https://docs.replicated.com/vendor/support-host-support-bundles.md): This topic describes how to configure a host support bundle spec for Replicated kURL installations. +- [Inspecting Support Bundles](https://docs.replicated.com/vendor/support-inspecting-support-bundles.md): You can use the Vendor Portal to get a visual analysis of customer support bundles and use the file inspector to drill down into the details and log files. +- [About Creating Modular Support Bundle Specs](https://docs.replicated.com/vendor/support-modular-support-bundle-specs.md): This topic describes how to use a modular approach to creating support bundle specs. +- [Making Support Bundle Specs Available Online](https://docs.replicated.com/vendor/support-online-support-bundle-specs.md): This topic describes how to make your application's support bundle specs available online as well as how to link to online specs. +- [Submitting a Support Request](https://docs.replicated.com/vendor/support-submit-request.md): You can submit a support request and a support bundle using the Replicated Vendor Portal. +- [Managing Collab Repository Access](https://docs.replicated.com/vendor/team-management-github-username.md): This topic describes how to add users to the Replicated collab GitHub repository automatically through the Replicated Vendor Portal. +- [Managing Google Authentication](https://docs.replicated.com/vendor/team-management-google-auth.md): This topic describes the Google authentication options that you can configure to control access to the Replicated Vendor Portal. +- [Configuring RBAC Policies](https://docs.replicated.com/vendor/team-management-rbac-configuring.md): This topic describes how to use role-based access policies (RBAC) to grant or deny team members permissions to use Replicated services in the Replicated Vendor Portal. +- [RBAC Resource Names](https://docs.replicated.com/vendor/team-management-rbac-resource-names.md): No description available. +- [Managing SAML Authentication](https://docs.replicated.com/vendor/team-management-saml-auth.md): This topic describes how to enable or disable SAML authentication for the Replicated Vendor Portal. +- [Configuring a Slack Webhook (Beta)](https://docs.replicated.com/vendor/team-management-slack-config.md): As a vendor, anyone on your team can set up Slack notifications, which are sent to a shared Slack channel. +- [Managing Two-Factor Authentication](https://docs.replicated.com/vendor/team-management-two-factor-auth.md): This topic describes how to enable and disable Replicated two-factor authentication for individual and team accounts in the Replicated Vendor Portal. +- [Managing Team Members](https://docs.replicated.com/vendor/team-management.md): This topic describes how to manage team members in the Replicated Vendor Portal, such as inviting and removing members, and editing permissions. +- [Collecting Telemetry for Air Gap Instances](https://docs.replicated.com/vendor/telemetry-air-gap.md): This topic describes how to collect telemetry for instances in air gap environments. 
+- [About Compatibility Matrix](https://docs.replicated.com/vendor/testing-about.md): This topic describes Replicated Compatibility Matrix, including use cases, billing, limitations, and more. +- [Compatibility Matrix Cluster Add-ons (Alpha)](https://docs.replicated.com/vendor/testing-cluster-addons.md): This topic describes the supported cluster add-ons for Replicated Compatibility Matrix. +- [Using Compatibility Matrix](https://docs.replicated.com/vendor/testing-how-to.md): This topic describes how to use Replicated Compatibility Matrix to create ephemeral clusters. +- [Accessing Your Application](https://docs.replicated.com/vendor/testing-ingress.md): This topic describes the networking options for accessing applications deployed on clusters created with Replicated Compatibility Matrix. +- [Compatibility Matrix Pricing](https://docs.replicated.com/vendor/testing-pricing.md): This topic describes the pricing for Replicated Compatibility Matrix. +- [Supported Compatibility Matrix Cluster Types](https://docs.replicated.com/vendor/testing-supported-clusters.md): This topic describes the supported Kubernetes distributions, Kubernetes versions, instance types, nodes, limitations, and common use cases for clusters created with Replicated Compatibility Matrix. +- [Example: Adding Database Configuration Options](https://docs.replicated.com/vendor/tutorial-adding-db-config.md): In this tutorial, we'll explore ways to give your end user the option to either embed a database instance with the application, or connect your application to an external database instance that they will manage. +- [Step 2: Create an Application](https://docs.replicated.com/vendor/tutorial-cli-create-app.md): After you install the Replicated CLI and create an API token, you can use the CLI to create a new application. +- [Step 5: Create a Customer](https://docs.replicated.com/vendor/tutorial-cli-create-customer.md): After promoting the first release for the `cli-tutorial` application, create a customer so that you can install the application. +- [Step 8: Create a New Version](https://docs.replicated.com/vendor/tutorial-cli-create-new-version.md): In this step, you make an edit to the Config custom resource manifest file in the `replicated-cli-tutorial/manifests` directory for the `cli-tutorial` application to create a new field on the **Config** page in the Admin Console. +- [Step 4: Create a Release](https://docs.replicated.com/vendor/tutorial-cli-create-release.md): Now that you have the manifest files for the sample Kubernetes application, you can create a release for the `cli-tutorial` application and promote the release to the Unstable channel. +- [Step 7: Configure the Application](https://docs.replicated.com/vendor/tutorial-cli-deploy-app.md): After you install KOTS, you can log in to the KOTS Admin Console. +- [Step 6: Install KOTS and the Application](https://docs.replicated.com/vendor/tutorial-cli-install-app-manager.md): The next step is to test the installation process for the application release that you promoted. +- [Step 1: Install the Replicated CLI](https://docs.replicated.com/vendor/tutorial-cli-install-cli.md): In this tutorial, you use the Replicated CLI to create and promote releases for a sample application with Replicated. +- [Step 3: Get the Sample Manifests](https://docs.replicated.com/vendor/tutorial-cli-manifests.md): To create a release for the `cli-tutorial` application, first create the Kubernetes manifest files for the application. 
+- [Introduction and Setup](https://docs.replicated.com/vendor/tutorial-cli-setup.md): No description available. +- [Step 9: Update the Application](https://docs.replicated.com/vendor/tutorial-cli-update-app.md): To test the new release that you promoted, return to the Admin Console in a browser to update the application. +- [Step 2: Create an Application](https://docs.replicated.com/vendor/tutorial-config-create-app.md): Next, install the Replicated CLI and then create an application. +- [Step 5: Create a KOTS-Enabled Customer](https://docs.replicated.com/vendor/tutorial-config-create-customer.md): After promoting the release, create a customer with the KOTS entitlement so that you can install the release with KOTS. +- [Step 4: Add the Chart Archive to a Release](https://docs.replicated.com/vendor/tutorial-config-create-release.md): Next, add the Helm chart archive to a new release for the application in the Replicated vendor platform. +- [Step 1: Get the Sample Chart and Test](https://docs.replicated.com/vendor/tutorial-config-get-chart.md): To begin, get the sample Grafana Helm chart from Bitnami, install the chart in your cluster using the Helm CLI, and then uninstall. +- [Step 6: Install the Release with KOTS](https://docs.replicated.com/vendor/tutorial-config-install-kots.md): Next, get the KOTS installation command from the Unstable channel in the Vendor Portal and then install the release using the customer license that you downloaded. +- [Step 3: Package the Helm Chart](https://docs.replicated.com/vendor/tutorial-config-package-chart.md): Next, add the Replicated SDK as a dependency of the Helm chart and then package the chart into a `. +- [Introduction and Setup](https://docs.replicated.com/vendor/tutorial-config-setup.md): This topic provides a summary of the goals and outcomes for the tutorial and also lists the prerequisites to set up your environment before you begin. +- [Tutorial: Using ECR for Private Images](https://docs.replicated.com/vendor/tutorial-ecr-private-images.md): The purpose of this tutorial is to walk you through how to configure Replicated KOTS to pull images from a private registry in Amazon's Elastic Container Registry (ECR). +- [Step 1: Create an Application](https://docs.replicated.com/vendor/tutorial-embedded-cluster-create-app.md): To begin, install the Replicated CLI and create an application in the Replicated Vendor Portal. +- [Step 4: Create an Embedded Cluster-Enabled Customer](https://docs.replicated.com/vendor/tutorial-embedded-cluster-create-customer.md): After promoting the release, create a customer with the Replicated KOTS and Embedded Cluster entitlements so that you can install the release with Embedded Cluster. +- [Step 3: Add the Chart Archive to a Release](https://docs.replicated.com/vendor/tutorial-embedded-cluster-create-release.md): Next, add the Helm chart archive to a new release for the application in the Replicated Vendor Portal. +- [Step 5: Install the Release on a VM](https://docs.replicated.com/vendor/tutorial-embedded-cluster-install.md): Next, get the customer-specific Embedded Cluster installation commands and then install the release on a Linux VM. +- [Step 2: Package the Gitea Helm Chart](https://docs.replicated.com/vendor/tutorial-embedded-cluster-package-chart.md): Next, get the sample Gitea Helm chart from Bitnami. 
+- [Introduction and Setup](https://docs.replicated.com/vendor/tutorial-embedded-cluster-setup.md): This topic provides a summary of the goals and outcomes for the tutorial and also lists the prerequisites to set up your environment before you begin. +- [Step 2: Create an Application](https://docs.replicated.com/vendor/tutorial-kots-helm-create-app.md): Next, install the Replicated CLI and then create an application. +- [Step 5: Create a KOTS-Enabled Customer](https://docs.replicated.com/vendor/tutorial-kots-helm-create-customer.md): After promoting the release, create a customer with the KOTS entitlement so that you can install the release with KOTS. +- [Step 4: Add the Chart Archive to a Release](https://docs.replicated.com/vendor/tutorial-kots-helm-create-release.md): Next, add the Helm chart archive to a new release for the application in the Replicated Vendor Portal. +- [Step 1: Get the Sample Chart and Test](https://docs.replicated.com/vendor/tutorial-kots-helm-get-chart.md): To begin, get the sample Gitea Helm chart from Bitnami, install the chart in your cluster using the Helm CLI, and then uninstall. +- [Step 7: Install the Release with the Helm CLI](https://docs.replicated.com/vendor/tutorial-kots-helm-install-helm.md): Next, install the same release using the Helm CLI. +- [Step 6: Install the Release with KOTS](https://docs.replicated.com/vendor/tutorial-kots-helm-install-kots.md): Next, get the KOTS installation command from the Unstable channel in the Vendor Portal and then install the release using the customer license that you downloaded. +- [Step 3: Package the Helm Chart](https://docs.replicated.com/vendor/tutorial-kots-helm-package-chart.md): Next, add the Replicated SDK as a dependency of the Helm chart and then package the chart into a `. +- [Introduction and Setup](https://docs.replicated.com/vendor/tutorial-kots-helm-setup.md): This topic provides a summary of the goals and outcomes for the tutorial and also lists the prerequisites to set up your environment before you begin. +- [Step 2: Add a Preflight Spec to the Chart](https://docs.replicated.com/vendor/tutorial-preflight-helm-add-spec.md): Create a preflight specification that fails if the cluster is running a version of Kubernetes earlier than 1. +- [Step 4: Create a Customer](https://docs.replicated.com/vendor/tutorial-preflight-helm-create-customer.md): After promoting the release, create a customer so that you can run the preflight checks and install. +- [Step 3: Add the Chart Archive to a Release](https://docs.replicated.com/vendor/tutorial-preflight-helm-create-release.md): Use the Replicated CLI to add the Gitea Helm chart archive to a release in the Replicated vendor platform. +- [Step 1: Get the Sample Chart and Test](https://docs.replicated.com/vendor/tutorial-preflight-helm-get-chart.md): To begin, get the sample Gitea Helm chart from Bitnami, install the chart in your cluster using the Helm CLI, and then uninstall. +- [Step 6: Run Preflights with KOTS](https://docs.replicated.com/vendor/tutorial-preflight-helm-install-kots.md): Create a KOTS-enabled release and then install Gitea with KOTS. +- [Step 5: Run Preflights with the Helm CLI](https://docs.replicated.com/vendor/tutorial-preflight-helm-install.md): Use the Helm CLI installation instructions provided for the customer that you created to run the preflight checks for Gitea and install. 
+- [Introduction and Setup](https://docs.replicated.com/vendor/tutorial-preflight-helm-setup.md): This topic provides a summary of the goals and outcomes for the tutorial and also lists the prerequisites to set up your environment before you begin. +- [Using a Registry Proxy for Helm Air Gap Installations](https://docs.replicated.com/vendor/using-third-party-registry-proxy.md): This topic describes how to connect the Replicated proxy registry to a Harbor or jFrog Artifactory instance to support pull-through image caching. +- [Application Settings Page](https://docs.replicated.com/vendor/vendor-portal-application-settings.md): Each application has its own settings, which include the application name and application slug. +- [Creating a Vendor Account](https://docs.replicated.com/vendor/vendor-portal-creating-account.md): To get started with Replicated, you must create a Replicated vendor account. +- [Managing Applications](https://docs.replicated.com/vendor/vendor-portal-manage-app.md): This topic provides information about managing applications, including how to create, delete, and retrieve the slug for applications in the Replicated Vendor Portal and with the Replicated CLI. \ No newline at end of file From 7c9c1fc0eb6c96d638dd48bf6afe179175136984 Mon Sep 17 00:00:00 2001 From: Paige Calvert Date: Thu, 27 Mar 2025 17:26:10 -0600 Subject: [PATCH 5/9] add llms-full --- static/js/generate-llms.js | 54 +- static/llms-full.txt | 49315 +++++++++++++++++++++++++++++++++++ static/llms.txt | 14 +- 3 files changed, 49372 insertions(+), 11 deletions(-) create mode 100644 static/llms-full.txt diff --git a/static/js/generate-llms.js b/static/js/generate-llms.js index 410c51c709..5f13a17a8a 100644 --- a/static/js/generate-llms.js +++ b/static/js/generate-llms.js @@ -4,8 +4,25 @@ const path = require('path'); // Fix path resolution to use /docs and /static at project root const DOCS_DIR = path.join(__dirname, "../../docs"); const OUTPUT_FILE = path.join(__dirname, "../../static", "llms.txt"); +const OUTPUT_FULL_FILE = path.join(__dirname, "../../static", "llms-full.txt"); const BASE_URL = "https://docs.replicated.com"; +// Define static content +const STATIC_HEADER = `# Replicated Documentation for LLMs + +This file contains markdown-formatted links to Replicated documentation pages. + +`; + +const STATIC_FOOTER = ` + +## Additional Resources + +For more information, visit: +- [Replicated Documentation Home](https://docs.replicated.com) +- [Replicated Help Center](https://help.replicated.com) +`; + function extractFirstSentence(text) { // Remove any front matter between --- markers text = text.replace(/^---[\s\S]*?---/, ''); @@ -26,13 +43,17 @@ function extractFirstSentence(text) { return sentenceMatch ? 
sentenceMatch[0].trim() : 'No description available.'; } +function shouldSkipDirectory(filePath) { + const excludedDirs = ['.history', 'release-notes', 'templates', 'pdfs']; + return excludedDirs.some(dir => filePath.includes(dir)); +} + // Recursively get all .md files from a directory function getMarkdownFiles(dir, fileList = []) { fs.readdirSync(dir).forEach(file => { const filePath = path.join(dir, file); - // Skip .history and release-notes directories - if (filePath.includes('.history') || filePath.includes('release-notes') || filePath.includes('templates') || filePath.includes('pdfs')) { + if (shouldSkipDirectory(filePath)) { return; } @@ -56,27 +77,40 @@ function getMarkdownFiles(dir, fileList = []) { fileList.push({ path: relativePath, title: title, - description: description + description: description, + content: content }); } }); return fileList; } -// Generate the llms.txt file -function generateLLMSTxt() { - const files = getMarkdownFiles(DOCS_DIR); +function generateFullLLMsTxt(files) { + const fullContent = files.map(file => { + return `# ${file.title}\n\n${file.content}\n\n---\n\n`; + }).join('\n'); - const output = [ + fs.writeFileSync(OUTPUT_FULL_FILE, fullContent); + console.log("✅ llms-full.txt generated!"); +} + +function generateLLMsTxt(files) { + const dynamicContent = [ "## Docs\n", + "For a complete archive of all documentation pages, see [llms-full.txt](https://docs.replicated.com/llms-full.txt)\n", ...files.map(file => `- [${file.title}](${BASE_URL}/${file.path}.md): ${file.description}` ) ].join('\n'); - fs.writeFileSync(OUTPUT_FILE, output); + // Combine static and dynamic content + const fullContent = STATIC_HEADER + dynamicContent + STATIC_FOOTER; + + fs.writeFileSync(OUTPUT_FILE, fullContent); console.log("✅ llms.txt generated!"); } -// Run the generator -generateLLMSTxt(); +// Generate both files +const files = getMarkdownFiles(DOCS_DIR); +generateFullLLMsTxt(files); +generateLLMsTxt(files); diff --git a/static/llms-full.txt b/static/llms-full.txt new file mode 100644 index 0000000000..38c7eaf6fa --- /dev/null +++ b/static/llms-full.txt @@ -0,0 +1,49315 @@ +# Changing an Admin Console Password + +# Changing an Admin Console Password + +When you install for the first time with Replicated kURL, the Replicated KOTS Admin Console is secured with a single shared password that is set automatically for all users. Replicated recommends that you change this to a new, unique password for security purposes as this automated password is displayed to the user in plain text. + +The Admin Console password is salted and one-way hashed using bcrypt. The irreversible hash is stored in a Secret named `kotsadm-password`. The password is not retrievable if lost. If you lose your Admin Console password, reset your password to access the Admin Console. + +For more information about bcrypt, see [bcrypt](https://en.wikipedia.org/wiki/Bcrypt) on Wikipedia. + +:::note +Users with Identity Provider (IDP) access cannot change their password using this procedure. If an attempt is made, IDP users receive a message in the user interface to contact the identity service provider to change their password. For more information about resetting an IDP user password, see [Resetting Authentication](auth-identity-provider#resetting-authentication) in _Using an Identity Provider for User Access (Beta)_. +::: + +To change your Admin Console password: + +1. Log in to the Admin Console using your current password. +1. In the drop-down in the top right of any page, click **Change password**. +1. 
In the Change Admin Console Password dialog, edit the fields. + + - The new password must be at least 6 characters and must not be the same as your current password. + - The **New Password** and **Confirm New Password** fields must match each other. + +1. Click **Change Password**. + + If there are any issues with changing the password, an error message displays the specific problem. + + When the password change succeeds, the current session closes and you are redirected to the Log In page. + +1. Log in with the new password. + + +--- + + +# Configuring Role-based Access Control (Beta) + +# Configuring Role-based Access Control (Beta) + +You can regulate access to the Replicated KOTS Admin Console resources based on the roles of individual users within your organization. + +To configure role-based access control (RBAC) for the Admin Console: +1. Go to the **Access** page. Under **Role Based Access Control Group Policy**, click **Add a group**. +1. Enter a group name that matches one of the group names already established with your identity provider. +1. Choose one of the pre-defined Admin Console roles to be assigned to that group. For a list of Admin Console roles, see [Admin Console roles](#admin-console-roles) below. +1. Click **Add group**. + +![Role Based Access Control](/images/identity-service-kotsadm-rbac.png) + +## Admin Console Roles + +The Admin Console comes with pre-defined identity service roles that can be assigned to groups when you configure RBAC for the Admin Console. + +- **Read Access:** This role has read permissions to all resources. + +- **Write Access:** This role has write permissions to all resources. + +## Support Roles + +- **Read Access:** This role has read permissions to all resources except the application's file tree. + +- **Write Access:** This role has write permissions to the following resources: + + * Support bundles + * Preflight checks + + +--- + + +# Using an Identity Provider for User Access (Beta) + +# Using an Identity Provider for User Access (Beta) + +When you install an application for the first time, the Replicated KOTS Admin Console is secured with a single shared password for all users. It is possible to further configure the Admin Console to authenticate users with your organization's user management system. This feature is only available for licenses that have the Replicated identity service feature enabled. + +Replicated KOTS leverages the open source project Dex as an intermediary to control access to the Admin Console. Dex implements an array of protocols for querying other user-management systems, known as connectors. For more information, see the [Dex documentation](https://dexidp.io/docs/). + +The identity service has the following limitations: +* Only available for installations in a cluster created by Replicated kURL. +* Only available through the Admin Console. + +## Prerequisite + +When you are installing the Admin Console and setting up TLS certificates on the HTTPS page, you must configure the hostname to use to access the Admin Console. The hostname is required whether you are using the identity service with a self-signed certificate or a custom certificate. For more information about configuring the hostname field, see [Install and Deploy the Application](installing-kurl#install-app) in _Online Installation with kURL_. + +## Configuration + +To begin, click the **Access** tab at the top of the Admin Console. +Here you can configure access to the Admin Console, integrating with one of the supported identity providers. 
+ +![Configure Identity Provider](/images/access-identity.png) + +## Supported Providers + +**OpenID Connect:** For more information, see the [OpenID Connect documentation](https://openid.net/connect/). + +## Resetting Authentication + +When you enable identity provider access to the Admin Console, shared password authentication is disabled. +If you want to re-enable the shared password authentication, run the `kubectl kots identity-service enable-shared-password --namespace [namespace]` command. For more information, see [identity-service enable-shared-password](/reference/kots-cli-identity-service-enable-shared-password/) in the KOTS CLI documentation. + + +--- + + +# Adding Nodes to kURL Clusters + +import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" + +# Adding Nodes to kURL Clusters + + + +This topic describes how to add primary and secondary nodes to a Replicated kURL cluster. + +## Overview + +You can generate commands in the Replicated KOTS Admin Console to join additional primary and secondary nodes to kURL clusters. Primary nodes run services that control the cluster. Secondary nodes run services that control the pods that host the application containers. Adding nodes can help manage resources to ensure that the application runs smoothly. + +For high availability clusters, Kubernetes recommends using at least three primary nodes, and that you use an odd number of nodes to help with leader selection if machine or zone failure occurs. For more information, see [Creating Highly Available Clusters with kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/) in the Kubernetes documentation. + +## Join Primary and Secondary Nodes + +You can join primary and secondary nodes on the Admin Console **Cluster management** page. + +To add primary and secondary nodes: + +1. (Air Gap Only) For air gapped environments, download and extract the `.tar.gz` bundle on the remote node before running the join command. +1. In the Admin Console, click **Cluster Management > Add a node**. +1. Copy the command that displays in the text box and run it on the node that you are joining to the cluster. + + ![Join node in Admin Console](/images/join-node.png) + + [View a larger image](/images/join-node.png) + +--- + + +# Deleting the Admin Console and Removing Applications + +# Deleting the Admin Console and Removing Applications + +This topic describes how to remove installed applications and delete the Replicated KOTS Admin Console. The information in this topic applies to existing cluster installations with KOTS. + +## Remove an Application + +The Replicated KOTS CLI `kots remove` command removes the reference to an installed application from the Admin Console. When you use `kots remove`, the Admin Console no longer manages the application because the record of that application’s installation is removed. This means that you can no longer manage the application through the Admin Console or through the KOTS CLI. + +By default, `kots remove` does not delete any of the installed Kubernetes resources for the application from the cluster. To remove both the reference to an application from the Admin Console and remove any resources for the application from the cluster, you can run `kots remove` with the `--undeploy` flag. + +It can be useful to remove only the reference to an application from the Admin Console if you want to reinstall the application, but you do not want to recreate the namespace or other Kubernetes resources. 
For example, if you installed an application using an incorrect license file and need to reinstall with the correct license. + +To remove an application: + +1. Run the following command to list the installed applications for a namespace: + ``` + kubectl kots get apps -n NAMESPACE + ``` + Replace `NAMESPACE` with the name of the namespace where the Admin Console is installed. + + In the output of this command, note the slug for the application that you want to remove. + +1. Run _one_ of the following commands: + + * Remove only the reference to the application from the Admin Console: + + ``` + kubectl kots remove APP_SLUG -n NAMESPACE + ``` + Replace: + * `APP_SLUG` with the slug for the application that you want to remove. + * `NAMESPACE` with the name of the namespace where the Admin Console is installed. + + * Remove the reference to the application from the Admin Console and remove its resources from the cluster: + + ``` + kubectl kots remove APP_SLUG -n NAMESPACE --undeploy + ``` + + :::note + Optionally, use the `--force` flag to remove the application reference from the Admin Console when the application has already been deployed. The `--force` flag is implied when `--undeploy` is used. For more information, see [remove](/reference/kots-cli-remove) in _KOTS CLI_. + ::: + + +## Delete the Admin Console + +When you install an application, KOTS creates the Kubernetes resources for the Admin Console itself on the cluster. The Admin Console includes Deployments and Services, Secrets, and other resources such as StatefulSets and PersistentVolumeClaims. + +By default, KOTS also creates Kubernetes ClusterRole and ClusterRoleBinding resources that grant permissions to the Admin Console on the cluster level. These `kotsadm-role` and `kotsadm-rolebinding` resources are managed outside of the namespace where the Admin Console is installed. Alternatively, when the Admin Console is installed with namespace-scoped access, KOTS creates Role and RoleBinding resources inside the namespace where the Admin Console is installed. + +In existing cluster installations, if the Admin Console is not installed in the `default` namespace, then you delete the Admin Console by deleting the namespace where it is installed. + +If you installed the Admin Console with namespace-scoped access, then the Admin Console Role and RoleBinding RBAC resources are also deleted when you delete the namespace. Alternatively, if you installed with the default cluster-scoped access, then you manually delete the Admin Console ClusterRole and ClusterRoleBindings resources from the cluster. For more information, see [supportMinimalRBACPrivileges](/reference/custom-resource-application#supportminimalrbacprivileges) and [requireMinimalRBACPrivileges](/reference/custom-resource-application#requireminimalrbacprivileges) in _Application_. + +For more information about installing with cluster- or namespace-scoped access, see [RBAC Requirements](/enterprise/installing-general-requirements#rbac-requirements) in _Installation Requirements_. + +To completely delete the Admin Console from an existing cluster: + +1. Run the following command to delete the namespace where the Admin Console is installed: + + :::important + This command deletes everything inside the specified namespace, including the Admin Console Role and RoleBinding resources if you installed with namespace-scoped access. + ::: + + ``` + kubectl delete ns NAMESPACE + ``` + Replace `NAMESPACE` with the name of the namespace where the Admin Console is installed. 
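+ + A concrete version of the command above, assuming the Admin Console is installed in a hypothetical namespace named `kotsadm-apps` (an illustrative value, not part of the original doc): + + ```bash + # Hypothetical namespace: deletes the namespace where the Admin Console is + # installed, along with every resource inside it + kubectl delete ns kotsadm-apps + ```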
+ + :::note + You cannot delete the `default` namespace. + ::: + +1. (Cluster-scoped Access Only) If you installed the Admin Console with the default cluster-scoped access, run the following commands to delete the Admin Console ClusterRole and ClusterRoleBinding from the cluster: + + ``` + kubectl delete clusterrole kotsadm-role + ``` + + ``` + kubectl delete clusterrolebinding kotsadm-rolebinding + ``` + +1. (Optional) To uninstall the KOTS CLI, see [Uninstall](https://docs.replicated.com/reference/kots-cli-getting-started#uninstall) in _Installing the KOTS CLI_. + +--- + + +# Managing Multi-Node Clusters with Embedded Cluster + +import HaArchitecture from "../partials/embedded-cluster/_multi-node-ha-arch.mdx" + +# Managing Multi-Node Clusters with Embedded Cluster + +This topic describes managing nodes in clusters created with Replicated Embedded Cluster, including how to add nodes and enable high availability for multi-node clusters. + +## Limitations + +Multi-node clusters with Embedded Cluster have the following limitations: + +* Support for multi-node clusters with Embedded Cluster is Beta. Only single-node embedded clusters are Generally Available (GA). + +* High availability for Embedded Cluster is an Alpha feature. This feature is subject to change, including breaking changes. To get access to this feature, reach out to Alex Parker at [alexp@replicated.com](mailto:alexp@replicated.com). + +* The same Embedded Cluster data directory used at installation is used for all nodes joined to the cluster. This is either the default `/var/lib/embedded-cluster` directory or the directory set with the [`--data-dir`](/reference/embedded-cluster-install#flags) flag. You cannot choose a different data directory for Embedded Cluster when joining nodes. + +## Add Nodes to a Cluster (Beta) {#add-nodes} + +You can add nodes to create a multi-node cluster in online (internet-connected) and air-gapped (limited or no outbound internet access) environments. The Admin Console provides the join command that you use to join nodes to the cluster. + +:::note +Multi-node clusters are not highly available by default. For information about enabling high availability, see [Enable High Availability for Multi-Node Clusters (Alpha)](#ha) below. +::: + +To add nodes to a cluster: + +1. (Optional) In the Embedded Cluster Config, configure the `roles` key to customize node roles. For more information, see [roles](/reference/embedded-config#roles) in _Embedded Cluster Config_. When you are done, create and promote a new release with the updated Config. + +1. Do one of the following to get the join command from the Admin Console: + + 1. To add nodes during the application installation process, follow the steps in [Online Installation with Embedded Cluster](/enterprise/installing-embedded) or [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap) to install. A **Nodes** screen is displayed as part of the installation flow in the Admin Console that allows you to choose a node role and copy the relevant join command. + + 1. Otherwise, if you have already installed the application: + + 1. Log in to the Admin Console. + + 1. If you promoted a new release that configures the `roles` key in the Embedded Cluster Config, update the instance to the new version. See [Performing Updates in Embedded Clusters](/enterprise/updating-embedded). + + 1. Go to **Cluster Management > Add node** at the top of the page. 
+ + Add node page in the Admin Console + + [View a larger version of this image](/images/admin-console-add-node.png) + +1. Either on the Admin Console **Nodes** screen that is displayed during installation or in the **Add a Node** dialog, select one or more roles for the new node that you will join. Copy the join command. + + Note the following: + + * If the Embedded Cluster Config [roles](/reference/embedded-config#roles) key is not configured, all new nodes joined to the cluster are assigned the `controller` role by default. The `controller` role designates nodes that run the Kubernetes control plane. Controller nodes can also run other workloads, such as application or Replicated KOTS workloads. + + * Roles are not updated or changed after a node is added. If you need to change a node’s role, reset the node and add it again with the new role. + + * For multi-node clusters with high availability (HA), at least three `controller` nodes are required. You can assign both the `controller` role and one or more `custom` roles to the same node. For more information about creating HA clusters with Embedded Cluster, see [Enable High Availability for Multi-Node Clusters (Alpha)](#ha) below. + + * To add non-controller or _worker_ nodes that do not run the Kubernetes control plane, select one or more `custom` roles for the node and deselect the `controller` role. + +1. Do one of the following to make the Embedded Cluster installation assets available on the machine that you will join to the cluster: + + * **For online (internet-connected) installations**: SSH onto the machine that you will join. Then, use the same commands that you ran during installation to download and untar the Embedded Cluster installation assets on the machine. See [Online Installation with Embedded Cluster](/enterprise/installing-embedded). + + * **For air gap installations with limited or no outbound internet access**: On a machine that has internet access, download the Embedded Cluster installation assets (including the air gap bundle) using the same command that you ran during installation. See [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap). Then, move the downloaded assets to the air-gapped machine that you will join, and untar. + + :::important + The Embedded Cluster installation assets on each node must all be the same version. If you use a different version than what is installed elsewhere in the cluster, the cluster will not be stable. To download a specific version of the Embedded Cluster assets, select a version in the **Embedded cluster install instructions** dialog. + ::: + +1. On the machine that you will join to the cluster, run the join command that you copied from the Admin Console. + + **Example:** + + ```bash + sudo ./APP_SLUG join 10.128.0.32:30000 TxXboDstBAamXaPdleSK7Lid + ``` + **Air Gap Example:** + + ```bash + sudo ./APP_SLUG join --airgap-bundle APP_SLUG.airgap 10.128.0.32:30000 TxXboDstBAamXaPdleSK7Lid + ``` + +1. In the Admin Console, either on the installation **Nodes** screen or on the **Cluster Management** page, verify that the node appears. Wait for the node's status to change to Ready. + +1. Repeat these steps for each node you want to add. + +## Enable High Availability for Multi-Node Clusters (Alpha) {#ha} + +Multi-node clusters are not highly available by default. The first node of the cluster is special and holds important data for Kubernetes and KOTS, such that the loss of this node would be catastrophic for the cluster. 
Enabling high availability (HA) requires that at least three controller nodes are present in the cluster. Users can enable HA when joining the third node. + +:::important +High availability for Embedded Cluster is an Alpha feature. This feature is subject to change, including breaking changes. To get access to this feature, reach out to Alex Parker at [alexp@replicated.com](mailto:alexp@replicated.com). +::: + +### HA Architecture + + + +For more information about the Embedded Cluster built-in extensions, see [Built-In Extensions](/vendor/embedded-overview#built-in-extensions) in _Embedded Cluster Overview_. + +### Requirements + +Enabling high availability has the following requirements: + +* High availability is supported with Embedded Cluster 1.4.1 or later. + +* High availability is supported only for clusters where at least three nodes with the `controller` role are present. + +### Limitations + +Enabling high availability has the following limitations: + +* High availability for Embedded Cluster is an Alpha feature. This feature is subject to change, including breaking changes. To get access to this feature, reach out to Alex Parker at [alexp@replicated.com](mailto:alexp@replicated.com). + +* The `--enable-ha` flag serves as a feature flag during the Alpha phase. In the future, the prompt about migrating to high availability will display automatically if the cluster is not yet HA and you are adding the third or a subsequent controller node. + +* HA multi-node clusters use rqlite to store support bundles up to 100 MB in size. Bundles over 100 MB can cause rqlite to crash and restart. + +### Best Practices for High Availability + +Consider the following best practices and recommendations for creating HA clusters: + +* At least three _controller_ nodes that run the Kubernetes control plane are required for HA. This is because clusters use a quorum system, in which more than half the nodes must be up and reachable. In clusters with three controller nodes, the Kubernetes control plane can continue to operate if one node fails because a quorum can still be reached by the remaining two nodes. By default, with Embedded Cluster, all new nodes added to a cluster are controller nodes. For information about customizing the `controller` node role, see [roles](/reference/embedded-config#roles) in _Embedded Cluster Config_. + +* Always use an odd number of controller nodes in HA clusters. Using an odd number of controller nodes ensures that the cluster can make decisions efficiently with quorum calculations. Clusters with an odd number of controller nodes also avoid split-brain scenarios where the cluster runs as two independent groups of nodes, resulting in inconsistencies and conflicts. + +* You can have any number of _worker_ nodes in HA clusters. Worker nodes do not run the Kubernetes control plane, but can run workloads such as application or Replicated KOTS workloads. + +### Create a Multi-Node HA Cluster + +To create a multi-node HA cluster: + +1. Set up a cluster with at least two controller nodes. You can do an online (internet-connected) or air gap installation. For more information, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded) or [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap). + +1. SSH onto a third node that you want to join to the cluster as a controller. + +1. Run the join command provided in the Admin Console **Cluster Management** tab and pass the `--enable-ha` flag. 
For example: + + ```bash + sudo ./APP_SLUG join --enable-ha 10.128.0.80:30000 tI13KUWITdIerfdMcWTA4Hpf + ``` + +1. After the third node joins the cluster, type `y` in response to the prompt asking if you want to enable high availability. + + ![high availability command line prompt](/images/embedded-cluster-ha-prompt.png) + [View a larger version of this image](/images/embedded-cluster-ha-prompt.png) + +1. Wait for the migration to complete. + +--- + + +# Updating Custom TLS Certificates in Embedded Cluster Installations + +# Updating Custom TLS Certificates in Embedded Cluster Installations + +This topic describes how to update custom TLS certificates in Replicated Embedded Cluster installations. + +## Update Custom TLS Certificates + +Users can provide custom TLS certificates with Embedded Cluster installations and can update TLS certificates through the Admin Console. + +:::important +Adding the `acceptAnonymousUploads` annotation temporarily creates a vulnerability for an attacker to maliciously upload TLS certificates. After TLS certificates have been uploaded, the vulnerability is closed again. + +Replicated recommends that you complete this upload process quickly to minimize the vulnerability risk. +::: + +To upload a new custom TLS certificate in Embedded Cluster installations: + +1. SSH onto a controller node where Embedded Cluster is installed. Then, run the following command to start a shell so that you can access the cluster with kubectl: + + ```bash + sudo ./APP_SLUG shell + ``` + Where `APP_SLUG` is the unique slug of the installed application. + +1. In the shell, run the following command to restore the ability to upload new TLS certificates by adding the `acceptAnonymousUploads` annotation: + + ```bash + kubectl -n kotsadm annotate secret kotsadm-tls acceptAnonymousUploads=1 --overwrite + ``` + +1. Run the following command to get the name of the kurl-proxy server: + + ```bash + kubectl get pods -A | grep kurl-proxy | awk '{print $2}' + ``` + :::note + This server is named `kurl-proxy`, but is used in both Embedded Cluster and kURL installations. + ::: + +1. Run the following command to delete the kurl-proxy pod. The pod automatically restarts after the command runs. + + ```bash + kubectl delete pods PROXY_SERVER + ``` + + Replace `PROXY_SERVER` with the name of the kurl-proxy server that you got in the previous step. + +1. After the pod has restarted, go to `http://<ip>:30000/tls` in your browser and complete the process in the Admin Console to upload a new certificate. + + +--- + + +# Managing Secrets with KOTS Auto-GitOps (Alpha) + +import GitOpsNotRecommended from "../partials/gitops/_gitops-not-recommended.mdx" + +# Managing Secrets with KOTS Auto-GitOps (Alpha) + + + +When you enable Auto-GitOps, the Replicated KOTS Admin Console pushes the rendered application manifests to the configured git repository. Application manifests often contain secrets and sensitive information that should not be committed to git. + +Replicated KOTS v1.18 introduces an integration with SealedSecrets to encrypt secrets before committing. +This integration is currently alpha and subject to change in future releases of KOTS. For more information, see the [sealed-secrets](https://github.com/bitnami-labs/sealed-secrets) GitHub repository. + +To enable this integration, a Secret with specific labels must be deployed to the same namespace as the Admin Console. 
+This secret must contain the SealedSecrets public key and is used by KOTS to replace all Secret objects created by the application and by the Admin Console. + +This Secret must be manually deployed to the same namespace as the Admin Console. There is currently no way to automate or use the Admin Console to configure this functionality. The Secret can be named anything unique that does not conflict with application Secrets. The labels in this example YAML file are important and must be used. + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: kots-sealed-secret + namespace: NAMESPACE + labels: + kots.io/buildphase: secret + kots.io/secrettype: sealedsecrets +data: + cert.pem: SEALED_SECRET_KEY +``` + +Replace: + +- `NAMESPACE` with the namespace where the Admin Console is installed. + +- `SEALED_SECRET_KEY` with the base64 encoded, sealed Secret public key. The sealed Secret public key is included in the sealed Secret controller logs during startup. + + **Example:** + + ```bash + kubectl logs -n kube-system sealed-secrets-controller-7684c7b86c-6bhhw + 2022/04/20 15:49:49 Starting sealed-secrets controller version: 0.17.5 + controller version: 0.17.5 + 2022/04/20 15:49:49 Searching for existing private keys + 2022/04/20 15:49:58 New key written to kube-system/sealed-secrets-keyxmwv2 + 2022/04/20 15:49:58 Certificate is + -----BEGIN CERTIFICATE----- + MIIEzDCCArSgAwIBAgIQIkCjUuODpQV7zK44IB3O9TANBgkqhkiG9w0BAQsFADAA + MB4XDTIyMDQyMDE1NDk1OFoXDTMyMDQxNzE1NDk1OFowADCCAiIwDQYJKoZIhvcN + AQEBBQADggIPADCCAgoCggIBAN0cle8eERYUglhGapLQZWYS078cP9yjOZpoUtXe + mpNE4eLBMo2bDAOopL9YV6TIh2EQMGOr7Njertnf7sKl/1/ZEnIpDw+b/U40LD6o + XMymCrv9GznlsEkaqfGynsY22oamQnHNLIPTYfxUueDqqQFSJN3h1vKZaFi850I4 + y29r+kxX8gGTRmuratGw0Rd4VvHtqi4lDlD9pBToQzbYsbhiySKhClAWC8Hbwzw8 + 4rPamYO8am92jpWIw0liSJUq5urnHR+S0S2P8FlOh7nbCI4ZkmY/Edjxz6ew7yB3 + OFONxlkweD2/KMzquMgOxhxUUdrbBZxXtb6s3MUeF4ENnJ2iL73dgx7O81HTUyu4 + Ok0YK1zqlnj4B683ySV3/RAtHbJJJWJMrLqbjhUNiYf+Ey6wXHJIwqXnjkG4UjP/ + OzrAmZiMa+z/uniUS0M+6siDJuj1FZsN9o1HhwwAWKcEJov2Jlo65gRsaLvalQfr + /VGrHQ1nQ2323hNVIZNKZ6zS6HlJOyOEQ7dcW3XsP1F5gEGkKkgLklOs3jt5OF4i + 2eiimHVnXveXgYZhDudY20ungRnslO2NBpTXgKIDu4YKUXhouQe1LAOkSIdtYSJL + eBFT1cO+rYqNUnffvsv2f9cE0SLp9XQ3VD5Eb+oJCpHc0qZ37/SB3VuDsXW2U/ih + TepxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIAATAPBgNVHRMBAf8EBTADAQH/MB0G + A1UdDgQWBBSvvAr9OTTWZBiCu7+b023YlCL6KzANBgkqhkiG9w0BAQsFAAOCAgEA + oXqAxZUCtZQCv23NMpABnJm2dM3qj5uZRbwqUBxutvlQ6WXKj17dbQ0SoNc2BOKT + 7hpR7wkN9Ic6UrTnx8NUf/CZwHrU+ZXzG8PigOccoP4XBJ6v7k4vOjwpuyr14Jtw + BXxcqbwK/bZPHbjn/N1eZhVyeOZlVE4oE+xbI0s6vJnn2N4tz/YrHB3VBRx9rbtN + WbbparStldRzfGyOXLZsu0eQFfHdGXtYAJP0Hougc26Wz2UEozjczUqFYc7s66Z4 + 1SCXpIpumm+aIKifjzIDPVZ3gDqpZaQYB877mCLVQ0rvfZgw/lVMtnnda+XjWh82 + YUORubKqKIM4OBM9RvaTih6k5En70Xh9ouyYgwE0fbUEvFThADVR5fUE0e7/34sE + oeAONWIZ4sbqewhvKjbYpKOZD7a9GrxCiB5C92WvA1xrI4x6F0EOK0jp16FSNuxN + us9lhAxX4V7HN3KR+O0msygeb/LAE+Vgcr3ZxlNvkIoLY318vKFsGCPgYTXLk5cs + uP2mg/JbTuntXaZTP+gM7hd8enugaUcvyX/AtduTeIXgs7KLLRZW+2M+gq/dlRwl + jCwIzOs3BKuiotGAWACaURFiKhyY+WiEpsIN1H6hswAwY0lcV1rrOeQgg9rfYvoN + 0tXH/eHuyzyHdWt0BX6LLY4cqP2rP5QyP117Vt2i1jY= + -----END CERTIFICATE----- + + 2022/04/20 15:49:58 HTTP server serving on :8080 + ... 
+ ``` + + +--- + + +# KOTS Auto-GitOps Workflow + +import GitOpsLimitation from "../partials/helm/_gitops-limitation.mdx" +import GitOpsNotRecommended from "../partials/gitops/_gitops-not-recommended.mdx" + +# KOTS Auto-GitOps Workflow + + + +## Overview of the Auto-GitOps Workflow + +The Replicated KOTS Admin Console default workflow is configured to receive updates, show the changes, and deploy the updates to the cluster. You can enable the KOTS Auto-GitOps workflow instead. When using the Auto-GitOps workflow, changes from the Admin Console are pushed to a private Git repository, where an existing CI/CD process can execute the delivery of manifests to the cluster. Changes can include local configuration changes and upstream updates from your vendor (such as application and license updates). + +If you have more than one application installed, you can selectively enable Auto-GitOps for each application. + +After enabling the Auto-GitOps workflow for an application, the Admin Console makes your first commit with the latest available version in the Admin Console. The latest available version is often the current version that is deployed. Subsequently, the Admin Console makes separate commits with any available updates. + +If you configure automatic updates for the application, any updates from your vendor are automatically committed to your Git repository. For more information about configuring automatic updates, see [Configuring Automatic Updates](/enterprise/updating-apps). + +You can change your GitOps settings or disable Auto-GitOps at any time from the **GitOps** tab in the Admin Console. + +## Limitations + +- To enable pushing updates through the Auto-GitOps workflow, you must first follow the installation workflow for the application using the Admin Console or the Replicated KOTS CLI. If the preflight checks pass during installation, then the application is deployed. + +- After you have completed the installation workflow, you can enable Auto-GitOps for all subsequent application updates. It is not required that the application deploy successfully to enable Auto-GitOps. For example, if the preflight checks fail during the installation workflow and the application is not deployed, you can still enable Auto-GitOps for subsequent application updates. + +- When you enable Auto-GitOps, the Admin Console sends all application updates, including the version that you initially installed before Auto-GitOps was enabled, to the repository that you specify. + +- If your organization has security requirements that prevent you from completing the installation workflow for the application first with the Admin Console or KOTS CLI, you cannot enable Auto-GitOps. + +## Prerequisites + +- A Git repository that you have read/write access to. +- If the repository does not have files or folders committed yet, you must make at least one commit with any content so that the connection attempt succeeds with the SSH key when you perform the following task. + +## Enable Auto-GitOps + +To enable pushing updates to the Auto-GitOps workflow: + +1. Click the **GitOps** tab at the top of the Admin Console. + +1. On the GitOps Configuration page: + + 1. If you have more than one application, select the application where you want to enable Auto-GitOps. + 1. Select the Git provider. + 1. Enter the repository details: + +

    | Field Name | Description |
    |------------|-------------|
    | Owner & Repository | Enter the owner and repository name where the commit will be made. |
    | Branch | Enter the branch name or leave the field blank to use the default branch. |
    | Path | Enter the folder name in the repository where the application deployment file will be committed. If you leave this field blank, Replicated KOTS creates a folder for you. However, the best practice is to manually create a folder in the repository labeled with the application name and dedicated to the deployment file only. |

    1. Click **Generate SSH Key**, and then **Copy key**.
    1. Go to your Git repository and open the settings page. On the settings page:
        1. Add the SSH public key that you copied in the previous step.
        1. Enable write access for the key. This allows the Admin Console to push commits to the repository.

1. On the **GitOps Configuration** page, click **Test connection to repository** to verify that the Admin Console can connect.

   When the Admin Console establishes a connection to the repository, a dialog displays that says GitOps is enabled.


---


# Working with the kURL Image Registry

import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"

# Working with the kURL Image Registry



This topic describes the Replicated kURL registry for kURL clusters.

## Overview

The kURL Registry add-on can be used to host application images. For air gap installations, this kURL registry is automatically used to host all application images.

With every application update, new images are pushed to the kURL registry. To keep the registry from running out of storage, images that are no longer used are automatically deleted from the registry.

For more information about the kURL Registry add-on, see [Registry Add-On](https://kurl.sh/docs/add-ons/registry) in the kURL documentation.

:::note
Users can also configure their own private registry for kURL installations instead of using the kURL registry. For more information, see [Configuring Local Image Registries](/enterprise/image-registry-settings).
:::

## Trigger Garbage Collection

Every time the application instance is upgraded, image garbage collection automatically deletes images that are no longer used.

You can also manually trigger image garbage collection. To manually run garbage collection:

```bash
kubectl kots admin-console garbage-collect-images -n NAMESPACE
```
Where `NAMESPACE` is the namespace where the application is installed.

For more information, see [admin-console garbage-collect-images](/reference/kots-cli-admin-console-garbage-collect-images/).

## Disable Image Garbage Collection

Image garbage collection is enabled by default for kURL clusters that use the kURL registry.

To disable image garbage collection:

```bash
kubectl patch configmaps kotsadm-confg --type merge -p "{\"data\":{\"enable-image-deletion\":\"false\"}}"
```

To enable garbage collection again:

```bash
kubectl patch configmaps kotsadm-confg --type merge -p "{\"data\":{\"enable-image-deletion\":\"true\"}}"
```

## Restore Deleted Images

Deleted images can be reloaded from air gap bundles using the `admin-console push-images` command. For more information, see [admin-console push-images](/reference/kots-cli-admin-console-push-images/) in the KOTS CLI documentation.

The registry address and namespace can be found on the **Registry Settings** page in the Replicated KOTS Admin Console. The registry username and password can be found in the `registry-creds` secret in the default namespace.

## Limitations

The kURL registry image garbage collection feature has the following limitations:

* **Optional components**: Some applications define Kubernetes resources that can be enabled or disabled dynamically. For example, template functions can be used to conditionally deploy a StatefulSet based on configuration from the user.

  If a resource is disabled and no longer deployed, its images can be included in the garbage collection.
+ + To prevent this from happening, include the optional images in the `additionalImages` list of the Application custom resource. For more information, see [`additionalImages`](/reference/custom-resource-application#additionalimages) in _Application_. + +* **Shared Image Registries**: The image garbage collection process assumes that the registry is not shared with any other instances of Replicated KOTS, nor shared with any external applications. If the built-in kURL registry is used by another external application, disable garbage collection to prevent image loss. + +* **Customer-Supplied Registries**: Image garbage collection is supported only when used with the built-in kURL registry. If the KOTS instance is configured to use a different registry, disable garbage collection to prevent image loss. For more information about configuring an image registry in the Admin Console, see [Configuring Local Image Registries](/enterprise/image-registry-settings). + +* **Application Rollbacks**: Image garbage collection has no effect when the `allowRollback` field in the KOTS Application custom resource is set to `true`. For more information, see [Application](/reference/custom-resource-application) in _KOTS Custom Resources_. + + +--- + + +# Avoiding Docker Hub Rate Limits + +# Avoiding Docker Hub Rate Limits + +This topic describes how to avoid rate limiting for anonymous and free authenticated use of Docker Hub by providing a Docker Hub username and password to the `kots docker ensure-secret` command. + +## Overview + +On November 20, 2020, rate limits for anonymous and free authenticated use of Docker Hub went into effect. +Anonymous and Free Docker Hub users are limited to 100 and 200 container image pull requests per six hours, respectively. +Docker Pro and Docker Team accounts continue to have unlimited access to pull container images from Docker Hub. + +For more information on rate limits, see [Understanding Docker Hub rate limiting](https://www.docker.com/increase-rate-limits) on the Docker website. + +If the application that you are installing or upgrading has public Docker Hub images that are rate limited, then an error occurs when the rate limit is reached. + +## Provide Docker Hub Credentials + +To avoid errors caused by reaching the Docker Hub rate limit, a Docker Hub username and password can be passed to the `kots docker ensure-secret` command. The Docker Hub username and password are used only to increase rate limits and do not need access to any private repositories on Docker Hub. + +Example: + +```bash +kubectl kots docker ensure-secret --dockerhub-username sentrypro --dockerhub-password password --namespace sentry-pro +``` + +The `kots docker ensure-secret` command creates an image pull secret that KOTS can use when pulling images. + +KOTS then creates a new release sequence for the application to apply the image pull secret to all Kubernetes manifests that have images. After running the `kots docker ensure-secret` command, deploy this new release sequence either from the Admin Console or the KOTS CLI. + +For more information, see [docker ensure-secret](/reference/kots-cli-docker-ensure-secret) in the KOTS CLI documentation. 
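
To verify the limit that currently applies, you can query Docker Hub's token service directly. The following is a sketch based on Docker's documented rate-limit check; it assumes `curl` and `jq` are available, and it uses the `ratelimitpreview/test` image only to read the rate-limit headers:

```bash
# Request an anonymous pull token for Docker's rate-limit preview image.
# Add -u USERNAME:PASSWORD to the first curl to check a specific account.
TOKEN=$(curl -s "https://auth.docker.io/token?service=registry.docker.io&scope=repository:ratelimitpreview/test:pull" \
  | jq -r .token)

# Read the ratelimit-limit and ratelimit-remaining response headers:
curl -sI -H "Authorization: Bearer $TOKEN" \
  "https://registry-1.docker.io/v2/ratelimitpreview/test/manifests/latest" \
  | grep -i 'ratelimit'
```

If the headers are not present in the response, pulls with those credentials are not subject to rate limits.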


---


# Configuring Local Image Registries

import ImageRegistrySettings from "../partials/image-registry/_image-registry-settings.mdx"
import DockerCompatibility from "../partials/image-registry/_docker-compatibility.mdx"

# Configuring Local Image Registries

This topic describes how to configure private registry settings in the Replicated KOTS Admin Console.

The information in this topic applies to existing cluster installations with KOTS and installations with Replicated kURL. This topic does _not_ apply to Replicated Embedded Cluster installations.

## Overview

Using a private registry lets you create a custom image pipeline. Any proprietary configurations that you make to the application are shared only with the groups that you allow access, such as your team or organization. You also have control over the storage location, logging messages, load balancing requests, and other configuration options. Private registries can be used with online or air gap clusters.

## Requirement

The domain of the image registry must support the Docker V2 protocol. KOTS has been tested for compatibility with the following registries:



## Configure Local Private Registries in Online Clusters

In online (internet-connected) installations, you can optionally use a local private image registry. You can also disable the connection or remove the registry settings if needed.

To configure private registry settings in an online cluster:

1. In the Admin Console, on the **Registry settings** tab, edit the fields:

   Registry Settings

   [View a larger version of this image](/images/registry-settings.png)

   The following table describes the fields:



1. Click **Test Connection** to test the connection between KOTS and the registry host.

1. Click **Save changes**.

## Change Private Registries in Air Gap Clusters {#air-gap}

You can change the private registry settings at any time in the Admin Console.

To change private registry settings in an air gap cluster:

1. In the Admin Console, on the **Registry settings** tab, select the **Disable Pushing Images to Private Registry** checkbox. Click **Save changes**.

   :::note
   This is a temporary action that allows you to edit the registry namespace and hostname. If you only want to change the username or password for the registry, you do not have to disable pushing the images.
   :::

1. Edit the fields as needed, and click **Save changes**.

    | Field | Description |
    |-------|-------------|
    | Hostname | Specify a registry domain that uses the Docker V2 protocol. |
    | Username | Specify the username for the domain. |
    | Password | Specify the password for the domain. |
    | Registry Namespace | Specify the registry namespace. For air gap environments, this setting overwrites the registry namespace that you pushed images to when you installed KOTS. |

1. Deselect the **Disable Pushing Images to Private Registry** checkbox. This action re-enables KOTS to push images to the registry.

1. Click **Test Connection** to test the connection between KOTS and the private registry host.

1. Click **Save changes**.

## Stop Using a Registry and Remove Registry Settings

To stop using a registry and remove registry settings from the Admin Console:

1. Log in to the Admin Console and go to **Registry Settings**.

1. Click **Stop using registry** to remove the registry settings from the Admin Console.

---


# Air Gap Installation with Embedded Cluster

import UpdateAirGapAdm from "../partials/embedded-cluster/_update-air-gap-admin-console.mdx"
import UpdateAirGapCli from "../partials/embedded-cluster/_update-air-gap-cli.mdx"
import UpdateAirGapOverview from "../partials/embedded-cluster/_update-air-gap-overview.mdx"
import DoNotDowngrade from "../partials/embedded-cluster/_warning-do-not-downgrade.mdx"
import Prerequisites from "../partials/install/_ec-prereqs.mdx"

# Air Gap Installation with Embedded Cluster

This topic describes how to install applications with Embedded Cluster on a virtual machine (VM) or bare metal server with no outbound internet access.

## Overview

When an air gap bundle is built for a release containing an Embedded Cluster Config, both an application air gap bundle and an Embedded Cluster air gap bundle are built. The application air gap bundle can be used for air gap installations with Replicated kURL or with Replicated KOTS in an existing cluster. The Embedded Cluster air gap bundle is used for air gap installations with Embedded Cluster.

The Embedded Cluster air gap bundle not only contains the assets normally contained in an application air gap bundle (`airgap.yaml`, `app.tar.gz`, and an images directory), but it also contains an `embedded-cluster` directory with the assets needed to install the infrastructure (Embedded Cluster/k0s and [extensions](/reference/embedded-config#extensions)).

During installation with Embedded Cluster in air gap environments, a Docker registry is deployed to the cluster to store application images. Infrastructure images (for Embedded Cluster and Helm extensions) and the Helm charts are preloaded on each node at installation time.

### Requirement

Air gap installations are supported with Embedded Cluster version 1.3.0 or later.

### Limitations and Known Issues

Embedded Cluster installations in air gap environments have the following limitations and known issues:

* If you pass `?airgap=true` to the `replicated.app` endpoint but an air gap bundle is not built for the latest release, the API will not return a 404. Instead, it returns the tarball without the air gap bundle (that is, with only the installer and the license in it, as for online installations).

* Images used by Helm extensions must not refer to a multi-architecture image by digest. Only x64 images are included in air gap bundles, and the digest for the x64 image will be different from the digest for the multi-architecture image, preventing the image from being discovered in the bundle. An example of a chart that does this is the ingress-nginx/ingress-nginx chart. For an example of how the digests should be set to empty string to pull by tag only, see [extensions](/reference/embedded-config#extensions) in _Embedded Cluster Config_.

* Images for Helm extensions are loaded directly into containerd so that they are available without internet access.
But if an image used by a Helm extension has **Always** set as the image pull policy, Kubernetes will try to pull the image from the internet. If necessary, use the Helm values to set `IfNotPresent` as the image pull policy to ensure the extension works in air gap environments.

* On the channel release history page, the links for **Download air gap bundle**, **Copy download URL**, and **View bundle contents** pertain to the application air gap bundle only, not the Embedded Cluster bundle.

## Prerequisites

Before you install, complete the following prerequisites:



## Install

To install with Embedded Cluster in an air gap environment:

1. In the [Vendor Portal](https://vendor.replicated.com), go to the channel where the target release was promoted to build the air gap bundle. Do one of the following:
   * If the **Automatically create airgap builds for newly promoted releases in this channel** setting is enabled on the channel, watch for the build status to complete.
   * If automatic air gap builds are not enabled, go to the **Release history** page for the channel and build the air gap bundle manually.

   :::note
   Any errors in building either the application air gap bundle or the Embedded Cluster infrastructure are shown if present.
   :::

1. Go to **Customers** and click on the target customer.

1. On the **Manage customer** tab, under **License options**, enable the **Airgap Download Enabled** license field.

1. At the top of the page, click **Install instructions > Embedded Cluster**.

   ![Customer install instructions drop down button](/images/customer-install-instructions-dropdown.png)

   [View a larger version of this image](/images/customer-install-instructions-dropdown.png)

1. In the **Embedded Cluster install instructions** dialog, verify that the **Install in an air gap environment** checkbox is enabled.

   Embedded cluster install instruction dialog

   [View a larger version of this image](/images/embedded-cluster-install-dialog-airgap.png)

1. (Optional) For **Select a version**, select a specific application version to install. By default, the latest version is selected.

1. SSH onto the machine where you will install.

1. On a machine with internet access, run the curl command to download the air gap installation assets as a `.tgz`.

1. Move the downloaded `.tgz` to the air-gapped machine where you will install.

1. On your air-gapped machine, untar the `.tgz` following the instructions provided in the **Embedded Cluster installation instructions** dialog. This will produce three files:
   * The installer
   * The license
   * The air gap bundle (`APP_SLUG.airgap`)

1. Install the application with the installation command copied from the **Embedded Cluster installation instructions** dialog:

   ```bash
   sudo ./APP_SLUG install --license license.yaml --airgap-bundle APP_SLUG.airgap
   ```
   Where `APP_SLUG` is the unique application slug.

   :::note
   Embedded Cluster supports installation options such as installing behind a proxy and changing the data directory used by Embedded Cluster. For the list of flags supported with the Embedded Cluster `install` command, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install).
   :::

1. When prompted, enter a password for accessing the KOTS Admin Console.

   The installation command takes a few minutes to complete. During installation, Embedded Cluster completes tasks to prepare the cluster and install KOTS in the cluster.
Embedded Cluster also automatically runs a default set of [_host preflight checks_](/vendor/embedded-using#about-host-preflight-checks) which verify that the environment meets the requirements for the installer.

   **Example output:**

   ```bash
   ? Enter an Admin Console password: ********
   ? Confirm password: ********
   ✔  Host files materialized!
   ✔  Running host preflights
   ✔  Node installation finished!
   ✔  Storage is ready!
   ✔  Embedded Cluster Operator is ready!
   ✔  Admin Console is ready!
   ✔  Additional components are ready!
   Visit the Admin Console to configure and install gitea-kite: http://104.155.145.60:30000
   ```

   At this point, the cluster is provisioned and the Admin Console is deployed, but the application is not yet installed.

1. Go to the URL provided in the output to access the Admin Console.

1. On the Admin Console landing page, click **Start**.

1. On the **Secure the Admin Console** screen, review the instructions and click **Continue**. In your browser, follow the instructions that were provided on the **Secure the Admin Console** screen to bypass the warning.

1. On the **Certificate type** screen, either select **Self-signed** to continue using the self-signed Admin Console certificate or click **Upload your own** to upload your own private key and certificate.

   By default, a self-signed TLS certificate is used to secure communication between your browser and the Admin Console. You will see a warning in your browser every time you access the Admin Console unless you upload your own certificate.

1. On the login page, enter the Admin Console password that you created during installation and click **Log in**.

1. On the **Nodes** page, you can view details about the machine where you installed, including its node role, status, CPU, and memory.

   Optionally, add nodes to the cluster before deploying the application. For more information about joining nodes, see [Managing Multi-Node Clusters with Embedded Cluster](/enterprise/embedded-manage-nodes). Click **Continue**.

1. On the **Configure [App Name]** screen, complete the fields for the application configuration options. Click **Continue**.

1. On the **Validate the environment & deploy [App Name]** screen, address any warnings or failures identified by the preflight checks and then click **Deploy**.

   Preflight checks are conformance tests that run against the target namespace and cluster to ensure that the environment meets the minimum requirements to support the application.

The Admin Console dashboard opens.

On the Admin Console dashboard, the application status changes from Missing to Unavailable while the application is being installed. When the installation is complete, the status changes to Ready. For example:

![Admin console dashboard showing ready status](/images/gitea-ec-ready.png)

[View a larger version of this image](/images/gitea-ec-ready.png)

---


# Automating Installation with Embedded Cluster

import ConfigValuesExample from "../partials/configValues/_configValuesExample.mdx"
import ConfigValuesProcedure from "../partials/configValues/_config-values-procedure.mdx"

# Automating Installation with Embedded Cluster

This topic describes how to install an application with Replicated Embedded Cluster from the command line, without needing to access the Replicated KOTS Admin Console.

## Overview

A common use case for installing with Embedded Cluster from the command line is to automate installation, such as performing headless installations as part of CI/CD pipelines.

With headless installation, you provide all the necessary installation assets, such as the license file and the application config values, with the installation command rather than through the Admin Console UI. Any preflight checks defined for the application run automatically during headless installations from the command line rather than being displayed in the Admin Console.

## Prerequisite

Create a ConfigValues YAML file to define the configuration values for the application release. The ConfigValues file allows you to pass the configuration values for an application from the command line with the install command, rather than through the Admin Console UI. For air-gapped environments, ensure that the ConfigValues file can be accessed from the installation environment.

The KOTS ConfigValues file includes the fields that are defined in the KOTS Config custom resource for an application release, along with the user-supplied and default values for each field, as shown in the example below:



## Online (Internet-Connected) Installation

To install with Embedded Cluster in an online environment:

1. Follow the steps provided in the Vendor Portal to download and untar the Embedded Cluster installation assets. For more information, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded).

1. Run the following command to install:

   ```bash
   sudo ./APP_SLUG install --license-file PATH_TO_LICENSE \
     --config-values PATH_TO_CONFIGVALUES \
     --admin-console-password ADMIN_CONSOLE_PASSWORD
   ```

   Replace:
   * `APP_SLUG` with the unique slug for the application.
   * `PATH_TO_LICENSE` with the path to the customer license file.
   * `ADMIN_CONSOLE_PASSWORD` with a password for accessing the Admin Console.
   * `PATH_TO_CONFIGVALUES` with the path to the ConfigValues file.

## Air Gap Installation

To install with Embedded Cluster in an air-gapped environment:

1. Follow the steps provided in the Vendor Portal to download and untar the Embedded Cluster air gap installation assets. For more information, see [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap).

1. Ensure that the Embedded Cluster installation assets are available on the air-gapped machine, then run the following command to install:

   ```bash
   sudo ./APP_SLUG install --license-file PATH_TO_LICENSE \
     --config-values PATH_TO_CONFIGVALUES \
     --admin-console-password ADMIN_CONSOLE_PASSWORD \
     --airgap-bundle PATH_TO_AIRGAP_BUNDLE
   ```

   Replace:
   * `APP_SLUG` with the unique slug for the application.
   * `PATH_TO_LICENSE` with the path to the customer license file.
   * `PATH_TO_CONFIGVALUES` with the path to the ConfigValues file.
   * `ADMIN_CONSOLE_PASSWORD` with a password for accessing the Admin Console.
   * `PATH_TO_AIRGAP_BUNDLE` with the path to the Embedded Cluster `.airgap` bundle for the release.

---


# Embedded Cluster Installation Requirements

import EmbeddedClusterRequirements from "../partials/embedded-cluster/_requirements.mdx"
import EmbeddedClusterPortRequirements from "../partials/embedded-cluster/_port-reqs.mdx"
import FirewallOpeningsIntro from "../partials/install/_firewall-openings-intro.mdx"

# Embedded Cluster Installation Requirements

This topic lists the installation requirements for Replicated Embedded Cluster.
Ensure that the installation environment meets these requirements before attempting to install. + +## System Requirements + + + +## Port Requirements + + + +## Firewall Openings for Online Installations with Embedded Cluster {#firewall} + + + + + + + + + + + + + + + + + + + + +

| Domain | Description |
|--------|-------------|
| `proxy.replicated.com` | Private Docker images are proxied through `proxy.replicated.com`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA. For the range of IP addresses for `proxy.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L52-L57) in GitHub. |
| `replicated.app` | Upstream application YAML and metadata is pulled from `replicated.app`. The current running version of the application (if any), as well as a license ID and application ID to authenticate, are all sent to `replicated.app`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA. For the range of IP addresses for `replicated.app`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L60-L65) in GitHub. |
| `registry.replicated.com` \* | Some applications host private images in the Replicated registry at this domain. The on-prem docker client uses a license ID to authenticate to `registry.replicated.com`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA. For the range of IP addresses for `registry.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L20-L25) in GitHub. |
    + +* Required only if the application uses the [Replicated private registry](/vendor/private-images-replicated). + +## About Firewalld Configuration + +When Firewalld is enabled in the installation environment, Embedded Cluster modifies the Firewalld config to allow traffic over the pod and service networks and to open the required ports on the host. No additional configuration is required. + +The following rule is added to Firewalld: + +```xml + + + + + + + + + + + +``` + +The following ports are opened in the default zone: + + + + + + + + + + + + + + + + + + + + + + + + + + +

| Port  | Protocol |
|-------|----------|
| 6443  | TCP      |
| 10250 | TCP      |
| 9443  | TCP      |
| 2380  | TCP      |
| 4789  | UDP      |
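
After installation, one way to confirm that these Firewalld openings are in place is to query the default zone directly. This is a sketch, assuming the `firewall-cmd` CLI is available on the host:

```bash
# List ports opened in the default zone; expect the ports from the table
# above (6443/tcp, 10250/tcp, 9443/tcp, 2380/tcp, and 4789/udp):
sudo firewall-cmd --list-ports

# Show all active zones and the interfaces assigned to them, including the
# zone that Embedded Cluster adds for the pod and service networks:
sudo firewall-cmd --get-active-zones
```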

---


# Online Installation with Embedded Cluster

import Prerequisites from "../partials/install/_ec-prereqs.mdx"

# Online Installation with Embedded Cluster

This topic describes how to install an application in an online (internet-connected) environment with the Replicated Embedded Cluster installer. For information about air gap installations with Embedded Cluster, see [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap).

## Prerequisites

Before you install, complete the following prerequisites:



* Ensure that the required domains are accessible from servers performing the installation. See [Firewall Openings for Online Installations](/enterprise/installing-embedded-requirements#firewall).

## Install

To install an application with Embedded Cluster:

1. In the [Vendor Portal](https://vendor.replicated.com), go to **Customers** and click on the target customer. Click **Install instructions > Embedded Cluster**.

   ![Customer install instructions drop down button](/images/customer-install-instructions-dropdown.png)

   [View a larger version of this image](/images/customer-install-instructions-dropdown.png)

   The **Embedded Cluster install instructions** dialog is displayed.

   Embedded cluster install instruction dialog

   [View a larger version of this image](/images/embedded-cluster-install-dialog.png)

1. (Optional) In the **Embedded Cluster install instructions** dialog, under **Select a version**, select a specific application version to install. By default, the latest version is selected.

1. SSH onto the machine where you will install.

1. Run the first command in the **Embedded Cluster install instructions** dialog to download the installation assets as a `.tgz`.

1. Run the second command to extract the `.tgz`. This will produce the following files:

   * The installer
   * The license

1. Run the third command to install the release:

   ```bash
   sudo ./APP_SLUG install --license LICENSE_FILE
   ```
   Where:
   * `APP_SLUG` is the unique slug for the application.
   * `LICENSE_FILE` is the customer license.

   :::note
   Embedded Cluster supports installation options such as installing behind a proxy and changing the data directory used by Embedded Cluster. For the list of flags supported with the Embedded Cluster `install` command, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install).
   :::

1. When prompted, enter a password for accessing the KOTS Admin Console.

   The installation command takes a few minutes to complete. During installation, Embedded Cluster completes tasks to prepare the cluster and install KOTS in the cluster. Embedded Cluster also automatically runs a default set of [_host preflight checks_](/vendor/embedded-using#about-host-preflight-checks) which verify that the environment meets the requirements for the installer.

   **Example output:**

   ```bash
   ? Enter an Admin Console password: ********
   ? Confirm password: ********
   ✔  Host files materialized!
   ✔  Running host preflights
   ✔  Node installation finished!
   ✔  Storage is ready!
   ✔  Embedded Cluster Operator is ready!
   ✔  Admin Console is ready!
   ✔  Additional components are ready!
   Visit the Admin Console to configure and install gitea-kite: http://104.155.145.60:30000
   ```

   At this point, the cluster is provisioned and the Admin Console is deployed, but the application is not yet installed.

1. Go to the URL provided in the output to access the Admin Console.

1. On the Admin Console landing page, click **Start**.

1. On the **Secure the Admin Console** screen, review the instructions and click **Continue**. In your browser, follow the instructions that were provided on the **Secure the Admin Console** screen to bypass the warning.

1. On the **Certificate type** screen, either select **Self-signed** to continue using the self-signed Admin Console certificate or click **Upload your own** to upload your own private key and certificate.

   By default, a self-signed TLS certificate is used to secure communication between your browser and the Admin Console. You will see a warning in your browser every time you access the Admin Console unless you upload your own certificate.

1. On the login page, enter the Admin Console password that you created during installation and click **Log in**.

1. On the **Nodes** page, you can view details about the machine where you installed, including its node role, status, CPU, and memory.

   Optionally, add nodes to the cluster before deploying the application. For more information about joining nodes, see [Managing Multi-Node Clusters with Embedded Cluster](/enterprise/embedded-manage-nodes). Click **Continue**.

1. On the **Configure [App Name]** screen, complete the fields for the application configuration options. Click **Continue**.

1. On the **Validate the environment & deploy [App Name]** screen, address any warnings or failures identified by the preflight checks and then click **Deploy**.

   Preflight checks are conformance tests that run against the target namespace and cluster to ensure that the environment meets the minimum requirements to support the application.

The Admin Console dashboard opens.

On the Admin Console dashboard, the application status changes from Missing to Unavailable while the application is being installed. When the installation is complete, the status changes to Ready.
For example:

![Admin console dashboard showing ready status](/images/gitea-ec-ready.png)

[View a larger version of this image](/images/gitea-ec-ready.png)

---


# Air Gap Installation in Existing Clusters with KOTS

import IntroExisting from "../partials/install/_intro-existing.mdx"
import IntroAirGap from "../partials/install/_intro-air-gap.mdx"
import PrereqsExistingCluster from "../partials/install/_prereqs-existing-cluster.mdx"
import BuildAirGapBundle from "../partials/install/_airgap-bundle-build.mdx"
import DownloadAirGapBundle from "../partials/install/_airgap-bundle-download.mdx"
import ViewAirGapBundle from "../partials/install/_airgap-bundle-view-contents.mdx"
import LicenseFile from "../partials/install/_license-file-prereq.mdx"
import AirGapLicense from "../partials/install/_airgap-license-download.mdx"
import DownloadKotsBundle from "../partials/install/_download-kotsadm-bundle.mdx"
import InstallCommandPrompts from "../partials/install/_kots-install-prompts.mdx"
import AppNameUI from "../partials/install/_placeholder-app-name-UI.mdx"
import InstallKotsCliAirGap from "../partials/install/_install-kots-cli-airgap.mdx"
import PushKotsImages from "../partials/install/_push-kotsadm-images.mdx"
import PlaceholderRoCreds from "../partials/install/_placeholder-ro-creds.mdx"
import KotsVersionMatch from "../partials/install/_kots-airgap-version-match.mdx"

# Air Gap Installation in Existing Clusters with KOTS




## Prerequisites

Complete the following prerequisites:



* Ensure that there is a compatible Docker image registry available inside the network. For more information about Docker registry compatibility, see [Compatible Image Registries](/enterprise/installing-general-requirements#registries).

  KOTS rewrites the application image names in all application manifests to read from the on-premises registry, and it re-tags and pushes the images to the on-premises registry. When authenticating to the registry, credentials with `push` permissions are required.

  A single application expects to use a single namespace in the Docker image registry. The namespace name can be any valid URL-safe string, supplied at installation time. A registry typically expects the namespace to exist before any images can be pushed into it.

  :::note
  Amazon Elastic Container Registry (ECR) does not use namespaces.
  :::

## Install {#air-gap}

To install in an air gap cluster with KOTS:

1. Download the customer license:



1. Go to the channel where the target release was promoted to build and download the air gap bundle for the release:



1.

1.

1.

1.



1.

1. Install the KOTS Admin Console using the images that you pushed in the previous step:

   ```shell
   kubectl kots install APP_NAME \
     --kotsadm-registry REGISTRY_HOST \
     --registry-username RO-USERNAME \
     --registry-password RO-PASSWORD
   ```

   Replace:

   * `APP_NAME` with a name for the application. This is the unique name that KOTS will use to refer to the application that you install.


1.

1. Access the Admin Console on port 8800. If the port forward is active, go to [http://localhost:8800](http://localhost:8800) to access the Admin Console.

   If you need to reopen the port forward to the Admin Console, run the following command:

   ```shell
   kubectl kots admin-console -n NAMESPACE
   ```
   Replace `NAMESPACE` with the namespace where KOTS is installed.

1. Log in with the password that you created during installation.

1.
Upload your license file. + +1. Upload the `.airgap` application air gap bundle. + +1. On the config screen, complete the fields for the application configuration options and then click **Continue**. + +1. On the **Preflight checks** page, the application-specific preflight checks run automatically. Preflight checks are conformance tests that run against the target namespace and cluster to ensure that the environment meets the minimum requirements to support the application. Click **Deploy**. + + :::note + Replicated recommends that you address any warnings or failures, rather than dismissing them. Preflight checks help ensure that your environment meets the requirements for application deployment. + ::: + +1. (Minimal RBAC Only) If you are installing with minimal role-based access control (RBAC), KOTS recognizes if the preflight checks failed due to insufficient privileges. When this occurs, a kubectl CLI preflight command displays that lets you manually run the preflight checks. The Admin Console then automatically displays the results of the preflight checks. Click **Deploy**. + + ![kubectl CLI preflight command](/images/kubectl-preflight-command.png) + + [View a larger version of this image](/images/kubectl-preflight-command.png) + +The Admin Console dashboard opens. + +On the Admin Console dashboard, the application status changes from Missing to Unavailable while the Deployment is being created. When the installation is complete, the status changes to Ready. For example: + +![Admin Console dashboard](/images/kotsadm-dashboard-graph.png) + +[View a larger version of this image](/images/kotsadm-dashboard-graph.png) + +--- + + +# Installing with the KOTS CLI + +import ConfigValuesExample from "../partials/configValues/_configValuesExample.mdx" +import ConfigValuesProcedure from "../partials/configValues/_config-values-procedure.mdx" +import PlaceholdersGlobal from "../partials/install/_placeholders-global.mdx" +import PlaceholderAirgapBundle from "../partials/install/_placeholder-airgap-bundle.mdx" +import PlaceholderNamespaceExisting from "../partials/install/_placeholder-namespace-existing.mdx" +import DownloadKotsBundle from "../partials/install/_download-kotsadm-bundle.mdx" +import InstallKotsCliAirGap from "../partials/install/_install-kots-cli-airgap.mdx" +import InstallKotsCli from "../partials/install/_install-kots-cli.mdx" +import PushKotsImages from "../partials/install/_push-kotsadm-images.mdx" +import KotsVersionMatch from "../partials/install/_kots-airgap-version-match.mdx" +import PlaceholderRoCreds from "../partials/install/_placeholder-ro-creds.mdx" +import AccessAdminConsole from "../partials/install/_access-admin-console.mdx" + +# Installing with the KOTS CLI + +This topic describes how to install an application with Replicated KOTS in an existing cluster using the KOTS CLI. + +## Overview + +You can use the KOTS CLI to install an application with Replicated KOTS. A common use case for installing from the command line is to automate installation, such as performing headless installations as part of CI/CD pipelines. + +To install with the KOTS CLI, you provide all the necessary installation assets, such as the license file and the application config values, with the installation command rather than through the Admin Console UI. Any preflight checks defined for the application run automatically from the CLI rather than being displayed in the Admin Console. 
+ +The following shows an example of the output from the kots install command: + + ``` + • Deploying Admin Console + • Creating namespace ✓ + • Waiting for datastore to be ready ✓ + • Waiting for Admin Console to be ready ✓ + • Waiting for installation to complete ✓ + • Waiting for preflight checks to complete ✓ + + • Press Ctrl+C to exit + • Go to http://localhost:8800 to access the Admin Console + + • Go to http://localhost:8888 to access the application + ``` + +## Prerequisite + +Create a ConfigValues YAML file to define the configuration values for the application release. The ConfigValues file allows you to pass the configuration values for an application from the command line with the install command, rather than through the Admin Console UI. For air-gapped environments, ensure that the ConfigValues file can be accessed from the installation environment. + +The KOTS ConfigValues file includes the fields that are defined in the KOTS Config custom resource for an application release, along with the user-supplied and default values for each field, as shown in the example below: + + + + + +## Online (Internet-Connected) Installation + +To install with KOTS in an online existing cluster: + +1. + +1. Install the application: + + ```bash + kubectl kots install APP_NAME \ + --shared-password PASSWORD \ + --license-file PATH_TO_LICENSE \ + --config-values PATH_TO_CONFIGVALUES \ + --namespace NAMESPACE \ + --no-port-forward + ``` + Replace: + + + + + +## Air Gap Installation {#air-gap} + +To install with KOTS in an air-gapped existing cluster: + +1. + +1. + + + +1. + +1. Install the application: + + ```bash + kubectl kots install APP_NAME \ + --shared-password PASSWORD \ + --license-file PATH_TO_LICENSE \ + --config-values PATH_TO_CONFIGVALUES \ + --airgap-bundle PATH_TO_AIRGAP_BUNDLE \ + --namespace NAMESPACE \ + --kotsadm-registry REGISTRY_HOST \ + --registry-username RO_USERNAME \ + --registry-password RO_PASSWORD \ + --no-port-forward + ``` + + Replace: + + + + + + + + + +## (Optional) Access the Admin Console + + + +--- + + +# Online Installation in Existing Clusters with KOTS + +import IntroExisting from "../partials/install/_intro-existing.mdx" +import PrereqsExistingCluster from "../partials/install/_prereqs-existing-cluster.mdx" +import LicenseFile from "../partials/install/_license-file-prereq.mdx" +import InstallCommandPrompts from "../partials/install/_kots-install-prompts.mdx" +import AppNameUI from "../partials/install/_placeholder-app-name-UI.mdx" + +# Online Installation in Existing Clusters with KOTS + + + +## Prerequisites + +Complete the following prerequisites: + + + + +## Install {#online} + +To install KOTS and the application in an existing cluster: + +1. Run one of these commands to install the Replicated KOTS CLI and KOTS. As part of the command, you also specify a name and version for the application that you will install. + + * **For the latest application version**: + + ```shell + curl https://kots.io/install | bash + kubectl kots install APP_NAME + ``` + * **For a specific application version**: + + ```shell + curl https://kots.io/install | bash + kubectl kots install APP_NAME --app-version-label=VERSION_LABEL + ``` + + Replace, where applicable: + + + + * `VERSION_LABEL` with the label for the version of the application to install. For example, `--app-version-label=3.0.1`. 
+ + **Examples:** + + ```shell + curl https://kots.io/install | bash + kubectl kots install application-name + ``` + + ```shell + curl https://kots.io/install | bash + kubectl kots install application-name --app-version-label=3.0.1 + ``` + +1. + +1. Access the Admin Console on port 8800. If the port forward is active, go to [http://localhost:8800](http://localhost:8800) to access the Admin Console. + + If you need to reopen the port forward to the Admin Console, run the following command: + + ```shell + kubectl kots admin-console -n NAMESPACE + ``` + Replace `NAMESPACE` with the namespace where KOTS is installed. + +1. Log in with the password that you created during installation. + +1. Upload your license file. + +1. On the config screen, complete the fields for the application configuration options and then click **Continue**. + +1. On the **Preflight checks** page, the application-specific preflight checks run automatically. Preflight checks are conformance tests that run against the target namespace and cluster to ensure that the environment meets the minimum requirements to support the application. Click **Deploy**. + + :::note + Replicated recommends that you address any warnings or failures, rather than dismissing them. Preflight checks help ensure that your environment meets the requirements for application deployment. + ::: + +1. (Minimal RBAC Only) If you are installing with minimal role-based access control (RBAC), KOTS recognizes if the preflight checks failed due to insufficient privileges. When this occurs, a kubectl CLI preflight command displays that lets you manually run the preflight checks. The Admin Console then automatically displays the results of the preflight checks. Click **Deploy**. + + ![kubectl CLI preflight command](/images/kubectl-preflight-command.png) + + [View a larger version of this image](/images/kubectl-preflight-command.png) + +The Admin Console dashboard opens. + +On the Admin Console dashboard, the application status changes from Missing to Unavailable while the Deployment is being created. When the installation is complete, the status changes to Ready. For example: + +![Admin Console dashboard](/images/kotsadm-dashboard-graph.png) + +[View a larger version of this image](/images/kotsadm-dashboard-graph.png) + +--- + + +# KOTS Installation Requirements + +import DockerCompatibility from "../partials/image-registry/_docker-compatibility.mdx" +import KubernetesCompatibility from "../partials/install/_kubernetes-compatibility.mdx" +import FirewallOpeningsIntro from "../partials/install/_firewall-openings-intro.mdx" + +# KOTS Installation Requirements + +This topic describes the requirements for installing in a Kubernetes cluster with Replicated KOTS. + +:::note +This topic does not include any requirements specific to the application. Ensure that you meet any additional requirements for the application before installing. +::: + +## Supported Browsers + +The following table lists the browser requirements for the Replicated KOTS Admin Console with the latest version of KOTS. + +| Browser | Support | +|----------------------|-------------| +| Chrome | 66+ | +| Firefox | 58+ | +| Opera | 53+ | +| Edge | 80+ | +| Safari (Mac OS only) | 13+ | +| Internet Explorer | Unsupported | + +## Kubernetes Version Compatibility + +Each release of KOTS maintains compatibility with the current Kubernetes version, and the two most recent versions at the time of its release. This includes support against all patch releases of the corresponding Kubernetes version. 

Kubernetes versions 1.25 and earlier are end-of-life (EOL). For more information about Kubernetes versions, see [Release History](https://kubernetes.io/releases/) in the Kubernetes documentation.

Replicated recommends using a version of KOTS that is compatible with Kubernetes 1.26 and higher.



## Minimum System Requirements

To install KOTS in an existing cluster, your environment must meet the following minimum requirements:

* **KOTS Admin Console minimum requirements**: Clusters that have LimitRanges specified must support the following minimum requirements for the Admin Console:

  * **CPU resources and memory**: The Admin Console pod requests 100m CPU resources and 100Mi memory.

  * **Disk space**: The Admin Console requires a minimum of 5GB of disk space on the cluster for persistent storage, including:

    * **4GB for S3-compatible object store**: The Admin Console requires 4GB for an S3-compatible object store to store application archives, support bundles, and snapshots that are configured to use a host path and NFS storage destination. By default, KOTS deploys MinIO to satisfy this object storage requirement. During deployment, MinIO is configured with a randomly generated `AccessKeyID` and `SecretAccessKey`, and only exposed as a ClusterIP on the overlay network.

      :::note
      You can optionally install KOTS without MinIO by passing `--with-minio=false` with the `kots install` command. This installs KOTS as a StatefulSet using a persistent volume (PV) for storage. For more information, see [Installing KOTS in Existing Clusters Without Object Storage](/enterprise/installing-stateful-component-requirements).
      :::

    * **1GB for rqlite PersistentVolume**: The Admin Console requires 1GB for a rqlite StatefulSet to store version history, application metadata, and other small amounts of data needed to manage the application(s). During deployment, the rqlite component is secured with a randomly generated password, and only exposed as a ClusterIP on the overlay network.

* **Supported operating systems**: The following are the supported operating systems for nodes:
  * Linux AMD64
  * Linux ARM64

* **Available StorageClass**: The cluster must have an existing StorageClass available. KOTS creates the required stateful components using the default StorageClass in the cluster. For more information, see [Storage Classes](https://kubernetes.io/docs/concepts/storage/storage-classes/) in the Kubernetes documentation.

* **Kubernetes version compatibility**: The version of Kubernetes running on the cluster must be compatible with the version of KOTS that you use to install the application. This compatibility requirement does not include any specific and additional requirements defined by the software vendor for the application.

  For more information about the versions of Kubernetes that are compatible with each version of KOTS, see [Kubernetes Version Compatibility](#kubernetes-version-compatibility) above.

* **OpenShift version compatibility**: For Red Hat OpenShift clusters, the version of OpenShift must use a supported Kubernetes version. For more information about supported Kubernetes versions, see [Kubernetes Version Compatibility](#kubernetes-version-compatibility) above.

* **Storage class**: The cluster must have an existing storage class available. For more information, see [Storage Classes](https://kubernetes.io/docs/concepts/storage/storage-classes/) in the Kubernetes documentation.

* **Port forwarding**: To support port forwarding, Kubernetes clusters require that the SOcket CAT (socat) package is installed on each node.

  If the package is not installed on each node in the cluster, you see the following error message when the installation script attempts to connect to the Admin Console: `unable to do port forwarding: socat not found`.

  To check if the package that provides socat is installed, you can run `which socat`. If the package is installed, the `which socat` command prints the full path to the socat executable file. For example, `/usr/bin/socat`.

  If the output of the `which socat` command is `socat not found`, then you must install the package that provides the socat command. The name of this package can vary depending on the node's operating system.

## RBAC Requirements

The user that runs the installation command must have at least the minimum role-based access control (RBAC) permissions that are required by KOTS. If the user does not have the required RBAC permissions, then an error message displays: `Current user has insufficient privileges to install Admin Console`.

The required RBAC permissions vary depending on whether the user attempts to install KOTS with cluster-scoped access or namespace-scoped access:
* [Cluster-scoped RBAC Requirements (Default)](#cluster-scoped)
* [Namespace-scoped RBAC Requirements](#namespace-scoped)

### Cluster-scoped RBAC Requirements (Default) {#cluster-scoped}

By default, KOTS requires cluster-scoped access. With cluster-scoped access, a Kubernetes ClusterRole and ClusterRoleBinding are created that grant KOTS access to all resources across all namespaces in the cluster.

To install KOTS with cluster-scoped access, the user must meet the following RBAC requirements:
* The user must be able to create workloads, ClusterRoles, and ClusterRoleBindings.
* The user must have cluster-admin permissions to create namespaces and assign RBAC roles across the cluster.

### Namespace-scoped RBAC Requirements {#namespace-scoped}

KOTS can be installed with namespace-scoped access rather than the default cluster-scoped access. With namespace-scoped access, a Kubernetes Role and RoleBinding are automatically created that grant KOTS permissions only in the namespace where it is installed.

:::note
Depending on the application, namespace-scoped access for KOTS is required, optional, or not supported. Contact your software vendor for application-specific requirements.
:::

To install or upgrade KOTS with namespace-scoped access, the user must have _one_ of the following permission levels in the target namespace:
* Wildcard Permissions (Default)
* Minimum KOTS RBAC Permissions

See the sections below for more information.

#### Wildcard Permissions (Default)

By default, when namespace-scoped access is enabled, KOTS attempts to automatically create the following Role to acquire wildcard (`* * *`) permissions in the target namespace:

  ```yaml
  apiVersion: "rbac.authorization.k8s.io/v1"
  kind: "Role"
  metadata:
    name: "kotsadm-role"
  rules:
  - apiGroups: ["*"]
    resources: ["*"]
    verbs: ["*"]
  ```

  To support this default behavior, the user must also have `* * *` permissions in the target namespace.

#### Minimum KOTS RBAC Permissions

In some cases, it is not possible to grant the user `* * *` permissions in the target namespace. For example, an organization might have security policies that prevent this level of permissions.
+ + If the user installing or upgrading KOTS cannot be granted `* * *` permissions in the namespace, then they can instead request the minimum RBAC permissions required by KOTS. Using the minimum KOTS RBAC permissions also requires manually creating a ServiceAccount, Role, and RoleBinding for KOTS, rather than allowing KOTS to automatically create a Role with `* * *` permissions. + + To use the minimum KOTS RBAC permissions to install or upgrade: + + 1. Ensure that the user has the minimum RBAC permissions required by KOTS. The following lists the minimum RBAC permissions: + + ```yaml + - apiGroups: [""] + resources: ["configmaps", "persistentvolumeclaims", "pods", "secrets", "services", "limitranges"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["apps"] + resources: ["daemonsets", "deployments", "statefulsets"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["batch"] + resources: ["jobs", "cronjobs"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["networking.k8s.io", "extensions"] + resources: ["ingresses"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: [""] + resources: ["namespaces", "endpoints", "serviceaccounts"] + verbs: ["get"] + - apiGroups: ["authorization.k8s.io"] + resources: ["selfsubjectaccessreviews", "selfsubjectrulesreviews"] + verbs: ["create"] + - apiGroups: ["rbac.authorization.k8s.io"] + resources: ["roles", "rolebindings"] + verbs: ["get"] + - apiGroups: [""] + resources: ["pods/log", "pods/exec"] + verbs: ["get", "list", "watch", "create"] + - apiGroups: ["batch"] + resources: ["jobs/status"] + verbs: ["get", "list", "watch"] + ``` + + :::note + The minimum RBAC requirements can vary slightly depending on the cluster's Kubernetes distribution and the version of KOTS. Contact your software vendor if you have the required RBAC permissions listed above and you see an error related to RBAC during installation or upgrade. + ::: + + 1. 
Save the following ServiceAccount, Role, and RoleBinding to a single YAML file, such as `rbac.yaml`:

    ```yaml
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      labels:
        kots.io/backup: velero
        kots.io/kotsadm: "true"
      name: kotsadm
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: Role
    metadata:
      labels:
        kots.io/backup: velero
        kots.io/kotsadm: "true"
      name: kotsadm-role
    rules:
    - apiGroups: [""]
      resources: ["configmaps", "persistentvolumeclaims", "pods", "secrets", "services", "limitranges"]
      verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
    - apiGroups: ["apps"]
      resources: ["daemonsets", "deployments", "statefulsets"]
      verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
    - apiGroups: ["batch"]
      resources: ["jobs", "cronjobs"]
      verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
    - apiGroups: ["networking.k8s.io", "extensions"]
      resources: ["ingresses"]
      verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
    - apiGroups: [""]
      resources: ["namespaces", "endpoints", "serviceaccounts"]
      verbs: ["get"]
    - apiGroups: ["authorization.k8s.io"]
      resources: ["selfsubjectaccessreviews", "selfsubjectrulesreviews"]
      verbs: ["create"]
    - apiGroups: ["rbac.authorization.k8s.io"]
      resources: ["roles", "rolebindings"]
      verbs: ["get"]
    - apiGroups: [""]
      resources: ["pods/log", "pods/exec"]
      verbs: ["get", "list", "watch", "create"]
    - apiGroups: ["batch"]
      resources: ["jobs/status"]
      verbs: ["get", "list", "watch"]
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: RoleBinding
    metadata:
      labels:
        kots.io/backup: velero
        kots.io/kotsadm: "true"
      name: kotsadm-rolebinding
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: Role
      name: kotsadm-role
    subjects:
    - kind: ServiceAccount
      name: kotsadm
    ```

 1. If the application contains any Custom Resource Definitions (CRDs), add the CRDs to the Role in the YAML file that you created in the previous step with as many permissions as possible: `["get", "list", "watch", "create", "update", "patch", "delete"]`.

    :::note
    Contact your software vendor for information about any CRDs that are included in the application.
    :::

    **Example**

    ```yaml
    rules:
    - apiGroups: ["stable.example.com"]
      resources: ["crontabs"]
      verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
    ```

 1. Run the following command to create the RBAC resources for KOTS in the namespace:

    ```
    kubectl apply -f RBAC_YAML_FILE -n TARGET_NAMESPACE
    ```

    Replace:
    * `RBAC_YAML_FILE` with the name of the YAML file that contains the ServiceAccount, Role, and RoleBinding that you created.
    * `TARGET_NAMESPACE` with the namespace where the user will install KOTS.

:::note
After manually creating these RBAC resources, the user must include both the `--ensure-rbac=false` and `--skip-rbac-check` flags when installing or upgrading. These flags prevent KOTS from checking for or attempting to create a Role with `* * *` permissions in the namespace. For more information, see [Prerequisites](installing-existing-cluster#prerequisites) in _Online Installation in Existing Clusters with KOTS_.
:::

## Compatible Image Registries {#registries}

A private image registry is required for air gap installations with KOTS in existing clusters. You provide the credentials for a compatible private registry during installation.

## Compatible Image Registries {#registries}

A private image registry is required for air gap installations with KOTS in existing clusters. You provide the credentials for a compatible private registry during installation. You can also optionally configure a local private image registry for use with installations in online (internet-connected) environments.

Private registry settings can be changed at any time. For more information, see [Configuring Local Image Registries](image-registry-settings).

KOTS has been tested for compatibility with the following registries:

## Firewall Openings for Online Installations with KOTS in an Existing Cluster {#firewall}
<table>
  <tr>
    <th>Domain</th>
    <th>Description</th>
  </tr>
  <tr>
    <td>Docker Hub</td>
    <td><p>Some dependencies of KOTS are hosted as public images in Docker Hub. The required domains for this service are <code>index.docker.io</code>, <code>cdn.auth0.com</code>, <code>*.docker.io</code>, and <code>*.docker.com</code>.</p></td>
  </tr>
  <tr>
    <td><code>proxy.replicated.com</code> *</td>
    <td><p>Private Docker images are proxied through <code>proxy.replicated.com</code>. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p>For the range of IP addresses for <code>proxy.replicated.com</code>, see <a href="https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L52-L57">replicatedhq/ips</a> in GitHub.</p></td>
  </tr>
  <tr>
    <td><code>replicated.app</code></td>
    <td><p>Upstream application YAML and metadata is pulled from <code>replicated.app</code>. The current running version of the application (if any), as well as a license ID and application ID to authenticate, are all sent to <code>replicated.app</code>. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p>For the range of IP addresses for <code>replicated.app</code>, see <a href="https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L60-L65">replicatedhq/ips</a> in GitHub.</p></td>
  </tr>
  <tr>
    <td><code>registry.replicated.com</code> **</td>
    <td><p>Some applications host private images in the Replicated registry at this domain. The on-prem Docker client uses a license ID to authenticate to <code>registry.replicated.com</code>. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p>For the range of IP addresses for <code>registry.replicated.com</code>, see <a href="https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L20-L25">replicatedhq/ips</a> in GitHub.</p></td>
  </tr>
  <tr>
    <td><code>kots.io</code></td>
    <td><p>Requests are made to this domain when installing the Replicated KOTS CLI. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p></td>
  </tr>
  <tr>
    <td><code>github.com</code></td>
    <td><p>Requests are made to this domain when installing the Replicated KOTS CLI. For information about retrieving GitHub IP addresses, see <a href="https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/about-githubs-ip-addresses">About GitHub's IP addresses</a> in the GitHub documentation.</p></td>
  </tr>
</table>

* Required only if the application uses the [Replicated proxy registry](/vendor/private-images-about).

** Required only if the application uses the [Replicated registry](/vendor/private-images-replicated).


---


# Air Gap Installation with kURL

import KurlAbout from "../partials/install/_kurl-about.mdx"
import IntroEmbedded from "../partials/install/_intro-embedded.mdx"
import IntroAirGap from "../partials/install/_intro-air-gap.mdx"
import PrereqsEmbeddedCluster from "../partials/install/_prereqs-embedded-cluster.mdx"
import HaLoadBalancerPrereq from "../partials/install/_ha-load-balancer-prereq.mdx"
import AirGapLicense from "../partials/install/_airgap-license-download.mdx"
import BuildAirGapBundle from "../partials/install/_airgap-bundle-build.mdx"
import DownloadAirGapBundle from "../partials/install/_airgap-bundle-download.mdx"
import ViewAirGapBundle from "../partials/install/_airgap-bundle-view-contents.mdx"
import LicenseFile from "../partials/install/_license-file-prereq.mdx"
import HAStep from "../partials/install/_embedded-ha-step.mdx"
import LoginPassword from "../partials/install/_embedded-login-password.mdx"
import DownloadKurlBundle from "../partials/install/_download-kurl-bundle.mdx"
import ExtractKurlBundle from "../partials/install/_extract-kurl-bundle.mdx"
import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"

# Air Gap Installation with kURL

## Prerequisites

Complete the following prerequisites:

## Install {#air-gap}

To install an application with kURL:

1. Download the customer license:

1. Go to the channel where the target release was promoted, then build and download the air gap bundle for the release:

1. 

1. 

1. Download the `.tar.gz` air gap bundle for the kURL installer, which includes the components needed to run the kURL cluster and install the application with KOTS. kURL air gap bundles can be downloaded from the channel where the given release is promoted:

   * To download the kURL air gap bundle for the Stable channel:

   * To download the kURL bundle for channels other than Stable:

     ```bash
     replicated channel inspect CHANNEL
     ```

     Replace `CHANNEL` with the exact name of the target channel, which can include uppercase letters or special characters, such as `Unstable` or `my-custom-channel`.

     In the output of this command, copy the curl command with the air gap URL.

1. 

1. Run one of the following commands to install in air gap mode:

   - For a regular installation, run:

     ```bash
     cat install.sh | sudo bash -s airgap
     ```

   - For high availability, run:

     ```bash
     cat install.sh | sudo bash -s airgap ha
     ```

1. 

1. 

1. Go to the address provided in the `Kotsadm` field in the output of the installation command. For example, `Kotsadm: http://34.171.140.123:8800`.

1. On the Bypass Browser TLS warning page, review the information about how to bypass the browser TLS warning, and then click **Continue to Setup**.

1. On the HTTPS page, do one of the following:

   - To use the self-signed TLS certificate only, enter the hostname (required) if you are using the identity service. If you are not using the identity service, the hostname is optional. Click **Skip & continue**.
   - To use a custom certificate only, enter the hostname (required) if you are using the identity service. If you are not using the identity service, the hostname is optional.
Then upload a private key and SSL certificate to secure communication between your browser and the Admin Console. Click **Upload & continue**. + +1. Log in to the Admin Console with the password that was provided in the `Login with password (will not be shown again):` field in the output of the installation command. + +1. Upload your license file. + +1. Upload the `.airgap` bundle for the release that you downloaded in an earlier step. + +1. On the **Preflight checks** page, the application-specific preflight checks run automatically. Preflight checks are conformance tests that run against the target namespace and cluster to ensure that the environment meets the minimum requirements to support the application. Click **Deploy**. + + :::note + Replicated recommends that you address any warnings or failures, rather than dismissing them. Preflight checks help ensure that your environment meets the requirements for application deployment. + ::: + +1. (Minimal RBAC Only) If you are installing with minimal role-based access control (RBAC), KOTS recognizes if the preflight checks failed due to insufficient privileges. When this occurs, a kubectl CLI preflight command displays that lets you manually run the preflight checks. The Admin Console then automatically displays the results of the preflight checks. Click **Deploy**. + + ![kubectl CLI preflight command](/images/kubectl-preflight-command.png) + + [View a larger version of this image](/images/kubectl-preflight-command.png) + + The Admin Console dashboard opens. + + On the Admin Console dashboard, the application status changes from Missing to Unavailable while the Deployment is being created. When the installation is complete, the status changes to Ready. + + ![Admin console dashboard showing ready status](/images/gitea-ec-ready.png) + + [View a larger version of this image](/images/gitea-ec-ready.png) + +1. (Recommended) Change the Admin Console login password: + 1. Click the menu in the top right corner of the Admin Console, then click **Change password**. + 1. Enter a new password in the dialog, and click **Change Password** to save. + + Replicated strongly recommends that you change the password from the default provided during installation in a kURL cluster. For more information, see [Changing an Admin Console Password](auth-changing-passwords). + +1. Add primary and secondary nodes to the cluster. You might add nodes to either meet application requirements or to support your usage of the application. See [Adding Nodes to Embedded Clusters](cluster-management-add-nodes). + +--- + + +# Installing with kURL from the Command Line + +import ConfigValuesExample from "../partials/configValues/_configValuesExample.mdx" +import ConfigValuesProcedure from "../partials/configValues/_config-values-procedure.mdx" +import PlaceholdersGlobal from "../partials/install/_placeholders-global.mdx" +import PlaceholderAirgapBundle from "../partials/install/_placeholder-airgap-bundle.mdx" +import PlaceholderNamespaceKurl from "../partials/install/_placeholder-namespace-embedded.mdx" +import IntroKurl from "../partials/install/_automation-intro-embedded.mdx" +import DownloadkURLBundle from "../partials/install/_download-kurl-bundle.mdx" +import ExtractKurlBundle from "../partials/install/_extract-kurl-bundle.mdx" +import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" + +# Installing with kURL from the Command Line + + + +This topic describes how to install an application with Replicated kURL from the command line. 
+ +## Overview + +You can use the command line to install an application with Replicated kURL. A common use case for installing from the command line is to automate installation, such as performing headless installations as part of CI/CD pipelines. + +To install from the command line, you provide all the necessary installation assets, such as the license file and the application config values, with the installation command rather than through the Admin Console UI. Any preflight checks defined for the application run automatically during headless installations from the command line rather than being displayed in the Admin Console. + +## Prerequisite + +Create a ConfigValues YAML file to define the configuration values for the application release. The ConfigValues file allows you to pass the configuration values for an application from the command line with the install command, rather than through the Admin Console UI. For air-gapped environments, ensure that the ConfigValues file can be accessed from the installation environment. + +The KOTS ConfigValues file includes the fields that are defined in the KOTS Config custom resource for an application release, along with the user-supplied and default values for each field, as shown in the example below: + + + + + +## Online (Internet-Connected) Installation + + + +To install with kURL on a VM or bare metal server: + +1. Create the kURL cluster: + + ```bash + curl -sSL https://k8s.kurl.sh/APP_NAME | sudo bash + ``` + +1. Install the application in the cluster: + + ```bash + kubectl kots install APP_NAME \ + --shared-password PASSWORD \ + --license-file PATH_TO_LICENSE \ + --config-values PATH_TO_CONFIGVALUES \ + --namespace default \ + --no-port-forward + ``` + + Replace: + + + + + +## Air Gap Installation + +To install in an air-gapped kURL cluster: + +1. Download the kURL `.tar.gz` air gap bundle: + + + +1. + +1. Create the kURL cluster: + + ``` + cat install.sh | sudo bash -s airgap + ``` + +1. Install the application: + + ```bash + kubectl kots install APP_NAME \ + --shared-password PASSWORD \ + --license-file PATH_TO_LICENSE \ + --config-values PATH_TO_CONFIGVALUES \ + --airgap-bundle PATH_TO_AIRGAP_BUNDLE \ + --namespace default \ + --no-port-forward + ``` + + Replace: + + + + + + + +--- + + +# kURL Installation Requirements + +import FirewallOpeningsIntro from "../partials/install/_firewall-openings-intro.mdx" +import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" + +# kURL Installation Requirements + + + +This topic lists the installation requirements for Replicated kURL. Ensure that the installation environment meets these requirements before attempting to install. + +## Minimum System Requirements + +* 4 CPUs or equivalent per machine +* 8GB of RAM per machine +* 40GB of disk space per machine +* TCP ports 2379, 2380, 6443, 6783, and 10250 open between cluster nodes +* UDP port 8472 open between cluster nodes + + :::note + If the Kubernetes installer specification uses the deprecated kURL [Weave add-on](https://kurl.sh/docs/add-ons/weave), UDP ports 6783 and 6784 must be open between cluster nodes. Reach out to your software vendor for more information. + ::: + +* Root access is required +* (Rook Only) The Rook add-on version 1.4.3 and later requires block storage on each node in the cluster. For more information about how to enable block storage for Rook, see [Block Storage](https://kurl.sh/docs/add-ons/rook/#block-storage) in _Rook Add-On_ in the kURL documentation. 
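
Before installing, you can confirm that the TCP ports listed under Minimum System Requirements above are open between nodes. This is a minimal sketch that assumes `nc` (netcat) is installed and that `NODE_IP` is the address of a peer node; UDP port 8472 cannot be reliably probed this way:

```bash
# Probe each TCP port that must be open between cluster nodes
for port in 2379 2380 6443 6783 10250; do
  nc -z -v -w 2 NODE_IP "$port"
done
```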

## Additional System Requirements

You must meet the following additional kURL system requirements when applicable:

- **Supported Operating Systems**: For supported operating systems, see [Supported Operating Systems](https://kurl.sh/docs/install-with-kurl/system-requirements#supported-operating-systems) in the kURL documentation.

- **kURL Dependencies Directory**: kURL installs additional dependencies in the `/var/lib/kurl` directory, and the directory requirements must be met. See [kURL Dependencies Directory](https://kurl.sh/docs/install-with-kurl/system-requirements#kurl-dependencies-directory) in the kURL documentation.

- **Networking Requirements**: Networking requirements include firewall openings, host firewall rules, and port availability. See [Networking Requirements](https://kurl.sh/docs/install-with-kurl/system-requirements#networking-requirements) in the kURL documentation.

- **High Availability Requirements**: If you are operating a cluster with high availability, see [High Availability Requirements](https://kurl.sh/docs/install-with-kurl/system-requirements#high-availability-requirements) in the kURL documentation.

- **Cloud Disk Performance**: For a list of cloud VM instance and disk combinations that are known to provide sufficient performance for etcd and pass the write latency preflight check, see [Cloud Disk Performance](https://kurl.sh/docs/install-with-kurl/system-requirements#cloud-disk-performance) in the kURL documentation.

## Firewall Openings for Online Installations with kURL {#firewall}
<table>
  <tr>
    <th>Domain</th>
    <th>Description</th>
  </tr>
  <tr>
    <td>Docker Hub</td>
    <td><p>Some dependencies of KOTS are hosted as public images in Docker Hub. The required domains for this service are <code>index.docker.io</code>, <code>cdn.auth0.com</code>, <code>*.docker.io</code>, and <code>*.docker.com</code>.</p></td>
  </tr>
  <tr>
    <td><code>proxy.replicated.com</code> *</td>
    <td><p>Private Docker images are proxied through <code>proxy.replicated.com</code>. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p>For the range of IP addresses for <code>proxy.replicated.com</code>, see <a href="https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L52-L57">replicatedhq/ips</a> in GitHub.</p></td>
  </tr>
  <tr>
    <td><code>replicated.app</code></td>
    <td><p>Upstream application YAML and metadata is pulled from <code>replicated.app</code>. The current running version of the application (if any), as well as a license ID and application ID to authenticate, are all sent to <code>replicated.app</code>. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p>For the range of IP addresses for <code>replicated.app</code>, see <a href="https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L60-L65">replicatedhq/ips</a> in GitHub.</p></td>
  </tr>
  <tr>
    <td><code>registry.replicated.com</code> **</td>
    <td><p>Some applications host private images in the Replicated registry at this domain. The on-prem Docker client uses a license ID to authenticate to <code>registry.replicated.com</code>. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p>For the range of IP addresses for <code>registry.replicated.com</code>, see <a href="https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L20-L25">replicatedhq/ips</a> in GitHub.</p></td>
  </tr>
  <tr>
    <td><p><code>k8s.kurl.sh</code></p><p><code>s3.kurl.sh</code></p></td>
    <td><p>kURL installation scripts and artifacts are served from <a href="https://kurl.sh">kurl.sh</a>. An application identifier is sent in a URL path, and bash scripts and binary executables are served from kurl.sh. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p>For the range of IP addresses for <code>k8s.kurl.sh</code>, see <a href="https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L34-L39">replicatedhq/ips</a> in GitHub.</p><p>The range of IP addresses for <code>s3.kurl.sh</code> is the same as the range for the <code>kurl.sh</code> domain. For the range of IP addresses for <code>kurl.sh</code>, see <a href="https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L28-L31">replicatedhq/ips</a> in GitHub.</p></td>
  </tr>
  <tr>
    <td><code>amazonaws.com</code></td>
    <td><p><code>tar.gz</code> packages are downloaded from Amazon S3 during installations with kURL. For information about dynamically scraping the IP ranges to allowlist for accessing these packages, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-ip-ranges.html#aws-ip-download">AWS IP address ranges</a> in the AWS documentation.</p></td>
  </tr>
</table>

* Required only if the application uses the [Replicated proxy registry](/vendor/private-images-about).

** Required only if the application uses the [Replicated registry](/vendor/private-images-replicated).

---


# Online Installation with kURL

import KurlAbout from "../partials/install/_kurl-about.mdx"
import IntroEmbedded from "../partials/install/_intro-embedded.mdx"
import PrereqsEmbeddedCluster from "../partials/install/_prereqs-embedded-cluster.mdx"
import HaLoadBalancerPrereq from "../partials/install/_ha-load-balancer-prereq.mdx"
import LicenseFile from "../partials/install/_license-file-prereq.mdx"
import HAStep from "../partials/install/_embedded-ha-step.mdx"
import LoginPassword from "../partials/install/_embedded-login-password.mdx"
import AppNameUI from "../partials/install/_placeholder-app-name-UI.mdx"
import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"

# Online Installation with kURL

## Prerequisites

Complete the following prerequisites:

## Install {#install-app}

To install an application with kURL:

1. Run one of the following commands to create the cluster with the kURL installer:

   * For a regular installation, run:

     ```bash
     curl -sSL https://k8s.kurl.sh/APP_NAME | sudo bash
     ```

   * For high availability, run:

     ```bash
     curl -sSL https://k8s.kurl.sh/APP_NAME | sudo bash -s ha
     ```

   Replace:

1. 

1. 

1. Go to the address provided in the `Kotsadm` field in the output of the installation command. For example, `Kotsadm: http://34.171.140.123:8800`.

1. On the Bypass Browser TLS warning page, review the information about how to bypass the browser TLS warning, and then click **Continue to Setup**.

1. On the HTTPS page, do one of the following:

   - To use the self-signed TLS certificate only, enter the hostname (required) if you are using the identity service. If you are not using the identity service, the hostname is optional. Click **Skip & continue**.
   - To use a custom certificate only, enter the hostname (required) if you are using the identity service. If you are not using the identity service, the hostname is optional. Then upload a private key and SSL certificate to secure communication between your browser and the Admin Console. Click **Upload & continue**.

1. Log in to the Admin Console with the password that was provided in the `Login with password (will not be shown again):` field in the output of the installation command.

1. Upload your license file.

1. On the **Preflight checks** page, the application-specific preflight checks run automatically. Preflight checks are conformance tests that run against the target namespace and cluster to ensure that the environment meets the minimum requirements to support the application. Click **Deploy**.

   :::note
   Replicated recommends that you address any warnings or failures, rather than dismissing them. Preflight checks help ensure that your environment meets the requirements for application deployment.
   :::

1. (Minimal RBAC Only) If you are installing with minimal role-based access control (RBAC), KOTS recognizes if the preflight checks failed due to insufficient privileges. When this occurs, a kubectl CLI preflight command displays that lets you manually run the preflight checks. The Admin Console then automatically displays the results of the preflight checks. Click **Deploy**.
+ + ![kubectl CLI preflight command](/images/kubectl-preflight-command.png) + + [View a larger version of this image](/images/kubectl-preflight-command.png) + + The Admin Console dashboard opens. + + On the Admin Console dashboard, the application status changes from Missing to Unavailable while the Deployment is being created. When the installation is complete, the status changes to Ready. + + ![Admin console dashboard showing ready status](/images/gitea-ec-ready.png) + + [View a larger version of this image](/images/gitea-ec-ready.png) + +1. (Recommended) Change the Admin Console login password: + 1. Click the menu in the top right corner of the Admin Console, then click **Change password**. + 1. Enter a new password in the dialog, and click **Change Password** to save. + + Replicated strongly recommends that you change the password from the default provided during installation in a kURL cluster. For more information, see [Changing an Admin Console Password](auth-changing-passwords). + +1. Add primary and secondary nodes to the cluster. You might add nodes to either meet application requirements or to support your usage of the application. See [Adding Nodes to Embedded Clusters](cluster-management-add-nodes). + + +--- + + +# Considerations Before Installing + +# Considerations Before Installing + +Before you install an application with KOTS in an existing cluster, consider the following installation options. + +## Online (Internet-Connected) or Air Gap Installations + +Most Kubernetes clusters are able to make outbound internet requests. Inbound access is never recommended or required. +As such, most cluster operators are able to perform an online installation. + +If the target cluster does not have outbound internet access, the application can also be delivered through an air gap installation. + +To install an application in an air-gapped environment, the cluster must have access to an image registry. In this case, KOTS re-tags and pushes all images to the target registry. + +For information about installing with KOTS in air-gapped environments, see [Air Gap Installation in Existing Clusters with KOTS](installing-existing-cluster-airgapped). + +## Hardened Environments + +By default, KOTS Pods and containers are not deployed with a specific security context. For installations into a hardened environment, you can use the `--strict-security-context` flag with the installation command so that KOTS runs with a strict security context for Pods and containers. + +For more information about the security context enabled by the `--strict-security-context` flag, see [kots install](/reference/kots-cli-install). + +## Configuring Local Image Registries + +During install, KOTS can re-tag and push images to a local image registry. +This is useful to enable CVE scans, image policy validation, and other pre-deployment rules. A private image registry is required for air gapped environments, and is optional for online environments. + +For information about image registry requirements, see [Compatible Image Registries](installing-general-requirements#registries). + +## Automated (Headless) Installation + +You can automate application installation in online and air-gapped environments using the KOTS CLI. In an automated installation, you provide all the information required to install and deploy the application with the `kots install` command, rather than providing this information in the Replicated Admin Console. 
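
As a sketch, a headless installation supplies all of these inputs up front; the values below are placeholders, and the flags shown are the same ones used in the command-line installation topics:

```bash
kubectl kots install APP_NAME \
  --namespace default \
  --shared-password PASSWORD \
  --license-file ./license.yaml \
  --config-values ./configvalues.yaml \
  --no-port-forward
```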

For more information, see [Installing with the KOTS CLI](/enterprise/installing-existing-cluster-automation).

## KOTS Installations Without Object Storage

The KOTS Admin Console requires persistent storage for state. KOTS deploys MinIO for object storage by default.

You can optionally install KOTS without object storage. When installed without object storage, KOTS deploys the Admin Console as a StatefulSet with an attached PersistentVolume (PV) instead of as a Deployment.

For more information about how to install KOTS without object storage, see [Installing KOTS in Existing Clusters Without Object Storage](/enterprise/installing-stateful-component-requirements).

---


# Installing KOTS in Existing Clusters Without Object Storage

# Installing KOTS in Existing Clusters Without Object Storage

This topic describes how to install Replicated KOTS in existing clusters without the default object storage, including limitations of installing without object storage.

## Overview

The Replicated KOTS Admin Console requires persistent storage for state. By default, KOTS deploys an S3-compatible object store to satisfy the Admin Console's persistent storage requirement. The Admin Console stores the following in object storage:
* Support bundles
* Application archives
* Backups taken with Replicated snapshots that are configured to use NFS or host path storage destinations

For more information about the Admin Console's persistent storage requirements, see [Minimum System Requirements](/enterprise/installing-general-requirements#minimum-system-requirements) in _Installation Requirements_.

For existing cluster installations, KOTS deploys MinIO for object storage by default.

You can optionally install KOTS without object storage. When installed without object storage, KOTS deploys the Admin Console as a StatefulSet with an attached PersistentVolume (PV) instead of as a Deployment. In this case, support bundles and application archives are stored in the attached PV instead of in object storage. Additionally, for local snapshots storage, KOTS uses the `local-volume-provider` Velero plugin to store backups on local PVs instead of using object storage. The `local-volume-provider` plugin uses the existing Velero service account credentials to mount volumes directly to the Velero node-agent pods. For more information, see [`local-volume-provider`](https://github.com/replicatedhq/local-volume-provider) in GitHub.

## How to Install and Upgrade Without Object Storage

To install KOTS without object storage in an existing cluster, use the `--with-minio=false` flag.

#### `kots install --with-minio=false`

When `--with-minio=false` is used with the `kots install` command, KOTS does _not_ deploy MinIO. KOTS deploys the Admin Console as a StatefulSet with an attached PV instead of as a Deployment. For command usage, see [install](/reference/kots-cli-install/).

#### `kots admin-console upgrade --with-minio=false`

When `--with-minio=false` is used with the `kots admin-console upgrade` command, KOTS upgrades the existing Admin Console instance to the latest version, replaces the running Deployment with a StatefulSet, and removes MinIO after a data migration. This results in temporary downtime for the Admin Console, but deployed applications are unaffected. For command usage, see [admin-console upgrade](/reference/kots-cli-admin-console-upgrade/).
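
For reference, a minimal sketch of both commands, assuming the Admin Console is installed in the `default` namespace and `APP_NAME` is the application slug:

```bash
# Fresh install without MinIO: the Admin Console runs as a StatefulSet with an attached PV
kubectl kots install APP_NAME --namespace default --with-minio=false

# Upgrade an existing instance, replacing the Deployment with a StatefulSet and removing MinIO
kubectl kots admin-console upgrade --namespace default --with-minio=false
```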

---


# Accessing Dashboards Using Port Forwarding

# Accessing Dashboards Using Port Forwarding

This topic includes information about how to access Prometheus, Grafana, and Alertmanager in Replicated KOTS existing cluster and Replicated kURL installations.

For information about how to configure Prometheus monitoring in existing cluster installations, see [Configuring Prometheus Monitoring in Existing Cluster KOTS Installations](monitoring-applications).

## Overview

The Prometheus [expression browser](https://prometheus.io/docs/visualization/browser/), Grafana, and some preconfigured dashboards are included with Kube-Prometheus for advanced visualization. Prometheus Alertmanager is also included for alerting. You can access the Prometheus, Grafana, and Alertmanager dashboards using `kubectl port-forward`.

:::note
You can also expose these pods on NodePorts or behind an ingress controller. This is an advanced use case. For information about exposing the pods on NodePorts, see [NodePorts](https://github.com/prometheus-operator/kube-prometheus/blob/main/docs/customizations/node-ports.md) in the kube-prometheus GitHub repository. For information about exposing the pods behind an ingress controller, see [Expose via Ingress](https://github.com/prometheus-operator/kube-prometheus/blob/main/docs/customizations/exposing-prometheus-alertmanager-grafana-ingress.md) in the kube-prometheus GitHub repository.
:::

## Prerequisite

For existing cluster KOTS installations, first install Prometheus in the cluster and configure monitoring. See [Configuring Prometheus Monitoring in Existing Cluster KOTS Installations](monitoring-applications).

## Access Prometheus

To access the Prometheus dashboard:

1. Run the following command to port forward the Prometheus service:

   ```bash
   kubectl --namespace monitoring port-forward svc/prometheus-k8s 9090
   ```

1. Access the dashboard at http://localhost:9090.

## Access Grafana

Users can access the Grafana dashboard by logging in using a default username and password. For information about configuring Grafana, see the [Grafana documentation](https://grafana.com/docs/). For information about constructing Prometheus queries, see [Querying Prometheus](https://prometheus.io/docs/prometheus/latest/querying/basics/) in the Prometheus documentation.

To access the Grafana dashboard:

1. Run the following command to port forward the Grafana service:

   ```bash
   kubectl --namespace monitoring port-forward deployment/grafana 3000
   ```

1. Access the dashboard at http://localhost:3000.

1. Log in to Grafana:
   * **Existing cluster**: Use the default Grafana username and password: `admin:admin`.
   * **kURL cluster**: The Grafana password is randomly generated by kURL and is displayed on the command line after kURL provisions the cluster. To log in, use this password generated by kURL and the username `admin`.

     To retrieve the password, run the following kubectl command:

     ```
     kubectl get secret -n monitoring grafana-admin -o jsonpath="{.data.admin-password}" | base64 -d
     ```

## Access Alertmanager

Alerting with Prometheus has two phases:

* Phase 1: Alerting rules in Prometheus servers send alerts to an Alertmanager.
* Phase 2: The Alertmanager then manages those alerts, including silencing, inhibition, aggregation, and sending out notifications through methods such as email, on-call notification systems, and chat platforms.
+ +For more information about configuring Alertmanager, see [Configuration](https://prometheus.io/docs/alerting/configuration/) in the Prometheus documentation. + +To access the Alertmanager dashboard: + +1. Run the following command to port forward the Alertmanager service: + + ``` + kubectl --namespace monitoring port-forward svc/prometheus-alertmanager 9093 + ``` + +1. Access the dashboard at http://localhost:9093. + +--- + + +# Configuring Prometheus Monitoring in Existing Cluster KOTS Installations + +import OverviewProm from "../partials/monitoring/_overview-prom.mdx" + +# Configuring Prometheus Monitoring in Existing Cluster KOTS Installations + +This topic describes how to monitor applications and clusters with Prometheus in existing cluster installations with Replicated KOTS. + +For information about how to access Prometheus, Grafana, and Alertmanager, see [Accessing Dashboards Using Port Forwarding](/enterprise/monitoring-access-dashboards). + +For information about consuming Prometheus metrics externally in kURL installations, see [Consuming Prometheus Metrics Externally](monitoring-external-prometheus). + +## Overview + + + +## Configure Prometheus Monitoring + +For existing cluster installations with KOTS, users can install Prometheus in the cluster and then connect the Admin Console to the Prometheus endpoint to enable monitoring. + +### Step 1: Install Prometheus in the Cluster {#configure-existing} + +Replicated recommends that you use CoreOS's Kube-Prometheus distribution for installing and configuring highly available Prometheus on an existing cluster. For more information, see the [kube-prometheus](https://github.com/coreos/kube-prometheus) GitHub repository. + +This repository collects Kubernetes manifests, Grafana dashboards, and Prometheus rules combined with documentation and scripts to provide easy to operate end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus Operator. + +To install Prometheus using the recommended Kube-Prometheus distribution: + +1. Clone the [kube-prometheus](https://github.com/coreos/kube-prometheus) repository to the device where there is access to the cluster. + +1. Use `kubectl` to create the resources on the cluster: + + ```bash + # Create the namespace and CRDs, and then wait for them to be available before creating the remaining resources + kubectl create -f manifests/setup + until kubectl get servicemonitors --all-namespaces ; do date; sleep 1; echo ""; done + kubectl create -f manifests/ + ``` + + For advanced and cluster-specific configuration, you can customize Kube-Prometheus by compiling the manifests using jsonnet. For more information, see the [jsonnet website](https://jsonnet.org/). + + For more information about advanced Kube-Prometheus configuration options, see [Customizing Kube-Prometheus](https://github.com/coreos/kube-prometheus#customizing-kube-prometheus) in the kube-prometheus GitHub repository. + +### Step 2: Connect to a Prometheus Endpoint + +To view graphs on the Admin Console dashboard, provide the address of a Prometheus instance installed in the cluster. + +To connect the Admin Console to a Prometheus endpoint: + +1. On the Admin Console dashboard, under Monitoring, click **Configure Prometheus Address**. +1. Enter the address for the Prometheus endpoint in the text box and click **Save**. + + ![Configuring Prometheus](/images/kotsadm-dashboard-configureprometheus.png) + + Graphs appear on the dashboard shortly after saving the address. 
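
The address to enter is any endpoint where the Admin Console can reach Prometheus. Assuming the default Kube-Prometheus installation described above, which creates the `prometheus-k8s` service in the `monitoring` namespace, the cluster-internal address would typically be:

```
http://prometheus-k8s.monitoring.svc.cluster.local:9090
```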
+ +--- + + +# Consuming Prometheus Metrics Externally + +import OverviewProm from "../partials/monitoring/_overview-prom.mdx" +import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" + +# Consuming Prometheus Metrics Externally + + + +This topic describes how to consume Prometheus metrics in Replicated kURL clusters from a monitoring service that is outside the cluster. + +For information about how to access Prometheus, Grafana, and Alertmanager, see [Accessing Dashboards Using Port Forwarding](/enterprise/monitoring-access-dashboards). + +## Overview + + + +For kURL installations, if the [kURL Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) is included in the kURL installer spec, then the Prometheus monitoring system is installed alongside the application. No additional configuration is required to collect metrics and view any default and custom graphs on the Admin Console dashboard. + +Prometheus is deployed in kURL clusters as a NodePort service named `prometheus-k8s` in the `monitoring` namespace. The `prometheus-k8s` service is exposed on the IP address for each node in the cluster at port 30900. + +You can run the following command to view the `prometheus-k8s` service in your cluster: + +``` +kubectl get services -l app=kube-prometheus-stack-prometheus -n monitoring +``` +The output of the command includes details about the Prometheus service, including the type of service and the ports where the service is exposed. For example: + +``` +NAME TYPE CLUSTER_IP EXTERNAL_IP PORT(S) AGE +prometheus-k8s NodePort 10.96.2.229 9090:30900/TCP 5hr +``` +As shown in the example above, port 9090 on the `prometheus-k8s` service maps to port 30900 on each of the nodes. + +For more information about NodePort services, see [Type NodePort](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport) in _Services_ in the Kubernetes documentation. + +## Prerequisite + +Before you can consume Prometheus metrics in kURL clusters externally, ensure that firewall rules on all nodes in the cluster allow inbound TCP traffic on port 30900. + +## Consume Metrics from External Services + +You can connect to the `prometheus-k8s` service on port 30900 from any node in the cluster to access Prometheus metrics emitted by kURL clusters. + +To consume Prometheus metrics from an external service: + +1. Get the external IP address for one of the nodes in the cluster. You will use this IP address in the next step to access the `prometheus-k8s` service. + + You can find the IP address for a node in the output of the following command: + + ``` + kubectl describe node NODE_NAME + ``` + Where `NODE_NAME` is the name of a node in the cluster. + + :::note + Depending on the node's network configuration, there might be different IP addresses for accessing the node from an external or internal network. For example, the IP address 10.128.0.35 might be assigned to the node in the internal network, whereas the IP address used to access the node from external or public networks is 34.28.178.93. + + Consult your infrastructure team to assist you in determining which IP address to use. + ::: + +1. In a browser, go to `http://NODE_IP_ADDRESS:30900` to verify that you can connect to the `prometheus-k8s` NodePort service. Replace `NODE_IP_ADDRESS` with the external IP address that you copied in the first step. For example, `http://34.28.178.93:30900`. + + If the connection is successful, the Prometheus UI displays in the browser. + +1. 

---


# Validating SBOM Signatures

# Validating SBOM Signatures

This topic describes how to validate software bill of materials (SBOM) signatures for Replicated KOTS, Replicated kURL, and Troubleshoot releases.

## About Software Bills of Materials

A _software bill of materials_ (SBOM) is an inventory of all components used to create a software package. SBOMs have emerged as critical building blocks in software security and software supply chain risk management.

When you install software, validating an SBOM signature can help you understand exactly what the software package is installing. This information can help you ensure that the files are compatible with your licensing policies and help determine whether there is exposure to CVEs.

## Prerequisite

Before you perform these tasks, you must install cosign. For more information, see the [sigstore repository](https://github.com/sigstore/cosign) in GitHub.


## Validate a KOTS SBOM Signature

Each KOTS release includes a signed SBOM for KOTS Go dependencies.

To validate a KOTS SBOM signature:

1. Go to the [KOTS GitHub repository](https://github.com/replicatedhq/kots/releases) and download the specific KOTS release that you want to validate.

1. Extract the `.tar.gz` file.

   **Example:**

   ```
   tar -zxvf kots_darwin_all.tar.gz
   ```

   A KOTS binary and SBOM folder are created.

1. Run the following cosign command to validate the signatures:

   ```
   cosign verify-blob --key sbom/key.pub --signature sbom/kots-sbom.tgz.sig sbom/kots-sbom.tgz
   ```

## Validate a kURL SBOM Signature

If a kURL installer is used, then signed SBOMs for kURL Go and JavaScript dependencies are combined into a TAR file and are included with the release.

To validate a kURL SBOM signature:

1. Go to the [kURL GitHub repository](https://github.com/replicatedhq/kURL/releases) and download the specific kURL release files that you want to validate.

   There are three assets related to the SBOM:

   - `kurl-sbom.tgz` contains SBOMs for Go and JavaScript dependencies
   - `kurl-sbom.tgz.sig` is the digital signature for `kurl-sbom.tgz`
   - `key.pub` is the public key from the key pair used to sign `kurl-sbom.tgz`

2. Run the following cosign command to validate the signatures:

   ```
   cosign verify-blob --key key.pub --signature kurl-sbom.tgz.sig kurl-sbom.tgz
   ```

## Validate a Troubleshoot SBOM Signature

A signed SBOM for Troubleshoot dependencies is included in each release.

To validate a Troubleshoot SBOM signature:

1. Go to the [Troubleshoot GitHub repository](https://github.com/replicatedhq/troubleshoot/releases) and download the specific Troubleshoot release files that you want to validate.

   There are three assets related to the SBOM:

   - `troubleshoot-sbom.tgz` contains a software bill of materials for Troubleshoot.
   - `troubleshoot-sbom.tgz.sig` is the digital signature for `troubleshoot-sbom.tgz`
   - `key.pub` is the public key from the key pair used to sign `troubleshoot-sbom.tgz`
2. Run the following cosign command to validate the signatures:

   ```
   cosign verify-blob --key key.pub --signature troubleshoot-sbom.tgz.sig troubleshoot-sbom.tgz
   ```


---


# How to Set Up Backup Storage

# How to Set Up Backup Storage

This topic describes the process of setting up backup storage for the Replicated snapshots feature.

## Configuring Backup Storage for Embedded Clusters

You must configure a backup storage destination before you can create backups. This procedure describes how to configure backup storage for snapshots for _embedded clusters_ created by Replicated kURL.

To configure snapshots for embedded clusters:

1. On the Snapshots tab in the Admin Console, click **Check for Velero** to see whether kURL already installed Velero in the embedded cluster.

1. If Velero was installed, update the default internal storage settings in the Admin Console because internal storage is insufficient for full backups. See [Updating Settings in the Admin Console](snapshots-updating-with-admin-console).

1. If Velero was not installed:

   1. Install the Velero CLI. See [Installing the Velero CLI](snapshots-velero-cli-installing).

   1. Install Velero and configure a storage destination using one of the following procedures:

      - [Configuring a Host Path Storage Destination](snapshots-configuring-hostpath)
      - [Configuring an NFS Storage Destination](snapshots-configuring-nfs)
      - [Configuring Other Storage Destinations](snapshots-storage-destinations)

1. Optionally increase the default memory for the node-agent (restic) Pod. See [Configuring Namespace Access and Memory Limit](snapshots-velero-installing-config).

## Configuring Backup Storage for Existing Clusters

You must configure a backup storage destination before you can create backups.

Follow this process to install Velero and configure the snapshots feature:

1. Install the Velero CLI. See [Installing the Velero CLI](snapshots-velero-cli-installing).

1. Install Velero and configure a storage destination using one of the following procedures:

   - [Configuring a Host Path Storage Destination](snapshots-configuring-hostpath)
   - [Configuring an NFS Storage Destination](snapshots-configuring-nfs)
   - [Configuring Other Storage Destinations](snapshots-storage-destinations)

1. Enable access to the Velero namespace if you are using RBAC, and optionally increase the default memory for the node-agent (restic) Pod. See [Configuring Namespace Access and Memory Limit](snapshots-velero-installing-config).

## Next Step

After you configure a storage destination, you can create a backup. See [Creating and Scheduling Backups](snapshots-creating).

## Additional Resources

* [Restoring Full Backups](snapshots-restoring-full)
* [Troubleshooting Snapshots](snapshots-troubleshooting-backup-restore)


---


# Configuring a Host Path Storage Destination

import InstallVelero from "../partials/snapshots/_installVelero.mdx"
import RegistryCredNote from "../partials/snapshots/_registryCredentialsNote.mdx"
import ResticDaemonSet from "../partials/snapshots/_resticDaemonSet.mdx"
import UpdateDefaultStorage from "../partials/snapshots/_updateDefaultStorage.mdx"
import CheckVersion from "../partials/snapshots/_checkVersion.mdx"

# Configuring a Host Path Storage Destination

This topic describes how to install Velero and configure a host path as your storage destination for backups.

:::note

:::

## Requirements

* The host path must be a dedicated directory.
Do not use a partition used by a service like Docker or Kubernetes for ephemeral storage. +* The host path must exist and be writable by the user:group 1001:1001 on all nodes in the cluster. For example, in a Linux environment you might run `sudo chown -R 1001:1001 /backups` to change the user:group permissions. + + If you use a mounted directory for the storage destination, such as one that is created with the Common Internet File System (CIFS) or Server Message Block (SMB) protocols, ensure that you configure the user:group 1001:1001 permissions on all nodes in the cluster and from the server side as well. + + You cannot change the permissions of a mounted network shared filesystem from the client side. To reassign the user:group to 1001:1001 for a directory that is already mounted, you must remount the directory. For example, for a CIFS mounted directory, specify the `uid=1001,gid=1001` mount options in the CIFS mount command. + +## Prerequisites + +Complete the following items before you perform this task: + +* Review the limitations and considerations. See [Limitations and Considerations](/vendor/snapshots-overview#limitations-and-considerations) in _About Backup and Restore_. +* Install the velero CLI. See [Installing the Velero CLI](snapshots-velero-cli-installing). + +## Install Velero and Configure Host Path Storage in Online Environments + +To install Velero and configure host path storage in online environments: + +1. + +1. + +1. Run the following command to configure the host path storage destination: + + ``` + kubectl kots velero configure-hostpath --namespace NAME --hostpath /PATH + ``` + + Replace: + - `NAME` with the namespace where the Replicated KOTS Admin Console is installed and running + - `PATH` with the path to the directory where the backups will be stored + + For more information about required storage destination flags, see [`velero`](/reference/kots-cli-velero-index) in _Reference_. + +## Install Velero and Configure Host Path Storage in Air Gapped Environments + +To install Velero and configure host path storage in air gapped environments: + +1. + +1. + + + +1. + +1. Run the following command to configure the host path storage destination: + + ``` + kubectl kots velero configure-hostpath \ + --namespace NAME \ + --hostpath /PATH \ + --kotsadm-registry REGISTRY_HOSTNAME[/REGISTRY_NAMESPACE] \ + --registry-username REGISTRY_USERNAME \ + --registry-password REGISTRY_PASSWORD + ``` + + Replace: + - `NAME` with the namespace where the Admin Console is installed and running + - `PATH` with the path to the directory where the backups will be stored + - `REGISTRY_HOSTNAME` with the registry endpoint where the images are hosted + - `REGISTRY_NAMESPACE` with the registry namespace where the images are hosted (Optional) + - `REGISTRY_USERNAME` with the username to use to authenticate with the registry + - `REGISTRY_PASSWORD` with the password to use to authenticate with the registry + + For more information about required storage destination flags, see [`velero`](/reference/kots-cli-velero-index) in _Reference_. + +## Configure Host Path Storage in the Admin Console + +Alternatively, when the Admin Console and application are already installed, you can start in the Admin Console to install Velero and configure a host path storage destination. + +To install Velero and configure host path storage for existing clusters: + +1. From the Admin Console, click **Snapshots > Settings and Schedule**. + +1. Click **Add a new storage destination**. 
+ + The Add a new destination dialog opens and shows instructions for setting up Velero with different providers. + +1. Click **Host Path**. + + ![Snapshot Provider Host Path](/images/snapshot-provider-hostpath.png) + +1. In the Configure Host Path dialog, enter the path to the directory where the backups will be stored. Click **Get instructions**. + + ![Snapshot Provider Host Path Fields](/images/snapshot-provider-hostpath-field.png) + + A dialog opens with instructions on how to set up Velero with the specified host path configuration. + +1. Follow the steps in the dialog to install Velero and configure the storage destination. + + ![Snapshot Provider File System Instructions](/images/snapshot-provider-hostpath-instructions.png) + +1. Return to the Admin Console and either click **Check for Velero** or refresh the page to verify that the Velero installation is detected. + + +## Next Steps + +* (Existing Clusters Only) Configure Velero namespace access if you are using minimal RBAC. See [Configuring Namespace Access and Memory Limit](snapshots-velero-installing-config). +* (Optional) Increase the default memory limits. See [Configuring Namespace Access and Memory Limit](snapshots-velero-installing-config). +* Create or schedule backups. See [Creating and Scheduling Backups](snapshots-creating). + +## Additional Resources + +* [Troubleshooting Snapshots](/enterprise/snapshots-troubleshooting-backup-restore) + + +--- + + +# Configuring an NFS Storage Destination + +import InstallVelero from "../partials/snapshots/_installVelero.mdx" +import RegistryCredNote from "../partials/snapshots/_registryCredentialsNote.mdx" +import ResticDaemonSet from "../partials/snapshots/_resticDaemonSet.mdx" +import UpdateDefaultStorage from "../partials/snapshots/_updateDefaultStorage.mdx" +import CheckVersion from "../partials/snapshots/_checkVersion.mdx" + +# Configuring an NFS Storage Destination + +This topic describes how to install Velero and configure a Network File System (NFS) as your storage destination for backups. + +:::note + +::: + +## Requirements + +Configuring an NFS server as a snapshots storage destination has the following requirements: + +* The NFS server must be configured to allow access from all of the nodes in the cluster. +* The NFS directory must be writable by the user:group 1001:1001. +* Ensure that you configure the user:group 1001:1001 permissions for the directory on the NFS server. +* All of the nodes in the cluster must have the necessary NFS client packages installed to be able to communicate with the NFS server. For example, the `nfs-common` package is a common package used on Ubuntu. +* Any firewalls must be properly configured to allow traffic between the NFS server and clients (cluster nodes). + +## Prerequisites + +Complete the following items before you perform this task: + +* Review the limitations and considerations. See [Limitations and Considerations](/vendor/snapshots-overview#limitations-and-considerations) in _About Backup and Restore_. +* Install the velero CLI. See [Installing the Velero CLI](snapshots-velero-cli-installing). + +## Install Velero and Configure NFS Storage in Online Environments + +To install Velero and configure NFS storage in an online environment: + +1. + +1. + +1. 
Run the following command to configure the NFS storage destination: + + ``` + kubectl kots velero configure-nfs --namespace NAME --nfs-path PATH --nfs-server HOST + ``` + + Replace: + - `NAME` with the namespace where the Replicated KOTS Admin Console is installed and running + - `PATH` with the path that is exported by the NFS server + - `HOST` with the hostname or IP address of the NFS server + + For more information about required storage destination flags, see [`velero`](/reference/kots-cli-velero-index) in _Reference_. + +## Install Velero and Configure NFS Storage in Air Gapped Environments + +To install Velero and configure NFS storage in air gapped environments: + +1. + +1. + + + +1. + +1. Run the following command to configure the NFS storage destination: + + ``` + kubectl kots velero configure-nfs \ + --namespace NAME \ + --nfs-server HOST \ + --nfs-path PATH \ + --kotsadm-registry REGISTRY_HOSTNAME[/REGISTRY_NAMESPACE] \ + --registry-username REGISTRY_USERNAME \ + --registry-password REGISTRY_PASSWORD + ``` + + Replace: + - `NAME` with the namespace where the Admin Console is installed and running + - `HOST` with the hostname or IP address of the NFS server + - `PATH` with the path that is exported by the NFS server + - `REGISTRY_HOSTNAME` with the registry endpoint where the images are hosted + - `REGISTRY_NAMESPACE` with the registry namespace where the images are hosted (Optional) + - `REGISTRY_USERNAME` with the username to use to authenticate with the registry + - `REGISTRY_PASSWORD` with the password to use to authenticate with the registry + + For more information about required storage destination flags, see [`velero`](/reference/kots-cli-velero-index) in _Reference_. + +## Configure NFS Storage in the Admin Console + +Alternatively, when the Admin Console and application are already installed, you can start in the Admin Console to install Velero and configure an NFS storage destination. + +To install Velero and configure NFS storage for existing clusters: + +1. From the Admin Console, click **Snapshots > Settings and Schedule**. + +1. Click **Add a new storage destination**. + + The Add a new destination dialog opens and shows instructions for setting up Velero with different providers. + +1. Click **NFS**. + + ![Snapshot Provider NFS](/images/snapshot-provider-nfs.png) + +1. In the Configure NFS dialog, enter the NFS server hostname or IP Address, and the path that is exported by the NFS server. Click **Get instructions**. + + ![Snapshot Provider NFS Fields](/images/snapshot-provider-nfs-fields.png) + + A dialog opens with instructions on how to set up Velero with the specified NFS configuration. + +1. Follow the steps in the dialog to install Velero and configure the storage destination. + + ![Snapshot Provider File System Instructions](/images/snapshot-provider-nfs-instructions.png) + +1. Return to the Admin Console and either click **Check for Velero** or refresh the page to verify that the Velero installation is detected. + +## Next Steps + +* (Existing Clusters Only) Configure Velero namespace access if you are using minimal RBAC. See [Configuring Namespace Access and Memory Limit](snapshots-velero-installing-config). +* (Optional) Increase the default memory limits. See [Configuring Namespace Access and Memory Limit](snapshots-velero-installing-config). +* Create or schedule backups. See [Creating and Scheduling Backups](snapshots-creating). 

## Additional Resources

* [Troubleshooting Snapshots](snapshots-troubleshooting-backup-restore)


---


# Creating and Scheduling Backups

# Creating and Scheduling Backups

This topic describes how to use the Replicated snapshots feature to create backups. It also includes information about how to use the Replicated KOTS Admin Console to create a schedule for automatic backups. For information about restoring, see [Restoring from Backups](snapshots-restoring-full).

## Prerequisites

- Before you can create backups, you must configure a storage destination:

  - [Configuring a Host Path Storage Destination](snapshots-configuring-hostpath)
  - [Configuring an NFS Storage Destination](snapshots-configuring-nfs)
  - [Configuring Other Storage Destinations](snapshots-storage-destinations)

- If you have multiple applications in the Admin Console, make sure that each application has its own Backup custom resource file so that they can be included in the full backup. Use the **View files** tab to check for the Backup custom resources (`kind: Backup`, `apiVersion: velero.io/v1`).

  If any Backup custom resource files are missing, contact your vendor.

## Create a Full Backup (Recommended) {#full}

Full backups, or _instance snapshots_, back up the Admin Console and all application data, including application volumes and manifest files. If you manage multiple applications with the Admin Console, data from all applications that support backups is included in a full backup.

From a full backup, you can:
* Restore application and Admin Console data
* Restore only application data
* Restore only Admin Console data

You can create a full backup with the following methods:
* [Create a Backup with the CLI](#cli-backup)
* [Create a Backup in the Admin Console](#admin-console-backup)

### Create a Backup with the CLI {#cli-backup}

To create a full backup with the Replicated KOTS CLI, run the following command:

```
kubectl kots backup --namespace NAMESPACE
```

Replace `NAMESPACE` with the namespace where the Admin Console is installed.

For more information, see [backup](/reference/kots-cli-backup-index) in _KOTS CLI_.

### Create a Backup in the Admin Console {#admin-console-backup}

To create a full backup in the Admin Console:

1. To check if backups are supported for an application, go to the **View files** page, open the `upstream` folder, and confirm that the application includes a manifest file with `kind: Backup` and `apiVersion: velero.io/v1`. This manifest also shows which pod volumes are being backed up.

1. Go to **Snapshots > Full Snapshots (Instance)**.

1. Click **Start a snapshot**.

   When the backup is complete, it appears in the list of backups on the page, as shown in the following image:

   ![Full snapshot page with one completed snapshot](/images/snapshot-instance-list.png)

## Create a Partial Backup {#partial}

Partial backups, or _application snapshots_, back up application volumes and application manifests only. Partial backups do not back up Admin Console data.

:::note
Replicated recommends that you create full backups instead of partial backups because partial backups are not suitable for disaster recovery. See [Create a Full Backup](#full) above.
:::

To create a partial backup in the Admin Console:

1. Go to **Snapshots > Partial Snapshots (Application)**.

1. If you manage multiple applications in the Admin Console, use the dropdown to select the application that you want to back up.
Click **Start a snapshot**. + + When the snapshot is complete, it appears in the list of snapshots on the page as shown in the following image: + + ![Partial snapshot page with one completed snapshot](/images/snapshot-application-list.png) + +## Schedule Automatic Backups + +You can use the Admin Console to schedule full or partial backups. This is useful for automatically creating regular backups of Admin Console and application data. + +To schedule automatic backups in the Admin Console: + +1. Go to **Snapshots > Settings & Schedule**. + +1. Under **Automatic snapshots**, select **Full snapshots (Instance)** or **Partial snapshots (Application)** depending on the type of backup that you want to schedule. + + ![Snapshot Settings and Schedule page](/images/snapshot-schedule.png) + +1. (Partial Backups Only) If you manage multiple applications in the Admin Console, use the dropdown to select the application that you want to back up. + +1. Select **Enable automatic scheduled snapshots**. + +1. Configure the automatic backup schedule for the type of snapshots that you selected: + + * For **Schedule**, select Hourly, Daily, Weekly, or Custom. + * For **Cron Expression**, enter a cron expression to create a custom automatic backup schedule. For information about supported cron expressions, see [Cron Expressions](/reference/cron-expressions). + +1. (Optional) For **Retention Policy**, edit the amount of time that backup data is saved. By default, backup data is saved for 30 days. + + The retention policy applies to all backups, including both automatically- and manually-created backups. Changing the retention policy affects only backups created after the time of the change. +## Additional Resources + +[Troubleshooting Snapshots](snapshots-troubleshooting-backup-restore) + + +--- + + +# Restoring from Backups + +import RestoreTable from "../partials/snapshots/_restoreTable.mdx" +import RestoreTypes from "../partials/snapshots/_restore-types.mdx" +import GetBackups from "../partials/snapshots/_step-get-backups.mdx" +import Restore from "../partials/snapshots/_step-restore.mdx" +import Dr from "../partials/snapshots/_limitation-dr.mdx" +import Os from "../partials/snapshots/_limitation-os.mdx" +import InstallMethod from "../partials/snapshots/_limitation-install-method.mdx" +import CliRestores from "../partials/snapshots/_limitation-cli-restores.mdx" + +# Restoring from Backups + +This topic describes how to restore from full or partial backups using Replicated snapshots. + +## Overview + + + +You can do any type of restore from a full backup using the KOTS CLI. You can also restore an application from a full or partial backup using the Admin Console. + +## Limitations + +The following limitations apply to restoring from backups using snapshots: + +* +* +* +* + +For a full list of limitations and considerations related to the snapshots feature, see [Limitations and Considerations](/vendor/snapshots-overview#limitations-and-considerations) in _About Backup and Restore_. + +## Restore From a Full Backup Using the CLI {#full-cli} + +You can use the KOTS CLI to restore both the Admin Console and the application, the Admin Console only, or the application only. If you need to restore the Admin Console, you must use the KOTS CLI because the Admin Console gets recreated and is disconnected during the restore process. 
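+
+The `get backups` and `restore` steps referenced in the procedures below typically look like the following sketch, where `NAMESPACE` is the namespace where the Admin Console is installed and `BACKUP_NAME` is the name of a backup returned by the first command:
+
+```bash
+kubectl kots get backups --namespace NAMESPACE
+kubectl kots restore --from-backup BACKUP_NAME
+```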
+
+:::note
+
+:::
+
+To restore using the CLI, see the corresponding procedure for your environment:
+
+- [Existing Clusters](#existing)
+- [Online kURL Clusters](#online)
+- [Air Gap kURL Clusters](#air-gapped)
+
+### Existing Clusters {#existing}
+
+:::note
+If you are restoring to a healthy cluster, you can skip reinstalling Velero and skip ahead to the `get backups` and `restore` commands in the last two steps.
+:::
+
+To restore a full backup in an existing cluster:
+
+1. (New or Unhealthy Clusters Only) In the cluster where you will do the restore, install a version of Velero that is compatible with the version that was used to make the snapshot backup.
+
+   The Velero installation command varies depending on the storage destination for the backup. For the Velero installation command, see one of the following:
+
+   * **Host Path:** See [Configuring a Host Path Storage Destination](snapshots-configuring-hostpath)
+   * **NFS:** See [Configuring an NFS Storage Destination](snapshots-configuring-nfs) for the configuration steps and how to set up Velero.
+   * **AWS, GCP, Azure, or other S3:** See [Configuring Other Storage Destinations](snapshots-storage-destinations).
+
+1. 
+
+1. 
+
+### Online kURL Clusters {#online}
+
+:::note
+If you are restoring to a healthy cluster, you can skip the installation and configuration steps and skip ahead to the `get backups` and `restore` commands in the last two steps.
+:::
+
+To restore a full backup in a kURL cluster:
+
+1. (New or Unhealthy Clusters Only) Provision a cluster with kURL and install the target application in the cluster. See [Online Installation with kURL](installing-kurl).
+
+1. (New or Unhealthy Clusters Only) In the new kURL cluster, configure a storage destination that holds the backup you want to use:
+
+   * **Host Path:** See [Configuring a Host Path Storage Destination](snapshots-configuring-hostpath)
+   * **NFS:** See [Configuring an NFS Storage Destination](snapshots-configuring-nfs) for the configuration steps and how to set up Velero.
+   * **AWS, GCP, Azure, or other S3:** See [Configuring Other Storage Destinations](snapshots-storage-destinations).
+
+1. 
+
+1. 
+
+### Air Gap kURL Clusters {#air-gapped}
+
+To restore a full backup in an air gap kURL cluster:
+
+1. Run the following command to install a new cluster and provide kURL with the correct registry IP address. kURL must be able to assign the same IP address to the embedded private image registry in the new cluster.
+
+   ```bash
+   cat install.sh | sudo bash -s airgap kurl-registry-ip=IP
+   ```
+
+   Replace `IP` with the registry IP address.
+
+1. Use the KOTS CLI to configure Velero to use a storage destination. The storage backend used for backups must be accessible from the new cluster.
+
+   * **Host Path:** See [Configuring a Host Path Storage Destination](snapshots-configuring-hostpath)
+   * **NFS:** See [Configuring an NFS Storage Destination](snapshots-configuring-nfs) for the configuration steps and how to set up Velero.
+   * **S3-Compatible:** See [Configure S3-Compatible Storage for Air Gapped Environments](snapshots-storage-destinations#configure-s3-compatible-storage-for-air-gapped-environments) in _Configuring Other Storage Destinations_.
+
+1. 
+
+1. 
+
+## Restore the Application Only Using the Admin Console {#admin-console}
+
+You can restore an application from a full or partial backup using the Admin Console.
+
+### Restore an Application From a Full Backup
+
+To restore an application from a full backup:
+
+1. Select **Full Snapshots (Instance)** from the Snapshots tab.
+
+   ![Full Snapshot tab](/images/full-snapshot-tab.png)
+
+   [View a larger version of this image](/images/full-snapshot-tab.png)
+
+1. Click the **Restore from this backup** icon (the circular blue arrows) for the backup that you want to restore.
+
+1. In the **Restore from backup** dialog, select **Partial restore**.
+
+   ![Restore Full Snapshot dialog](/images/restore-backup-dialog.png)
+
+   [View a larger version of this image](/images/restore-backup-dialog.png)
+
+   :::note
+   You can also get the CLI commands for full restores or Admin Console only restores from this dialog.
+   :::
+
+1. At the bottom of the dialog, enter the application slug provided by your software vendor. For more information, see [Get the Application Slug](/vendor/vendor-portal-manage-app#slug) in _Managing Applications_.
+
+1. Click **Confirm and restore**.
+
+### Restore an Application From a Partial Backup
+
+To restore an application from a partial backup:
+
+1. Select **Partial Snapshots (Application)** from the Snapshots tab.
+
+   ![Partial Snapshot tab](/images/partial-snapshot-tab.png)
+
+   [View a larger version of this image](/images/partial-snapshot-tab.png)
+
+1. Click the **Restore from this backup** icon (the circular blue arrows) for the backup that you want to restore.
+
+   The **Restore from Partial backup (Application)** dialog opens.
+
+1. Under **Type your application slug to continue**, enter the application slug provided by your software vendor. For more information, see [Get the Application Slug](/vendor/vendor-portal-manage-app#slug) in _Managing Applications_.
+
+   ![Restore Partial Snapshot dialog](/images/restore-partial-dialog.png)
+
+   [View a larger version of this image](/images/restore-partial-dialog.png)
+
+1. Click **Confirm and restore**.
+
+## Additional Resources
+
+[Troubleshooting Snapshots](snapshots-troubleshooting-backup-restore)
+
+---
+
+
+# Configuring Other Storage Destinations
+
+import UpdateDefaultStorage from "../partials/snapshots/_updateDefaultStorage.mdx"
+import RegistryCredNote from "../partials/snapshots/_registryCredentialsNote.mdx"
+import CheckVersion from "../partials/snapshots/_checkVersion.mdx"
+
+# Configuring Other Storage Destinations
+
+This topic describes installing Velero and configuring storage for Amazon Web Services (AWS), Google Cloud Provider (GCP), Microsoft Azure, and S3-compatible providers.
+
+To configure host path or NFS as a storage destination, see [Configuring a Host Path Storage Destination](snapshots-configuring-hostpath) and [Configuring an NFS Storage Destination](snapshots-configuring-nfs).
+
+:::note
+
+:::
+
+## Prerequisites
+
+Complete the following items before you install Velero and configure a storage destination:
+
+* Review the limitations and considerations. See [Limitations and Considerations](/vendor/snapshots-overview#limitations-and-considerations) in _About Backup and Restore_.
+* Install the Velero CLI. See [Installing the Velero CLI](snapshots-velero-cli-installing).
+
+## Configure AWS Storage for Online Environments
+
+In this procedure, you install Velero and configure an AWS storage destination in online environments.
+
+The snapshots feature does not support Amazon Simple Storage Service (Amazon S3) buckets that have a bucket policy requiring the server-side encryption header. If you want to require server-side encryption for objects, you can enable default encryption on the bucket instead. For more information about Amazon S3, see the [Amazon S3](https://docs.aws.amazon.com/s3/?icmpid=docs_homepage_featuredsvcs) documentation.
+
+To install Velero and configure an AWS storage destination:
+
+1. Follow the instructions for [installing Velero on AWS](https://github.com/vmware-tanzu/velero-plugin-for-aws#setup) in the Velero documentation.
+
+1. Run the `velero install` command with these additional flags:
+
+   * **Velero 1.10 and later**: Use the `--use-node-agent`, `--uploader-type=restic`, and `--use-volume-snapshots=false` flags.
+   * **Velero versions earlier than 1.10**: Use the `--use-restic` and `--use-volume-snapshots=false` flags.
+
+   **Example:**
+
+   ```
+   velero install \
+     --provider aws \
+     --plugins velero/velero-plugin-for-aws:v1.2.0 \
+     --bucket $BUCKET \
+     --backup-location-config region=$REGION \
+     --secret-file CREDS_FILE \
+     --use-node-agent --uploader-type=restic \
+     --use-volume-snapshots=false
+   ```
+
+## Configure GCP Storage for Online Environments
+
+In this procedure, you install Velero and configure a GCP storage destination in online environments.
+
+To install Velero and configure a GCP storage destination:
+
+1. Follow the instructions for [installing Velero on GCP](https://github.com/vmware-tanzu/velero-plugin-for-gcp#setup) in the Velero documentation.
+
+1. Run the `velero install` command with these additional flags:
+   * **Velero 1.10 and later**: Use the `--use-node-agent`, `--uploader-type=restic`, and `--use-volume-snapshots=false` flags.
+   * **Velero versions earlier than 1.10**: Use the `--use-restic` and `--use-volume-snapshots=false` flags.
+
+   **Example:**
+
+   ```
+   velero install \
+     --provider gcp \
+     --plugins velero/velero-plugin-for-gcp:v1.5.0 \
+     --bucket $BUCKET \
+     --secret-file ./CREDS_FILE \
+     --use-node-agent --uploader-type=restic \
+     --use-volume-snapshots=false
+   ```
+
+## Configure Azure Storage for Online Environments
+
+In this procedure, you install Velero and configure an Azure storage destination in online environments.
+
+To install Velero and configure an Azure storage destination:
+
+1. Follow the instructions for [installing Velero on Azure](https://github.com/vmware-tanzu/velero-plugin-for-microsoft-azure#setup) in the Velero documentation.
+
+1. Run the `velero install` command with these additional flags:
+   * **Velero 1.10 and later**: Use the `--use-node-agent`, `--uploader-type=restic`, and `--use-volume-snapshots=false` flags.
+   * **Velero versions earlier than 1.10**: Use the `--use-restic` and `--use-volume-snapshots=false` flags.
+
+   **Example:**
+
+   ```
+   velero install \
+     --provider azure \
+     --plugins velero/velero-plugin-for-microsoft-azure:v1.5.0 \
+     --bucket $BLOB_CONTAINER \
+     --secret-file ./CREDS_FILE \
+     --backup-location-config resourceGroup=$AZURE_BACKUP_RESOURCE_GROUP,storageAccount=$AZURE_STORAGE_ACCOUNT_ID[,subscriptionId=$AZURE_BACKUP_SUBSCRIPTION_ID] \
+     --snapshot-location-config apiTimeout=API_TIMEOUT[,resourceGroup=$AZURE_BACKUP_RESOURCE_GROUP,subscriptionId=$AZURE_BACKUP_SUBSCRIPTION_ID] \
+     --use-node-agent --uploader-type=restic \
+     --use-volume-snapshots=false
+   ```
+
+## Configure S3-Compatible Storage for Online Environments
+
+Replicated supports the following S3-compatible object stores for storing backups with Velero:
+
+- Ceph RADOS v12.2.7
+- MinIO
+
+Run the following command to install Velero and configure an S3-compatible storage destination in an online environment. For more information about required storage destination flags, see [`velero`](/reference/kots-cli-velero-index) in _Reference_.
+
+```
+kubectl kots velero configure-other-s3 \
+  --namespace NAME \
+  --endpoint ENDPOINT \
+  --region REGION \
+  --bucket BUCKET \
+  --access-key-id ACCESS_KEY_ID \
+  --secret-access-key SECRET_ACCESS_KEY
+```
+
+Replace:
+
+- `NAME` with the name of the namespace where the Replicated KOTS Admin Console is installed and running
+- `ENDPOINT` with the S3 endpoint
+- `REGION` with the region where the bucket exists
+- `BUCKET` with the name of the object storage bucket where backups should be stored
+- `ACCESS_KEY_ID` with the access key ID to use for accessing the bucket
+- `SECRET_ACCESS_KEY` with the secret access key to use for accessing the bucket
+
+**Example:**
+
+```
+kubectl kots velero configure-other-s3 \
+  --namespace default \
+  --endpoint http://minio \
+  --region minio \
+  --bucket kots-snaps \
+  --access-key-id XXXXXXXJTJB7M2XZUV7D \
+  --secret-access-key mysecretkey
+```
+
+If no Velero installation is detected, instructions are displayed to install Velero and configure the storage destination.
+
+## Configure S3-Compatible Storage for Air Gapped Environments
+
+> Introduced in Replicated KOTS v1.94.0
+
+The following S3-compatible object stores are supported for storing backups with Velero:
+
+- Ceph RADOS v12.2.7
+- MinIO
+
+Run the following command to install Velero and configure an S3-compatible storage destination in an air gapped environment. For more information about required storage destination flags, see [`velero`](/reference/kots-cli-velero-index) in _Reference_.
+
+```bash
+kubectl kots velero configure-other-s3 \
+  --namespace NAME \
+  --endpoint ENDPOINT \
+  --region REGION \
+  --bucket BUCKET \
+  --access-key-id ACCESS_KEY_ID \
+  --secret-access-key SECRET_ACCESS_KEY \
+  --kotsadm-registry REGISTRY_HOSTNAME[/REGISTRY_NAMESPACE] \
+  --registry-username REGISTRY_USERNAME \
+  --registry-password REGISTRY_PASSWORD
+```
+
+Replace:
+
+- `NAME` with the name of the namespace where the Admin Console is installed and running
+- `ENDPOINT` with the S3 endpoint
+- `REGION` with the region where the bucket exists
+- `BUCKET` with the name of the object storage bucket where backups should be stored
+- `ACCESS_KEY_ID` with the access key ID to use for accessing the bucket
+- `SECRET_ACCESS_KEY` with the secret access key to use for accessing the bucket
+- `REGISTRY_HOSTNAME` with the registry endpoint where the images are hosted
+- `REGISTRY_NAMESPACE` with the registry namespace where the images are hosted (Optional)
+- `REGISTRY_USERNAME` with the username to use to authenticate with the registry
+- `REGISTRY_PASSWORD` with the password to use to authenticate with the registry
+
+If no Velero installation is detected, instructions are displayed to install Velero and configure the storage destination.
+
+
+
+## Next Steps
+
+* (Existing Clusters Only) Configure Velero namespace access if you are using minimal RBAC. See [Configuring Namespace Access and Memory Limit](snapshots-velero-installing-config).
+* (Optional) Increase the default memory limits. See [Configuring Namespace Access and Memory Limit](snapshots-velero-installing-config).
+* Create or schedule backups. See [Creating and Scheduling Backups](snapshots-creating).
+
+## Additional Resources
+
+* [Troubleshooting Snapshots](snapshots-troubleshooting-backup-restore)
+
+
+---
+
+
+# Troubleshooting Snapshots
+
+import NodeAgentMemLimit from "../partials/snapshots/_node-agent-mem-limit.mdx"
+
+# Troubleshooting Snapshots
+
+When a snapshot fails, a support bundle is collected and stored automatically. Because the support bundle is a point-in-time collection of all logs and system state at the time of the failed snapshot, it is a good place to start when reviewing the logs.
+
+## Velero is Crashing
+
+If Velero is crashing and not starting, some common causes are:
+
+### Invalid Cloud Credentials
+
+#### Symptom
+
+You see the following error message from Velero when trying to configure a snapshot.
+
+```shell
+time="2020-04-10T14:22:24Z" level=info msg="Checking existence of namespace" logSource="pkg/cmd/server/server.go:337" namespace=velero
+time="2020-04-10T14:22:24Z" level=info msg="Namespace exists" logSource="pkg/cmd/server/server.go:343" namespace=velero
+time="2020-04-10T14:22:27Z" level=info msg="Checking existence of Velero custom resource definitions" logSource="pkg/cmd/server/server.go:372"
+time="2020-04-10T14:22:31Z" level=info msg="All Velero custom resource definitions exist" logSource="pkg/cmd/server/server.go:406"
+time="2020-04-10T14:22:31Z" level=info msg="Checking that all backup storage locations are valid" logSource="pkg/cmd/server/server.go:413"
+An error occurred: some backup storage locations are invalid: backup store for location "default" is invalid: rpc error: code = Unknown desc = NoSuchBucket: The specified bucket does not exist
+  status code: 404, request id: BEFAE2B9B05A2DCF, host id: YdlejsorQrn667ziO6Xr6gzwKJJ3jpZzZBMwwMIMpWj18Phfii6Za+dQ4AgfzRcxavQXYcgxRJI=
+```
+
+#### Cause
+
+If the cloud access credentials are invalid or do not have access to the location in the configuration, Velero crashloops. The Velero logs are included in a support bundle, and the error message looks similar to the example above.
+
+#### Solution
+
+Replicated recommends that you validate the access key and secret, or the service account JSON.
+
+
+### Invalid Top-level Directories
+
+#### Symptom
+
+You see the following error message when Velero is starting:
+
+```shell
+time="2020-04-10T14:12:42Z" level=info msg="Checking existence of namespace" logSource="pkg/cmd/server/server.go:337" namespace=velero
+time="2020-04-10T14:12:42Z" level=info msg="Namespace exists" logSource="pkg/cmd/server/server.go:343" namespace=velero
+time="2020-04-10T14:12:44Z" level=info msg="Checking existence of Velero custom resource definitions" logSource="pkg/cmd/server/server.go:372"
+time="2020-04-10T14:12:44Z" level=info msg="All Velero custom resource definitions exist" logSource="pkg/cmd/server/server.go:406"
+time="2020-04-10T14:12:44Z" level=info msg="Checking that all backup storage locations are valid" logSource="pkg/cmd/server/server.go:413"
+An error occurred: some backup storage locations are invalid: backup store for location "default" is invalid: Backup store contains invalid top-level directories: [other-directory]
+```
+
+#### Cause
+
+This error occurs when Velero starts while it is configured to use a bucket that has been reused or reconfigured and contains other data. The bucket that Velero uses cannot contain any data other than what Velero stores there, or Velero crashes.
+
+#### Solution
+
+Configure Velero to use a bucket that does not contain other data.
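+
+If you need to inspect the Velero logs directly instead of through a support bundle, you can pull them with kubectl. A minimal sketch, assuming Velero is installed in the default `velero` namespace:
+
+```bash
+kubectl logs deployment/velero -n velero --tail=100
+```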
+
+## Node Agent is Crashing
+
+If the node-agent Pod is crashing and not starting, some common causes are:
+
+### Metrics Server is Failing to Start
+
+#### Symptom
+
+You see the following error in the node-agent logs.
+
+```shell
+time="2023-11-16T21:29:44Z" level=info msg="Starting metric server for node agent at address []" logSource="pkg/cmd/cli/nodeagent/server.go:229"
+time="2023-11-16T21:29:44Z" level=fatal msg="Failed to start metric server for node agent at []: listen tcp :80: bind: permission denied" logSource="pkg/cmd/cli/nodeagent/server.go:236"
+```
+
+#### Cause
+
+This is a result of a known issue in Velero 1.12.0 and 1.12.1 where the port is not set correctly when starting the metrics server. This causes the metrics server to fail to start with a `permission denied` error in environments that do not run MinIO and have Host Path, NFS, or internal storage destinations configured. When the metrics server fails to start, the node-agent Pod crashes. For more information about this issue, see [the GitHub issue details](https://github.com/vmware-tanzu/velero/issues/6792).
+
+#### Solution
+
+Replicated recommends that you either upgrade to Velero 1.12.2 or later, or downgrade to a version earlier than 1.12.0.
+
+## Snapshot Creation is Failing
+
+### Timeout Error when Creating a Snapshot
+
+#### Symptom
+
+You see a backup error that includes a timeout message when attempting to create a snapshot. For example:
+
+```bash
+Error backing up item
+timed out after 12h0m0s
+```
+
+#### Cause
+
+This error message appears when the node-agent (restic) Pod operation timeout limit is reached. In Velero v1.4.2 and later, the default timeout is 240 minutes.
+
+Restic is an open-source backup tool. Velero integrates with Restic to provide a solution for backing up and restoring Kubernetes volumes. For more information about the Velero Restic integration, see [File System Backup](https://velero.io/docs/v1.10/file-system-backup/) in the Velero documentation.
+
+#### Solution
+
+Use the kubectl Kubernetes command-line tool to patch the Velero deployment to increase the timeout:
+
+**Velero version 1.10 and later**:
+
+```bash
+kubectl patch deployment velero -n velero --type json -p '[{"op":"add","path":"/spec/template/spec/containers/0/args/-","value":"--fs-backup-timeout=TIMEOUT_LIMIT"}]'
+```
+
+**Velero versions earlier than 1.10**:
+
+```bash
+kubectl patch deployment velero -n velero --type json -p '[{"op":"add","path":"/spec/template/spec/containers/0/args/-","value":"--restic-timeout=TIMEOUT_LIMIT"}]'
+```
+
+Replace `TIMEOUT_LIMIT` with a length of time for the node-agent (restic) Pod operation timeout in hours, minutes, and seconds. Use the format `0h0m0s`. For example, `48h30m0s`.
+
+:::note
+The timeout value reverts back to the default value if you rerun the `velero install` command.
+:::
+
+### Memory Limit Reached on the node-agent Pod
+
+#### Symptom
+
+The node-agent (restic) Pod is killed by the Linux kernel Out Of Memory (OOM) killer, or snapshots are failing with errors similar to:
+
+```
+pod volume backup failed: ... signal: killed
+```
+
+#### Cause
+
+Velero sets default limits for the velero Pod and the node-agent (restic) Pod during installation. There is a known issue with Restic that causes high memory usage, which can result in failures during snapshot creation when the Pod reaches the memory limit.
+
+For more information, see the [Restic backup — OOM-killed on raspberry pi after backing up another computer to same repo](https://github.com/restic/restic/issues/1988) issue in the restic GitHub repository.
+
+#### Solution
+
+
+
+### At least one source file could not be read
+
+#### Symptom
+
+You see the following error in Velero logs:
+
+```
+Error backing up item...Warning: at least one source file could not be read
+```
+
+#### Cause
+
+There are file changes between Restic's initial scan of the volume and the backup of the volume to the Restic store.
+
+#### Solution
+
+To resolve this issue, do one of the following:
+
+* Use [hooks](/vendor/snapshots-hooks) to export data to an [EmptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) volume and include that in the backup instead of the primary PVC volume. See [Configuring Backup and Restore Hooks for Snapshots](/vendor/snapshots-hooks).
+* Freeze the file system to ensure all pending disk I/O operations have completed prior to taking a snapshot. For more information, see [Hook Example with fsfreeze](https://velero.io/docs/main/backup-hooks/#hook-example-with-fsfreeze) in the Velero documentation.
+
+
+## Snapshot Restore is Failing
+
+### Service NodePort is Already Allocated
+
+#### Symptom
+
+In the Replicated KOTS Admin Console, you see an **Application failed to restore** error message that indicates the port number for a static NodePort is already in use. For example:
+
+![Snapshot Troubleshoot Service NodePort](/images/snapshot-troubleshoot-service-nodeport.png)
+
+[View a larger version of this image](/images/snapshot-troubleshoot-service-nodeport.png)
+
+#### Cause
+
+There is a known issue in Kubernetes versions earlier than version 1.19 where using a static NodePort for services can collide in multi-primary high availability setups when recreating the services. For more information about this known issue, see https://github.com/kubernetes/kubernetes/issues/85894.
+
+#### Solution
+
+This issue is fixed in Kubernetes version 1.19. To resolve this issue, upgrade to Kubernetes version 1.19 or later.
+
+For more information about the fix, see https://github.com/kubernetes/kubernetes/pull/89937.
+
+### Partial Snapshot Restore is Stuck in Progress
+
+#### Symptom
+
+In the Admin Console, you see at least one volume restore progress bar frozen at 0%, as shown in the following example:
+
+![Snapshot Troubleshoot Frozen Restore](/images/snapshot-troubleshoot-frozen-restore.png)
+
+You can confirm this is the same issue by running `kubectl get pods -n NAMESPACE`, where `NAMESPACE` is the namespace where the application is installed. You should see at least one pod stuck in initialization:
+
+```shell
+NAME                            READY   STATUS     RESTARTS   AGE
+example-mysql-0                 0/1     Init:0/2   0          4m15s   #<- the offending pod
+example-nginx-77b878b4f-zwv2h   3/3     Running    0          4m15s
+```
+
+#### Cause
+
+This issue has been observed with Velero version 1.5.4, and an issue was opened with the project to investigate the root cause: https://github.com/vmware-tanzu/velero/issues/3686. However, the issue has not been observed with Velero 1.6.0 or later.
+
+#### Solution
+
+Upgrade Velero to 1.9.0. You can upgrade using Replicated kURL. Or, to follow the Velero upgrade instructions, see [Upgrading to Velero 1.9](https://velero.io/docs/v1.9/upgrade-to-1.9/) in the Velero documentation.
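+
+Before and after upgrading, you can confirm which Velero client and server versions are running with the Velero CLI:
+
+```bash
+velero version
+```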
+
+### Partial Snapshot Restore Finishes with Warnings
+
+#### Symptom
+
+In the Admin Console, when the partial snapshot restore completes, you see warnings indicating that Endpoint resources were not restored:
+
+![Snapshot Troubleshoot Restore Warnings](/images/snapshot-troubleshoot-restore-warnings.png)
+
+#### Cause
+
+The resource restore priority was changed in Velero 1.10.3 and 1.11.0, which leads to this warning when restoring Endpoint resources. For more information about this issue, see [the issue details](https://github.com/vmware-tanzu/velero/issues/6280) in GitHub.
+
+#### Solution
+
+These warnings do not necessarily mean that the restore itself failed. The endpoints likely do exist, because Kubernetes creates them when the related Service resources are restored. However, to prevent encountering these warnings, use Velero version 1.11.1 or later.
+
+
+---
+
+
+# Updating Storage Settings
+
+# Updating Storage Settings
+
+This topic describes how to update existing storage destination settings using the Replicated Admin Console.
+
+## Prerequisite
+
+If you are changing from one provider to another provider, make sure that you meet the prerequisites for the storage destination. For information about prerequisites, see:
+
+- [Configuring a Host Path Storage Destination](snapshots-configuring-hostpath)
+- [Configuring an NFS Storage Destination](snapshots-configuring-nfs)
+- [Configuring Other Storage Destinations](snapshots-storage-destinations)
+
+## Update Storage Settings
+
+You can update storage destination settings for online and air gapped environments at any time using the Admin Console.
+
+Additionally, if Velero was automatically installed by Replicated kURL, then Replicated recommends that you change the default internal storage because it is not sufficient for disaster recovery.
+
+To update storage destination settings:
+
+1. In the Admin Console, select **Snapshots** > **Settings and Schedule**.
+
+1. Under storage, you can edit the existing settings or click **Add a new storage destination** and select a storage destination type.
+
+   ![Snapshot Destination Dropdown Host Path](/images/snapshot-destination-dropdown-hostpath.png)
+
+   The configuration fields that display depend on the type of storage destination. See the following storage destination sections for field descriptions:
+
+   - [AWS](#aws-fields)
+   - [GCP](#gcp-fields)
+   - [Azure](#azure-fields)
+   - [S3-compatible](#s3-compatible-fields)
+   - [NFS](#nfs-fields)
+   - [Host Path](#host-path-fields)
+
+1. Click **Update storage settings**. The update can take several minutes.
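+
+To confirm that the updated settings took effect, you can check the phase of the Velero backup storage location. A minimal sketch, assuming Velero runs in the default `velero` namespace and the location is named `default`:
+
+```bash
+kubectl get backupstoragelocation default -n velero
+```
+
+The location should typically report the `Available` phase once Velero can reach the new destination.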
+
+### AWS Fields
+
+When configuring the Admin Console to store backups on Amazon Web Services (AWS), the following fields are available:
+
+| Name | Description |
+|------------------------------|-----------------------------------------------------------------------------------------------------------------|
+| Region | The AWS region that the S3 bucket is available in |
+| Bucket | The name of the S3 bucket to use |
+| Path (Optional) | The path in the bucket to store all backups in |
+| Access Key ID (Optional) | The AWS IAM Access Key ID that can read from and write to the bucket |
+| Secret Access Key (Optional) | The AWS IAM Secret Access Key that is associated with the Access Key ID |
+| Use Instance Role | When enabled, instead of providing an Access Key ID and Secret Access Key, Velero will use an instance IAM role |
+| Add a CA Certificate | (Optional) Upload a third-party issued (proxy) CA certificate used for trusting the authenticity of the snapshot storage endpoint. Only one file can be uploaded. However, it is possible to concatenate multiple certificates into one file. **Formats:** PEM, CER, CRT, CA, and KEY |
+
+### GCP Fields
+
+When configuring the Admin Console to store backups on Google Cloud Provider (GCP), the following fields are available:
+
+| Name | Description |
+|-----------------|-----------------------------------------------------------------------------------------------------------|
+| Bucket | The name of the GCP storage bucket to use |
+| Path (Optional) | The path in the bucket to store all backups in |
+| Service Account | The GCP IAM Service Account JSON file that has permissions to read from and write to the storage location |
+| Add a CA Certificate | (Optional) Upload a third-party issued (proxy) CA certificate used for trusting the authenticity of the snapshot storage endpoint. Only one file can be uploaded. However, it is possible to concatenate multiple certificates into one file. **Formats:** PEM, CER, CRT, CA, and KEY |
+
+### Azure Fields
+
+When configuring the Admin Console to store backups on Microsoft Azure, the following fields are available:
+
+| Name | Description |
+|----------------------------|--------------------------------------------------------------------------------------------------------------------------------------------|
+| Bucket | The name of the Azure Blob Storage Container to use |
+| Path (Optional) | The path in the Blob Storage Container to store all backups in |
+| Resource Group | The Resource Group name of the target Blob Storage Container |
+| Storage Account | The Storage Account Name of the target Blob Storage Container |
+| Subscription ID | The Subscription ID associated with the target Blob Storage Container (required only for access via Service Principal or AAD Pod Identity) |
+| Tenant ID | The Tenant ID associated with the Azure account of the target Blob Storage container (required only for access via Service Principal) |
+| Client ID | The Client ID of a Service Principal with access to the target Container (required only for access via Service Principal) |
+| Client Secret | The Client Secret of a Service Principal with access to the target Container (required only for access via Service Principal) |
+| Cloud Name | The Azure cloud for the target storage (options: AzurePublicCloud, AzureUSGovernmentCloud, AzureChinaCloud, AzureGermanCloud) |
+| Add a CA Certificate | (Optional) Upload a third-party issued (proxy) CA certificate used for trusting the authenticity of the snapshot storage endpoint. Only one file can be uploaded. However, it is possible to concatenate multiple certificates into one file. **Formats:** PEM, CER, CRT, CA, and KEY |
+
+Only connections with Service Principals are supported at this time.
+
+For more information about authentication methods and setting up Azure, see [Velero plugins for Microsoft Azure](https://github.com/vmware-tanzu/velero-plugin-for-microsoft-azure) in the velero-plugin-for-microsoft-azure GitHub repository.
+
+### S3-Compatible Fields
+
+Replicated supports the following S3-compatible object stores for storing backups with Velero:
+
+* Ceph RADOS v12.2.7. For more information, see the [Ceph](https://docs.ceph.com/en/quincy/) documentation.
+* MinIO. For more information, see the [MinIO](https://docs.min.io/docs/minio-quickstart-guide.html) documentation.
+
+When configuring the Admin Console to store backups on S3-compatible storage, the following fields are available:
+
+| Name | Description |
+|------------------------------|-----------------------------------------------------------------------------------------------------------------|
+| Region | The AWS region that the S3 bucket is available in |
+| Endpoint | The endpoint to use to connect to the bucket |
+| Bucket | The name of the S3 bucket to use |
+| Path (Optional) | The path in the bucket to store all backups in |
+| Access Key ID (Optional) | The AWS IAM Access Key ID that can read from and write to the bucket |
+| Secret Access Key (Optional) | The AWS IAM Secret Access Key that is associated with the Access Key ID |
+| Use Instance Role | When enabled, instead of providing an Access Key ID and Secret Access Key, Velero will use an instance IAM role |
+| Add a CA Certificate | (Optional) Upload a third-party issued (proxy) CA certificate used for trusting the authenticity of the snapshot storage endpoint. Only one file can be uploaded. However, it is possible to concatenate multiple certificates into one file. **Formats:** PEM, CER, CRT, CA, and KEY |
+
+### NFS Fields
+
+When configuring the Admin Console to store backups on network file system (NFS) storage, the following fields are available:
+
+| Name | Description |
+|--------|----------------------------------------------|
+| Server | The hostname or IP address of the NFS server |
+| Path | The path that is exported by the NFS server |
+
+### Host Path Fields
+
+When configuring the Admin Console to store backups on host path storage, the following fields are available:
+
+**Host path**: Enter the path to the directory on the node. Although the path can be local, Replicated recommends that you use an external host path.
+
+
+---
+
+
+# Installing the Velero CLI
+
+# Installing the Velero CLI
+
+You install the Velero CLI before installing Velero and configuring a storage destination for backups.
+
+:::note
+For embedded clusters created with Replicated kURL, if the kURL Installer spec included the Velero add-on, then Velero was automatically installed with default internal storage. Replicated recommends that you proceed to change the default internal storage because it is insufficient for disaster recovery. See [Updating Storage Settings in the Admin Console](snapshots-updating-with-admin-console).
+:::
+
+## Install the Velero CLI in an Online Cluster
+
+To install the Velero CLI in an online cluster:
+
+1. Do one of the following:
+
+   - (Embedded kURL cluster) Use SSH to access and authenticate to your cluster node.
+   - (Existing cluster) Open a terminal in the environment that you manage the cluster from, such as a local machine that has kubectl installed.
+
+1. Check for the latest supported release of the Velero CLI for **Linux AMD64** in the Velero GitHub repo at https://github.com/vmware-tanzu/velero/releases. Although earlier versions of Velero are supported, Replicated recommends using the latest supported version. For more information about supported versions, see [Velero Version Compatibility](/vendor/snapshots-overview#velero-version-compatibility).
+
+   Note the version number for the next step.
+
+1. Run the following command to download the latest supported Velero CLI version for the **Linux AMD64** operating system to the cluster:
+
+   ```
+   curl -LO https://github.com/vmware-tanzu/velero/releases/download/VERSION/velero-VERSION-linux-amd64.tar.gz
+   ```
+
+   Replace `VERSION` with the version number using the format `vx.x.x`.
+
+   **Example:**
+
+   ```
+   curl -LO https://github.com/vmware-tanzu/velero/releases/download/v1.10.1/velero-v1.10.1-linux-amd64.tar.gz
+   ```
+
+1. Run the following command to uncompress the TAR file:
+
+   ```
+   tar zxvf velero-VERSION-linux-amd64.tar.gz
+   ```
+   Replace `VERSION` with the version number using the format `vx.x.x`.
+
+1. Run the following command to install the Velero CLI:
+
+   ```
+   sudo mv velero-VERSION-linux-amd64/velero /usr/local/bin/velero
+   ```
+   Replace `VERSION` with the version number using the format `vx.x.x`.
+
+1. Run `velero version` to test that the Velero CLI installation worked correctly.
+
+   You might get an error message stating that there are no matches for the server version. This is acceptable, as long as you get a confirmation for the client version. After Velero itself is installed, the command also returns the server version.
+
+## Install the Velero CLI in an Air Gapped Cluster
+
+To install the Velero CLI in an air gapped cluster:
+
+1. From a computer with internet access, check for the latest supported release of the Velero CLI for **Linux AMD64** in the Velero GitHub repo at https://github.com/vmware-tanzu/velero/releases. Although earlier versions of Velero are supported, Replicated recommends using the latest supported version. See [Velero Version Compatibility](/vendor/snapshots-overview#velero-version-compatibility).
+
+   Note the version number for the next step.
+
+1. Run the following command to download the latest supported Velero CLI version for the **Linux AMD64** operating system:
+
+   ```
+   curl -LO https://github.com/vmware-tanzu/velero/releases/download/VERSION/velero-VERSION-linux-amd64.tar.gz
+   ```
+
+   Replace `VERSION` with the version number using the format `vx.x.x`.
+
+   **Example:**
+
+   ```
+   curl -LO https://github.com/vmware-tanzu/velero/releases/download/v1.10.1/velero-v1.10.1-linux-amd64.tar.gz
+   ```
+
+1. Copy the TAR file to the air gapped node.
+
+1. Run the following command to uncompress the TAR file:
+
+   ```
+   tar zxvf velero-VERSION-linux-amd64.tar.gz
+   ```
+   Replace `VERSION` with the version number using the format `vx.x.x`.
+
+1. Run the following command to install the Velero CLI:
+
+   ```
+   sudo mv velero-VERSION-linux-amd64/velero /usr/local/bin/velero
+   ```
+
+   Replace `VERSION` with the version number using the format `vx.x.x`.
+
+1. Run `velero version` to test that the Velero CLI installation worked correctly.
+
+   You might get an error message stating that there are no matches for the server version. This is acceptable, as long as you get a confirmation for the client version. After Velero itself is installed, the command also returns the server version.
+
+
+## Next Step
+
+Install Velero and configure a storage destination using one of the following procedures:
+
+- [Configuring a Host Path Storage Destination](snapshots-configuring-hostpath)
+- [Configuring an NFS Storage Destination](snapshots-configuring-nfs)
+- [Configuring Other Storage Destinations](snapshots-storage-destinations)
+
+---
+
+
+# Configuring Namespace Access and Memory Limit
+
+import NodeAgentMemLimit from "../partials/snapshots/_node-agent-mem-limit.mdx"
+import KotsadmNamespace from "../partials/kots-cli/_kotsadm-namespace.mdx"
+import KotsadmRegistry from "../partials/kots-cli/_kotsadm-registry.mdx"
+
+# Configuring Namespace Access and Memory Limit
+
+This topic describes how to configure namespace access and the memory limit for Velero.
+
+## Overview
+
+The Replicated KOTS Admin Console requires access to the namespace where Velero is installed. If your Admin Console is running with minimal role-based access control (RBAC) privileges, you must enable the Admin Console to access Velero.
+
+Additionally, if the application uses a large amount of memory, you can configure the default memory limit to help ensure that Velero runs successfully with snapshots.
+
+## Configure Namespace Access
+
+This section applies only to _existing cluster_ installations (online and air gap) where the Admin Console is running with minimal role-based access control (RBAC) privileges.
+
+Run the following command to enable the Admin Console to access the Velero namespace:
+
+```
+kubectl kots velero ensure-permissions --namespace ADMIN_CONSOLE_NAMESPACE --velero-namespace VELERO_NAMESPACE
+```
+Replace:
+* `ADMIN_CONSOLE_NAMESPACE` with the namespace on the cluster where the Admin Console is running.
+* `VELERO_NAMESPACE` with the namespace on the cluster where Velero is installed.
+
+For more information, see [`velero ensure-permissions`](/reference/kots-cli-velero-ensure-permissions/) in the KOTS CLI documentation. For more information about RBAC privileges for the Admin Console, see [Kubernetes RBAC](/vendor/packaging-rbac).
+
+## Configure Memory Limit
+
+This section applies to all online and air gap installations.
+
+Velero sets default limits for the velero Pod and the node-agent (restic) Pod during installation. There is a known issue with Restic that causes high memory usage, which can result in failures during backup creation when the Pod reaches the memory limit.
+
+
+
+## Additional Resources
+
+* [Troubleshooting Snapshots](snapshots-troubleshooting-backup-restore)
+
+
+---
+
+
+# Understanding Application Status Details in the Admin Console
+
+import StatusesTable from "../partials/status-informers/_statusesTable.mdx"
+import AggregateStatus from "../partials/status-informers/_aggregateStatus.mdx"
+import AggregateStatusIntro from "../partials/status-informers/_aggregate-status-intro.mdx"
+
+# Understanding Application Status Details in the Admin Console
+
+This topic describes how to view the status of an application on the Replicated KOTS Admin Console dashboard. It also describes how Replicated KOTS collects and aggregates the application status.
+
+## View Status Details
+
+The application status displays on the dashboard of the Admin Console. Viewing the status details can be helpful for troubleshooting.
+
+To view the status details, click **Details** next to the status on the dashboard.
+
+![Status Details](/images/kotsadm-dashboard-appstatus.png)
+
+## About Application Status
+
+To display application status on the Admin Console dashboard, KOTS aggregates the status of specific Kubernetes resources for the application.
+
+The following resource types are supported for displaying application status:
+
+* Deployment
+* StatefulSet
+* Service
+* Ingress
+* PersistentVolumeClaims (PVC)
+* DaemonSet
+
+Applications can specify one or more of the supported Kubernetes resources listed above. KOTS watches all specified resources for state changes.
+
+For more information about how to interpret the application status displayed on the Admin Console dashboard, see [Resource Statuses](#resource-statuses) and [Aggregate Application Status](#aggregate-application-status) below.
+
+### Resource Statuses
+
+Possible application statuses are Ready, Updating, Degraded, Unavailable, and Missing.
+
+The following table lists the supported Kubernetes resources and the conditions that contribute to each status:
+
+
+
+### Aggregate Application Status
+
+
+
+
+
+---
+
+
+# Generating Support Bundles from the Admin Console
+
+import GenerateBundleAdminConsole from "../partials/support-bundles/_generate-bundle-admin-console.mdx"
+
+# Generating Support Bundles from the Admin Console
+
+This topic describes how to generate support bundles from the KOTS Admin Console.
+
+## Generate a Bundle from the Admin Console
+
+
+
+---
+
+
+# Performing Updates in Existing Clusters
+
+import AdminConsole from "../partials/updating/_admin-console.mdx"
+import AdminConsoleAirGap from "../partials/updating/_admin-console-air-gap.mdx"
+import PushKotsImages from "../partials/install/_push-kotsadm-images.mdx"
+import BuildAirGapBundle from "../partials/install/_airgap-bundle-build.mdx"
+import DownloadAirGapBundle from "../partials/install/_airgap-bundle-download.mdx"
+import ViewAirGapBundle from "../partials/install/_airgap-bundle-view-contents.mdx"
+
+# Performing Updates in Existing Clusters
+
+This topic describes how to perform updates in existing cluster installations with Replicated KOTS. It includes information about how to update applications and the version of KOTS running in the cluster.
+
+## Update an Application
+
+You can perform an application update using the KOTS Admin Console or the KOTS CLI. You can also set up automatic updates. See [Configuring Automatic Updates](/enterprise/updating-apps).
+
+### Using the Admin Console
+
+#### Online Environments
+
+
+
+#### Air Gap Environments
+
+
+
+### Using the KOTS CLI
+
+You can use the KOTS CLI [upstream upgrade](/reference/kots-cli-upstream-upgrade) command to update an application in existing cluster installations.
+
+#### Online Environments
+
+To update an application in online environments:
+
+```bash
+kubectl kots upstream upgrade APP_SLUG -n ADMIN_CONSOLE_NAMESPACE
+```
+Where:
+* `APP_SLUG` is the unique slug for the application. See [Get the Application Slug](/vendor/vendor-portal-manage-app#slug) in _Managing Applications_.
+* `ADMIN_CONSOLE_NAMESPACE` is the namespace where the Admin Console is running.
+
+:::note
+Add the `--deploy` flag to automatically deploy this version.
+:::
+
+#### Air Gap Environments
+
+To update an application in air gap environments:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), go to the channel where the target release is promoted to build and download the new `.airgap` bundle:
+
+   
+
+1. 
+
+1. 
+
+1. 
Run the following command to update the application: + + ```bash + kubectl kots upstream upgrade APP_SLUG \ + --airgap-bundle NEW_AIRGAP_BUNDLE \ + --kotsadm-registry REGISTRY_HOST[/REGISTRY_NAMESPACE] \ + --registry-username RO_USERNAME \ + --registry-password RO_PASSWORD \ + -n ADMIN_CONSOLE_NAMESPACE + ``` + Replace: + * `APP_SLUG` with the unique slug for the application. See [Get the Application Slug](/vendor/vendor-portal-manage-app#slug) in _Managing Applications_. + * `NEW_AIRGAP_BUNDLE` with the `.airgap` bundle for the target application version. + * `REGISTRY_HOST` with the private registry that contains the Admin Console images. + * `REGISTRY_NAMESPACE` with the registry namespace where the images are hosted (Optional). + * `RO_USERNAME` and `RO_PASSWORD` with the username and password for an account that has read-only access to the private registry. + * `ADMIN_CONSOLE_NAMESPACE` with the namespace where the Admin Console is running. + +:::note +Add the `--deploy` flag to automatically deploy this version. +::: + +## Update KOTS + +This section describes how to update the version of Replicated KOTS running in your cluster. For information about the latest versions of KOTS, see [KOTS Release Notes](/release-notes/rn-app-manager). + +:::note +Downgrading KOTS to a version earlier than what is currently deployed is not supported. +::: + +### Online Environments + +To update KOTS in an online existing cluster: + +1. Run _one_ of the following commands to update the KOTS CLI to the target version of KOTS: + + - **Install or update to the latest version**: + + ``` + curl https://kots.io/install | bash + ``` + + - **Install or update to a specific version**: + + ``` + curl https://kots.io/install/VERSION | bash + ``` + Where `VERSION` is the target KOTS version. + + For more KOTS CLI installation options, including information about how to install or update without root access, see [Installing the KOTS CLI](/reference/kots-cli-getting-started). + +1. Run the following command to update the KOTS Admin Console to the same version as the KOTS CLI: + + ```bash + kubectl kots admin-console upgrade -n NAMESPACE + ``` + Replace `NAMESPACE` with the namespace in your cluster where KOTS is installed. + +### Air Gap Environments + +To update KOTS in an existing air gap cluster: + +1. Download the target version of the following assets from the [Releases](https://github.com/replicatedhq/kots/releases/latest) page in the KOTS GitHub repository: + * KOTS Admin Console `kotsadm.tar.gz` bundle + * KOTS CLI plugin + + Ensure that you can access the downloaded bundles from the environment where the Admin Console is running. + +1. Install or update the KOTS CLI to the version that you downloaded. See [Manually Download and Install](/reference/kots-cli-getting-started#manually-download-and-install) in _Installing the KOTS CLI_. + +1. + +1. Run the following command using registry read-only credentials to update the KOTS Admin Console: + + ``` + kubectl kots admin-console upgrade \ + --kotsadm-registry REGISTRY_HOST \ + --registry-username RO_USERNAME \ + --registry-password RO_PASSWORD \ + -n NAMESPACE + ``` + Replace: + * `REGISTRY_HOST` with the same private registry from the previous step. + * `RO_USERNAME` with the username for credentials with read-only permissions to the registry. + * `RO_PASSWORD` with the password associated with the username. + * `NAMESPACE` with the namespace on your cluster where KOTS is installed. + + For help information, run `kubectl kots admin-console upgrade -h`. 
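+
+For reference, the image push step in the air gap procedure above typically uses the `admin-console push-images` command. A minimal sketch, assuming the downloaded bundle is at `./kotsadm.tar.gz` and that `REGISTRY_HOST/REGISTRY_NAMESPACE`, `RW_USERNAME`, and `RW_PASSWORD` are placeholders for your private registry and read-write credentials:
+
+```bash
+kubectl kots admin-console push-images ./kotsadm.tar.gz REGISTRY_HOST/REGISTRY_NAMESPACE \
+  --registry-username RW_USERNAME \
+  --registry-password RW_PASSWORD
+```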
+ +--- + + +# Configuring Automatic Updates + +# Configuring Automatic Updates + +This topic describes how to configure automatic updates for applications installed in online (internet-connected) environments. + +## Overview + +For applications installed in an online environment, the Replicated KOTS Admin Console automatically checks for new versions once every four hours by default. After the Admin Console checks for updates, it downloads any new versions of the application and displays them on the **Version History** tab. + +You can edit this default cadence to customize how often the Admin Console checks for and downloads new versions. + +You can also configure the Admin Console to automatically deploy new versions of the application after it downloads them. + +The Admin Console only deploys new versions automatically if preflight checks pass. By default, the Admin Console does not automatically deploy any version of an application. + +## Limitations + +Automatic updates have the following limitations: + +* Automatic updates are not supported for [Replicated Embedded Cluster](/vendor/embedded-overview) installations. + +* Automatic updates are not supported for applications installed in air gap environments with no outbound internet access. + +* Automatically deploying new versions is not supported when KOTS is installed with minimal RBAC. This is because all preflight checks must pass for the new version to be automatically deployed, and preflight checks that require cluster-scoped access will fail in minimal RBAC environments. + +## Set Up Automatic Updates + +To configure automatic updates: + +1. In the Admin Console, go to the **Version History** tab and click **Configure automatic updates**. + + The **Configure automatic updates** dialog opens. + +1. Under **Automatically check for updates**, use the default or select a cadence (Hourly, Daily, Weekly, Never, Custom) from the dropdown list. + + To turn off automatic updates, select **Never**. + + To define a custom cadence, select **Custom**, then enter a cron expression in the text field. For more information about cron expressions, see [Cron Expressions](/reference/cron-expressions). Configured automatic update checks use the local server time. + + ![Configure automatic updates](/images/automatic-updates-config.png) + +1. Under **Automatically deploy new versions**, select an option. The available options depend on whether semantic versioning is enabled for the channel. + * **For channels that use semantic versioning**: (v1.58.0 and later) Select an option in the dropdown + to specify the versions that the Admin Console automatically deploys. For example, + to automatically deploy only new patch and minor versions, select + **Automatically deploy new patch and minor versions**. + * **For channels that do not use semantic versioning**: (v1.67.0 and later) Optionally select **Enable automatic deployment**. + When this checkbox is enabled, the Admin Console automatically deploys each new version of the application that it downloads. 
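+
+For example, a custom cron expression that checks for updates every day at 2:00 AM server time looks like the following:
+
+```
+0 2 * * *
+```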
+ +--- + + +# Performing Updates in Embedded Clusters + +import UpdateAirGapAdm from "../partials/embedded-cluster/_update-air-gap-admin-console.mdx" +import UpdateAirGapCli from "../partials/embedded-cluster/_update-air-gap-cli.mdx" +import UpdateAirGapOverview from "../partials/embedded-cluster/_update-air-gap-overview.mdx" +import DoNotDowngrade from "../partials/embedded-cluster/_warning-do-not-downgrade.mdx" +import Overview from "../partials/embedded-cluster/_update-overview.mdx" + +# Performing Updates in Embedded Clusters + +This topic describes how to perform updates for [Replicated Embedded Cluster](/vendor/embedded-overview) installations. + +:::note +If you are instead looking for information about Replicated kURL, see [Performing Updates in kURL Clusters](updating-kurl). +::: + +## Overview + + + +The following diagram demonstrates how updates are performed with Embedded Cluster in online (internet-connected) environments: + +![Embedded Cluster updates Kubernetes and an app in a customer environment](/images/embedded-cluster-update.png) + +[View a larger version of this image](/images/embedded-cluster-update.png) + +As shown in the diagram above, users check for available updates from the KOTS Admin Console. When deploying the new version, both the application and the cluster infrastructure are updated as needed. + +## Update in Online Clusters + + + +To perform an update with Embedded Cluster: + +1. In the Admin Console, go to the **Version history** tab. + + All versions available for upgrade are listed in the **Available Updates** section: + + ![Version history page](/images/ec-upgrade-version-history.png) + + [View a larger version of this image](/images/ec-upgrade-version-history.png) + +1. Click **Deploy** next to the target version. + +1. On the **Config** screen of the upgrade wizard, make any necessary changes to the configuration for the application. Click **Next**. + + ![Config screen in the upgrade wizard](/images/ec-upgrade-wizard-config.png) + + [View a larger version of this image](/images/ec-upgrade-wizard-config.png) + + :::note + Any changes made on the **Config** screen of the upgrade wizard are not set until the new version is deployed. + ::: + +1. On the **Preflight** screen, view the results of the preflight checks. + + ![Preflight screen in the upgrade wizard](/images/ec-upgrade-wizard-preflights.png) + + [View a larger version of this image](/images/ec-upgrade-wizard-preflights.png) + +1. On the **Confirm** screen, click **Deploy**. + + ![Confirmation screen in the upgrade wizard](/images/ec-upgrade-wizard-confirm.png) + + [View a larger version of this image](/images/ec-upgrade-wizard-confirm.png) + + During updates, the Admin Console is unavailable. A modal is displayed with a message that the update is in progress. + + :::note + KOTS can experience downtime during an update, such as in single-node installations. If downtime occurs, refreshing the page results in a connection error. Users can refresh the page again after the update is complete to access the Admin Console. 
+   :::
+
+## Update in Air Gap Clusters
+
+
+
+### Upload the New Version From the Command Line
+
+To update by uploading the air gap bundle for the new version from the command line:
+
+
+
+### Upload the New Version From the Admin Console
+
+To update by uploading the air gap bundle for the new version from the Admin Console:
+
+
+
+---
+
+
+# About kURL Cluster Updates
+
+import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"
+
+# About kURL Cluster Updates
+
+
+
+This topic provides an overview of Replicated kURL cluster updates. For information about how to perform updates in kURL clusters, see [Performing Updates in kURL Clusters](updating-kurl).
+
+## Overview
+
+The Replicated kURL installer spec specifies the kURL add-ons and the Kubernetes version that are deployed in kURL clusters. You can run the kURL installation script to apply the latest installer spec and update the cluster.
+
+## About Kubernetes Updates {#kubernetes}
+
+The version of Kubernetes running in a kURL cluster can be upgraded by one or more minor versions.
+
+The Kubernetes upgrade process in kURL clusters steps through one minor version at a time. For example, upgrades from Kubernetes 1.19.x to 1.26.x install versions 1.20.x, 1.21.x, 1.22.x, 1.23.x, 1.24.x, and 1.25.x before installing 1.26.x.
+
+The installation script automatically detects when the Kubernetes version in your cluster must be updated. When a Kubernetes upgrade is required, the script first prints a prompt: `Drain local node and apply upgrade?`. When you confirm the prompt, it drains and upgrades the local primary node where the script is running.
+
+Then, if there are any remote primary nodes to upgrade, the script drains each node sequentially and prints a command that you must run on the node to upgrade. For example, the command that the script prints might look like the following: `curl -sSL https://kurl.sh/myapp/upgrade.sh | sudo bash -s hostname-check=master-node-2 kubernetes-version=v1.24.3`.
+
+The script polls the status of each remote node until it detects that the Kubernetes upgrade is complete. Then, it uncordons the node and proceeds to cordon and drain the next node. This process ensures that only one node is cordoned at a time. After upgrading all primary nodes, the script performs the same operation sequentially on all remote secondary nodes.
+
+### Air Gap Multi-Version Kubernetes Updates {#kubernetes-multi}
+
+To upgrade Kubernetes by more than one minor version in air-gapped kURL clusters, you must provide a package that includes the assets required for the upgrade.
+
+When you run the installation script to upgrade, the script searches for the package in the `/var/lib/kurl/assets/` directory. The script then lists any required assets that are missing, prints a command to download the missing assets as a `.tar.gz` package, and prompts you to provide an absolute path to the package in your local directory. For example:
+
+```
+⚙  Upgrading Kubernetes from 1.23.17 to 1.26.3
+This involves upgrading from 1.23 to 1.24, 1.24 to 1.25, and 1.25 to 1.26.
+This may take some time.
+⚙  Downloading assets required for Kubernetes 1.23.17 to 1.26.3 upgrade
+The following packages are not available locally, and are required:
+    kubernetes-1.24.12.tar.gz
+    kubernetes-1.25.8.tar.gz
+
+You can download them with the following command:
+
+    curl -LO https://kurl.sh/bundle/version/v2023.04.24-0/19d41b7/packages/kubernetes-1.24.12,kubernetes-1.25.8.tar.gz
+
+Please provide the path to the file on the server.
+Absolute path to file:
+```
+
+## About Add-ons and KOTS Updates {#add-ons}
+
+If the application vendor updated any add-ons in the kURL installer spec since the last time that you ran the installation script in your cluster, the script automatically updates the add-ons after updating Kubernetes (if required).
+
+For a complete list of add-ons that can be included in the kURL installer spec, including the KOTS add-on, see [Add-ons](https://kurl.sh/docs/add-ons/antrea) in the kURL documentation.
+
+### Containerd and Docker Add-on Updates
+
+The installation script upgrades the version of the Containerd or Docker container runtime if required by the installer spec. For example, if your cluster uses Containerd version 1.6.4 and the spec is updated to use 1.6.18, then Containerd is updated to 1.6.18 in your cluster when you run the installation script.
+
+The installation script also supports migrating from Docker to Containerd, as Docker is not supported in Kubernetes versions 1.24 and later. If the installation script detects a change from Docker to Containerd, it installs Containerd, loads the images found in Docker, and removes Docker.
+
+For information about the container runtime add-ons, see [Containerd Add-On](https://kurl.sh/docs/add-ons/containerd) and [Docker Add-On](https://kurl.sh/docs/add-ons/docker) in the kURL documentation.
+
+### KOTS Updates (KOTS Add-on)
+
+The version of KOTS that is installed in a kURL cluster is set by the [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm), which is defined in the kURL installer spec.
+
+For example, if the version of KOTS running in your cluster is 1.109.0, and the KOTS add-on in the kURL installer spec is updated to 1.109.12, then the KOTS version in your cluster is updated to 1.109.12 when you update the cluster.
+
+---
+
+
+# Performing Updates in kURL Clusters
+
+import InstallerRequirements from "../partials/updating/_installerRequirements.mdx"
+import UpgradePrompt from "../partials/updating/_upgradePrompt.mdx"
+import AdminConsole from "../partials/updating/_admin-console.mdx"
+import AdminConsoleAirGap from "../partials/updating/_admin-console-air-gap.mdx"
+import DownloadKurlBundle from "../partials/install/_download-kurl-bundle.mdx"
+import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"
+
+# Performing Updates in kURL Clusters
+
+
+
+This topic describes how to perform updates in Replicated kURL installations. It includes procedures for updating an application, as well as for updating the versions of Kubernetes, Replicated KOTS, and add-ons in a kURL cluster.
+
+For more information about managing nodes in kURL clusters, including how to safely reset, reboot, and remove nodes when performing maintenance tasks, see [Managing Nodes](https://kurl.sh/docs/install-with-kurl/managing-nodes) in the open source kURL documentation.
+
+## Update an Application
+
+For kURL installations, you can update an application from the Admin Console. You can also set up automatic updates. See [Configuring Automatic Updates](/enterprise/updating-apps).
+
+### Online Environments
+
+
+
+### Air Gap Environments
+
+
+
+## Update the kURL Cluster
+
+After updating the kURL installer spec, you can rerun the kURL installation script to update a kURL cluster. For more information about kURL cluster updates, see [About kURL Cluster Updates](/enterprise/updating-kurl-about).
+
+:::important
+The Kubernetes scheduler automatically reschedules Pods to other nodes during maintenance. Any deployments or StatefulSets with a single replica experience downtime while being rescheduled.
+:::
+
+### Online Environments
+
+To update the kURL cluster in an online environment:
+
+1. Edit the kURL installer spec as desired. For example, update the version of Kubernetes or add, remove, or update add-ons. For more information, see [Creating a kURL Installer](/vendor/packaging-embedded-kubernetes).
+
+1. Run the kURL installation script on any primary node in the cluster:
+
+   ```bash
+   curl -sSL https://k8s.kurl.sh/APP_SLUG | sudo bash -s ADVANCED_OPTIONS
+   ```
+   Replace:
+   * `APP_SLUG` with the unique slug for the application.
+   * `ADVANCED_OPTIONS` optionally with any flags listed in [Advanced Options](https://kurl.sh/docs/install-with-kurl/advanced-options) in the kURL documentation.
+
+     To use no advanced installation options, remove `-s ADVANCED_OPTIONS` from the command.
+
+   See the following recommendations for advanced options:
+
+   
+
+1. 
+
+### Air Gap Environments
+
+For air gap installations, you must load images on each node in the cluster before you can run the installation script to update Kubernetes and any add-ons. This is because upgraded components might have Pods scheduled on any node in the cluster.
+
+To update the kURL cluster in an air gap environment:
+
+1. Edit the kURL installer spec as desired. For example, update the version of Kubernetes or add, remove, or update add-ons. For more information, see [Creating a kURL Installer](/vendor/packaging-embedded-kubernetes).
+
+1. Repeat the following steps on each node in the cluster to download and extract the kURL `.tar.gz` air gap bundle for the updated spec:
+
+   1. Download the kURL `.tar.gz` air gap bundle from the channel where the new kURL installer spec is promoted:
+
+      * To download the kURL air gap bundle for the Stable channel:
+
+        
+
+      * To download the kURL bundle for channels other than Stable:
+
+        ```bash
+        replicated channel inspect CHANNEL
+        ```
+        Replace `CHANNEL` with the exact name of the target channel, which can include uppercase letters or special characters, such as `Unstable` or `my-custom-channel`.
+
+        In the output of this command, copy the curl command with the air gap URL.
+
+   1. Extract the contents of the bundle:
+
+      ```bash
+      tar -xvzf FILENAME.tar.gz
+      ```
+      Replace `FILENAME` with the name of the downloaded kURL `.tar.gz` air gap bundle.
+
+1. Run the following kURL script to ensure all required images are available:
+
+   ```bash
+   cat tasks.sh | sudo bash -s load-images
+   ```
+
+   :::note
+   The kURL installation script that you will run in the next step also performs a check for required images and prompts you to run the `load-images` command if any images are missing.
+   :::
+
+1. Run the kURL installation script on any primary node in the cluster with the `airgap` option:
+
+   ```bash
+   cat install.sh | sudo bash -s airgap OTHER_ADVANCED_OPTIONS
+   ```
+   Replace `OTHER_ADVANCED_OPTIONS` optionally with any flags listed in [Advanced Options](https://kurl.sh/docs/install-with-kurl/advanced-options) in the kURL documentation.
+
+   See the following recommendations for advanced options:
+
+   
+
+1. 
+ + :::note + If Kubernetes must be upgraded by more than one minor version, the script automatically searches for the required Kubernetes assets in the `/var/lib/kurl/assets/` directory. If the assets are not available, the script prints a command to download the assets as a `tar.gz` package. Download and provide the absolute path to the package when prompted to continue with the upgrade. + ::: + +--- + + +# Updating Licenses in the Admin Console + +# Updating Licenses in the Admin Console + +This topic describes how to update a license from the KOTS Admin Console. + +## Update Online Licenses + +To update licenses in online environments: + +1. In the Admin Console, go to the **License** tab. + +1. Click **Sync license** to get the latest updates. + + ![Online License](/images/online-license-tab.png) + + [View a larger version of this image](/images/online-license-tab.png) + + :::note + If no changes are detected, a **License is already up to date** message appears. + ::: + + When the license is updated, KOTS makes a new version available that includes the license changes: + + ![License updated successfully](/images/kots-license-update-message.png) + + [View a larger version of this image](/images/kots-license-update-message.png) + +1. In the dialog, click **Go to new version** to navigate to the **Version history** page. + +1. On the **Version history** page, next to the new version labeled **License Change**, click **Deploy** then **Yes, deploy**. + + ![Deploy license change](/images/kots-deploy-license-change.png) + + [View a larger version of this image](/images/kots-deploy-license-change.png) + + The version with the license change is then displayed as the currently deployed version, as shown below: + + ![Currently deployed version](/images/kots-license-change-currently-deployed.png) + + [View a larger version of this image](/images/kots-license-change-currently-deployed.png) + +## Update Air Gap Licenses + +To update licenses in air gap environments: + +1. Download the new license. Ensure that it is available on the machine where you can access a browser. + +1. In the Admin Console, go to the **License** tab. + +1. Click **Upload license** and select the new license. + + ![Airgap License](/images/airgap-license-tab.png) + + [View a larger version of this image](/images/airgap-license-tab.png) + + :::note + If no changes are detected, a **License is already up to date** message appears. + ::: + + When the license is updated, KOTS makes a new version available that includes the license changes: + + ![License updated successfully](/images/kots-airgap-license-update-message.png) + + [View a larger version of this image](/images/kots-airgap-license-update-message.png) + +1. In the dialog, click **Go to new version** to navigate to the **Version history** page. + +1. On the **Version history** page, next to the new version labeled **License Change**, click **Deploy** then **Yes, deploy**. + + ![Deploy license change](/images/kots-deploy-license-change.png) + + [View a larger version of this image](/images/kots-deploy-license-change.png) + + The version with the license change is then displayed as the currently deployed version, as shown below: + + ![Currently deployed version](/images/kots-license-change-currently-deployed.png) + + [View a larger version of this image](/images/kots-license-change-currently-deployed.png) + +## Upgrade from a Community License + +If you have a community license, you can change your license by uploading a new one. 
This allows you to upgrade from a community version of the software without having to reinstall the Admin Console and the application.
+
+To change a community license to another license:
+
+1. Download the new license.
+1. In the **License** tab of the Admin Console, click **Change license**.
+1. In the dialog, upload the new license file.
+
+---
+
+
+# Patching with Kustomize
+
+# Patching with Kustomize
+
+This topic describes how to use Kustomize to patch an application before deploying.
+
+## Overview
+
+Replicated KOTS uses Kustomize to let you make patches to an application outside of the options available in the KOTS Admin Console **Config** page. _Kustomizations_ are the Kustomize configuration objects, defined in `kustomization.yaml` files, that describe how to transform or generate other Kubernetes objects.
+
+These kustomizations overlay the application resource files and can persist after release updates. For example, you can kustomize the number of replicas that you want to continually use in your environment or specify what `nodeSelectors` to use for a deployment.
+
+For more information, see the [Kustomize website](https://kustomize.io).
+
+## Limitation
+
+For Helm charts deployed with version `kots.io/v1beta2` of the KOTS HelmChart custom resource, editing the downstream Kustomization files to make changes to the application before deploying is not supported. This is because KOTS does not use Kustomize when installing Helm charts with the `kots.io/v1beta2` HelmChart custom resource. For more information, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about).
+
+## About the Directory Structure
+
+You can patch an application with Kustomize from the **View files** page in the Admin Console. The **View files** page shows the Kubernetes manifest files for the application.
+
+The following image shows an example of the file directory on the **View files** page:
+
+![Kustomize Directory Structure](/images/kustomize-dir-structure.png)
+
+[View a larger version of this image](/images/kustomize-dir-structure.png)
+
+For more information about each of the sections in the file directory, see the following sections:
+
+- [Upstream](#upstream)
+- [Base](#base)
+- [Overlays](#overlays)
+- [Rendered](#rendered)
+- [skippedFiles](#skippedfiles)
+
+### Upstream
+
+The following table describes the `upstream` directory and whether custom changes persist after an update:
+
+| Directory | Changes Persist? | Description |
+| --- | --- | --- |
+| `upstream` | No, except for the `userdata` subdirectory | The `upstream` directory exactly mirrors the content pushed to a release. Contains the template functions, preflight checks, support bundle, config options, license, and so on. Contains a `userdata` subdirectory that includes user data files such as the license file and the config file. |
+
+### Base
+
+The following table describes the `base` directory and whether custom changes persist after an update:
+
+| Directory | Changes Persist? | Description |
+| --- | --- | --- |
+| `base` | No | After KOTS processes and renders the upstream, it puts those files in the `base` directory. Only the deployable application files, such as files deployable with `kubectl apply`, are placed here. Any non-deployable manifests, such as template functions, preflight checks, and configuration options, are removed. |
+
+### Overlays
+
+The `overlays` directory contains the following subdirectories that apply specific kustomizations to the `base` directory when deploying a version to the cluster. The following table describes the subdirectories and specifies whether the custom changes made in each subdirectory persist after an update.
+
+| Subdirectory | Changes Persist? | Description |
+| --- | --- | --- |
+| `midstream` | No | Contains KOTS-specific kustomizations, such as: backup labels (for example, those used to configure Velero), and image pull secret definitions and patches to inject the `imagePullSecret` field into relevant manifests (such as deployments, stateful sets, and jobs). |
+| `downstream` | Yes | Contains user-defined kustomizations that are applied to the `midstream` directory and deployed to the cluster. Only one `downstream` subdirectory is supported. It is automatically created and named `this-cluster` when the Admin Console is installed. To add kustomizations, see [Patch an Application](#patch-an-application). |
+| `midstream/charts` | No | Appears only when the `useHelmInstall` property in the HelmChart custom resource is set to `true`. Contains a subdirectory for each Helm chart. Each Helm chart has its own kustomizations because each chart is rendered and deployed separately from other charts and manifests. The subcharts of each Helm chart also have their own kustomizations and are rendered separately. However, these subcharts are included and deployed as part of the parent chart. |
+| `downstream/charts` | Yes | Appears only when the `useHelmInstall` property in the HelmChart custom resource is set to `true`. Contains a subdirectory for each Helm chart. Each Helm chart has its own kustomizations because each chart is rendered and deployed separately from other charts and manifests. The subcharts of each Helm chart also have their own kustomizations and are rendered separately. However, these subcharts are included and deployed as part of the parent chart. |
+
+### Rendered
+
+The following table describes the `rendered` directory and whether custom changes persist after an update:
+
+| Directory | Changes Persist? | Description |
+| --- | --- | --- |
+| `rendered` | No | Contains the final rendered application manifests that are deployed to the cluster. The rendered files are created when KOTS processes the `base` by applying the corresponding overlays and the user-defined kustomizations. KOTS puts the rendered files in the `rendered` directory. |
+| `rendered/charts` | No | Appears only when the `useHelmInstall` property in the HelmChart custom resource is set to `true`. Contains a subdirectory for each rendered Helm chart. Each Helm chart is deployed separately from other charts and manifests. The rendered subcharts of each Helm chart are included and deployed as part of the parent chart. |
+
    + +### skippedFiles + +The `skippedFiles` directory lists files that KOTS is not able to process or render, such as invalid YAML files. + +The `_index.yaml` file contains metadata and details about the errors, such as which files they were found in and sometimes the line number of the error. + +## Patch an Application + +To patch the application with Kustomize so that your changes persist between updates, edit the files in the `overlays/downstream/this-cluster` directory. + +The Admin Console overwrites the `upstream` and `base` directories each time you upgrade the application to a later version. + +To patch an application: + +1. On the View Files tab in the Admin Console, click **Need to edit these files? Click here to learn how**. + + ![edit-patches-kots-app](/images/edit-patches-kots-app.png) + +1. To download the application bundle locally: + + ```shell + kubectl kots download --namespace APP_NAMESPACE --slug APP_SLUG + ``` + Replace: + * `APP_NAMESPACE` with the namespace on the cluster where the application is deployed. + * `APP_SLUG` with the unique slug for the application. + + You can copy these values from the dialog that appears when you click **Need to edit these files? Click here to learn how**. + +1. Create a Kubernetes manifest YAML file and make any desired edits. You only need to add the fields and values that you want to change because this patch file overwrites the corresponding values in the `base` directory. For example, the following `Deployment` patch manifest file shows an edit only to the number of replicas. None of the other values in the `base/deployment.yaml` file will be overwritten. + + ```yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + name: example-nginx + spec: + replicas: 2 + ``` + +1. Add the filename that you created in the previous step to the `patches` field in the `kustomization.yaml` file, located in `/overlays/downstream/this-cluster`. The `downstream/this-cluster` subdirectory is where custom changes (patches) persist when releases are updated. These changes are in turn applied to the `midstream` directory. For more information, see [overlays](#overlays). + + **Example:** + + ```yaml + apiVersion: kustomize.config.k8s.io/v1beta1 + bases: + - ../../midstream + kind: Kustomization + patches: + - path: ./FILENAME.yaml + ``` + +1. Upload your changes to the cluster: + + ```shell + kubectl kots upload --namespace APP_NAMESPACE --slug APP_SLUG ~/APP-SLUG + ``` + +1. On the Version History tab in the Admin Console, click **Diff** to see the new version of the application with the diff of the changes that you uploaded. + + ![kustomize-view-history-diff](/images/kustomize-view-history-diff.png) + + [View a larger version of this image](/images/kustomize-view-history-diff.png) + +1. Click **Deploy** to apply the changes. + + ![kustomize-view-history-deploy](/images/kustomize-view-history-deploy.png) + +1. Verify your changes. For example, running the following command shows that there are two NGINX pods running after deploying two replicas in the example YAML above: + + ```shell + kubectl get po | grep example-nginx + ``` + **Example output:** + + ```shell + example-nginx-f5c49fdf6-bf584 1/1 Running 0 1h + example-nginx-t6ght74jr-58fhr 1/1 Running 0 1m + ``` + + +--- + + +# Updating TLS Certificates in kURL Clusters + +import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" + +# Updating TLS Certificates in kURL Clusters + + + +This topic describes how to upload custom TLS certificates for Replicated kURL clusters. 
+
+## Overview
+
+For kURL clusters, the default Replicated KOTS self-signed certificate automatically renews 30 days before the expiration date.
+
+If you have uploaded a custom TLS certificate instead, then no renewal is attempted, even if the certificate is expired. In this case, you must manually upload a new custom certificate.
+
+For information about TLS renewal for the registry and the Kubernetes control plane with Replicated kURL, see [TLS Certificates](https://kurl.sh/docs/install-with-kurl/setup-tls-certs) in the kURL documentation.
+
+## Update Custom TLS Certificates
+
+If you are using a custom TLS certificate in a kURL cluster, you must manually upload a new certificate when the previous one expires.
+
+:::important
+Adding the `acceptAnonymousUploads` annotation temporarily creates a vulnerability for an attacker to maliciously upload TLS certificates. After TLS certificates have been uploaded, the vulnerability is closed again.
+
+Replicated recommends that you complete this upload process quickly to minimize the vulnerability risk.
+:::
+
+To upload a new custom TLS certificate:
+
+1. Run the following annotation command to restore the ability to upload new TLS certificates:
+
+   ```bash
+   kubectl -n default annotate secret kotsadm-tls acceptAnonymousUploads=1 --overwrite
+   ```
+1. Run the following command to get the name of the kurl-proxy server:
+
+   ```bash
+   kubectl get pods -A | grep kurl-proxy | awk '{print $2}'
+   ```
+
+1. Run the following command to delete the kurl-proxy pod. The pod automatically restarts after the command runs.
+
+   ```bash
+   kubectl delete pods PROXY_SERVER
+   ```
+
+   Replace `PROXY_SERVER` with the name of the kurl-proxy server that you got in the previous step.
+
+1. After the pod has restarted, direct your browser to `http://HOSTNAME:8800/tls`, replacing `HOSTNAME` with the hostname or IP address of the server, and go through the upload process in the user interface.
+
+
+---
+
+
+# Introduction to KOTS
+
+import Kots from "../docs/partials/kots/_kots-definition.mdx"
+
+# Introduction to KOTS
+
+This topic provides an introduction to the Replicated KOTS installer, including information about KOTS features, installation options, and user interfaces.
+
+:::note
+The Replicated KOTS entitlement is required to install applications with KOTS. For more information, see [Pricing](https://www.replicated.com/pricing) on the Replicated website.
+:::
+
+## Overview
+
+
+
+KOTS communicates securely with the Replicated Vendor Portal to synchronize customer licenses, check for available application updates, send instance data, share customer-generated support bundles, and more.
+
+Installing an application with KOTS provides access to features such as:
+
+* Support for air gap installations in environments with limited or no outbound internet access
+* Support for installations on VMs or bare metal servers, when using Replicated Embedded Cluster or Replicated kURL
+* The KOTS Admin Console, which provides a user interface where customers can install and manage their application instances
+* Instance telemetry automatically sent to the Vendor Portal for instances running in customer environments
+* Strict preflight checks that block installation if environment requirements are not met
+* Backup and restore with Replicated snapshots
+* Support for marking releases as required to prevent users from skipping them during upgrades
+
+KOTS is an open source project that is maintained by Replicated. For more information, see the [kots](https://github.com/replicatedhq/kots) repository in GitHub.
+
+## About Installing with KOTS
+
+KOTS can be used to install Kubernetes applications and Helm charts in the following environments:
+* Clusters provisioned on VMs or bare metal servers with Replicated Embedded Cluster or Replicated kURL
+* Existing clusters brought by the user
+* Online (internet-connected) or air-gapped (disconnected) environments
+
+To install an application with KOTS, users first run an installation script that installs KOTS in the target cluster and deploys the KOTS Admin Console. After KOTS is installed, users can log in to the KOTS Admin Console to upload their license file, configure the application, run preflight checks, and install and deploy the application.
+
+The following diagram demonstrates how a single release promoted to the Stable channel in the Vendor Portal can be installed with KOTS in an embedded cluster on a VM, in an existing air-gapped cluster, and in an existing internet-connected cluster:
+
+![Embedded cluster, air gap, and existing cluster app installation workflows](/images/kots-installation-overview.png)
+
+[View a larger version of this image](/images/kots-installation-overview.png)
+
+As shown in the diagram above:
+* For installations in existing online (internet-connected) clusters, users run a command to install KOTS in their cluster.
+* For installations on VMs or bare metal servers, users run an Embedded Cluster or kURL installation script that both provisions a cluster in their environment and installs KOTS in the cluster.
+* For installations in air-gapped clusters, users download air gap bundles for KOTS and the application from the Replicated Download Portal and then provide the bundles during installation.
+
+All users must have a valid license file to install with KOTS. After KOTS is installed in the cluster, users can access the KOTS Admin Console to provide their license and deploy the application.
+
+For more information about how to install applications with KOTS, see the [Installing an Application](/enterprise/installing-overview) section.
+
+## KOTS User Interfaces
+
+This section describes the KOTS interfaces available to users for installing and managing applications.
+
+### KOTS Admin Console
+
+KOTS provides an Admin Console to make it easy for users to install, manage, update, configure, monitor, back up and restore, and troubleshoot their application instance from a GUI.
+
+The following shows an example of the Admin Console dashboard for an application:
+
+![Admin Console Dashboard](/images/guides/kots/application.png)
+
+[View a larger version of this image](/images/guides/kots/application.png)
+
+For applications installed with Replicated Embedded Cluster on a VM or bare metal server, the Admin Console also includes a **Cluster Management** tab where users can add and manage nodes in the embedded cluster, as shown below:
+
+![Admin console dashboard with Cluster Management tab](/images/gitea-ec-ready.png)
+
+[View a larger version of this image](/images/gitea-ec-ready.png)
+
+### KOTS CLI
+
+The KOTS command-line interface (CLI) is a kubectl plugin. Customers can run commands with the KOTS CLI to install and manage their application instances with KOTS programmatically.
+
+For information about getting started with the KOTS CLI, see [Installing the KOTS CLI](/reference/kots-cli-getting-started).
+
+The KOTS CLI can also be used to install an application without needing to access the Admin Console. This can be useful for automating installations and upgrades, such as in CI/CD pipelines.
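+
+For example, for an existing online cluster, the basic commands follow this pattern. This is a minimal sketch; the app slug, license path, and password are placeholders:
+
+```bash
+# Install the KOTS CLI as a kubectl plugin
+curl https://kots.io/install | bash
+
+# Install an application from the command line, providing the license
+# and Admin Console password up front
+kubectl kots install APP_SLUG \
+  --license-file ./license.yaml \
+  --shared-password PASSWORD \
+  --no-port-forward
+```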
For information about how to perform headless installations from the command line, see [Installing with the KOTS CLI](/enterprise/installing-existing-cluster-automation). + +--- + + +# Introduction to Replicated + +--- +pagination_prev: null +--- + +import ApiAbout from "/docs/partials/vendor-api/_api-about.mdx" +import Replicated from "/docs/partials/getting-started/_replicated-definition.mdx" +import Helm from "/docs/partials/helm/_helm-definition.mdx" +import Kots from "/docs/partials/kots/_kots-definition.mdx" +import KotsEntitlement from "/docs/partials/kots/_kots-entitlement-note.mdx" +import SDKOverview from "/docs/partials/replicated-sdk/_overview.mdx" +import CSDL from "/docs/partials/getting-started/_csdl-overview.mdx" +import PreflightSbAbout from "/docs/partials/preflights/_preflights-sb-about.mdx" + +# Introduction to Replicated + +This topic provides an introduction to the Replicated Platform, including a platform overview and a list of key features. It also describes the Commercial Software Distribution Lifecycle and how Replicated features can be used in each phase of the lifecycle. + +## About the Replicated Platform + + + +The Replicated Platform features are designed to support ISVs during each phase of the Commercial Software Distribution Lifecycle. For more information, see [Commercial Software Distribution Lifecycle](#csdl) below. + +The following diagram demonstrates the process of using the Replicated Platform to distribute an application, install the application in a customer environment, and support the application after installation: + +![replicated platform features workflow](/images/replicated-platform.png) + +[View a larger version of this image](/images/replicated-platform.png) + +The diagram above shows an application that is packaged with the [**Replicated SDK**](/vendor/replicated-sdk-overview). The application is tested in clusters provisioned with the [**Replicated Compatibility Matrix**](/vendor/testing-about), then added to a new release in the [**Vendor Portal**](/vendor/releases-about) using an automated CI/CD pipeline. + +The application is then installed by a customer ("Big Bank") on a VM. To install, the customer downloads their license, which grants proxy access to the application images through the [**Replicated proxy registry**](/vendor/private-images-about). They also download the installation assets for the [**Replicated Embedded Cluster**](/vendor/embedded-overview) installer. + +Embedded Cluster runs [**preflight checks**](/vendor/preflight-support-bundle-about) to verify that the environment meets the installation requirements, provisions a cluster on the VM, and installs [**Replicated KOTS**](intro-kots) in the cluster. KOTS provides an [**Admin Console**](intro-kots#kots-admin-console) where the customer enters application-specific configurations, runs application preflight checks, optionally joins nodes to the cluster, and then deploys the application. After installation, customers can manage both the application and the cluster from the Admin Console. + +Finally, the diagram shows how [**instance data**](/vendor/instance-insights-event-data) is automatically sent from the customer environment to the Vendor Portal by the Replicated SDK API and the KOTS Admin Console. Additionally, tooling from the open source [**Troubleshoot**](https://troubleshoot.sh/docs/collect/) project is used to generate and send [**support bundles**](/vendor/preflight-support-bundle-about), which include logs and other important diagnostic data. 
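+
+As an example of this flow, data that the SDK reports, such as details about the customer's license, is also exposed by its in-cluster API. A minimal sketch, assuming the SDK's default in-cluster service name `replicated` listening on port 3000:
+
+```bash
+# From a pod in the same namespace as the Replicated SDK service,
+# query the SDK API for information about the customer's license:
+curl replicated:3000/api/v1/license/info
+```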
+ +## Replicated Platform Features + +The following describes the key features of the Replicated Platform. + +### Compatibility Matrix + +Replicated Compatibility Matrix can be used to get kubectl access to running clusters within minutes or less. Compatibility Matrix supports various Kubernetes distributions and versions and can be interacted with through the Vendor Portal or the Replicated CLI. + +For more information, see [About Compatibility Matrix](/vendor/testing-about). + +### Embedded Cluster + +Replicated Embedded Cluster is a Kubernetes installer based on the open source Kubernetes distribution k0s. With Embedded Cluster, users install and manage both the cluster and the application together as a single appliance on a VM or bare metal server. In this way, Kubernetes is _embedded_ with the application. + +Additionally, each version of Embedded Cluster includes a specific version of [Replicated KOTS](#kots) that is installed in the cluster during installation. KOTS is used by Embedded Cluster to deploy the application and also provides the Admin Console UI where users can manage both the application and the cluster. + +For more information, see [Embedded Cluster Overview](/vendor/embedded-overview). + +### KOTS (Admin Console) {#kots} + +KOTS is a kubectl plugin and in-cluster Admin Console that installs Kubernetes applications in customer-controlled environments. + +KOTS is used by [Replicated Embedded Cluster](#embedded-cluster) to deploy applications and also to provide the Admin Console UI where users can manage both the application and the cluster. KOTS can also be used to install applications in existing Kubernetes clusters in customer-controlled environments, including clusters in air-gapped environments with limited or no outbound internet access. + +For more information, see [Introduction to KOTS](intro-kots). + +### Preflight Checks and Support Bundles + + + +For more information, see [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about). + +### Proxy Registry + +The Replicated proxy registry grants proxy access to an application's images using the customer's unique license. This means that customers can get access to application images during installation without the vendor needing to provide registry credentials. + +For more information, see [About the Replicated Proxy Registry](/vendor/private-images-about). + +### Replicated SDK + +The Replicated SDK is a Helm chart that can be installed as a small service alongside your application. It provides an in-cluster API that can be used to communicate with the Vendor Portal. For example, the SDK API can return details about the customer's license or report telemetry on the application instance back to the Vendor Portal. + +For more information, see [About the Replicated SDK](/vendor/replicated-sdk-overview). + +### Vendor Portal + +The Replicated Vendor Portal is the web-based user interface that you can use to configure and manage all of the Replicated features for distributing and managing application releases, supporting your release, viewing customer insights and reporting, and managing teams. + +The Vendor Portal can also be interacted with programmatically using the following developer tools: + +* **Replicated CLI**: The Replicated CLI can be used to complete tasks programmatically, including all tasks for packaging and managing applications, and managing artifacts such as teams, license files, and so on. 
For more information, see [Installing the Replicated CLI](/reference/replicated-cli-installing).
+
+* **Vendor API v3**: The Vendor API can be used to complete tasks programmatically, including all tasks for packaging and managing applications, and managing artifacts such as teams and license files. For more information, see [Using the Vendor API v3](/reference/vendor-api-using).
+
+## Commercial Software Distribution Lifecycle {#csdl}
+
+Replicated Platform features are designed to support ISVs in each phase of the Commercial Software Distribution Lifecycle shown below:
+
+![software distribution lifecycle wheel](/images/software-dev-lifecycle.png)
+
+[View a larger version of this image](/images/software-dev-lifecycle.png)
+
+
+
+For more information, and to download a copy of The Commercial Software Distribution Handbook, see [The Commercial Software Distribution Handbook](https://www.replicated.com/the-commercial-software-distribution-handbook).
+
+The following describes the phases of the software distribution lifecycle:
+
+* **[Develop](#develop)**: Application design and architecture decisions align with customer needs, and development teams can quickly iterate on new features.
+* **[Test](#test)**: Run automated tests in several customer-representative environments as part of continuous integration and continuous delivery (CI/CD) workflows.
+* **[Release](#release)**: Use channels to share releases with external and internal users, publish release artifacts securely, and use consistent versioning.
+* **[License](#license)**: Licenses are customized to each customer and are easy to issue, manage, and update.
+* **[Install](#install)**: Provide unique installation options depending on customers' preferences and experience levels.
+* **[Report](#report)**: Make more informed prioritization decisions by collecting usage and performance metadata for application instances running in customer environments.
+* **[Support](#support)**: Diagnose and resolve support issues quickly.
+
+For more information about the Replicated features that support each of these phases, see the sections below.
+
+### Develop
+
+The Replicated SDK exposes an in-cluster API that can be developed against to quickly integrate and test core functionality with an application. For example, when the SDK is installed alongside an application in a customer environment, the in-cluster API can be used to send custom metrics from the instance to the Replicated vendor platform.
+
+For more information about using the Replicated SDK, see [About the Replicated SDK](/vendor/replicated-sdk-overview).
+
+### Test
+
+The Replicated Compatibility Matrix rapidly provisions ephemeral Kubernetes clusters, including multi-node and OpenShift clusters. When integrated into existing CI/CD pipelines for an application, the Compatibility Matrix can be used to automatically create a variety of customer-representative environments for testing code changes.
+
+For more information, see [About Compatibility Matrix](/vendor/testing-about).
+
+### Release
+
+Release channels in the Replicated Vendor Portal allow ISVs to make different application versions available to different customers, without needing to maintain separate code bases. For example, a "Beta" channel can be used to share beta releases of an application with only a certain subset of customers.
+
+For more information about working with channels, see [About Channels and Releases](/vendor/releases-about).
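+
+For example, a CI/CD pipeline might create a release and promote it to the Beta channel with the Replicated CLI. A sketch, assuming application manifests in a local `./manifests` directory:
+
+```bash
+# Create a new release from local manifests and promote it to the Beta channel
+replicated release create --yaml-dir ./manifests --promote Beta --version 1.2.3
+```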
+
+Additionally, the Replicated proxy registry grants proxy access to private application images using the customers' license. This ensures that customers have the right access to images based on the channel they are assigned. For more information about using the proxy registry, see [About the Replicated Proxy Registry](/vendor/private-images-about).
+
+### License
+
+Create customers in the Replicated Vendor Portal to handle licensing for your application in both online and air gap environments. For example:
+* License free trials and different tiers of product plans
+* Create and manage custom license entitlements
+* Verify license entitlements both before installation and during runtime
+* Measure and report usage
+
+For more information about working with customers and custom license fields, see [About Customers](/vendor/licenses-about).
+
+### Install
+
+Applications distributed with the Replicated Platform can support multiple different installation methods from the same application release, helping you to meet your customers where they are. For example:
+
+* Customers who are not experienced with Kubernetes or who prefer to deploy to a dedicated cluster in their environment can install on a VM or bare metal server with the Replicated Embedded Cluster installer. For more information, see [Embedded Cluster Overview](/vendor/embedded-overview).
+* Customers familiar with Kubernetes and Helm can install in their own existing cluster using Helm. For more information, see [Installing with Helm](/vendor/install-with-helm).
+* Customers installing into environments with limited or no outbound internet access (often referred to as air-gapped environments) can securely access and push images to their own internal registry, then install using Helm or a Replicated installer. For more information, see [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap) and [Installing and Updating with Helm in Air Gap Environments (Alpha)](/vendor/helm-install-airgap).
+
+### Report
+
+When installed alongside an application, the Replicated SDK and Replicated KOTS automatically send instance data from the customer environment to the Replicated Vendor Portal. This instance data includes health and status indicators, adoption metrics, and performance metrics. For more information, see [About Instance and Event Data](/vendor/instance-insights-event-data).
+
+ISVs can also set up email and Slack notifications to get alerted of important instance issues or performance trends. For more information, see [Configuring Instance Notifications](/vendor/instance-notifications-config).
+
+### Support
+
+Support teams can use Replicated features to more quickly diagnose and resolve application issues. For example:
+
+- Customize and generate support bundles, which collect and analyze redacted information from the customer's cluster, environment, and application instance, as shown in the sketch after this list. See [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about).
+- Provision customer-representative environments with Compatibility Matrix to recreate and diagnose issues. See [About Compatibility Matrix](/vendor/testing-about).
+- Get insights into an instance's status by accessing telemetry data, which covers the health of the application, the current application version, and details about the infrastructure and cluster where the application is running. For more information, see [Customer Reporting](/vendor/customer-reporting).
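+
+As an illustration of the support bundle workflow, bundles can be generated with the open source Troubleshoot `support-bundle` kubectl plugin. A minimal sketch, assuming a vendor-provided spec file named `support-bundle.yaml`:
+
+```bash
+# Install the support-bundle plugin (one documented method is krew),
+# then collect a bundle using the spec file:
+kubectl krew install support-bundle
+kubectl support-bundle ./support-bundle.yaml
+```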
+
+---
+
+
+# Home
+
+---
+slug: /
+pagination_next: null
+---
+
+# Home
+

+**What's New?**
+
+**Embedded Cluster 2.0 Release**
+
+The 2.0 release brings improvements to architecture that increase the reliability and stability of Embedded Cluster.
+
+**Did You Know?**
+
+**Manage Supported Install Methods Per Customer**
+
+Control which installation methods are available for each customer from the **Install types** field in the customer's license.
+
+---
+
+
+# _airgap-bundle
+
+Air gap bundles (`.airgap`) contain the images needed to install and run a single release of your application in _air gap_ environments with no outbound internet access.
+
+---
+
+
+# _nginx-deployment
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: nginx
+  labels:
+    app: nginx
+spec:
+  selector:
+    matchLabels:
+      app: nginx
+  template:
+    metadata:
+      labels:
+        app: nginx
+      annotations:
+        backup.velero.io/backup-volumes: nginx-content
+    spec:
+      containers:
+      - name: nginx
+        image: nginx
+        resources:
+          limits:
+            memory: '256Mi'
+            cpu: '500m'
+          requests:
+            memory: '32Mi'
+            cpu: '100m'
+```
+
+---
+
+
+# _nginx-k8s-app
+
+```yaml
+apiVersion: app.k8s.io/v1beta1
+kind: Application
+metadata:
+  name: "nginx"
+spec:
+  descriptor:
+    links:
+      - description: Open App
+        # needs to match applicationUrl in kots-app.yaml
+        url: "http://nginx"
+```
+
+---
+
+
+# _nginx-kots-app
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Application
+metadata:
+  name: nginx
+spec:
+  title: App Name
+  icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/kubernetes/icon/color/kubernetes-icon-color.png
+  statusInformers:
+    - deployment/nginx
+  ports:
+    - serviceName: "nginx"
+      servicePort: 80
+      localPort: 8888
+      applicationUrl: "http://nginx"
+```
+
+---
+
+
+# _nginx-service
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: nginx
+  labels:
+    app: nginx
+  annotations:
+    kots.io/when: '{{repl not IsKurl }}'
+spec:
+  type: ClusterIP
+  ports:
+  - port: 80
+  selector:
+    app: nginx
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: nginx
+  labels:
+    app: nginx
+  annotations:
+    kots.io/when: '{{repl IsKurl }}'
+spec:
+  type: NodePort
+  ports:
+  - port: 80
+    nodePort: 8888
+  selector:
+    app: nginx
+```
+
+---
+
+
+# _build-source-code
+
+Add one or more jobs to compile your application source code and build images. The build jobs that you create vary depending upon your application and your CI/CD platform. For additional guidance, see the documentation for your CI/CD platform.
+
+---
+
+
+# _test-recs
+
+* **Application Testing:** Traditional application testing includes unit, integration, and end-to-end tests. These tests are critical for application reliability, and Compatibility Matrix is designed to incorporate and use your application testing.
+
+* **Performance Testing:** Performance testing is used to benchmark your application to ensure it can handle the expected load and scale gracefully. Test your application under a range of workloads and scenarios to identify any bottlenecks or performance issues. Make sure to optimize your application for different Kubernetes distributions and configurations by creating all of the environments you need to test in.
+
+* **Smoke Testing:** Using a single, conformant Kubernetes distribution to test basic functionality of your application with default (or standard) configuration values is a quick way to get feedback if something is likely to be broken for all or most customers. Replicated also recommends that you include each Kubernetes version that you intend to support in your smoke tests.
+
+* **Compatibility Testing:** Because applications run on various Kubernetes distributions and configurations, it is important to test compatibility across different environments. Compatibility Matrix provides this infrastructure.
+
+* **Canary Testing:** Before releasing to all customers, consider deploying your application to a small subset of your customer base as a _canary_ release.
This lets you monitor the application's performance and stability in real-world environments, while minimizing the impact of potential issues. Compatibility Matrix enables canary testing by simulating exact (or near) customer environments and configurations to test your application with.
+
+---
+
+
+# _openshift-pool
+
+:::note
+Due to the time it takes to start an OpenShift cluster, a warm pool of OpenShift clusters is maintained.
+When available, an OpenShift cluster from the pool starts in approximately two minutes with default disks.
+When starting a cluster with a disk size different from the default, an additional four minutes is added to the warm cluster start time.
+:::
+
+
+---
+
+
+# _overview
+
+Replicated Compatibility Matrix quickly provisions ephemeral clusters of different Kubernetes distributions and versions, such as OpenShift, EKS, and Replicated kURL.
+
+You can use Compatibility Matrix to get kubectl access to running clusters within minutes or less. This allows you to more easily test your code in a range of different environments before releasing to customers.
+
+Example use cases for Compatibility Matrix include:
+* Run tests before releasing a new version of your application to validate compatibility with supported Kubernetes distributions
+* Get access to a cluster to develop on and quickly test changes
+* Reproduce a reported issue on a customer-representative environment for troubleshooting
+
+
+---
+
+
+# _prerequisites
+
+* Create an account in the Replicated Vendor Portal. See [Creating a Vendor Account](/vendor/vendor-portal-creating-account).
+
+* Install the Replicated CLI and then authorize the CLI using your vendor account. See [Installing the Replicated CLI](/reference/replicated-cli-installing).
+
+* If you have a contract, you can purchase more credits by going to [**Compatibility Matrix > Buy additional credits**](https://vendor.replicated.com/compatibility-matrix). Otherwise, you can request credits by going to [**Compatibility Matrix > Request more credits**](https://vendor.replicated.com/compatibility-matrix) in the Vendor Portal. For more information, see [Billing and Credits](/vendor/testing-about#billing-and-credits).
+
+
+---
+
+
+# _supported-clusters-overview
+
+Compatibility Matrix can create clusters on virtual machines (VMs), such as kind, k3s, RKE2, and Red Hat OpenShift OKD, and also create cloud-managed clusters, such as EKS, GKE, and AKS:
+
+* Cloud-based Kubernetes distributions are run in a Replicated managed and controlled cloud account to optimize and deliver clusters quickly and reliably. The Replicated account has control planes ready and adds a node group when you request it, making the cluster available much faster than if you try to create your own cluster with your own cloud account.
+
+* VMs run on Replicated bare metal servers located in several data centers, including data centers physically in the European Union.
+
+To view an up-to-date list of the available cluster distributions, including the supported Kubernetes versions, instance types, and maximum nodes for each distribution, run [`replicated cluster versions`](/reference/replicated-cli-cluster-versions).
+
+For detailed information about the available cluster distributions, see [Supported Compatibility Matrix Cluster Types](testing-supported-clusters).
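+
+For example, an ephemeral test cluster can be requested with the Replicated CLI. A sketch, assuming a single-node k3s cluster is sufficient; adjust the distribution to one that `replicated cluster versions` reports as available:
+
+```bash
+# Create a small test cluster that is automatically deleted after one hour
+replicated cluster create --distribution k3s --nodes 1 --ttl 1h
+```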
+
+
+---
+
+
+# _collab-existing-user
+
+If a team member adds a GitHub username to their Vendor Portal account that already exists in the collab repository, then the Vendor Portal does _not_ change the role that the existing user is assigned in the collab repository.
+
+However, if the RBAC policy assigned to this member in the Vendor Portal later changes, or if the member is removed from the Vendor Portal team, then the Vendor Portal updates or removes the user in the collab repository accordingly.
+
+
+---
+
+
+# _collab-rbac-important
+
+:::important
+The RBAC policy that you specify also determines the level of access that the user has to the Replicated collab repository in GitHub. By default, the Read Only policy grants the user read access to the collab repository.
+
+For more information about managing user access to the collab repository from the Vendor Portal, see [Managing Access to the Collab Repository](team-management-github-username).
+:::
+
+
+---
+
+
+# _collab-rbac-resources-important
+
+:::important
+When you update an existing RBAC policy to add one or more `team/support-issues` resources, the GitHub role in the Replicated collab repository of every team member that is assigned to that policy and has a GitHub username saved in their account is updated accordingly.
+:::
+
+
+---
+
+
+# _collab-repo-about
+
+The replicated-collab organization in GitHub is used for tracking and collaborating on escalations, bug reports, and feature requests that are sent by members of a Vendor Portal team to the Replicated team. Replicated creates a unique repository in the replicated-collab organization for each Vendor Portal team. Members of a Vendor Portal team submit issues to their unique collab repository on the Support page in the [Vendor Portal](https://vendor.replicated.com/support).
+
+For more information about the collab repositories and how they are used, see [Replicated Support Paths and Processes](https://community.replicated.com/t/replicated-vendor-support-paths-and-processes/850) in _Replicated Community_.
+
+
+---
+
+
+# _affixExample
+
+```yaml
+groups:
+- name: example_settings
+  title: My Example Config
+  description: Configuration to serve as an example for creating your own.
+ items: + - name: username + title: Username + type: text + required: true + affix: left + - name: password + title: Password + type: password + required: true + affix: right +``` + +--- + + +# _defaultExample + +```yaml +- name: custom_key + title: Set your secret key for your app + description: Paste in your Custom Key + items: + - name: key + title: Key + type: text + value: "" + default: change me +``` +![Default change me value displayed under the config field](/images/config-default.png) + +[View a larger version of this image](/images/config-default.png) + +--- + + +# _helpTextExample + +```yaml +- name: http_settings + title: HTTP Settings + items: + - name: http_enabled + title: HTTP Enabled + help_text: Check to enable the HTTP listener + type: bool +``` +![Config field with help text underneath](/images/config-help-text.png) + +[View a larger version of this image](/images/config-help-text.png) + +--- + + +# _hiddenExample + +```yaml +- name: secret_key + title: Secret Key + type: password + hidden: true + value: "{{repl RandomString 40}}" +``` + +--- + + +# _item-types + +- `bool` +- `dropdown` +- `file` +- `heading` +- `label` +- `password` +- `radio` +- `select_one` (Deprecated) +- `text` +- `textarea` + + +--- + + +# _nameExample + +```yaml +- name: http_settings + title: HTTP Settings + items: + - name: http_enabled + title: HTTP Enabled + type: bool +``` + +--- + + +# _property-when + +It can be useful to conditionally show or hide fields so that your users are only provided the configuration options that are relevant to them. This helps to reduce user error when configuring the application. Conditional statements in the `when` property can be used to evaluate things like the user's environment, license entitlements, and configuration choices. For example: +* The Kubernetes distribution of the cluster +* If the license includes a specific feature entitlement +* The number of users that the license permits +* If the user chooses to bring their own external database, rather than using an embedded database offered with the application + +You can construct conditional statements in the `when` property using KOTS template functions. KOTS template functions are a set of custom template functions based on the Go text/template library. For more information, see [About Template Functions](/reference/template-functions-about). + +--- + + +# _randomStringNote + +:::note +When you assign a template function that generates a value to a `value` property, you can use the `readonly` and `hidden` properties to define whether or not the generated value is ephemeral or persistent between changes to the configuration settings for the application. For more information, see [RandomString](template-functions-static-context#randomstring) in _Static Context_. 
:::

---


# _readonlyExample

```yaml
- name: key
  title: Key
  type: text
  value: ""
  default: change me
- name: unique_key
  title: Unique Key
  type: text
  value: "{{repl RandomString 20}}"
  readonly: true
```
![Default change me value displayed under the config field](/images/config-readonly.png)

[View a larger version of this image](/images/config-readonly.png)

---


# _recommendedExample

```yaml
- name: recommended_field
  title: My recommended field
  type: bool
  default: "0"
  recommended: true
```
![config field with green recommended tag](/images/config-recommended-item.png)

[View a larger version of this image](/images/config-recommended-item.png)

---


# _regexValidationExample

```yaml
- name: smtp-settings
  title: SMTP Settings
  items:
  - name: smtp_password
    title: SMTP Password
    type: password
    required: true
    validation:
      regex:
        pattern: ^(?:[\w@#$%^&+=!*()_\-{}[\]:;"'<>,.?\/|]){8,16}$
        message: The password must be between 8 and 16 characters long and can contain a combination of uppercase letters, lowercase letters, digits, and special characters.
  - name: jwt_token
    title: JWT token
    type: file
    validation:
      regex:
        pattern: ^[A-Za-z0-9-_]+\.[A-Za-z0-9-_]+\.[A-Za-z0-9-_]*$
        message: Upload a file with a valid JWT token.
```


---


# _requiredExample

```yaml
  - name: custom_key
    title: Set your secret key for your app
    description: Paste in your Custom Key
    items:
    - name: key
      title: Key
      type: text
      value: ""
      default: change me
      required: true
```
![config field with yellow required tag](/images/config-required-item.png)

[View a larger version of this image](/images/config-required-item.png)

---


# _typeExample

```yaml
- name: group_title
  title: Group Title
  items:
  - name: http_enabled
    title: HTTP Enabled
    type: bool
    default: "0"
```
![field named HTTP Enabled with disabled checkbox](/images/config-screen-bool.png)

[View a larger version of this image](/images/config-screen-bool.png)

---


# _valueExample

```yaml
- name: custom_key
  title: Set your secret key for your app
  description: Paste in your Custom Key
  items:
  - name: key
    title: Key
    type: text
    value: "{{repl RandomString 20}}"
```
![config field with random string as HTML input](/images/config-value-randomstring.png)

[View a larger version of this image](/images/config-value-randomstring.png)

---


# _when-note

:::note
`when` is a property of both groups and items. See [Group Properties > `when`](/reference/custom-resource-config#when) above.
:::

---


# _when-requirements

* The `when` property accepts the following types of values:
  * Booleans
  * Strings that match "true", "True", "false", or "False"

  [KOTS template functions](/reference/template-functions-about) can be used to render these supported value types.
* For the `when` property to evaluate to true, the values compared in the conditional statement must match exactly, without quotes.


---


# _whenExample

```yaml
- name: database_settings_group
  title: Database Settings
  items:
  - name: db_type
    title: Database Type
    type: radio
    default: external
    items:
    - name: external
      title: External
    - name: embedded
      title: Embedded DB
  - name: database_host
    title: Database Hostname
    type: text
    when: repl{{ (ConfigOptionEquals "db_type" "external")}}
  - name: database_password
    title: Database Password
    type: password
    when: repl{{ (ConfigOptionEquals "db_type" "external")}}
```

![External option selected and conditional fields displayed](/images/config-when-enabled.png)

[View a larger version of this image](/images/config-when-enabled.png)

![Embedded DB option selected and no additional fields displayed](/images/config-when-disabled.png)

[View a larger version of this image](/images/config-when-disabled.png)

---


# _boolExample

```yaml
bool_config_field:
  value: "1"
```
```yaml
bool_config_field:
  value: "0"
```


---


# _config-values-procedure

During installation, KOTS automatically generates a ConfigValues file and saves the file in a directory called `upstream`. After installation, you can view the generated ConfigValues file in the Admin Console **View files** tab or from the command line by running the `kubectl kots get config` command.

To get the ConfigValues file from an installed application instance:

1. Install the target release in a development environment. You can either install the release with Replicated Embedded Cluster or install in an existing cluster with KOTS. For more information, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded) or [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster).

1. Depending on the installer that you used, do one of the following to get the ConfigValues for the installed instance:

    * **For Embedded Cluster installations**: In the Admin Console, go to the **View files** tab. In the filetree, go to **upstream > userdata** and open **config.yaml**, as shown in the image below:

      ![ConfigValues file in the Admin Console View Files tab](/images/admin-console-view-files-configvalues.png)

      [View a larger version of this image](/images/admin-console-view-files-configvalues.png)

    * **For KOTS installations in an existing cluster**: Run the `kubectl kots get config` command to view the generated ConfigValues file:

      ```bash
      kubectl kots get config --namespace APP_NAMESPACE --decrypt
      ```
      Where:
      * `APP_NAMESPACE` is the cluster namespace where KOTS is running.
      * The `--decrypt` flag decrypts all configuration fields with `type: password`. In the downloaded ConfigValues file, the decrypted value is stored in a `valuePlaintext` field.

      The output of the `kots get config` command shows the contents of the ConfigValues file. For more information about the `kots get config` command, including additional flags, see [kots get config](/reference/kots-cli-get-config).

---


# _configValuesExample

```yaml
apiVersion: kots.io/v1beta1
kind: ConfigValues
spec:
  values:
    text_config_field_name:
      default: Example default value
      value: Example user-provided value
    boolean_config_field_name:
      value: "1"
    password_config_field_name:
      valuePlaintext: examplePassword
```


---


# _fileExample

```yaml
file_config_field:
  filename: my-file.txt
  value: JVBERi0xLjQKMSAw...
```


---


# _passwordExample

```yaml
password_config_field:
  valuePlaintext: myPlainTextPassword
```


---


# _selectOneExample

```yaml
radio_config_field:
  value: option_name
```


---


# _textExample

```yaml
text_config_field:
  value: This is a text field value.
```


---


# _textareaExample

```yaml
textarea_config_field:
  value: This is a text area field value.
```


---


# _wizard

1. In the [Vendor Portal](https://vendor.replicated.com), go to **Custom Domains**.

1. In the section for the target Replicated endpoint, click **Add your first custom domain** for your first domain, or click **Add new domain** for additional domains.

   The **Configure a custom domain** wizard opens.

1. For **Domain**, enter the custom domain. Click **Save & continue**.

1. For **Create CNAME**, copy the text string and use it to create a CNAME record in your DNS account. Click **Continue**.

1. For **Verify ownership**, copy the text string and use it to create a TXT record in your DNS account. Click **Validate & continue**.

   Your changes can take up to 24 hours to propagate.

1. For **TLS cert creation verification**, copy the text string and use it to create a TXT record in your DNS account. Click **Validate & continue**.

   Your changes can take up to 24 hours to propagate.

1. For **Use Domain**, to set the new domain as the default, click **Yes, set as default**. Otherwise, click **Not now**.

   :::note
   Replicated recommends that you do _not_ set a domain as the default until you are ready for it to be used by customers.
   :::

The Vendor Portal marks the domain as **Configured** after the verification checks for ownership and TLS certificate creation are complete.

---


# _additionalImages

```yaml
additionalImages:
  - jenkins/jenkins:lts
```


---


# _additionalNamespaces

```yaml
additionalNamespaces:
  - "*"
```


---


# _allowRollback

```yaml
allowRollback: false
```


---


# _graphs

```yaml
graphs:
  - title: User Signups
    query: 'sum(user_signup_events_total)'
```


---


# _icon

```yaml
icon: https://support.io/img/logo.png
```


---


# _minKotsVersion

```yaml
minKotsVersion: "1.71.0"
```


---


# _ports-applicationURL
* (Optional) `ports.applicationUrl`: When set to the same URL that is specified in the `descriptor.links.url` field of the Kubernetes SIG Application custom resource, KOTS adds a link on the Admin Console dashboard where the given service can be accessed. This process automatically links to the hostname in the browser (where the Admin Console is being accessed) and appends the specified `localPort`.

    If not set, then the URL defined in the `descriptor.links.url` field of the Kubernetes SIG Application is linked on the Admin Console dashboard.
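For example, a minimal sketch of how the two fields line up across the two custom resources (the application name, service name, ports, and URL are illustrative):

```yaml
# Kubernetes SIG Application custom resource
apiVersion: app.k8s.io/v1beta1
kind: Application
metadata:
  name: my-app
spec:
  descriptor:
    links:
      - description: Open App
        url: "http://my-app"
---
# KOTS Application custom resource
apiVersion: kots.io/v1beta1
kind: Application
metadata:
  name: my-app
spec:
  ports:
    - serviceName: my-app
      servicePort: 3000
      localPort: 8888
      applicationUrl: "http://my-app" # matches descriptor.links.url above
```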

---


# _ports-kurl-note

:::note
KOTS does not automatically create port forwards for installations on VMs or bare metal servers with Replicated Embedded Cluster or Replicated kURL. This is because it cannot be verified that the ports are secure and authenticated. Instead, Embedded Cluster or kURL creates a NodePort service to make the Admin Console accessible on a port on the node (port `8800` for kURL or port `30000` for Embedded Cluster).

You can expose additional ports on the node for Embedded Cluster or kURL installations by creating NodePort services. For more information, see [Exposing Services Using NodePorts](/vendor/kurl-nodeport-services).
:::

---


# _ports-localPort
* `ports.localPort`: The port to map on the local workstation.
---


# _ports-serviceName
* `ports.serviceName`: The name of the service that receives the traffic.
---


# _ports-servicePort
* `ports.servicePort`: The `containerPort` of the Pod where the service is running.

---


# _ports

```yaml
ports:
  - serviceName: web
    servicePort: 9000
    localPort: 9000
    applicationUrl: "http://web"
```


---


# _proxyRegistryDomain

```yaml
proxyRegistryDomain: "proxy.mycompany.com"
```

---


# _releaseNotes

```yaml
releaseNotes: Fixes a bug and adds a new feature.
```


---


# _replicatedRegistryDomain

```yaml
replicatedRegistryDomain: "registry.mycompany.com"
```


---


# _requireMinimalRBACPrivileges

```yaml
requireMinimalRBACPrivileges: false
```


---


# _servicePort-note

:::note
Ensure that you use the `containerPort` and not the `servicePort`. The `containerPort` and `servicePort` are often the same port, though it is possible that they are different.
:::

---


# _statusInformers

```yaml
statusInformers:
  - deployment/my-web-svc
  - deployment/my-worker
```
The following example shows excluding a specific status informer based on a user-supplied value from the Admin Console Configuration screen:
```yaml
statusInformers:
  - deployment/my-web-svc
  - '{{repl if ConfigOptionEquals "option" "value"}}deployment/my-worker{{repl else}}{{repl end}}'
```


---


# _supportMinimalRBACPrivileges

```yaml
supportMinimalRBACPrivileges: true
```


---


# _targetKotsVersion

```yaml
targetKotsVersion: "1.85.0"
```


---


# _title

```yaml
title: My Application
```


---


# _change-channel

You can change the channel a customer is assigned to at any time. For installations with Replicated KOTS, when you change the customer's channel, the customer can synchronize their license in the Replicated Admin Console to fetch the latest release on the new channel and then upgrade. The Admin Console always fetches the latest release on the new channel, regardless of the presence of any releases on the channel that are marked as required.

---


# _download

You can download customer and instance data from the **Download CSV** dropdown on the **Customers** page:

![Download CSV button in the Customers page](/images/customers-download-csv.png)

[View a larger version of this image](/images/customers-download-csv.png)

The **Download CSV** dropdown has the following options:

* **Customers**: Includes details about your customers, such as the customer's channel assignment, license entitlements, expiration date, last active timestamp, and more.

* (Recommended) **Customers + Instances**: Includes details about the instances associated with each customer, such as the Kubernetes distribution and cloud provider of the cluster where the instance is running, the most recent application instance status, if the instance is active or inactive, and more. The **Customers + Instances** data is a superset of the customer data, and is the recommended download for most use cases.

You can also export customer instance data as JSON using the Vendor API v3 `customer_instances` endpoint. For more information, see [Get customer instance report in CSV or JSON format](https://replicated-vendor-api.readme.io/reference/listappcustomerinstances) in the Vendor API v3 documentation.

---


# _definition

Replicated Embedded Cluster allows you to distribute a Kubernetes cluster and your application together as a single appliance, making it easy for enterprise users to install, update, and manage the application and the cluster in tandem. Embedded Cluster is based on the open source Kubernetes distribution k0s.
For more information, see the [k0s documentation](https://docs.k0sproject.io/stable/). + +For software vendors, Embedded Cluster provides a Config for defining characteristics of the cluster that will be created in the customer environment. Additionally, each version of Embedded Cluster includes a specific version of Replicated KOTS, ensuring compatibility between KOTS and the cluster. For enterprise users, cluster updates are done automatically at the same time as application updates, allowing users to more easily keep the cluster up-to-date without needing to use kubectl. + +--- + + +# _ec-config + +```yaml +apiVersion: embeddedcluster.replicated.com/v1beta1 +kind: Config +spec: + version: 2.1.3+k8s-1.30 +``` + +--- + + +# _multi-node-ha-arch + +The following diagram shows the architecture of an HA multi-node Embedded Cluster installation: + +![Embedded Cluster multi-node architecture with high availability](/images/embedded-architecture-multi-node-ha.png) + +[View a larger version of this image](/images/embedded-architecture-multi-node-ha.png) + +As shown in the diagram above, in HA installations with Embedded Cluster: +* A single replica of the Embedded Cluster Operator is deployed and runs on a controller node. +* A single replica of the KOTS Admin Console is deployed and runs on a controller node. +* Three replicas of rqlite are deployed in the kotsadm namespace. Rqlite is used by KOTS to store information such as support bundles, version history, application metadata, and other small amounts of data needed to manage the application. +* For installations that include disaster recovery, the Velero pod is deployed on one node. The Velero Node Agent runs on each node in the cluster. The Node Agent is a Kubernetes DaemonSet that performs backup and restore tasks such as creating snapshots and transferring data during restores. +* For air gap installations, two replicas of the air gap image registry are deployed. + +Any Helm [`extensions`](/reference/embedded-config#extensions) that you include in the Embedded Cluster Config are installed in the cluster depending on the given chart and whether or not it is configured to be deployed with high availability. + +--- + + +# _port-reqs + +This section lists the ports used by Embedded Cluster. These ports must be open and available for both single- and multi-node installations. + +#### Ports Used by Local Processes + +The following ports must be open and available for use by local processes running on the same node. It is not necessary to create firewall openings for these ports. + +* 2379/TCP +* 9099/TCP +* 10248/TCP +* 10257/TCP +* 10259/TCP + +#### Ports Required for Bidirectional Communication Between Nodes + +The following ports are used for bidirectional communication between nodes. + +For multi-node installations, create firewall openings between nodes for these ports. + +For single-node installations, ensure that there are no other processes using these ports. Although there is no communication between nodes in single-node installations, these ports are still required. + +* 2380/TCP +* 4789/UDP +* 6443/TCP +* 7443/TCP +* 9091/TCP +* 9443/TCP +* 10249/TCP +* 10250/TCP +* 10256/TCP + +#### Admin Console Port + +The KOTS Admin Console requires that port 30000/TCP is open and available. Create a firewall opening for port 30000/TCP so that the Admin Console can be accessed by the end user. + +Additionally, port 30000 must be accessible by nodes joining the cluster. 
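For example, one way to confirm that nothing on the host is already listening on the default Admin Console port before installing, assuming the `ss` utility is available on the node:

```bash
# No output means port 30000 is free
sudo ss -lntp | grep ':30000'
```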
If port 30000 is occupied, you can select a different port for the Admin Console during installation. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install).

#### LAM Port

The Local Artifact Mirror (LAM) requires that port 50000/TCP is open and available.

If port 50000 is occupied, you can select a different port for the LAM during installation. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install).


---


# _proxy-install-limitations

**Limitations:**

* If any of your [Helm extensions](/reference/embedded-config#extensions) make requests to the internet, the given charts need to be manually configured so that those requests are made to the user-supplied proxy server instead. Typically, this requires updating the Helm values to set HTTP proxy, HTTPS proxy, and no proxy. Note that this limitation applies only to network requests made by your Helm extensions. The proxy settings supplied to the install command are used to pull the containers required to run your Helm extensions.

* Proxy settings cannot be changed after installation or during upgrade.

---


# _proxy-install-reqs

**Requirement:** Proxy installations require Embedded Cluster 1.5.1 or later with Kubernetes 1.29 or later.

---


# _requirements

* Linux operating system

* x86-64 architecture

* systemd

* At least 2GB of memory and 2 CPU cores

* The disk on the host must have a maximum P99 write latency of 10 ms. This supports etcd performance and stability. For more information about the disk write latency requirements for etcd, see [Disks](https://etcd.io/docs/latest/op-guide/hardware/#disks) in _Hardware recommendations_ and [What does the etcd warning “failed to send out heartbeat on time” mean?](https://etcd.io/docs/latest/faq/) in the etcd documentation.

* The filesystem at `/var/lib/embedded-cluster` must have 40Gi or more of total space and be less than 80% full.

  The directory used for data storage can be changed by passing the `--data-dir` flag with the Embedded Cluster `install` command. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install).

  Note that in addition to the primary `/var/lib/embedded-cluster` directory, Embedded Cluster creates directories and files in the following locations:

  - `/etc/cni`
  - `/etc/k0s`
  - `/opt/cni`
  - `/opt/containerd`
  - `/run/calico`
  - `/run/containerd`
  - `/run/k0s`
  - `/sys/fs/cgroup/kubepods`
  - `/sys/fs/cgroup/system.slice/containerd.service`
  - `/sys/fs/cgroup/system.slice/k0scontroller.service`
  - `/usr/libexec/k0s`
  - `/var/lib/calico`
  - `/var/lib/cni`
  - `/var/lib/containers`
  - `/var/lib/kubelet`
  - `/var/log/calico`
  - `/var/log/containers`
  - `/var/log/pods`
  - `/usr/local/bin/k0s`

* (Online installations only) Access to replicated.app and proxy.replicated.com, or your custom domain for each endpoint

* Embedded Cluster is based on k0s, so all k0s system requirements and external runtime dependencies apply. See [System requirements](https://docs.k0sproject.io/stable/system-requirements/) and [External runtime dependencies](https://docs.k0sproject.io/stable/external-runtime-deps/) in the k0s documentation.


---


# _update-air-gap-admin-console

1. 
On a machine with browser access (for example, where you accessed the Admin Console to configure the application), download the air gap bundle for the new version using the same curl command that you used to install. For example: + + ```bash + curl -f https://replicated.app/embedded/APP_SLUG/CHANNEL_SLUG?airgap=true -H "Authorization: LICENSE_ID" -o APP_SLUG-CHANNEL_SLUG.tgz + ``` + For more information, see [Install](/enterprise/installing-embedded-air-gap#install). + +1. Untar the tarball. For example: + + ```bash + tar -xvzf APP_SLUG-CHANNEL_SLUG.tgz + ``` + Ensure that the `.airgap` air gap bundle is present. + +1. On the same machine, use a browser to access the Admin Console. + +1. On the **Version history** page, click **Upload new version** and choose the `.airgap` air gap bundle you downloaded. + +1. When the air gap bundle has been uploaded, click **Deploy** next to the new version. + +1. On the **Config** screen of the upgrade wizard, make any necessary changes to the configuration for the application. Click **Next**. + + ![Config screen in the upgrade wizard](/images/ec-upgrade-wizard-config.png) + + [View a larger version of this image](/images/ec-upgrade-wizard-config.png) + + :::note + Any changes made on the **Config** screen of the upgrade wizard are not set until the new version is deployed. + ::: + +1. On the **Preflight** screen, view the results of the preflight checks. + + ![Preflight screen in the upgrade wizard](/images/ec-upgrade-wizard-preflights.png) + + [View a larger version of this image](/images/ec-upgrade-wizard-preflights.png) + +1. On the **Confirm** screen, click **Deploy**. + + ![Confirmation screen in the upgrade wizard](/images/ec-upgrade-wizard-confirm.png) + + [View a larger version of this image](/images/ec-upgrade-wizard-confirm.png) + +--- + + +# _update-air-gap-cli + +1. SSH onto a controller node in the cluster and download the air gap bundle for the new version using the same curl command that you used to install. For example: + + ```bash + curl -f https://replicated.app/embedded/APP_SLUG/CHANNEL_SLUG?airgap=true -H "Authorization: LICENSE_ID" -o APP_SLUG-CHANNEL_SLUG.tgz + ``` + + For more information, see [Install](/enterprise/installing-embedded-air-gap#install). + +1. Untar the tarball. For example: + + ```bash + tar -xvzf APP_SLUG-CHANNEL_SLUG.tgz + ``` + Ensure that the `.airgap` air gap bundle is present. + +1. Use the `update` command to upload the air gap bundle and make this new version available in the Admin Console. For example: + + ```bash + ./APP_SLUG update --airgap-bundle APP_SLUG.airgap + ``` + +1. When the air gap bundle has been uploaded, open a browser on the same machine and go to the Admin Console. + +1. On the **Version history** page, click **Deploy** next to the new version. + + ![Version history page](/images/ec-upgrade-version-history.png) + + [View a larger version of this image](/images/ec-upgrade-version-history.png) + +1. On the **Config** screen of the upgrade wizard, make any necessary changes to the configuration for the application. Click **Next**. + + ![Config screen in the upgrade wizard](/images/ec-upgrade-wizard-config.png) + + [View a larger version of this image](/images/ec-upgrade-wizard-config.png) + + :::note + Any changes made on the **Config** screen of the upgrade wizard are not set until the new version is deployed. + ::: + +1. On the **Preflight** screen, view the results of the preflight checks. 
   ![Preflight screen in the upgrade wizard](/images/ec-upgrade-wizard-preflights.png)

   [View a larger version of this image](/images/ec-upgrade-wizard-preflights.png)

1. On the **Confirm** screen, click **Deploy**.

   ![Confirmation screen in the upgrade wizard](/images/ec-upgrade-wizard-confirm.png)

   [View a larger version of this image](/images/ec-upgrade-wizard-confirm.png)

---


# _update-air-gap-overview

To upgrade an installation, new air gap bundles can be uploaded to the Admin Console from the browser or with the Embedded Cluster binary from the command line.

Using the binary is faster and allows the user to download the air gap bundle directly to the machine where the Embedded Cluster is running. Using the browser is slower because the user must first download the air gap bundle to a machine with a browser and then upload that bundle to the Admin Console for processing.

---


# _update-overview

When you update an application installed with Embedded Cluster, you update both the application and the cluster infrastructure together, including Kubernetes, KOTS, and other components running in the cluster. There is no need or mechanism to update the infrastructure on its own.

When you deploy a new version, any changes to the cluster are deployed first. The Admin Console waits until the cluster is ready before updating the application.

Any changes made to the Embedded Cluster Config, including changes to the Embedded Cluster version, Helm extensions, and unsupported overrides, trigger a cluster update.

When performing an upgrade with Embedded Cluster, the user is able to change the application config before deploying the new version. Additionally, the user's license is synced automatically. Users can also make config changes and sync their license outside of performing an update. This requires deploying a new version to apply the config change or license sync.

---


# _warning-do-not-downgrade

:::important
Do not downgrade the Embedded Cluster version. This is not supported but is not prohibited, and it can lead to unexpected behavior.
:::

---


# _create-promote-release

Create a new release and promote it to the Unstable channel. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Managing Releases with the CLI](releases-creating-cli).

---


# _csdl-overview

Commercial software distribution is the business process that independent software vendors (ISVs) use to enable enterprise customers to self-host a fully private instance of the vendor's application in an environment controlled by the customer.

Replicated has developed the Commercial Software Distribution Lifecycle to represent the stages that are essential for every company that wants to deliver their software securely and reliably to customer-controlled environments.

This lifecycle was inspired by the DevOps lifecycle and the Software Development Lifecycle (SDLC), but it focuses on the unique things that must be done to successfully distribute third-party commercial software to tens, hundreds, or thousands of enterprise customers.
+ +--- + + +# _gitea-ec-config + +```yaml +apiVersion: embeddedcluster.replicated.com/v1beta1 +kind: Config +spec: + version: 2.1.3+k8s-1.30 +``` + +--- + + +# _gitea-helmchart-cr-ec + +```yaml +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: gitea +spec: + # chart identifies a matching chart from a .tgz + chart: + name: gitea + chartVersion: 1.0.6 + optionalValues: + - when: 'repl{{ eq Distribution "embedded-cluster" }}' + recursiveMerge: false + values: + service: + type: NodePort + nodePorts: + http: "32000" +``` + +--- + + +# _gitea-helmchart-cr + +```yaml +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: gitea +spec: + # chart identifies a matching chart from a .tgz + chart: + name: gitea + chartVersion: 1.0.6 +``` + + +--- + + +# _gitea-k8s-app-cr + +```yaml +apiVersion: app.k8s.io/v1beta1 +kind: Application +metadata: + name: "gitea" +spec: + descriptor: + links: + - description: Open App + # needs to match applicationUrl in kots-app.yaml + url: "http://gitea" +``` + + +--- + + +# _gitea-kots-app-cr-ec + +```yaml +apiVersion: kots.io/v1beta1 +kind: Application +metadata: + name: gitea +spec: + title: Gitea + statusInformers: + - deployment/gitea + ports: + - serviceName: "gitea" + servicePort: 3000 + localPort: 32000 + applicationUrl: "http://gitea" + icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/kubernetes/icon/color/kubernetes-icon-color.png +``` + + +--- + + +# _gitea-kots-app-cr + +```yaml +apiVersion: kots.io/v1beta1 +kind: Application +metadata: + name: gitea +spec: + title: Gitea + statusInformers: + - deployment/gitea + ports: + - serviceName: "gitea" + servicePort: 3000 + localPort: 8888 + applicationUrl: "http://gitea" + icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/kubernetes/icon/color/kubernetes-icon-color.png +``` + +--- + + +# _grafana-config + +```yaml + apiVersion: kots.io/v1beta1 + kind: Config + metadata: + name: grafana-config + spec: + groups: + - name: grafana + title: Grafana + description: Grafana Configuration + items: + - name: admin_user + title: Admin User + type: text + default: 'admin' + - name: admin_password + title: Admin Password + type: password + default: 'admin' + ``` + +--- + + +# _grafana-helmchart + +```yaml +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: grafana +spec: + # chart identifies a matching chart from a .tgz + chart: + name: grafana + chartVersion: 9.6.5 + values: + admin: + user: "repl{{ ConfigOption `admin_user`}}" + password: "repl{{ ConfigOption `admin_password`}}" +``` + +--- + + +# _grafana-k8s-app + +```yaml +apiVersion: app.k8s.io/v1beta1 +kind: Application +metadata: + name: "grafana" +spec: + descriptor: + links: + - description: Open App + # needs to match applicationUrl in kots-app.yaml + url: "http://grafana" +``` + +--- + + +# _grafana-kots-app + +```yaml +apiVersion: kots.io/v1beta1 +kind: Application +metadata: + name: grafana +spec: + title: Grafana + statusInformers: + - deployment/grafana + ports: + - serviceName: "grafana" + servicePort: 3000 + localPort: 8888 + applicationUrl: "http://grafana" + icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/kubernetes/icon/color/kubernetes-icon-color.png +``` + +--- + + +# _kubernetes-training + +:::note +This tutorial assumes that you have a working knowledge of Kubernetes. For an introduction to Kubernetes and free training resources, see [Training](https://kubernetes.io/training/) in the Kubernetes documentation. 
:::


---


# _labs-intro

Replicated also offers a sandbox environment where you can complete several beginner, intermediate, and advanced labs. The sandbox environment automatically provisions the required Kubernetes cluster or VM where you will install a sample application as part of the labs.

To get started with an introductory lab, see [Deploy a Hello World Application with Replicated](https://play.instruqt.com/replicated/tracks/hello-world).


---


# _related-topics

For more information about the subjects in the getting started tutorials, see the following topics:

* [Installing the Replicated CLI](/reference/replicated-cli-installing)
* [Linter Rules](/reference/linter)
* [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster)
* [Performing Updates in Existing Clusters](/enterprise/updating-app-manager)


---


# _replicated-definition

Replicated is a commercial software distribution platform. Independent software vendors (ISVs) can use features of the Replicated Platform to distribute modern commercial software into complex, customer-controlled environments, including on-prem and air gap.

---


# _test-your-changes

Install the release to test your changes. For Embedded Cluster installations, see [Performing Updates in Embedded Clusters](/enterprise/updating-embedded). For existing cluster installations with KOTS, see [Performing Updates in Existing Clusters](/enterprise/updating-app-manager).

---


# _tutorial-intro

This tutorial introduces you to the Replicated features for software vendors and their enterprise users. It is designed to familiarize you with the key concepts and processes that you use as a software vendor when you package and distribute your application with Replicated.

In this tutorial, you use a set of sample manifest files for a basic NGINX application to learn how to:
* Create and promote releases for an application as a software vendor
* Install and update an application on a Kubernetes cluster as an enterprise user


---


# _vm-requirements

For this tutorial, the VM must meet the following requirements:

  * Ubuntu 18.04
  * At least 8 GB of RAM
  * 4 CPU cores
  * At least 50 GB of disk space

  :::note
  If you use a virtual machine that is behind a firewall, make sure that port 8800 (and any other ports you attempt to access through the internet) is allowed to accept traffic. GCP and AWS typically require firewall rule creation to expose ports.
  :::

For the complete list of system requirements for kURL, see [kURL Requirements](/enterprise/installing-general-requirements#kurl-requirements) in _Installation Requirements_.


---


# _gitops-not-recommended

:::important
KOTS Auto-GitOps is a legacy feature and is **not recommended** for use. For modern enterprise customers that prefer software deployment processes that use CI/CD pipelines, Replicated recommends the [Helm CLI installation method](/vendor/install-with-helm), which is more commonly used in these types of enterprise environments.
:::

---


# _gitops-limitation

The KOTS Auto-GitOps workflow is not supported for installations with the HelmChart custom resource `apiVersion: kots.io/v1beta2` or the HelmChart custom resource `apiVersion: kots.io/v1beta1` with `useHelmInstall: true`.
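For reference, a minimal sketch of a HelmChart custom resource that falls under this limitation (the chart name and version are illustrative):

```yaml
apiVersion: kots.io/v1beta1
kind: HelmChart
metadata:
  name: samplechart
spec:
  chart:
    name: samplechart
    chartVersion: 3.1.7
  # v1beta1 with useHelmInstall: true is not supported with Auto-GitOps
  useHelmInstall: true
```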
+ + +--- + + +# _helm-builder-requirements + +The `builder` key has the following requirements and recommendations: +* Replicated recommends that you include only the minimum Helm values in the `builder` key that are required to template the Helm chart with the correct image tags. +* Use only static, or _hardcoded_, values in the `builder` key. You cannot use template functions in the `builder` key because values in the `builder` key are not rendered in a customer environment. +* Any `required` Helm values that need to be set to render the chart templates must have a value supplied in the `builder` key. For more information about the Helm `required` function, see [Using the 'required' function](https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-required-function) in the Helm documentation. + +--- + + +# _helm-cr-builder-airgap-intro + +In the `builder` key, you provide the minimum Helm values required to render the chart templates so that the output includes any images that must be included in the air gap bundle. The Vendor Portal uses these values to render the Helm chart templates when building the `.airgap` bundle for the release. + +--- + + +# ... + +For example, a Helm chart might include a conditional PostgreSQL Deployment, as shown in the Helm template below: + +```yaml +{{- if .Values.postgresql.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: postgresql + labels: + app: postgresql +spec: + selector: + matchLabels: + app: postgresql + template: + metadata: + labels: + app: postgresql + spec: + containers: + - name: postgresql + image: "postgres:10.17" + ports: + - name: postgresql + containerPort: 80 +# ... +{{- end }} +``` + +To ensure that the `postgresql` image is included in the air gap bundle for the release, the `postgresql.enabled` value is added to the `builder` key of the HelmChart custom resource and is hardcoded to `true`: + +```yaml +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: samplechart +spec: + chart: + name: samplechart + chartVersion: 3.1.7 + values: + postgresql: + enabled: repl{{ ConfigOptionEquals "postgres_type" "embedded_postgres"}} + builder: + postgresql: + enabled: true +``` + +--- + + +# _helm-cr-chart-name + +The name of the chart. This value must exactly match the `name` field from a `Chart.yaml` in a `.tgz` chart archive that is also included in the release. If the names do not match, then the installation can error or fail. + + +--- + + +# _helm-cr-chart-release-name + +Specifies the release name to use when installing this instance of the Helm chart. Defaults to the chart name. + +The release name must be unique across all charts deployed in the namespace. To deploy multiple instances of the same Helm chart in a release, you must add an additional HelmChart custom resource with a unique release name for each instance of the Helm chart. + +Must be a valid Helm release name that matches regex `^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$` and is no longer than 53 characters. + + +--- + + +# _helm-cr-chart-version + +The version of the chart. This value must match the `version` field from a `Chart.yaml` in a `.tgz` chart archive that is also included in the release. + + +--- + + +# _helm-cr-chart + +The `chart` key allows for a mapping between the data in this definition and the chart archive itself. +More than one `kind: HelmChart` can reference a single chart archive, if different settings are needed. 
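For example, a minimal sketch of two `kind: HelmChart` resources that reference the same chart archive with different settings (the names, version, and values are illustrative):

```yaml
apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: samplechart-primary
spec:
  chart:
    name: samplechart
    chartVersion: 3.1.7
  # each instance needs a unique release name
  releaseName: samplechart-primary
  values:
    replicaCount: 3
---
apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: samplechart-secondary
spec:
  chart:
    name: samplechart
    chartVersion: 3.1.7
  releaseName: samplechart-secondary
  values:
    replicaCount: 1
```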
---


# _helm-cr-exclude

The `exclude` attribute is used to make charts optional. The `exclude` attribute can be parsed by template functions.

When Replicated KOTS processes Helm charts, it excludes the entire chart if the output of the `exclude` field can be parsed as a boolean evaluating to `true`.

For more information about optional charts, template functions, and how KOTS processes Helm charts, see:

* [Optional Charts](/vendor/helm-optional-charts)
* [About Template Function Contexts](template-functions-about)
* [About Distributing Helm Charts with KOTS](/vendor/helm-native-about)

---


# _helm-cr-namespace

The `namespace` key specifies an alternative namespace where Replicated KOTS installs the Helm chart. **Default:** The Helm chart is installed in the same namespace as the Admin Console. The `namespace` attribute can be parsed by template functions. For more information about template functions, see [About template function contexts](template-functions-about).


If you specify a namespace in the HelmChart `namespace` field, you must also include the same namespace in the `additionalNamespaces` field of the Application custom resource manifest file. KOTS creates the namespaces listed in the `additionalNamespaces` field during installation. For more information, see [additionalNamespaces](custom-resource-application#additionalnamespaces) in the _Application_ reference.

---


# _helm-cr-optional-values-recursive-merge

The `optionalValues.recursiveMerge` boolean defines how KOTS merges `values` and `optionalValues`:

* When `optionalValues.recursiveMerge` is false, the top level keys in `optionalValues` override the top level keys in `values`. By default, `optionalValues.recursiveMerge` is set to false.

* When `optionalValues.recursiveMerge` is true, all keys from `values` and `optionalValues` are included. In the case of a conflict where there is a matching key in `optionalValues` and `values`, KOTS uses the value of the key from `optionalValues`.

---


# _helm-cr-optional-values-when

The `optionalValues.when` field defines a conditional statement that must evaluate to true for the given values to be set. Evaluation of the conditional in the `optionalValues.when` field is deferred until render time in the customer environment.

Use KOTS template functions to write the `optionalValues.when` conditional statement. The following example shows a conditional statement for selecting a database option on the Admin Console configuration screen:

```yaml
optionalValues:
  - when: repl{{ ConfigOptionEquals "postgres_type" "external_postgres"}}
```

For more information about using KOTS template functions, see [About Template Functions](/reference/template-functions-about).

---


# _helm-cr-optional-values

The `optionalValues` key can be used to set values in the Helm chart `values.yaml` file when a given conditional statement evaluates to true. For example, if a customer chooses to include an optional application component in their deployment, it might be necessary to include Helm chart values related to the optional component.

`optionalValues` includes the following properties:

* `optionalValues.when`: Defines a conditional statement using KOTS template functions. If `optionalValues.when` evaluates to true, then the values specified in `optionalValues` are set.

* `optionalValues.recursiveMerge`: Defines how `optionalValues` is merged with `values`.

* `optionalValues.values`: An array of key-value pairs. A combined sketch of these properties is shown below.
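Putting these properties together, a minimal sketch (the config option name and chart values are illustrative):

```yaml
optionalValues:
  - when: 'repl{{ ConfigOptionEquals "postgres_type" "external_postgres"}}'
    recursiveMerge: false
    values:
      postgresql:
        enabled: false
```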
+ +--- + + +# _helm-cr-upgrade-flags + +Specifies additional flags to pass to the `helm upgrade` command for charts. These flags are passed in addition to any flags Replicated KOTS passes by default. The values specified here take precedence if KOTS already passes the same flag. The `helmUpgradeFlags` attribute can be parsed by template functions. For more information about template functions, see [About template function contexts](template-functions-about). + +KOTS uses `helm upgrade` for _all_ deployments of an application, not just upgrades, by specifying the `--install` flag. For non-boolean flags that require an additional argument, such as `--timeout 1200s`, you must use an equal sign (`=`) or specify the additional argument separately in the array. + +**Example:** + +```yaml +helmUpgradeFlags: + - --timeout + - 1200s + - --history-max=15 +``` + +--- + + +# _helm-cr-values + +The `values` key can be used to set or delete existing values in the Helm chart `values.yaml` file. Any values that you include in the `values` key must match values in the Helm chart `values.yaml`. For example, `spec.values.images.pullSecret` in the HelmChart custom resource matches `images.pullSecret` in the Helm chart `values.yaml`. + +During installation or upgrade with KOTS, `values` is merged with the Helm chart `values.yaml` in the chart archive. Only include values in the `values` key that you want to set or delete. + +--- + + +# _helm-cr-weight-limitation + +The `weight` field is _not_ supported for HelmChart custom resources with `useHelmInstall: false`. + +--- + + +# _helm-cr-weight + +Determines the order in which KOTS applies the Helm chart. Charts are applied by weight in ascending order, with lower weights applied first. **Supported values:** Positive or negative integers. **Default:** `0` + +In KOTS v1.99.0 and later, `weight` also determines the order in which charts are uninstalled. Charts are uninstalled by weight in descending order, with higher weights uninstalled first. For more information about uninstalling applications, see [remove](kots-cli-remove) in _KOTS CLI_. + +For more information, see [Orchestrating Resource Deployment](/vendor/orchestrating-resource-deployment). + +--- + + +# _helm-definition + +Helm is a popular open source package manager for Kubernetes applications. Many ISVs use Helm to configure and deploy Kubernetes applications because it provides a consistent, reusable, and sharable packaging format. For more information, see the [Helm documentation](https://helm.sh/docs). + +--- + + +# _helm-install-beta + +The Helm installation method is Beta and is not recommended for production releases. The features and availability of the Helm installation method are subject to change. + +--- + + +# _helm-install-prereqs + +* The customer used to install must have a valid email address. This email address is only used as a username for the Replicated registry and is never contacted. For more information about creating and editing customers in the Vendor Portal, see [Creating a Customer](/vendor/releases-creating-customer). + +* The customer used to install must have the **Existing Cluster (Helm CLI)** install type enabled. For more information about enabling install types for customers in the Vendor Portal, see [Managing Install Types for a License](licenses-install-types). 
+ +* To ensure that the Replicated proxy registry can be used to grant proxy access to your application images during Helm installations, you must create an image pull secret for the proxy registry and add it to your Helm chart. To do so, follow the steps in [Using the Proxy Registry with Helm Installations](/vendor/helm-image-registry). + +* Declare the SDK as a dependency in your Helm chart. For more information, see [Install the SDK as a Subchart](replicated-sdk-installing#install-the-sdk-as-a-subchart) in _Installing the Replicated SDK_. + +--- + + +# _helm-package + +```bash +helm package -u PATH_TO_CHART +``` +Where: +* `-u` or `--dependency-update` is an option for the `helm package` command that updates chart dependencies before packaging. For more information, see [Helm Package](https://helm.sh/docs/helm/helm_package/) in the Helm documentation. +* `PATH_TO_CHART` is the path to the Helm chart in your local directory. For example, `helm package -u .`. + +The Helm chart, including any dependencies, is packaged and copied to your current directory in a `.tgz` file. The file uses the naming convention: `CHART_NAME-VERSION.tgz`. For example, `postgresql-8.1.2.tgz`. + +--- + + +# _helm-template-limitation + +Helm's `lookup` function and some values in the built-in `Capabilities` object are not supported with the `kots.io/v1beta1` HelmChart custom resource. + + This is because KOTS uses the `helm template` command to render chart templates locally. During rendering, Helm does not have access to the cluster where the chart will be installed. For more information, see [Kubernetes and Chart Functions](https://helm.sh/docs/chart_template_guide/function_list/#kubernetes-and-chart-functions) in the Helm documentation. + +--- + + +# _helm-version-limitation + +Support for Helm v2, including security patches, ended on November 13, 2020. If you specified `helmVersion: v2` in any HelmChart custom resources, update your references to v3. By default, KOTS uses Helm v3 to process all Helm charts. + +--- + + +# _hook-weights-limitation + +Hook weights below -9999 are not supported. All hook weights must be set to a value above -9999 to ensure the Replicated image pull secret is deployed before any resources are pulled. + +--- + + +# _hooks-limitation + +The following hooks are not supported and are ignored if they are present: + * `test` + * `pre-rollback` + * `post-rollback` + +--- + + +# _installer-only-annotation + +Any other Kubernetes resources in the release (such as Kubernetes Deployments or Services) must include the `kots.io/installer-only` annotation. + +The `kots.io/installer-only` annotation indicates that the Kubernetes resource is used only by the Replicated installers (Embedded Cluster, KOTS, and kURL). + +Example: +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service + annotations: + kots.io/installer-only: "true" +``` + +--- + + +# _kots-helm-cr-description + +To deploy Helm charts, KOTS requires a unique HelmChart custom resource for each Helm chart `.tgz` archive in the release. You configure the HelmChart custom resource to provide the necessary instructions to KOTS for processing and preparing the chart for deployment. Additionally, the HelmChart custom resource creates a mapping between KOTS and your Helm chart to allow Helm values to be dynamically set during installation or upgrade. + +--- + + +# _replicated-deprecated + +The HelmChart custom resource `apiVersion: kots.io/v1beta1` is deprecated. 
For installations with Replicated KOTS v1.99.0 and later, use the HelmChart custom resource with `apiVersion: kots.io/v1beta2` instead. See [HelmChart v2](/reference/custom-resource-helmchart-v2) and [Configuring the HelmChart Custom Resource v2](/vendor/helm-native-v2-using).

---


# _replicated-helm-migration

You cannot migrate existing Helm charts in existing installations from the `useHelmInstall: false` installation method to a different method. If KOTS already installed the Helm chart previously in the environment using a HelmChart custom resource with `apiVersion: kots.io/v1beta1` and `useHelmInstall: false`, then KOTS does not attempt to install the chart using a different method and displays the following error message: `Deployment method for chart has changed`.

To change the installation method from `useHelmInstall: false` to a different method, the user must reinstall your application in a new environment.

---


# Helm chart values.yaml

Using KOTS template functions in the [Config](/reference/template-functions-config-context) context allows you to set Helm values based on user-supplied values from the KOTS Admin Console configuration page.

For example, the following Helm chart `values.yaml` file contains `postgresql.enabled`, which is set to `false`:

```yaml
# Helm chart values.yaml
postgresql:
  enabled: false
```
The following HelmChart custom resource contains a mapping to `postgresql.enabled` in its `values` key:

```yaml
# KOTS HelmChart custom resource

apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: samplechart
spec:
  chart:
    name: samplechart
    chartVersion: 3.1.7

  releaseName: samplechart-release-1

  values:
    postgresql:
      enabled: repl{{ ConfigOptionEquals `postgres_type` `embedded_postgres`}}
```

The `values.postgresql.enabled` field in the HelmChart custom resource above uses the Replicated [ConfigOptionEquals](/reference/template-functions-config-context#configoptionequals) template function to evaluate the user's selection for a `postgres_type` configuration option.

During installation or upgrade, the template function is rendered to true or false based on the user's selection. Then, KOTS sets the matching `postgresql.enabled` value in the Helm chart `values.yaml` file accordingly.

---


# KOTS HelmChart custom resource

Using KOTS template functions in the [License](/reference/template-functions-license-context) context allows you to set Helm values based on the unique license file used for installation or upgrade.

For example, the following HelmChart custom resource uses the Replicated [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue) template function to evaluate if the license has the boolean `newFeatureEntitlement` field set to `true`:

```yaml
# KOTS HelmChart custom resource

apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: samplechart
spec:
  chart:
    name: samplechart
    chartVersion: 3.1.7

  releaseName: samplechart-release-1

  values:
    newFeature:
      enabled: repl{{ LicenseFieldValue "newFeatureEntitlement" }}
```

During installation or upgrade, the LicenseFieldValue template function is rendered based on the user's license. Then, KOTS sets the matching `newFeature.enabled` value in the Helm chart `values.yaml` file accordingly.
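For example, assuming the user's license sets `newFeatureEntitlement` to `true`, the value that KOTS supplies to the chart would render as:

```yaml
newFeature:
  enabled: true
```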
+ +--- + + +# _v2-native-helm-cr-example + +```yaml +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: samplechart +spec: + # chart identifies a matching chart from a .tgz + chart: + name: samplechart + chartVersion: 3.1.7 + + releaseName: samplechart-release-1 + + exclude: "repl{{ ConfigOptionEquals `include_chart` `include_chart_no`}}" + + # weight determines the order that charts are applied, with lower weights first. + weight: 42 + + # helmUpgradeFlags specifies additional flags to pass to the `helm upgrade` command. + helmUpgradeFlags: + - --skip-crds + - --no-hooks + - --timeout + - 1200s + - --history-max=15 + + # values are used in the customer environment as a pre-render step + # these values are supplied to helm template + values: + postgresql: + enabled: repl{{ ConfigOptionEquals `postgres_type` `embedded_postgres`}} + + optionalValues: + - when: "repl{{ ConfigOptionEquals `postgres_type` `external_postgres`}}" + recursiveMerge: false + values: + postgresql: + postgresqlDatabase: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_database`}}repl{{ end}}" + postgresqlUsername: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_username`}}repl{{ end}}" + postgresqlHost: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_host`}}repl{{ end}}" + postgresqlPassword: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_password`}}repl{{ end}}" + postgresqlPort: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_port`}}repl{{ end}}" + # adds backup labels to postgresql if the license supports snapshots + - when: "repl{{ LicenseFieldValue `isSnapshotSupported` }}" + recursiveMerge: true + values: + postgresql: + commonLabels: + kots.io/backup: velero + kots.io/app-slug: my-app + podLabels: + kots.io/backup: velero + kots.io/app-slug: my-app + + # namespace allows for a chart to be installed in an alternate namespace to + # the default + namespace: samplechart-namespace + + # builder values render the chart with all images and manifests. + # builder is used to create `.airgap` packages and to support end users + # who use private registries + builder: + postgresql: + enabled: true +``` + +--- + + +# _docker-compatibility + +- Docker Hub + + :::note + To avoid the November 20, 2020 Docker Hub rate limits, use the `kots docker ensure-secret` CLI command. For more information, see [Avoiding Docker Hub Rate Limits](image-registry-rate-limits). + ::: + +- Quay +- Amazon Elastic Container Registry (ECR) +- Google Container Registry (GCR) +- Azure Container Registry (ACR) +- Harbor +- Sonatype Nexus + +--- + + +# _image-registry-settings + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Field | Description |
|---|---|
| Hostname | Specify a registry domain that uses the Docker V2 protocol. |
| Username | Specify the username for the domain. |
| Password | Specify the password for the domain. |
| Registry Namespace | Specify the registry namespace. The registry namespace is the path between the registry and the image name. For example, `my.registry.com/namespace/image:tag`. For air gap environments, this setting overwrites the registry namespace where images were pushed when KOTS was installed. |
| Disable Pushing Images to Registry | (Optional) Select this option to disable KOTS from pushing images. Make sure that an external process is configured to push images to your registry instead. Your images are still read from your registry when the application is deployed. |
---


# _access-admin-console

By default, during installation, KOTS automatically opens localhost port 8800 to provide access to the Admin Console. Using the `--no-port-forward` flag with the `kots install` command prevents KOTS from creating a port forward to the Admin Console.

After you install with the `--no-port-forward` flag, you can optionally create a port forward so that you can log in to the Admin Console in a browser window.

To access the Admin Console:

1. If you installed in a VM where you cannot open a browser window, forward a port on your local machine to `localhost:8800` on the remote VM using the SSH client:

   ```bash
   ssh -L LOCAL_PORT:localhost:8800 USERNAME@IP_ADDRESS
   ```
   Replace:
   * `LOCAL_PORT` with the port on your local machine to forward. For example, `9900` or `8800`.
   * `USERNAME` with your username for the VM.
   * `IP_ADDRESS` with the IP address for the VM.

   **Example**:

   The following example shows using the SSH client to forward port 8800 on your local machine to `localhost:8800` on the remote VM.

   ```bash
   ssh -L 8800:localhost:8800 user@ip-addr
   ```

1. Run the following KOTS CLI command to open localhost port 8800, which forwards to the Admin Console service:

   ```bash
   kubectl kots admin-console --namespace NAMESPACE
   ```
   Replace `NAMESPACE` with the namespace where the Admin Console was installed.

   For more information about the `kots admin-console` command, see [admin-console](/reference/kots-cli-admin-console-index) in the _KOTS CLI_ documentation.

1. Open a browser window and go to `https://localhost:8800`.

1. Log in to the Admin Console using the password that you created as part of the `kots install` command.

---


# _airgap-bundle-build

* If the **Automatically create airgap builds for newly promoted releases in this channel** setting is enabled on the channel, watch for the build status to complete.
* If automatic air gap builds are not enabled, go to the **Release history** page for the channel and build the air gap bundle manually.

  ![Release history link on a channel card](/images/release-history-link.png)

  [View a larger version of this image](/images/release-history-link.png)

  ![Build button on the Release history page](/images/release-history-build-airgap-bundle.png)

  [View a larger version of this image](/images/release-history-build-airgap-bundle.png)

---


# _airgap-bundle-download

After the build completes, download the bundle. Ensure that you can access the downloaded bundle from the environment where you will install the application.

---


# _airgap-bundle-view-contents

(Optional) View the contents of the downloaded bundle:

   ```bash
   tar -zxvf AIRGAP_BUNDLE
   ```

   Where `AIRGAP_BUNDLE` is the filename for the `.airgap` bundle that you downloaded.

---


# _airgap-license-download

1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Customers** page.

1. Click on the name of the target customer and go to the **Manage customer** tab.

1. Under **License options**, enable the **Airgap Download Enabled** option. Click **Save Changes**.

   ![Airgap Download Enabled option](/images/airgap-download-enabled.png)

   [View a larger version of this image](/images/airgap-download-enabled.png)

1. At the top of the screen, click **Download license** to download the air gap enabled license.
   ![Download air gap license](/images/download-airgap-license.png)

   [View a larger version of this image](/images/download-airgap-license.png)

---

# _automation-intro-embedded

When you use the KOTS CLI to install an application in a kURL cluster, you first run the kURL installation script to provision the cluster and automatically install KOTS in the cluster. Then, you can run the `kots install` command to install the application.

---

# _automation-intro-existing

When you use the KOTS CLI to install an application in an existing cluster, you install both the application and Replicated KOTS with a single command.

---

# _config-values-procedure

To get the ConfigValues file from an installed application instance:

1. Install the target release in a development environment. You can either install the release with Replicated Embedded Cluster or install in an existing cluster with KOTS. For more information, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded) or [Online Installation in Existing Clusters](/enterprise/installing-existing-cluster).

1. Depending on the installer that you used, do one of the following to get the ConfigValues for the installed instance:

   * **For Embedded Cluster installations**: In the Admin Console, go to the **View files** tab. In the filetree, go to **upstream > userdata** and open **config.yaml**, as shown in the image below:

     ![ConfigValues file in the Admin Console View Files tab](/images/admin-console-view-files-configvalues.png)

     [View a larger version of this image](/images/admin-console-view-files-configvalues.png)

   * **For KOTS installations in an existing cluster**: Run the `kubectl kots get config` command to view the generated ConfigValues file:

     ```bash
     kubectl kots get config --namespace APP_NAMESPACE --decrypt
     ```
     Where:
     * `APP_NAMESPACE` is the cluster namespace where KOTS is running.
     * The `--decrypt` flag decrypts all configuration fields with `type: password`. In the downloaded ConfigValues file, the decrypted value is stored in a `valuePlaintext` field.

     The output of the `kots get config` command shows the contents of the ConfigValues file. For more information about the `kots get config` command, including additional flags, see [kots get config](/reference/kots-cli-get-config).

---

# _download-kotsadm-bundle

Download the `kotsadm.tar.gz` air gap bundle from the [Releases](https://github.com/replicatedhq/kots/releases) page in the kots repository on GitHub. Ensure that you can access the downloaded bundle from the environment where you will install the application.

:::note
The version of the `kotsadm.tar.gz` air gap bundle used must be compatible with the version of the `.airgap` bundle for the given application release.
:::

---

# _download-kurl-bundle

```bash
export REPLICATED_APP=APP_SLUG
curl -LS https://k8s.kurl.sh/bundle/$REPLICATED_APP.tar.gz -o $REPLICATED_APP.tar.gz
```
Where `APP_SLUG` is the unique slug for the application.

---

# _ec-prereqs

* Ensure that your installation environment meets the Embedded Cluster requirements. See [Embedded Cluster Requirements](/enterprise/installing-embedded-requirements).

* The application release that you want to install must include an [Embedded Cluster Config](/reference/embedded-config).

* The license used to install must have the **Embedded Cluster Enabled** license field enabled.
  See [Creating and Managing Customers](/vendor/releases-creating-customer).

---

# _embedded-ha-step

(HA Installation Only) If you are installing in HA mode and did not already preconfigure a load balancer, you are prompted during the installation. Do one of the following:

- If you are using the internal load balancer, leave the prompt blank and proceed with the installation.

- If you are using an external load balancer, provide the load balancer address.

---

# _embedded-login-password

After the installation command finishes, note the `Kotsadm` and `Login with password (will not be shown again)` fields in the output of the command. You use these to log in to the Admin Console.

The following shows an example of the `Kotsadm` and `Login with password (will not be shown again)` fields in the output of the installation command:

```
Installation
  Complete ✔

Kotsadm: http://10.128.0.35:8800
Login with password (will not be shown again): 3Hy8WYYid

This password has been set for you by default. It is recommended that you change
this password; this can be done with the following command:
kubectl kots reset-password default
```

---

# _extract-kurl-bundle

In your installation environment, extract the contents of the kURL `.tar.gz` bundle that you downloaded:

```bash
tar -xvzf $REPLICATED_APP.tar.gz
```

---

# _firewall-openings-intro

The domains for the services listed in the table below need to be accessible from servers performing online installations. No outbound internet access is required for air gap installations.

For services hosted at domains owned by Replicated, the table below includes a link to the list of IP addresses for the domain at [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json) in GitHub. Note that the IP addresses listed in the `replicatedhq/ips` repository also include IP addresses for some domains that are _not_ required for installation.

For any third-party services hosted at domains not owned by Replicated, consult the third-party's documentation for the IP address range for each domain, as needed.

---

# _firewall-openings

The domains for the services listed in the table below need to be accessible from servers performing online installations. No outbound internet access is required for air gap installations.

For services hosted at domains owned by Replicated, the table below includes a link to the list of IP addresses for the domain at [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json) in GitHub. Note that the IP addresses listed in the `replicatedhq/ips` repository also include IP addresses for some domains that are _not_ required for installation.

For third-party services hosted at domains not owned by Replicated, the table below lists the required domains. Consult the third-party's documentation for the IP address range for each domain, as needed.
| Host | Embedded Cluster | Helm | KOTS Existing Cluster | kURL | Description |
|---|---|---|---|---|---|
| Docker Hub | Not Required | Not Required | Required | Required | Some dependencies of KOTS are hosted as public images in Docker Hub. The required domains for this service are `index.docker.io`, `cdn.auth0.com`, `*.docker.io`, and `*.docker.com`. |
| `replicated.app` | Required | Required*** | Required | Required | Upstream application YAML and metadata is pulled from `replicated.app`. The current running version of the application (if any), as well as a license ID and application ID to authenticate, are all sent to `replicated.app`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA. For the range of IP addresses for `replicated.app`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L60-L65) in GitHub. |
| `proxy.replicated.com` | Required | Required | Required* | Required* | Private Docker images are proxied through `proxy.replicated.com`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA. For the range of IP addresses for `proxy.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L52-L57) in GitHub. |
| `registry.replicated.com` | Required** | Required | Required** | Required** | Some applications host private images in the Replicated registry at this domain. The on-prem docker client uses a license ID to authenticate to `registry.replicated.com`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA. For the range of IP addresses for `registry.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L20-L25) in GitHub. |
| `kots.io` | Not Required | Not Required | Required | Not Required | Requests are made to this domain when installing the Replicated KOTS CLI. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA. |
| `github.com` | Not Required | Not Required | Required | Not Required | Requests are made to this domain when installing the Replicated KOTS CLI. For information about retrieving GitHub IP addresses, see [About GitHub's IP addresses](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/about-githubs-ip-addresses) in the GitHub documentation. |
| `k8s.kurl.sh`, `s3.kurl.sh` | Not Required | Not Required | Not Required | Required | kURL installation scripts and artifacts are served from [kurl.sh](https://kurl.sh). An application identifier is sent in a URL path, and bash scripts and binary executables are served from kurl.sh. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA. For the range of IP addresses for `k8s.kurl.sh`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L34-L39) in GitHub. The range of IP addresses for `s3.kurl.sh` is the same as for the `kurl.sh` domain. For the range of IP addresses for `kurl.sh`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L28-L31) in GitHub. |
| `amazonaws.com` | Not Required | Not Required | Not Required | Required | `tar.gz` packages are downloaded from Amazon S3 during installations with kURL. For information about dynamically scraping the IP ranges to allowlist for accessing these packages, see [AWS IP address ranges](https://docs.aws.amazon.com/general/latest/gr/aws-ip-ranges.html#aws-ip-download) in the AWS documentation. |
* Required only if the application uses the [Replicated proxy registry](/vendor/private-images-about).

** Required only if the application uses the [Replicated registry](/vendor/private-images-replicated).

*** Required only if the [Replicated SDK](/vendor/replicated-sdk-overview) is included as a dependency of the application Helm chart.

---

# _ha-load-balancer-about

A load balancer is required for high availability mode. If your vendor has chosen to use the internal load balancer with the kURL EKCO add-on, you do not need to provide your own external load balancer. An external load balancer might be preferred when clients outside the cluster need access to the cluster's Kubernetes API.

If you decide to use an external load balancer, the external load balancer must be a TCP forwarding load balancer. For more information, see [Prerequisites](#prerequisites).

The health check for an apiserver is a TCP check on the port that the kube-apiserver listens on. The default value is `:6443`. For more information about the kube-apiserver external load balancer, see [Create load balancer for kube-apiserver](https://kubernetes.io/docs/setup/independent/high-availability/#create-load-balancer-for-kube-apiserver) in the Kubernetes documentation.

---

# _ha-load-balancer-prereq

- If you are installing in high availability (HA) mode, a load balancer is required. You can use the kURL internal load balancer if the [Embedded kURL Cluster Operator (EKCO) Add-On](https://kurl.sh/docs/add-ons/ekco) is included in the kURL Installer spec. Or, you can bring your own external load balancer. An external load balancer might be preferred when clients outside the cluster need access to the cluster's Kubernetes API.

  To install in HA mode, complete the following prerequisites:
  - (Optional) If you are going to use the internal EKCO load balancer, you can preconfigure it by passing `| sudo bash -s ha ekco-enable-internal-load-balancer` with the kURL install command. Otherwise, you are prompted for load balancer details during installation. For more information about the EKCO Add-on, see [EKCO Add-On](https://kurl.sh/docs/add-ons/ekco) in the open source kURL documentation.
  - To use an external load balancer, ensure that the load balancer meets the following requirements:
    - Must be a TCP forwarding load balancer
    - Must be configured to distribute traffic to all healthy control plane nodes in its target list
    - The health check must be a TCP check on port 6443

    For more information about how to create a load balancer for kube-apiserver, see [Create load balancer for kube-apiserver](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/#create-load-balancer-for-kube-apiserver) in the Kubernetes documentation.

  You can optionally preconfigure the external load balancer by passing the `load-balancer-address=HOST:PORT` flag with the kURL install command. Otherwise, you are prompted to provide the load balancer address during installation.

---

# _install-kots-cli-airgap

Install the KOTS CLI. See [Manually Download and Install](/reference/kots-cli-getting-started#manually-download-and-install) in _Installing the KOTS CLI_.

---

# _install-kots-cli

Install the KOTS CLI:

```
curl https://kots.io/install | bash
```

For more installation options, see [Installing the KOTS CLI](/reference/kots-cli-getting-started).
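After the install script completes, you can confirm that the plugin is available. This assumes `kubectl` is installed and on your PATH:

```bash
kubectl kots version
```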
+ +--- + + +# _intro-air-gap + +The procedures in this topic apply to installation environments that do not have access to the internet, known as _air gap_ environments. + +--- + + +# _intro-embedded + +This topic describes how to use Replicated kURL to provision an embedded cluster in a virtual machine (VM) or bare metal server and install an application in the cluster. + +--- + + +# _intro-existing + +This topic describes how to use Replicated KOTS to install an application in an existing Kubernetes cluster. + +--- + + +# _kots-airgap-version-match + +:::note +The versions of the KOTS CLI and the `kotsadm.tar.gz` bundle must match. You can check the version of the KOTS CLI with `kubectl kots version`. +::: + +--- + + +# _kots-install-prompts + +When prompted by the `kots install` command: + 1. Provide the namespace where you want to install both KOTS and the application. + 1. Create a new password for logging in to the Admin Console. + + **Example**: + + ```shell + $ kubectl kots install application-name + Enter the namespace to deploy to: application-name + • Deploying Admin Console + • Creating namespace ✓ + • Waiting for datastore to be ready ✓ + Enter a new password to be used for the Admin Console: •••••••• + • Waiting for Admin Console to be ready ✓ + + • Press Ctrl+C to exit + • Go to http://localhost:8800 to access the Admin Console + + ``` + + After the `kots install` command completes, it creates a port forward to the Admin Console. The Admin Console is exposed internally in the cluster and can only be accessed using a port forward. + +--- + + +# _kubernetes-compatibility + +| KOTS Versions | Kubernetes Compatibility | +|------------------------|-----------------------------| +| 1.117.0 and later | 1.31, 1.30, 1.29 | +| 1.109.1 to 1.116.1 | 1.30, 1.29, 1.28 | +| 1.105.2 to 1.109.0 | 1.29, 1.28 | + + +--- + + +# _kurl-about + +Replicated kURL is an open source project. For more information, see the [kURL documentation](https://kurl.sh/docs/introduction/). + +--- + + +# _license-file-prereq + +* Download your license file. Ensure that you can access the downloaded license file from the environment where you will install the application. See [Downloading Customer Licenses](/vendor/licenses-download). + +--- + + +# _placeholder-airgap-bundle + +* `PATH_TO_AIRGAP_BUNDLE` with the path to the `.airgap` bundle for the application release. You can build and download the air gap bundle for a release in the [Vendor Portal](https://vendor.replicated.com) on the **Release history** page for the channel where the release is promoted. + + Alternatively, for information about building and downloading air gap bundles with the Vendor API v3, see [Trigger airgap build for a channel's release](https://replicated-vendor-api.readme.io/reference/channelreleaseairgapbuild) and [Get airgap bundle download URL for the active release on the channel](https://replicated-vendor-api.readme.io/reference/channelreleaseairgapbundleurl) in the Vendor API v3 documentation. + +--- + + +# _placeholder-app-name-UI + +* `APP_NAME` with the name of the application. The `APP_NAME` is included in the installation command that your vendor gave you. This is a unique identifier that KOTS will use to refer to the application that you install. + +--- + + +# _placeholder-namespace-embedded + +* `NAMESPACE` with the namespace where Replicated kURL installed Replicated KOTS when creating the cluster. By default, kURL installs KOTS in the `default` namespace. 
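For reference, the following is a minimal sketch of an install command with these placeholders filled in. The application name, license path, and bundle path are hypothetical:

```bash
kubectl kots install app-name \
  --namespace default \
  --license-file ./license.yaml \
  --airgap-bundle ./app-name.airgap
```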
---

# _placeholder-namespace-existing

* `NAMESPACE` with the namespace where you want to install both the application and KOTS.

---

# _placeholder-ro-creds

* `REGISTRY_HOST` with the same hostname for the private registry where you pushed the Admin Console images.

* `RO_USERNAME` and `RO_PASSWORD` with the username and password for an account that has read-only access to the private registry.

  :::note
  KOTS stores these read-only credentials in a Kubernetes secret in the same namespace where the Admin Console is installed.

  KOTS uses these credentials to pull the images. To allow KOTS to pull images, the credentials are automatically created as an imagePullSecret on all of the Admin Console Pods.
  :::

---

# _placeholders-global

* `APP_NAME` with a name for the application. This is the unique name that KOTS will use to refer to the application that you install.

* `PASSWORD` with a shared password for accessing the Admin Console.

* `PATH_TO_LICENSE` with the path to your license file. See [Downloading Customer Licenses](/vendor/licenses-download). For information about how to download licenses with the Vendor API v3, see [Download a customer license file as YAML](https://replicated-vendor-api.readme.io/reference/downloadlicense) in the Vendor API v3 documentation.

* `PATH_TO_CONFIGVALUES` with the path to the ConfigValues file.

---

# _prereqs-embedded-cluster

* Ensure that your environment meets the minimum system requirements. See [kURL Installation Requirements](/enterprise/installing-kurl-requirements).

* Review the advanced installation options available for the kURL installer. See [Advanced Options](https://kurl.sh/docs/install-with-kurl/advanced-options) in the kURL documentation.

---

# _prereqs-existing-cluster

* Ensure that your cluster meets the minimum system requirements. See [Minimum System Requirements](/enterprise/installing-general-requirements#minimum-system-requirements) in _Installation Requirements_.

* Ensure that you have at least the minimum RBAC permissions in the cluster required to install KOTS. See [RBAC Requirements](/enterprise/installing-general-requirements#rbac-requirements) in _Installation Requirements_.

  :::note
  If you manually created RBAC resources for KOTS as described in [Namespace-scoped RBAC Requirements](/enterprise/installing-general-requirements#namespace-scoped), include both the `--ensure-rbac=false` and `--skip-rbac-check` flags when you run the `kots install` command.

  These flags prevent KOTS from checking for or attempting to create a Role with `* * *` permissions in the namespace. For more information about these flags, see [install](/reference/kots-cli-install) or [admin-console upgrade](/reference/kots-cli-admin-console-upgrade).
  :::

* Review the options available with the `kots install` command before installing. The `kots install` command includes several optional flags to support different installation use cases. For a list of options, see [install](/reference/kots-cli-install) in the _KOTS CLI_ documentation.

---

# _provision-cluster-intro

This procedure describes how to use kURL to provision an embedded cluster on a VM or bare metal server. When you create a cluster with kURL, kURL also automatically installs Replicated KOTS in the `default` namespace in the cluster.
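For example, an online kURL installation is typically started by piping the installer script for the application to bash. The following is a minimal sketch, where `APP_SLUG` is a placeholder for the unique application slug:

```bash
curl -sSL https://k8s.kurl.sh/APP_SLUG | sudo bash
```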
---

# _push-kotsadm-images

Extract the KOTS Admin Console container images from the `kotsadm.tar.gz` bundle and push the images to your private registry:

```
kubectl kots admin-console push-images ./kotsadm.tar.gz REGISTRY_HOST \
  --registry-username RW_USERNAME \
  --registry-password RW_PASSWORD
```

Replace:

* `REGISTRY_HOST` with the hostname for the private registry. For example, `private.registry.host` or `my-registry.example.com/my-namespace`.

* `RW_USERNAME` and `RW_PASSWORD` with the username and password for an account that has read and write access to the private registry.

  :::note
  KOTS does not store or reuse these read-write credentials.
  :::

---

# _airgap-telemetry

For air gap instances, Replicated KOTS and the Replicated SDK collect and store instance telemetry in a Kubernetes Secret in the customer environment. The Replicated SDK also stores any custom metrics within its Secret.

The telemetry and custom metrics stored in the Secret are collected when a support bundle is generated in the environment. When the support bundle is uploaded to the Vendor Portal, the telemetry and custom metrics are associated with the correct customer and instance ID, and the Vendor Portal updates the instance insights and event data accordingly.

---

# _notifications-about

:::note
Configuring notifications for customer instance changes is in public Beta. Features and functionality are subject to change as we continue to iterate on this functionality toward General Availability.
:::

Notifications can help you catch problems before they happen and proactively contact customers to prevent support cases. For example, you can be notified when an instance's status is degraded and contact your customer about fixing it before the instance goes down. This approach can make issues quicker and easier to solve, and improve the customer experience with less downtime.

For more information about how application status is determined, see [Resource Statuses](insights-app-status#resource-statuses) in _Enabling and Understanding Application Status_. For more information about events that might trigger notifications, see [How the Vendor Portal Generates Events and Insights](instance-insights-event-data#about-events) in _About Instance and Event Data_.

---

# _supported-resources-status

The following resource types are supported:

* Deployment
* StatefulSet
* Service
* Ingress
* PersistentVolumeClaims (PVC)
* DaemonSet

---

# _admin-console-about

KOTS provides an Admin Console that lets your customers manage your application. You can customize the Admin Console. For example, you can customize the Config screen to allow customers to specify inputs related to unique options that your application provides. You can also include your own branding on the Admin Console, configure status informers, and add custom graphs.

---

# _download-portal-about

The Replicated Download Portal can be used to share license files, air gap bundles, and other assets with customers. A unique Download Portal link is available for each customer.
The Download Portal uses information from the customer's license to make the relevant assets available for download, such as:
* The license file
* `.airgap` bundles for the application releases that the customer has access to based on their channel assignment
* The Replicated KOTS Admin Console `kotsadm.tar.gz` air gap bundle
* The Replicated kURL `.tgz` air gap bundle
* Preflight, support bundle, and KOTS CLI kubectl plugins

---

# _embedded-kubernetes-definition

_Embedded Kubernetes_ refers to delivering a Kubernetes distribution alongside an application, so that both Kubernetes and the application are deployed in the customer environment. Embedding Kubernetes allows software vendors to install their Kubernetes application in non-Kubernetes customer-controlled environments, such as virtual machines (VMs) or bare metal servers. Additionally, software vendors that embed Kubernetes with their application have greater control over the characteristics of the cluster where their application is installed. This allows vendors to deliver a cluster that meets their application's requirements, which can help reduce errors during installation.

---

# _kots-definition

Replicated KOTS is a kubectl plugin and an in-cluster Admin Console that provides highly successful installations of Helm charts and Kubernetes applications into customer-controlled environments, including on-prem and air gap environments.

---

# _kots-entitlement-note

:::note
The Replicated KOTS entitlement is required to install applications with KOTS. For more information, see [Pricing](https://www.replicated.com/pricing) on the Replicated website.
:::

---

# _ensure-rbac

| Flag | Type | Description |
|---|---|---|
| `--ensure-rbac` | bool | When false, KOTS does not attempt to create the RBAC resources necessary to manage applications. Default: `true`. If a role specification is needed, use the `generate-manifests` command. |

---

# _help

| Flag | Type | Description |
|---|---|---|
| `-h, --help` | | Help for the command. |

---

# _kotsadm-namespace

| Flag | Type | Description |
|---|---|---|
| `--kotsadm-namespace` | string | Set to override the registry namespace of KOTS Admin Console images. Used for air gap installations. For more information, see [Air Gap Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster-airgapped). **Note**: Replicated recommends that you use `--kotsadm-registry` instead of `--kotsadm-namespace` to override both the registry hostname and, optionally, the registry namespace with a single flag. |
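For example, an air gap installation might override the Admin Console image location with `--kotsadm-registry` alone, as recommended above. The following is a minimal sketch, where the registry hostname, namespace, and credentials are hypothetical placeholders:

```bash
kubectl kots install APP_NAME \
  --kotsadm-registry private.registry.host/kots-images \
  --registry-username RO_USERNAME \
  --registry-password RO_PASSWORD
```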

---

# _kotsadm-registry

| Flag | Type | Description |
|---|---|---|
| `--kotsadm-registry` | string | Set to override the registry hostname and namespace of KOTS Admin Console images. Used for air gap installations. For more information, see [Air Gap Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster-airgapped). |

---

# _registry-password

| Flag | Type | Description |
|---|---|---|
| `--registry-password` | string | Password to use to authenticate with the application registry. Used for air gap installations. |

---

# _registry-username

| Flag | Type | Description |
|---|---|---|
| `--registry-username` | string | Username to use to authenticate with the application registry. Used for air gap installations. |

---

# _skip-rbac-check

| Flag | Type | Description |
|---|---|---|
| `--skip-rbac-check` | bool | When true, KOTS does not validate RBAC permissions. Default: `false` |

---

# _strict-sec-context-yaml

```yaml
securityContext:
  fsGroup: 1001
  runAsGroup: 1001
  runAsNonRoot: true
  runAsUser: 1001
  seccompProfile:
    type: RuntimeDefault
  supplementalGroups:
  - 1001
```

---

# _strict-security-context

import StrictSecContextYaml from "./_strict-sec-context-yaml.mdx"

| Flag | Type | Description |
|---|---|---|
| `--strict-security-context` | bool | Set to true to explicitly enable strict security contexts for all KOTS Pods and containers. Default: `false` |

By default, KOTS Pods and containers are not deployed with a specific security context. When true, `--strict-security-context` does the following:

* Ensures containers run as a non-root user
* Sets the specific UID for the containers (1001)
* Sets the GID for volume ownership and permissions (1001)
* Applies the default container runtime seccomp profile for security
* Ensures the container is not run with privileged system access
* Prevents the container from gaining more privileges than its parent process
* Ensures the container's root filesystem is mounted as read-only
* Removes all Linux capabilities from the container

The following shows the securityContext for KOTS Pods when `--strict-security-context` is set:

<StrictSecContextYaml/>

:::note
Might not work for some storage providers.
:::

---

# _use-minimal-rbac

| Flag | Type | Description |
|---|---|---|
| `--use-minimal-rbac` | bool | When true, KOTS RBAC permissions are limited to the namespace where it is installed. To use `--use-minimal-rbac`, the application must support namespace-scoped installations and the user must have the minimum RBAC permissions required by KOTS in the target namespace. For a complete list of requirements, see [Namespace-scoped RBAC Requirements](/enterprise/installing-general-requirements#namespace-scoped) in _Installation Requirements_. Default: `false` |
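For example, a namespace-scoped installation might be started with a sketch like the following, where the application name and namespace are hypothetical:

```bash
kubectl kots install app-name \
  --namespace app-namespace \
  --use-minimal-rbac
```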

---

# _wait-duration

| Flag | Type | Description |
|---|---|---|
| `--wait-duration` | string | Timeout to be used while waiting for individual components to be ready. Must be in Go duration format. Example: 10s, 2m |

---

# _with-minio

| Flag | Type | Description |
|---|---|---|
| `--with-minio` | bool | When true, KOTS deploys a local MinIO instance for storage and attempts to change any MinIO-based snapshots (hostpath and NFS) to the local-volume-provider plugin. See local-volume-provider in GitHub. Default: `true` |

---

# _installers

To provision a cluster on a VM or bare metal server, kURL uses a spec that is defined in a manifest file with `apiVersion: cluster.kurl.sh/v1beta1` and `kind: Installer`. This spec (called a _kURL installer_) lists the kURL add-ons that will be included in the cluster. kURL provides add-ons for networking, storage, ingress, and more. kURL also provides a KOTS add-on, which installs KOTS in the cluster and deploys the KOTS Admin Console. You can customize the kURL installer according to your application requirements.

---

# _kurl-availability

:::note
Replicated kURL is available only for existing customers. If you are not an existing kURL user, use Replicated Embedded Cluster instead. For more information, see [Using Embedded Cluster](/vendor/embedded-overview).

kURL is a Generally Available (GA) product for existing customers. For more information about the Replicated product lifecycle phases, see [Support Lifecycle Policy](/vendor/policies-support-lifecycle).
:::

---

# _kurl-definition

kURL is an open source project maintained by Replicated that software vendors can use to create custom Kubernetes distributions that are embedded with their application. Enterprise customers can then run a kURL installation script on their virtual machine (VM) or bare metal server to provision a cluster and install the application. This allows software vendors to distribute Kubernetes applications to customers that do not have access to a cluster in their environment.

For more information about the kURL open source project, see the [kURL website](https://kurl.sh).

---

# _allow-privilege-escalation

```yaml
spec:
  allowPrivilegeEscalation: true
```

---

# _application-icon

```yaml
apiVersion: kots.io/v1beta1
kind: Application
spec:
  icon: https://example.com/app-icon.png
```

---

# _application-spec

```yaml
apiVersion: kots.io/v1beta1
kind: Application
```

---

# _application-statusInformers

```yaml
apiVersion: kots.io/v1beta1
kind: Application
spec:
  statusInformers:
    - deployment/example-nginx
```

---

# _config-option-invalid-regex-validator

**Correct**:

```yaml
spec:
  groups:
  - name: authentication
    title: Authentication
    description: Configure application authentication below.
  - name: jwt_file
    title: jwt_file
    type: file
    validation:
      regex:
        pattern: "^[A-Za-z0-9-_]+.[A-Za-z0-9-_]+.[A-Za-z0-9-_]*$" // valid RE2 regular expression
        message: "JWT is invalid"
```

**Incorrect**:

```yaml
spec:
  groups:
  - name: authentication
    title: Authentication
    description: Configure application authentication below.
  - name: jwt_file
    title: jwt_file
    type: file
    validation:
      regex:
        pattern: "^/path/([A-Za-z0-9-_]+.[A-Za-z0-9-_]+.[A-Za-z0-9-_]*$" // invalid RE2 regular expression
        message: "JWT is invalid"
```

---

# _config-option-invalid-type

**Correct**:

```yaml
spec:
  groups:
  - name: authentication
    title: Authentication
    description: Configure application authentication below.
  - name: group_title
    title: Group Title
    items:
    - name: http_enabled
      title: HTTP Enabled
      type: bool # bool is a valid type
```

**Incorrect**:

```yaml
spec:
  groups:
  - name: authentication
    title: Authentication
    description: Configure application authentication below.
  - name: group_title
    title: Group Title
    items:
    - name: http_enabled
      title: HTTP Enabled
      type: unknown_type # unknown_type is not a valid type
```

---

# _config-option-is-circular

**Incorrect**:

```yaml
spec:
  groups:
  - name: example_settings
    items:
    - name: example_default_value
      type: text
      value: repl{{ ConfigOption "example_default_value" }}
```

---

# _config-option-password-type

```yaml
spec:
  groups:
  - name: ports
    items:
    - name: my_secret
      type: password
```

---

# _config-option-regex-validator-invalid-type

**Correct**:

```yaml
spec:
  groups:
  - name: authentication
    title: Authentication
    description: Configure application authentication below.
  - name: jwt_file
    title: jwt_file
    type: file // valid item type
    validation:
      regex:
        pattern: "^[A-Za-z0-9-_]+.[A-Za-z0-9-_]+.[A-Za-z0-9-_]*$"
        message: "JWT is invalid"
```

**Incorrect**:

```yaml
spec:
  groups:
  - name: authentication
    title: Authentication
    description: Configure application authentication below.
  - name: jwt_file
    title: jwt_file
    type: bool // invalid item type
    validation:
      regex:
        pattern: "^[A-Za-z0-9-_]+.[A-Za-z0-9-_]+.[A-Za-z0-9-_]*$"
        message: "JWT is invalid"
```

---

# _config-spec

```yaml
apiVersion: kots.io/v1beta1
kind: Config
```

---

# _container-image-latest-tag

```yaml
spec:
  containers:
  - image: nginx:latest
```

---

# _container-image-local-image-name

```yaml
spec:
  containers:
  - image: LocalImageName
```

---

# _container-resource-limits

```yaml
spec:
  containers:
  - name: nginx
    resources:
      requests:
        memory: '32Mi'
        cpu: '100m'
      # note the lack of a limits field
```

---

# _container-resource-requests

```yaml
spec:
  containers:
  - name: nginx
    resources:
      limits:
        memory: '256Mi'
        cpu: '500m'
      # note the lack of a requests field
```

---

# _container-resources

```yaml
spec:
  containers:
  - name: nginx
    # note the lack of a resources field
```

---

# _deprecated-kubernetes-installer-version

**Correct**:

```yaml
apiVersion: cluster.kurl.sh/v1beta1
kind: Installer
```

**Incorrect**:

```yaml
apiVersion: kurl.sh/v1beta1
kind: Installer
```

---

# _hardcoded-namespace

```yaml
metadata:
  name: spline-reticulator
  namespace: graphviz-pro
```

---

# _invalid-helm-release-name

```yaml
apiVersion: kots.io/v1beta1
kind: HelmChart
spec:
  chart:
    releaseName: samplechart-release-1
```

---

# _invalid-kubernetes-installer

**Correct**:

```yaml
apiVersion: cluster.kurl.sh/v1beta1
kind: Installer
spec:
  kubernetes:
    version: 1.24.5
```

**Incorrect**:

```yaml
apiVersion: cluster.kurl.sh/v1beta1
kind: Installer
spec:
  kubernetes:
version: 1.24.x + ekco: + version: latest +``` + + +--- + + +# _invalid-min-kots-version + +```yaml +apiVersion: kots.io/v1beta1 +kind: Application +spec: + minKotsVersion: 1.0.0 +``` + + +--- + + +# _invalid-rendered-yaml + +**Example Helm Chart**: +```yaml +apiVersion: kots.io/v1beta1 +kind: HelmChart +metadata: + name: nginx-chart +spec: + chart: + name: nginx-chart + chartVersion: 0.1.0 + helmVersion: v3 + useHelmInstall: true + builder: {} + values: + image: repl{{ ConfigOption `nginx_image`}} +``` + +**Correct Config**: +```yaml +apiVersion: kots.io/v1beta1 +kind: Config +metadata: + name: nginx-config +spec: + groups: + - name: nginx-deployment-config + title: nginx deployment config + items: + - name: nginx_image + title: image + type: text + default: "nginx" +``` + +**Resulting Rendered Helm Chart**: +```yaml +apiVersion: kots.io/v1beta1 +kind: HelmChart +metadata: + name: nginx-chart +spec: + chart: + name: nginx-chart + chartVersion: 0.1.0 + helmVersion: v3 + useHelmInstall: true + builder: {} + values: + image: nginx +``` +**Incorrect Config**: +```yaml +apiVersion: kots.io/v1beta1 +kind: Config +metadata: + name: nginx-config +spec: + groups: + - name: nginx-deployment-config + items: + - name: nginx_image + title: image + type: text + default: "***HIDDEN***" +``` + +**Resulting Lint Error**: +```json +{ + "lintExpressions": [ + { + "rule": "invalid-rendered-yaml", + "type": "error", + "message": "yaml: did not find expected alphabetic or numeric character: image: ***HIDDEN***", + "path": "nginx-chart.yaml", + "positions": null + } + ], + "isLintingComplete": false +} +``` +**Incorrectly Rendered Helm Chart**: +```yaml +apiVersion: kots.io/v1beta1 +kind: HelmChart +metadata: + name: nginx-chart +spec: + chart: + name: nginx-chart + chartVersion: 0.1.0 + helmVersion: v3 + useHelmInstall: true + builder: {} + values: + image: ***HIDDEN*** +``` + + +--- + + +# _invalid-target-kots-version + +```yaml +apiVersion: kots.io/v1beta1 +kind: Application +spec: + targetKotsVersion: 1.0.0 +``` + + +--- + + +# _invalid-yaml + +**Correct**: + +```yaml +spec: + kubernetes: + version: 1.24.5 +``` + +**Incorrect**: + +```yaml +spec: + kubernetes: version 1.24.x +``` + + +--- + + +# _invalid_type + +**Correct**: + +```yaml +ports: + - serviceName: "example" + servicePort: 80 +``` + +**Incorrect**: + +```yaml +ports: + - serviceName: "example" + servicePort: "80" +``` + + +--- + + +# _linter-definition + +The linter checks the manifest files in Replicated KOTS releases to ensure that there are no YAML syntax errors, that all required manifest files are present in the release to support installation with KOTS, and more. 
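For example, you can run the linter locally with the Replicated CLI before promoting a release. The following sketch assumes your release manifests are in a local `./manifests` directory:

```bash
replicated release lint --yaml-dir ./manifests
```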
+ + +--- + + +# _may-contain-secrets + +```yaml +data: + ENV_VAR_1: "y2X4hPiAKn0Pbo24/i5nlInNpvrL/HJhlSCueq9csamAN8g5y1QUjQnNL7btQ==" +``` + + +--- + + +# _missing-api-version-field + +```yaml +apiVersion: kots.io/v1beta1 +``` + + +--- + + +# _missing-kind-field + +```yaml +kind: Config +``` + + +--- + + +# _preflight-spec + +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: Preflight +``` + + +--- + + +# _privileged + +```yaml +spec: + privileged: true +``` + + +--- + + +# _repeat-option-malformed-yamlpath + +```yaml +spec: + groups: + - name: ports + items: + - name: service_port + yamlPath: 'spec.ports[0]' +``` + + +--- + + +# _repeat-option-missing-template + +```yaml +spec: + groups: + - name: ports + items: + - name: service_port + title: Service Port + type: text + repeatable: true + templates: + - apiVersion: v1 + kind: Service + name: my-service + namespace: my-app + yamlPath: 'spec.ports[0]' + - apiVersion: v1 + kind: Service + name: my-service + namespace: my-app +``` + + +--- + + +# _repeat-option-missing-valuesByGroup + +```yaml +spec: + groups: + - name: ports + items: + - name: service_port + title: Service Port + type: text + repeatable: true + valuesByGroup: + ports: + port-default-1: "80" +``` + + +--- + + +# _replicas-1 + +```yaml +spec: + replicas: 1 +``` + + +--- + + +# _resource-limits-cpu + +```yaml +spec: + containers: + - name: nginx + resources: + limits: + memory: '256Mi' + # note the lack of a cpu field +``` + + +--- + + +# _resource-limits-memory + +```yaml +spec: + containers: + - name: nginx + resources: + limits: + cpu: '500m' + # note the lack of a memory field +``` + + +--- + + +# _resource-requests-cpu + +```yaml +spec: + containers: + - name: nginx + resources: + requests: + memory: '32Mi' + # note the lack of a cpu field +``` + + +--- + + +# _resource-requests-memory + +```yaml +spec: + containers: + - name: nginx + resources: + requests: + cpu: '100m' + # note the lack of a memory field +``` + + +--- + + +# _troubleshoot-spec + +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: SupportBundle +``` + + +--- + + +# _volume-docker-sock + +```yaml +spec: + volumes: + - hostPath: + path: /var/run/docker.sock +``` + + +--- + + +# _volumes-host-paths + +```yaml +spec: + volumes: + - hostPath: + path: /data +``` + + +--- + + +# _limitation-ec + +Monitoring applications with Prometheus is not supported for installations with [Replicated Embedded Cluster](/vendor/embedded-overview). + +--- + + +# _overview-prom + +The KOTS Admin Console can use the open source systems monitoring tool Prometheus to collect metrics on an application and the cluster where the application is installed. Prometheus components include the main Prometheus server, which scrapes and stores time series data, an Alertmanager for alerting on metrics, and Grafana for visualizing metrics. For more information about Prometheus, see [What is Prometheus?](https://prometheus.io/docs/introduction/overview/) in the Prometheus documentation. + +The Admin Console exposes graphs with key metrics collected by Prometheus in the **Monitoring** section of the dashboard. By default, the Admin Console displays the following graphs: + +* Cluster disk usage +* Pod CPU usage +* Pod memory usage + +In addition to these default graphs, application developers can also expose business and application level metrics and alerts on the dashboard. 
+ +The following screenshot shows an example of the **Monitoring** section on the Admin Console dashboard with the Disk Usage, CPU Usage, and Memory Usage default graphs: + +Graphs on the Admin Console dashboard + +[View a larger version of this image](/images/kotsadm-dashboard-graph.png) + +--- + + +# _analyzers-note + +For basic examples of checking CPU, memory, and disk capacity, see [Node Resources Analyzer](https://troubleshoot.sh/reference/analyzers/node-resources/) in the Troubleshoot documentation. + + +--- + + +# _http-requests-cr + +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: Preflight +metadata: + name: preflight-checks +spec: + collectors: + - http: + collectorName: slack + get: + url: https://api.slack.com/methods/api.test + analyzers: + - textAnalyze: + checkName: Slack Accessible + fileName: slack.json + regex: '"status": 200,' + outcomes: + - pass: + when: "true" + message: "Can access the Slack API" + - fail: + when: "false" + message: "Cannot access the Slack API. Check that the server can reach the internet and check [status.slack.com](https://status.slack.com)." +``` + +--- + + +# _http-requests-secret + +```yaml +apiVersion: v1 +kind: Secret +metadata: + labels: + troubleshoot.sh/kind: preflight + name: "{{ .Release.Name }}-preflight-config" +stringData: + preflight.yaml: | + apiVersion: troubleshoot.sh/v1beta2 + kind: Preflight + metadata: + name: preflight-sample + spec: + collectors: + - http: + collectorName: slack + get: + url: https://api.slack.com/methods/api.test + analyzers: + - textAnalyze: + checkName: Slack Accessible + fileName: slack.json + regex: '"status": 200,' + outcomes: + - pass: + when: "true" + message: "Can access the Slack API" + - fail: + when: "false" + message: "Cannot access the Slack API. Check that the server can reach the internet and check [status.slack.com](https://status.slack.com)." 
+``` + +--- + + +# _k8s-distro-cr + +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: Preflight +metadata: + name: my-app +spec: + analyzers: + - distribution: + checkName: Kubernetes distribution + outcomes: + - fail: + when: "== docker-desktop" + message: The application does not support Docker Desktop Clusters + - fail: + when: "== microk8s" + message: The application does not support Microk8s Clusters + - fail: + when: "== minikube" + message: The application does not support Minikube Clusters + - pass: + when: "== eks" + message: EKS is a supported distribution + - pass: + when: "== gke" + message: GKE is a supported distribution + - pass: + when: "== aks" + message: AKS is a supported distribution + - pass: + when: "== kurl" + message: KURL is a supported distribution + - pass: + when: "== digitalocean" + message: DigitalOcean is a supported distribution + - warn: + message: Unable to determine the distribution of Kubernetes +``` + +--- + + +# _k8s-distro-secret + +```yaml +apiVersion: v1 +kind: Secret +metadata: + labels: + troubleshoot.sh/kind: preflight + name: "{{ .Release.Name }}-preflight-config" +stringData: + preflight.yaml: | + apiVersion: troubleshoot.sh/v1beta2 + kind: Preflight + metadata: + name: preflight-sample + spec: + analyzers: + - distribution: + checkName: Kubernetes distribution + outcomes: + - fail: + when: "== docker-desktop" + message: The application does not support Docker Desktop Clusters + - fail: + when: "== microk8s" + message: The application does not support Microk8s Clusters + - fail: + when: "== minikube" + message: The application does not support Minikube Clusters + - pass: + when: "== eks" + message: EKS is a supported distribution + - pass: + when: "== gke" + message: GKE is a supported distribution + - pass: + when: "== aks" + message: AKS is a supported distribution + - pass: + when: "== kurl" + message: KURL is a supported distribution + - pass: + when: "== digitalocean" + message: DigitalOcean is a supported distribution + - warn: + message: Unable to determine the distribution of Kubernetes +``` + +--- + + +# _k8s-version-cr + +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: Preflight +metadata: + name: my-app +spec: + analyzers: + - clusterVersion: + outcomes: + - fail: + when: "< 1.25.0" + message: The application requires Kubernetes 1.25.0 or later, and recommends 1.28.0. + uri: https://www.kubernetes.io + - warn: + when: "< 1.28.0" + message: Your cluster meets the minimum version of Kubernetes, but we recommend you update to 1.28.0 or later. + uri: https://kubernetes.io + - pass: + message: Your cluster meets the recommended and required versions of Kubernetes. +``` + +--- + + +# _k8s-version-secret + +```yaml +apiVersion: v1 +kind: Secret +metadata: + labels: + troubleshoot.sh/kind: preflight + name: "{{ .Release.Name }}-preflight-config" +stringData: + preflight.yaml: | + apiVersion: troubleshoot.sh/v1beta2 + kind: Preflight + metadata: + name: preflight-sample + spec: + analyzers: + - clusterVersion: + outcomes: + - fail: + when: "< 1.25.0" + message: The application requires Kubernetes 1.25.0 or later, and recommends 1.28.0. + uri: https://www.kubernetes.io + - warn: + when: "< 1.28.0" + message: Your cluster meets the minimum version of Kubernetes, but we recommend you update to 1.28.0 or later. + uri: https://kubernetes.io + - pass: + message: Your cluster meets the recommended and required versions of Kubernetes. 
+``` + +--- + + +# _mysql-cr + +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: Preflight +metadata: + name: my-app +spec: + collectors: + - mysql: + collectorName: mysql + uri: 'repl{{ ConfigOption "db_user" }}:repl{{ConfigOption "db_password" }}@tcp(repl{{ ConfigOption "db_host" }}:repl{{ConfigOption "db_port" }})/repl{{ ConfigOption "db_name" }}' + analyzers: + - mysql: + # `strict: true` prevents installation from continuing if the preflight check fails + strict: true + checkName: Must be MySQL 8.x or later + collectorName: mysql + outcomes: + - fail: + when: connected == false + message: Cannot connect to MySQL server + - fail: + when: version < 8.x + message: The MySQL server must be at least version 8 + - pass: + message: The MySQL server is ready +``` + +--- + + +# _mysql-secret + +```yaml +apiVersion: v1 +kind: Secret +metadata: + labels: + troubleshoot.sh/kind: preflight + name: "{{ .Release.Name }}-preflight-config" +stringData: + preflight.yaml: | + apiVersion: troubleshoot.sh/v1beta2 + kind: Preflight + metadata: + name: preflight-sample + spec: + {{ if eq .Values.global.mysql.enabled true }} + collectors: + - mysql: + collectorName: mysql + uri: '{{ .Values.global.externalDatabase.user }}:{{ .Values.global.externalDatabase.password }}@tcp({{ .Values.global.externalDatabase.host }}:{{ .Values.global.externalDatabase.port }})/{{ .Values.global.externalDatabase.database }}?tls=false' + {{ end }} + analyzers: + {{ if eq .Values.global.mysql.enabled true }} + - mysql: + checkName: Must be MySQL 8.x or later + collectorName: mysql + outcomes: + - fail: + when: connected == false + message: Cannot connect to MySQL server + - fail: + when: version < 8.x + message: The MySQL server must be at least version 8 + - pass: + message: The MySQL server is ready + {{ end }} +``` + +--- + + +# _node-count-cr + + + +--- + + +# _node-count-secret + +```yaml +apiVersion: v1 +kind: Secret +metadata: + labels: + troubleshoot.sh/kind: preflight + name: "{{ .Release.Name }}-preflight-config" +stringData: + preflight.yaml: | + apiVersion: troubleshoot.sh/v1beta2 + kind: Preflight + metadata: + name: preflight-sample + spec: + analyzers: + - nodeResources: + checkName: Node Count Check + outcomes: + - fail: + when: 'count() > {{ .Values.global.maxNodeCount }}' + message: "The cluster has more than {{ .Values.global.maxNodeCount }} nodes." + - pass: + message: You have the correct number of nodes. 
+``` + +--- + + +# _node-cpu-cr + +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: Preflight +metadata: + name: my-app +spec: + analyzers: + - nodeResources: + checkName: Total CPU Cores in the cluster is 4 or greater + outcomes: + - fail: + when: "sum(cpuCapacity) < 4" + message: The cluster must contain at least 4 cores + uri: https://kurl.sh/docs/install-with-kurl/system-requirements + - pass: + message: There are at least 4 cores in the cluster +``` + +--- + + +# _node-cpu-secret + +```yaml +apiVersion: v1 +kind: Secret +metadata: + labels: + troubleshoot.sh/kind: preflight + name: "{{ .Release.Name }}-preflight-config" +stringData: + preflight.yaml: | + apiVersion: troubleshoot.sh/v1beta2 + kind: Preflight + metadata: + name: preflight-sample + spec: + analyzers: + - nodeResources: + checkName: Total CPU Cores in the cluster is 4 or greater + outcomes: + - fail: + when: "sum(cpuCapacity) < 4" + message: The cluster must contain at least 4 cores + uri: https://kurl.sh/docs/install-with-kurl/system-requirements + - pass: + message: There are at least 4 cores in the cluster +``` + +--- + + +# _node-ephem-storage-cr + +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: Preflight +metadata: + name: my-app +spec: + analyzers: + - nodeResources: + checkName: Every node in the cluster must have at least 40 GB of ephemeral storage, with 100 GB recommended + outcomes: + - fail: + when: "min(ephemeralStorageCapacity) < 40Gi" + message: All nodes must have at least 40 GB of ephemeral storage. + uri: https://kurl.sh/docs/install-with-kurl/system-requirements + - warn: + when: "min(ephemeralStorageCapacity) < 100Gi" + message: All nodes are recommended to have at least 100 GB of ephemeral storage. + uri: https://kurl.sh/docs/install-with-kurl/system-requirements + - pass: + message: All nodes have at least 100 GB of ephemeral storage. +``` + +--- + + +# _node-ephem-storage-secret + +```yaml +apiVersion: v1 +kind: Secret +metadata: + labels: + troubleshoot.sh/kind: preflight + name: "{{ .Release.Name }}-preflight-config" +stringData: + preflight.yaml: | + apiVersion: troubleshoot.sh/v1beta2 + kind: Preflight + metadata: + name: preflight-sample + spec: + analyzers: + - nodeResources: + checkName: Every node in the cluster must have at least 40 GB of ephemeral storage, with 100 GB recommended + outcomes: + - fail: + when: "min(ephemeralStorageCapacity) < 40Gi" + message: All nodes must have at least 40 GB of ephemeral storage. + uri: https://kurl.sh/docs/install-with-kurl/system-requirements + - warn: + when: "min(ephemeralStorageCapacity) < 100Gi" + message: All nodes are recommended to have at least 100 GB of ephemeral storage. + uri: https://kurl.sh/docs/install-with-kurl/system-requirements + - pass: + message: All nodes have at least 100 GB of ephemeral storage. +``` + +--- + + +# _node-mem-cr + +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: Preflight +metadata: + name: my-app +spec: + analyzers: + - nodeResources: + checkName: Every node in the cluster must have at least 8 GB of memory, with 32 GB recommended + outcomes: + - fail: + when: "min(memoryCapacity) < 8Gi" + message: All nodes must have at least 8 GB of memory. + uri: https://kurl.sh/docs/install-with-kurl/system-requirements + - warn: + when: "min(memoryCapacity) < 32Gi" + message: All nodes are recommended to have at least 32 GB of memory. + uri: https://kurl.sh/docs/install-with-kurl/system-requirements + - pass: + message: All nodes have at least 32 GB of memory. 
+``` + +--- + + +# _node-mem-secret + +```yaml +apiVersion: v1 +kind: Secret +metadata: + labels: + troubleshoot.sh/kind: preflight + name: "{{ .Release.Name }}-preflight-config" +stringData: + preflight.yaml: | + apiVersion: troubleshoot.sh/v1beta2 + kind: Preflight + metadata: + name: preflight-sample + spec: + analyzers: + - nodeResources: + checkName: Every node in the cluster must have at least 8 GB of memory, with 32 GB recommended + outcomes: + - fail: + when: "min(memoryCapacity) < 8Gi" + message: All nodes must have at least 8 GB of memory. + uri: https://kurl.sh/docs/install-with-kurl/system-requirements + - warn: + when: "min(memoryCapacity) < 32Gi" + message: All nodes are recommended to have at least 32 GB of memory. + uri: https://kurl.sh/docs/install-with-kurl/system-requirements + - pass: + message: All nodes have at least 32 GB of memory. +``` + +--- + + +# _node-req-cr + +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: Preflight +metadata: + name: my-app +spec: + analyzers: + - nodeResources: + checkName: Node requirements + filters: + # Must have 1 node with 16 GB (available) memory and 5 cores (on a single node) with amd64 architecture + allocatableMemory: 16Gi + cpuArchitecture: amd64 + cpuCapacity: "5" + outcomes: + - fail: + when: "count() < 1" + message: This application requires at least 1 node with 16GB available memory and 5 cpu cores with amd64 architecture + - pass: + message: This cluster has a node with enough memory and cpu cores +``` + +--- + + +# _node-req-secret + +```yaml +apiVersion: v1 +kind: Secret +metadata: + labels: + troubleshoot.sh/kind: preflight + name: "{{ .Release.Name }}-preflight-config" +stringData: + preflight.yaml: | + apiVersion: troubleshoot.sh/v1beta2 + kind: Preflight + metadata: + name: preflight-sample + spec: + analyzers: + - nodeResources: + checkName: Node requirements + filters: + # Must have 1 node with 16 GB (available) memory and 5 cores (on a single node) with amd64 architecture + allocatableMemory: 16Gi + cpuArchitecture: amd64 + cpuCapacity: "5" + outcomes: + - fail: + when: "count() < 1" + message: This application requires at least 1 node with 16GB available memory and 5 cpu cores with amd64 architecture + - pass: + message: This cluster has a node with enough memory and cpu cores +``` + +--- + + +# _node-storage-cr + +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: Preflight +metadata: + name: my-app +spec: + analyzers: + - storageClass: + checkName: Required storage classes + storageClassName: "default" + outcomes: + - fail: + message: Could not find a storage class called "default". + - pass: + message: A storage class called "default" is present. +``` + +--- + + +# _node-storage-secret + +```yaml +apiVersion: v1 +kind: Secret +metadata: + labels: + troubleshoot.sh/kind: preflight + name: "{{ .Release.Name }}-preflight-config" +stringData: + preflight.yaml: | + apiVersion: troubleshoot.sh/v1beta2 + kind: Preflight + metadata: + name: preflight-sample + spec: + analyzers: + - storageClass: + checkName: Required storage classes + storageClassName: "default" + outcomes: + - fail: + message: Could not find a storage class called "default". + - pass: + message: A storage class called "default" is present. +``` + +--- + + +# _preflights-add-analyzers + +You must add analyzers to analyze the data from the collectors that you specified. Define the criteria for the pass, fail, and warn outcomes, and specify custom messages for each. 
For example, you can set a `fail` outcome if the MySQL version is less than the minimum required. Then, specify a message to display that informs your customer of the reasons for the failure and steps they can take to fix the issue.

---

# _preflights-define-xref

For more information about defining collectors and analyzers, see [Collecting Data](https://troubleshoot.sh/docs/collect/)
and [Analyzing Data](https://troubleshoot.sh/docs/analyze/) in the Troubleshoot documentation.

---

# _preflights-define

The preflight checks that you run depend on your application's needs. This section gives some guidance about how to think about using collectors and analyzers to design preflight checks.

---

# _preflights-sb-about

Preflight checks and support bundles are provided by the Troubleshoot open source project, which is maintained by Replicated. Troubleshoot is a kubectl plugin that provides diagnostic tools for Kubernetes applications. For more information, see the open source [Troubleshoot](https://troubleshoot.sh/docs/collect/) documentation.

Preflight checks and support bundles analyze data from customer environments to provide insights that help users to avoid or troubleshoot common issues with an application:
* **Preflight checks** run before an application is installed to check that the customer environment meets the application requirements.
* **Support bundles** collect troubleshooting data from customer environments to help users diagnose problems with application deployments.

---

# _preflights-sb-note

For a comprehensive overview, see [About Preflight Checks and Support Bundles](preflight-support-bundle-about).

---

# _preflights-spec-locations

For more information about specifications, see [About Specifications](preflight-support-bundle-about#about-specifications) in _About Preflight Checks and Support Bundles_.

---

# _preflights-strict

If any strict preflight checks are configured, the `--skip-preflights` flag is not honored because the preflight checks must run and contain no failures before the application is deployed.

When the `--deploy` option is provided and there are strict preflight checks, the preflight checks always run. The deployment waits for up to 15 minutes for the preflight checks to complete. If the checks complete without strict preflight failures, the release deploys. If the checks do not complete within 15 minutes, the release does not deploy. If there are one or more strict preflight failures, the release does not deploy.

For more information about strict preflight checks, see [Defining Preflight Checks](/vendor/preflight-defining).

---

# _step-creds

Provide read-only credentials for the external private registry in your Replicated account. This allows Replicated to access the images through the proxy registry. See [Add Credentials for an External Registry](packaging-private-images#add-credentials-for-an-external-registry) in _Connecting to an External Registry_.

---

# _step-custom-domain

(Optional) Add a custom domain for the proxy registry instead of `proxy.replicated.com`. See [Using Custom Domains](custom-domains-using).

---

# _redactors-about

Troubleshoot has built-in redactors to prevent sensitive data from being collected when support bundles are generated. You can add more redactors if needed. For more information, see [Redacting Data](https://troubleshoot.sh/docs/redact/) in the Troubleshoot documentation.
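As an illustration, a custom redactor can be defined in its own spec using the Troubleshoot `Redactor` kind. The following is a minimal sketch; the redactor name and regex pattern are hypothetical:

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: Redactor
metadata:
  name: example-redactor
spec:
  redactors:
  - name: redact-connection-strings
    # With no fileSelector, this redactor runs against all collected files
    removals:
      regex:
      # The value captured by the named group "mask" is replaced with ***HIDDEN***
      - redactor: '(password=)(?P<mask>[^;&]+)'
```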


---


# _required-releases-description

When a release is required, KOTS requires users to upgrade to that version before they can upgrade to a later version. For example, if you select **Prevent this release from being skipped during upgrades** for release v2.0.0, users with v1.0.0 deployed must upgrade to v2.0.0 before they can upgrade to a version later than v2.0.0, such as v2.1.0.

---


# _required-releases-limitations

Required releases have the following limitations:

 * Required releases are supported in KOTS v1.68.0 and later.
 * After users deploy a required version, they can no longer redeploy (roll back to) versions earlier than the required version, even if `allowRollback` is true in the Application custom resource manifest. For more information, see [`allowRollback`](/reference/custom-resource-application#allowrollback) in the Application custom resource topic.
 * If you change the channel an existing customer is assigned to, the Admin Console always fetches the latest release on the new channel, regardless of any required releases on the channel. For more information, see [Channel Assignment](licenses-about#channel-assignment) in _About Customers_.
 * Required releases are supported for KOTS installations only and are not supported for releases installed with Helm. The **Prevent this release from being skipped during upgrades** option has no effect if the user installs with Helm.

---


# _version-label-reqs-helm

* The version label for the release must match the version label from one of the `Chart.yaml` files in the release.
* If there is one Helm chart in the release, Replicated automatically uses the version from the `Chart.yaml` file.
* If there is more than one Helm chart in the release, Replicated uses the version label from one of the `Chart.yaml` files. You can edit the version label for the release to use the version label from a different `Chart.yaml` file.

---


# _app


  --app
  string
  The app slug or app ID to use in all calls. The default uses the $REPLICATED_APP environment variable.



---


# _authorize-with-token-note

:::note
The `replicated login` command creates a token after you log in to your vendor account in a browser and saves it to a config file. Alternatively, if you do not have access to a browser, you can set the `REPLICATED_API_TOKEN` environment variable to authenticate. For more information, see [(Optional) Set Environment Variables](#env-var) below.
:::

---


# _authtype


  --authtype
  string
  Authorization type for the registry. Default: password



---


# _chart-yaml-dir-reqs

:::note
If your release supports installations with Replicated KOTS, `--yaml-dir` is required. If your release supports installations with the Helm CLI only, either `--yaml-dir` or `--chart` can be used.
:::

---


# _help


  -h, --help

  Help for the command.



---


# _login

Authorize the Replicated CLI:

   ```
   replicated login
   ```

   In the browser window that opens, complete the prompts to log in to your vendor account and authorize the CLI.

   Authorize replicated cli web page

   [View a larger version of this image](/images/authorize-repl-cli.png)

---


# _logout

(Optional) When you are done using the Replicated CLI, remove any stored credentials created by the `replicated login` command:

   ```
   replicated logout
   ```

---


# _output


  --output
  string

    The output format to use. Valid values: json or table. Some commands also support wide. Default: table

    + + + +--- + + +# _password-stdin + + + --password-stdin + + Takes the password from stdin. + + + +--- + + +# _password + + + --password + string + The password to use when authenticating to the registry. + + + +--- + + +# _skip-validation + + + --skip-validation + + Skips the validation of the registry (not recommended). + + + +--- + + +# _sudo-install + +:::note +If you do not have root access to the `/usr/local/bin` directory, you can install with sudo by running `sudo mv replicated /usr/local/bin/replicated` instead of `mv replicated /usr/local/bin/replicated`. +::: + +--- + + +# _token-stdin + + + --token-stdin + + Takes the token from stdin. + + + +--- + + +# _token + + + --token + string + The API token used to access your application in the Vendor API. The default uses the $REPLICATED_API_TOKEN environment variable. + + + +--- + + +# _username + + + --username + string + The username with which to authenticate to the registry. + + + +--- + + +# _verify-install + +Verify that the installation was successful: + + ``` + replicated --help + ``` + +--- + + +# _yaml-dir + + + --yaml-dir + path + The directory containing multiple YAML manifest files for a release. (Required) + + + +--- + + +# _401-unauthorized + +:::note +If you see a `401 Unauthorized` error message, log out of the Replicated registry by running `helm registry logout registry.replicated.com` and then run `helm package . --dependency-update` again. +::: + +--- + + +# Chart.yaml + +```yaml +# Chart.yaml +dependencies: +- name: replicated + repository: oci://registry.replicated.com/library + version: 1.1.1 +``` + +For the latest version information for the Replicated SDK, see the [replicated-sdk repository](https://github.com/replicatedhq/replicated-sdk/releases) in GitHub. + + +--- + + +# _integration-mode-install + +You can install the Replicated SDK in integration mode to develop locally against the SDK API without needing to add the SDK to your application, create a release in the Replicated Vendor Portal, or make changes in your environment. You can also use integration mode to test sending instance data to the Vendor Portal, including any custom metrics that you configure. + +To use integration mode, install the Replicated SDK as a standalone component using a valid Development license created in the Vendor Portal. After you install in integration mode, the SDK provides default mock data for requests to the SDK API `app` endpoints. Requests to the `license` endpoints use the real data from your Development license. + +To install the SDK in integration mode: + +1. Create a Development license that you can use to install the SDK in integration mode: + + 1. In the Vendor Portal, go to **Customers** and click **Create customer**. + + 1. Complete the following fields: + + 1. For **Customer name**, add a name for the customer. + + 1. For **Assigned channel**, assign the customer to the channel that you use for testing. For example, Unstable. + + 1. For **Customer type**, select **Development**. + + 1. For **Customer email**, add the email address that you want to use for the license. + + 1. For **Install types**, ensure that the **Existing Cluster (Helm CLI)** option is enabled. + + 1. (Optional) Add any license field values that you want to use for testing: + + 1. For **Expiration policy**, you can add an expiration date for the license. + + 1. For **Custom fields**, you can add values for any custom license fields in your application. 
      For information about how to create custom license fields, see [Managing Customer License Fields](/vendor/licenses-adding-custom-fields).

   1. Click **Save Changes**.

1. On the **Manage customer** page for the customer you created, click **Helm install instructions**.

   Helm install instructions button on the manage customer page

   [View a larger version of this image](/images/helm-install-instructions-button.png)

1. In the **Helm install instructions** dialog, copy and run the command to log in to the Replicated registry.

   Registry login command in the Helm install instructions dialog

   [View a larger version of this image](/images/helm-install-instructions-registry-login.png)

1. From the same dialog, copy and run the command to install the SDK in integration mode:

   SDK integration mode install command in the Helm install instructions dialog

   [View a larger version of this image](/images/helm-install-instructions-sdk-integration.png)

1. Make requests to the SDK API from your application. You can access the SDK API for testing by forwarding the API service to your local machine. For more information, see [Port Forwarding the SDK API Service](/vendor/replicated-sdk-development#port-forward).

---


# _kots-version-req

To install the SDK with a Replicated installer, KOTS v1.104.0 or later and the SDK version 1.0.0-beta.12 or later are required. You can verify the version of KOTS installed with `kubectl kots version`. For Replicated Embedded Cluster installations, you can see the version of KOTS that is installed by your version of Embedded Cluster in the [Embedded Cluster Release Notes](/release-notes/rn-embedded-cluster).

---


# _overview

The Replicated SDK is a Helm chart that can be installed as a small service alongside your application. The SDK can be installed alongside applications packaged as Helm charts or Kubernetes manifests. The SDK can be installed using the Helm CLI or KOTS.

For information about how to distribute and install the SDK with your application, see [Installing the Replicated SDK](/vendor/replicated-sdk-installing).

Replicated recommends that the SDK be distributed with all applications because it provides access to key Replicated functionality, such as:

* Automatic access to insights and operational telemetry for instances running in customer environments, including granular details about the status of different application resources. For more information, see [About Instance and Event Data](/vendor/instance-insights-event-data).
* An in-cluster API that you can use to embed Replicated features into your application, including:
  * Collect custom metrics on instances running in online or air gap environments. See [Configuring Custom Metrics](/vendor/custom-metrics).
  * Check customer license entitlements at runtime. See [Querying Entitlements with the Replicated SDK API](/vendor/licenses-reference-sdk) and [Verifying License Field Signatures with the Replicated SDK API](/vendor/licenses-verify-fields-sdk-api).
  * Provide update checks to alert customers when new versions of your application are available for upgrade. See [Support Update Checks in Your Application](/reference/replicated-sdk-apis#support-update-checks-in-your-application) in _Replicated SDK API_.
  * Programmatically name or tag instances from the instance itself. See [Programmatically Set Tags](/reference/replicated-sdk-apis#post-appinstance-tags).
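
For example, once the SDK is installed alongside an application, in-cluster workloads can reach the SDK API through its service (a sketch, assuming the default service name `replicated` on port 3000):

```bash
# Query application metadata from inside the cluster
curl http://replicated:3000/api/v1/app/info

# Read a single license field, such as the expiration date
curl http://replicated:3000/api/v1/license/fields/expires_at
```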
+ +--- + + +# _registry-logout + +If you see a 401 Unauthorized error after running `helm dependency update`, run the following command to remove credentials from the Replicated registry, then re-run `helm dependency update`: + +```bash +helm registry logout registry.replicated.com +``` + +For more information, see [401 Unauthorized Error When Updating Helm Dependencies](replicated-sdk-installing#401). + +--- + + +# values.yaml + +When a user installs a Helm chart that includes the Replicated SDK as a dependency, a set of default SDK values are included in the `replicated` key of the parent chart's values file. + +For example: + +```yaml +# values.yaml + +replicated: + enabled: true + appName: gitea + channelID: 2jKkegBMseH5w... + channelName: Beta + channelSequence: 33 + integration: + enabled: true + license: {} + parentChartURL: oci://registry.replicated.com/gitea/beta/gitea + releaseCreatedAt: "2024-11-25T20:38:22Z" + releaseNotes: 'CLI release' + releaseSequence: 88 + replicatedAppEndpoint: https://replicated.app + versionLabel: Beta-1234 +``` + +These `replicated` values can be referenced by the application or set during installation as needed. For example, if users need to add labels or annotations to everything that runs in their cluster, then they can pass the labels or annotations to the relevant value in the SDK subchart. + +For the default Replicated SDK Helm chart values file, see [values.yaml.tmpl](https://github.com/replicatedhq/replicated-sdk/blob/main/chart/values.yaml.tmpl) in the [replicated-sdk](https://github.com/replicatedhq/replicated-sdk) repository in GitHub. + +The SDK Helm values also include a `replicated.license` field, which is a string that contains the YAML representation of the customer license. For more information about the built-in fields in customer licenses, see [Built-In License Fields](licenses-using-builtin-fields). + +--- + + +# _checkVersion + +Run `velero version --client-only` to check the version of the velero CLI that you installed as part of [Installing the Velero CLI](snapshots-velero-cli-installing). + +--- + + +# _installVelero + +Run one of the following commands to install Velero, depending on the version of the velero CLI you are using: + + * **Velero v1.10 and later**: + + ```bash + velero install \ + --no-default-backup-location \ + --no-secret \ + --use-node-agent --uploader-type=restic \ + --use-volume-snapshots=false \ + --plugins velero/velero-plugin-for-aws:v1.5.3 + ``` + + * **Velero versions earlier than v1.10**: + + ```bash + velero install \ + --no-default-backup-location \ + --no-secret \ + --use-restic \ + --use-volume-snapshots=false \ + --plugins velero/velero-plugin-for-aws:v1.5.3 + ``` + +--- + + +# _limitation-cli-restores + +Only full backups can be restored using the KOTS CLI. To restore an application from a partial backup, use the Admin Console. See [Restore the Application Only Using the Admin Console](/enterprise/snapshots-restoring-full#admin-console). + +--- + + +# _limitation-dr + +Only full backups that include both the application and the Admin Console can be restored to a new cluster in disaster recovery scenarios. Partial backups that include the application only _cannot_ be restored to a new cluster, and are therefore not useable for disaster recovery. + +--- + + +# _limitation-install-method + +Snapshots can be restored only to clusters that use the same installation method as the cluster the snapshot was taken from. 
For example, snapshots taken in an online (internet-connected) cluster must be restored to an online cluster. + +--- + + +# _limitation-no-ec-support + +The KOTS Snapshots feature is supported for existing cluster installations with KOTS and Replicated kURL installations only. Snapshots is not supported for Replicated Embedded Cluster installations. For more information about configuring backup and restore for Embedded Cluster, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery). + +--- + + +# _limitation-os + +Snapshots must be restored on the same operating system that the snapshot was taken on. For example, snapshots taken on a CentOS cluster must be restored on a CentOS cluster. + +--- + + +# _node-agent-mem-limit + +Increase the default memory limit for the node-agent (restic) Pod if your application is particularly large. For more information about configuring Velero resource requests and limits, see [Customize resource requests and limits](https://velero.io/docs/v1.10/customize-installation/#customize-resource-requests-and-limits) in the Velero documentation. + +For example, the following kubectl commands will increase the memory limit for the node-agent (restic) daemon set from the default of 1Gi to 2Gi. + +**Velero 1.10 and later**: + +``` +kubectl -n velero patch daemonset node-agent -p '{"spec":{"template":{"spec":{"containers":[{"name":"node-agent","resources":{"limits":{"memory":"2Gi"}}}]}}}}' +``` + +**Velero versions earlier than 1.10**: + +``` +kubectl -n velero patch daemonset restic -p '{"spec":{"template":{"spec":{"containers":[{"name":"restic","resources":{"limits":{"memory":"2Gi"}}}]}}}}' +``` + +Alternatively, you can potentially avoid the node-agent (restic) Pod reaching the memory limit during snapshot creation by running the following kubectl command to lower the memory garbage collection target percentage on the node-agent (restic) daemon set: + +**Velero 1.10 and later**: + +``` +kubectl -n velero set env daemonset/node-agent GOGC=1 +``` + +**Velero versions earlier than 1.10**: + +``` +kubectl -n velero set env daemonset/restic GOGC=1 +``` + +--- + + +# _registryCredentialsNote + +:::note +It is typical for the velero and node-agent (restic) Pods to be in the `ErrImagePull` or `ImagePullBackOff` state after you run the `velero install` command because Velero does not support passing registry credentials during installation. In Replicated KOTS v1.94.0 and later, this situation resolves itself after you complete the instructions to configure the storage destination. + +If you are on an earlier version of KOTS, Replicated recommends that you upgrade to KOTS v1.94.0 or later. Otherwise, you must patch the Velero deployment manually and add the image pull secret to access the registry. +::: + +--- + + +# _resticDaemonSet + +Configure the Restic DaemonSet specification if your cluster uses one of the following Kubernetes distributions: + * RancherOS + * OpenShift + * Microsoft Azure + * VMware Tanzu Kubernetes Grid Integrated Edition + +For information about how to configure the Restic DaemonSet for these distributions, see [Configure Restic DaemonSet spec](https://velero.io/docs/v1.9/restic/#configure-restic-daemonset-spec) in the Velero documentation. 
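
For example, on OpenShift the node-agent (restic) Pods need permission to run privileged and mount host volumes. Per the Velero documentation, one typical step (a sketch, assuming Velero is installed in the `velero` namespace) is:

```bash
# Allow the velero service account to run privileged pods
oc adm policy add-scc-to-user privileged -z velero -n velero
```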

---


# _restore-types

Snapshots supports the following types of restores:
* Restore both the application and the KOTS Admin Console (also referred to as a _full_ restore)
* Restore the KOTS Admin Console only
* Restore the application only (also referred to as a _partial_ restore)

---


# _restoreTable

| Restore Type | Description | Interface to Use |
|---|---|---|
| Full restore | Restores the Admin Console and the application. | KOTS CLI |
| Partial restore | Restores the application only. | KOTS CLI or Admin Console |
| Admin console | Restores the Admin Console only. | KOTS CLI |

---


# _step-get-backups

Run the [`kubectl kots get backups`](/reference/kots-cli-get-backups) command to get the list of full backups for the instance.

---


# _step-restore

Run the following command to restore a full backup:

   ```bash
   kubectl kots restore --from-backup BACKUP
   ```
   Replace `BACKUP` with the name of the backup to restore from.

   For more information about the available options for the `kots restore` command, including application-only and Admin Console-only options, see [restore](/reference/kots-cli-restore-index/).

---


# _updateDefaultStorage

If Velero is already installed, you can update your storage destination in the Replicated Admin Console.

For embedded clusters with the Velero add-on, you must update the default internal storage settings in the Admin Console because internal storage is insufficient for full backups.

For more information about updating storage, see [Updating Settings in the Admin Console](snapshots-updating-with-admin-console).

---


# _aggregate-status-intro

When you provide more than one Kubernetes resource, Replicated aggregates all resource statuses to display a single application status.

Replicated uses the least available resource status to represent the aggregate application status. For example, if at least one resource has an Unavailable status, then the aggregate application status is Unavailable.

---


# _aggregateStatus

The following table describes the resource statuses that define each aggregate application status:
| Resource Statuses | Aggregate Application Status |
|---|---|
| No status available for any resource | Missing |
| One or more resources Unavailable | Unavailable |
| One or more resources Degraded | Degraded |
| One or more resources Updating | Updating |
| All resources Ready | Ready |

---


# _statusesTable

| | Deployment | StatefulSet | Service | Ingress | PVC | DaemonSet |
|---|---|---|---|---|---|---|
| Ready | Ready replicas equals desired replicas | Ready replicas equals desired replicas | All desired endpoints are ready, any load balancers have been assigned | All desired backend service endpoints are ready, any load balancers have been assigned | Claim is bound | Ready daemon pods equals desired scheduled daemon pods |
| Updating | The deployed replicas are from a different revision | The deployed replicas are from a different revision | N/A | N/A | N/A | The deployed daemon pods are from a different revision |
| Degraded | At least 1 replica is ready, but more are desired | At least 1 replica is ready, but more are desired | At least one endpoint is ready, but more are desired | At least one backend service endpoint is ready, but more are desired | N/A | At least one daemon pod is ready, but more are desired |
| Unavailable | No replicas are ready | No replicas are ready | No endpoints are ready, no load balancer has been assigned | No backend service endpoints are ready, no load balancer has been assigned | Claim is pending or lost | No daemon pods are ready |
| Missing | Missing is an initial deployment status indicating that informers have not reported their status because the application has just been deployed and the underlying resource has not been created yet. After the resource is created, the status changes. However, if a resource changes from another status to Missing, then the resource was either deleted or the informers failed to report a status. | | | | | |
    + +--- + + +# _configmap-note + +:::note +Alternatively, you can use a ConfigMap (`kind: ConfigMap`) if the specification will not collect private information from the cluster. +::: + +--- + + +# _customize-support-bundle-spec + +When customizing your support bundle specifications, consider the following guidelines: + +- The `clusterInfo` and `clusterResources` collectors are useful because they collect a large amount of data to help with installation and debugging. + + ```yaml + apiVersion: troubleshoot.sh/v1beta2 + kind: SupportBundle + metadata: + name: collectors + spec: + collectors: + - clusterInfo: + exclude: false + - clusterResources: + exclude: false + ``` +- You can edit the default collector properties. If `clusterResources` is defined in your specification, the default namespace cannot be removed, but you can add a namespace to the `namespaces` field. + + ```yaml + apiVersion: troubleshoot.sh/v1beta2 + kind: SupportBundle + metadata: + name: collectors + spec: + collectors: + - clusterInfo: + exclude: false + - clusterResources: + namespaces: + - default + - APP_NAMESPACE + ``` + Replace `APP_NAMESPACE` with the application namespace. + +- Add application Pod logs and set the collection limits for the number of lines logged. Typically the selector attribute is matched to the labels. + + 1. To get the labels for an application, inspect the Pods YAML. + + 1. Create collectors to include logs from these pods in a bundle. Depending on the complexity of an application's labeling schema, you might need a few different declarations of the logs collector. You can include the `logs` collector specification multiple times. + + The limits field can support `maxAge` or `maxLines`. This limits the output to the constraints provided. **Default:** `maxLines: 10000` + + **Example:** + + ```yaml + apiVersion: troubleshoot.sh/v1beta2 + kind: SupportBundle + metadata: + name: collectors + spec: + collectors: + - logs: + selector: + - app=api + namespace: default + limits: + maxLines: 10000 + ``` + +- Add any custom collectors to the file. Collectors that Replicated recommends considering are: + + - **Kubernetes resources:** Use for custom resource definitions (CRDs), secrets, and ConfigMaps, if they are required for your application to work. + - **Databases:** Return a selection of rows or entire tables. + - **Volumes:** Ensure that an application's persistent state files exist, are readable/writeable, and have the right permissions. + - **Pods:** Run a pod from a custom image. + - **Files:** Copy files from pods and hosts. + - **HTTP:** Consume your own application APIs with HTTP requests. If your application has its own API that serves status, metrics, performance data, and so on, this information can be collected and analyzed. + +- Add analyzers based on conditions that you expect for your application. For example, you might require that a cluster have at least 2 CPUs and 4GB memory available. + + Good analyzers clearly identify failure modes. For example, if you can identify a log message from your database component that indicates a problem, you should write an analyzer that checks for that log. + + At a minimum, include application log analyzers. A simple text analyzer can detect specific log lines and inform an end user of remediation steps. + + Analyzers that Replicated recommends considering are: + + - **Resource statuses:** Check the status of various resources, such as Deployments, StatefulSets, Jobs, and so on. + - **Regular expressions:** Analyze arbitrary data. 
  - **Databases:** Check the version and connection status.

- If needed, you can add custom redactors to the default redactors. Disabling the redactors is not recommended.

---


# _deploy-status-cr

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: SupportBundle
metadata:
  name: example
spec:
  collectors: []
  analyzers:
    - deploymentStatus:
        name: api
        namespace: default
        outcomes:
          - fail:
              when: "< 1"
              message: The API deployment does not have any ready replicas.
          - warn:
              when: "= 1"
              message: The API deployment has only a single ready replica.
          - pass:
              message: There are multiple replicas of the API deployment ready.
```

---


# _deploy-status-secret

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: example
  labels:
    troubleshoot.sh/kind: support-bundle
stringData:
  support-bundle-spec: |-
    apiVersion: troubleshoot.sh/v1beta2
    kind: SupportBundle
    metadata:
      name: example
    spec:
      collectors: []
      analyzers:
        - deploymentStatus:
            name: api
            namespace: default
            outcomes:
              - fail:
                  when: "< 1"
                  message: The API deployment does not have any ready replicas.
              - warn:
                  when: "= 1"
                  message: The API deployment has only a single ready replica.
              - pass:
                  message: There are multiple replicas of the API deployment ready.
```

---


# _ec-support-bundle-intro

Embedded Cluster includes a default support bundle spec that collects both host- and cluster-level information.

The host-level information is useful for troubleshooting failures related to host configuration like DNS, networking, or storage problems. Cluster-level information includes details about the components provided by Replicated, such as the Admin Console and Embedded Cluster operator that manage install and upgrade operations. If the cluster did not install successfully and cluster-level information is not available, then it is excluded from the bundle.

In addition to the host- and cluster-level details provided by the default Embedded Cluster spec, support bundles generated for Embedded Cluster installations also include app-level details provided by any custom support bundle specs that you included in the application release.


---


# _generate-bundle-admin-console

The Replicated KOTS Admin Console includes a **Troubleshoot** page where you can generate a support bundle and review remediation suggestions for troubleshooting. You can also download the support bundle from the Admin Console.

To generate a support bundle in the KOTS Admin Console:

1. Log in to the Admin Console and go to the **Troubleshoot** tab.

1. Click **Analyze** to start analyzing the application. Or, copy the command provided to generate a bundle from the CLI.

   The analysis executes the support bundle plugin. After the analysis completes, the bundle is available on the **Troubleshoot** tab in the Admin Console. If any known issues are detected, they are highlighted with possible remediation suggestions.

   :::note
   No data leaves the cluster. Data is never sent across the internet or to anyone else.
   :::

1. (Optional) If enabled for your online installation, you might also see a **Send bundle to vendor** button. Clicking this button sends the support bundle directly to your vendor. Replicated recommends following up with your vendor to let them know the bundle has been provided.

   Send bundle to vendor screen

   [View a larger version of this image](/images/send-bundle-to-vendor.png)

1. 
(Optional) Click **Download bundle** to download the support bundle. You can send the bundle to your vendor for assistance. + +--- + + +# _generate-bundle-default-kots + +For KOTS installations, you can generate a support bundle using the default KOTS spec. This is useful if the application does not have a support bundle spec included. + +#### Online Environments + +In an online environment, run the following command to generate a support bundle using the default KOTS spec: + +``` +kubectl support-bundle https://kots.io +``` + +#### Air Gap Environments + +For air gap environments, perform the following steps to generate a support bundle using the default KOTS spec: + +1. Run the following command from a computer with internet access to download the default KOTS spec: + + ``` + curl -o spec.yaml https://kots.io -H 'User-agent:Replicated_Troubleshoot/v1beta1' + ``` + +1. Upload the `spec.yaml` file to your air gap server. + +1. Run the following command to create a support bundle using the uploaded `spec.yaml` file: + + ``` + kubectl support-bundle /path/to/spec.yaml + ``` + +--- + + +# _generate-bundle-ec + +There are different steps to generate a support bundle depending on the version of Embedded Cluster installed. + +### For Versions 1.17.0 and Later + +For Embedded Cluster 1.17.0 and later, you can run the Embedded Cluster `support-bundle` command to generate a support bundle. + +The `support-bundle` command uses the default Embedded Cluster support bundle spec to collect both cluster- and host-level information. It also automatically includes any application-specific support bundle specs in the generated bundle. + +To generate a support bundle: + +1. SSH onto a controller node. + + :::note + You can SSH onto a worker node to generate a support bundle that contains information specific to that node. However, when run on a worker node, the `support-bundle` command does not capture cluster-wide information. + ::: + +1. Run the following command: + + ```bash + sudo ./APP_SLUG support-bundle + ``` + + Where `APP_SLUG` is the unique slug for the application. + +### For Versions Earlier Than 1.17.0 + +For Embedded Cluster versions earlier than 1.17.0, you can generate a support bundle from the shell using the kubectl support-bundle plugin. + +To generate a bundle with the support-bundle plugin, you pass the default Embedded Cluster spec to collect both cluster- and host-level information. You also pass the `--load-cluster-specs` flag, which discovers all support bundle specs that are defined in Secrets or ConfigMaps in the cluster. This ensures that any application-specific specs are also included in the bundle. For more information, see [Discover Cluster Specs](https://troubleshoot.sh/docs/support-bundle/discover-cluster-specs/) in the Troubleshoot documentation. + +To generate a bundle: + +1. SSH onto a controller node. + +1. Use the Embedded Cluster shell command to start a shell with access to the cluster: + + ```bash + sudo ./APP_SLUG shell + ``` + Where `APP_SLUG` is the unique slug for the application. + + The output looks similar to the following: + + ```bash + __4___ + _ \ \ \ \ Welcome to APP_SLUG debug shell. + <'\ /_/_/_/ This terminal is now configured to access your cluster. + ((____!___/) Type 'exit' (or CTRL+d) to exit. + \0\0\0\0\/ Happy hacking. 
+ ~~~~~~~~~~~ + root@alex-ec-2:/home/alex# export KUBECONFIG="/var/lib/embedded-cluster/k0s/pki/admin.conf" + root@alex-ec-2:/home/alex# export PATH="$PATH:/var/lib/embedded-cluster/bin" + root@alex-ec-2:/home/alex# source <(kubectl completion bash) + root@alex-ec-2:/home/alex# source /etc/bash_completion + ``` + + The appropriate kubeconfig is exported, and the location of useful binaries like kubectl and the preflight and support-bundle plugins is added to PATH. + + :::note + The shell command cannot be run on non-controller nodes. + ::: + +2. Generate the support bundle using the default Embedded Cluster spec and the `--load-cluster-specs` flag: + + ```bash + kubectl support-bundle --load-cluster-specs /var/lib/embedded-cluster/support/host-support-bundle.yaml + ``` + + +--- + + +# _generate-bundle-host + +To generate a kURL host support bundle: + +1. Do one of the following: + + - Save the host support bundle YAML file on the host. For more information about creating a YAML spec for a host support bundle, see [Create a Host Support Bundle Spec](/vendor/support-host-support-bundles#create-a-host-support-bundle-spec). + + - Run the following command to download the default kURL host support bundle YAML file from the Troubleshoot repository: + + ``` + kubectl support-bundle https://raw.githubusercontent.com/replicatedhq/troubleshoot-specs/main/host/default.yaml + ``` + + :::note + For air gap environments, download the YAML file and copy it to the air gap machine. + ::: + +1. Run the following command on the host to generate a support bundle: + + ``` + ./support-bundle --interactive=false PATH/FILE.yaml + ``` + + Replace: + - `PATH` with the path to the host support bundle YAML file. + - `FILE` with the name of the host support bundle YAML file from your vendor. + + :::note + Root access is typically not required to run the host collectors and analyzers. However, depending on what is being collected, you might need to run the support-bundle binary with elevated permissions. For example, if you run the `filesystemPerformance` host collector against `/var/lib/etcd` and the user running the binary does not have permissions on this directory, the collection process fails. + ::: + +1. Share the host support bundle with your vendor's support team, if needed. + +1. Repeat these steps for each node because there is no method to generate host support bundles on remote hosts. If you have a multi-node kURL cluster, you must run the support-bundle binary on each node to generate a host support bundle for each node. + +--- + + +# _generate-bundle + +Run the following command to generate a bundle: + +```bash +kubectl support-bundle --load-cluster-specs +``` + +The `--load-cluster-specs` flag automatically discovers all support bundle specs that are defined in Secrets or ConfigMaps in the cluster. For more information, see [Discover Cluster Specs](https://troubleshoot.sh/docs/support-bundle/discover-cluster-specs/) in the Troubleshoot documentation. + +For a complete list of options with the `kubectl support-bundle` command, run `kubectl support-bundle --help`. For more information, see [Collecting a Support Bundle](https://troubleshoot.sh/docs/support-bundle/collecting/) in the Troubleshoot documentation. 
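
For a spec to be discovered by `--load-cluster-specs`, it must live in a Secret or ConfigMap that carries the `troubleshoot.sh/kind: support-bundle` label. A minimal sketch of a ConfigMap-based spec (the names and data key are illustrative):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: my-app-support-bundle
  labels:
    troubleshoot.sh/kind: support-bundle
data:
  support-bundle-spec: |
    apiVersion: troubleshoot.sh/v1beta2
    kind: SupportBundle
    metadata:
      name: my-app
    spec:
      collectors:
        - clusterInfo: {}
```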
+ +--- + + +# _http-requests-cr + +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: SupportBundle +metadata: + name: example +spec: + collectors: + - http: + collectorName: slack + get: + url: https://api.slack.com/methods/api.test + analyzers: + - textAnalyze: + checkName: Slack Accessible + fileName: slack.json + regex: '"status": 200,' + outcomes: + - pass: + when: "true" + message: "Can access the Slack API" + - fail: + when: "false" + message: "Cannot access the Slack API. Check that the server can reach the internet and check [status.slack.com](https://status.slack.com)." +``` + +--- + + +# _http-requests-secret + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: example + labels: + troubleshoot.sh/kind: support-bundle +stringData: + support-bundle-spec: |- + apiVersion: troubleshoot.sh/v1beta2 + kind: SupportBundle + metadata: + name: example + spec: + collectors: + - http: + collectorName: slack + get: + url: https://api.slack.com/methods/api.test + analyzers: + - textAnalyze: + checkName: Slack Accessible + fileName: slack.json + regex: '"status": 200,' + outcomes: + - pass: + when: "true" + message: "Can access the Slack API" + - fail: + when: "false" + message: "Cannot access the Slack API. Check that the server can reach the internet and check [status.slack.com](https://status.slack.com)." +``` + +--- + + +# _install-plugin + +The support-bundle plugin (a kubectl plugin) is required to generate support bundles from the command line. + +You can install the support-bundle plugin using krew or install it manually from the release archives. + +:::note +For Replicated Embedded Cluster and Replicated kURL installations, the support-bundle plugin is automatically installed on all of the control plane nodes. You can skip this prerequisite. +::: + +#### Install or Upgrade using krew + +To install the support-bundle plugin using krew, do one of the following: + +* If krew is _not_ installed already, run the following command to install krew and the support-bundle plugin at the same time: + + ``` + curl https://krew.sh/support-bundle | bash + ``` + +* If krew is installed already, run the following command to install the plug-in: + + ``` + kubectl krew install support-bundle + ``` + +* To upgrade your existing support-bundle plugin using krew: + + ``` + kubectl krew upgrade support-bundle + ``` + +#### Install Manually + +If you do not want to install the plugin using krew or want an easier way to install the plugin in an air gap environment, you can install the plugin manually from the release archives. + +To install the support-bundle plugin manually: + +1. Run the following command to download and unarchive the latest release, and move the plugin to your $PATH: + + ``` + curl -L https://github.com/replicatedhq/troubleshoot/releases/latest/download/support-bundle_linux_amd64.tar.gz | tar xzvf - + sudo mv ./support-bundle /usr/local/bin/kubectl-support_bundle + ``` + :::note + If you do not have root access, or choose not to add the support-bundle plugin to your path, you can run the binary directly from where you unzipped it by executing `./support-bundle`. If you choose not to put the plugin into your $PATH, then replace all instances of `kubectl support-bundle` in these instructions with `./support-bundle` or with the absolute path to the binary. + ::: + +1. 
(Optional) Run the following command to test that the installation is working:

   ```
   kubectl support-bundle --help
   ```


---


# _k8s-version-cr

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: SupportBundle
metadata:
  name: example
spec:
  collectors: []
  analyzers:
    - clusterVersion:
        outcomes:
          - fail:
              message: This application relies on kubernetes features only present in 1.16.0
                and later.
              uri: https://kubernetes.io
              when: < 1.16.0
          - warn:
              message: Your cluster is running a version of kubernetes that is out of support.
              uri: https://kubernetes.io
              when: < 1.24.0
          - pass:
              message: Your cluster meets the recommended and required versions of Kubernetes.
```

---


# _k8s-version-secret

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: example
  labels:
    troubleshoot.sh/kind: support-bundle
stringData:
  support-bundle-spec: |-
    apiVersion: troubleshoot.sh/v1beta2
    kind: SupportBundle
    metadata:
      name: example
    spec:
      collectors: []
      analyzers:
        - clusterVersion:
            outcomes:
              - fail:
                  message: This application relies on kubernetes features only present in 1.16.0
                    and later.
                  uri: https://kubernetes.io
                  when: < 1.16.0
              - warn:
                  message: Your cluster is running a version of kubernetes that is out of support.
                  uri: https://kubernetes.io
                  when: < 1.24.0
              - pass:
                  message: Your cluster meets the recommended and required versions of Kubernetes.
```

---


# _logs-limits-cr

```yaml
apiVersion: troubleshoot.replicated.com/v1beta1
kind: SupportBundle
metadata:
  name: example
spec:
  collectors:
    - logs:
        selector:
          - app.kubernetes.io/name=myapp
        namespace: '{{repl Namespace }}'
        limits:
          maxAge: 720h
          maxLines: 10000
```

---


# _logs-limits-secret

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: example
  labels:
    troubleshoot.sh/kind: support-bundle
stringData:
  support-bundle-spec: |-
    apiVersion: troubleshoot.sh/v1beta2
    kind: SupportBundle
    metadata:
      name: example
    spec:
      collectors:
        - logs:
            selector:
              - app.kubernetes.io/name=myapp
            namespace: {{ .Release.Namespace }}
            limits:
              maxAge: 720h
              maxLines: 10000
```

---


# _logs-selectors-cr

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: SupportBundle
metadata:
  name: example
spec:
  collectors:
    - logs:
        namespace: example-namespace
        selector:
          - app=slackernews-nginx
    - logs:
        namespace: example-namespace
        selector:
          - app=slackernews-api
    - logs:
        namespace: example-namespace
        selector:
          - app=slackernews-frontend
    - logs:
        selector:
          - app=postgres
  analyzers:
    - textAnalyze:
        checkName: Axios Errors
        fileName: slackernews-frontend-*/slackernews.log
        regex: "error - AxiosError"
        outcomes:
          - pass:
              when: "false"
              message: "Axios errors not found in logs"
          - fail:
              when: "true"
              message: "Axios errors found in logs"
```

---


# _logs-selectors-secret

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: example
  labels:
    troubleshoot.sh/kind: support-bundle
stringData:
  support-bundle-spec: |-
    apiVersion: troubleshoot.sh/v1beta2
    kind: SupportBundle
    metadata:
      name: example
    spec:
      collectors:
        - logs:
            namespace: {{ .Release.Namespace }}
            selector:
              - app=slackernews-nginx
        - logs:
            namespace: {{ .Release.Namespace }}
            selector:
              - app=slackernews-api
        - logs:
            namespace: {{ .Release.Namespace }}
            selector:
              - app=slackernews-frontend
        - logs:
            selector:
              - app=postgres
      analyzers:
        - textAnalyze:
            checkName: Axios Errors
            fileName: slackernews-frontend-*/slackernews.log
            regex: "error - AxiosError"
            outcomes:
              - pass:
                  when: "false"
                  message: "Axios errors not found in logs"
              - fail:
                  when: "true"
                  message: "Axios errors found in logs"
```

---


# _node-resources-cr

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: SupportBundle
metadata:
  name: example
spec:
  collectors: []
  analyzers:
    - nodeResources:
        checkName: One node must have 2 GB RAM and 1 CPU Cores
        filters:
          allocatableMemory: 2Gi
          cpuCapacity: "1"
        outcomes:
          - fail:
              when: count() < 1
              message: Cannot find a node with sufficient memory and cpu
          - pass:
              message: Sufficient CPU and memory is available
    - nodeResources:
        checkName: Must have at least 3 nodes in the cluster
        outcomes:
          - fail:
              when: "count() < 3"
              message: This application requires at least 3 nodes
          - warn:
              when: "count() < 5"
              message: This application recommends at least 5 nodes.
          - pass:
              message: This cluster has enough nodes.
    - nodeResources:
        checkName: Each node must have at least 40 GB of ephemeral storage
        outcomes:
          - fail:
              when: "min(ephemeralStorageCapacity) < 40Gi"
              message: Nodes in this cluster do not have at least 40 GB of ephemeral storage.
              uri: https://kurl.sh/docs/install-with-kurl/system-requirements
          - warn:
              when: "min(ephemeralStorageCapacity) < 100Gi"
              message: Nodes in this cluster are recommended to have at least 100 GB of ephemeral storage.
              uri: https://kurl.sh/docs/install-with-kurl/system-requirements
          - pass:
              message: The nodes in this cluster have enough ephemeral storage.
```

---


# _node-resources-secret

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: example
  labels:
    troubleshoot.sh/kind: support-bundle
stringData:
  support-bundle-spec: |-
    apiVersion: troubleshoot.sh/v1beta2
    kind: SupportBundle
    metadata:
      name: example
    spec:
      collectors: []
      analyzers:
        - nodeResources:
            checkName: One node must have 2 GB RAM and 1 CPU Cores
            filters:
              allocatableMemory: 2Gi
              cpuCapacity: "1"
            outcomes:
              - fail:
                  when: count() < 1
                  message: Cannot find a node with sufficient memory and cpu
              - pass:
                  message: Sufficient CPU and memory is available
        - nodeResources:
            checkName: Must have at least 3 nodes in the cluster
            outcomes:
              - fail:
                  when: "count() < 3"
                  message: This application requires at least 3 nodes
              - warn:
                  when: "count() < 5"
                  message: This application recommends at least 5 nodes.
              - pass:
                  message: This cluster has enough nodes.
        - nodeResources:
            checkName: Each node must have at least 40 GB of ephemeral storage
            outcomes:
              - fail:
                  when: "min(ephemeralStorageCapacity) < 40Gi"
                  message: Nodes in this cluster do not have at least 40 GB of ephemeral storage.
                  uri: https://kurl.sh/docs/install-with-kurl/system-requirements
              - warn:
                  when: "min(ephemeralStorageCapacity) < 100Gi"
                  message: Nodes in this cluster are recommended to have at least 100 GB of ephemeral storage.
                  uri: https://kurl.sh/docs/install-with-kurl/system-requirements
              - pass:
                  message: The nodes in this cluster have enough ephemeral storage.
```

---


# _node-status-cr

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: SupportBundle
metadata:
  name: example
spec:
  collectors: []
  analyzers:
    - nodeResources:
        checkName: Node status check
        outcomes:
          - fail:
              when: "nodeCondition(Ready) == False"
              message: "Not all nodes are online."
          - warn:
              when: "nodeCondition(Ready) == Unknown"
              message: "Not all nodes are online."
          - pass:
              message: "All nodes are online."
```

---


# _node-status-secret

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: example
  labels:
    troubleshoot.sh/kind: support-bundle
stringData:
  support-bundle-spec: |-
    apiVersion: troubleshoot.sh/v1beta2
    kind: SupportBundle
    metadata:
      name: example
    spec:
      collectors: []
      analyzers:
        - nodeResources:
            checkName: Node status check
            outcomes:
              - fail:
                  when: "nodeCondition(Ready) == False"
                  message: "Not all nodes are online."
              - warn:
                  when: "nodeCondition(Ready) == Unknown"
                  message: "Not all nodes are online."
              - pass:
                  message: "All nodes are online."
```

---


# _redis-mysql-cr

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: SupportBundle
metadata:
  name: example
spec:
  collectors:
    - mysql:
        collectorName: mysql
        uri: 'root:my-secret-pw@tcp(localhost:3306)/mysql'
        parameters:
          - character_set_server
          - collation_server
          - init_connect
          - innodb_file_format
          - innodb_large_prefix
          - innodb_strict_mode
          - log_bin_trust_function_creators
    - redis:
        collectorName: my-redis
        uri: rediss://default:replicated@server:6380
```

---


# _redis-mysql-secret

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: example
  labels:
    troubleshoot.sh/kind: support-bundle
stringData:
  support-bundle-spec: |-
    apiVersion: troubleshoot.sh/v1beta2
    kind: SupportBundle
    metadata:
      name: example
    spec:
      collectors:
        - mysql:
            collectorName: mysql
            uri: 'root:my-secret-pw@tcp(localhost:3306)/mysql'
            parameters:
              - character_set_server
              - collation_server
              - init_connect
              - innodb_file_format
              - innodb_large_prefix
              - innodb_strict_mode
              - log_bin_trust_function_creators
        - redis:
            collectorName: my-redis
            uri: rediss://default:replicated@server:6380
```

---


# _run-pods-cr

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: SupportBundle
metadata:
  name: example
spec:
  collectors:
    - runPod:
        collectorName: "static-hi"
        podSpec:
          containers:
            - name: static-hi
              image: alpine:3
              command: ["echo", "hi static!"]
  analyzers:
    - textAnalyze:
        checkName: Said hi!
        fileName: /static-hi.log
        regex: 'hi static'
        outcomes:
          - fail:
              message: Didn't say hi.
          - pass:
              message: Said hi!
```

---


# _run-pods-secret

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: example
  labels:
    troubleshoot.sh/kind: support-bundle
stringData:
  support-bundle-spec: |-
    apiVersion: troubleshoot.sh/v1beta2
    kind: SupportBundle
    metadata:
      name: example
    spec:
      collectors:
        - runPod:
            collectorName: "static-hi"
            podSpec:
              containers:
                - name: static-hi
                  image: alpine:3
                  command: ["echo", "hi static!"]
      analyzers:
        - textAnalyze:
            checkName: Said hi!
            fileName: /static-hi.log
            regex: 'hi static'
            outcomes:
              - fail:
                  message: Didn't say hi.
              - pass:
                  message: Said hi!
```

---


# _support-bundle-add-analyzers

Add analyzers based on conditions that you expect for your application. For example, you might require that a cluster have at least 2 CPUs and 4GB memory available.

Good analyzers clearly identify failure modes. For example, if you can identify a log message from your database component that indicates a problem, you should write an analyzer that checks for that log.

At a minimum, include application log analyzers. A simple text analyzer can detect specific log lines and inform an end user of remediation steps.
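
For example, a text analyzer along these lines (the log path and regex are illustrative) turns a known error line into a clear failure with remediation guidance:

```yaml
analyzers:
  - textAnalyze:
      checkName: Database connection errors
      fileName: myapp-*/myapp.log
      regex: "connection refused"
      outcomes:
        - fail:
            when: "true"
            message: The logs contain database connection errors. Check that the database service is running and reachable.
        - pass:
            when: "false"
            message: No database connection errors were found in the logs.
```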

Analyzers that Replicated recommends considering are:

- **Resource statuses:** Check the status of various resources, such as Deployments, StatefulSets, Jobs, and so on.
- **Regular expressions:** Analyze arbitrary data.
- **Databases:** Check the version and connection status.

---


# _support-bundle-add-logs

Replicated recommends adding application Pod logs and setting the collection limits for the number of lines logged. Typically, the selector attribute is matched to the labels.

To get the labels for an application, either inspect the YAML or run `kubectl get pods --show-labels`.

After the labels are discovered, create collectors to include logs from these pods in a bundle. Depending on the complexity of an application's labeling schema, you might need a few different declarations of the logs collector. You can include the `logs` collector as many times as needed.

The `limits` field can support `maxAge` or `maxLines`. This limits the output to the constraints provided. **Default:** `maxLines: 10000`

---


# _support-bundle-custom-collectors

Add any custom collectors to the file. Collectors that Replicated recommends considering are:

- **Kubernetes resources:** Use for custom resource definitions (CRDs), Secrets, and ConfigMaps, if they are required for your application to work.
- **Databases:** Return a selection of rows or entire tables.
- **Volumes:** Ensure that an application's persistent state files exist, are readable/writeable, and have the right permissions.
- **Pods:** Run a Pod from a custom image.
- **Files:** Copy files from Pods and hosts.
- **HTTP:** Consume your own application APIs with HTTP requests. If your application has its own API that serves status, metrics, performance data, and so on, this information can be collected and analyzed.

---


# _go-sprig

KOTS template functions are based on the Go text/template library. All functionality of the Go templating language, including if statements, loops, and variables, is supported with KOTS template functions. For more information, see [text/template](https://golang.org/pkg/text/template/) in the Go documentation.

Additionally, KOTS template functions can be used with all functions in the Sprig library. Sprig provides several template functions for the Go templating language, such as type conversion, string, and integer math functions. For more information, see [Sprig Function Documentation](https://masterminds.github.io/sprig/).
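
For example, Sprig string functions can be piped onto the output of a KOTS template function (a sketch; the `smtp_hostname` config item is hypothetical):

```yaml
# Normalize a user-provided hostname: trim whitespace, then lowercase it
value: 'repl{{ ConfigOption "smtp_hostname" | trim | lower }}'
```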

---


# KOTS Config custom resource

The following example uses:
* KOTS [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue) template function to evaluate the number of seats permitted by the license
* Sprig [atoi](https://masterminds.github.io/sprig/conversion.html) function to convert the string values returned by LicenseFieldValue to integers
* [Go binary comparison operators](https://pkg.go.dev/text/template#hdr-Functions) `gt`, `lt`, `ge`, and `le` to compare the integers

```yaml
# KOTS Config custom resource
apiVersion: kots.io/v1beta1
kind: Config
metadata:
  name: config-sample
spec:
  groups:
  - name: example_group
    title: Example Config
    items:
    - name: small
      title: Small (100 or Fewer Seats)
      type: text
      default: Default for small teams
      # Use le and atoi functions to display this config item
      # only when the value of the numSeats entitlement is
      # less than or equal to 100
      when: repl{{ le (atoi (LicenseFieldValue "numSeats")) 100 }}
    - name: medium
      title: Medium (101-1000 Seats)
      type: text
      default: Default for medium teams
      # Use ge, le, and atoi functions to display this config item
      # only when the value of the numSeats entitlement is
      # greater than or equal to 101 and less than or equal to 1000
      when: repl{{ (and (ge (atoi (LicenseFieldValue "numSeats")) 101) (le (atoi (LicenseFieldValue "numSeats")) 1000)) }}
    - name: large
      title: Large (More Than 1000 Seats)
      type: text
      default: Default for large teams
      # Use gt and atoi functions to display this config item
      # only when the value of the numSeats entitlement is
      # greater than 1000
      when: repl{{ gt (atoi (LicenseFieldValue "numSeats")) 1000 }}
```

As shown in the image below, if the user's license contains `numSeats: 150`, then the `medium` item is displayed on the **Config** page and the `small` and `large` items are not displayed:

Config page displaying the Medium (101-1000 Seats) item

[View a larger version of this image](/images/config-example-numseats.png)

---


# _ne-comparison

In the example below, the `ingress_type` field is displayed on the **Config** page only when the distribution of the cluster is _not_ [Replicated Embedded Cluster](/vendor/embedded-overview). This ensures that only users deploying to their own existing cluster are able to select the method for ingress.

The following example uses:
* KOTS [Distribution](/reference/template-functions-static-context#distribution) template function to return the Kubernetes distribution of the cluster where KOTS is running
* [ne](https://pkg.go.dev/text/template#hdr-Functions) (_not equal_) Go binary operator to compare the rendered value of the Distribution template function to a string, then return `true` if the values are not equal to one another

```yaml
apiVersion: kots.io/v1beta1
kind: Config
metadata:
  name: config
spec:
  groups:
  # Ingress settings
  - name: ingress_settings
    title: Ingress Settings
    description: Configure Ingress
    items:
    - name: ingress_type
      title: Ingress Type
      help_text: |
        Select how traffic will ingress to the application.
      type: radio
      items:
      - name: ingress_controller
        title: Ingress Controller
      - name: load_balancer
        title: Load Balancer
      default: "ingress_controller"
      required: true
      when: 'repl{{ ne Distribution "embedded-cluster" }}'
  # Database settings
  - name: database_settings
    title: Database
    items:
    - name: postgres_type
      help_text: Would you like to use an embedded postgres instance, or connect to an external instance that you manage?
      type: radio
      title: Postgres
      default: embedded_postgres
      items:
      - name: embedded_postgres
        title: Embedded Postgres
      - name: external_postgres
        title: External Postgres
```

The following image shows how the `ingress_type` field is hidden when the distribution of the cluster is `embedded-cluster`. Only the `postgres_type` item is displayed:

Config page with a Postgres field

[View a larger version of this image](/images/config-example-distribution-not-ec.png)

Conversely, when the distribution of the cluster is not `embedded-cluster`, both fields are displayed:

Config page with Ingress and Postgres fields

[View a larger version of this image](/images/config-example-distribution-not-ec-2.png)

---


# KOTS Config custom resource

The following example uses:
* KOTS [Distribution](/reference/template-functions-static-context#distribution) template function to return the Kubernetes distribution of the cluster where KOTS is running
* [eq](https://pkg.go.dev/text/template#hdr-Functions) (_equal_) Go binary operator to compare the rendered value of the Distribution template function to a string, then return the boolean truth of the comparison

```yaml
# KOTS Config custom resource
apiVersion: kots.io/v1beta1
kind: Config
metadata:
  name: config-sample
spec:
  groups:
  - name: example_settings
    title: My Example Config
    description: Example fields for using Distribution template function
    items:
    - name: gke_distribution
      type: label
      title: "You are deploying to GKE"
      # Use the eq binary operator to check if the rendered value
      # of the KOTS Distribution template function is equal to gke
      when: repl{{ eq Distribution "gke" }}
    - name: openshift_distribution
      type: label
      title: "You are deploying to OpenShift"
      when: repl{{ eq Distribution "openShift" }}
    - name: eks_distribution
      type: label
      title: "You are deploying to EKS"
      when: repl{{ eq Distribution "eks" }}
    ...
```

The following image shows how only the `gke_distribution` item is displayed on the **Config** page when KOTS is running in a GKE cluster:

Config page with the text You are deploying to GKE

---


# _use-cases

Common use cases for KOTS template functions include rendering values during installation or upgrade, such as:
* Customer-specific license field values
* User-provided configuration values
* Information about the customer environment, such as the number of nodes or the Kubernetes version in the cluster where the application is installed
* Random strings

KOTS template functions can also be used to work with integer, boolean, float, and string values, such as doing mathematical operations, trimming leading and trailing spaces, or converting string values to integers or booleans.

---


# _admin-console-air-gap

import BuildAirGapBundle from "../install/_airgap-bundle-build.mdx"
import DownloadAirGapBundle from "../install/_airgap-bundle-download.mdx"
import ViewAirGapBundle from "../install/_airgap-bundle-view-contents.mdx"

To perform an air gap update from the Admin Console:

1. 
1. In the [Vendor Portal](https://vendor.replicated.com), go to the channel where the target release is promoted, then build and download the new `.airgap` bundle:

   <BuildAirGapBundle/>

1. <DownloadAirGapBundle/>
1. <ViewAirGapBundle/>
1. In the Admin Console, go to the **Version History** tab.
1. Click **Upload a new version**.

   A new upstream version displays in the list of available versions.

   ![New Version Available](/images/new-version-available.png)

1. (Optional) When there are multiple versions of an application, you can compare
the changes between them by clicking **Diff releases** in the right corner.

   You can review changes between any two arbitrary releases by clicking the icon in the header
   of the release column. Select the two versions to compare, and click **Diff releases**
   to show the relative changes between the two releases.

   ![Diff Releases](/images/diff-releases.png)
   ![New Changes](/images/new-changes.png)

1. (Optional) Click the **View preflight checks** icon to view or re-run the preflight checks.

   ![Preflight Checks](/images/preflight-checks.png)

1. Return to the **Version History** tab and click **Deploy** next to the target version.

---


# _admin-console

To perform an update from the Admin Console:

1. In the Admin Console, go to the **Version History** tab.
1. Click **Check for updates**.

   A new upstream version displays in the list of available versions.

   ![New Version Available](/images/new-version-available.png)

   [View a larger version of this image](/images/new-version-available.png)

1. (Optional) When there are multiple versions of an application, you can compare
the changes between them by clicking **Diff releases** in the right corner.

   You can review changes between any two arbitrary releases by clicking the icon in the header
   of the release column. Select the two versions to compare, and click **Diff releases**
   to show the relative changes between the two releases.

   ![Diff Releases](/images/diff-releases.png)

   [View a larger version of this image](/images/diff-releases.png)

   ![New Changes](/images/new-changes.png)

   [View a larger version of this image](/images/new-changes.png)

1. (Optional) Click the **View preflight checks** icon to view or re-run the preflight checks.

   ![Preflight checks](/images/preflight-checks.png)

   [View a larger version of this image](/images/preflight-checks.png)

1. Return to the **Version History** tab and click **Deploy** next to the target version.

---


# _installerRequirements

* **installer-spec-file**: If you used the `installer-spec-file` flag to pass a `patch.yaml` file when you installed, you must pass the same `patch.yaml` file when you upgrade. This prevents the installer from overwriting any configuration from your `patch.yaml` file and making changes to the add-ons in your cluster. For example: `installer-spec-file="./patch.yaml"`.

* **app-version-label**: By default, the installation script also upgrades your application to the latest version.

  You can specify a target application version with the `app-version-label` flag. To avoid upgrading your application version, set the `app-version-label` flag to the currently installed application version. For example: `app-version-label=1.5.0`.

---


# _upgradePrompt

(Kubernetes Upgrades Only) If a Kubernetes upgrade is required, the script automatically prints a `Drain local node and apply upgrade?` prompt. Confirm the prompt to drain the local primary node and apply the Kubernetes upgrade to the control plane.

The script continues to drain and upgrade nodes sequentially. For each node, the script prints a command that you must run on the node to upgrade Kubernetes. For more information, see [About Kubernetes Updates](/enterprise/updating-kurl-about#kubernetes) in _About kURL Cluster Updates_.

---


# _api-about

The Vendor API is the API for the Vendor Portal. This API can be used to complete tasks programmatically, including all tasks for packaging and managing applications, and managing artifacts such as teams and license files.

---


# _team-token-note

:::note
Team API tokens are deprecated and cannot be generated. If you are already using team API tokens, Replicated recommends that you migrate to Service Accounts or User API tokens instead because these options provide better granular control over token access.
:::

---


# Cron Expressions

This topic describes the supported cron expressions that you can use to schedule automatic application update checks and automatic backups in the KOTS Admin Console.

For more information, see [Configuring Automatic Updates](/enterprise/updating-apps) and [Schedule Automatic Backups](/enterprise/snapshots-creating#schedule-automatic-backups) in _Creating and Scheduling Backups_.

## Syntax

```
<minute> <hour> <day-of-month> <month> <day-of-week>
```

## Fields

The following table lists the required cron fields and supported values:

| Required Field | Allowed Values | Allowed Special Characters |
|----------------|----------------|----------------------------|
| Minute | 0 through 59 | , - * |
| Hour | 0 through 23 | , - * |
| Day-of-month | 1 through 31 | , - * ? |
| Month | 1 through 12 or JAN through DEC | , - * |
| Day-of-week | 0 through 6 or SUN through SAT | , - * ? |

## Special Characters

Replicated uses an external cron Go library. For more information about its usage, see [cron](https://pkg.go.dev/github.com/robfig/cron/v3).

The following table describes the supported special characters:

| Special Character | Description |
|-------------------|-------------|
| Comma (,) | Specifies a list or multiple values, which can be consecutive or not. For example, 1,2,4 in the Day-of-week field signifies every Monday, Tuesday, and Thursday. |
| Dash (-) | Specifies a contiguous range. For example, 4-6 in the Month field signifies April through June. |
| Asterisk (*) | Specifies that all of the values for the field are used. For example, using * in the Month field means that all of the months are included in the schedule. |
| Question mark (?) | Specifies that one or another value can be used. For example, enter 5 for Day-of-the-month and ? for Day-of-the-week to check for updates on the 5th day of the month, regardless of which day of the week it is. |
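
For example, the following expression combines the dash and asterisk special characters to check for updates at 11:30 PM on weekdays (days `1-5`, Monday through Friday):

```
30 23 * * 1-5
```
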
## Predefined Schedules

You can use one of the following predefined schedule values instead of a cron expression:

| Schedule Value | Description | Equivalent Cron Expression |
|----------------|-------------|----------------------------|
| @yearly (or @annually) | Runs once a year, at midnight on January 1. | 0 0 1 1 * |
| @monthly | Runs once a month, at midnight on the first of the month. | 0 0 1 * * |
| @weekly | Runs once a week, at midnight between Saturday and Sunday. | 0 0 * * 0 |
| @daily (or @midnight) | Runs once a day, at midnight. | 0 0 * * * |
| @hourly | Runs once an hour, at the beginning of the hour. | 0 * * * * |
| @never | Disables the schedule completely. Only used by KOTS. This value can be useful when you are calling the API directly or are editing the KOTS configuration manually. | 0 * * * * |
| @default | Selects the default schedule option (every 4 hours). Begins when the Admin Console starts up. This value can be useful when you are calling the API directly or are editing the KOTS configuration manually. | 0 * * * * |

## Intervals

You can also schedule the job to operate at fixed intervals, starting at the time the job is added or when cron is run:

```
@every DURATION
```

Replace `DURATION` with a string that is accepted by time.ParseDuration, with the exception of seconds. Seconds are not supported by KOTS. For more information about duration strings, see [time.ParseDuration](http://golang.org/pkg/time/#ParseDuration) in the Go Time documentation.

As with standard cron expressions, the interval does not include the job runtime. For example, if a job is scheduled to run every 10 minutes, and the job takes 4 minutes to run, there are 6 minutes of idle time between each run.

## Examples

The following examples show valid cron expressions to schedule checking for updates:

- At 11:30 AM every day:

  ```
  30 11 * * *
  ```

- After 1 hour and 45 minutes, and then every interval following that:

  ```
  @every 1h45m
  ```


---


# About Custom Resources

You can include custom resources in releases to control the experience for applications installed with Replicated KOTS.

Custom resources are consumed by KOTS, the Admin Console, or by other kubectl plugins. Custom resources are packaged as part of the application, but are _not_ deployed to the cluster.

## KOTS Custom Resources

The following are custom resources in the `kots.io` API group:

| API Group/Version | Kind | Description |
|---------------|------|-------------|
| kots.io/v1beta1 | [Application](custom-resource-application) | Adds additional metadata (branding, release notes, and more) to an application |
| kots.io/v1beta1 | [Config](custom-resource-config) | Defines a user-facing configuration screen in the Admin Console |
| kots.io/v1beta2 | [HelmChart](custom-resource-helmchart-v2) | Identifies an instantiation of a Helm Chart |
| kots.io/v1beta1 | [LintConfig](custom-resource-lintconfig) | Customizes the default rule levels for the KOTS release linter |

## Other Custom Resources

The following are custom resources in API groups other than `kots.io` that can be included in a KOTS release to configure additional functionality:

| API Group/Version | Kind | Description |
|---------------|------|-------------|
| app.k8s.io/v1beta1 | [SIG Application](https://github.com/kubernetes-sigs/application#kubernetes-applications) | Defines metadata about the application |
| cluster.kurl.sh/v1beta1 | [Installer](https://kurl.sh/docs/create-installer/) | Defines a Replicated kURL distribution |
| embeddedcluster.replicated.com/v1beta1 | [Config](/reference/embedded-config) | Defines a Replicated Embedded Cluster distribution |
| troubleshoot.replicated.com/v1beta2 | [Preflight](custom-resource-preflight) | Defines the data to collect and analyze for custom preflight checks |
| troubleshoot.replicated.com/v1beta2 | [Redactor](https://troubleshoot.sh/reference/redactors/overview/) | Defines custom redactors that apply to support bundles and preflight checks |
| troubleshoot.sh/v1beta2 | [Support Bundle](custom-resource-preflight) | Defines the data to collect and analyze for a support bundle |
| velero.io/v1 | [Backup](https://velero.io/docs/v1.10/api-types/backup/) | A Velero backup request, triggered when the user initiates a backup with Replicated snapshots |

---


# Application

import Title from "../partials/custom-resource-application/_title.mdx"
import Icon from "../partials/custom-resource-application/_icon.mdx"
import ReleaseNotes from
"../partials/custom-resource-application/_releaseNotes.mdx" +import AllowRollback from "../partials/custom-resource-application/_allowRollback.mdx" +import AdditionalNamespaces from "../partials/custom-resource-application/_additionalNamespaces.mdx" +import AdditionalImages from "../partials/custom-resource-application/_additionalImages.mdx" +import RequireMinimalRBACPrivileges from "../partials/custom-resource-application/_requireMinimalRBACPrivileges.mdx" +import SupportMinimalRBACPrivileges from "../partials/custom-resource-application/_supportMinimalRBACPrivileges.mdx" +import Ports from "../partials/custom-resource-application/_ports.mdx" +import StatusInformers from "../partials/custom-resource-application/_statusInformers.mdx" +import Graphs from "../partials/custom-resource-application/_graphs.mdx" +import GraphsTemplates from "../partials/custom-resource-application/_graphs-templates.mdx" +import TargetKotsVersion from "../partials/custom-resource-application/_targetKotsVersion.mdx" +import MinKotsVersion from "../partials/custom-resource-application/_minKotsVersion.mdx" +import ProxyRegistryDomain from "../partials/custom-resource-application/_proxyRegistryDomain.mdx" +import ReplicatedRegistryDomain from "../partials/custom-resource-application/_replicatedRegistryDomain.mdx" +import ServicePortNote from "../partials/custom-resource-application/_servicePort-note.mdx" +import PortsServiceName from "../partials/custom-resource-application/_ports-serviceName.mdx" +import PortsLocalPort from "../partials/custom-resource-application/_ports-localPort.mdx" +import PortsServicePort from "../partials/custom-resource-application/_ports-servicePort.mdx" +import PortsApplicationURL from "../partials/custom-resource-application/_ports-applicationURL.mdx" +import KurlNote from "../partials/custom-resource-application/_ports-kurl-note.mdx" + +# Application + +The Application custom resource enables features such as branding, release notes, port forwarding, dashboard buttons, app status indicators, and custom graphs. + +There is some overlap between the Application custom resource manifest file and the [Kubernetes SIG Application custom resource](https://github.com/kubernetes-sigs/application/blob/master/docs/api.md). For example, enabling features such as [adding a button to the dashboard](/vendor/admin-console-adding-buttons-links) requires the use of both the Application and SIG Application custom resources. + +The following is an example manifest file for the Application custom resource: + +```yaml +apiVersion: kots.io/v1beta1 +kind: Application +metadata: + name: my-application +spec: + title: My Application + icon: https://support.io/img/logo.png + releaseNotes: These are our release notes + allowRollback: false + targetKotsVersion: "1.60.0" + minKotsVersion: "1.40.0" + requireMinimalRBACPrivileges: false + additionalImages: + - jenkins/jenkins:lts + additionalNamespaces: + - "*" + ports: + - serviceName: web + servicePort: 9000 + localPort: 9000 + applicationUrl: "http://web" + statusInformers: + - deployment/my-web-svc + - deployment/my-worker + graphs: + - title: User Signups + query: 'sum(user_signup_events_total)' +``` + +## title + + + + + + + + +
<table>
  <tr>
    <th>Description</th>
    <td>The application title. Used on the license upload and in various places in the Replicated Admin Console.</td>
  </tr>
  <tr>
    <th>Example</th>
    <td><Title/></td>
  </tr>
  <tr>
    <th>Supports Go templates?</th>
    <td>No</td>
  </tr>
  <tr>
    <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th>
    <td>Yes</td>
  </tr>
</table>

## icon

<table>
  <tr>
    <th>Description</th>
    <td>The icon file for the application. Used on the license upload, in various places in the Admin Console, and in the Download Portal. The icon can be a remote URL or a Base64 encoded image. Base64 encoded images are required to display the image in air gap installations with no outbound internet access.</td>
  </tr>
  <tr>
    <th>Example</th>
    <td><Icon/></td>
  </tr>
  <tr>
    <th>Supports Go templates?</th>
    <td>No</td>
  </tr>
  <tr>
    <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th>
    <td>Yes</td>
  </tr>
</table>


## releaseNotes

<table>
  <tr>
    <th>Description</th>
    <td>The release notes for this version. These can also be set when promoting a release.</td>
  </tr>
  <tr>
    <th>Example</th>
    <td><ReleaseNotes/></td>
  </tr>
  <tr>
    <th>Supports Go templates?</th>
    <td>No</td>
  </tr>
  <tr>
    <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th>
    <td>Yes</td>
  </tr>
</table>

## allowRollback

<table>
  <tr>
    <th>Description</th>
    <td>
      <p>Enable this flag to create a <strong>Rollback</strong> button on the Admin Console Version History page.</p>
      <p>If an application is guaranteed not to introduce backwards-incompatible versions, such as through database migrations, then the <code>allowRollback</code> flag can allow end users to easily roll back to previous versions from the Admin Console.</p>
      <p>Rollback does not revert any state. Rather, it recovers the YAML manifests that are applied to the cluster.</p>
    </td>
  </tr>
  <tr>
    <th>Example</th>
    <td><AllowRollback/></td>
  </tr>
  <tr>
    <th>Default</th>
    <td><code>false</code></td>
  </tr>
  <tr>
    <th>Supports Go templates?</th>
    <td>No</td>
  </tr>
  <tr>
    <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th>
    <td>Embedded Cluster 1.17.0 and later supports partial rollbacks of the application version. Partial rollbacks are supported only when rolling back to a version where there is no change to the [Embedded Cluster Config](/reference/embedded-config) compared to the currently-installed version. For example, users can roll back to release version 1.0.0 after upgrading to 1.1.0 only if both 1.0.0 and 1.1.0 use the same Embedded Cluster Config.</td>
  </tr>
</table>


## additionalNamespaces

<table>
  <tr>
    <th>Description</th>
    <td>
      <p>An array of additional namespaces as strings that Replicated KOTS creates on the cluster. For more information, see <a href="/vendor/operator-defining-additional-namespaces">Defining Additional Namespaces</a>.</p>
      <p>In addition to creating the additional namespaces, KOTS ensures that the application secret exists in the namespaces. KOTS also ensures that this application secret has access to pull the application images, including both images that are used and any images you add in the <code>additionalImages</code> field.
This pull secret is automatically added to all manifest files that use private images.</p>
      <p>For dynamically created namespaces, specify <code>"*"</code>.</p>
    </td>
  </tr>
  <tr>
    <th>Example</th>
    <td><AdditionalNamespaces/></td>
  </tr>
  <tr>
    <th>Supports Go templates?</th>
    <td>No</td>
  </tr>
  <tr>
    <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th>
    <td>Yes</td>
  </tr>
</table>

## additionalImages

<table>
  <tr>
    <th>Description</th>
    <td><p>An array of strings that reference images to be included in air gap bundles and pushed to the local registry during installation.</p><p>KOTS detects images from the PodSpecs in the application. Some applications, such as Operators, might need to include additional images that are not referenced until runtime. For more information, see <a href="/vendor/operator-defining-additional-images">Defining Additional Images</a>.</p></td>
  </tr>
  <tr>
    <th>Example</th>
    <td><AdditionalImages/></td>
  </tr>
  <tr>
    <th>Supports Go templates?</th>
    <td>No</td>
  </tr>
  <tr>
    <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th>
    <td>Yes</td>
  </tr>
</table>

## requireMinimalRBACPrivileges

<table>
  <tr>
    <th>Description</th>
    <td><p><code>requireMinimalRBACPrivileges</code> applies to existing clusters only.</p><p>Requires that minimal role-based access control (RBAC) be used for all customer installations. When set to <code>true</code>, KOTS creates a namespace-scoped Role and RoleBinding, instead of the default cluster-scoped ClusterRole and ClusterRoleBinding.</p><p>For additional requirements and limitations related to using namespace-scoped RBAC, see <a href="/vendor/packaging-rbac#min-rbac">About Namespace-scoped RBAC</a> in <em>Configuring KOTS RBAC</em>.</p></td>
  </tr>
  <tr>
    <th>Example</th>
    <td><RequireMinimalRBACPrivileges/></td>
  </tr>
  <tr>
    <th>Default</th>
    <td><code>false</code></td>
  </tr>
  <tr>
    <th>Supports Go templates?</th>
    <td>No</td>
  </tr>
  <tr>
    <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th>
    <td>No</td>
  </tr>
</table>

## supportMinimalRBACPrivileges

<table>
  <tr>
    <th>Description</th>
    <td><p><code>supportMinimalRBACPrivileges</code> applies to existing clusters only.</p><p>Allows minimal role-based access control (RBAC) to be used for all customer installations. When set to <code>true</code>, KOTS supports creating a namespace-scoped Role and RoleBinding, instead of the default cluster-scoped ClusterRole and ClusterRoleBinding.</p><p>Minimal RBAC is not used by default. It is only used when the <code>--use-minimal-rbac</code> flag is passed to the <code>kots install</code> command.</p><p>For additional requirements and limitations related to using namespace-scoped RBAC, see <a href="/vendor/packaging-rbac#min-rbac">About Namespace-scoped RBAC</a> in <em>Configuring KOTS RBAC</em>.</p></td>
  </tr>
  <tr>
    <th>Example</th>
    <td><SupportMinimalRBACPrivileges/></td>
  </tr>
  <tr>
    <th>Default</th>
    <td><code>false</code></td>
  </tr>
  <tr>
    <th>Supports Go templates?</th>
    <td>No</td>
  </tr>
  <tr>
    <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th>
    <td>No</td>
  </tr>
</table>

## ports

<table>
<tr>
  <th>Description</th>
  <td>
    <p>Extra ports (additional to the <code>8800</code> Admin Console port) that are port-forwarded when running the <code>kubectl kots admin-console</code> command.
With ports specified, KOTS can establish port forwarding to simplify connections to the deployed application. When the application starts and the service is ready, the KOTS CLI will print a message in the terminal with the URL where the port-forwarded service can be accessed. For more information, see <a href="/vendor/admin-console-port-forward">Port Forwarding Services with KOTS</a>.</p>
    <KurlNote/>
    <p>The <code>ports</code> key has the following fields:</p>
    <ul>
      <PortsServiceName/>
      <PortsServicePort/>
      <ServicePortNote/>
      <PortsLocalPort/>
      <PortsApplicationURL/>
      For more information about adding links to port forwarded services, see <a href="/vendor/admin-console-port-forward#add-link">Add a Link to a Port-Forwarded Service in the Admin Console</a>.
    </ul>
  </td>
  </tr>
  <tr>
    <th>Example</th>
    <td><Ports/></td>
  </tr>
  <tr>
    <th>Supports Go templates?</th>
    <td><p>Go templates are supported in the `serviceName` and `applicationUrl` fields only.</p><p>Using Go templates in the `localPort` or `servicePort` fields results in an installation error similar to the following: `json: cannot unmarshal string into Go struct field ApplicationPort.spec.ports.servicePort of type int`.</p></td>
  </tr>
  <tr>
    <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th>
    <td>Yes</td>
  </tr>
</table>

## statusInformers

<table>
  <tr>
    <th>Description</th>
    <td>
      <p>Resources to watch and report application status back to the user. When you include <code>statusInformers</code>, the dashboard can indicate when the application deployment is complete and the application is ready for use.</p>
      <p><code>statusInformers</code> use the format <code>[namespace/]type/name</code>, where namespace is optional.</p>
      <p>For more information about including statusInformers, see <a href="/vendor/admin-console-display-app-status">Adding Resource Status Informers</a>.</p>
    </td>
  </tr>
  <tr>
    <th>Example</th>
    <td><StatusInformers/></td>
  </tr>
  <tr>
    <th>Supports Go templates?</th>
    <td>Yes</td>
  </tr>
  <tr>
    <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th>
    <td>Yes</td>
  </tr>
</table>

## graphs

<table>
  <tr>
    <th>Description</th>
    <td><p>Custom graphs to include on the Admin Console application dashboard. For more information about how to create custom graphs, see <a href="/vendor/admin-console-prometheus-monitoring">Adding Custom Graphs</a>.</p><p><code>graphs</code> has the following fields:</p><ul><li><code>graphs.title</code>: The graph title.</li><li><code>graphs.query</code>: The Prometheus query.</li><li><code>graphs.legend</code>: The legend to use for the query line. You can use Prometheus templating in the <code>legend</code> fields with each element returned from the Prometheus query. <p><GraphsTemplates/></p></li><li><code>graphs.queries</code>: A list of queries containing a <code>query</code> and <code>legend</code>.</li> <li><code>graphs.yAxisFormat</code>: The format of the Y axis labels with support for all Grafana units.
For more information, see <a href="https://grafana.com/docs/features/panels/graph/#left-y-right-y">Visualizations</a> in the Grafana documentation.</li><li><code>graphs.yAxisTemplate</code>: Y axis labels template.</li></ul></td> + </tr> + <tr> + <th>Example</th> + <td><Graphs/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td> + <p>Yes</p> + </td> + </tr> + <tr> + <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th> + <td>No</td> + </tr> +</table> + +## proxyRegistryDomain + +:::important +`proxyRegistryDomain` is deprecated. For information about how to use a custom domain for the Replicated proxy registry, see [Using Custom Domains](/vendor/custom-domains-using). +::: + +<table> + <tr> + <th>Description</th> + <td><p>The custom domain used for proxy.replicated.com. For more information, see <a href="/vendor/custom-domains-using">Using Custom Domains</a>.</p> <p>Introduced in KOTS v1.91.1.</p> </td> + </tr> + <tr> + <th>Example</th> + <td><ProxyRegistryDomain/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>No</td> + </tr> +</table> + +## replicatedRegistryDomain + +:::important +`replicatedRegistryDomain` is deprecated. For information about how to use a custom domain for the Replicated registry, see [Using Custom Domains](/vendor/custom-domains-using). +::: + +<table> + <tr> + <th>Description</th> + <td><p>The custom domain used for registry.replicated.com. For more information, see <a href="/vendor/custom-domains-using">Using Custom Domains</a>.</p><p>Introduced in KOTS v1.91.1.</p> </td> + </tr> + <tr> + <th>Example</th> + <td><ReplicatedRegistryDomain/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>No</td> + </tr> + <tr> + <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th> + <td>Yes</td> + </tr> +</table> + +## targetKotsVersion + +<table> + <tr> + <th>Description</th> + <td><p>The KOTS version that is targeted by the release. For more information, see <a href="/vendor/packaging-kots-versions">Setting Minimum and Target Versions for KOTS</a>.</p></td> + </tr> + <tr> + <th>Example</th> + <td><TargetKotsVersion/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>No</td> + </tr> + <tr> + <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th> + <td>No. Setting <code>targetKotsVersion</code> to a version earlier than the KOTS version included in the specified version of Embedded Cluster will cause Embedded Cluster installations to fail with an error message like: <code>Error: This version of App Name requires a different version of KOTS from what you currently have installed.</code>. To avoid installation failures, do not use <code>targetKotsVersion</code> in releases that support installation with Embedded Cluster.</td> + </tr> +</table> + +## minKotsVersion (Beta) + +<table> + <tr> + <th>Description</th> + <td><p>The minimum KOTS version that is required by the release. For more information, see <a href="/vendor/packaging-kots-versions">Setting Minimum and Target Versions for KOTS</a>.</p></td> + </tr> + <tr> + <th>Example</th> + <td><MinKotsVersion/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>No</td> + </tr> + <tr> + <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th> + <td>No. 
Setting <code>minKotsVersion</code> to a version later than the KOTS version included in the specified version of Embedded Cluster will cause Embedded Cluster installations to fail with an error message like: <code>Error: This version of App Name requires a different version of KOTS from what you currently have installed.</code> To avoid installation failures, do not use <code>minKotsVersion</code> in releases that support installation with Embedded Cluster.</td>
  </tr>
</table>


---


# Velero Backup Resource for Snapshots

This topic provides information about the supported fields in the Velero Backup resource for the Replicated KOTS snapshots feature.

## Overview

The Velero Backup custom resource enables the KOTS snapshots backup and restore feature. The backend of this feature uses the Velero open source project to back up Kubernetes manifests and persistent volumes.

## Example

The following shows an example of the Velero Backup resource:

```yaml
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: backup
  annotations:
    # `pvc-volume` will be the only volume included in the backup
    backup.velero.io/backup-volumes: pvc-volume
spec:
  includedNamespaces:
  - '*'
  excludedNamespaces:
  - some-namespace
  orderedResources:
    pods: mysql/mysql-cluster-replica-0,mysql/mysql-cluster-replica-1
    persistentvolumes: pvc-12345,pvc-67890
  ttl: 720h
  hooks:
    resources:
    -
      name: my-hook
      includedNamespaces:
      - '*'
      excludedNamespaces:
      - some-namespace
      includedResources:
      - pods
      excludedResources: []
      labelSelector:
        matchLabels:
          app: velero
          component: server
      pre:
      -
        exec:
          container: my-container
          command:
          - /bin/uname
          - -a
          onError: Fail
          timeout: 10s
      post: []
```

## Supported Fields for Full Backups with Snapshots {#fields}

For partial backups with the snapshots feature, you can use all of the fields that Velero supports. See [Backups](https://velero.io/docs/v1.10/api-types/backup/) in the Velero documentation.

However, not all fields are supported for full backups. The table below lists the fields that are supported for full backups with snapshots:

<table>
  <tr>
    <th width="50%">Field Name</th>
    <th width="50%">Description</th>
  </tr>
  <tr>
    <td><code>includedNamespaces</code></td>
    <td>(Optional) Specifies an array of namespaces to include in the backup. If unspecified, all namespaces are included.</td>
  </tr>
  <tr>
    <td><code>excludedNamespaces</code></td>
    <td>(Optional) Specifies an array of namespaces to exclude from the backup.</td>
  </tr>
  <tr>
    <td><code>orderedResources</code></td>
    <td>(Optional) Specifies the order of the resources to collect during the backup process. This is a map that uses a key as the plural resource. Each resource name has the format NAMESPACE/OBJECTNAME. The object names are a comma delimited list. For cluster resources, use OBJECTNAME only.</td>
  </tr>
  <tr>
    <td><code>ttl</code></td>
    <td>Specifies the amount of time before this backup is eligible for garbage collection. <b>Default:</b> <code>720h</code> (equivalent to 30 days). This value is configurable only by the customer.</td>
  </tr>
  <tr>
    <td><code>hooks</code></td>
    <td>(Optional) Specifies the actions to perform at different times during a backup. The only supported hook is executing a command in a container in a pod (uses the pod exec API).
Supports <code>pre</code> and <code>post</code> hooks.</td> + </tr> + <tr> + <td><code>hooks.resources</code></td> + <td>(Optional) Specifies an array of hooks that are applied to specific resources.</td> + </tr> + <tr> + <td><code>hooks.resources.name</code></td> + <td>Specifies the name of the hook. This value displays in the backup log.</td> + </tr> + <tr> + <td><code>hooks.resources.includedNamespaces</code></td> + <td>(Optional) Specifies an array of namespaces that this hook applies to. If unspecified, the hook is applied to all namespaces.</td> + </tr> + <tr> + <td><code>hooks.resources.excludedNamespaces</code></td> + <td>(Optional) Specifies an array of namespaces to which this hook does not apply.</td> + </tr> + <tr> + <td><code>hooks.resources.includedResources</code></td> + <td>Specifies an array of pod resources to which this hook applies.</td> + </tr> + <tr> + <td><code>hooks.resources.excludedResources</code></td> + <td>(Optional) Specifies an array of resources to which this hook does not apply.</td> + </tr> + <tr> + <td><code>hooks.resources.labelSelector</code></td> + <td>(Optional) Specifies that this hook only applies to objects that match this label selector.</td> + </tr> + <tr> + <td><code>hooks.resources.pre</code></td> + <td>Specifies an array of <code>exec</code> hooks to run before executing custom actions.</td> + </tr> + <tr> + <td><code>hooks.resources.post</code></td> + <td>Specifies an array of <code>exec</code> hooks to run after executing custom actions. Supports the same arrays and fields as <code>pre</code> hooks.</td> + </tr> + <tr> + <td><code>hooks.resources.[post/pre].exec</code></td> + <td>Specifies the type of the hook. <code>exec</code> is the only supported type.</td> + </tr> + <tr> + <td><code>hooks.resources.[post/pre].exec.container</code></td> + <td>(Optional) Specifies the name of the container where the specified command will be executed. If unspecified, the first container in the pod is used.</td> + </tr> + <tr> + <td><code>hooks.resources.[post/pre].exec.command</code></td> + <td>Specifies the command to execute. The format is an array.</td> + </tr> + <tr> + <td><code>hooks.resources.[post/pre].exec.onError</code></td> + <td>(Optional) Specifies how to handle an error that might occur when executing the command. <b>Valid values:</b> <code>Fail</code> and <code>Continue</code> <b>Default:</b> <code>Fail</code></td> + </tr> + <tr> + <td><code>hooks.resources.[post/pre].exec.timeout</code></td> + <td>(Optional) Specifies how many seconds to wait for the command to finish executing before the action times out. <b>Default:</b> <code>30s</code></td> + </tr> +</table> + +## Limitations {#limitations} + +- The following top-level Velero fields, or children of `spec`, are not supported in full backups: + + - `snapshotVolumes` + - `volumeSnapshotLocations` + - `labelSelector` + - `includedResources` + - `excludedResources` + + :::note + Some of these fields are supported for hook arrays, as described in the previous field definition table. See [Supported Fields for Full Backups with Snapshots](#fields) above. + ::: + +- All resources are included in the backup by default. However, resources can be excluded by adding `velero.io/exclude-from-backup=true` to the manifest files that you want to exclude. For more information, see [Configuring Snapshots](/vendor/snapshots-configuring-backups). 
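
For example, the following is a minimal sketch of excluding a resource from backups with this label. The Secret kind, name, and data shown here are illustrative only:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: example-secret # hypothetical resource name
  labels:
    # Velero skips resources with this label during backup
    velero.io/exclude-from-backup: "true"
stringData:
  example-key: example-value
```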


---


# Config

import ItemTypes from "../partials/config/_item-types.mdx"
import PropertyWhen from "../partials/config/_property-when.mdx"
import RandomStringNote from "../partials/config/_randomStringNote.mdx"
import NameExample from "../partials/config/_nameExample.mdx"
import TypeExample from "../partials/config/_typeExample.mdx"
import DefaultExample from "../partials/config/_defaultExample.mdx"
import ValueExample from "../partials/config/_valueExample.mdx"
import RequiredExample from "../partials/config/_requiredExample.mdx"
import RecommendedExample from "../partials/config/_recommendedExample.mdx"
import HiddenExample from "../partials/config/_hiddenExample.mdx"
import ReadonlyExample from "../partials/config/_readonlyExample.mdx"
import WhenExample from "../partials/config/_whenExample.mdx"
import AffixExample from "../partials/config/_affixExample.mdx"
import HelpTextExample from "../partials/config/_helpTextExample.mdx"
import RegexValidationExample from "../partials/config/_regexValidationExample.mdx"
import WhenRequirements from "../partials/config/_when-requirements.mdx"
import WhenNote from "../partials/config/_when-note.mdx"

The Config custom resource can be provided by a vendor to specify a Config page in the Replicated Admin Console for collecting customer-supplied values and rendering them with template functions.

The settings that appear on the Admin Console Config page are specified as an array of configuration _groups_ and _items_.

The following example shows three groups defined in the Config custom resource manifest file, and how these groups are displayed on the Admin Console Config page.

For more information about the properties of groups and items, see [Group Properties](#group-properties) and [Item Properties](#item-properties) below.

```yaml
apiVersion: kots.io/v1beta1
kind: Config
metadata:
  name: my-application
spec:
  groups:
  - name: example_group
    title: First Group
    items:
    - name: http_enabled
      title: HTTP Enabled
      type: bool
      default: "0"
  - name: example_group_2
    title: Second Group
    when: false
    items:
    - name: key
      title: Key
      type: textarea
    - name: hostname
      title: Hostname
      type: text
  - name: example_group_3
    title: Third Group
    items:
    - name: email-address
      title: Email Address
      type: text
    - name: password_text
      title: Password
      type: password
      value: '{{repl RandomString 10}}'
```
![Three groups of items on the config page](/images/config-screen-groups.png)
[View a larger version of this image](/images/config-screen-groups.png)

## Group Properties

Groups have a `name`, `title`, `description`, and an array of `items`.

### `description`

Descriptive help text for the group that displays on the Admin Console Config page. Supports markdown formatting.

To provide help text for individual items on the Config page, use the item `help_text` property. See [help_text](#help_text) below.

```yaml
spec:
  groups:
  # Provide a description of the input fields in the group
  - name: example_group
    title: First Group
    description: Select whether or not to enable HTTP.
    items:
    - name: http_enabled
      title: HTTP Enabled
      type: bool
      default: "0"
```

### `name`

A unique identifier for the group.
+ +```yaml +spec: + groups: + # The name must be unique + - name: example_group + title: First Group + items: + - name: http_enabled + title: HTTP Enabled + type: bool + default: "0" +``` + +### `title` + +The title of the group that displays on the Admin Console Config page. + +```yaml +spec: + groups: + - name: example_group + # First Group is the heading that appears on the Config page + title: First Group + items: + - name: http_enabled + title: HTTP Enabled + type: bool + default: "0" +``` + +### `when` + +The `when` property denotes groups that are displayed on the Admin Console **Config** page only when a condition evaluates to true. When the condition evaluates to false, the group is not displayed. + +<PropertyWhen/> + +:::note +`when` is a property of both groups and items. See [Item Properties > `when`](/reference/custom-resource-config#when-item) below. +::: + +#### Requirements and Limitations + +The `when` group property has the following requirements and limitations: + +<WhenRequirements/> + +#### Example + +In the following example, the `example_group_2` group of items will be displayed on the **Config** page only when the user enables the `http_enabled` configuration field. This example uses the KOTS [ConfigOptionEquals](/reference/template-functions-config-context#configoptionequals) template function to evaluate the value of the `http_enabled` configuration field. + +```yaml +spec: + groups: + - name: example_group + title: First Group + items: + - name: http_enabled + title: HTTP Enabled + type: bool + default: "0" + - name: example_group_2 + title: Second Group + # This group is displayed only when the `http_enabled` field is selected + when: repl{{ ConfigOptionEquals "http_enabled" "1" }} + items: + - name: key + title: Key + type: textarea + - name: hostname + title: Hostname + type: text + - name: example_group_3 + title: Third Group + items: + - name: email-address + title: Email Address + type: text + - name: password_text + title: Password + type: password + value: '{{repl RandomString 10}}' +``` + +![Only the first and third groups appear on the config screen](/images/config-screen-group-when-false.png) +[View a larger version of this image](/images/config-screen-group-when-false.png) + +For additional examples, see [Using Conditional Statements in Configuration Fields](/vendor/config-screen-conditional). + +### `items` + +Each group contains an array of items that map to input fields on the Admin Console Config screen. All items have `name`, `title`, and `type` properties and belong to a single group. + +For more information, see [Item Properties](#item-properties) and [Item Types](#item-types) below. + +## Item Properties + +Items have a `name`, `title`, `type`, and other optional properties. + +### `affix` + +<table> + <tr> + <th>Description</th> + <td> + <p>Items can be affixed <code>left</code> or <code>right</code>. Affixing items allows them to appear in the Admin Console on the same line.</p><p>Specify the <code>affix</code> field to all of the items in a particular group to preserve the line spacing and prevent the appearance of crowded text.</p> + </td> + </tr> + <tr> + <th>Required?</th> + <td>No</td> + </tr> + <tr> + <th>Example</th> + <td><AffixExample/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>Yes</td> + </tr> +</table> + +### `default` + +<table> + <tr> + <th>Description</th> + <td> + <p>Defines the default value for the config item. 
If the user does not provide a value for the item, then the <code>default</code> value is applied.</p> + <p>If the <code>default</code> value is not associated with a <code>password</code> type config item, then it appears as placeholder text in the Admin Console.</p> + </td> + </tr> + <tr> + <th>Required?</th> + <td>No</td> + </tr> + <tr> + <th>Example</th> + <td><DefaultExample/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td><p>Yes. Every time the user makes a change to their configuration settings for the application, any template functions used in the <code>default</code> property are reevaluated.</p></td> + </tr> +</table> + +### `help_text` + +<table> + <tr> + <th>Description</th> + <td> + <p>Displays a helpful message below the <code>title</code> for the config item in the Admin Console.</p> + <p>Markdown syntax is supported. For more information about markdown syntax, see <a href="https://guides.github.com/features/mastering-markdown/">Basic writing and formatting syntax</a> in the GitHub Docs.</p> + </td> + </tr> + <tr> + <th>Required?</th> + <td>No</td> + </tr> + <tr> + <th>Example</th> + <td><HelpTextExample/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>Yes</td> + </tr> +</table> + +### `hidden` + +<table> + <tr> + <th>Description</th> + <td> + <p>Hidden items are not visible in the Admin Console.</p> + <p><RandomStringNote/></p> + </td> + </tr> + <tr> + <th>Required?</th> + <td>No</td> + </tr> + <tr> + <th>Example</th> + <td><HiddenExample/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>No</td> + </tr> +</table> + +### `name` + +<table> + <tr> + <th>Description</th> + <td><p>A unique identifier for the config item. Item names must be unique both within the group and across all groups. The item <code>name</code> is not displayed in the Admin Console.</p><p> The item <code>name</code> can be used with KOTS template functions in the Config context (such as ConfigOption or ConfigOptionEquals) to return the value of the item. For more information, see <a href="/reference/template-functions-config-context">Config Context</a>.</p></td> + </tr> + <tr> + <th>Required?</th> + <td>Yes</td> + </tr> + <tr> + <th>Example</th> + <td><NameExample/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>Yes</td> + </tr> +</table> + +### `readonly` + +<table> + <tr> + <th>Description</th> + <td> + <p>Readonly items are displayed in the Admin Console and users cannot edit their value.</p> + <p><RandomStringNote/></p> + </td> + </tr> + <tr> + <th>Required?</th> + <td>No</td> + </tr> + <tr> + <th>Example</th> + <td><ReadonlyExample/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>No</td> + </tr> +</table> + +### `recommended` + +<table> + <tr> + <th>Description</th> + <td><p>Displays a Recommended tag for the config item in the Admin Console.</p></td> + </tr> + <tr> + <th>Required?</th> + <td>No</td> + </tr> + <tr> + <th>Example</th> + <td> + <RecommendedExample/> + </td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>No</td> + </tr> +</table> + +### `required` + +<table> + <tr> + <th>Description</th> + <td><p>Displays a Required tag for the config item in the Admin Console. 
A required item prevents the application from starting until it has a value.</p></td> + </tr> + <tr> + <th>Required?</th> + <td>No</td> + </tr> + <tr> + <th>Example</th> + <td><RequiredExample/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>No</td> + </tr> +</table> + +### `title` + +<table> + <tr> + <th>Description</th> + <td><p>The title of the config item that displays in the Admin Console.</p></td> + </tr> + <tr> + <th>Required?</th> + <td>Yes</td> + </tr> + <tr> + <th>Example</th> + <td><HelpTextExample/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>Yes</td> + </tr> +</table> + +### `type` + +<table> + <tr> + <th>Description</th> + <td> + <p>Each item has a <code>type</code> property that defines the type of user input accepted by the field.</p> + <p>The <code>type</code> property supports the following values: <ItemTypes/></p> + <p>For information about each type, see <a href="#item-types">Item Types</a>.</p> + </td> + </tr> + <tr> + <th>Required?</th> + <td>Yes</td> + </tr> + <tr> + <th>Example</th> + <td><TypeExample/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>No</td> + </tr> +</table> + +### `value` + +<table> + <tr> + <th>Description</th> + <td> + <p>Defines the value of the config item. Data that you add to <code>value</code> appears as the HTML input value for the config item in the Admin Console.</p> + <p>If the config item is not readonly, then the data that you add to <code>value</code> is overwritten by any user input for the item. If the item is readonly, then the data that you add to <code>value</code> cannot be overwritten.</p> + </td> + </tr> + <tr> + <th>Required?</th> + <td>No</td> + </tr> + <tr> + <th>Example</th> + <td><ValueExample/></td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td><p>Yes</p><RandomStringNote/></td> + </tr> +</table> + +### `when` {#when-item} + +<table> + <tr> + <th>Description</th> + <td><p>The <code>when</code> property denotes items that are displayed on the Admin Console <strong>Config</strong> page only when a condition evaluates to true. When the condition evaluates to false, the item is not displayed.</p><PropertyWhen/><p>The `when` item property has the following requirements and limitations:</p><WhenRequirements/><ul><li><code>when</code> cannot be applied to the items nested under a <code>radio</code>, <code>dropdown</code> or <code>select_one</code> item. To conditionally show or hide <code>radio</code>, <code>dropdown</code> or <code>select_one</code> items, apply the <code>when</code> property to the item itself.</li></ul><WhenNote/></td> + </tr> + <tr> + <th>Required?</th> + <td>No</td> + </tr> + <tr> + <th>Example</th> + <td> + <p>Display the <code>database_host</code> and <code>database_password</code> items only when the user selects <code>external</code> for the <code>db_type</code> item:</p><p><WhenExample/></p><p>For additional examples, see <a href="/vendor/config-screen-conditional">Using Conditional Statements in Configuration Fields</a>.</p> + </td> + </tr> + <tr> + <th>Supports Go templates?</th> + <td>Yes</td> + </tr> +</table> + +### `validation` + +<table> + <tr> + <th>Description</th> + <td><p>The <code>validation</code> property can be used to validate an item's value, <br/>allowing you to specify custom validation rules that determine whether the value is valid or not.</p></td> + </tr> + <tr> + <th>Required?</th> + <td>No</td> + </tr> + <tr> + <th>Example</th> + <td> + <p>Validates and returns if <code>password</code> value is not matching the regex. 
<br/>The <code>jwt_token</code> file content is only validated if the file is uploaded, since it is optional.</p>
      <RegexValidationExample/>
    </td>
  </tr>
  <tr>
    <th>Supports Go templates?</th>
    <td>No</td>
  </tr>
</table>

For information about supported validation types, see [Item Validation](#item-validation).

## Item Types

This section describes each of the item types:
<ItemTypes/>

### `bool`
The `bool` input type should use a "0" or "1" to set the value.
```yaml
  - name: group_title
    title: Group Title
    items:
    - name: http_enabled
      title: HTTP Enabled
      type: bool
      default: "0"
```

![Boolean selector on the configuration screen](/images/config-screen-bool.png)

[View a larger version of this image](/images/config-screen-bool.png)

### `dropdown`

> Introduced in KOTS v1.114.0

The `dropdown` item type includes one or more nested items that are displayed in a dropdown on the Admin Console config screen. Dropdowns are especially useful for displaying long lists of options. You can also use the [`radio`](#radio) item type to display radio buttons for items with shorter lists of options.

To set a default value for `dropdown` items, set the `default` field to the name of the target nested item.

```yaml
spec:
  groups:
  - name: example_settings
    title: My Example Config
    items:
    - name: version
      title: Version
      default: version_latest
      type: dropdown
      items:
      - name: version_latest
        title: latest
      - name: version_123
        title: 1.2.3
      - name: version_124
        title: 1.2.4
      - name: version_125
        title: 1.2.5
```

![Dropdown item type on config screen](/images/config-screen-dropdown.png)

[View a larger version of this image](/images/config-screen-dropdown.png)

![Dropdown item type expanded](/images/config-screen-dropdown-open.png)

[View a larger version of this image](/images/config-screen-dropdown-open.png)

### `file`
A `file` is a special type of form field that renders an [`<input type="file" />`](https://www.w3schools.com/tags/tag_input.asp) HTML element.
Only the contents of the file, not the name, are captured.
See the [`ConfigOptionData`](template-functions-config-context#configoptiondata) template function for examples on how to use the file contents in your application.

```yaml
  - name: certs
    title: TLS Configuration
    items:
    - name: tls_private_key_file
      title: Private Key
      type: file
    - name: tls_certificate_file
      title: Certificate
      type: file
```

![File input field on the configuration screen](/images/config-screen-file.png)

[View a larger version of this image](/images/config-screen-file.png)

### `heading`
The `heading` type allows you to display a group heading as a sub-element within a group.
This is useful when you would like to use a config group to group items together, but still separate the items visually.

```yaml
  - name: ldap_settings
    title: LDAP Server Settings
    items:
    ...
    - name: ldap_schema
      type: heading
      title: LDAP schema
    ...
```

![Heading on the configuration screen](/images/config-screen-heading.png)

[View a larger version of this image](/images/config-screen-heading.png)

### `label`
The `label` type allows you to display an input label.
```yaml
  - name: email
    title: Email
    items:
    - name: email-address
      title: Email Address
      type: text
    - name: description
      type: label
      title: "Note: The system will send you an email every hour."
+``` +![Email address label on the configuration screen](/images/config-screen-label.png) + +[View a larger version of this image](/images/config-screen-label.png) + +### `password` +The `password` type is a text field that hides the character input. + +```yaml + - name: password_text + title: Password Text + type: password + value: '{{repl RandomString 10}}' +``` + +![Password text field on the configuration screen](/images/config-screen-password.png) + +[View a larger version of this image](/images/config-screen-password.png) + +### `radio` + +> Introduced in KOTS v1.114.0 + +The `radio` item type includes one or more nested items that are displayed as radio buttons on the Admin Console config screen. Radio buttons are especially useful for displaying short lists of options. You can also use the [`dropdown`](#dropdown) item type for items with longer lists of options. + +To set a default value for `radio` items, set the `default` field to the name of the target nested item. + +```yaml +spec: + groups: + - name: example_settings + title: My Example Config + items: + - name: authentication_type + title: Authentication Type + default: authentication_type_anonymous + type: radio + items: + - name: authentication_type_anonymous + title: Anonymous + - name: authentication_type_password + title: Password +``` + +### `select_one` (Deprecated) + +:::important +The `select_one` item type is deprecated and is not recommended for use. To display config items with multiple options, use the [`radio`](#radio) or [`dropdown`](#dropdown) item types instead. +::: + +`select_one` items must contain nested items. The nested items are displayed as radio buttons in the Admin Console. + +You can use the `name` field of a `select_one` item with KOTS template functions in the Config context (such as ConfigOption or ConfigOptionEquals) to return the option selected by the user. + +For example, if the user selects the **Password** option for the `select_one` item shown below, then the template function `'{{repl ConfigOption "authentication_type"}}'` is rendered as `authentication_type_password`. For more information about working with template functions in the Config context, see [Config Context](/reference/template-functions-config-context). + +```yaml +spec: + groups: + - name: example_settings + title: My Example Config + description: Configuration to serve as an example for creating your own. See [https://kots.io/reference/v1beta1/config/](https://kots.io/reference/v1beta1/config/) for configuration docs. In this case, we provide example fields for configuring an Nginx welcome page. + items: + - name: authentication_type + title: Authentication Type + default: authentication_type_anonymous + type: select_one + items: + - name: authentication_type_anonymous + title: Anonymous + - name: authentication_type_password + title: Password +``` + +![Select one field on the configuration screen](/images/config-screen-selectone.png) + +### `text` +A `text` input field allows users to enter a string value. +Optionally, all additional properties are available for this input type. + +```yaml + - name: example_text_input + title: Example Text Input + type: text +``` + +![Text field on the configuration screen](/images/config-screen-text.png) + +:::important +Do not store secrets or passwords in `text` items because they are not encrypted or masked and can be easily accessed. Instead, use [`password`](#password) items. 
+::: + +### `textarea` +A `textarea` items creates a multi-line text input for when users have to enter a sizeable amount of text. + +```yaml + - name: custom_key + title: Set your secret key for your app + description: Paste in your Custom Key + items: + - name: key + title: Key + type: textarea + - name: hostname + title: Hostname + type: text +``` +![Text area field on the configuration screen](/images/config-screen-textarea.png) + +## Item Validation + +A `validation` can be specified to validate the value of an item. `regex` is the supported validation type. + +Based on specified validation rules, the item is validated and a validation message is returned if the validation rule is not satisfied. A default message is returned if there is an empty validation message. + +The validation rules are as follows: + +- An item is validated only when its value is not empty. +- Items of types `text`, `textarea`, `password`, and `file` are validated, but `repeatable` items are not validated. +- If an item is marked as `hidden` or if its `when` condition is set to `false`, the item is not validated. +- If a group `when` condition is set to `false`, the items in the group are not validated. + +### `regex` +For applications installed with KOTS v1.98.0 or later, a `regex` can be used to validate whether an item's value matches the provided regular expression `pattern`. The regex pattern should be of the [RE2 regular expression](https://github.com/google/re2/wiki/Syntax) type and can validate the `text`, `textarea`, `password`, and `file` field types. + + The default validation message is `Value does not match regex`. + +<RegexValidationExample/> + +![Password validation error](/images/regex_password_validation_error.png) + +![File validation error only when uploaded](/images/regex_file_validation_error.png) + +## Repeatable Items + +A repeatable config item copies a YAML array entry or YAML document for as many values as are provided. Any number of values can be added to a repeatable item to generate additional copies. + +To make an item repeatable, set `repeatable` to true: + +```yaml + - name: ports_group + items: + - name: serviceport + title: Service Port + type: text + repeatable: true +``` + +Repeatable items do not use the `default` or `value` fields, but instead a `valuesByGroup` field. +`valuesByGroup` must have an entry for the parent Config Group name, with all of the default `key:value` pairs nested in the group. At least one default entry is required for the repeatable item: + +```yaml + valuesByGroup: + ports_group: + port-default-1: "80" +``` + +### Limitations + +* Repeatable items work only for text, textarea, and file types. +* Repeatable item names must only consist of lower case alphanumeric characters. +* Repeatable items are only supported for Kubernetes manifests, not Helm charts. + +### Template Targets + +Repeatable items require that you provide at least one `template`. The `template` defines a YAML target in the manifest to duplicate for each repeatable item. + +Required fields for a template target are `apiVersion`, `kind`, and `name`. + +`namespace` is an optional template target field to match a YAML document's `metadata.namespace` property when the same filename is used across multiple namespaces. + +The entire YAML node at the target is duplicated, including nested fields. + +The `yamlPath` field of the `template` must denote index position for arrays using square brackets. For example, `spec.ports[0]` selects the first port entry for duplication. 
All duplicate YAML is appended to the final array in the `yamlPath`.

`yamlPath` must end with an array.

**Example:**

```yaml
      templates:
      - apiVersion: v1
        kind: Service
        name: my-service
        namespace: my-app
        yamlPath: 'spec.ports[0]'
```

If the `yamlPath` field is not present, the entire YAML document matching the `template` is replaced with a copy for each of the repeatable item entries. The `metadata.name` field of the new document reflects the repeatable item `key`.

### Templating

The repeatable items are called with the delimiters `repl[[ .itemName ]]` or `[[repl .itemName ]]`. These delimiters can be placed anywhere inside the `yamlPath` target node:

```yaml
    - port: repl{{ ConfigOption "[[repl .serviceport ]]" | ParseInt }}
      name: '[[repl .serviceport ]]'
```
This repeatable templating is not compatible with Sprig templating functions. It is designed for inserting repeatable `keys` into the manifest. Repeatable templating can be placed inside of Replicated config templating.

### Ordering

Repeatable templates are processed before config template rendering.

Repeatable items are processed in the order of the template targets in the Config spec file. Effectively, this ordering runs from the top of the Config spec: by config group, then by config item, and then by template target.

```yaml
  - name: ports_group
    items:
    - name: serviceport
      title: Service Port
      type: text
      repeatable: true
      templates:
      - apiVersion: v1 #processed first
        kind: Service
        name: my-service
        namespace: my-app
        yamlPath: 'spec.ports[0]'
      - apiVersion: v1 #processed second
        kind: Service
        name: my-service
        namespace: my-app
      {other item properties ...}
    - name: other_ports
      title: Other Service Port
      type: text
      repeatable: true
      templates:
      - apiVersion: v1 #processed third
        kind: Service
        name: my-other-service
        namespace: my-app
      {other item properties ...}
  - name: deployments
    items:
    - name: deployment-name
      title: Deployment Names
      type: text
      repeatable: true
      templates:
      - apiVersion: apps/v1 #processed fourth
        kind: Deployment
        name: my-deployment
        namespace: my-app
      {other item properties ...}
```

## Repeatable Examples

In these examples, the default service port of "80" is included with the release. Port 443 is added as an additional port on the Admin Console configuration page, and the added value is stored in the ConfigValues file.

### Repeatable Item Example for a yamlPath

**Config custom resource manifest file:**

```yaml
  - name: ports_group
    items:
    - name: serviceport
      title: Service Port
      type: text
      repeatable: true
      templates:
      - apiVersion: v1
        kind: Service
        name: my-service
        namespace: my-app
        yamlPath: spec.ports[0]
      valuesByGroup:
        ports_group:
          port-default-1: "80"
```

**Config values:**
```yaml
apiVersion: kots.io/v1beta1
kind: ConfigValues
metadata:
  name: example_app
spec:
  values:
    port-default-1:
      repeatableItem: serviceport
      value: "80"
    serviceport-8jdn2bgd:
      repeatableItem: serviceport
      value: "443"
```

**Template manifest:**
```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-service
  namespace: my-app
spec:
  type: NodePort
  ports:
  - port: repl{{ ConfigOption "[[repl .serviceport ]]" | ParseInt }}
    name: '[[repl .serviceport ]]'
  selector:
    app: repeat_example
    component: my-deployment
```

**After repeatable config processing:**

**Note**: This phase is internal to configuration rendering for KOTS.
This example is only provided to further explain the templating process.

```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-service
  namespace: my-app
spec:
  type: NodePort
  ports:
  - port: repl{{ ConfigOption "port-default-1" | ParseInt }}
    name: 'port-default-1'
  - port: repl{{ ConfigOption "serviceport-8jdn2bgd" | ParseInt }}
    name: 'serviceport-8jdn2bgd'
  selector:
    app: repeat_example
    component: my-deployment
```

**Resulting manifest:**
```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-service
  namespace: my-app
spec:
  type: NodePort
  ports:
  - port: 80
    name: port-default-1
  - port: 443
    name: serviceport-8jdn2bgd
  selector:
    app: repeat_example
    component: my-deployment
```

### Repeatable Item Example for an Entire Document
**Config spec:**
```yaml
  - name: ports_group
    items:
    - name: serviceport
      title: Service Port
      type: text
      repeatable: true
      templates:
      - apiVersion: v1
        kind: Service
        name: my-service
        namespace: my-app
      valuesByGroup:
        ports_group:
          port-default-1: "80"
```

**Config values:**
```yaml
apiVersion: kots.io/v1beta1
kind: ConfigValues
metadata:
  name: example_app
spec:
  values:
    port-default-1:
      repeatableItem: serviceport
      value: "80"
    serviceport-8jdn2bgd:
      repeatableItem: serviceport
      value: "443"
```

**Template manifest:**
```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-service
  namespace: my-app
spec:
  type: NodePort
  ports:
  - port: repl{{ ConfigOption "[[repl .serviceport ]]" | ParseInt }}
  selector:
    app: repeat_example
    component: repl[[ .serviceport ]]
```

**After repeatable config processing:**

**Note**: This phase is internal to configuration rendering for KOTS. This example is only provided to further explain the templating process.

```yaml
apiVersion: v1
kind: Service
metadata:
  name: port-default-1
  namespace: my-app
spec:
  type: NodePort
  ports:
  - port: repl{{ ConfigOption "port-default-1" | ParseInt }}
  selector:
    app: repeat_example
    component: port-default-1
---
apiVersion: v1
kind: Service
metadata:
  name: serviceport-8jdn2bgd
  namespace: my-app
spec:
  type: NodePort
  ports:
  - port: repl{{ ConfigOption "serviceport-8jdn2bgd" | ParseInt }}
  selector:
    app: repeat_example
    component: serviceport-8jdn2bgd
```

**Resulting manifest:**
```yaml
apiVersion: v1
kind: Service
metadata:
  name: port-default-1
  namespace: my-app
spec:
  type: NodePort
  ports:
  - port: 80
  selector:
    app: repeat_example
    component: port-default-1
---
apiVersion: v1
kind: Service
metadata:
  name: serviceport-8jdn2bgd
  namespace: my-app
spec:
  type: NodePort
  ports:
  - port: 443
  selector:
    app: repeat_example
    component: serviceport-8jdn2bgd
```


---


# HelmChart v2

import VersionLimitation from "../partials/helm/_helm-version-limitation.mdx"
import HelmBuilderRequirements from "../partials/helm/_helm-builder-requirements.mdx"
import Chart from "../partials/helm/_helm-cr-chart.mdx"
import ChartName from "../partials/helm/_helm-cr-chart-name.mdx"
import ChartVersion from "../partials/helm/_helm-cr-chart-version.mdx"
import ChartReleaseName from "../partials/helm/_helm-cr-chart-release-name.mdx"
import HelmUpgradeFlags from "../partials/helm/_helm-cr-upgrade-flags.mdx"
import Values from "../partials/helm/_helm-cr-values.mdx"
import Weight from "../partials/helm/_helm-cr-weight.mdx"
import Exclude from "../partials/helm/_helm-cr-exclude.mdx"
import OptionalValues from
"../partials/helm/_helm-cr-optional-values.mdx" +import OptionalValuesWhen from "../partials/helm/_helm-cr-optional-values-when.mdx" +import OptionalValuesRecursiveMerge from "../partials/helm/_helm-cr-optional-values-recursive-merge.mdx" +import Namespace from "../partials/helm/_helm-cr-namespace.mdx" +import BuilderAirgapIntro from "../partials/helm/_helm-cr-builder-airgap-intro.mdx" +import BuilderExample from "../partials/helm/_helm-cr-builder-example.mdx" +import V2Example from "../partials/helm/_v2-native-helm-cr-example.mdx" +import KotsHelmCrDescription from "../partials/helm/_kots-helm-cr-description.mdx" + +# HelmChart v2 + +> Introduced in Replicated KOTS v1.99.0 + +<KotsHelmCrDescription/> + +For more information, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about). + +## Example + +The following is an example manifest file for the HelmChart v2 custom resource: + +<V2Example/> + +## chart + +<Chart/> + +### chart.name + +<ChartName/> + +### chart.chartVersion + +<ChartVersion/> + +## releaseName + +<ChartReleaseName/> + +## weight + +<Weight/> + +## helmUpgradeFlags + +<HelmUpgradeFlags/> + +## exclude + +<Exclude/> + +## values + +<Values/> + +For more information about using `values`, see [Setting Helm Chart Values with KOTS](/vendor/helm-optional-value-keys). + +## optionalValues + +<OptionalValues/> + +For more information about using `optionalValues`, see [Setting Helm Chart Values with KOTS](/vendor/helm-optional-value-keys). + +### optionalValues.when + +<OptionalValuesWhen/> + +### optionalValues.recursiveMerge + +<OptionalValuesRecursiveMerge/> + +**Default**: False + +For an example of recursive and non-recursive merging, see [About Recursive Merge](/vendor/helm-optional-value-keys#recursive-merge). + +## namespace + +<Namespace/> + +## builder + +The `builder` key is used to provide Helm values that are used during various stages of processing the Helm chart. + +The `builder` key is required for the following use cases: + +* To create an `.airgap` bundle for installations into air gap environments. + + <BuilderAirgapIntro/> + + For more information, see [Packaging Air Gap Bundles for Helm Charts](/vendor/helm-packaging-airgap-bundles). + +* To support online installations that use a local private registry, the `builder` field renders the Helm chart with all of the necessary images so that KOTS knows where to pull the images. + + You cannot prevent customers from configuring a local private registry in the Admin Console. If you think any of your customers will use a local private registry, you should use the `builder` key. For more information, see [Configuring Local Image Registries](/enterprise/image-registry-settings). + +<HelmBuilderRequirements/> + +* Use the same `builder` configuration to support the use of local registries in both online and air gap installations. If you already configured the `builder` key to support air gap installations, then no additional configuration is required. 
+ +**Example:** + +<BuilderExample/> + + +--- + + +# HelmChart v1 (Deprecated) + +import VersionLimitation from "../partials/helm/_helm-version-limitation.mdx" +import HelmBuilderRequirements from "../partials/helm/_helm-builder-requirements.mdx" +import Chart from "../partials/helm/_helm-cr-chart.mdx" +import ChartName from "../partials/helm/_helm-cr-chart-name.mdx" +import ChartVersion from "../partials/helm/_helm-cr-chart-version.mdx" +import ChartReleaseName from "../partials/helm/_helm-cr-chart-release-name.mdx" +import HelmUpgradeFlags from "../partials/helm/_helm-cr-upgrade-flags.mdx" +import Values from "../partials/helm/_helm-cr-values.mdx" +import Weight from "../partials/helm/_helm-cr-weight.mdx" +import WeightLimitation from "../partials/helm/_helm-cr-weight-limitation.mdx" +import Exclude from "../partials/helm/_helm-cr-exclude.mdx" +import OptionalValues from "../partials/helm/_helm-cr-optional-values.mdx" +import OptionalValuesWhen from "../partials/helm/_helm-cr-optional-values-when.mdx" +import OptionalValuesRecursiveMerge from "../partials/helm/_helm-cr-optional-values-recursive-merge.mdx" +import Namespace from "../partials/helm/_helm-cr-namespace.mdx" +import BuilderAirgapIntro from "../partials/helm/_helm-cr-builder-airgap-intro.mdx" +import BuilderExample from "../partials/helm/_helm-cr-builder-example.mdx" +import Deprecated from "../partials/helm/_replicated-deprecated.mdx" +import ReplicatedHelmMigration from "../partials/helm/_replicated-helm-migration.mdx" +import KotsHelmCrDescription from "../partials/helm/_kots-helm-cr-description.mdx" + + +# HelmChart v1 (Deprecated) + +:::important +<Deprecated/> +::: + +<KotsHelmCrDescription/> + +For more information, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about). + +## Example + +The following is an example manifest file for the HelmChart v1 custom resource: + +```yaml +apiVersion: kots.io/v1beta1 +kind: HelmChart +metadata: + name: samplechart +spec: + # chart identifies a matching chart from a .tgz + chart: + name: samplechart + chartVersion: 3.1.7 + releaseName: samplechart-release-1 + + exclude: "repl{{ ConfigOptionEquals `include_chart` `include_chart_no`}}" + + # helmVersion identifies the Helm Version used to render the chart. Default is v3. + helmVersion: v3 + + # useHelmInstall identifies the kots.io/v1beta1 installation method + useHelmInstall: true + + # weight determines the order that charts with "useHelmInstall: true" are applied, with lower weights first. + weight: 42 + + # helmUpgradeFlags specifies additional flags to pass to the `helm upgrade` command. 
  helmUpgradeFlags:
    - --skip-crds
    - --no-hooks
    - --timeout
    - 1200s
    - --history-max=15

  # values are used in the customer environment, as a pre-render step
  # these values will be supplied to helm template
  values:
    postgresql:
      enabled: repl{{ ConfigOptionEquals `postgres_type` `embedded_postgres`}}

  optionalValues:
    - when: "repl{{ ConfigOptionEquals `postgres_type` `external_postgres`}}"
      recursiveMerge: false
      values:
        postgresql:
          postgresqlDatabase: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_database`}}repl{{ end}}"
          postgresqlUsername: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_username`}}repl{{ end}}"
          postgresqlHost: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_host`}}repl{{ end}}"
          postgresqlPassword: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_password`}}repl{{ end}}"
          postgresqlPort: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_port`}}repl{{ end}}"

  # namespace allows for a chart to be installed in an alternate namespace to
  # the default
  namespace: samplechart-namespace

  # builder values provide a way to render the chart with all images
  # and manifests. this is used in Replicated to create `.airgap` packages
  builder:
    postgresql:
      enabled: true
```

## chart

<Chart/>

### chart.name

<ChartName/>

### chart.chartVersion

<ChartVersion/>

### chart.releaseName

> Introduced in Replicated KOTS v1.73.0

<ChartReleaseName/>

## helmVersion

Identifies the Helm version used to render the chart. Acceptable values are `v2` or `v3`. `v3` is the default when no value is specified.

:::note
<VersionLimitation/>
:::

## useHelmInstall

Identifies the method that KOTS uses to install the Helm chart:
* `useHelmInstall: true`: KOTS uses Kustomize to modify the chart and then repackages the resulting manifests for installation. This was previously referred to as the _native Helm_ installation method.

* `useHelmInstall: false`: KOTS renders the Helm templates and deploys them as standard Kubernetes manifests using `kubectl apply`. This was previously referred to as the _Replicated Helm_ installation method.

  :::note
  <ReplicatedHelmMigration/>
  :::

For more information about how KOTS deploys Helm charts when `useHelmInstall` is `true` or `false`, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about).

## weight

<WeightLimitation/>

<Weight/>

## helmUpgradeFlags

The `helmUpgradeFlags` field is _not_ supported for HelmChart custom resources with `useHelmInstall: false`.

<HelmUpgradeFlags/>

## values

<Values/>

## exclude

<Exclude/>

## optionalValues

<OptionalValues/>

### optionalValues.when

<OptionalValuesWhen/>

### optionalValues.recursiveMerge

:::note
`recursiveMerge` is available in KOTS v1.38.0 and later.
:::

<OptionalValuesRecursiveMerge/>

**Default**: False

## namespace

<Namespace/>

## builder

<BuilderAirgapIntro/>

<HelmBuilderRequirements/>

**Example:**

<BuilderExample/>


---


# Identity (Beta)

:::important
This topic has been removed from the product documentation because this Beta feature is deprecated.
+::: + +# Identity (Beta) + +The Identity custom resource allows you to configure the Replicated identity service for your application. + +The following is an example manifest file for the Identity custom resource: + +```yaml +apiVersion: kots.io/v1beta1 +kind: Identity +metadata: + name: my-application +spec: + identityIssuerURL: https://{{repl ConfigOption "ingress_hostname"}}/dex + oidcRedirectUris: + - https://{{repl ConfigOption "ingress_hostname"}}/oidc/login/callback + supportedProviders: [ oidc ] + requireIdentityProvider: true + roles: + - id: member + name: Member + description: Can see every member and non-secret team in the organization. + - id: owner + name: Owner + description: Has full administrative access to the entire organization. + oauth2AlwaysShowLoginScreen: false + signingKeysExpiration: 6h + idTokensExpiration: 24h + webConfig: + title: My App + theme: + logoUrl: data:image/png;base64,<encoded_base64_stream> + logoBase64: <base64 encoded png file> + styleCssBase64: <base64 encoded [styles.css](https://github.com/dexidp/dex/blob/v2.27.0/web/themes/coreos/styles.css) file> + faviconBase64: <base64 encoded png file> +``` + +## identityIssuerURL +**(required)** This is the canonical URL that all clients must use to refer to the OIDC identity service. +If a path is provided, the HTTP service will listen at a non-root URL. + +## oidcRedirectUris +**(required)** A registered set of redirect URIs. +When redirecting from the Replicated app manager identity OIDC server to the client, the URI requested to redirect to must match one of these values. + +## supportedProviders +A list of supported identity providers. +If unspecified, all providers will be available. + +## requireIdentityProvider +If true, require the identity provider configuration to be set by the customer before the app can be deployed. + +## roles +**(`id` required)** A list of roles to be mapped to identity provider groups by the customer on the Replicated Admin Console identity service configuration page. + +## oauth2AlwaysShowLoginScreen +If true, show the identity provider selection screen even if there's only one configured. +Default `false`. + +## signingKeysExpiration +Defines the duration of time after which the SigningKeys will be rotated. +Default `6h`. + +## idTokensExpiration +Defines the duration of time for which the IdTokens will be valid. +Default `24h`. + +## webConfig +Can be used for branding the application identity login screen. + + +--- + + +# LintConfig + +import LinterDefinition from "../partials/linter-rules/_linter-definition.mdx" + +# LintConfig + +<LinterDefinition/> + +The linter runs automatically against releases that you create in the Replicated vendor portal, and displays any error or warning messages in the vendor portal UI. + +The linter rules have default levels that can be overwritten. You can configure custom levels by adding a LintConfig manifest file (`kind: LintConfig`) to the release. Specify the rule name and level you want the rule to have. Rules that are not included in the LintConfig manifest file keep their default level. For information about linter rules and their default levels, see [Linter Rules](/reference/linter). 
+ +The supported levels are: + +<table> + <tr> + <th width="20%">Level</th> + <th width="80%">Description</th> + </tr> + <tr> + <td>error</td> + <td>The rule is enabled and shows as an error.</td> + </tr> + <tr> + <td>warn</td> + <td>The rule is enabled and shows as a warning.</td> + </tr> + <tr> + <td>info</td> + <td>The rule is enabled and shows an informational message.</td> + </tr> + <tr> + <td>off</td> + <td>The rule is disabled.</td> + </tr> + </table> + + +## Example +The following example manifest file overwrites the level for the application-icon to `off` to disable the rule. Additionally, the level for the application-statusInformers rule is changed to `error`, so instead of the default warning, it displays an error if the application is missing status informers. + +```yaml +apiVersion: kots.io/v1beta1 +kind: LintConfig +metadata: + name: default-lint-config +spec: + rules: + - name: application-icon + level: "off" + - name: application-statusInformers + level: "error" +``` + + +--- + + +# Preflight and Support Bundle + +# Preflight and Support Bundle + +You can define preflight checks and support bundle specifications for Replicated KOTS and Helm installations. + +Preflight collectors and analyzers provide cluster operators with clear feedback for any missing requirements or incompatibilities in the target environment before an application is deployed. Preflight checks are not automatically included in releases, so you must define them if you want to include them with a release. + +Support bundles collect and analyze troubleshooting data from a cluster and help diagnose problems with application deployments. For KOTS, default support bundles are automatically included with releases, and can be customized. For Helm installations, support bundles are not pre-enabled and must be defined if you want to use them. + +Collectors and analyzers are configured in Preflight and Support Bundle custom resources. + +:::note +Built-in redactors run by default for preflight checks and support bundles to protect sensitive information. +::: + +## Defining Custom Resources + +To define preflight checks or customize the default support bundle settings, add the corresponding custom resource YAML to your release. Then add custom collector and analyzer specifications to the custom resource. For more information about these troubleshoot features and how to configure them, see [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about). + +The following sections show basic Preflight and Support Bundle custom resource definitions. + +### Preflight + +The Preflight custom resource uses `kind: Preflight`: + +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: Preflight +metadata: + name: sample +spec: + collectors: [] + analyzers: [] +``` + +### Support Bundle + +The Support Bundle custom resource uses `kind: SupportBundle`: + +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: SupportBundle +metadata: + name: sample +spec: + collectors: [] + analyzers: [] +``` + +## Global Fields + +Global fields, also known as shared properties, are fields that are supported on all collectors or all analyzers. The following sections list the global fields for [collectors](#collector-global-fields) and [analyzers](#analyzer-global-fields) respectively. + +Additionally, each collector and analyzer has its own fields. For more information about collector- and analyzer-specific fields, see the [Troubleshoot documentation](https://troubleshoot.sh/docs/). 

### Collector Global Fields

The following fields are supported on all optional collectors for preflights and support bundles. For a list of collectors, see [All Collectors](https://troubleshoot.sh/docs/collect/all/) in the Troubleshoot documentation.

<table>
  <tr>
    <th width="30%">Field Name</th>
    <th width="70%">Description</th>
  </tr>
  <tr>
    <td><code>collectorName</code></td>
    <td>(Optional) A collector can specify the <code>collectorName</code> field. In some collectors, this field controls the path where result files are stored in the support bundle.</td>
  </tr>
  <tr>
    <td><code>exclude</code></td>
    <td>(Optional) (KOTS Only) A conditional based on the configuration available at runtime can be specified in the <code>exclude</code> field. This is useful for deployment techniques that allow templating for Replicated KOTS and the optional KOTS Helm component. When this value is <code>true</code>, the collector is not included.</td>
  </tr>
</table>

### KOTS Collector Example

This is an example of a collector definition for a KOTS support bundle:

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: SupportBundle
metadata:
  name: sample
spec:
  collectors:
    - collectd:
        collectorName: "collectd"
        image: busybox:1
        namespace: default
        hostPath: "/var/lib/collectd/rrd"
        imagePullPolicy: IfNotPresent
        imagePullSecret:
          name: my-temporary-secret
          data:
            .dockerconfigjson: ewoJICJhdXRocyI6IHsKzCQksHR0cHM6Ly9pbmRleC5kb2NrZXIuaW8vdjEvIjoge30KCX0sCgkiSHR0cEhlYWRlcnMiOiB7CgkJIlVzZXItQWdlbnQiOiAiRG9ja2VyLUNsaWVudC8xOS4wMy4xMiAoZGFyd2luKSIKCX0sCgkiY3JlZHNTdG9yZSI6ICJkZXNrdG9wIiwKCSJleHBlcmltZW50YWwiOiAiZGlzYWJsZWQiLAoJInN0YWNrT3JjaGVzdHJhdG9yIjogInN3YXJtIgp9
          type: kubernetes.io/dockerconfigjson
```

### Analyzer Global Fields

The following fields are supported on all optional analyzers for preflights and support bundles. For a list of analyzers, see [Analyzing Data](https://troubleshoot.sh/docs/analyze/) in the Troubleshoot documentation.

<table>
  <tr>
    <th width="30%">Field Name</th>
    <th width="70%">Description</th>
  </tr>
  <tr>
    <td><code>collectorName</code></td>
    <td>(Optional) An analyzer can specify the <code>collectorName</code> field.</td>
  </tr>
  <tr>
    <td><code>exclude</code></td>
    <td>(Optional) (KOTS Only) A condition based on the configuration available at runtime can be specified in the <code>exclude</code> field. This is useful for deployment techniques that allow templating for KOTS and the optional KOTS Helm component. When this value is <code>true</code>, the analyzer is not included.</td>
  </tr>
  <tr>
    <td><code>strict</code></td>
    <td>(Optional) (KOTS Only) An analyzer can be set to <code>strict: true</code> so that <code>fail</code> outcomes for that analyzer prevent the release from being deployed by KOTS until the vendor-specified requirements are met. When <code>exclude: true</code> is also specified, <code>exclude</code> overrides <code>strict</code> and the analyzer is not executed.</td>
  </tr>
</table>

### KOTS Analyzer Example

This is an example of a KOTS analyzer definition with a strict preflight check and `exclude` set for installations that do not use Replicated kURL. In this case, the strict preflight is enforced on an embedded cluster but not on an existing cluster or air gap cluster.
+ +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: Preflight +metadata: + name: check-kubernetes-version +spec: + analyzers: + - clusterVersion: + exclude: 'repl{{ (not IsKurl) }}' + strict: true + outcomes: + - fail: + when: "< 1.16.0" + message: The application requires Kubernetes 1.16.0 or later + uri: https://kubernetes.io + - warn: + when: "< 1.17.0" + message: Your cluster meets the minimum version of Kubernetes, but we recommend you update to 1.17.0 or later. + uri: https://kubernetes.io + - pass: + message: Your cluster meets the recommended and required versions of Kubernetes. +``` + + +--- + + +# Redactor (KOTS Only) + +# Redactor (KOTS Only) + +This topic describes how to define redactors with the Redactor custom resource. + +:::note +Custom redactors defined with the Redactor resource apply only to installations with Replicated KOTS. +::: + +## Overview + +Preflight checks and support bundles include built-in redactors. These built-in redactors use regular expressions to identify and hide potentially sensitive data before it is analyzed. For example, the built-in redactors hide values that match common patterns for data sources, passwords, and user IDs that can be found in standard database connection strings. They also hide environment variables with names that begin with words like token, password, or user. To view the complete list of regex patterns for the built-in redactors, see [`redact.go`](https://github.com/replicatedhq/troubleshoot/blob/main/pkg/redact/redact.go#L204) in the open-source Troubleshoot GitHub repo. + +For Replicated KOTS installations, you can also add custom redactors to support bundles using the Redactor custom resource manifest file. For example, you can redact API keys or account numbers, depending on your customer needs. For more information about redactors, see [Redacting Data](https://troubleshoot.sh/docs/redact/) in the Troubleshoot documentation. + +## Defining Custom Redactors + +You can add custom redactors for KOTS installations using the following basic Redactor custom resource manifest file (`kind: Redactor`): + +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: Redactor +metadata: + name: sample +spec: + redactors: [] +``` + +## Objects and Fields + +A redactor supports two objects: `fileSelector` and `removals`. These objects specify the files the redactor applies to and how the redactions occur. For more information and examples of these fields, see [KOTS Redactor Example](#example) below and [Redactors](https://troubleshoot.sh/docs/redact/redactors/) in the Troubleshoot documentation. + +### fileSelector + +The `fileSelector` object determines which files the redactor is applied to. If this object is omitted from the manifest file, the redactor is applied to all files. This object supports the following optional fields: + +<table> + <tr> + <th width="30%">Field Name</th> + <th width="70%">Description</th> + </tr> + <tr> + <td><code>file</code></td> + <td>(Optional) Specifies a single file for redaction.</td> + </tr> + <tr> + <td><code>files</code></td> + <td>(Optional) Specifies multiple files for redaction.</td> + </tr> +</table> + +Globbing is used to match files. For example, <code>/my/test/glob/*</code> matches <code>/my/test/glob/file</code>, but does not match <code>/my/test/glob/subdir/file</code>. + +### removals + +The `removals` object is required and defines the redactions that occur. This object supports the following fields. 
At least one of these fields must be specified:

<table>
  <tr>
    <th width="30%">Field Name</th>
    <th width="70%">Description</th>
  </tr>
  <tr>
    <td><code>regex</code></td>
    <td>(Optional) Applies a regular expression for removal and redaction, optionally on lines that immediately follow a line that matches a filter. The <code>selector</code> field is used to identify lines, and the <code>redactor</code> field specifies a regular expression that runs on the line after any line identified by <code>selector</code>. If <code>selector</code> is empty, the redactor runs on every line. Using a <code>selector</code> is useful for removing values from pretty-printed JSON, where the value to be redacted is pretty-printed on the line beneath another value.<br></br><br></br>Matches to the regex are removed or redacted, depending on the construction of the regex. Any portion of a match not contained within a capturing group is removed entirely. The contents of capturing groups tagged <code>mask</code> are masked with <code>***HIDDEN***</code>. Capturing groups tagged <code>drop</code> are dropped.</td>
  </tr>
  <tr>
    <td><code>values</code></td>
    <td>(Optional) Specifies values to replace with the string <code>***HIDDEN***</code>.</td>
  </tr>
  <tr>
    <td><code>yamlPath</code></td>
    <td>(Optional) Specifies a <code>.</code>-delimited path to the items to be redacted from a YAML document. If an item in the path is the literal string <code>*</code>, the redactor is applied to all options at that level.<br></br><br></br>Files that fail to parse as YAML or do not contain any matches are not modified. Files that do contain matches are re-rendered, which removes comments and custom formatting. Multi-document YAML is not fully supported. Only the first document is checked for matches, and if a match is found, later documents are discarded entirely.</td>
  </tr>
</table>

## KOTS Redactor Example {#example}

The following example shows `regex` and `yamlPath` redaction for a support bundle:

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: Redactor
metadata:
  name: my-redactor-name
spec:
  redactors:
  - name: all files # as no file is specified, this redactor will run against all files
    removals:
      regex:
      - redactor: (another)(?P<mask>.*)(here) # this will replace anything between the strings `another` and `here` with `***HIDDEN***`
      - selector: 'S3_ENDPOINT' # remove the value in lines immediately following those that contain the string `S3_ENDPOINT`
        redactor: '("value": ").*(")'
      yamlPath:
      - "abc.xyz.*" # redact all items in the array at key `xyz` within key `abc` in YAML documents
```


---


# Embedded Cluster Install Command Options

import ProxyLimitations from "../partials/embedded-cluster/_proxy-install-limitations.mdx"
import ProxyRequirements from "../partials/embedded-cluster/_proxy-install-reqs.mdx"


# Embedded Cluster Install Command Options

This topic describes the options available with the Embedded Cluster install command. For more information about how to install with Embedded Cluster, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded) or [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap).
+ +## Usage + +```bash +sudo ./APP_SLUG install --license LICENSE_FILE [flags] +``` +* `APP_SLUG` is the unique application slug +* `LICENSE_FILE` is the customer's license + +## Flags + +<table> + <tr> + <th width="35%">Flag</th> + <th width="65%">Description</th> + </tr> + <tr> + <td>`--admin-console-password`</td> + <td> + <p>Set the password for the Admin Console. The password must be at least six characters in length. If not set, the user is prompted to provide an Admin Console password.</p> + </td> + </tr> + <tr> + <td>`--admin-console-port`</td> + <td> + <p>Port on which to run the KOTS Admin Console. **Default**: By default, the Admin Console runs on port 30000.</p> + <p>**Limitation:** It is not possible to change the port for the Admin Console during a restore with Embedded Cluster. For more information, see [Disaster Recovery for Embedded Cluster (Alpha)](/vendor/embedded-disaster-recovery).</p> + </td> + </tr> + <tr> + <td>`--airgap-bundle`</td> + <td>The Embedded Cluster air gap bundle used for installations in air-gapped environments with no outbound internet access. For information about how to install in an air-gapped environment, see [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap).</td> + </tr> + <tr> + <td>`--cidr`</td> + <td> + <p>The range of IP addresses that can be assigned to Pods and Services, in CIDR notation. **Default:** By default, the CIDR block is `10.244.0.0/16`.</p> + <p>**Requirement**: Embedded Cluster 1.16.0 or later.</p> + </td> + </tr> + <tr> + <td>`--config-values`</td> + <td> + <p>Path to the ConfigValues file for the application. The ConfigValues file can be used to pass the application configuration values from the command line during installation, such as when performing automated installations as part of CI/CD pipelines. For more information, see [Automating Installation with Embedded Cluster](/enterprise/installing-embedded-automation).</p> + <p><strong>Requirement:</strong> Embedded Cluster 1.18.0 and later.</p> + </td> + </tr> + <tr> + <td>`--data-dir`</td> + <td> + <p>The data directory used by Embedded Cluster. **Default**: `/var/lib/embedded-cluster`</p> + <p>**Requirement**: Embedded Cluster 1.16.0 or later.</p> + <p>**Limitations:**</p> + <ul> + <li>The data directory for Embedded Cluster cannot be changed after the cluster is installed.</li> + <li>For multi-node installations, the same data directory that is supplied at installation is used for all nodes joined to the cluster. You cannot choose a different data directory when joining nodes with the Embedded Cluster `join` command. For more information about joining nodes, see [Add Nodes to a Cluster](/enterprise/embedded-manage-nodes#add-nodes) in _Managing Multi-Node Clusters with Embedded Cluster_.</li> + <li>If you use the `--data-dir` flag to change the data directory during installation, then you must use the same location when restoring in a disaster recovery scenario. For more information about disaster recovery with Embedded Cluster, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery).</li> + <li>Replicated does not support using symlinks for the Embedded Cluster data directory. 
Use the `--data-dir` flag instead of symlinking `/var/lib/embedded-cluster`.</li> + </ul> + </td> + </tr> + <tr> + <td>`--http-proxy`</td> + <td> + <p>Proxy server to use for HTTP.</p> + <ProxyRequirements/> + <ProxyLimitations/> + </td> + </tr> + <tr> + <td>`--https-proxy`</td> + <td> + <p>Proxy server to use for HTTPS.</p> + <ProxyRequirements/> + <ProxyLimitations/> + </td> + </tr> + <tr> + <td>`--local-artifact-mirror-port`</td> + <td> + <p>Port on which to run the Local Artifact Mirror (LAM). **Default**: By default, the LAM runs on port 50000.</p> + </td> + </tr> + <tr> + <td>`--network-interface`</td> + <td> + <p>The name of the network interface to bind to for the Kubernetes API. A common use case of `--network-interface` is for multi-node clusters where node communication should happen on a particular network. **Default**: If a network interface is not provided, the first valid, non-local network interface is used.</p> + </td> + </tr> + <tr> + <td>`--no-proxy`</td> + <td> + <p>Comma-separated list of hosts for which not to use a proxy.</p> + <p>For single-node installations, pass the IP address of the node where you are installing. For multi-node installations, when deploying the first node, pass the list of IP addresses for all nodes in the cluster (typically in CIDR notation). The network interface's subnet will automatically be added to the no-proxy list if the node's IP address is not already included.</p> + <p>The following are never proxied:</p> + <ul> + <li>Internal cluster communication (`localhost`, `127.0.0.1`, `.cluster.local`, `.svc`)</li> + <li>The CIDR block used for assigning IPs to Kubernetes Pods and Services. By default, the CIDR block is `10.244.0.0/16`. For information about how to change this default, see [Set IP Address Range for Pods and Services](#set-ip-address-range-for-pods-and-services).</li> + </ul> + <p>To ensure your application's internal cluster communication is not proxied, use fully qualified domain names like `my-service.my-namespace.svc` or `my-service.my-namespace.svc.cluster.local`.</p> + <ProxyRequirements/> + <ProxyLimitations/> + </td> + </tr> + <tr> + <td>`--private-ca`</td> + <td> + <p>The path to trusted certificate authority (CA) certificates. Using the `--private-ca` flag ensures that the CA is trusted by the installation. KOTS writes the CA certificates provided with the `--private-ca` flag to a ConfigMap in the cluster.</p> + <p>The KOTS [PrivateCACert](/reference/template-functions-static-context#privatecacert) template function returns the ConfigMap containing the private CA certificates supplied with the `--private-ca` flag. 
You can use this template function to mount the ConfigMap so your containers trust the CA too.</p> + </td> + </tr> +</table> + +## Examples + +### Air Gap Install + +```bash +sudo ./my-app install --license license.yaml --airgap-bundle myapp.airgap +``` + +### Change the Admin Console and LAM Ports + +```bash +sudo ./my-app install --license license.yaml --admin-console-port=20000 --local-artifact-mirror-port=40000 +``` + +### Change the Data Directory + +```bash +sudo ./my-app install --license license.yaml --data-dir /data/embedded-cluster +``` + +### Headless (Automated) Install + +```bash +sudo ./my-app install --license license.yaml \ + --config-values configvalues.yaml \ + --admin-console-password password +``` + +### Install Behind a Proxy + +```bash +sudo ./APP_SLUG install --license license.yaml \ + --http-proxy=HOST:PORT \ + --https-proxy=HOST:PORT \ + --no-proxy=LIST_OF_HOSTS +``` +Where: + +* `HOST:PORT` is the host and port of the proxy server +* `LIST_OF_HOSTS` is the list of hosts to not proxy. For example, the IP address of the node where you are installing. Or, for multi-node clusters, the list of IP addresses for all nodes in the cluster, typically in CIDR notation. + +### Install Behind an MITM Proxy + +```bash +sudo ./my-app install --license license.yaml --private-ca /path/to/private-ca-bundle \ + --http-proxy=http://10.128.0.0:3300 \ + --https-proxy=http://10.128.0.0:3300 \ + --no-proxy=123.89.46.4,10.96.0.0/16,*.example.com +``` + +### Set Admin Console Password + +```bash +sudo ./my-app install --license license.yaml --admin-console-password password +``` + +### Set IP Address Range for Pods and Services + +```bash +sudo ./my-app install --license license.yaml --cidr 172.16.136.0/16 +``` + +### Use a Specific Network Interface + +```bash +sudo ./my-app install --license license.yaml --network-interface eno167777 +``` + +--- + + +# Embedded Cluster Config + +# Embedded Cluster Config + +This topic is a reference for the Replicated Embedded Cluster Config custom resource. For more information about Embedded Cluster, see [Using Embedded Cluster](/vendor/embedded-overview). + +:::note +Embedded Cluster is in beta. If you are instead looking for information about creating Kubernetes Installers with Replicated kURL, see the [Replicated kURL](/vendor/packaging-embedded-kubernetes) section. +::: + +## Overview + +To install your application with Embedded Cluster, an Embedded Cluster Config must be created in a release. Embedded Cluster installation artifacts are available only for releases that include an Embedded Cluster Config. + +The Embedded Cluster Config lets you define several aspects of the Kubernetes cluster that will be created. + +### Limitations + +* The Embedded Cluster Config does not support the use of Go template functions, including [KOTS template functions](/reference/template-functions-about). + +For additional property-specific limitations, see the sections below. 
+ +### Example + +```yaml +apiVersion: embeddedcluster.replicated.com/v1beta1 +kind: Config +spec: + version: 2.1.3+k8s-1.30 + roles: + controller: + name: management + labels: + management: "true" + custom: + - name: app + labels: + app: "true" + extensions: + helm: + repositories: + - name: ingress-nginx + url: https://kubernetes.github.io/ingress-nginx + charts: + - name: ingress-nginx + chartname: ingress-nginx/ingress-nginx + namespace: ingress-nginx + version: "4.8.3" + values: | + controller: + service: + type: NodePort + nodePorts: + http: "80" + https: "443" + # Known issue: Only use image tags for multi-architecture images. + # Set digest to empty string to ensure the air gap builder uses + # single-architecture images. + image: + digest: "" + digestChroot: "" + admissionWebhooks: + patch: + image: + digest: "" +``` + +## version + +You must specify which version of Embedded Cluster to install. Each version of Embedded Cluster includes particular versions of components like KOTS (Admin Console) and OpenEBS. + +For a full list of versions, see the Embedded Cluster [releases page](https://github.com/replicatedhq/embedded-cluster/releases) in GitHub. It's recommended to keep this version as up to date as possible because Embedded Cluster is changing rapidly. + +## roles + +You can optionally customize node roles in the Embedded Cluster Config using the `roles` key. + +If the `roles` key is configured, users select one or more roles to assign to a node when it is joined to the cluster. A single node can be assigned: +* The `controller` role, which designates nodes that run the Kubernetes control plane +* One or more `custom` roles +* Both the `controller` role _and_ one or more `custom` roles + +For more information about how to assign node roles in the Admin Console, see [Managing Multi-Node Clusters with Embedded Cluster](/enterprise/embedded-manage-nodes). + +If the `roles` key is _not_ configured, all nodes joined to the cluster are assigned the `controller` role. The `controller` role designates nodes that run the Kubernetes control plane. Controller nodes can also run other workloads, such as application or Replicated KOTS workloads. + +For more information, see the sections below. + +### controller + +By default, all nodes joined to a cluster are assigned the `controller` role. + +You can customize the `controller` role in the following ways: +* Change the `name` that is assigned to controller nodes. By default, controller nodes are named “controller”. If you plan to create any `custom` roles, Replicated recommends that you change the default name for the `controller` role to a term that is easy to understand, such as "management". This is because, when you add `custom` roles, both the name of the `controller` role and the names of any `custom` roles are displayed to the user when they join a node. +* Add one or more `labels` to be assigned to all controller nodes. See [labels](#labels). + +#### Example + +```yaml +apiVersion: embeddedcluster.replicated.com/v1beta1 +kind: Config +spec: + roles: + controller: + name: management + labels: + management: "true" # Label applied to "management" nodes +``` + +### custom + +You can add `custom` roles that users can assign to one or more nodes in the cluster. Each `custom` role that you add must have a `name` and can also have one or more `labels`. See [labels](#labels). + +Adding `custom` node roles is useful if you need to assign application workloads to specific nodes in multi-node clusters. 
For example, if your application has graphics processing unit (GPU) workloads, you could create a `custom` role that adds a `gpu=true` label to any node that is assigned the role. This allows you to then schedule GPU workloads on nodes labeled `gpu=true`. Or, if your application includes any resource-intensive workloads (such as a database) that must run on dedicated nodes, you could create a `custom` role that adds a `db=true` label to the node. This way, the database workload can be assigned to a certain node or nodes.

#### Example

```yaml
apiVersion: embeddedcluster.replicated.com/v1beta1
kind: Config
spec:
  roles:
    custom:
    - name: app
      labels:
        app: "true" # Label applied to "app" nodes
```

### labels

You can define Kubernetes labels for the default `controller` role and any `custom` roles that you add. When `labels` are defined, Embedded Cluster applies the label to any node in the cluster that is assigned the given role. Labels are useful for tasks like assigning workloads to nodes.

#### Example

```yaml
apiVersion: embeddedcluster.replicated.com/v1beta1
kind: Config
spec:
  roles:
    controller:
      name: management
      labels:
        management: "true" # Label applied to "management" nodes
    custom:
    - name: db
      labels:
        db: "true" # Label applied to "db" nodes
    - name: gpu
      labels:
        gpu: "true" # Label applied to "gpu" nodes
```

## extensions

If you need to install Helm charts before your application and as part of the Embedded Cluster itself, you can do this with Helm extensions. One situation where this is useful is if you want to ship an ingress controller, because Embedded Cluster does not yet include one.

Helm extensions are updated when new versions of your application are deployed from the Admin Console. So, for example, you can change the values for a Helm extension from one release to another, and those changes will be applied to the cluster when the new release is deployed.

The format for specifying Helm extensions uses the same k0s Helm extensions format from the k0s configuration. For more information about these fields, see the [k0s documentation](https://docs.k0sproject.io/stable/helm-charts/#example).

### Limitation

If a Helm extension is removed from the Embedded Cluster Config, the associated Helm chart is not removed from the cluster.

### Requirements

* The `version` field is required. Failing to specify a chart version will cause problems for upgrades.

* If you need to install multiple charts in a particular order, set the `order` field to a value greater than or equal to 10, as shown in the sketch following this list. Numbers below 10 are reserved for use by Embedded Cluster to deploy things like a storage provider and the Admin Console. If an `order` is not provided, Helm extensions are installed with order 10.
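For illustration, the following is a minimal sketch of two charts installed in an explicit order. The chart names, versions, and repositories are assumptions for the example, not defaults shipped with Embedded Cluster:

```yaml
apiVersion: embeddedcluster.replicated.com/v1beta1
kind: Config
spec:
  extensions:
    helm:
      repositories:
        - name: jetstack
          url: https://charts.jetstack.io
        - name: ingress-nginx
          url: https://kubernetes.github.io/ingress-nginx
      charts:
        - name: cert-manager
          chartname: jetstack/cert-manager
          namespace: cert-manager
          version: "1.14.5"
          order: 10 # deployed first
        - name: ingress-nginx
          chartname: ingress-nginx/ingress-nginx
          namespace: ingress-nginx
          version: "4.8.3"
          order: 11 # deployed after cert-manager
```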

### Example

```yaml
apiVersion: embeddedcluster.replicated.com/v1beta1
kind: Config
spec:
  extensions:
    helm:
      repositories:
        - name: ingress-nginx
          url: https://kubernetes.github.io/ingress-nginx
      charts:
        - name: ingress-nginx
          chartname: ingress-nginx/ingress-nginx
          namespace: ingress-nginx
          version: "4.8.3"
          values: |
            controller:
              service:
                type: NodePort
                nodePorts:
                  http: "80"
                  https: "443"
              # Known issue: Only use image tags for multi-architecture images.
              # Set digest to empty string to ensure the air gap builder uses
              # single-architecture images.
              image:
                digest: ""
                digestChroot: ""
              admissionWebhooks:
                patch:
                  image:
                    digest: ""
```

## unsupportedOverrides

:::important
This feature should be used with caution by advanced users who understand the risks and ramifications of changing the default configuration.
:::

Unsupported overrides allow you to override Embedded Cluster's default configuration, including the k0s config and the Helm values for extensions like KOTS and OpenEBS. This should be used with caution because changes here are untested and can disrupt or break Embedded Clusters. Any issues that are caused by unsupported overrides are not supported.

While they should be used with caution, unsupported overrides are useful if you need to make changes that are not otherwise exposed by Embedded Cluster.

### Override the k0s Config

By default, Embedded Cluster uses a k0s config that is tested and known to work for Embedded Clusters. In some circumstances, you might want to change the k0s config.

For more information on the k0s config, see [Configuration options](https://docs.k0sproject.io/stable/configuration/#configuration-file-reference) in the k0s documentation.

For example, you can do the following to enable WireGuard-based encryption. Note that other configuration might be necessary. See [`spec.network.calico`](https://docs.k0sproject.io/stable/configuration/#specnetworkcalico) in the k0s documentation for more details.
```yaml
apiVersion: embeddedcluster.replicated.com/v1beta1
kind: Config
spec:
  unsupportedOverrides:
    k0s: |
      config:
        spec:
          network:
            calico:
              wireguard: true
```

#### Limitations

* The `spec.api` and `spec.storage` keys in the k0s config cannot be changed after installation. Whereas most keys in the k0s config apply to the whole cluster, these two keys are set for each node. Embedded Cluster cannot update these keys on each individual node during updates, so they cannot be changed after installation.

* Overrides overwrite the corresponding fields in the k0s configuration. They are not merged into Embedded Cluster's default configuration. When using overrides to override a list, ensure that you include the elements in the list that Embedded Cluster includes by default, as shown in the sketch following this list.
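For example, the following is a hypothetical sketch of overriding a list-type key. The `sans` entries are illustrative only; confirm the actual default entries that need to be preserved before shipping an override like this:

```yaml
apiVersion: embeddedcluster.replicated.com/v1beta1
kind: Config
spec:
  unsupportedOverrides:
    k0s: |
      config:
        spec:
          api:
            # This list replaces the default list entirely rather than
            # merging with it, so repeat any default entries (illustrative
            # here) alongside your additions.
            sans:
              - "kubernetes.default.svc" # hypothetical default entry
              - "api.internal.example.com" # your addition
```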

### Override the Helm Values for Built-In Extensions

Embedded Cluster deploys built-in extensions like KOTS and OpenEBS to provide capabilities like storage and application management. These extensions are deployed with Helm, and the Helm values for each can be modified if necessary.

To modify these values, you can use the `unsupportedOverrides.builtInExtensions` key of the Embedded Cluster Config. Each chart you want to modify is an item in the array. The `name` key identifies the Helm chart that you want to modify, and the `values` key is a string where you specify your modified Helm values. Your modified values are merged into the values used by Embedded Cluster.

The following are the built-in extensions available for modification:

- `openebs`
- `admin-console`
- `velero`
- `embedded-cluster-operator`

#### Example

```yaml
apiVersion: embeddedcluster.replicated.com/v1beta1
kind: Config
spec:
  unsupportedOverrides:
    builtInExtensions:
    - name: openebs
      values: |
        key: value
```


---


# admin-console garbage-collect-images

# admin-console garbage-collect-images

Starts image garbage collection.
The KOTS Admin Console must be running and an application must be installed in order to use this command.

### Usage
```bash
kubectl kots admin-console garbage-collect-images -n <namespace>
```

This command supports all [global flags](kots-cli-global-flags).

| Flag                | Type   | Description |
|:--------------------|--------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `-h, --help`        |        | Help for `garbage-collect-images` |
| `-n, --namespace`   | string | The namespace where the Admin Console is running _(required)_ |
| `--ignore-rollback` | string | Force image garbage collection even if rollback is enabled for the application (default `false`). Note: this may impact the ability to roll back the application to a previous version. |

### Examples
```bash
kubectl kots admin-console garbage-collect-images -n default
```


---


# admin-console generate-manifests

import KotsadmNamespace from "../partials/kots-cli/_kotsadm-namespace.mdx"
import KotsadmRegistry from "../partials/kots-cli/_kotsadm-registry.mdx"
import RegistryPassword from "../partials/kots-cli/_registry-password.mdx"
import RegistryUsername from "../partials/kots-cli/_registry-username.mdx"
import StrictSecContext from "../partials/kots-cli/_strict-sec-context-yaml.mdx"

# admin-console generate-manifests

Running this command creates a directory on the workstation that contains the Replicated Admin Console manifests. These assets can be used to deploy KOTS to a cluster through other workflows, such as kubectl, to provide additional customization of the Admin Console before deploying.

### Limitations

* `generate-manifests` does not support generating manifests for Red Hat OpenShift clusters or GKE Autopilot clusters if executed without a Kubernetes cluster context.

* To upgrade a KOTS instance that has ever been on version 1.72.0 or earlier, you must run `generate-manifests` with a Kubernetes cluster context.

* The `admin-console generate-manifests` command does not accept the [`--strict-security-context`](/reference/kots-cli-install#usage) flag, which deploys KOTS Pods with a security context.
To generate Admin Console manifests with a security context, add the following to the Pod templates for Deployments and StatefulSets deployed by KOTS:

  <StrictSecContext/>

### Usage
```bash
kubectl kots admin-console generate-manifests [flags]
```

This command supports the following flags:

<table>
  <tr>
    <td>Flag</td>
    <td>Type</td>
    <td>Description</td>
  </tr>
  <tr>
    <td><code>--rootdir</code></td>
    <td>string</td>
    <td>Root directory where the YAML will be written (default `${HOME}` or `%USERPROFILE%`)</td>
  </tr>
  <tr>
    <td><code>--namespace</code></td>
    <td>string</td>
    <td>Target namespace for the Admin Console</td>
  </tr>
  <tr>
    <td><code>--shared-password</code></td>
    <td>string</td>
    <td>Shared password to use when deploying the Admin Console</td>
  </tr>
  <tr>
    <td><code>--http-proxy</code></td>
    <td>string</td>
    <td>Sets the HTTP_PROXY environment variable in all KOTS Admin Console components</td>
  </tr>
  <tr>
    <td><code>--https-proxy</code></td>
    <td>string</td>
    <td>Sets the HTTPS_PROXY environment variable in all KOTS Admin Console components</td>
  </tr>
  <KotsadmNamespace/>
  <KotsadmRegistry/>
  <tr>
    <td><code>--no-proxy</code></td>
    <td>string</td>
    <td>Sets the NO_PROXY environment variable in all KOTS Admin Console components</td>
  </tr>
  <tr>
    <td><code>--private-ca-configmap</code></td>
    <td>string</td>
    <td>Name of a ConfigMap containing private CAs to add to the kotsadm deployment</td>
  </tr>
  <RegistryPassword/>
  <RegistryUsername/>
  <tr>
    <td><code>--with-minio</code></td>
    <td>bool</td>
    <td>Set to true to include a local minio instance to be used for storage (default true)</td>
  </tr>
  <tr>
    <td><code>--minimal-rbac</code></td>
    <td>bool</td>
    <td>Set to true to generate the Admin Console manifests with minimal, namespace-scoped role-based access control (RBAC) permissions</td>
  </tr>
  <tr>
    <td><code>--additional-namespaces</code></td>
    <td>string</td>
    <td>Comma-delimited list of additional namespaces for KOTS to manage, outside of the namespace where it is deployed. Ignored unless <code>--minimal-rbac=true</code> is set</td>
  </tr>
  <tr>
    <td><code>--storage-class</code></td>
    <td>string</td>
    <td>Sets the storage class to use for the KOTS Admin Console components. <strong>Default:</strong> unset, which means the default storage class will be used</td>
  </tr>
</table>

### Examples
```bash
kubectl kots admin-console generate-manifests
kubectl kots admin-console generate-manifests --rootdir ./manifests
kubectl kots admin-console generate-manifests --namespace kotsadm --minimal-rbac=true --additional-namespaces="app1,app3"
```
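As a sketch of the follow-on deployment step, you might apply the generated YAML with kubectl. The `./kots-manifests` path here is an assumption for the example; use whichever directory you passed to `--rootdir`:

```bash
kubectl kots admin-console generate-manifests --rootdir ./kots-manifests --namespace kotsadm
kubectl apply -R -f ./kots-manifests -n kotsadm
```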
**Default:** 8800 | + +### Examples +```bash +kubectl kots admin-console --namespace kots-sentry +``` + + +--- + + +# admin-console push-images + +# admin-console push-images + +Pushes images from an air gap bundle to a private registry. +The air gap bundle can be either a KOTS Admin Console release or an application release. + +### Usage +```bash +kubectl kots admin-console push-images [airgap-bundle] [private-registry] [flags] +``` + +This command supports all [global flags](kots-cli-global-flags) and also: + +| Flag | Type | Description | +|:------------------------|--------|-------------------------------------| +| `-h, --help` | | Help for the command | +| `--registry-username` | string | username for the private registry | +| `--registry-password` | string | password for the private registry | +| `--skip-registry-check` | bool | Set to `true` to skip the connectivity test and validation of the provided registry information. **Default:** `false` | + +### Examples +```bash +kubectl kots admin-console push-images ./kotsadm.tar.gz private.registry.host/app-name \ + --registry-username rw-username \ + --registry-password rw-password +``` + + +--- + + +# admin-console upgrade + +# admin-console upgrade + +import EnsureRBAC from "../partials/kots-cli/_ensure-rbac.mdx" +import Help from "../partials/kots-cli/_help.mdx" +import KotsadmNamespace from "../partials/kots-cli/_kotsadm-namespace.mdx" +import KotsadmRegistry from "../partials/kots-cli/_kotsadm-registry.mdx" +import RegistryPassword from "../partials/kots-cli/_registry-password.mdx" +import RegistryUsername from "../partials/kots-cli/_registry-username.mdx" +import SkipRBACCheck from "../partials/kots-cli/_skip-rbac-check.mdx" +import StrictSecurityContext from "../partials/kots-cli/_strict-security-context.mdx" +import WaitDuration from "../partials/kots-cli/_wait-duration.mdx" +import WithMinIO from "../partials/kots-cli/_with-minio.mdx" + +Upgrades the KOTS Admin Console to match the version of KOTS CLI. + + +### Usage +```bash +kubectl kots admin-console upgrade [flags] +``` + +This command supports all [global flags](kots-cli-global-flags) and also: +<table> + <tr> + <th width="30%">Flag</th> + <th>Type</th> + <th>Description</th> + </tr> + <EnsureRBAC/> + <Help/> + <KotsadmNamespace/> + <KotsadmRegistry/> + <RegistryPassword/> + <RegistryUsername/> + <SkipRBACCheck/> + <StrictSecurityContext/> + <WaitDuration/> + <WithMinIO/> +</table> + +### Examples +```bash +kubectl kots admin-console upgrade --namespace kots-sentry +kubectl kots admin-console upgrade --ensure-rbac=false +``` + + +--- + + +# backup + +# backup + +Create a full instance snapshot for disaster recovery. + +### Usage + +```bash +kubectl kots backup [flags] +``` + +This command supports the following flags: + +| Flag | Type | Description | +| :---------------- | ------ | ------------------------------------------------------------------------------- | +| `-h, --help` | | Help for `backup`. | +| `-n, --namespace` | string | The namespace where the Admin Console is running. **Default:** `default` | +| `-o, --output` | string | The output format. Supports JSON. Defaults to plain text if not set. | +| `--wait`. | bool | Wait for the backup to finish. **Default:** true | + +### Example + +```bash +kubectl kots backup --namespace kots-sentry +``` + + +--- + + +# backup ls + +# backup ls + +:::note +This command is deprecated. Use [`kubectl kots get backups`](/reference/kots-cli-get-backups) instead. 
:::

Show a list of all the available instance snapshots for disaster recovery.

### Usage

```bash
kubectl kots backup ls [flags]
```

This command supports the following flags:

| Flag              | Type   | Description |
| :---------------- | ------ | ------------------------------------------------------------------- |
| `-h, --help`      |        | Help for `backup ls`. |
| `-n, --namespace` | string | Filter by the namespace the Admin Console was installed in. |

### Example

```bash
kubectl kots backup ls --namespace kots-sentry
```


---


# docker ensure-secret

# docker ensure-secret

Creates an image pull secret for Docker Hub that the Admin Console can use to avoid [rate limiting](/enterprise/image-registry-rate-limits).
The credentials are validated before creating the image pull secret.
Running this command creates a new application version, based on the latest version, with the new image pull secret added to all Kubernetes manifests that have images.
For the secret to take effect and avoid rate limiting, the new version must be deployed.

### Usage

```bash
kubectl kots docker ensure-secret [flags]
```

- _Provide `[flags]` according to the table below_

| Flag                   | Type   | Description |
| ---------------------- | ------ | ------------------------------------------------------------------- |
| `-h, --help`           |        | help for ensure-secret |
| `--dockerhub-username` | string | DockerHub username to be used _(required)_ |
| `--dockerhub-password` | string | DockerHub password to be used _(required)_ |
| `-n, --namespace`      | string | the namespace where the Admin Console is running _(required)_ |

### Example

```bash
kubectl kots docker ensure-secret --dockerhub-username sentrypro --dockerhub-password password --namespace sentry-pro
```


---


# docker

# docker

KOTS Docker interface

### Usage

```bash
kubectl kots docker [command]
```

This command supports all [global flags](kots-cli-global-flags).


---


# download

# download

Retrieves a copy of the application manifests from the cluster and stores them in a specific directory structure on your workstation.
Requires a running application with the KOTS Admin Console.

## Usage
```bash
kubectl kots download [app-slug] [flags]
```

* _Replace `[app-slug]` with the application slug provided by your software vendor (required)._ For more information, see [Get the Application Slug](/vendor/vendor-portal-manage-app#slug) in _Managing Applications_.
* _Provide `[flags]` according to the table below_

This command supports all [global flags](kots-cli-global-flags) and also:


| Flag                        | Type   | Description |
|:----------------------------|--------|------------------------------------------------------------------------------------------------------------------------------|
| `--decrypt-password-values` | bool   | decrypt password values to plaintext |
| `--dest`                    | string | the directory to store the application in _(defaults to current working dir)_ |
| `--current`                 | bool   | download the archive of the currently deployed app version |
| `--sequence`                | int    | sequence of the app version to download the archive for _(defaults to the latest version unless the `--current` flag is set)_ |
| `-h, --help`                |        | help for download |
| `-n, --namespace`           | string | the namespace to download from _(default `"default"`)_ |
| `--overwrite`               | bool   | overwrite any local files, if present |
| `-o, --output`              | string | output format (currently supported: json) _(defaults to plain text if not set)_ |

## Example
```bash
kubectl kots download kots-sentry --namespace kots-sentry --dest ./manifests --overwrite
```


---


# enable-ha

# enable-ha

(Deprecated) Runs the rqlite StatefulSet as three replicas for data replication and high availability.

This command is deprecated and will be removed in a future release. The EKCO add-on for Replicated kURL now scales up the rqlite StatefulSet automatically when three or more nodes are healthy and the OpenEBS localpv storage class is available. For more information, see [EKCO add-on](https://kurl.sh/docs/add-ons/ekco#kotsadm) in the kURL documentation.

## Usage
```bash
kubectl kots enable-ha [flags]
```

* _Provide `[flags]` according to the table below_

This command supports all [global flags](kots-cli-global-flags) and also:


| Flag              | Type   | Description |
|:------------------|--------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `--wait-duration` | string | Timeout used while waiting for individual components to be ready. Must be in Go duration format. For example, `10s` or `2m`. See [func ParseDuration](https://pkg.go.dev/time#ParseDuration) in the Go documentation. |
| `-h, --help`      |        | Help for `enable-ha`. |
| `-n, --namespace` | string | The namespace where the Admin Console is running _(required)_ |

## Example
```bash
kubectl kots enable-ha --namespace kots-sentry
```


---


# get apps

# get apps

The `kots get apps` command lists installed applications.

### Usage

```bash
kubectl kots get apps [flags]
```

- _Provide `[flags]` according to the table below_

| Flag              | Type   | Description |
| :---------------- | ------ | ------------------------------------------------------------------- |
| `-h, --help`      |        | help for get apps |
| `-n, --namespace` | string | the namespace where the Admin Console is running _(required)_ |

### Example

```bash
kubectl kots get apps -n default
```


---


# get backups

# get backups

The `kots get backups` command lists available full snapshots (instance).
### Usage

```bash
kubectl kots get backups [flags]
```

- _Provide `[flags]` according to the table below_

| Flag              | Type   | Description |
| :---------------- | ------ | ------------------------------------------------------------------- |
| `-h, --help`      |        | help for get backups |
| `-n, --namespace` | string | filter by the namespace in which the Admin Console is/was installed |

### Examples

Basic

```bash
kubectl kots get backups
```

Filtering by a namespace

```bash
kubectl kots get backups -n default
```


---


# get config

# get config

The `kots get config` command returns the `configValues` file for an application.

### Usage

```bash
kubectl kots get config [flags]
```

- _Provide `[flags]` according to the table below_

| Flag              | Type   | Description |
| :---------------- | ------ | ------------------------------------------------------------------- |
| `--appslug`       | string | The slug of the target application. Required when more than one application is deployed. Your software vendor provides the application slug. For more information, see <a href="/vendor/vendor-portal-manage-app#slug">Get the Application Slug</a> in <em>Managing Applications</em>.|
| `--current`       | bool   | When set, the `configValues` file for the currently deployed version of the application is retrieved.|
| `--sequence`      | int    | Retrieves the `configValues` file for the specified application sequence. **Default**: Latest (unless the `--current` flag is set).|
| `--decrypt`       | bool   | Decrypts password items within the configuration.|
| `-h, --help`      |        | Help for `get config`.|
| `-n, --namespace` | string | (Required) The namespace where the Admin Console is running.|

### Example

```bash
kubectl kots get config -n default --sequence 5 --appslug myapp
```


---


# get

# get

The `kots get` command shows information about one or more resources.

### Usage
```bash
kubectl kots get [resource] [flags]
```

This command supports all [global flags](kots-cli-global-flags) and also:

| Flag           | Type | Description |
|:---------------|------|-------------|
| `-o, --output` |      | Output format. **Supported formats**: `json`. |

### Resources

* `apps` lists installed applications.
* `backups` lists available full snapshots (instance).
* `config` lists the `configValues` for an application.
* `restores` lists created full snapshot restores.
* `versions` lists the versions available for a given `app-slug`.


---


# get restores

# get restores

The `kots get restores` command lists created full snapshot restores.

### Usage

```bash
kubectl kots get restores [flags]
```

- _Provide `[flags]` according to the table below_

| Flag              | Type   | Description |
| :---------------- | ------ | ------------------------------------------------------------------- |
| `-h, --help`      |        | help for get restores |
| `-n, --namespace` | string | filter by the namespace in which the Admin Console is/was installed |

### Examples

Basic

```bash
kubectl kots get restores
```

Filtering by a namespace

```bash
kubectl kots get restores -n default
```


---


# get versions

# get versions

The `kots get versions` command lists all versions of an application.

> Introduced in KOTS v1.61.0

### Usage

```bash
kubectl kots get versions [app-slug] [flags]
```

- _Replace `[app-slug]` with the app slug for your KOTS application (required)._
- _Provide `[flags]` according to the table below_

| Flag                      | Type   | Description |
| :------------------------ | ------ | --------------------------------------------------------------------------------------------------- |
| `-h, --help`              |        | Help for `get versions`. |
| `-n, --namespace`         | string | (Required) The namespace where the Admin Console is running. |
| `--current-page`          | int    | Offset, by page size, at which to start retrieving versions. **Default:** 0 |
| `--page-size`             | int    | Number of versions to return. **Default:** 20 |
| `--pin-latest`            | bool   | When set to true, always returns the latest version at the beginning. **Default:** false |
| `--pin-latest-deployable` | bool   | When set to true, always returns the latest version that can be deployed. The latest deployable version can differ from the latest version if a required version, which cannot be skipped, is present. **Default:** false |
| `-o, --output`            | string | Output format. **Supported formats:** `json`. **Default:** Plain text |

### Example

```bash
kubectl kots get versions kots-sentry -n default
```


---


# Installing the KOTS CLI

# Installing the KOTS CLI

Users can interact with the Replicated KOTS CLI to install and manage applications with Replicated KOTS. The KOTS CLI is a kubectl plugin that runs locally on any computer.


## Prerequisite

Install kubectl, the Kubernetes command-line tool. See [Install Tools](https://kubernetes.io/docs/tasks/tools/) in the Kubernetes documentation.

:::note
If you are using a cluster created with Replicated kURL, kURL installs both kubectl and the KOTS CLI when provisioning the cluster. For more information, see [Online Installation with kURL](/enterprise/installing-kurl) and [Air Gap Installation with kURL](/enterprise/installing-kurl-airgap).
:::

## Install

To install the latest version of the KOTS CLI to `/usr/local/bin`, run:

```bash
curl https://kots.io/install | bash
```

To install to a directory other than `/usr/local/bin`, run:

```bash
curl https://kots.io/install | REPL_INSTALL_PATH=/path/to/cli bash
```

To install a specific version of the KOTS CLI, run:

```bash
curl https://kots.io/install/<version> | bash
```

To verify your installation, run:

```bash
kubectl kots --help
```

## Install without Root Access

You can install the KOTS CLI on computers without root access or computers that cannot write to the `/usr/local/bin` directory.

To install the KOTS CLI without root access, you can do any of the following:

* (Online Only) [Install to a Different Directory](#install-to-a-different-directory)
* (Online Only) [Install Using Sudo](#install-using-sudo)
* (Online or Air Gap) [Manually Download and Install](#manually-download-and-install)

### Install to a Different Directory

You can set the `REPL_INSTALL_PATH` environment variable to install the KOTS CLI to a directory other than `/usr/local/bin` that does not require elevated permissions.

**Example:**

In the following example, the installation script installs the KOTS CLI to `~/bin` in the local directory. You can use the user home symbol `~` in the `REPL_INSTALL_PATH` environment variable. The script expands `~` to `$HOME`.
```bash
curl -L https://kots.io/install | REPL_INSTALL_PATH=~/bin bash
```

### Install Using Sudo

If you have sudo access to the directory where you want to install the KOTS CLI, you can set the `REPL_USE_SUDO` environment variable so that the installation script prompts you for your sudo password.

When you set the `REPL_USE_SUDO` environment variable to any value, the installation script uses sudo to create and write to the installation directory as needed. The script prompts for a sudo password if it is required for the user executing the script in the specified directory.

**Example:**

In the following example, the script uses sudo to install the KOTS CLI to the default `/usr/local/bin` directory.

```bash
curl -L https://kots.io/install | REPL_USE_SUDO=y bash
```

**Example:**

In the following example, the script uses sudo to install the KOTS CLI to the `/replicated/bin` directory.

```bash
curl -L https://kots.io/install | REPL_INSTALL_PATH=/replicated/bin REPL_USE_SUDO=y bash
```

### Manually Download and Install

You can manually download and install the KOTS CLI binary to install without root access, rather than using the installation script.

Users in air gap environments can also follow this procedure to install the KOTS CLI.

To manually download and install the KOTS CLI:

1. Download the KOTS CLI release for your operating system.

   You can run one of the following commands to download the latest version of the KOTS CLI from the [Releases](https://github.com/replicatedhq/kots/releases/latest) page in the KOTS GitHub repository:

   * **MacOS (AMD and ARM)**:

     ```bash
     curl -LO https://github.com/replicatedhq/kots/releases/latest/download/kots_darwin_all.tar.gz
     ```

   * **Linux (AMD)**:

     ```bash
     curl -LO https://github.com/replicatedhq/kots/releases/latest/download/kots_linux_amd64.tar.gz
     ```

   * **Linux (ARM)**:

     ```bash
     curl -LO https://github.com/replicatedhq/kots/releases/latest/download/kots_linux_arm64.tar.gz
     ```

1. Unarchive the `.tar.gz` file that you downloaded:

   * **MacOS (AMD and ARM)**:

     ```bash
     tar xvf kots_darwin_all.tar.gz
     ```
   * **Linux (AMD)**:

     ```bash
     tar xvf kots_linux_amd64.tar.gz
     ```
   * **Linux (ARM)**:

     ```bash
     tar xvf kots_linux_arm64.tar.gz
     ```

1. Rename the `kots` executable to `kubectl-kots` and move it to one of the directories that is in your PATH environment variable. This ensures that the system can access the executable when you run KOTS CLI commands.

   :::note
   You can run `echo $PATH` to view the list of directories in your PATH.
   :::

   Run one of the following commands, depending on if you have write access to the target directory:

   * **You have write access to the directory**:

     ```bash
     mv kots /PATH_TO_TARGET_DIRECTORY/kubectl-kots
     ```
     Replace `PATH_TO_TARGET_DIRECTORY` with the path to a directory that is in your PATH environment variable. For example, `/usr/local/bin`.

   * **You do _not_ have write access to the directory**:

     ```bash
     sudo mv kots /PATH_TO_TARGET_DIRECTORY/kubectl-kots
     ```
     Replace `PATH_TO_TARGET_DIRECTORY` with the path to a directory that is in your PATH environment variable. For example, `/usr/local/bin`.

1. Verify the installation:

   ```
   kubectl kots --help
   ```

## Uninstall

The KOTS CLI is a plugin for the Kubernetes kubectl command line tool. The KOTS CLI plugin is named `kubectl-kots`.
For more information about working with kubectl, see [Command line tool (kubectl)](https://kubernetes.io/docs/reference/kubectl/) in the Kubernetes documentation.

To uninstall the KOTS CLI:

1. Find the location where the `kubectl-kots` plugin is installed on your `PATH`:

   ```
   kubectl plugin list
   ```

2. Delete `kubectl-kots`:

   ```
   sudo rm PATH_TO_KOTS
   ```
   Replace `PATH_TO_KOTS` with the location where `kubectl-kots` is installed.

   **Example**:

   ```
   sudo rm /usr/local/bin/kubectl-kots
   ```


---


# Global flags

# Global flags

All KOTS CLI commands support a set of global flags to be used to connect to the cluster.

| Flag | Type | Description |
|---|---|---|
| `--as` | string | Username to impersonate for the operation |
| `--as-group` | stringArray | Group to impersonate for the operation. This flag can be repeated to specify multiple groups. |
| `--cache-dir` | string | Default HTTP cache directory (default "~/.kube/http-cache") |
| `--certificate-authority` | string | Path to a cert file for the certificate authority |
| `--client-certificate` | string | Path to a client certificate file for TLS |
| `--client-key` | string | Path to a client key file for TLS |
| `--cluster` | string | The name of the kubeconfig cluster to use |
| `--context` | string | The name of the kubeconfig context to use |
| `--insecure-skip-tls-verify` | bool | If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure |
| `--kubeconfig` | string | Path to the kubeconfig file to use for CLI requests. |
| `-n, --namespace` | string | If present, the namespace scope for this CLI request |
| `--request-timeout` | string | The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") |
| `-s, --server` | string | The address and port of the Kubernetes API server |
| `--token` | string | Bearer token for authentication to the API server |
| `--user` | string | The name of the kubeconfig user to use |


---


# identity-service enable-shared-password

# identity-service enable-shared-password

Enable the shared password login option in the KOTS Admin Console.

### Usage

```bash
kubectl kots identity-service enable-shared-password [flags]
```

This command supports all [global flags](kots-cli-global-flags) and also:

| Flag | Type | Description |
| :---------------- | ------ | ------------------------------------------------ |
| `-n, --namespace` | string | the namespace where the Admin Console is running _(required)_ |

### Examples

```bash
kubectl kots identity-service enable-shared-password --namespace kots-sentry
```


---


# identity-service

# identity-service

KOTS Identity Service

### Usage

```bash
kubectl kots identity-service [command]
```

This command supports all [global flags](kots-cli-global-flags).
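### Example

As a quick sketch, you can list the available identity-service subcommands and their flags with the standard help flag:

```bash
# Print the identity-service subcommands and flags
kubectl kots identity-service --help
```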
---


# install

import StrictSecurityContext from "../partials/kots-cli/_strict-security-context.mdx"
import KotsadmNamespace from "../partials/kots-cli/_kotsadm-namespace.mdx"
import KotsadmRegistry from "../partials/kots-cli/_kotsadm-registry.mdx"
import RegistryPassword from "../partials/kots-cli/_registry-password.mdx"
import RegistryUsername from "../partials/kots-cli/_registry-username.mdx"
import Help from "../partials/kots-cli/_help.mdx"

# install

Installs the application and the KOTS Admin Console directly to a cluster.
The `kots install` command pulls Kubernetes manifests from the remote upstream, deploys the manifests to the specified cluster, installs the Admin Console, and sets up port forwarding to make the Admin Console accessible on port 8800.
Alternatively, you can specify the `--port` flag to override the default port.

### Usage

```bash
kubectl kots install [upstream uri] [flags]
```

- _Replace `[upstream-uri]` with the URI for your KOTS application (required)._
- _If the KOTS application has been packaged by Replicated Vendor, the `--license-file` flag must be provided._
- _Provide `[flags]` according to the table below_

This command supports all [global flags](kots-cli-global-flags) and also:

<table>
  <tr>
    <td>Flag</td>
    <td>Type</td>
    <td>Description</td>
  </tr>
  <tr>
    <td><code>--additional-annotations</code></td>
    <td>string</td>
    <td>Additional annotations to add to kotsadm pods.</td>
  </tr>
  <tr>
    <td><code>--additional-labels</code></td>
    <td>string</td>
    <td>Additional labels to add to kotsadm pods.</td>
  </tr>
  <tr>
    <td><code>--airgap</code></td>
    <td>bool</td>
    <td>Set to <code>true</code> to run install in air gapped mode. Setting <code>--airgap-bundle</code> implies <code>--airgap=true</code>. <strong>Default:</strong> <code>false</code>. For more information, see <a href="/enterprise/installing-existing-cluster-airgapped">Air Gap Installation in Existing Clusters with KOTS</a>.</td>
  </tr>
  <tr>
    <td><code>--airgap-bundle</code></td>
    <td>string</td>
    <td>Path to the application air gap bundle where application metadata will be loaded from. Setting <code>--airgap-bundle</code> implies <code>--airgap=true</code>. For more information, see <a href="/enterprise/installing-existing-cluster-airgapped">Air Gap Installation in Existing Clusters with KOTS</a>.</td>
  </tr>
  <tr>
    <td><code>--app-version-label</code></td>
    <td>string</td>
    <td>The application version label to install. If not specified, the latest version is installed.</td>
  </tr>
  <tr>
    <td><code>--config-values</code></td>
    <td>string</td>
    <td>Path to a manifest file containing configuration values. This manifest must be <code>apiVersion: kots.io/v1beta1</code> and <code>kind: ConfigValues</code>. For more information, see <a href="/enterprise/installing-existing-cluster-automation">Installing with the KOTS CLI</a>.</td>
  </tr>
  <tr>
    <td><code>--copy-proxy-env</code></td>
    <td>bool</td>
    <td>Copy proxy environment variables from current environment into all Admin Console components. <strong>Default:</strong> <code>false</code></td>
  </tr>
  <tr>
    <td><code>--disable-image-push</code></td>
    <td>bool</td>
    <td>Set to <code>true</code> to disable images from being pushed to private registry. <strong>Default:</strong> <code>false</code></td>
  </tr>
  <tr>
    <td><code>--ensure-rbac</code></td>
    <td>bool</td>
    <td>When <code>false</code>, KOTS does not attempt to create the RBAC resources necessary to manage applications.
<strong>Default:</strong> <code>true</code>. If a role specification is needed, use the [generate-manifests](kots-cli-admin-console-generate-manifests) command.</td> + </tr> + <Help/> + <tr> + <td><code>--http-proxy</code></td> + <td>string</td> + <td>Sets HTTP_PROXY environment variable in all Admin Console components.</td> + </tr> + <tr> + <td><code>--https-proxy</code></td> + <td>string</td> + <td>Sets HTTPS_PROXY environment variable in all Admin Console components.</td> + </tr> + <KotsadmNamespace/> + <KotsadmRegistry/> + <tr> + <td><code>--license-file</code></td> + <td>string</td> + <td>Path to a license file.</td> + </tr> + <tr> + <td><code>--local-path</code></td> + <td>string</td> + <td>Specify a local-path to test the behavior of rendering a Replicated application locally. Only supported on Replicated application types.</td> + </tr> + <tr> + <td><code>--name</code></td> + <td>string</td> + <td>Name of the application to use in the Admin Console.</td> + </tr> + <tr> + <td><code>--no-port-forward</code></td> + <td>bool</td> + <td>Set to <code>true</code> to disable automatic port forward. <strong>Default:</strong> <code>false</code></td> + </tr> + <tr> + <td><code>--no-proxy</code></td> + <td>string</td> + <td>Sets NO_PROXY environment variable in all Admin Console components.</td> + </tr> + <tr> + <td><code>--port</code></td> + <td>string</td> + <td>Override the local port to access the Admin Console. <strong>Default:</strong> 8800</td> + </tr> + <tr> + <td><code>--private-ca-configmap</code></td> + <td>string</td> + <td>Name of a ConfigMap containing private CAs to add to the kotsadm deployment.</td> + </tr> + <tr> + <td><code>--preflights-wait-duration</code></td> + <td>string</td> + <td>Timeout to be used while waiting for preflights to complete. Must be in [Go duration](https://pkg.go.dev/time#ParseDuration) format. For example, 10s, 2m. <strong>Default:</strong> 15m</td> + </tr> + <RegistryPassword/> + <RegistryUsername/> + <tr> + <td><code>--repo</code></td> + <td>string</td> + <td>Repo URI to use when installing a Helm chart.</td> + </tr> + <tr> + <td><code>--shared-password</code></td> + <td>string</td> + <td>Shared password to use when deploying the Admin Console.</td> + </tr> + <tr> + <td><code>--skip-compatibility-check</code></td> + <td>bool</td> + <td>Set to <code>true</code> to skip compatibility checks between the current KOTS version and the application. <strong>Default:</strong> <code>false</code></td> + </tr> + <tr> + <td><code>--skip-preflights</code></td> + <td>bool</td> + <td>Set to <code>true</code> to skip preflight checks. <strong>Default:</strong> <code>false</code>. If any strict preflight checks are configured, the <code>--skip-preflights</code> flag is not honored because strict preflight checks must run and contain no failures before the application is deployed. For more information, see [Defining Preflight Checks](/vendor/preflight-defining).</td> + </tr> + <tr> + <td><code>--skip-rbac-check</code></td> + <td>bool</td> + <td>Set to <code>true</code> to bypass RBAC check. <strong>Default:</strong> <code>false</code></td> + </tr> + <tr> + <td><code>--skip-registry-check</code></td> + <td>bool</td> + <td>Set to <code>true</code> to skip the connectivity test and validation of the provided registry information. 
<strong>Default:</strong> <code>false</code></td>
  </tr>
  <StrictSecurityContext/>
  <tr>
    <td><code>--use-minimal-rbac</code></td>
    <td>bool</td>
    <td>When set to <code>true</code>, KOTS RBAC permissions are limited to the namespace where it is installed. To use <code>--use-minimal-rbac</code>, the application must support namespace-scoped installations and the user must have the minimum RBAC permissions required by KOTS in the target namespace. For a complete list of requirements, see [Namespace-scoped RBAC Requirements](/enterprise/installing-general-requirements#namespace-scoped) in _Installation Requirements_. <strong>Default:</strong> <code>false</code></td>
  </tr>
  <tr>
    <td><code>--wait-duration</code></td>
    <td>string</td>
    <td>Timeout to be used while waiting for individual components to be ready. Must be in [Go duration](https://pkg.go.dev/time#ParseDuration) format. For example, 10s, 2m. <strong>Default:</strong> 2m</td>
  </tr>
  <tr>
    <td><code>--with-minio</code></td>
    <td>bool</td>
    <td>When set to <code>true</code>, KOTS deploys a local MinIO instance for storage and uses MinIO for host path and NFS snapshot storage. <strong>Default:</strong> <code>true</code></td>
  </tr>
  <tr>
    <td><code>--storage-class</code></td>
    <td>string</td>
    <td>Sets the storage class to use for the KOTS Admin Console components. <strong>Default:</strong> unset, which means the default storage class will be used</td>
  </tr>
</table>


### Examples

```bash
kubectl kots install sentry/unstable --license-file ~/license.yaml
kubectl kots install kots-sentry/stable --shared-password IgqG5OBc9Gp --license-file ~/sentry-license.yaml --namespace sentry-namespace --config-values ~/config-values.yaml
kubectl kots install --ensure-rbac=false
```


---


# pull

# pull

This command creates a directory on the workstation containing the application and Kubernetes manifests. These assets can be used to deploy KOTS to a cluster through other workflows, such as kubectl. This command is necessary when managing an application without the use of the Admin Console.
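As a rough sketch of that workflow (the slug, license path, and rendered overlay path below are illustrative and depend on the application and the generated directory layout):

```bash
# Render the application and Admin Console manifests locally (example slug and license file)
kubectl kots pull sentry/unstable --license-file ~/license.yaml --rootdir ./manifests

# Deploy the rendered output with kubectl; kots pull writes kustomize overlays,
# so the final overlay directory (path shown is illustrative) can be applied with -k
kubectl apply -k ./manifests/sentry/overlays/downstreams/this-cluster
```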
### Usage
```bash
kubectl kots pull [upstream uri] [flags]
```
* _Replace `[upstream-uri]` with the URI for your KOTS application (required)._
* _If the KOTS application has been packaged by Replicated Vendor, the `--license-file` flag must be provided._
* _Provide `[flags]` according to the table below_

This command supports all [global flags](kots-cli-global-flags) and also:

| Flag | Type | Description |
|:-----|------|-------------|
| `--downstream` | strings | the list of any downstreams to create/update |
| `--exclude-admin-console` | bool | set to true to exclude the Admin Console _(only valid when `[upstream-uri]` points to a replicated app)_ |
| `--exclude-kots-kinds` | bool | set to true to exclude rendering KOTS custom objects to the base directory _(default `true`)_ |
| `-h, --help` | | help for pull |
| `--image-namespace` | string | the namespace/org in the docker registry to push images to _(required when `--rewrite-images` is set)_ |
| `--license-file` | string | path to a license file _(required when `[upstream-uri]` points to a replicated app)_ |
| `--local-path` | string | specify a local-path to pull a locally available replicated app _(only valid when `[upstream-uri]` points to a replicated app)_ |
| `-n, --namespace` | string | namespace to render the upstream to in the base _(default `"default"`)_ |
| `--private-ca-configmap` | string | name of a ConfigMap containing private CAs to add to the kotsadm deployment |
| `--registry-endpoint` | string | the endpoint of the local docker registry to use when pushing images _(required when `--rewrite-images` is set)_ |
| `--rewrite-images` | bool | set to true to force all container images to be rewritten and pushed to a local registry |
| `--rootdir` | string | root directory that will be used to write the yaml to _(default `${HOME}` or `%USERPROFILE%`)_ |
| `--shared-password` | string | shared password to use when deploying the Admin Console |
| `--http-proxy` | string | sets HTTP_PROXY environment variable in all KOTS Admin Console components |
| `--https-proxy` | string | sets HTTPS_PROXY environment variable in all KOTS Admin Console components |
| `--no-proxy` | string | sets NO_PROXY environment variable in all KOTS Admin Console components |
| `--copy-proxy-env` | bool | copy proxy environment variables from current environment into all KOTS Admin Console components |
| `--config-values` | string | path to a manifest containing config values (must be apiVersion: kots.io/v1beta1, kind: ConfigValues) |
| `--with-minio` | bool | set to true to include a local minio instance to be used for storage _(default true)_ |
| `--storage-class` | string | sets the storage class to use for the KOTS Admin Console components. _(default unset, which means the default storage class will be used)_ |

### Example
```bash
kubectl kots pull sentry/unstable --license-file ~/license.yaml
```


---


# remove

# remove

Removes an application reference from the KOTS Admin Console.

You can use the `kots remove` command to remove one or more installed applications from the Admin Console.
By default, the deployed application is not removed from the cluster. Only the reference for the application is removed from the Admin Console. To completely remove the application and delete its resources from the cluster, use the `--undeploy` flag, as shown in the sketch below.
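For example, a minimal sketch that removes the `sentry` application (the example slug used elsewhere on this page) and also deletes its deployed resources:

```bash
# Remove the app reference from the Admin Console AND delete its resources;
# --undeploy implies --force, so this succeeds even if the app is deployed
kubectl kots remove sentry -n default --undeploy
```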
### Usage
```bash
kubectl kots remove [app-slug] -n [namespace]
```
* _`[app-slug]` is the slug of the installed application to be removed (required)_
* _Provide `[flags]` according to the table below_

This command supports all [global flags](kots-cli-global-flags) and also:

<table>
  <tr>
    <th width="20%">Flag</th>
    <th width="10%">Type</th>
    <th width="70%">Description</th>
  </tr>
  <tr>
    <td><code>--force</code></td>
    <td><code>bool</code></td>
    <td>
      <p>Removes the reference even if the application has already been deployed.</p>
    </td>
  </tr>
  <tr>
    <td><code>--undeploy</code></td>
    <td><code>bool</code></td>
    <td>
      <p>Un-deploys the application by deleting all its resources from the cluster. When <code>--undeploy</code> is set, the <code>--force</code> flag is set automatically.</p>
      <p><strong>Note:</strong> <code>--undeploy</code> can remove application resources only from the namespace where KOTS is installed and from any namespaces provided in the <a href="custom-resource-application#additionalnamespaces">additionalNamespaces</a> field in the Application custom resource.</p>
      <p>The following describes how <code>--undeploy</code> removes application resources:</p>
      <ul>
        <li>For applications deployed with <code>kubectl apply</code> (including standalone manifest files and Helm charts deployed with <a href="/vendor/helm-native-about#replicated">Replicated Helm</a>), <code>--undeploy</code> identifies and removes resources based on a <code>kots.io/app-slug: <app_slug></code> annotation that is applied to all application resources during deployment.</li>
        <li>For Helm chart applications deployed with HelmChart custom resources with <code>apiVersion: kots.io/v1beta2</code> or <code>apiVersion: kots.io/v1beta1</code> and <code>useHelmInstall: true</code>, <code>--undeploy</code> runs <code>helm uninstall</code>.</li>
      </ul>
    </td>
  </tr>
  <tr>
    <td><code>-n</code></td>
    <td><code>string</code></td>
    <td><p>The namespace where the target application is deployed. Use <code>default</code> for the default namespace.</p></td>
  </tr>
</table>

### Example
```bash
kubectl kots remove sentry -n default
```


---


# reset-password

# reset-password

If you deployed an application with the KOTS Admin Console, the `kots reset-password` command changes the bcrypt password hash in the cluster, allowing you to log in again.

### Usage
```bash
kubectl kots reset-password [namespace] [flags]
```
* _Replace `[namespace]` with the namespace where the Admin Console and your KOTS application resides (required)._
* _Provide `[flags]` according to the table below_

This command supports all [global flags](kots-cli-global-flags) and also:


| Flag | Type | Description |
|:----------------------|------|-------------|
| `-h, --help` | | help for reset-password |
| `-n, --namespace`| string | the namespace where the Admin Console is running |

### Examples
```bash
kubectl kots reset-password sentry-namespace
```


---


# reset-tls

# reset-tls

If a bad TLS certificate is uploaded to the KOTS Admin Console or the kotsadm-tls secret is missing, the `kots reset-tls` command reapplies a default self-signed TLS certificate.
For more information about the certificates stored in this secret, see [Setting up TLS Certificates](https://kurl.sh/docs/install-with-kurl/setup-tls-certs) in the open source kURL documentation.
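Before resetting, you can check whether the `kotsadm-tls` secret exists in the Admin Console namespace. A quick sketch, where `sentry-namespace` is an example namespace:

```bash
# Inspect the secret that reset-tls repopulates with a self-signed certificate
kubectl get secret kotsadm-tls -n sentry-namespace
```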
### Usage
```bash
kubectl kots reset-tls [namespace] [flags]
```
* _Replace `[namespace]` with the namespace where the Admin Console and your KOTS application resides (required)._
* _Provide `[flags]` according to the table below_

This command supports all [global flags](kots-cli-global-flags) and also:


| Flag | Type | Description |
|:----------------------|------|-------------|
| `-h, --help` | | Help for `reset-tls`. |
| `-n, --namespace`| string | The namespace where the Admin Console is running. |
| `--accept-anonymous-uploads`| bool | Allow uploading a new certificate prior to authenticating. |

### Examples
```bash
kubectl kots reset-tls sentry-namespace
```


---


# restore

# restore

Restore full snapshots for disaster recovery, or do a partial restore of the application only or the Replicated Admin Console only.

### Usage

```bash
kubectl kots restore --from-backup [flags]
```

This command supports the following flags:

| Flag | Type | Description |
| :-------------------------- | ------ | --------------------------------------------------------------------------------------------- |
| `--exclude-admin-console` | bool | Exclude restoring the Admin Console and only restore the applications. **Default:** false |
| `--exclude-apps` | bool | Exclude restoring the applications and only restore the Admin Console. **Default:** false |
| `--from-backup` | string | (Required) The name of the backup to restore from. |
| `-h, --help` | | Help for `restore`. |
| `-o, --output` | string | The output format. Supports JSON. Defaults to plain text if not set. |
| `--velero-namespace` | string | (Required for minimal RBAC installations) The namespace where Velero is installed. |
| `--wait-for-apps` | bool | Wait for all applications to be restored. **Default:** true |

### Example

```bash
kubectl kots restore --from-backup instance-942kf
```


---


# restore ls

# restore ls

:::note
This command is deprecated. Use [`kubectl kots get restores`](/reference/kots-cli-get-restores) instead.
:::

Show a list of all the available full snapshot restores for disaster recovery.

### Usage

```bash
kubectl kots restore ls [flags]
```

This command supports the following flags:

| Flag | Type | Description |
| :---------------- | ------ | ------------------------------------------------------------------- |
| `-h, --help` | | Help for `restore ls`. |
| `-n, --namespace` | string | Filter by the namespace the Admin Console was installed in.|

### Example

```bash
kubectl kots restore ls --namespace kots-sentry
```


---


# set config

import PreflightsStrict from "../partials/preflights/_preflights-strict.mdx"


# set config

The `kots set config` command sets values for application config items in the latest release version.

> Introduced in KOTS v1.31.0

## Usage

```bash
kubectl kots set config [appSlug] [KEY_1=VAL_1 ... KEY_N=VAL_N] [flags]
```

- _Provide `[flags]` according to the table below_

| Flag | Type | Description |
| :-------------------| ------ | ------------------------------------------------------------------------------------------------------------------------------------- |
| `--config-file` | string | path to a manifest containing config values (must be `apiVersion: kots.io/v1beta1, kind: ConfigValues`) |
| `--merge` | bool | when set to true, only keys specified in the config file will be updated. This flag can only be used when the `--config-file` flag is used.
 |
|`--key` | string | name of a single key to set. This flag requires the `--value` or `--value-from-file` flag |
| `--value` | string | the value to set for the key specified in the `--key` flag. This flag cannot be used with the `--value-from-file` flag. |
| `--value-from-file` | string | path to the file containing the value to set for the key specified in the `--key` flag. This flag cannot be used with the `--value` flag. |
| `--deploy` | bool | when set, automatically deploy the latest version with the new configuration |
| `--skip-preflights` | bool | set to true to skip preflight checks when deploying the new version |
| `--current` | bool | set to true to use the currently deployed version of the app as the base for the new version |
| `--sequence` | int | sequence of the app version to use as the base for the new version (defaults to the latest version unless the `--current` flag is set) |
| `-n, --namespace` | string | the namespace where the Admin Console is running _(required)_ |


#### About Strict Preflight Checks

<PreflightsStrict/>


## Examples

```bash
kubectl kots set config myapp -n default --config-file /path/to/local/config.yaml
```

```bash
kubectl kots set config myapp -n default --key config-item-name --value-from-file /path/to/config/file/value.txt
```

```bash
kubectl kots set config myapp -n default config-item-name="config item value"
```

```bash
kubectl kots set config myapp -n default --key config-item-name --value "config item value"
```


---


# set

# set

Configure KOTS resources.

### Usage

```bash
kubectl kots set [resource] [flags]
```

This command supports all [global flags](kots-cli-global-flags).

### Resources

* `config` set config items for application.


---


# upload

import PreflightsStrict from "../partials/preflights/_preflights-strict.mdx"

# upload

Upload Kubernetes manifests from the local filesystem, creating a new version of the application that can be deployed.
When you have a copy of an application that was created with `kots pull` or `kots download`, you can upload it back to the Admin Console using the `kots upload` command.

## Usage
```bash
kubectl kots upload [source] [flags]
```
* _Replace `[source]` with a directory containing the manifests of your KOTS application (required)._
* _Provide `[flags]` according to the table below_

This command supports all [global flags](kots-cli-global-flags) and also:


| Flag | Type | Description |
|:----------------------|------|-------------|
| `-h, --help` | | help for upload |
| `--name`| string | the name of the kotsadm application to create |
| `-n, --namespace`| string | the namespace to upload to _(default `"default"`)_ |
| `--slug`| string | the application slug to use. if not present, a new one will be created |
| `--upstream-uri`| string | the upstream uri that can be used to check for updates |
| `--deploy`| bool | when set, automatically deploy the uploaded version |
| `--skip-preflights`| bool | set to true to skip preflight checks |
| `-o, --output` | string | output format (currently supported: json) _(defaults to plain text if not set)_ |


Any `plainText` values in the `upstream/userdata/config.yaml` file are automatically re-encrypted using the application cipher if the matching config item is a password type.
If both an encrypted and a `plainText` value are provided for a single item and they differ, the `plainText` value overwrites the encrypted value.
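As a sketch of the round trip this implies, reusing the example slug and destination directory from the `download` page above (the `upstream/userdata/config.yaml` path is relative to the downloaded archive):

```bash
# Download the current app archive, edit config values, then upload a new version
kubectl kots download kots-sentry --namespace kots-sentry --dest ./manifests --overwrite

# ...edit plainText values in ./manifests/upstream/userdata/config.yaml...

kubectl kots upload ./manifests --namespace kots-sentry --slug kots-sentry
```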
#### About Strict Preflight Checks

<PreflightsStrict/>


## Examples

```bash
kubectl kots upload ./manifests --name kots-sentry --namespace kots-sentry --slug kots-sentry --upstream-uri kots-sentry/unstable
```


---


# upstream download

# upstream download

The `kots upstream download` command retries downloading a failed update of the upstream application.

### Usage
```bash
kubectl kots upstream download [app-slug] [flags]
```
* _Replace `[app-slug]` with the app slug for your KOTS application (required)._
* _Provide `[flags]` according to the table below._

| Flag | Type | Description |
|:----------------------------------|--------|--------------------------------------------------------------------------------------------------|
| `-h, --help` | | Help for `upstream download`. |
| `--kubeconfig` | string | The kubeconfig to use. **Default**: `$KUBECONFIG`. If unset, then `$HOME/.kube/config`. |
| `-n, --namespace` | string | (Required) The namespace where the Admin Console is running. |
| `--sequence` | int | (Required) The local app sequence for the version to retry downloading. |
| `--skip-preflights` | bool | Set to `true` to skip preflight checks. |
| `--skip-compatibility-check` | bool | Set to `true` to skip compatibility checks between the current kots version and the update. |
| `--wait` | bool | Set to `false` to download the update in the background. **Default**: `true`. |
| `-o, --output` | string | Output format. **Supported formats**: `json`. **Default**: Plain text. |

### Example
```bash
kubectl kots upstream download kots-sentry --namespace kots-sentry --sequence 8
```


---


# upstream upgrade

import PreflightsStrict from "../partials/preflights/_preflights-strict.mdx"

# upstream upgrade

The `kots upstream upgrade` command fetches the latest version of the upstream application.
It is functionally equivalent to clicking **Check For Updates** in the Admin Console.

## Usage
```bash
kubectl kots upstream upgrade [app-slug] [flags]
```
* _Replace `[app-slug]` with the app slug for your KOTS application (required)._
* _Provide `[flags]` according to the table below_

| Flag | Type | Description |
|:-------------------------|--------|--------------------------------------------------------------------------------------------------|
| `-h, --help` | | help for upstream |
| `--kubeconfig` | string | the kubeconfig to use. **Default:** `$KUBECONFIG`. If unset, then `$HOME/.kube/config` |
| `-n, --namespace` | string | (Required) the namespace where the Admin Console is running |
| `--deploy` | bool | ensures the latest available release is deployed |
| `--deploy-version-label` | string | ensures the release with the provided version label is deployed |
| `--skip-preflights` | bool | set to true to skip preflight checks |
| `--airgap-bundle` | string | path to the application airgap bundle where application images and metadata will be loaded from |
| `--kotsadm-namespace` | string | the registry namespace to use for application images |
| `--kotsadm-registry` | string | the registry endpoint where application images will be pushed |
| `--registry-password` | string | the password to use to authenticate with the registry |
| `--registry-username` | string | the username to use to authenticate with the registry |
| `--disable-image-push` | bool | set to true to disable images from being pushed to private registry. **Default:** `false` |
| `--skip-registry-check` | bool | Set to `true` to skip the connectivity test and validation of the provided registry information. **Default:** `false` |
| `--wait` | bool | set to false to download the updates in the background. **Default:** `true` |
| `-o, --output` | string | output format (currently supported: json). **Default:** Plain text if not set |


#### About Strict Preflight Checks

<PreflightsStrict/>


## Example
```bash
kubectl kots upstream upgrade kots-sentry --namespace kots-sentry
```


---


# upstream

# upstream

KOTS Upstream interface.

### Usage
```bash
kubectl kots upstream [command] [flags]
```

This command supports all [global flags](kots-cli-global-flags).


---


# velero configure-aws-s3

# velero configure-aws-s3

Configures snapshots to use an AWS S3 Bucket as a storage destination.
This command supports auth via [IAM User Access Keys](https://github.com/vmware-tanzu/velero-plugin-for-aws#option-1-set-permissions-with-an-iam-user) and IAM Instance Roles for the velero-plugin-for-aws.

Valid Subcommands:
* `access-key`
* `instance-role`

### Usage

```bash
kubectl kots velero configure-aws-s3 [subcommand]
```

| Flag | Type | Description |
|--------------|------|--------------------------|
| `-h, --help` | | help for configure-aws-s3 |

### access-key

```bash
kubectl kots velero configure-aws-s3 access-key [flags]
```

- _Provide `[flags]` according to the table below_

| Flag | Type | Description |
|------------------------|--------|-------------------------------------------------------------------------------|
| `-h, --help` | | help for access-key |
| `-n, --namespace` | string | the namespace of the Admin Console _(required)_ |
| `--access-key-id` | string | the AWS access key ID to use for accessing the bucket _(required)_ |
| `--bucket` | string | name of the object storage bucket where backups should be stored _(required)_ |
| `--path` | string | path to a subdirectory in the object store bucket |
| `--region` | string | the region where the bucket exists _(required)_ |
| `--secret-access-key` | string | the AWS secret access key to use for accessing the bucket _(required)_ |
| `--skip-validation` | bool | skip the validation of the S3 Bucket _(default `false`)_ |

#### Example

```bash
kubectl kots velero configure-aws-s3 access-key --namespace default --region us-east-1 --bucket kots-snaps --access-key-id XXXXXXXJTJB7M2XZUV7D --secret-access-key <secret access key here>
```

### instance-role

```bash
kubectl kots velero configure-aws-s3 instance-role [flags]
```

- _Provide `[flags]` according to the table below_

| Flag | Type | Description |
|------------------------|--------|-------------------------------------------------------------------------------|
| `-h, --help` | | help for instance-role |
| `-n, --namespace` | string | the namespace of the Admin Console _(required)_ |
| `--bucket` | string | name of the object storage bucket where backups should be stored _(required)_ |
| `--path` | string | path to a subdirectory in the object store bucket |
| `--region` | string | the region where the bucket exists _(required)_ |
| `--skip-validation` | bool | skip the validation of the S3 Bucket _(default `false`)_ |

#### Example

```bash
kubectl kots velero configure-aws-s3 instance-role --namespace default --region us-east-1 --bucket kots-snaps
```


---


# velero configure-azure

# velero configure-azure
Configures snapshots to use an Azure Blob Storage Container as a storage destination.
Currently, only the [Service Principal authentication method](https://github.com/vmware-tanzu/velero-plugin-for-microsoft-azure#option-1-create-service-principal) of the velero-plugin-for-microsoft-azure is supported.

Valid Subcommands:
* service-principle

### Usage

```bash
kubectl kots velero configure-azure [subcommand]
```

| Flag | Type | Description |
|--------------|------|--------------------------|
| `-h, --help` | | help for configure-azure |

### service-principle

```bash
kubectl kots velero configure-azure service-principle [flags]
```

- _Provide `[flags]` according to the table below_

| Flag | Type | Description |
|---------------------|--------|---------------------------------------------------------------------------------------------------------------------------------------------|
| `-h, --help` | | help for service-principle |
| `-n, --namespace` | string | the namespace of the Admin Console _(required)_ |
| `--client-id` | string | the client ID of a Service Principal with access to the blob storage container _(required)_ |
| `--client-secret` | string | the client secret of a Service Principal with access to the blob storage container _(required)_ |
| `--cloud-name` | string | the Azure cloud target. Options: AzurePublicCloud, AzureUSGovernmentCloud, AzureChinaCloud, AzureGermanCloud _(default `AzurePublicCloud`)_ |
| `--container` | string | name of the Azure blob storage container where backups should be stored _(required)_ |
| `--path` | string | path to a subdirectory in the blob storage container |
| `--resource-group` | string | the resource group name of the blob storage container _(required)_ |
| `--skip-validation` | bool | skip the validation of the blob storage container _(default `false`)_ |
| `--storage-account` | string | the storage account name of the blob storage container _(required)_ |
| `--subscription-id` | string | the subscription id associated with the blob storage container _(required)_ |
| `--tenant-id` | string | the tenant ID associated with the blob storage container _(required)_ |

#### Example

```bash
kubectl kots velero configure-azure service-principle --namespace default --container velero --resource-group Velero_Backups --storage-account velero1111362eb32b --subscription-id "1111111-1111-47a7-9671-c904d681c2b2" --tenant-id "1111111-1111-42e1-973b-ad2efc689308" --client-id "1111111-1111-4ac3-9e2b-bbea61392432" --client-secret "<secret here>"
```


---


# velero configure-gcp

# velero configure-gcp

Configures snapshots to use a Google Cloud Platform Object Storage Bucket as a storage destination.
This command supports auth via [Service Account Credentials](https://github.com/vmware-tanzu/velero-plugin-for-gcp#option-1-set-permissions-with-a-service-account) or [Workload Identity](https://github.com/vmware-tanzu/velero-plugin-for-gcp#option-2-set-permissions-with-using-workload-identity-optional).
+ +Valid Subcommands: +* `service-account` +* `workload-identity` + +### Usage + +```bash +kubectl kots velero configure-gcp [subcommand] +``` + +| Flag | Type | Description | +|--------------|------|--------------------------| +| `-h, --help` | | help for configure-aws-s3 | + +### service-account + +```bash +kubectl kots velero configure-gcp service-account [flags] +``` + +- _Provide `[flags]` according to the table below_ + +| Flag | Type | Description | +|---------------------|--------|-------------------------------------------------------------------------------| +| `-h, --help` | | help for access-key | +| `-n, --namespace` | string | the namespace of the Admin Console _(required)_ | +| `--bucket` | string | name of the object storage bucket where backups should be stored _(required)_ | +| `--json-file` | string | path to JSON credntials file for veloro _(required)_ | +| `--path ` | string | path to a subdirectory in the object store bucket | +| `--skip-validation` | bool | skip the validation of the GCP Bucket _(default `false`)_ | + +#### Example + +```bash +kubectl kots velero configure-gcp service-account --namespace default --bucket velero-backups --json-file sa-creds.json +``` + +### workload-identity + +```bash +kubectl kots velero configure-gcp workload-identity [flags] +``` + +- _Provide `[flags]` according to the table below_ + +| Flag | Type | Description | +|---------------------|--------|-------------------------------------------------------------------------------| +| `-h, --help` | | help for access-key | +| `-n, --namespace` | string | the namespace of the Admin Console _(required)_ | +| `--bucket` | string | name of the object storage bucket where backups should be stored _(required)_ | +| `--path ` | string | path to a subdirectory in the object store bucket | +| `--service-account` | string | the service account to use if using Google Cloud instance role _(required)_ | +| `--skip-validation` | bool | skip the validation of the GCP Bucket _(default `false`)_ | + +#### Example + +```bash +kubectl kots velero configure-gcp workload-identity --namespace default --bucket velero-backups --service-account ss-velero@gcp-project.iam.gserviceaccount.com +``` + + +--- + + +# velero configure-hostpath + +import KotsadmNamespace from "../partials/kots-cli/_kotsadm-namespace.mdx" +import KotsadmRegistry from "../partials/kots-cli/_kotsadm-registry.mdx" +import RegistryPassword from "../partials/kots-cli/_registry-password.mdx" +import RegistryUsername from "../partials/kots-cli/_registry-username.mdx" +import Help from "../partials/kots-cli/_help.mdx" + +# velero configure-hostpath + +Configure snapshots to use a host path as storage destination. + +### Usage + +```bash +kubectl kots velero configure-hostpath [flags] +``` + +- _Provide `[flags]` according to the table below_ + +<table> + <tr> + <td width="30%">Flag</td> + <td>Type</td> + <td>Description</td> + </tr> + <Help/> + <tr> + <td>`-n, --namespace`</td> + <td>string</td> + <td>The namespace of the Admin Console (required)</td> + </tr> + <tr> + <td>`--hostpath`</td> + <td>string</td> + <td>A local host path on the node</td> + </tr> + <KotsadmNamespace/> + <KotsadmRegistry/> + <RegistryPassword/> + <RegistryUsername/> + <tr> + <td>`--force-reset`</td> + <td>bool</td> + <td>Bypass the reset prompt and force resetting the nfs path. (default `false`)</td> + </tr> + <tr> + <td>`--output`</td> + <td>string</td> + <td>Output format. 
Supported values: `json`</td>
+  </tr>
+</table>
+
+### Examples
+
+Basic
+
+```bash
+kubectl kots velero configure-hostpath --hostpath /mnt/kots-sentry-snapshots --namespace kots-sentry
+```
+
+Using a registry for airgapped installations
+
+```bash
+kubectl kots velero configure-hostpath \
+  --hostpath /mnt/kots-sentry-snapshots \
+  --namespace kots-sentry \
+  --kotsadm-registry private.registry.host/kots-sentry \
+  --registry-username ro-username \
+  --registry-password ro-password
+```
+
+
+---
+
+
+# velero configure-internal
+
+# velero configure-internal
+
+:::important
+The following command is applicable only to embedded clusters created by Replicated kURL and is _not_ recommended for production usage.
+Consider configuring one of the other available storage destinations. See [Configuring Other Storage Destinations](/enterprise/snapshots-storage-destinations).
+:::
+
+Configures snapshots to use the internal object store in embedded clusters as a storage destination.
+
+### Usage
+
+```bash
+kubectl kots velero configure-internal [flags]
+```
+
+- _Provide `[flags]` according to the table below_
+
+| Flag | Type | Description |
+|------------------------|--------|-------------------------------------------------------------------------------|
+| `-h, --help` | | help for configure-internal |
+| `--skip-validation` | bool | skip the validation of the S3 Bucket _(default `false`)_ |
+
+#### Example
+
+```bash
+kubectl kots velero configure-internal
+```
+
+
+---
+
+
+# velero configure-nfs
+
+import KotsadmNamespace from "../partials/kots-cli/_kotsadm-namespace.mdx"
+import KotsadmRegistry from "../partials/kots-cli/_kotsadm-registry.mdx"
+import RegistryPassword from "../partials/kots-cli/_registry-password.mdx"
+import RegistryUsername from "../partials/kots-cli/_registry-username.mdx"
+import Help from "../partials/kots-cli/_help.mdx"
+
+# velero configure-nfs
+
+Configures snapshots to use NFS as the storage destination.
+
+### Usage
+
+```bash
+kubectl kots velero configure-nfs [flags]
+```
+
+- _Provide `[flags]` according to the table below_
+
+<table>
+  <tr>
+    <td width="30%">Flag</td>
+    <td>Type</td>
+    <td>Description</td>
+  </tr>
+  <Help/>
+  <tr>
+    <td>`-n, --namespace`</td>
+    <td>string</td>
+    <td>The namespace of the Admin Console (required)</td>
+  </tr>
+  <tr>
+    <td>`--nfs-server`</td>
+    <td>string</td>
+    <td>The hostname or IP address of the NFS server (required)</td>
+  </tr>
+  <tr>
+    <td>`--nfs-path`</td>
+    <td>string</td>
+    <td>The path that is exported by the NFS server (required)</td>
+  </tr>
+  <KotsadmNamespace/>
+  <KotsadmRegistry/>
+  <RegistryPassword/>
+  <RegistryUsername/>
+  <tr>
+    <td>`--force-reset`</td>
+    <td>bool</td>
+    <td>Bypass the reset prompt and force resetting the NFS path. (default `false`)</td>
+  </tr>
+  <tr>
+    <td>`--output`</td>
+    <td>string</td>
+    <td>Output format.
Supported values: `json`</td>
+  </tr>
+</table>
+
+### Examples
+
+Basic
+
+```bash
+kubectl kots velero configure-nfs --nfs-server 10.128.0.32 --nfs-path /mnt/nfs_share --namespace kots-sentry
+```
+
+Using a registry for airgapped installations
+
+```bash
+kubectl kots velero configure-nfs \
+  --nfs-server 10.128.0.32 \
+  --nfs-path /mnt/nfs_share \
+  --namespace kots-sentry \
+  --kotsadm-registry private.registry.host/kots-sentry \
+  --registry-username ro-username \
+  --registry-password ro-password
+```
+
+
+---
+
+
+# velero configure-other-s3
+
+import KotsadmNamespace from "../partials/kots-cli/_kotsadm-namespace.mdx"
+import KotsadmRegistry from "../partials/kots-cli/_kotsadm-registry.mdx"
+import RegistryPassword from "../partials/kots-cli/_registry-password.mdx"
+import RegistryUsername from "../partials/kots-cli/_registry-username.mdx"
+import Help from "../partials/kots-cli/_help.mdx"
+
+# velero configure-other-s3
+
+Configures snapshots to use an S3-compatible storage provider, such as Minio, as a storage destination.
+
+### Usage
+
+```bash
+kubectl kots velero configure-other-s3 [flags]
+```
+
+- _Provide `[flags]` according to the table below_
+
+<table>
+  <tr>
+    <td width="30%">Flag</td>
+    <td>Type</td>
+    <td>Description</td>
+  </tr>
+  <Help/>
+  <tr>
+    <td>`-n, --namespace`</td>
+    <td>string</td>
+    <td>The namespace of the Admin Console (required)</td>
+  </tr>
+  <tr>
+    <td>`--access-key-id`</td>
+    <td>string</td>
+    <td>The AWS access key ID to use for accessing the bucket (required)</td>
+  </tr>
+  <tr>
+    <td>`--bucket`</td>
+    <td>string</td>
+    <td>Name of the object storage bucket where backups should be stored (required)</td>
+  </tr>
+  <tr>
+    <td>`--endpoint`</td>
+    <td>string</td>
+    <td>The S3 endpoint (for example, http://some-other-s3-endpoint) (required)</td>
+  </tr>
+  <tr>
+    <td>`--path`</td>
+    <td>string</td>
+    <td>Path to a subdirectory in the object store bucket</td>
+  </tr>
+  <tr>
+    <td>`--region`</td>
+    <td>string</td>
+    <td>The region where the bucket exists (required)</td>
+  </tr>
+  <tr>
+    <td>`--secret-access-key`</td>
+    <td>string</td>
+    <td>The AWS secret access key to use for accessing the bucket (required)</td>
+  </tr>
+  <tr>
+    <td>`--cacert`</td>
+    <td>string</td>
+    <td>File containing a certificate bundle to use when verifying TLS connections to the object store</td>
+  </tr>
+  <tr>
+    <td>`--skip-validation`</td>
+    <td>bool</td>
+    <td>Skip the validation of the S3 bucket (default `false`)</td>
+  </tr>
+  <KotsadmNamespace/>
+  <KotsadmRegistry/>
+  <RegistryPassword/>
+  <RegistryUsername/>
+</table>
+
+#### Example
+
+```bash
+kubectl kots velero configure-other-s3 --namespace default --endpoint http://minio --region us-east-1 --bucket kots-snaps --access-key-id XXXXXXXJTJB7M2XZUV7D --secret-access-key mysecretkey
+```
+
+
+---
+
+
+# velero ensure-permissions
+
+# velero ensure-permissions
+
+Ensures that Replicated KOTS has the necessary permissions to access Velero.
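+
+The command requires the namespace where Velero is installed. If you are unsure which namespace that is, one way to find it (a sketch, assuming a standard Velero deployment):
+
+```bash
+# Velero typically runs as a Deployment named "velero";
+# the NAMESPACE column is the value to pass to --velero-namespace
+kubectl get deployments --all-namespaces | grep velero
+```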
+
+### Usage
+
+```bash
+kubectl kots velero ensure-permissions [flags]
+```
+
+- _Provide `[flags]` according to the table below_
+
+| Flag | Type | Description |
+| ----------------- | ------ | ------------------------------------------------------------------- |
+| `-h, --help` | | help for ensure-permissions |
+| `-n, --namespace` | string | the namespace where the Admin Console is running _(required)_ |
+| `--velero-namespace` | string | the namespace where Velero is running _(required)_ |
+
+### Example
+
+```bash
+kubectl kots velero ensure-permissions --namespace kots-sentry --velero-namespace velero
+```
+
+
+---
+
+
+# velero
+
+# velero
+
+The KOTS Velero interface, which configures storage destinations for backups (snapshots), ensures permissions, and prints setup instructions.
+
+### Usage
+
+```bash
+kubectl kots velero [command] [global flags]
+```
+
+This command supports all [global flags](kots-cli-global-flags).
+
+The following `kots velero` commands are supported:
+
+- [`configure-aws-s3`](kots-cli-velero-configure-aws-s3): Configures an AWS S3 bucket as the storage destination.
+- [`configure-azure`](kots-cli-velero-configure-azure): Configures an Azure Blob Storage Container as the storage destination.
+- [`configure-gcp`](kots-cli-velero-configure-gcp): Configures a Google Cloud Platform Object Storage Bucket as the storage destination.
+- [`configure-internal`](kots-cli-velero-configure-internal): (Embedded clusters only) Configures the internal object store in the cluster as the storage destination.
+- [`configure-other-s3`](kots-cli-velero-configure-other-s3): Configures an S3-compatible storage provider as the storage destination.
+- [`configure-nfs`](kots-cli-velero-configure-nfs): Configures NFS as the storage destination.
+- [`configure-hostpath`](kots-cli-velero-configure-hostpath): Configures a host path as the storage destination.
+- [`ensure-permissions`](kots-cli-velero-ensure-permissions): Allows the KOTS Admin Console to access Velero.
+
+
+---
+
+
+# velero print-fs-instructions
+
+# velero print-fs-instructions
+
+:::note
+This command is deprecated. Use [`kubectl kots velero configure-hostpath`](/reference/kots-cli-velero-configure-hostpath) or [`kubectl kots velero configure-nfs`](/reference/kots-cli-velero-configure-nfs) instead.
+:::
+
+Prints instructions for setting up a file system as the snapshots storage destination (such as NFS or host path).
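+
+Because this command is deprecated, new configurations should use the configure commands directly. A sketch, assuming a hypothetical host path of `/var/lib/kots-snapshots`:
+
+```bash
+# Replaces the deprecated manual-instructions flow for host path storage
+kubectl kots velero configure-hostpath --hostpath /var/lib/kots-snapshots --namespace kots-sentry
+```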
+
+### Usage
+
+```bash
+kubectl kots velero print-fs-instructions [flags]
+```
+
+- _Provide `[flags]` according to the table below_
+
+| Flag | Type | Description |
+| ----------------- | ------ | ------------------------------------------------------------------- |
+| `-h, --help` | | help for print-fs-instructions |
+| `-n, --namespace` | string | the namespace of the Admin Console _(required)_ |
+
+### Example
+
+Basic
+
+```bash
+kubectl kots velero print-fs-instructions --namespace kots-sentry
+```
+
+
+---
+
+
+# Linter Rules
+
+import MissingKindField from "../partials/linter-rules/_missing-kind-field.mdx"
+import MissingAPIVersionField from "../partials/linter-rules/_missing-api-version-field.mdx"
+import PreflightSpec from "../partials/linter-rules/_preflight-spec.mdx"
+import ConfigSpec from "../partials/linter-rules/_config-spec.mdx"
+import TroubleshootSpec from "../partials/linter-rules/_troubleshoot-spec.mdx"
+import ApplicationSpec from "../partials/linter-rules/_application-spec.mdx"
+import ApplicationIcon from "../partials/linter-rules/_application-icon.mdx"
+import ApplicationStatusInformers from "../partials/linter-rules/_application-statusInformers.mdx"
+import InvalidTargetKOTS from "../partials/linter-rules/_invalid-target-kots-version.mdx"
+import InvalidMinKOTS from "../partials/linter-rules/_invalid-min-kots-version.mdx"
+import InvalidKubernetesInstaller from "../partials/linter-rules/_invalid-kubernetes-installer.mdx"
+import DeprecatedKubernetesInstallerVersion from "../partials/linter-rules/_deprecated-kubernetes-installer-version.mdx"
+import InvalidHelmReleaseName from "../partials/linter-rules/_invalid-helm-release-name.mdx"
+import Replicas1 from "../partials/linter-rules/_replicas-1.mdx"
+import Privileged from "../partials/linter-rules/_privileged.mdx"
+import AllowPrivilegeEscalation from "../partials/linter-rules/_allow-privilege-escalation.mdx"
+import ContainerImageLatestTag from "../partials/linter-rules/_container-image-latest-tag.mdx"
+import ContainerImageLocalImageName from "../partials/linter-rules/_container-image-local-image-name.mdx"
+import ContainerResources from "../partials/linter-rules/_container-resources.mdx"
+import ContainerResourceLimits from "../partials/linter-rules/_container-resource-limits.mdx"
+import ContainerResourceRequests from "../partials/linter-rules/_container-resource-requests.mdx"
+import ResourceLimitsCPU from "../partials/linter-rules/_resource-limits-cpu.mdx"
+import ResourceLimitsMemory from "../partials/linter-rules/_resource-limits-memory.mdx"
+import ResourceRequestsCPU from "../partials/linter-rules/_resource-requests-cpu.mdx"
+import ResourceRequestsMemory from "../partials/linter-rules/_resource-requests-memory.mdx"
+import VolumesHostPaths from "../partials/linter-rules/_volumes-host-paths.mdx"
+import VolumeDockerSock from "../partials/linter-rules/_volume-docker-sock.mdx"
+import HardcodedNamespace from "../partials/linter-rules/_hardcoded-namespace.mdx"
+import ConfigOptionInvalidType from "../partials/linter-rules/_config-option-invalid-type.mdx"
+import ConfigOptionInvalidRegexValidator from "../partials/linter-rules/_config-option-invalid-regex-validator.mdx"
+import ConfigOptionRegexValidatorInvalidType from "../partials/linter-rules/_config-option-regex-validator-invalid-type.mdx"
+import RepeatOptionMissingTemplate from "../partials/linter-rules/_repeat-option-missing-template.mdx"
+import RepeatOptionMissingValuesByGroup from "../partials/linter-rules/_repeat-option-missing-valuesByGroup.mdx"
+import RepeatOptionMalformedYAMLPath from "../partials/linter-rules/_repeat-option-malformed-yamlpath.mdx"
+import ConfigOptionPasswordType from "../partials/linter-rules/_config-option-password-type.mdx"
+import ConfigOptionIsCircular from "../partials/linter-rules/_config-option-is-circular.mdx"
+import InvalidRenderedYaml from "../partials/linter-rules/_invalid-rendered-yaml.mdx"
+import InvalidType from "../partials/linter-rules/_invalid_type.mdx"
+import InvalidYaml from "../partials/linter-rules/_invalid-yaml.mdx"
+import LinterDefinition from "../partials/linter-rules/_linter-definition.mdx"
+import MayContainSecrets from "../partials/linter-rules/_may-contain-secrets.mdx"
+
+# Linter Rules
+
+This topic describes the release linter and the linter rules.
+
+## Overview
+
+<LinterDefinition/>
+
+The linter runs automatically against KOTS releases that you create in the Replicated vendor portal, and displays any error or warning messages in the vendor portal UI.
+
+To lint manifest files from the command line, you can run the Replicated CLI `replicated release lint` command against the root directory of your application manifest files. You can also use the `--lint` flag when you create a release with the `replicated release create` command. For more information, see [release lint](/reference/replicated-cli-release-lint) and [release create](/reference/replicated-cli-release-create) in the _Replicated CLI_ section.
+
+## Linter Rules
+
+This section lists the linter rules and the default rule levels (Info, Warn, Error). You can customize the default rule levels in the Replicated LintConfig custom resource.
+For more information, see [LintConfig](custom-resource-lintconfig).
+
+### allow-privilege-escalation
+
+<table>
+  <tr>
+    <th>Description</th>
+    <td>Notifies if any manifest file has <code>allowPrivilegeEscalation</code> set to <code>true</code>.</td>
+  </tr>
+  <tr>
+    <th>Level</th>
+    <td>Info</td>
+  </tr>
+  <tr>
+    <th>Applies To</th>
+    <td>All files</td>
+  </tr>
+  <tr>
+    <th>Example</th>
+    <td><p>Example of matching YAML for this rule:</p><AllowPrivilegeEscalation/></td>
+  </tr>
+</table>
+
+### application-icon
+
+<table>
+  <tr>
+    <th>Description</th>
+    <td>
+      Requires an application icon.
+    </td>
+  </tr>
+  <tr>
+    <th>Level</th>
+    <td>Warn</td>
+  </tr>
+  <tr>
+    <th>Applies To</th>
+    <td>
+      Files with <code>kind: Application</code> and <code>apiVersion: kots.io/v1beta1</code>.
+    </td>
+  </tr>
+  <tr>
+    <th>Example</th>
+    <td><p>Example of correct YAML for this rule:</p><ApplicationIcon/></td>
+  </tr>
+</table>
+
+### application-spec
+
+<table>
+  <tr>
+    <th>Description</th>
+    <td>
+      <p>Requires an Application custom resource manifest file.</p>
+      <p>Accepted value for <code>kind</code>: <code>Application</code></p>
+    </td>
+  </tr>
+  <tr>
+    <th>Level</th>
+    <td>Warn</td>
+  </tr>
+  <tr>
+    <th>Example</th>
+    <td><p>Example of matching YAML for this rule:</p><ApplicationSpec/></td>
+  </tr>
+</table>
+
+### application-statusInformers
+
+<table>
+  <tr>
+    <th>Description</th>
+    <td>
+      Requires <code>statusInformers</code>.
+    </td>
+  </tr>
+  <tr>
+    <th>Level</th>
+    <td>Warn</td>
+  </tr>
+  <tr>
+    <th>Applies To</th>
+    <td>
+      Files with <code>kind: Application</code> and <code>apiVersion: kots.io/v1beta1</code>.
+ </td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of correct YAML for this rule:</p><ApplicationStatusInformers/></td> + </tr> +</table> + +### config-option-invalid-type + +<table> + <tr> + <th>Description</th> + <td> + <p>Enforces valid types for Config items.</p> + <p>For more information, see <a href="/reference/custom-resource-config#items">Items</a> in <em>Config</em>.</p> + </td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><ConfigOptionInvalidType/></td> + </tr> +</table> + +### config-option-is-circular + +<table> + <tr> + <th>Description</th> + <td>Enforces that all ConfigOption items do not reference themselves.</td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td> + Files with <code>kind: Config</code> and <code>apiVersion: kots.io/v1beta1</code>. + </td> + </tr> + <tr> + <th>Example</th> + <td> <ConfigOptionIsCircular/> </td> + </tr> +</table> + + +### config-option-not-found + +<table> + <tr> + <th>Description</th> + <td> + Requires all ConfigOption items to be defined in the <code>Config</code> custom resource manifest file. + </td> + </tr> + <tr> + <th>Level</th> + <td>Warn</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> +</table> + + +### config-option-not-repeatable + +<table> + <tr> + <th>Description</th> + <td> + Enforces that sub-templated ConfigOption items must be repeatable. + </td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> +</table> + +### config-option-password-type + +<table> + <tr> + <th>Description</th> + <td> + <p>Requires ConfigOption items with any of the following names to have <code>type</code> set to <code>password</code>:</p> + <ul> + <li><code>password</code></li> + <li><code>secret</code></li> + <li><code>token</code></li> + </ul> + </td> + </tr> + <tr> + <th>Level</th> + <td>Warn</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of correct YAML for this rule:</p><ConfigOptionPasswordType/></td> + </tr> +</table> + +### config-option-when-is-invalid + +<table> + <tr> + <th>Description</th> + <td> + <p>Enforces valid <code>ConfigOption.when</code>.</p> + <p>For more information, see <a href="/reference/custom-resource-config#when">when</a> in <em>Config</em>.</p> + </td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td>Files with <code>kind: Config</code> and <code>apiVersion: kots.io/v1beta1</code>.</td> + </tr> +</table> + +### config-option-invalid-regex-validator + +<table> + <tr> + <th>Description</th> + <td> + <p>Enforces valid <a href="https://github.com/google/re2/wiki/Syntax">RE2 regular expressions</a> pattern when regex validation is present.</p> + <p>For more information, see <a href="/reference/custom-resource-config#validation">Validation</a> in <em>Config</em>.</p> + </td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td>Files with <code>kind: Config</code> and <code>apiVersion: kots.io/v1beta1</code>.</td> + </tr> + <tr> + <th>Example</th> + <td><ConfigOptionInvalidRegexValidator/></td> + </tr> +</table> + +### config-option-regex-validator-invalid-type + +<table> + <tr> + <th>Description</th> + <td> + <p>Enforces valid item type when regex validation is present.</p> + <p>Item type should be 
<code>text</code>|<code>textarea</code>|<code>password</code>|<code>file</code></p> + <p>For more information, see <a href="/reference/custom-resource-config#validation">Validation</a> in <em>Config</em>.</p> + </td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td>Files with <code>kind: Config</code> and <code>apiVersion: kots.io/v1beta1</code>.</td> + </tr> + <tr> + <th>Example</th> + <td><ConfigOptionRegexValidatorInvalidType/></td> + </tr> +</table> + +### config-spec + +<table> + <tr> + <th>Description</th> + <td> + <p>Requires a Config custom resource manifest file.</p> + <p>Accepted value for <code>kind</code>: <code>Config</code></p> + <p>Accepted value for <code>apiVersion</code>: <code>kots.io/v1beta1</code></p> + </td> + </tr> + <tr> + <th>Level</th> + <td>Warn</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><ConfigSpec/></td> + </tr> +</table> + +### container-image-latest-tag + +<table> + <tr> + <th>Description</th> + <td>Notifies if any manifest file has a container image tag appended with + <code>:latest</code>.</td> + </tr> + <tr> + <th>Level</th> + <td>Info</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><ContainerImageLatestTag/></td> + </tr> +</table> + +### container-image-local-image-name + +<table> + <tr> + <th>Description</th> + <td>Disallows any manifest file having a container image tag that includes <code>LocalImageName</code>.</td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><ContainerImageLocalImageName/></td> + </tr> +</table> + +### container-resource-limits + +<table> + <tr> + <th>Description</th> + <td>Notifies if a <code>spec.container</code> has no <code>resources.limits</code> field.</td> + </tr> + <tr> + <th>Level</th> + <td>Info</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><ContainerResourceLimits/></td> + </tr> +</table> + + +### container-resource-requests + +<table> + <tr> + <th>Description</th> + <td>Notifies if a <code>spec.container</code> has no <code>resources.requests</code> field.</td> + </tr> + <tr> + <th>Level</th> + <td>Info</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><ContainerResourceRequests/></td> + </tr> +</table> + +### container-resources + +<table> + <tr> + <th>Description</th> + <td>Notifies if a manifest file has no <code>resources</code> field.</td> + </tr> + <tr> + <th>Level</th> + <td>Info</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><ContainerResources/></td> + </tr> +</table> + +### deprecated-kubernetes-installer-version + +<table> + <tr> + <th>Description</th> + <td> + <p>Disallows using the deprecated kURL installer <code>apiVersion</code>.</p> + <p><code>kurl.sh/v1beta1</code> is deprecated. Use <code>cluster.kurl.sh/v1beta1</code> instead.</p> + </td> + </tr> + <tr> + <th>Level</th> + <td>Warn</td> + </tr> + <tr> + <th>Applies To</th> + <td> + Files with <code>kind: Installer</code> and <code>apiVersion: kurl.sh/v1beta1</code>. 
+ </td> + </tr> + <tr> + <th>Example</th> + <td><DeprecatedKubernetesInstallerVersion/></td> + </tr> +</table> + +### duplicate-helm-release-name + +<table> + <tr> + <th>Description</th> + <td> + <p>Enforces unique <code>spec.chart.releaseName</code> across all HelmChart custom resource manifest files.</p> + </td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td> + Files with <code>kind: HelmChart</code> and <code>apiVersion: kots.io/v1beta1</code>. + </td> + </tr> +</table> + +### duplicate-kots-kind + +<table> + <tr> + <th>Description</th> + <td> + <p>Disallows duplicate Replicated custom resources. + A release can only include one of each <code>kind</code> of custom resource.</p> + <p>This rule disallows inclusion of more than one file with:</p> + <ul> + <li>The same <code>kind</code> and <code>apiVersion</code></li> + <li><code>kind: Troubleshoot</code> and any Troubleshoot <code>apiVersion</code></li> + <li><code>kind: Installer</code> and any Installer <code>apiVersion</code></li> + </ul> + </td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td> + All files + </td> + </tr> +</table> + +### hardcoded-namespace + +<table> + <tr> + <th>Description</th> + <td> + <p>Notifies if any manifest file has a <code>metadata.namespace</code> set + to a static field.</p> + <p>Replicated strongly recommends not specifying a namespace to allow + for flexibility when deploying into end user environments.</p> + <p>For more information, see <a href="/vendor/namespaces">Managing Application Namespaces</a>.</p> + </td> + </tr> + <tr> + <th>Level</th> + <td>Info</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><HardcodedNamespace/></td> + </tr> +</table> + +### helm-archive-missing + +<table> + <tr> + <th>Description</th> + <td><p>Requires that a <code>*.tar.gz</code> file is present that matches what is in the HelmChart custom resource manifest file.</p></td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td> + Releases with a HelmChart custom resource manifest file containing <code>kind: HelmChart</code> and <code>apiVersion: kots.io/v1beta1</code>. + </td> + </tr> +</table> + +### helm-chart-missing + +<table> + <tr> + <th>Description</th> + <td><p>Enforces that a HelmChart custom resource manifest file with <code>kind: HelmChart</code> is present if there is a <code>*.tar.gz</code> archive present.</p></td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td> + Releases with a <code>*.tar.gz</code> archive file present. + </td> + </tr> +</table> + +### invalid-helm-release-name + +<table> + <tr> + <th>Description</th> + <td> + <p>Enforces valid <code>spec.chart.releaseName</code> in the HelmChart custom resource manifest file.</p> + <p><code>spec.chart.releaseName</code> must meet the following requirements:</p> + <ul> + <li>Begin and end with a lowercase letter or number</li> + <li>Contain only lowercase letters, numbers, periods, and hyphens (<code>-</code>)</li> + <li>Contain a lowercase letter or number between any two symbols (periods or hyphens)</li> + </ul> + </td> + </tr> + <tr> + <th>Level</th> + <td>Warn</td> + </tr> + <tr> + <th>Applies To</th> + <td> + Files with <code>kind: HelmChart</code> and <code>apiVersion: kots.io/v1beta1</code>. 
+ </td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of correct YAML for this rule:</p><InvalidHelmReleaseName/></td> + </tr> +</table> + +### invalid-kubernetes-installer + +<table> + <tr> + <th>Description</th> + <td> + <p>Enforces valid Replicated kURL add-on versions.</p> + <p>kURL add-ons included in the kURL installer must pin specific versions rather than <code>latest</code> or x-ranges (1.2.x).</p> + </td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td> + <p>Files with <code>kind: Installer</code> and one of the following values for <code>apiVersion</code>:</p> + <ul> + <li><code>cluster.kurl.sh/v1beta1</code></li> + <li><code>kurl.sh/v1beta1</code></li> + </ul> + </td> + </tr> + <tr> + <th>Example</th> + <td><InvalidKubernetesInstaller/></td> + </tr> +</table> + +### invalid-min-kots-version + +<table> + <tr> + <th>Description</th> + <td> + <p>Requires <code>minKotsVersion</code> in the Application custom resource to use valid Semantic Versioning. + See <a href="https://semver.org/">Semantic Versioning 2.0.0</a>.</p> + <p>Accepts a <code>v</code> as an optional prefix, so both <code>1.0.0</code> and <code>v1.0.0</code> are valid.</p> + </td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td> + Files with <code>kind: Application</code> and <code>apiVersion: kots.io/v1beta1</code>. + </td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of correct YAML for this rule:</p><InvalidMinKOTS/></td> + </tr> +</table> + +### invalid-rendered-yaml + +<table> + <tr> + <th>Description</th> + <td><p>Enforces valid YAML after rendering the manifests using the Config spec.</p></td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td> + YAML files + </td> + </tr> + <tr> + <th>Example</th> + <td><InvalidRenderedYaml/></td> + </tr> +</table> + +### invalid-target-kots-version + +<table> + <tr> + <th>Description</th> + <td> + <p>Requires <code>targetKotsVersion</code> in the Application custom resource to use valid Semantic Versioning. 
+ See <a href="https://semver.org/">Semantic Versioning 2.0.0</a>.</p> + <p>Accepts a <code>v</code> as an optional prefix, so both <code>1.0.0</code> and <code>v1.0.0</code> are valid.</p> + </td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td> + Files with <code>kind: Application</code> and <code>apiVersion: kots.io/v1beta1</code> + </td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of correct YAML for this rule:</p><InvalidTargetKOTS/></td> + </tr> +</table> + +### invalid-type + +<table> + <tr> + <th>Description</th> + <td><p>Requires that the value of a property matches that property's expected type.</p></td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td> + All files + </td> + </tr> + <tr> + <th>Example</th> + <td><InvalidType/></td> + </tr> +</table> + +### invalid-yaml + +<table> + <tr> + <th>Description</th> + <td><p>Enforces valid YAML.</p></td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td> + YAML files + </td> + </tr> + <tr> + <th>Example</th> + <td><InvalidYaml/></td> + </tr> +</table> + +### may-contain-secrets + +<table> + <tr> + <th>Description</th> + <td> Notifies if any manifest file may contain secrets.</td> + </tr> + <tr> + <th>Level</th> + <td>Info</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><MayContainSecrets/></td> + </tr> +</table> + +### missing-api-version-field + +<table> + <tr> + <th>Description</th> + <td>Requires the <code>apiVersion:</code> field in all files.</td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of correct YAML for this rule:</p><MissingAPIVersionField/></td> + </tr> +</table> + +### missing-kind-field + +<table> + <tr> + <th>Description</th> + <td>Requires the <code>kind:</code> field in all files.</td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of correct YAML for this rule:</p><MissingKindField/></td> + </tr> +</table> + +### nonexistent-status-informer-object + +<table> + <tr> + <th>Description</th> + <td> + <p>Requires that each <code>statusInformers</code> entry references an existing Kubernetes workload.</p> + <p>The linter cannot evaluate <code>statusInformers</code> for Helm-managed resources because it does not template Helm charts during analysis.</p> + <p>If you configure status informers for Helm-managed resources, you can ignore <code>nonexistent-status-informer-object</code> warnings for those workloads. To disable <code>nonexistent-status-informer-object</code> warnings, change the level for this rule to <code>info</code> or <code>off</code> in the LintConfig custom resource manifest file. 
See <a href="custom-resource-lintconfig">LintConfig</a> in <em>Custom Resources</em>.</p> + </td> + </tr> + <tr> + <th>Level</th> + <td>Warning</td> + </tr> + <tr> + <th>Applies To</th> + <td> + <p>Compares <code>statusInformer</code> values in files with <code>kind: Application</code> and <code>apiVersion: kots.io/v1beta1</code> to all manifests in the release.</p> + </td> + </tr> +</table> + +### preflight-spec + +<table> + <tr> + <th>Description</th> + <td> + <p>Requires a Preflight custom resource manifest file with:</p> + <p><code>kind: Preflight</code></p> + <p>and one of the following:</p> + <ul> + <li><code>apiVersion: troubleshoot.replicated.com/v1beta1</code></li> + <li><code>apiVersion: troubleshoot.sh/v1beta2</code></li> + </ul> + </td> + </tr> + <tr> + <th>Level</th> + <td>Warn</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><PreflightSpec/></td> + </tr> +</table> + +### privileged + +<table> + <tr> + <th>Description</th> + <td>Notifies if any manifest file has <code>privileged</code> set to <code>true</code>.</td> + </tr> + <tr> + <th>Level</th> + <td>Info</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><Privileged/></td> + </tr> +</table> + +### repeat-option-malformed-yamlpath + +<table> + <tr> + <th>Description</th> + <td> + <p>Enforces ConfigOption <code>yamlPath</code> ending with square brackets denoting index position.</p> + <p>For more information, see <a href="/reference/custom-resource-config#template-targets">Repeatable Item Template Targets</a> in <em>Config</em>.</p> + </td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of correct YAML for this rule:</p><RepeatOptionMalformedYAMLPath/></td> + </tr> +</table> + +### repeat-option-missing-template + +<table> + <tr> + <th>Description</th> + <td> + <p>Disallows repeating Config item with undefined <code>item.templates</code>.</p> + <p>For more information, see <a href="/reference/custom-resource-config#template-targets">Repeatable Item Template Targets</a> in <em>Config</em>.</p> + </td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of correct YAML for this rule:</p><RepeatOptionMissingTemplate/></td> + </tr> +</table> + + +### repeat-option-missing-valuesByGroup + +<table> + <tr> + <th>Description</th> + <td> + <p>Disallows repeating Config item with undefined <code>item.valuesByGroup</code>.</p> + <p>For more information, see <a href="/reference/custom-resource-config#repeatable-items">Repeatable Items</a> in <em>Config</em>.</p> + </td> + </tr> + <tr> + <th>Level</th> + <td>Error</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of correct YAML for this rule:</p><RepeatOptionMissingValuesByGroup/></td> + </tr> +</table> + +### replicas-1 + +<table> + <tr> + <th>Description</th> + <td>Notifies if any manifest file has <code>replicas</code> set to <code>1</code>.</td> + </tr> + <tr> + <th>Level</th> + <td>Info</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><Replicas1/></td> + </tr> +</table> + +### resource-limits-cpu + +<table> + <tr> + <th>Description</th> + <td>Notifies if a 
<code>spec.container</code> has no <code>resources.limits.cpu</code> field.</td> + </tr> + <tr> + <th>Level</th> + <td>Info</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><ResourceLimitsCPU/></td> + </tr> +</table> + +### resource-limits-memory + +<table> + <tr> + <th>Description</th> + <td>Notifies if a <code>spec.container</code> has no <code>resources.limits.memory</code> field.</td> + </tr> + <tr> + <th>Level</th> + <td>Info</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><ResourceLimitsMemory/></td> + </tr> +</table> + +### resource-requests-cpu + +<table> + <tr> + <th>Description</th> + <td>Notifies if a <code>spec.container</code> has no <code>resources.requests.cpu</code> field.</td> + </tr> + <tr> + <th>Level</th> + <td>Info</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><ResourceRequestsCPU/></td> + </tr> +</table> + +### resource-requests-memory + +<table> + <tr> + <th>Description</th> + <td>Notifies if a <code>spec.container</code> has no <code>resources.requests.memory</code> field.</td> + </tr> + <tr> + <th>Level</th> + <td>Info</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><ResourceRequestsMemory/></td> + </tr> +</table> + +### troubleshoot-spec + +<table> + <tr> + <th>Description</th> + <td> + <p>Requires a Troubleshoot manifest file.</p> + <p>Accepted values for <code>kind</code>:</p> + <ul> + <li><code>Collector</code></li> + <li><code>SupportBundle</code></li> + </ul> + <p>Accepted values for <code>apiVersion</code>:</p> + <ul> + <li><code>troubleshoot.replicated.com/v1beta1</code></li> + <li><code>troubleshoot.sh/v1beta2</code></li> + </ul> + </td> + </tr> + <tr> + <th>Level</th> + <td>Warn</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><TroubleshootSpec/></td> + </tr> +</table> + +### volume-docker-sock + +<table> + <tr> + <th>Description</th> + <td>Notifies if a <code>spec.volumes</code> has <code>hostPath</code> + set to <code>/var/run/docker.sock</code>.</td> + </tr> + <tr> + <th>Level</th> + <td>Info</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><VolumeDockerSock/></td> + </tr> +</table> + +### volumes-host-paths + +<table> + <tr> + <th>Description</th> + <td>Notifies if a <code>spec.volumes</code> has defined a <code>hostPath</code>.</td> + </tr> + <tr> + <th>Level</th> + <td>Info</td> + </tr> + <tr> + <th>Applies To</th> + <td>All files</td> + </tr> + <tr> + <th>Example</th> + <td><p>Example of matching YAML for this rule:</p><VolumesHostPaths/></td> + </tr> +</table> + + +--- + + +# replicated api get + +# replicated api get + +Make ad-hoc GET API calls to the Replicated API + +### Synopsis + +This is essentially like curl for the Replicated API, but +uses your local credentials and prints the response unmodified. + +We recommend piping the output to jq for easier reading. + +Pass the PATH of the request as the final argument. Do not include the host or version. 
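+
+For example, to pretty-print a response with jq (assumes jq is installed):
+
+```
+replicated api get /v3/apps | jq .
+```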
+ +``` +replicated api get [flags] +``` + +### Examples + +``` +replicated api get /v3/apps +``` + +### Options + +``` + -h, --help help for get +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated api](replicated-cli-api) - Make ad-hoc API calls to the Replicated API + + + +--- + + +# replicated api patch + +# replicated api patch + +Make ad-hoc PATCH API calls to the Replicated API + +### Synopsis + +This is essentially like curl for the Replicated API, but +uses your local credentials and prints the response unmodified. + +We recommend piping the output to jq for easier reading. + +Pass the PATH of the request as the final argument. Do not include the host or version. + +``` +replicated api patch [flags] +``` + +### Examples + +``` +replicated api patch /v3/customer/2VffY549paATVfHSGpJhjh6Ehpy -b '{"name":"Valuable Customer"}' +``` + +### Options + +``` + -b, --body string JSON body to send with the request + -h, --help help for patch +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated api](replicated-cli-api) - Make ad-hoc API calls to the Replicated API + + + +--- + + +# replicated api post + +# replicated api post + +Make ad-hoc POST API calls to the Replicated API + +### Synopsis + +This is essentially like curl for the Replicated API, but +uses your local credentials and prints the response unmodified. + +We recommend piping the output to jq for easier reading. + +Pass the PATH of the request as the final argument. Do not include the host or version. + +``` +replicated api post [flags] +``` + +### Examples + +``` +replicated api post /v3/app/2EuFxKLDxKjPNk2jxMTmF6Vxvxu/channel -b '{"name":"marc-waz-here"}' +``` + +### Options + +``` + -b, --body string JSON body to send with the request + -h, --help help for post +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated api](replicated-cli-api) - Make ad-hoc API calls to the Replicated API + + + +--- + + +# replicated api put + +# replicated api put + +Make ad-hoc PUT API calls to the Replicated API + +### Synopsis + +This is essentially like curl for the Replicated API, but +uses your local credentials and prints the response unmodified. + +We recommend piping the output to jq for easier reading. + +Pass the PATH of the request as the final argument. Do not include the host or version. 
+ +``` +replicated api put [flags] +``` + +### Examples + +``` +replicated api put /v3/app/2EuFxKLDxKjPNk2jxMTmF6Vxvxu/channel/2QLPm10JPkta7jO3Z3Mk4aXTPyZ -b '{"name":"marc-waz-here2"}' +``` + +### Options + +``` + -b, --body string JSON body to send with the request + -h, --help help for put +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated api](replicated-cli-api) - Make ad-hoc API calls to the Replicated API + + + +--- + + +# replicated api + +# replicated api + +Make ad-hoc API calls to the Replicated API + +### Options + +``` + -h, --help help for api +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated +* [replicated api get](replicated-cli-api-get) - Make ad-hoc GET API calls to the Replicated API +* [replicated api patch](replicated-cli-api-patch) - Make ad-hoc PATCH API calls to the Replicated API +* [replicated api post](replicated-cli-api-post) - Make ad-hoc POST API calls to the Replicated API +* [replicated api put](replicated-cli-api-put) - Make ad-hoc PUT API calls to the Replicated API + + + +--- + + +# replicated app create + +# replicated app create + +Create a new application + +### Synopsis + +Create a new application in your Replicated account. + +This command allows you to initialize a new application that can be distributed +and managed using the KOTS platform. When you create a new app, it will be set up +with default configurations, which you can later customize. + +The NAME argument is required and will be used as the application's name. + +``` +replicated app create NAME [flags] +``` + +### Examples + +``` +# Create a new app named "My App" +replicated app create "My App" + +# Create a new app and output the result in JSON format +replicated app create "Another App" --output json + +# Create a new app with a specific name and view details in table format +replicated app create "Custom App" --output table +``` + +### Options + +``` + -h, --help help for create + --output string The output format to use. One of: json|table (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated app](replicated-cli-app) - Manage applications + + + +--- + + +# replicated app ls + +# replicated app ls + +List applications + +### Synopsis + +List all applications in your Replicated account, +or search for a specific application by name or ID. + +This command displays information about your applications, including their +names, IDs, and associated channels. If a NAME argument is provided, it will +filter the results to show only applications that match the given name or ID. + +The output can be customized using the --output flag to display results in +either table or JSON format. 
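+
+For example, the JSON output can be piped to jq for scripting (a sketch; assumes jq is installed):
+
+```
+replicated app ls --output json | jq .
+```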
+
+```
+replicated app ls [NAME] [flags]
+```
+
+### Aliases
+
+```
+ls, list
+```
+
+### Examples
+
+```
+# List all applications
+replicated app ls
+
+# Search for a specific application by name
+replicated app ls "My App"
+
+# List applications and output in JSON format
+replicated app ls --output json
+
+# Search for an application and display results in table format
+replicated app ls "App Name" --output table
+```
+
+### Options
+
+```
+  -h, --help            help for ls
+      --output string   The output format to use. One of: json|table (default: table) (default "table")
+```
+
+### Options inherited from parent commands
+
+```
+      --app string     The app slug or app id to use in all calls
+      --token string   The API token to use to access your app in the Vendor API
+```
+
+### SEE ALSO
+
+* [replicated app](replicated-cli-app) - Manage applications
+
+
+
+---
+
+
+# replicated app rm
+
+# replicated app rm
+
+Delete an application
+
+### Synopsis
+
+Delete an application from your Replicated account.
+
+This command allows you to permanently remove an application from your account.
+Once deleted, the application and all associated data will be irretrievably lost.
+
+Use this command with caution as there is no way to undo this operation.
+
+```
+replicated app rm NAME [flags]
+```
+
+### Aliases
+
+```
+rm, delete
+```
+
+### Examples
+
+```
+# Delete an app named "My App"
+replicated app delete "My App"
+
+# Delete an app and skip the confirmation prompt
+replicated app delete "Another App" --force
+
+# Delete an app and output the result in JSON format
+replicated app delete "Custom App" --output json
+```
+
+### Options
+
+```
+  -f, --force           Skip confirmation prompt. There is no undo for this action.
+  -h, --help            help for rm
+      --output string   The output format to use. One of: json|table (default: table) (default "table")
+```
+
+### Options inherited from parent commands
+
+```
+      --app string     The app slug or app id to use in all calls
+      --token string   The API token to use to access your app in the Vendor API
+```
+
+### SEE ALSO
+
+* [replicated app](replicated-cli-app) - Manage applications
+
+
+
+---
+
+
+# replicated app
+
+# replicated app
+
+Manage applications
+
+### Synopsis
+
+The app command allows you to manage applications in your Replicated account.
+
+This command provides a suite of subcommands for creating, listing, updating, and
+deleting applications. You can perform operations such as creating new apps,
+viewing app details, modifying app settings, and removing apps from your account.
+ +Use the various subcommands to: +- Create new applications +- List all existing applications +- View details of a specific application +- Update application settings +- Delete applications from your account + +### Examples + +``` +# List all applications +replicated app ls + +# Create a new application +replicated app create "My New App" + +# View details of a specific application +replicated app inspect "My App Name" + +# Delete an application +replicated app delete "App to Remove" + +# Update an application's settings +replicated app update "My App" --channel stable + +# List applications with custom output format +replicated app ls --output json +``` + +### Options + +``` + -h, --help help for app +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated +* [replicated app create](replicated-cli-app-create) - Create a new application +* [replicated app ls](replicated-cli-app-ls) - List applications +* [replicated app rm](replicated-cli-app-rm) - Delete an application + + + +--- + + +# replicated channel create + +# replicated channel create + +Create a new channel in your app + +### Synopsis + +Create a new channel in your app and print the channel on success. + +``` +replicated channel create [flags] +``` + +### Examples + +``` +replicated channel create --name Beta --description 'New features subject to change' +``` + +### Options + +``` + --description string A longer description of this channel + -h, --help help for create + --name string The name of this channel + --output string The output format to use. One of: json|table (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated channel](replicated-cli-channel) - List channels + + + +--- + + +# replicated channel demote + +# replicated channel demote + +Demote a release from a channel + +### Synopsis + +Demote a channel release from a channel using a channel sequence or release sequence. + +``` +replicated channel demote CHANNEL_ID_OR_NAME [flags] +``` + +### Examples + +``` + # Demote a release from a channel by channel sequence + replicated channel release demote Beta --channel-sequence 15 + + # Demote a release from a channel by release sequence + replicated channel release demote Beta --release-sequence 12 +``` + +### Options + +``` + --channel-sequence int The channel sequence to demote + -h, --help help for demote + --release-sequence int The release sequence to demote +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated channel](replicated-cli-channel) - List channels + + + +--- + + +# replicated channel disable-semantic-versioning + +# replicated channel disable-semantic-versioning + +Disable semantic versioning for CHANNEL_ID + +### Synopsis + +Disable semantic versioning for the CHANNEL_ID. 
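+
+The CHANNEL_ID argument is the channel ID shown by `replicated channel ls`. A sketch, using a hypothetical channel ID:
+
+```
+replicated channel ls
+replicated channel disable-semantic-versioning 2QLPm10JPkta7jO3Z3Mk4aXTPyZ
+```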
+ +``` +replicated channel disable-semantic-versioning CHANNEL_ID [flags] +``` + +### Examples + +``` +replicated channel disable-semantic-versioning CHANNEL_ID +``` + +### Options + +``` + -h, --help help for disable-semantic-versioning +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated channel](replicated-cli-channel) - List channels + + + +--- + + +# replicated channel enable-semantic-versioning + +# replicated channel enable-semantic-versioning + +Enable semantic versioning for CHANNEL_ID + +### Synopsis + +Enable semantic versioning for the CHANNEL_ID. + +``` +replicated channel enable-semantic-versioning CHANNEL_ID [flags] +``` + +### Examples + +``` +replicated channel enable-semantic-versioning CHANNEL_ID +``` + +### Options + +``` + -h, --help help for enable-semantic-versioning +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated channel](replicated-cli-channel) - List channels + + + +--- + + +# replicated channel inspect + +# replicated channel inspect + +Show full details for a channel + +### Synopsis + +Show full details for a channel + +``` +replicated channel inspect CHANNEL_ID [flags] +``` + +### Options + +``` + -h, --help help for inspect + --output string The output format to use. One of: json|table (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated channel](replicated-cli-channel) - List channels + + + +--- + + +# replicated channel ls + +# replicated channel ls + +List all channels in your app + +### Synopsis + +List all channels in your app + +``` +replicated channel ls [flags] +``` + +### Aliases + +``` +ls, list +``` + +### Options + +``` + -h, --help help for ls + --output string The output format to use. One of: json|table (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated channel](replicated-cli-channel) - List channels + + + +--- + + +# replicated channel rm + +# replicated channel rm + +Remove (archive) a channel + +### Synopsis + +Remove (archive) a channel + +``` +replicated channel rm CHANNEL_ID [flags] +``` + +### Aliases + +``` +rm, delete +``` + +### Options + +``` + -h, --help help for rm +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated channel](replicated-cli-channel) - List channels + + + +--- + + +# replicated channel un-demote + +# replicated channel un-demote + +Un-demote a release from a channel + +### Synopsis + +Un-demote a channel release from a channel using a channel sequence or release sequence. 
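+
+To confirm which channel sequence to un-demote, you can inspect the channel first (a sketch; the fields shown may vary by CLI version):
+
+```
+replicated channel inspect CHANNEL_ID --output json
+```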
+ +``` +replicated channel un-demote CHANNEL_ID_OR_NAME [flags] +``` + +### Examples + +``` + # Un-demote a release from a channel by channel sequence + replicated channel release un-demote Beta --channel-sequence 15 + + # Un-demote a release from a channel by release sequence + replicated channel release un-demote Beta --release-sequence 12 +``` + +### Options + +``` + --channel-sequence int The channel sequence to un-demote + -h, --help help for un-demote + --release-sequence int The release sequence to un-demote +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated channel](replicated-cli-channel) - List channels + + + +--- + + +# replicated channel + +# replicated channel + +List channels + +### Synopsis + +List channels + +### Options + +``` + -h, --help help for channel +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated +* [replicated channel create](replicated-cli-channel-create) - Create a new channel in your app +* [replicated channel demote](replicated-cli-channel-demote) - Demote a release from a channel +* [replicated channel disable-semantic-versioning](replicated-cli-channel-disable-semantic-versioning) - Disable semantic versioning for CHANNEL_ID +* [replicated channel enable-semantic-versioning](replicated-cli-channel-enable-semantic-versioning) - Enable semantic versioning for CHANNEL_ID +* [replicated channel inspect](replicated-cli-channel-inspect) - Show full details for a channel +* [replicated channel ls](replicated-cli-channel-ls) - List all channels in your app +* [replicated channel rm](replicated-cli-channel-rm) - Remove (archive) a channel +* [replicated channel un-demote](replicated-cli-channel-un-demote) - Un-demote a release from a channel + + + +--- + + +# replicated cluster addon create object-store + +# replicated cluster addon create object-store + +Create an object store bucket for a cluster. + +### Synopsis + +Creates an object store bucket for a cluster, requiring a bucket name prefix. The bucket name will be auto-generated using the format "[BUCKET_PREFIX]-[ADDON_ID]-cmx". This feature provisions an object storage bucket that can be used for storage in your cluster environment. 
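+
+As an illustration of the naming format, a hypothetical bucket prefix of `mybucket` combined with a generated add-on ID of `abc123` would yield the bucket name `mybucket-abc123-cmx`.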
+ +``` +replicated cluster addon create object-store CLUSTER_ID --bucket-prefix BUCKET_PREFIX [flags] +``` + +### Examples + +``` +# Create an object store bucket with a specified prefix +replicated cluster addon create object-store 05929b24 --bucket-prefix mybucket + +# Create an object store bucket and wait for it to be ready (up to 5 minutes) +replicated cluster addon create object-store 05929b24 --bucket-prefix mybucket --wait 5m + +# Perform a dry run to validate inputs without creating the bucket +replicated cluster addon create object-store 05929b24 --bucket-prefix mybucket --dry-run + +# Create an object store bucket and output the result in JSON format +replicated cluster addon create object-store 05929b24 --bucket-prefix mybucket --output json + +# Create an object store bucket with a custom prefix and wait for 10 minutes +replicated cluster addon create object-store 05929b24 --bucket-prefix custom-prefix --wait 10m +``` + +### Options + +``` + --bucket-prefix string A prefix for the bucket name to be created (required) + --dry-run Simulate creation to verify that your inputs are valid without actually creating an add-on + -h, --help help for object-store + --output string The output format to use. One of: json|table|wide (default: table) (default "table") + --wait duration Wait duration for add-on to be ready before exiting (leave empty to not wait) +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster addon create](replicated-cli-cluster-addon-create) - Create cluster add-ons. + + + +--- + + +# replicated cluster addon create + +# replicated cluster addon create + +Create cluster add-ons. + +### Synopsis + +Create new add-ons for a cluster. This command allows you to add functionality or services to a cluster by provisioning the required add-ons. + +### Examples + +``` +# Create an object store bucket add-on for a cluster +replicated cluster addon create object-store CLUSTER_ID --bucket-prefix mybucket + +# Perform a dry run for creating an object store add-on +replicated cluster addon create object-store CLUSTER_ID --bucket-prefix mybucket --dry-run +``` + +### Options + +``` + -h, --help help for create +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster addon](replicated-cli-cluster-addon) - Manage cluster add-ons. +* [replicated cluster addon create object-store](replicated-cli-cluster-addon-create-object-store) - Create an object store bucket for a cluster. + + + +--- + + +# replicated cluster addon ls + +# replicated cluster addon ls + +List cluster add-ons for a cluster. + +### Synopsis + +The 'cluster addon ls' command allows you to list all add-ons for a specific cluster. This command provides a detailed overview of the add-ons currently installed on the cluster, including their status and any relevant configuration details. + +This can be useful for monitoring the health and configuration of add-ons or performing troubleshooting tasks. 
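+
+A common workflow is to list add-ons to find an add-on's ID, then pass that ID to `replicated cluster addon rm`. A sketch, reusing the illustrative IDs from this reference:
+
+```
+replicated cluster addon ls 05929b24
+replicated cluster addon rm 05929b24 --id abc123
+```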
+ +``` +replicated cluster addon ls CLUSTER_ID [flags] +``` + +### Aliases + +``` +ls, list +``` + +### Examples + +``` +# List add-ons for a cluster with default table output +replicated cluster addon ls CLUSTER_ID + +# List add-ons for a cluster with JSON output +replicated cluster addon ls CLUSTER_ID --output json + +# List add-ons for a cluster with wide table output +replicated cluster addon ls CLUSTER_ID --output wide +``` + +### Options + +``` + -h, --help help for ls + --output string The output format to use. One of: json|table|wide (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster addon](replicated-cli-cluster-addon) - Manage cluster add-ons. + + + +--- + + +# replicated cluster addon rm + +# replicated cluster addon rm + +Remove cluster add-on by ID. + +### Synopsis + +The 'cluster addon rm' command allows you to remove a specific add-on from a cluster by specifying the cluster ID and the add-on ID. + +This command is useful when you want to deprovision an add-on that is no longer needed or when troubleshooting issues related to specific add-ons. The add-on will be removed immediately, and you will receive confirmation upon successful removal. + +``` +replicated cluster addon rm CLUSTER_ID --id ADDON_ID [flags] +``` + +### Aliases + +``` +rm, delete +``` + +### Examples + +``` +# Remove an add-on with ID 'abc123' from cluster 'cluster456' +replicated cluster addon rm cluster456 --id abc123 +``` + +### Options + +``` + -h, --help help for rm + --id string The ID of the cluster add-on to remove (required) +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster addon](replicated-cli-cluster-addon) - Manage cluster add-ons. + + + +--- + + +# replicated cluster addon + +# replicated cluster addon + +Manage cluster add-ons. + +### Synopsis + +The 'cluster addon' command allows you to manage add-ons installed on a test cluster. Add-ons are additional components or services that can be installed and configured to enhance or extend the functionality of the cluster. + +You can use various subcommands to create, list, remove, or check the status of add-ons on a cluster. This command is useful for adding databases, object storage, monitoring, security, or other specialized tools to your cluster environment. + +### Examples + +``` +# List all add-ons installed on a cluster +replicated cluster addon ls CLUSTER_ID + +# Remove an add-on from a cluster +replicated cluster addon rm CLUSTER_ID --id ADDON_ID + +# Create an object store bucket add-on for a cluster +replicated cluster addon create object-store CLUSTER_ID --bucket-prefix mybucket + +# List add-ons with JSON output +replicated cluster addon ls CLUSTER_ID --output json +``` + +### Options + +``` + -h, --help help for addon +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. +* [replicated cluster addon create](replicated-cli-cluster-addon-create) - Create cluster add-ons. 
+* [replicated cluster addon ls](replicated-cli-cluster-addon-ls) - List cluster add-ons for a cluster. +* [replicated cluster addon rm](replicated-cli-cluster-addon-rm) - Remove cluster add-on by ID. + + + +--- + + +# replicated cluster create + +# replicated cluster create + +Create test clusters. + +### Synopsis + +The 'cluster create' command provisions a new test cluster with the specified Kubernetes distribution and configuration. You can customize the cluster's size, version, node groups, disk space, IP family, and other parameters. + +This command supports creating clusters on multiple Kubernetes distributions, including setting up node groups with different instance types and counts. You can also specify a TTL (Time-To-Live) to automatically terminate the cluster after a set duration. + +Use the '--dry-run' flag to simulate the creation process and get an estimated cost without actually provisioning the cluster. + +``` +replicated cluster create [flags] +``` + +### Examples + +``` +# Create a new cluster with basic configuration +replicated cluster create --distribution eks --version 1.21 --nodes 3 --instance-type t3.large --disk 100 --ttl 24h + +# Create a cluster with a custom node group +replicated cluster create --distribution eks --version 1.21 --nodegroup name=workers,instance-type=t3.large,nodes=5 --ttl 24h + +# Simulate cluster creation (dry-run) +replicated cluster create --distribution eks --version 1.21 --nodes 3 --disk 100 --ttl 24h --dry-run + +# Create a cluster with autoscaling configuration +replicated cluster create --distribution eks --version 1.21 --min-nodes 2 --max-nodes 5 --instance-type t3.large --ttl 24h + +# Create a cluster with multiple node groups +replicated cluster create --distribution eks --version 1.21 \ +--nodegroup name=workers,instance-type=t3.large,nodes=3 \ +--nodegroup name=cpu-intensive,instance-type=c5.2xlarge,nodes=2 \ +--ttl 24h + +# Create a cluster with custom tags +replicated cluster create --distribution eks --version 1.21 --nodes 3 --tag env=test --tag project=demo --ttl 24h + +# Create a cluster with addons +replicated cluster create --distribution eks --version 1.21 --nodes 3 --addon object-store --ttl 24h +``` + +### Options + +``` + --addon stringArray Addons to install on the cluster (can be specified multiple times) + --bucket-prefix string A prefix for the bucket name to be created (required by '--addon object-store') + --disk int Disk Size (GiB) to request per node (default 50) + --distribution string Kubernetes distribution of the cluster to provision + --dry-run Dry run + -h, --help help for create + --instance-type string The type of instance to use (e.g. m6i.large) + --ip-family string IP Family to use for the cluster (ipv4|ipv6|dual). + --license-id string License ID to use for the installation (required for Embedded Cluster distribution) + --max-nodes string Maximum Node count (non-negative number) (only for EKS, AKS and GKE clusters). + --min-nodes string Minimum Node count (non-negative number) (only for EKS, AKS and GKE clusters). + --name string Cluster name (defaults to random name) + --nodegroup stringArray Node group to create (name=?,instance-type=?,nodes=?,min-nodes=?,max-nodes=?,disk=? format, can be specified multiple times). For each nodegroup, at least one flag must be specified. The flags min-nodes and max-nodes are mutually dependent. + --nodes int Node count (default 1) + --output string The output format to use. 
One of: json|table|wide (default: table) (default "table") + --tag stringArray Tag to apply to the cluster (key=value format, can be specified multiple times) + --ttl string Cluster TTL (duration, max 48h) + --version string Kubernetes version to provision (format is distribution dependent) + --wait duration Wait duration for cluster to be ready (leave empty to not wait) +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. + + + +--- + + +# replicated cluster kubeconfig + +# replicated cluster kubeconfig + +Download credentials for a test cluster. + +### Synopsis + +The 'cluster kubeconfig' command downloads the credentials (kubeconfig) required to access a test cluster. You can either merge these credentials into your existing kubeconfig file or save them as a new file. + +This command ensures that the kubeconfig is correctly configured for use with your Kubernetes tools. You can specify the cluster by ID or by name. Additionally, the kubeconfig can be written to a specific file path or printed to stdout. + +You can also use this command to automatically update your current Kubernetes context with the downloaded credentials. + +``` +replicated cluster kubeconfig [ID] [flags] +``` + +### Examples + +``` +# Download and merge kubeconfig into your existing configuration +replicated cluster kubeconfig CLUSTER_ID + +# Save the kubeconfig to a specific file +replicated cluster kubeconfig CLUSTER_ID --output-path ./kubeconfig + +# Print the kubeconfig to stdout +replicated cluster kubeconfig CLUSTER_ID --stdout + +# Download kubeconfig for a cluster by name +replicated cluster kubeconfig --name "My Cluster" + +# Download kubeconfig for a cluster by ID +replicated cluster kubeconfig --id CLUSTER_ID +``` + +### Options + +``` + -h, --help help for kubeconfig + --id string id of the cluster to download credentials for (when name is not provided) + --name string name of the cluster to download credentials for (when id is not provided) + --output-path string path to kubeconfig file to write to, if not provided, it will be merged into your existing kubeconfig + --stdout write kubeconfig to stdout +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. + + + +--- + + +# replicated cluster ls + +# replicated cluster ls + +List test clusters. + +### Synopsis + +The 'cluster ls' command lists all test clusters. This command provides information about the clusters, such as their status, name, distribution, version, and creation time. The output can be formatted in different ways, depending on your needs. + +You can filter the list of clusters by time range and status (e.g., show only terminated clusters). You can also watch clusters in real-time, which updates the list every few seconds. + +Clusters that have been deleted will be shown with a 'deleted' status. 
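As a quick sketch, the time-range flags can be combined to show only clusters created within a given window (the timestamps are illustrative):

```
# List clusters created during a one-day window
replicated cluster ls --start-time 2023-01-01T00:00:00Z --end-time 2023-01-02T00:00:00Z
```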
+ +``` +replicated cluster ls [flags] +``` + +### Aliases + +``` +ls, list +``` + +### Examples + +``` +# List all clusters with default table output +replicated cluster ls + +# Show clusters created after a specific date +replicated cluster ls --start-time 2023-01-01T00:00:00Z + +# Watch for real-time updates +replicated cluster ls --watch + +# List clusters with JSON output +replicated cluster ls --output json + +# List only terminated clusters +replicated cluster ls --show-terminated + +# List clusters with wide table output +replicated cluster ls --output wide +``` + +### Options + +``` + --end-time string end time for the query (Format: 2006-01-02T15:04:05Z) + -h, --help help for ls + --output string The output format to use. One of: json|table|wide (default: table) (default "table") + --show-terminated when set, only show terminated clusters + --start-time string start time for the query (Format: 2006-01-02T15:04:05Z) + -w, --watch watch clusters +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. + + + +--- + + +# replicated cluster nodegroup ls + +# replicated cluster nodegroup ls + +List node groups for a cluster. + +### Synopsis + +The 'cluster nodegroup ls' command lists all the node groups associated with a given cluster. Each node group defines a specific set of nodes with particular configurations, such as instance types and scaling options. + +You can view information about the node groups within the specified cluster, including their ID, name, node count, and other configuration details. + +You must provide the cluster ID to list its node groups. + +``` +replicated cluster nodegroup ls [ID] [flags] +``` + +### Aliases + +``` +ls, list +``` + +### Examples + +``` +# List all node groups in a cluster with default table output +replicated cluster nodegroup ls CLUSTER_ID + +# List node groups with JSON output +replicated cluster nodegroup ls CLUSTER_ID --output json + +# List node groups with wide table output +replicated cluster nodegroup ls CLUSTER_ID --output wide +``` + +### Options + +``` + -h, --help help for ls + --output string The output format to use. One of: json|table|wide (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster nodegroup](replicated-cli-cluster-nodegroup) - Manage node groups for clusters. + + + +--- + + +# replicated cluster nodegroup + +# replicated cluster nodegroup + +Manage node groups for clusters. + +### Synopsis + +The 'cluster nodegroup' command provides functionality to manage node groups within a cluster. This command allows you to list node groups in a Kubernetes or VM-based cluster. + +Node groups define a set of nodes with specific configurations, such as instance types, node counts, or scaling rules. You can use subcommands to perform various actions on node groups. 
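As a rough sketch of one possible workflow (the cluster and node group IDs are placeholders), you can list a cluster's node groups and then scale one of them with the 'replicated cluster update nodegroup' command documented later in this reference:

```
# Inspect the node groups, then scale one of them
replicated cluster nodegroup ls CLUSTER_ID
replicated cluster update nodegroup CLUSTER_ID --nodegroup-id NODEGROUP_ID --nodes 5
```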
+ +### Examples + +``` +# List all node groups for a cluster +replicated cluster nodegroup ls CLUSTER_ID +``` + +### Options + +``` + -h, --help help for nodegroup +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. +* [replicated cluster nodegroup ls](replicated-cli-cluster-nodegroup-ls) - List node groups for a cluster. + + + +--- + + +# replicated cluster port expose + +# replicated cluster port expose + +Expose a port on a cluster to the public internet. + +### Synopsis + +The 'cluster port expose' command is used to expose a specified port on a cluster to the public internet. When exposing a port, the command automatically creates a DNS entry and, if using the "https" protocol, provisions a TLS certificate for secure communication. + +You can also create a wildcard DNS entry and TLS certificate by specifying the "--wildcard" flag. Please note that creating a wildcard certificate may take additional time. + +This command supports different protocols including "http", "https", "ws", and "wss" for web traffic and web socket communication. + +NOTE: Currently, this feature only supports VM-based cluster distributions. + +``` +replicated cluster port expose CLUSTER_ID --port PORT [flags] +``` + +### Examples + +``` +# Expose port 8080 with HTTPS protocol and wildcard DNS +replicated cluster port expose CLUSTER_ID --port 8080 --protocol https --wildcard + +# Expose port 3000 with HTTP protocol +replicated cluster port expose CLUSTER_ID --port 3000 --protocol http + +# Expose port 8080 with multiple protocols +replicated cluster port expose CLUSTER_ID --port 8080 --protocol http,https + +# Expose port 8080 and display the result in JSON format +replicated cluster port expose CLUSTER_ID --port 8080 --protocol https --output json +``` + +### Options + +``` + -h, --help help for expose + --output string The output format to use. One of: json|table|wide (default: table) (default "table") + --port int Port to expose (required) + --protocol strings Protocol to expose (valid values are "http", "https", "ws" and "wss") (default [http,https]) + --wildcard Create a wildcard DNS entry and TLS certificate for this port +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster port](replicated-cli-cluster-port) - Manage cluster ports. + + + +--- + + +# replicated cluster port ls + +# replicated cluster port ls + +List cluster ports for a cluster. + +### Synopsis + +The 'cluster port ls' command lists all the ports configured for a specific cluster. You must provide the cluster ID to retrieve and display the ports. + +This command is useful for viewing the current port configurations, protocols, and other related settings of your test cluster. The output format can be customized to suit your needs, and the available formats include table, JSON, and wide views. 
+ +``` +replicated cluster port ls CLUSTER_ID [flags] +``` + +### Aliases + +``` +ls, list +``` + +### Examples + +``` +# List ports for a cluster in the default table format +replicated cluster port ls CLUSTER_ID + +# List ports for a cluster in JSON format +replicated cluster port ls CLUSTER_ID --output json + +# List ports for a cluster in wide format +replicated cluster port ls CLUSTER_ID --output wide +``` + +### Options + +``` + -h, --help help for ls + --output string The output format to use. One of: json|table|wide (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster port](replicated-cli-cluster-port) - Manage cluster ports. + + + +--- + + +# replicated cluster port rm + +# replicated cluster port rm + +Remove cluster port by ID. + +### Synopsis + +The 'cluster port rm' command removes a specific port from a cluster. You must provide either the ID of the port or the port number and protocol(s) to remove. + +This command is useful for managing the network settings of your test clusters by allowing you to clean up unused or incorrect ports. After removing a port, the updated list of ports will be displayed. + +Note that you can only use either the port ID or port number when removing a port, not both at the same time. + +``` +replicated cluster port rm CLUSTER_ID --id PORT_ID [flags] +``` + +### Aliases + +``` +rm, delete +``` + +### Examples + +``` +# Remove a port using its ID +replicated cluster port rm CLUSTER_ID --id PORT_ID + +# Remove a port using its number (deprecated) +replicated cluster port rm CLUSTER_ID --port 8080 --protocol http,https + +# Remove a port and display the result in JSON format +replicated cluster port rm CLUSTER_ID --id PORT_ID --output json +``` + +### Options + +``` + -h, --help help for rm + --id string ID of the port to remove (required) + --output string The output format to use. One of: json|table|wide (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster port](replicated-cli-cluster-port) - Manage cluster ports. + + + +--- + + +# replicated cluster port + +# replicated cluster port + +Manage cluster ports. + +### Synopsis + +The 'cluster port' command is a parent command for managing ports in a cluster. It allows users to list, remove, or expose specific ports used by the cluster. Use the subcommands (such as 'ls', 'rm', and 'expose') to manage port configurations effectively. + +This command provides flexibility for handling ports in various test clusters, ensuring efficient management of cluster networking settings. 
+

### Examples

```
# List all exposed ports in a cluster
replicated cluster port ls [CLUSTER_ID]

# Remove an exposed port from a cluster
replicated cluster port rm [CLUSTER_ID] --id [PORT_ID]

# Expose a new port in a cluster
replicated cluster port expose [CLUSTER_ID] --port [PORT]
```

### Options

```
  -h, --help   help for port
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters.
* [replicated cluster port expose](replicated-cli-cluster-port-expose) - Expose a port on a cluster to the public internet.
* [replicated cluster port ls](replicated-cli-cluster-port-ls) - List cluster ports for a cluster.
* [replicated cluster port rm](replicated-cli-cluster-port-rm) - Remove cluster port by ID.



---


# replicated cluster prepare

# replicated cluster prepare

Prepare cluster for testing.

### Synopsis

The 'cluster prepare' command provisions a Kubernetes cluster and installs an application using a Helm chart or KOTS YAML configuration.

This command is designed to be used in CI environments to prepare a cluster for testing by deploying a Helm chart or KOTS application with entitlements and custom values. You can specify the cluster configuration, such as the Kubernetes distribution, version, node count, and instance type, and then install your application automatically.

Alternatively, if you prefer deploying KOTS applications, you can specify YAML manifests for the release and use the '--shared-password' flag for the KOTS admin console.

You can also pass entitlement values to configure the cluster's customer entitlements.

Note:
- The '--chart' flag cannot be used with '--yaml', '--yaml-file', or '--yaml-dir'.
- If deploying a Helm chart, use the '--set' flags to pass chart values. When deploying a KOTS application, the '--shared-password' flag is required.

```
replicated cluster prepare [flags]
```

### Examples

```
replicated cluster prepare --distribution eks --version 1.27 --instance-type c6.xlarge --node-count 3 --chart ./your-chart.tgz --values ./values.yaml --set chart-key=value --set chart-key2=value2
```

### Options

```
      --app-ready-timeout duration   Timeout to wait for the application to be ready. Must be in Go duration format (e.g., 10s, 2m). (default 5m0s)
      --chart string                 Path to the helm chart package to deploy
      --cluster-id string            The ID of an existing cluster to use instead of creating a new one.
      --config-values-file string    Path to a manifest containing config values (must be apiVersion: kots.io/v1beta1, kind: ConfigValues).
      --disk int                     Disk Size (GiB) to request per node. (default 50)
      --distribution string          Kubernetes distribution of the cluster to provision
      --entitlements strings         The entitlements to set on the customer. Can be specified multiple times.
  -h, --help                         help for prepare
      --instance-type string         The type of instance to use for cluster nodes (e.g. c6.xlarge)
      --name string                  Cluster name
      --namespace string             The namespace into which to deploy the KOTS application or Helm chart. (default "default")
      --node-count int               Node count. (default 1)
      --set stringArray              Set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2).
+ --set-file stringArray Set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2). + --set-json stringArray Set JSON values on the command line (can specify multiple or separate values with commas: key1=jsonval1,key2=jsonval2). + --set-literal stringArray Set a literal STRING value on the command line. + --set-string stringArray Set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2). + --shared-password string Shared password for the KOTS admin console. + --ttl string Cluster TTL (duration, max 48h) + --values strings Specify values in a YAML file or a URL (can specify multiple). + --version string Kubernetes version to provision (format is distribution dependent) + --wait duration Wait duration for cluster to be ready. (default 5m0s) + --yaml string The YAML config for this release. Use '-' to read from stdin. Cannot be used with the --yaml-file flag. + --yaml-dir string The directory containing multiple yamls for a KOTS release. Cannot be used with the --yaml flag. + --yaml-file string The YAML config for this release. Cannot be used with the --yaml flag. +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. + + + +--- + + +# replicated cluster rm + +# replicated cluster rm + +Remove test clusters. + +### Synopsis + +The 'rm' command removes test clusters immediately. + +You can remove clusters by specifying a cluster ID, or by using other criteria such as cluster names or tags. Alternatively, you can remove all clusters in your account at once. + +This command can also be used in a dry-run mode to simulate the removal without actually deleting anything. + +You cannot mix the use of cluster IDs with other options like removing by name, tag, or removing all clusters at once. + +``` +replicated cluster rm ID [ID …] [flags] +``` + +### Aliases + +``` +rm, delete +``` + +### Examples + +``` +# Remove a specific cluster by ID +replicated cluster rm CLUSTER_ID + +# Remove all clusters +replicated cluster rm --all +``` + +### Options + +``` + --all remove all clusters + --dry-run Dry run + -h, --help help for rm + --name stringArray Name of the cluster to remove (can be specified multiple times) + --tag stringArray Tag of the cluster to remove (key=value format, can be specified multiple times) +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. + + + +--- + + +# replicated cluster shell + +# replicated cluster shell + +Open a new shell with kubeconfig configured. + +### Synopsis + +The 'shell' command opens a new shell session with the kubeconfig configured for the specified test cluster. This allows you to have immediate kubectl access to the cluster within the shell environment. + +You can either specify the cluster ID directly or provide the cluster name to resolve the corresponding cluster ID. The shell will inherit your existing environment and add the necessary kubeconfig context for interacting with the Kubernetes cluster. 
+ +Once inside the shell, you can use 'kubectl' to interact with the cluster. To exit the shell, press Ctrl-D or type 'exit'. When the shell closes, the kubeconfig will be reset back to your default configuration. + +``` +replicated cluster shell [ID] [flags] +``` + +### Examples + +``` +# Open a shell for a cluster by ID +replicated cluster shell CLUSTER_ID + +# Open a shell for a cluster by name +replicated cluster shell --name "My Cluster" +``` + +### Options + +``` + -h, --help help for shell + --id string id of the cluster to have kubectl access to (when name is not provided) + --name string name of the cluster to have kubectl access to. +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. + + + +--- + + +# replicated cluster update nodegroup + +# replicated cluster update nodegroup + +Update a nodegroup for a test cluster. + +### Synopsis + +The 'nodegroup' command allows you to update the configuration of a nodegroup within a test cluster. You can update attributes like the number of nodes, minimum and maximum node counts for autoscaling, and more. + +If you do not provide the nodegroup ID, the command will try to resolve it based on the nodegroup name provided. + +``` +replicated cluster update nodegroup [ID] [flags] +``` + +### Examples + +``` +# Update the number of nodes in a nodegroup +replicated cluster update nodegroup CLUSTER_ID --nodegroup-id NODEGROUP_ID --nodes 3 + +# Update the autoscaling limits for a nodegroup +replicated cluster update nodegroup CLUSTER_ID --nodegroup-id NODEGROUP_ID --min-nodes 2 --max-nodes 5 +``` + +### Options + +``` + -h, --help help for nodegroup + --max-nodes string The maximum number of nodes in the nodegroup + --min-nodes string The minimum number of nodes in the nodegroup + --nodegroup-id string The ID of the nodegroup to update + --nodegroup-name string The name of the nodegroup to update + --nodes int The number of nodes in the nodegroup + --output string The output format to use. One of: json|table|wide (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --id string id of the cluster to update (when name is not provided) + --name string Name of the cluster to update. + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster update](replicated-cli-cluster-update) - Update cluster settings. + + + +--- + + +# replicated cluster update ttl + +# replicated cluster update ttl + +Update TTL for a test cluster. + +### Synopsis + +The 'ttl' command allows you to update the Time-To-Live (TTL) of a test cluster. The TTL represents the duration for which the cluster will remain active before it is automatically terminated. The duration starts from the moment the cluster becomes active. You must provide a valid duration, with a maximum limit of 48 hours. + +``` +replicated cluster update ttl [ID] [flags] +``` + +### Examples + +``` +# Update the TTL for a specific cluster +replicated cluster update ttl CLUSTER_ID --ttl 24h +``` + +### Options + +``` + -h, --help help for ttl + --output string The output format to use. 
One of: json|table|wide (default: table) (default "table") + --ttl string Update TTL which starts from the moment the cluster is running (duration, max 48h). +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --id string id of the cluster to update (when name is not provided) + --name string Name of the cluster to update. + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster update](replicated-cli-cluster-update) - Update cluster settings. + + + +--- + + +# replicated cluster update + +# replicated cluster update + +Update cluster settings. + +### Synopsis + +The 'update' command allows you to update various settings of a test cluster, such as its name or ID. + +You can either specify the cluster ID directly or provide the cluster name, and the command will resolve the corresponding cluster ID. This allows you to modify the cluster's configuration based on the unique identifier or the name of the cluster. + +### Examples + +``` +# Update a cluster using its ID +replicated cluster update --id <cluster-id> [subcommand] + +# Update a cluster using its name +replicated cluster update --name <cluster-name> [subcommand] +``` + +### Options + +``` + -h, --help help for update + --id string id of the cluster to update (when name is not provided) + --name string Name of the cluster to update. +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. +* [replicated cluster update nodegroup](replicated-cli-cluster-update-nodegroup) - Update a nodegroup for a test cluster. +* [replicated cluster update ttl](replicated-cli-cluster-update-ttl) - Update TTL for a test cluster. + + + +--- + + +# replicated cluster upgrade + +# replicated cluster upgrade + +Upgrade a test cluster. + +### Synopsis + +The 'upgrade' command upgrades a Kubernetes test cluster to a specified version. You must provide a cluster ID and the version to upgrade to. The upgrade can be simulated with a dry-run option, or you can choose to wait for the cluster to be fully upgraded. + +``` +replicated cluster upgrade [ID] [flags] +``` + +### Examples + +``` +# Upgrade a cluster to a new Kubernetes version +replicated cluster upgrade [CLUSTER_ID] --version 1.31 + +# Perform a dry run of a cluster upgrade without making any changes +replicated cluster upgrade [CLUSTER_ID] --version 1.31 --dry-run + +# Upgrade a cluster and wait for it to be ready +replicated cluster upgrade [CLUSTER_ID] --version 1.31 --wait 30m +``` + +### Options + +``` + --dry-run Dry run + -h, --help help for upgrade + --output string The output format to use. One of: json|table|wide (default: table) (default "table") + --version string Kubernetes version to upgrade to (format is distribution dependent) + --wait duration Wait duration for cluster to be ready (leave empty to not wait) +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. + + + +--- + + +# replicated cluster versions + +# replicated cluster versions + +List cluster versions. 
+ +### Synopsis + +The 'versions' command lists available Kubernetes versions for supported distributions. You can filter the versions by specifying a distribution and choose between different output formats. + +``` +replicated cluster versions [flags] +``` + +### Examples + +``` +# List all available Kubernetes cluster versions +replicated cluster versions + +# List available versions for a specific distribution (e.g., eks) +replicated cluster versions --distribution eks + +# Output the versions in JSON format +replicated cluster versions --output json +``` + +### Options + +``` + --distribution string Kubernetes distribution to filter by. + -h, --help help for versions + --output string The output format to use. One of: json|table (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. + + + +--- + + +# replicated cluster + +# replicated cluster + +Manage test Kubernetes clusters. + +### Synopsis + +The 'cluster' command allows you to manage and interact with Kubernetes clusters used for testing purposes. With this command, you can create, list, remove, and manage node groups within clusters, as well as retrieve information about available clusters. + +### Examples + +``` +# Create a single-node EKS cluster +replicated cluster create --distribution eks --version 1.31 + +# List all clusters +replicated cluster ls + +# Remove a specific cluster by ID +replicated cluster rm <cluster-id> + +# List all nodegroups in a specific cluster +replicated cluster nodegroup ls <cluster-id> +``` + +### Options + +``` + -h, --help help for cluster +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated +* [replicated cluster addon](replicated-cli-cluster-addon) - Manage cluster add-ons. +* [replicated cluster create](replicated-cli-cluster-create) - Create test clusters. +* [replicated cluster kubeconfig](replicated-cli-cluster-kubeconfig) - Download credentials for a test cluster. +* [replicated cluster ls](replicated-cli-cluster-ls) - List test clusters. +* [replicated cluster nodegroup](replicated-cli-cluster-nodegroup) - Manage node groups for clusters. +* [replicated cluster port](replicated-cli-cluster-port) - Manage cluster ports. +* [replicated cluster prepare](replicated-cli-cluster-prepare) - Prepare cluster for testing. +* [replicated cluster rm](replicated-cli-cluster-rm) - Remove test clusters. +* [replicated cluster shell](replicated-cli-cluster-shell) - Open a new shell with kubeconfig configured. +* [replicated cluster update](replicated-cli-cluster-update) - Update cluster settings. +* [replicated cluster upgrade](replicated-cli-cluster-upgrade) - Upgrade a test cluster. +* [replicated cluster versions](replicated-cli-cluster-versions) - List cluster versions. + + + +--- + + +# replicated completion + +# replicated completion + +Generate completion script + +``` +replicated completion [bash|zsh|fish|powershell] +``` + +### Examples + +``` +To load completions: + +Bash: + + This script depends on the 'bash-completion' package. 
+ If it is not installed already, you can install it via your OS's package manager. + + $ source <(replicated completion bash) + + # To load completions for each session, execute once: + # Linux: + $ replicated completion bash > /etc/bash_completion.d/replicated + # macOS: + $ replicated completion bash > $(brew --prefix)/etc/bash_completion.d/replicated + +Zsh: + + # If shell completion is not already enabled in your environment, + # you will need to enable it. You can execute the following once: + + $ echo "autoload -U compinit; compinit" >> ~/.zshrc + + # To load completions for each session, execute once: + $ replicated completion zsh > "${fpath[1]}/_replicated" + + # You will need to start a new shell for this setup to take effect. + +fish: + + $ replicated completion fish | source + + # To load completions for each session, execute once: + $ replicated completion fish > ~/.config/fish/completions/replicated.fish + +PowerShell: + + PS> replicated completion powershell | Out-String | Invoke-Expression + + # To load completions for every new session, run: + PS> replicated completion powershell > replicated.ps1 + # and source this file from your PowerShell profile. + +``` + +### Options + +``` + -h, --help help for completion +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated + + + +--- + + +# replicated customer archive + +# replicated customer archive + +Archive a customer + +### Synopsis + +Archive a customer for the current application. + +This command allows you to archive a customer record. Archiving a customer +will make their license inactive and remove them from active customer lists. +This action is reversible - you can unarchive a customer later if needed. + +The customer can be specified by either their name or ID. + +``` +replicated customer archive <customer_name_or_id> [flags] +``` + +### Examples + +``` +# Archive a customer by name +replicated customer archive "Acme Inc" + +# Archive a customer by ID +replicated customer archive cus_abcdef123456 + +# Archive multiple customers by ID +replicated customer archive cus_abcdef123456 cus_xyz9876543210 + +# Archive a customer in a specific app (if you have multiple apps) +replicated customer archive --app myapp "Acme Inc" +``` + +### Options + +``` + --app string The app to archive the customer in (not required when using a customer id) + -h, --help help for archive +``` + +### Options inherited from parent commands + +``` + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated customer](replicated-cli-customer) - Manage customers + + + +--- + + +# replicated customer create + +# replicated customer create + +Create a new customer for the current application + +### Synopsis + +Create a new customer for the current application with specified attributes. + +This command allows you to create a customer record with various properties such as name, +custom ID, channels, license type, and feature flags. You can set expiration dates, +enable or disable specific features, and assign the customer to one or more channels. + +The --app flag must be set to specify the target application. 
+

```
replicated customer create [flags]
```

### Examples

```
# Create a basic customer with a name and assigned to a channel
replicated customer create --app myapp --name "Acme Inc" --channel stable

# Create a customer with multiple channels and a custom ID
replicated customer create --app myapp --name "Beta Corp" --custom-id "BETA123" --channel beta --channel stable

# Create a paid customer with specific features enabled
replicated customer create --app myapp --name "Enterprise Ltd" --type paid --channel enterprise --airgap --snapshot

# Create a trial customer with an expiration date
replicated customer create --app myapp --name "Trial User" --type trial --channel stable --expires-in 720h

# Create a customer with all available options
replicated customer create --app myapp --name "Full Options Inc" --custom-id "FULL001" \
  --channel stable --channel beta --default-channel stable --type paid \
  --email "contact@fulloptions.com" --expires-in 8760h \
  --airgap --snapshot --kots-install --embedded-cluster-download \
  --support-bundle-upload --ensure-channel
```

### Options

```
      --airgap                      If set, the license will allow airgap installs.
      --channel stringArray         Release channel to which the customer should be assigned (can be specified multiple times)
      --custom-id string            Set a custom customer ID to more easily tie this customer record to your external data systems
      --default-channel string      Which of the specified channels should be the default channel. If not set, the first channel specified will be the default channel.
      --developer-mode              If set, Replicated SDK installed in dev mode will use mock data.
      --email string                Email address of the customer that is to be created.
      --embedded-cluster-download   If set, the license will allow embedded cluster downloads.
      --ensure-channel              If set, channel will be created if it does not exist.
      --expires-in duration         If set, an expiration date will be set on the license. Supports Go durations like '72h' or '3600m'
      --geo-axis                    If set, the license will allow Geo Axis usage.
      --gitops                      If set, the license will allow GitOps usage.
      --helm-install                If set, the license will allow Helm installs.
      --helmvm-cluster-download     If set, the license will allow helmvm cluster downloads.
  -h, --help                        help for create
      --identity-service            If set, the license will allow Identity Service usage.
      --installer-support           If set, the license will allow installer support.
      --kots-install                If set, the license will allow KOTS install. Otherwise license will allow Helm CLI installs only. (default true)
      --kurl-install                If set, the license will allow kURL installs.
      --name string                 Name of the customer
      --output string               The output format to use. One of: json|table (default: table) (default "table")
      --snapshot                    If set, the license will allow Snapshots.
      --support-bundle-upload       If set, the license will allow uploading support bundles.
      --type string                 The license type to create.
One of: dev|trial|paid|community|test (default: dev) (default "dev") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated customer](replicated-cli-customer) - Manage customers + + + +--- + + +# replicated customer download-license + +# replicated customer download-license + +Download a customer's license + +### Synopsis + +The download-license command allows you to retrieve and save a customer's license. + +This command fetches the license for a specified customer and either outputs it +to stdout or saves it to a file. The license contains crucial information about +the customer's subscription and usage rights. + +You must specify the customer using either their name or ID with the --customer flag. + +``` +replicated customer download-license [flags] +``` + +### Examples + +``` +# Download license for a customer by ID and output to stdout +replicated customer download-license --customer cus_abcdef123456 + +# Download license for a customer by name and save to a file +replicated customer download-license --customer "Acme Inc" --output license.yaml + +# Download license for a customer in a specific app (if you have multiple apps) +replicated customer download-license --app myapp --customer "Acme Inc" --output license.yaml +``` + +### Options + +``` + --customer string The Customer Name or ID + -h, --help help for download-license + -o, --output string Path to output license to. Defaults to stdout (default "-") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated customer](replicated-cli-customer) - Manage customers + + + +--- + + +# replicated customer inspect + +# replicated customer inspect + +Show detailed information about a specific customer + +### Synopsis + +The inspect command provides comprehensive details about a customer. + + This command retrieves and displays full information about a specified customer, + including their assigned channels, registry information, and other relevant attributes. + It's useful for getting an in-depth view of a customer's configuration and status. + + You must specify the customer using either their name or ID with the --customer flag. + +``` +replicated customer inspect [flags] +``` + +### Examples + +``` +# Inspect a customer by ID +replicated customer inspect --customer cus_abcdef123456 + +# Inspect a customer by name +replicated customer inspect --customer "Acme Inc" + +# Inspect a customer and output in JSON format +replicated customer inspect --customer cus_abcdef123456 --output json + +# Inspect a customer for a specific app (if you have multiple apps) +replicated customer inspect --app myapp --customer "Acme Inc" +``` + +### Options + +``` + --customer string The Customer Name or ID + -h, --help help for inspect + --output string The output format to use. 
One of: json|table (default: table) (default "table")
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated customer](replicated-cli-customer) - Manage customers



---


# replicated customer ls

# replicated customer ls

List customers for the current application

### Synopsis

List customers associated with the current application.

This command displays information about customers linked to your application.
By default, it shows all non-test customers. You can use flags to:
- Filter customers by a specific app version
- Include test customers in the results
- Change the output format (table or JSON)

The command requires an app to be set using the --app flag.

```
replicated customer ls [flags]
```

### Aliases

```
ls, list
```

### Examples

```
# List all customers for the current application
replicated customer ls --app myapp

# Output results in JSON format
replicated customer ls --app myapp --output json

# Combine multiple flags
replicated customer ls --app myapp --include-test --output json
```

### Options

```
      --app-version string   Filter customers by a specific app version
  -h, --help                 help for ls
      --include-test         Include test customers in the results
      --output string        Output format: json|table (default: table) (default "table")
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated customer](replicated-cli-customer) - Manage customers



---


# replicated customer update

# replicated customer update

Update an existing customer

### Synopsis

Update an existing customer's information and settings.

 This command allows you to modify various attributes of a customer, including their name,
 custom ID, assigned channels, license type, and feature flags. You can update expiration dates,
 enable or disable specific features, and change channel assignments.

 The --customer flag is required to specify which customer to update.

```
replicated customer update --customer <id> --name <name> [options] [flags]
```

### Examples

```
# Update a customer's name
replicated customer update --customer cus_abcdef123456 --name "New Company Name"

# Change a customer's channel and make it the default
replicated customer update --customer cus_abcdef123456 --channel stable --default-channel stable

# Enable airgap installations for a customer
replicated customer update --customer cus_abcdef123456 --airgap

# Update multiple attributes at once
replicated customer update --customer cus_abcdef123456 --name "Updated Corp" --type paid --channel enterprise --airgap --snapshot

# Set an expiration date for a customer's license
replicated customer update --customer cus_abcdef123456 --expires-in 8760h

# Update a customer and output the result in JSON format
replicated customer update --customer cus_abcdef123456 --name "JSON Corp" --output json
```

### Options

```
      --airgap                      If set, the license will allow airgap installs.
+
      --channel stringArray         Release channel to which the customer should be assigned (can be specified multiple times)
      --custom-id string            Set a custom customer ID to more easily tie this customer record to your external data systems
      --customer string             The ID of the customer to update
      --default-channel string      Which of the specified channels should be the default channel. If not set, the first channel specified will be the default channel.
      --developer-mode              If set, Replicated SDK installed in dev mode will use mock data.
      --email string                Email address of the customer that is to be updated.
      --embedded-cluster-download   If set, the license will allow embedded cluster downloads.
      --ensure-channel              If set, channel will be created if it does not exist.
      --expires-in duration         If set, an expiration date will be set on the license. Supports Go durations like '72h' or '3600m'
      --geo-axis                    If set, the license will allow Geo Axis usage.
      --gitops                      If set, the license will allow GitOps usage.
      --helm-install                If set, the license will allow Helm installs.
      --helmvm-cluster-download     If set, the license will allow helmvm cluster downloads.
  -h, --help                        help for update
      --identity-service            If set, the license will allow Identity Service usage.
      --kots-install                If set, the license will allow KOTS install. Otherwise license will allow Helm CLI installs only. (default true)
      --kurl-install                If set, the license will allow kURL installs.
      --name string                 Name of the customer
      --output string               The output format to use. One of: json|table (default: table) (default "table")
      --snapshot                    If set, the license will allow Snapshots.
      --support-bundle-upload       If set, the license will allow uploading support bundles.
      --type string                 The license type to update. One of: dev|trial|paid|community|test (default: dev) (default "dev")
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated customer](replicated-cli-customer) - Manage customers



---


# replicated customer

# replicated customer

Manage customers

### Synopsis

The customer command allows vendors to create, display, and modify end customer records.

### Options

```
  -h, --help   help for customer
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated
* [replicated customer archive](replicated-cli-customer-archive) - Archive a customer
* [replicated customer create](replicated-cli-customer-create) - Create a new customer for the current application
* [replicated customer download-license](replicated-cli-customer-download-license) - Download a customer's license
* [replicated customer inspect](replicated-cli-customer-inspect) - Show detailed information about a specific customer
* [replicated customer ls](replicated-cli-customer-ls) - List customers for the current application
* [replicated customer update](replicated-cli-customer-update) - Update an existing customer



---


# replicated default clear-all

# replicated default clear-all

Clear all default values

### Synopsis

Clears all default values that are used by other commands.
+

This command removes all default values that are used by other commands run by the current user.

```
replicated default clear-all [flags]
```

### Examples

```
# Clear all default values
replicated default clear-all
```

### Options

```
  -h, --help   help for clear-all
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated default](replicated-cli-default) - Manage default values used by other commands



---


# replicated default clear

# replicated default clear

Clear default value for a key

### Synopsis

Clears the default value for the specified key.

This command removes default values that are used by other commands run by the current user.

Supported keys:
- app: the default application to use

```
replicated default clear KEY [flags]
```

### Examples

```
# Clear default application
replicated default clear app
```

### Options

```
  -h, --help   help for clear
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated default](replicated-cli-default) - Manage default values used by other commands



---


# replicated default set

# replicated default set

Set default value for a key

### Synopsis

Sets the default value for the specified key.

This command sets default values that will be used by other commands run by the current user.

Supported keys:
- app: the default application to use

The output can be customized using the --output flag to display results in
either table or JSON format.

```
replicated default set KEY VALUE [flags]
```

### Examples

```
# Set default application
replicated default set app my-app-slug
```

### Options

```
  -h, --help            help for set
      --output string   The output format to use. One of: json|table (default: table) (default "table")
```

### Options inherited from parent commands

```
      --app string     The app slug or app id to use in all calls
      --token string   The API token to use to access your app in the Vendor API
```

### SEE ALSO

* [replicated default](replicated-cli-default) - Manage default values used by other commands



---


# replicated default show

# replicated default show

Show default value for a key

### Synopsis

Shows the default value for the specified key.

This command shows default values that will be used by other commands run by the current user.

Supported keys:
- app: the default application to use

The output can be customized using the --output flag to display results in
either table or JSON format.

```
replicated default show KEY [flags]
```

### Examples

```
# Show default application
replicated default show app
```

### Options

```
  -h, --help            help for show
      --output string   The output format to use.
One of: json|table (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated default](replicated-cli-default) - Manage default values used by other commands + + + +--- + + +# replicated default + +# replicated default + +Manage default values used by other commands + +### Options + +``` + -h, --help help for default +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated +* [replicated default clear](replicated-cli-default-clear) - Clear default value for a key +* [replicated default clear-all](replicated-cli-default-clear-all) - Clear all default values +* [replicated default set](replicated-cli-default-set) - Set default value for a key +* [replicated default show](replicated-cli-default-show) - Show default value for a key + + + +--- + + +# replicated installer create + +# replicated installer create + +Create a new installer spec + +### Synopsis + +Create a new installer spec by providing YAML configuration for a https://kurl.sh cluster. + +``` +replicated installer create [flags] +``` + +### Options + +``` + --auto generate default values for use in CI + -y, --confirm-auto auto-accept the configuration generated by the --auto flag + --ensure-channel When used with --promote <channel>, will create the channel if it doesn't exist + -h, --help help for create + --promote string Channel name or id to promote this installer to + --yaml string The YAML config for this installer. Use '-' to read from stdin. Cannot be used with the --yaml-file flag. + --yaml-file string The file name with YAML config for this installer. Cannot be used with the --yaml flag. +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated installer](replicated-cli-installer) - Manage Kubernetes installers + + + +--- + + +# replicated installer ls + +# replicated installer ls + +List an app's Kubernetes Installers + +### Synopsis + +List an app's https://kurl.sh Kubernetes Installers + +``` +replicated installer ls [flags] +``` + +### Aliases + +``` +ls, list +``` + +### Options + +``` + -h, --help help for ls + --output string The output format to use. One of: json|table (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated installer](replicated-cli-installer) - Manage Kubernetes installers + + + +--- + + +# replicated installer + +# replicated installer + +Manage Kubernetes installers + +### Synopsis + +The installers command allows vendors to create, display, modify and promote kurl.sh specs for managing the installation of Kubernetes. 
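As a sketch, a minimal kurl.sh installer spec for use with 'replicated installer create' might look like the following (the add-on versions shown are illustrative; see https://kurl.sh for the currently supported add-ons and versions):

```
# installer.yaml (versions shown are illustrative)
apiVersion: cluster.kurl.sh/v1beta1
kind: Installer
metadata:
  name: my-installer
spec:
  kubernetes:
    version: 1.27.x
  containerd:
    version: 1.6.x
  flannel:
    version: 0.22.x
```

A spec like this could then be created and promoted with, for example, `replicated installer create --yaml-file installer.yaml --promote Beta --ensure-channel`.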
+
+### Options
+
+```
+  -h, --help   help for installer
+```
+
+### Options inherited from parent commands
+
+```
+      --app string     The app slug or app id to use in all calls
+      --token string   The API token to use to access your app in the Vendor API
+```
+
+### SEE ALSO
+
+* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated
+* [replicated installer create](replicated-cli-installer-create) - Create a new installer spec
+* [replicated installer ls](replicated-cli-installer-ls) - List an app's Kubernetes Installers
+
+
+
+---
+
+
+# Installing the Replicated CLI
+
+import Verify from "../partials/replicated-cli/_verify-install.mdx"
+import Sudo from "../partials/replicated-cli/_sudo-install.mdx"
+import Login from "../partials/replicated-cli/_login.mdx"
+import Logout from "../partials/replicated-cli/_logout.mdx"
+import AuthToken from "../partials/replicated-cli/_authorize-with-token-note.mdx"
+
+# Installing the Replicated CLI
+
+Vendors can use the Replicated CLI to manage their applications with Replicated programmatically, rather than using the Replicated vendor portal.
+
+## Prerequisites
+
+Complete the following prerequisites before installing the Replicated CLI:
+
+- Create a vendor account. See [Creating a Vendor Account](/vendor/vendor-portal-creating-account).
+- To run on Linux or Mac, install [curl](https://curl.haxx.se/). The non-Homebrew installation commands below also use `wget`.
+- To run through a Docker container, install [docker](https://www.docker.com).
+
+## Install and Run
+
+You can install and run the Replicated CLI in the following environments:
+
+* Directly on MacOS
+* Directly on Linux
+* Through Docker (useful for Windows, GitHub Actions, or computers where you cannot install the CLI directly)
+
+### MacOS
+
+To install and run the latest Replicated CLI on MacOS:
+
+1. Run one of the following commands:
+
+   - With Brew:
+
+     ```shell
+     brew install replicatedhq/replicated/cli
+     ```
+
+   - Without Brew:
+
+     ```shell
+     curl -s https://api.github.com/repos/replicatedhq/replicated/releases/latest \
+     | grep "browser_download_url.*darwin_all.tar.gz" \
+     | cut -d : -f 2,3 \
+     | tr -d \" \
+     | wget -O replicated.tar.gz -qi -
+     tar xf replicated.tar.gz replicated && rm replicated.tar.gz
+     mv replicated /usr/local/bin/replicated
+     ```
+
+   <Sudo/>
+
+1. <Verify/>
+
+1. <Login/>
+
+   <AuthToken/>
+
+1. <Logout/>
+
+### Linux
+
+To install and run the latest Replicated CLI on Linux:
+
+1. Run the following command:
+
+   ```shell
+   curl -s https://api.github.com/repos/replicatedhq/replicated/releases/latest \
+   | grep "browser_download_url.*linux_amd64.tar.gz" \
+   | cut -d : -f 2,3 \
+   | tr -d \" \
+   | wget -O replicated.tar.gz -qi -
+   tar xf replicated.tar.gz replicated && rm replicated.tar.gz
+   mv replicated /usr/local/bin/replicated
+   ```
+
+   <Sudo/>
+
+1. <Verify/>
+
+1. <Login/>
+
+   <AuthToken/>
+
+1. <Logout/>
+
+### Docker / Windows
+
+Installing in Docker environments requires that you set the `REPLICATED_API_TOKEN` environment variable to authorize the Replicated CLI with an API token. For more information, see [(Optional) Set Environment Variables](#env-var) below.
+
+To install and run the latest Replicated CLI in Docker environments:
+
+1. Generate a service account or user API token in the vendor portal. To create new releases, the token must have `Read/Write` access. See [Generating API Tokens](/vendor/replicated-api-tokens).
+
+1. Get the latest Replicated CLI installation files from the [replicatedhq/replicated repository](https://github.com/replicatedhq/replicated/releases) on GitHub.
+
+   Download and install the files. For simplicity, the usage in the next step assumes that the CLI is downloaded and installed to the desktop.
+
+1. Authorize the Replicated CLI:
+
+   - Through a Docker container:
+
+     ```shell
+     docker run \
+       -e REPLICATED_API_TOKEN=$TOKEN \
+       replicated/vendor-cli --help
+     ```
+     Replace `TOKEN` with your API token.
+
+   - On Windows:
+
+     ```dos
+     docker.exe run \
+       -e REPLICATED_API_TOKEN=%TOKEN% \
+       replicated/vendor-cli --help
+     ```
+
+     Replace `TOKEN` with your API token.
+
+   For more information about the `docker run` command, see [docker run](https://docs.docker.com/engine/reference/commandline/run/) in the Docker documentation.
+
+## (Optional) Set Environment Variables {#env-var}
+
+The Replicated CLI supports setting the following environment variables:
+
+* **`REPLICATED_API_TOKEN`**: A service account or user API token generated from a vendor portal team or individual account. The `REPLICATED_API_TOKEN` environment variable has the following use cases:
+
+  * To use Replicated CLI commands as part of automation (such as from continuous integration and continuous delivery pipelines), authenticate by providing the `REPLICATED_API_TOKEN` environment variable.
+
+  * To authorize the Replicated CLI when installing and running the CLI in Docker containers.
+
+  * Optionally set the `REPLICATED_API_TOKEN` environment variable instead of using the `replicated login` command to authorize the Replicated CLI in MacOS or Linux environments.
+
+* **`REPLICATED_APP`**: The slug of the target application.
+
+  When using the Replicated CLI to manage applications through your vendor account (including channels, releases, customers, or other objects associated with an application), you can set the `REPLICATED_APP` environment variable to avoid passing the application slug with each command.
+
+### `REPLICATED_API_TOKEN`
+
+To set the `REPLICATED_API_TOKEN` environment variable:
+
+1. Generate a service account or user API token in the vendor portal. To create new releases, the token must have `Read/Write` access. See [Generating API Tokens](/vendor/replicated-api-tokens).
+
+1. Set the environment variable, replacing `TOKEN` with the token you generated in the previous step:
+
+   * **MacOS or Linux**:
+
+     ```
+     export REPLICATED_API_TOKEN=TOKEN
+     ```
+
+   * **Docker**:
+
+     ```
+     docker run \
+       -e REPLICATED_API_TOKEN=$TOKEN \
+       replicated/vendor-cli --help
+     ```
+
+   * **Windows**:
+
+     ```
+     docker.exe run \
+       -e REPLICATED_API_TOKEN=%TOKEN% \
+       replicated/vendor-cli --help
+     ```
+
+### `REPLICATED_APP`
+
+To set the `REPLICATED_APP` environment variable:
+
+1. In the [vendor portal](https://vendor.replicated.com), go to the **Application Settings** page and copy the slug for the target application. For more information, see [Get the Application Slug](/vendor/vendor-portal-manage-app#slug) in _Managing Applications_.
+
+1. Set the environment variable, replacing `APP_SLUG` with the slug for the target application that you retrieved in the previous step:
+
+   * **MacOS or Linux**:
+
+     ```
+     export REPLICATED_APP=APP_SLUG
+     ```
+
+   * **Docker**:
+
+     ```
+     docker run \
+       -e REPLICATED_APP=$APP_SLUG \
+       replicated/vendor-cli --help
+     ```
+
+   * **Windows**:
+
+     ```
+     docker.exe run \
+       -e REPLICATED_APP=%APP_SLUG% \
+       replicated/vendor-cli --help
+     ```
+
+
+---
+
+
+# replicated instance inspect
+
+# replicated instance inspect
+
+Show full details for a customer instance
+
+### Synopsis
+
+Show full details for a customer instance
+
+```
+replicated instance inspect [flags]
+```
+
+### Options
+
+```
+      --customer string   Customer Name or ID
+  -h, --help              help for inspect
+      --instance string   Instance Name or ID
+      --output string     The output format to use. One of: json|table (default: table) (default "table")
+```
+
+### Options inherited from parent commands
+
+```
+      --app string     The app slug or app id to use in all calls
+      --token string   The API token to use to access your app in the Vendor API
+```
+
+### SEE ALSO
+
+* [replicated instance](replicated-cli-instance) - Manage instances
+
+
+
+---
+
+
+# replicated instance ls
+
+# replicated instance ls
+
+List customer instances
+
+### Synopsis
+
+List customer instances
+
+```
+replicated instance ls [flags]
+```
+
+### Aliases
+
+```
+ls, list
+```
+
+### Options
+
+```
+      --customer string   Customer Name or ID
+  -h, --help              help for ls
+      --output string     The output format to use. One of: json|table (default: table) (default "table")
+      --tag stringArray   Tags to use to filter instances (key=value format, can be specified multiple times). Only one tag needs to match (an OR operation)
+```
+
+### Options inherited from parent commands
+
+```
+      --app string     The app slug or app id to use in all calls
+      --token string   The API token to use to access your app in the Vendor API
+```
+
+### SEE ALSO
+
+* [replicated instance](replicated-cli-instance) - Manage instances
+
+
+
+---
+
+
+# replicated instance tag
+
+# replicated instance tag
+
+Tag an instance
+
+### Synopsis
+
+Remove or add instance tags
+
+```
+replicated instance tag [flags]
+```
+
+### Options
+
+```
+      --customer string   Customer Name or ID
+  -h, --help              help for tag
+      --instance string   Instance Name or ID
+      --output string     The output format to use. One of: json|table (default: table) (default "table")
+      --tag stringArray   Tags to apply to instance. Leave value empty to remove tag. Tags not specified will not be removed.
+```
+
+### Options inherited from parent commands
+
+```
+      --app string     The app slug or app id to use in all calls
+      --token string   The API token to use to access your app in the Vendor API
+```
+
+### SEE ALSO
+
+* [replicated instance](replicated-cli-instance) - Manage instances
+
+
+
+---
+
+
+# replicated instance
+
+# replicated instance
+
+Manage instances
+
+### Synopsis
+
+The instance command allows vendors to display and tag customer instances.
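+
+### Examples
+
+The following is a brief sketch of common usage; the customer name, `INSTANCE_ID`, and the tag value are placeholders:
+
+```
+# List instances for a customer
+replicated instance ls --customer "Example Customer"
+
+# Tag an instance (key=value format)
+replicated instance tag --customer "Example Customer" --instance INSTANCE_ID --tag env=dev
+```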
+
+### Options
+
+```
+  -h, --help   help for instance
+```
+
+### Options inherited from parent commands
+
+```
+      --app string     The app slug or app id to use in all calls
+      --token string   The API token to use to access your app in the Vendor API
+```
+
+### SEE ALSO
+
+* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated
+* [replicated instance inspect](replicated-cli-instance-inspect) - Show full details for a customer instance
+* [replicated instance ls](replicated-cli-instance-ls) - List customer instances
+* [replicated instance tag](replicated-cli-instance-tag) - Tag an instance
+
+
+
+---
+
+
+# replicated login
+
+# replicated login
+
+Log in to Replicated
+
+### Synopsis
+
+This command will open your browser to ask you for authentication details and create / retrieve an API token for the CLI to use.
+
+```
+replicated login [flags]
+```
+
+### Options
+
+```
+  -h, --help   help for login
+```
+
+### Options inherited from parent commands
+
+```
+      --app string     The app slug or app id to use in all calls
+      --token string   The API token to use to access your app in the Vendor API
+```
+
+### SEE ALSO
+
+* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated
+
+
+
+---
+
+
+# replicated logout
+
+# replicated logout
+
+Log out from Replicated
+
+### Synopsis
+
+This command will remove any stored credentials from the CLI.
+
+```
+replicated logout [flags]
+```
+
+### Options
+
+```
+  -h, --help   help for logout
+```
+
+### Options inherited from parent commands
+
+```
+      --app string     The app slug or app id to use in all calls
+      --token string   The API token to use to access your app in the Vendor API
+```
+
+### SEE ALSO
+
+* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated
+
+
+
+---
+
+
+# replicated registry add dockerhub
+
+# replicated registry add dockerhub
+
+Add a DockerHub registry
+
+### Synopsis
+
+Add a DockerHub registry using a username/password or an account token
+
+```
+replicated registry add dockerhub [flags]
+```
+
+### Options
+
+```
+      --authtype string   Auth type for the registry (default "password")
+  -h, --help              help for dockerhub
+      --output string     The output format to use. One of: json|table (default: table) (default "table")
+      --password string   The password to authenticate to the registry with
+      --password-stdin    Take the password from stdin
+      --token string      The token to authenticate to the registry with
+      --token-stdin       Take the token from stdin
+      --username string   The username to authenticate to the registry with
+```
+
+### Options inherited from parent commands
+
+```
+      --app string        The app slug or app id to use in all calls
+      --skip-validation   Skip validation of the registry (not recommended)
+```
+
+### SEE ALSO
+
+* [replicated registry add](replicated-cli-registry-add) - add
+
+
+
+---
+
+
+# replicated registry add ecr
+
+# replicated registry add ecr
+
+Add an ECR registry
+
+### Synopsis
+
+Add an ECR registry using an Access Key ID and Secret Access Key
+
+```
+replicated registry add ecr [flags]
+```
+
+### Options
+
+```
+      --accesskeyid string   The access key id to authenticate to the registry with
+      --endpoint string      The ECR endpoint
+  -h, --help                 help for ecr
+      --output string        The output format to use.
One of: json|table (default: table) (default "table") + --secretaccesskey string The secret access key to authenticate to the registry with + --secretaccesskey-stdin Take the secret access key from stdin +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --skip-validation Skip validation of the registry (not recommended) + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated registry add](replicated-cli-registry-add) - add + + + +--- + + +# replicated registry add gar + +# replicated registry add gar + +Add a Google Artifact Registry + +### Synopsis + +Add a Google Artifact Registry using a service account key + +``` +replicated registry add gar [flags] +``` + +### Options + +``` + --authtype string Auth type for the registry (default "serviceaccount") + --endpoint string The GAR endpoint + -h, --help help for gar + --output string The output format to use. One of: json|table (default: table) (default "table") + --serviceaccountkey string The service account key to authenticate to the registry with + --serviceaccountkey-stdin Take the service account key from stdin + --token string The token to use to auth to the registry with + --token-stdin Take the token from stdin +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --skip-validation Skip validation of the registry (not recommended) +``` + +### SEE ALSO + +* [replicated registry add](replicated-cli-registry-add) - add + + + +--- + + +# replicated registry add gcr + +# replicated registry add gcr + +Add a Google Container Registry + +### Synopsis + +Add a Google Container Registry using a service account key + +``` +replicated registry add gcr [flags] +``` + +### Options + +``` + --endpoint string The GCR endpoint + -h, --help help for gcr + --output string The output format to use. One of: json|table (default: table) (default "table") + --serviceaccountkey string The service account key to authenticate to the registry with + --serviceaccountkey-stdin Take the service account key from stdin +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --skip-validation Skip validation of the registry (not recommended) + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated registry add](replicated-cli-registry-add) - add + + + +--- + + +# replicated registry add ghcr + +# replicated registry add ghcr + +Add a GitHub Container Registry + +### Synopsis + +Add a GitHub Container Registry using a username and personal access token (PAT) + +``` +replicated registry add ghcr [flags] +``` + +### Options + +``` + -h, --help help for ghcr + --output string The output format to use. 
One of: json|table (default: table) (default "table")
+      --token string    The token to use to auth to the registry with
+      --token-stdin     Take the token from stdin
+```
+
+### Options inherited from parent commands
+
+```
+      --app string        The app slug or app id to use in all calls
+      --skip-validation   Skip validation of the registry (not recommended)
+```
+
+### SEE ALSO
+
+* [replicated registry add](replicated-cli-registry-add) - add
+
+
+
+---
+
+
+# replicated registry add other
+
+# replicated registry add other
+
+Add a generic registry
+
+### Synopsis
+
+Add a generic registry using a username/password
+
+```
+replicated registry add other [flags]
+```
+
+### Options
+
+```
+      --endpoint string   endpoint for the registry
+  -h, --help              help for other
+      --output string     The output format to use. One of: json|table (default: table) (default "table")
+      --password string   The password to authenticate to the registry with
+      --password-stdin    Take the password from stdin
+      --username string   The username to authenticate to the registry with
+```
+
+### Options inherited from parent commands
+
+```
+      --app string        The app slug or app id to use in all calls
+      --skip-validation   Skip validation of the registry (not recommended)
+      --token string      The API token to use to access your app in the Vendor API
+```
+
+### SEE ALSO
+
+* [replicated registry add](replicated-cli-registry-add) - add
+
+
+
+---
+
+
+# replicated registry add quay
+
+# replicated registry add quay
+
+Add a quay.io registry
+
+### Synopsis
+
+Add a quay.io registry using a username/password (or a robot account)
+
+```
+replicated registry add quay [flags]
+```
+
+### Options
+
+```
+  -h, --help              help for quay
+      --output string     The output format to use. One of: json|table (default: table) (default "table")
+      --password string   The password to authenticate to the registry with
+      --password-stdin    Take the password from stdin
+      --username string   The username to authenticate to the registry with
+```
+
+### Options inherited from parent commands
+
+```
+      --app string        The app slug or app id to use in all calls
+      --skip-validation   Skip validation of the registry (not recommended)
+      --token string      The API token to use to access your app in the Vendor API
+```
+
+### SEE ALSO
+
+* [replicated registry add](replicated-cli-registry-add) - add
+
+
+
+---
+
+
+# replicated registry add
+
+# replicated registry add
+
+add
+
+### Synopsis
+
+Add a new registry to your team. Use one of the subcommands to add a specific type of registry.
+
+### Options
+
+```
+  -h, --help              help for add
+      --skip-validation   Skip validation of the registry (not recommended)
+```
+
+### Options inherited from parent commands
+
+```
+      --app string     The app slug or app id to use in all calls
+      --token string   The API token to use to access your app in the Vendor API
+```
+
+### SEE ALSO
+
+* [replicated registry](replicated-cli-registry) - Manage registries
+* [replicated registry add dockerhub](replicated-cli-registry-add-dockerhub) - Add a DockerHub registry
+* [replicated registry add ecr](replicated-cli-registry-add-ecr) - Add an ECR registry
+* [replicated registry add gar](replicated-cli-registry-add-gar) - Add a Google Artifact Registry
+* [replicated registry add gcr](replicated-cli-registry-add-gcr) - Add a Google Container Registry
+* [replicated registry add ghcr](replicated-cli-registry-add-ghcr) - Add a GitHub Container Registry
+* [replicated registry add other](replicated-cli-registry-add-other) - Add a generic registry
+* [replicated registry add quay](replicated-cli-registry-add-quay) - Add a quay.io registry
+
+
+
+---
+
+
+# replicated registry ls
+
+# 
replicated registry ls + +list registries + +### Synopsis + +list registries, or a single registry by name + +``` +replicated registry ls [NAME] [flags] +``` + +### Aliases + +``` +ls, list +``` + +### Options + +``` + -h, --help help for ls + --output string The output format to use. One of: json|table (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated registry](replicated-cli-registry) - Manage registries + + + +--- + + +# replicated registry rm + +# replicated registry rm + +remove registry + +### Synopsis + +remove registry by endpoint + +``` +replicated registry rm [ENDPOINT] [flags] +``` + +### Aliases + +``` +rm, delete +``` + +### Options + +``` + -h, --help help for rm +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated registry](replicated-cli-registry) - Manage registries + + + +--- + + +# replicated registry test + +# replicated registry test + +test registry + +### Synopsis + +test registry + +``` +replicated registry test HOSTNAME [flags] +``` + +### Options + +``` + -h, --help help for test + --image string The image to test pulling +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated registry](replicated-cli-registry) - Manage registries + + + +--- + + +# replicated registry + +# replicated registry + +Manage registries + +### Synopsis + +registry can be used to manage existing registries and add new registries to a team + +### Options + +``` + -h, --help help for registry +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated +* [replicated registry add](replicated-cli-registry-add) - add +* [replicated registry ls](replicated-cli-registry-ls) - list registries +* [replicated registry rm](replicated-cli-registry-rm) - remove registry +* [replicated registry test](replicated-cli-registry-test) - test registry + + + +--- + + +# replicated release compatibility + +# replicated release compatibility + +Report release compatibility + +### Synopsis + +Report release compatibility for a kubernetes distribution and version + +``` +replicated release compatibility SEQUENCE [flags] +``` + +### Options + +``` + --distribution string Kubernetes distribution of the cluster to report on. + --failure If set, the compatibility will be reported as a failure. + -h, --help help for compatibility + --notes string Additional notes to report. + --success If set, the compatibility will be reported as a success. 
+      --version string        Kubernetes version of the cluster to report on (format is distribution dependent)
+```
+
+### Options inherited from parent commands
+
+```
+      --app string     The app slug or app id to use in all calls
+      --token string   The API token to use to access your app in the Vendor API
+```
+
+### SEE ALSO
+
+* [replicated release](replicated-cli-release) - Manage app releases
+
+
+
+---
+
+
+# replicated release create
+
+# replicated release create
+
+Create a new release
+
+### Synopsis
+
+Create a new release by providing application manifests for the next release in
+your sequence.
+
+```
+replicated release create [flags]
+```
+
+### Options
+
+```
+      --auto                   generate default values for use in CI
+  -y, --confirm-auto           auto-accept the configuration generated by the --auto flag
+      --ensure-channel         When used with --promote <channel>, will create the channel if it doesn't exist
+      --fail-on string         The minimum severity to cause the command to exit with a non-zero exit code. Supported values are [info, warn, error, none]. (default "error")
+  -h, --help                   help for create
+      --lint                   Lint a manifests directory prior to creation of the KOTS Release.
+      --promote string         Channel name or id to promote this release to
+      --release-notes string   When used with --promote <channel>, sets the **markdown** release notes
+      --version string         When used with --promote <channel>, sets the version label for the release in this channel
+      --yaml-dir string        The directory containing multiple yamls for a Kots release. Cannot be used with the --yaml flag.
+```
+
+### Options inherited from parent commands
+
+```
+      --app string     The app slug or app id to use in all calls
+      --token string   The API token to use to access your app in the Vendor API
+```
+
+### SEE ALSO
+
+* [replicated release](replicated-cli-release) - Manage app releases
+
+
+
+---
+
+
+# replicated release download
+
+# replicated release download
+
+Download application manifests for a release.
+
+### Synopsis
+
+Download application manifests for a release to a specified directory.
+
+For non-KOTS applications, this is equivalent to the 'release inspect' command.
+
+```
+replicated release download RELEASE_SEQUENCE [flags]
+```
+
+### Examples
+
+```
+replicated release download 1 --dest ./manifests
+```
+
+### Options
+
+```
+  -d, --dest string   Directory to which release manifests should be downloaded
+  -h, --help          help for download
+```
+
+### Options inherited from parent commands
+
+```
+      --app string     The app slug or app id to use in all calls
+      --token string   The API token to use to access your app in the Vendor API
+```
+
+### SEE ALSO
+
+* [replicated release](replicated-cli-release) - Manage app releases
+
+
+
+---
+
+
+# replicated release inspect
+
+# replicated release inspect
+
+Show information about a release
+
+### Synopsis
+
+Show information about the specified application release.
+
+This command displays detailed information about a specific release of an application.
+
+The output can be customized using the --output flag to display results in
+either table or JSON format.
+
+
+```
+replicated release inspect RELEASE_SEQUENCE [flags]
+```
+
+### Examples
+
+```
+# Display information about a release
+replicated release inspect 123
+
+# Display information about a release in JSON format
+replicated release inspect 123 --output json
+```
+
+### Options
+
+```
+  -h, --help            help for inspect
+      --output string   The output format to use. One of: json|table (default: table) (default "table")
+```
+
+### Options inherited from parent commands
+
+```
+      --app string     The app slug or app id to use in all calls
+      --token string   The API token to use to access your app in the Vendor API
+```
+
+### SEE ALSO
+
+* [replicated release](replicated-cli-release) - Manage app releases
+
+
+
+---
+
+
+# replicated release lint
+
+# replicated release lint
+
+Lint a directory of KOTS manifests
+
+### Synopsis
+
+Lint a directory of KOTS manifests
+
+```
+replicated release lint [flags]
+```
+
+### Options
+
+```
+      --fail-on string    The minimum severity to cause the command to exit with a non-zero exit code. Supported values are [info, warn, error, none]. (default "error")
+  -h, --help              help for lint
+      --output string     The output format to use. One of: json|table (default: table) (default "table")
+      --yaml-dir string   The directory containing multiple yamls for a Kots release. Cannot be used with the --yaml flag.
+```
+
+### Options inherited from parent commands
+
+```
+      --app string     The app slug or app id to use in all calls
+      --token string   The API token to use to access your app in the Vendor API
+```
+
+### SEE ALSO
+
+* [replicated release](replicated-cli-release) - Manage app releases
+
+
+
+---
+
+
+# replicated release ls
+
+# replicated release ls
+
+List all of an app's releases
+
+### Synopsis
+
+List all of an app's releases
+
+```
+replicated release ls [flags]
+```
+
+### Aliases
+
+```
+ls, list
+```
+
+### Options
+
+```
+  -h, --help            help for ls
+      --output string   The output format to use. One of: json|table (default: table) (default "table")
+```
+
+### Options inherited from parent commands
+
+```
+      --app string     The app slug or app id to use in all calls
+      --token string   The API token to use to access your app in the Vendor API
+```
+
+### SEE ALSO
+
+* [replicated release](replicated-cli-release) - Manage app releases
+
+
+
+---
+
+
+# replicated release promote
+
+# replicated release promote
+
+Set the release for a channel
+
+### Synopsis
+
+Set the release for a channel
+
+```
+replicated release promote SEQUENCE CHANNEL_ID [flags]
+```
+
+### Examples
+
+```
+replicated release promote 15 fe4901690971757689f022f7a460f9b2
+```
+
+### Options
+
+```
+  -h, --help                   help for promote
+      --optional               If set, this release can be skipped
+      --release-notes string   The **markdown** release notes
+      --required               If set, this release can't be skipped
+      --version string         A version label for the release in this channel
+```
+
+### Options inherited from parent commands
+
+```
+      --app string     The app slug or app id to use in all calls
+      --token string   The API token to use to access your app in the Vendor API
+```
+
+### SEE ALSO
+
+* [replicated release](replicated-cli-release) - Manage app releases
+
+
+
+---
+
+
+# replicated release test
+
+# replicated release test
+
+Test the application release
+
+### Synopsis
+
+Test the application release
+
+```
+replicated release test SEQUENCE [flags]
+```
+
+### Options
+
+```
+  -h, --help   help for test
+```
+
+### Options inherited from parent commands
+
+```
+      --app string     The app slug or app id to use in all calls
+      --token string   The API token to use to access your app in the Vendor API
+```
+
+### SEE ALSO
+
+* [replicated release](replicated-cli-release) - Manage app releases
+
+
+
+---
+
+
+# replicated release update
+
+# replicated release update
+
+Update a release's yaml config
+
+### Synopsis
+
+Update a release's yaml config
+
+```
+replicated release update SEQUENCE [flags]
+```
+
+### Options
+
+```
+  -h, --help               help for update
+      --yaml string        The new YAML config for this release. Use '-' to read from stdin. Cannot be used with the --yaml-file flag.
+      --yaml-dir string    The directory containing multiple yamls for a Kots release. Cannot be used with the --yaml flag.
+      --yaml-file string   The file name with YAML config for this release. Cannot be used with the --yaml flag.
+```
+
+### Options inherited from parent commands
+
+```
+      --app string     The app slug or app id to use in all calls
+      --token string   The API token to use to access your app in the Vendor API
+```
+
+### SEE ALSO
+
+* [replicated release](replicated-cli-release) - Manage app releases
+
+
+
+---
+
+
+# replicated release
+
+# replicated release
+
+Manage app releases
+
+### Synopsis
+
+The release command allows vendors to create, display, and promote their releases.
+
+### Options
+
+```
+  -h, --help   help for release
+```
+
+### Options inherited from parent commands
+
+```
+      --app string     The app slug or app id to use in all calls
+      --token string   The API token to use to access your app in the Vendor API
+```
+
+### SEE ALSO
+
+* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated
+* [replicated release compatibility](replicated-cli-release-compatibility) - Report release compatibility
+* [replicated release create](replicated-cli-release-create) - Create a new release
+* [replicated release download](replicated-cli-release-download) - Download application manifests for a release.
+* [replicated release inspect](replicated-cli-release-inspect) - Show information about a release
+* [replicated release lint](replicated-cli-release-lint) - Lint a directory of KOTS manifests
+* [replicated release ls](replicated-cli-release-ls) - List all of an app's releases
+* [replicated release promote](replicated-cli-release-promote) - Set the release for a channel
+* [replicated release test](replicated-cli-release-test) - Test the application release
+* [replicated release update](replicated-cli-release-update) - Update a release's yaml config
+
+
+
+---
+
+
+# replicated version upgrade
+
+# replicated version upgrade
+
+Upgrade the Replicated CLI to the latest version
+
+### Synopsis
+
+Download, verify, and upgrade the Replicated CLI to the latest version
+
+```
+replicated version upgrade [flags]
+```
+
+### Options
+
+```
+  -h, --help   help for upgrade
+```
+
+### Options inherited from parent commands
+
+```
+      --app string     The app slug or app id to use in all calls
+      --token string   The API token to use to access your app in the Vendor API
+```
+
+### SEE ALSO
+
+* [replicated version](replicated-cli-version) - Print the current version and exit
+
+
+
+---
+
+
+# replicated version
+
+# replicated version
+
+Print the current version and exit
+
+### Synopsis
+
+Print the current version and exit
+
+```
+replicated version [flags]
+```
+
+### Options
+
+```
+  -h, --help   help for version
+      --json   output version info in json
+```
+
+### Options inherited from parent commands
+
+```
+      --app string     The app slug or app id to use in all calls
+      --token string   The API token to use to access your app in the Vendor API
+```
+
+### SEE ALSO
+
+* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated
+* [replicated version upgrade](replicated-cli-version-upgrade) - Upgrade the Replicated CLI to the latest version
+
+
+
+---
+
+
+# replicated vm create
+
+# replicated vm create
+
+Create one or more test VMs with specified distribution, version, and configuration options.
+
+### Synopsis
+
+Create one or more test VMs with a specified distribution, version, and a variety of customizable configuration options.
+
+This command allows you to provision VMs with different distributions (e.g., Ubuntu, RHEL), versions, instance types, and more. You can set the number of VMs to create, disk size, and specify the network to use. If no network is provided, a new network will be created automatically. You can also assign tags to your VMs and use a TTL (Time-To-Live) to define how long the VMs should live.
+
+By default, the command provisions one VM, but you can customize the number of VMs to create by using the "--count" flag. Additionally, you can use the "--dry-run" flag to simulate the creation without actually provisioning the VMs.
+
+The command also supports a "--wait" flag to wait for the VMs to be ready before returning control, with a customizable timeout duration.
+
+```
+replicated vm create [flags]
+```
+
+### Examples
+
+```
+# Create a single Ubuntu 20.04 VM
+replicated vm create --distribution ubuntu --version 20.04
+
+# Create 3 Ubuntu 22.04 VMs
+replicated vm create --distribution ubuntu --version 22.04 --count 3
+
+# Create 5 Ubuntu VMs with a custom instance type and disk size
+replicated vm create --distribution ubuntu --version 20.04 --count 5 --instance-type r1.medium --disk 100
+```
+
+### Options
+
+```
+      --count int              Number of matching VMs to create (default 1)
+      --disk int               Disk Size (GiB) to request per node (default 50)
+      --distribution string    Distribution of the vm to provision
+      --dry-run                Dry run
+  -h, --help                   help for create
+      --instance-type string   The type of instance to use (e.g. r1.medium)
+      --name string            VM name (defaults to random name)
+      --network string         The network to use for the VM(s). If not supplied, create a new network
+      --output string          The output format to use. One of: json|table|wide (default: table) (default "table")
+      --tag stringArray        Tag to apply to the VM (key=value format, can be specified multiple times)
+      --ttl string             VM TTL (duration, max 48h)
+      --version string         Version to provision (format is distribution dependent)
+      --wait duration          Wait duration for VM(s) to be ready (leave empty to not wait)
+```
+
+### Options inherited from parent commands
+
+```
+      --app string     The app slug or app id to use in all calls
+      --token string   The API token to use to access your app in the Vendor API
+```
+
+### SEE ALSO
+
+* [replicated vm](replicated-cli-vm) - Manage test virtual machines.
+
+
+
+---
+
+
+# replicated vm ls
+
+# replicated vm ls
+
+List test VMs and their status, with optional filters for start/end time and terminated VMs.
+
+### Synopsis
+
+List all test VMs in your account, including their current status, distribution, version, and more. You can use optional flags to filter the output based on VM termination status, start time, or end time. This command can also watch the VM status in real-time.
+
+By default, the command will return a table of all VMs, but you can switch to JSON or wide output formats for more detailed information. The command supports filtering to show only terminated VMs or to specify a time range for the query.
+
+You can use the '--watch' flag to monitor VMs continuously. This will refresh the list of VMs every 2 seconds, displaying any updates in real-time, such as new VMs being created or existing VMs being terminated.
+ +The command also allows you to customize the output format, supporting 'json', 'table', and 'wide' views for flexibility based on your needs. + +``` +replicated vm ls [flags] +``` + +### Aliases + +``` +ls, list +``` + +### Examples + +``` +# List all active VMs +replicated vm ls + +# List all VMs that were created after a specific start time +replicated vm ls --start-time 2024-10-01T00:00:00Z + +# Show only terminated VMs +replicated vm ls --show-terminated + +# Watch VM status changes in real-time +replicated vm ls --watch +``` + +### Options + +``` + --end-time string end time for the query (Format: 2006-01-02T15:04:05Z) + -h, --help help for ls + --output string The output format to use. One of: json|table|wide (default: table) (default "table") + --show-terminated when set, only show terminated vms + --start-time string start time for the query (Format: 2006-01-02T15:04:05Z) + -w, --watch watch vms +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated vm](replicated-cli-vm) - Manage test virtual machines. + + + +--- + + +# replicated vm port expose + +# replicated vm port expose + +Expose a port on a vm to the public internet. + +### Synopsis + +The 'vm port expose' command is used to expose a specified port on a vm to the public internet. When exposing a port, the command automatically creates a DNS entry and, if using the "https" protocol, provisions a TLS certificate for secure communication. + +You can also create a wildcard DNS entry and TLS certificate by specifying the "--wildcard" flag. Please note that creating a wildcard certificate may take additional time. + +This command supports different protocols including "http", "https", "ws", and "wss" for web traffic and web socket communication. + +``` +replicated vm port expose VM_ID --port PORT [flags] +``` + +### Examples + +``` +# Expose port 8080 with HTTPS protocol and wildcard DNS +replicated vm port expose VM_ID --port 8080 --protocol https --wildcard + +# Expose port 3000 with HTTP protocol +replicated vm port expose VM_ID --port 3000 --protocol http + +# Expose port 8080 with multiple protocols +replicated vm port expose VM_ID --port 8080 --protocol http,https + +# Expose port 8080 and display the result in JSON format +replicated vm port expose VM_ID --port 8080 --protocol https --output json +``` + +### Options + +``` + -h, --help help for expose + --output string The output format to use. One of: json|table|wide (default: table) (default "table") + --port int Port to expose (required) + --protocol strings Protocol to expose (valid values are "http", "https", "ws" and "wss") (default [http,https]) + --wildcard Create a wildcard DNS entry and TLS certificate for this port +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated vm port](replicated-cli-vm-port) - Manage VM ports. + + + +--- + + +# replicated vm port ls + +# replicated vm port ls + +List vm ports for a vm. + +### Synopsis + +The 'vm port ls' command lists all the ports configured for a specific vm. You must provide the vm ID to retrieve and display the ports. + +This command is useful for viewing the current port configurations, protocols, and other related settings of your test vm. 
The output format can be customized to suit your needs, and the available formats include table, JSON, and wide views. + +``` +replicated vm port ls VM_ID [flags] +``` + +### Examples + +``` +# List ports for a vm in the default table format +replicated vm port ls VM_ID + +# List ports for a vm in JSON format +replicated vm port ls VM_ID --output json + +# List ports for a vm in wide format +replicated vm port ls VM_ID --output wide +``` + +### Options + +``` + -h, --help help for ls + --output string The output format to use. One of: json|table|wide (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated vm port](replicated-cli-vm-port) - Manage VM ports. + + + +--- + + +# replicated vm port rm + +# replicated vm port rm + +Remove vm port by ID. + +### Synopsis + +The 'vm port rm' command removes a specific port from a vm. You must provide the ID of the port to remove. + +This command is useful for managing the network settings of your test vms by allowing you to clean up unused or incorrect ports. After removing a port, the updated list of ports will be displayed. + +``` +replicated vm port rm VM_ID --id PORT_ID [flags] +``` + +### Examples + +``` +# Remove a port using its ID +replicated vm port rm VM_ID --id PORT_ID + +# Remove a port and display the result in JSON format +replicated vm port rm VM_ID --id PORT_ID --output json +``` + +### Options + +``` + -h, --help help for rm + --id string ID of the port to remove (required) + --output string The output format to use. One of: json|table|wide (default: table) (default "table") +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated vm port](replicated-cli-vm-port) - Manage VM ports. + + + +--- + + +# replicated vm port + +# replicated vm port + +Manage VM ports. + +### Synopsis + +The 'vm port' command is a parent command for managing ports in a vm. It allows users to list, remove, or expose specific ports used by the vm. Use the subcommands (such as 'ls', 'rm', and 'expose') to manage port configurations effectively. + +This command provides flexibility for handling ports in various test vms, ensuring efficient management of vm networking settings. + +### Examples + +``` +# List all exposed ports in a vm +replicated vm port ls [VM_ID] + +# Remove an exposed port from a vm +replicated vm port rm [VM_ID] [PORT] + +# Expose a new port in a vm +replicated vm port expose [VM_ID] [PORT] +``` + +### Options + +``` + -h, --help help for port +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated vm](replicated-cli-vm) - Manage test virtual machines. +* [replicated vm port expose](replicated-cli-vm-port-expose) - Expose a port on a vm to the public internet. +* [replicated vm port ls](replicated-cli-vm-port-ls) - List vm ports for a vm. +* [replicated vm port rm](replicated-cli-vm-port-rm) - Remove vm port by ID. + + + +--- + + +# replicated vm rm + +# replicated vm rm + +Remove test VM(s) immediately, with options to filter by name, tag, or remove all VMs. 
+ +### Synopsis + +The 'rm' command allows you to remove test VMs from your account immediately. You can specify one or more VM IDs directly, or use flags to filter which VMs to remove based on their name, tags, or simply remove all VMs at once. + +This command supports multiple filtering options, including removing VMs by their name, by specific tags, or by specifying the '--all' flag to remove all VMs in your account. + +You can also use the '--dry-run' flag to simulate the removal without actually deleting the VMs. + +``` +replicated vm rm ID [ID …] [flags] +``` + +### Aliases + +``` +rm, delete +``` + +### Examples + +``` +# Remove a VM by ID +replicated vm rm aaaaa11 + +# Remove multiple VMs by ID +replicated vm rm aaaaa11 bbbbb22 ccccc33 + +# Remove all VMs with a specific name +replicated vm rm --name test-vm + +# Remove all VMs with a specific tag +replicated vm rm --tag env=dev + +# Remove all VMs +replicated vm rm --all + +# Perform a dry run of removing all VMs +replicated vm rm --all --dry-run +``` + +### Options + +``` + --all remove all vms + --dry-run Dry run + -h, --help help for rm + --name stringArray Name of the vm to remove (can be specified multiple times) + --tag stringArray Tag of the vm to remove (key=value format, can be specified multiple times) +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated vm](replicated-cli-vm) - Manage test virtual machines. + + + +--- + + +# replicated vm update ttl + +# replicated vm update ttl + +Update TTL for a test VM. + +### Synopsis + +The 'ttl' command allows you to update the Time to Live (TTL) for a test VM. This command modifies the lifespan of a running VM by updating its TTL, which is a duration starting from the moment the VM is provisioned. + +The TTL specifies how long the VM will run before it is automatically terminated. You can specify a duration up to a maximum of 48 hours. + +The command accepts a VM ID as an argument and requires the '--ttl' flag to specify the new TTL value. + +You can also specify the output format (json, table, wide) using the '--output' flag. + +``` +replicated vm update ttl [ID] [flags] +``` + +### Examples + +``` +# Update the TTL of a VM to 2 hours +replicated vm update ttl aaaaa11 --ttl 2h + +# Update the TTL of a VM to 30 minutes +replicated vm update ttl aaaaa11 --ttl 30m +``` + +### Options + +``` + -h, --help help for ttl + --output string The output format to use. One of: json|table|wide (default: table) (default "table") + --ttl string Update TTL which starts from the moment the vm is running (duration, max 48h). +``` + +### Options inherited from parent commands + +``` + --app string The app slug or app id to use in all calls + --id string id of the vm to update (when name is not provided) + --name string Name of the vm to update. + --token string The API token to use to access your app in the Vendor API +``` + +### SEE ALSO + +* [replicated vm update](replicated-cli-vm-update) - Update VM settings. + + + +--- + + +# replicated vm update + +# replicated vm update + +Update VM settings. + +### Synopsis + +The 'vm update' command allows you to modify the settings of a virtual machine. You can update a VM either by providing its ID or by specifying its name. This command supports updating various VM settings, which will be handled by specific subcommands. + +- To update the VM by its ID, use the '--id' flag. 
+- To update the VM by its name, use the '--name' flag.
+
+Subcommands allow for more specific updates, such as TTL.
+
+### Examples
+
+```
+# Update a VM by specifying its ID
+replicated vm update --id aaaaa11 --ttl 12h
+
+# Update a VM by specifying its name
+replicated vm update --name test-vm --ttl 12h
+```
+
+### Options
+
+```
+  -h, --help          help for update
+      --id string     id of the vm to update (when name is not provided)
+      --name string   Name of the vm to update.
+```
+
+### Options inherited from parent commands
+
+```
+      --app string     The app slug or app id to use in all calls
+      --token string   The API token to use to access your app in the Vendor API
+```
+
+### SEE ALSO
+
+* [replicated vm](replicated-cli-vm) - Manage test virtual machines.
+* [replicated vm update ttl](replicated-cli-vm-update-ttl) - Update TTL for a test VM.
+
+
+
+---
+
+
+# replicated vm versions
+
+# replicated vm versions
+
+List available VM versions.
+
+### Synopsis
+
+The 'vm versions' command lists all the available versions of virtual machines that can be provisioned. This includes the available distributions and their respective versions.
+
+- You can filter the list by a specific distribution using the '--distribution' flag.
+- The output can be formatted as a table or in JSON format using the '--output' flag.
+
+```
+replicated vm versions [flags]
+```
+
+### Examples
+
+```
+# List all available VM versions
+replicated vm versions
+
+# List VM versions for a specific distribution (e.g., Ubuntu)
+replicated vm versions --distribution ubuntu
+
+# Display the output in JSON format
+replicated vm versions --output json
+```
+
+### Options
+
+```
+      --distribution string   Distribution to filter by.
+  -h, --help                  help for versions
+      --output string         The output format to use. One of: json|table (default: table) (default "table")
+```
+
+### Options inherited from parent commands
+
+```
+      --app string     The app slug or app id to use in all calls
+      --token string   The API token to use to access your app in the Vendor API
+```
+
+### SEE ALSO
+
+* [replicated vm](replicated-cli-vm) - Manage test virtual machines.
+
+
+
+---
+
+
+# replicated vm
+
+# replicated vm
+
+Manage test virtual machines.
+
+### Synopsis
+
+The 'vm' command allows you to manage and interact with virtual machines (VMs) used for testing purposes. With this command, you can create, list, remove, update, and manage VMs, as well as retrieve information about available VM versions.
+
+### Examples
+
+```
+# Create a single Ubuntu VM
+replicated vm create --distribution ubuntu --version 20.04
+
+# List all VMs
+replicated vm ls
+
+# Remove a specific VM by ID
+replicated vm rm <vm-id>
+
+# Update TTL for a specific VM
+replicated vm update ttl <vm-id> --ttl 24h
+```
+
+### Options
+
+```
+  -h, --help   help for vm
+```
+
+### Options inherited from parent commands
+
+```
+      --app string     The app slug or app id to use in all calls
+      --token string   The API token to use to access your app in the Vendor API
+```
+
+### SEE ALSO
+
+* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated
+* [replicated vm create](replicated-cli-vm-create) - Create one or more test VMs with specified distribution, version, and configuration options.
+* [replicated vm ls](replicated-cli-vm-ls) - List test VMs and their status, with optional filters for start/end time and terminated VMs.
+* [replicated vm port](replicated-cli-vm-port) - Manage VM ports.
+* [replicated vm rm](replicated-cli-vm-rm) - Remove test VM(s) immediately, with options to filter by name, tag, or remove all VMs. +* [replicated vm update](replicated-cli-vm-update) - Update VM settings. +* [replicated vm versions](replicated-cli-vm-versions) - List available VM versions. + + + +--- + + +# Replicated SDK API + +# Replicated SDK API + +The Replicated SDK provides an API that you can use to embed Replicated functionality in your Helm chart application. + +For example, if your application includes a UI where users manage their application instance, then you can use the `/api/v1/app/updates` endpoint to include messages in the UI that encourage users to upgrade when new versions are available. You could also revoke access to the application during runtime when a license expires using the `/api/v1/license/fields` endpoint. + +For more information about how to get started with the Replicated SDK, see [About the Replicated SDK](/vendor/replicated-sdk-overview). + +For information about how to develop against the Replicated SDK API with mock data, see [Developing Against the Replicated SDK](/vendor/replicated-sdk-development). + +## app + +### GET /app/info + +List details about an application instance, including the app name, location of the Helm chart in the Replicated OCI registry, and details about the current application release that the instance is running. + +```bash +GET http://replicated:3000/api/v1/app/info +``` + +Response: + +```json +{ + "instanceID": "8dcdb181-5cc4-458c-ad95-c0a1563cb0cb", + "appSlug": "my-app", + "appName": "My App", + "appStatus": "ready", + "helmChartURL": "oci://registry.replicated.com/my-app/beta/my-helm-chart", + "currentRelease": { + "versionLabel": "0.1.72", + "channelID": "2CBDxNwDH1xyYiIXRTjiB7REjKX", + "channelName": "Beta", + "createdAt": "2023-05-28T16:31:21Z", + "releaseNotes": "", + "helmReleaseName": "my-helm-chart", + "helmReleaseRevision": 5, + "helmReleaseNamespace": "my-helm-chart" + }, + "channelID": "2CBDxNwDH1xyYiIXRTjiB7REjKX", + "channelName": "Beta", + "channelSequence": 4, + "releaseSequence": 30 +} +``` + +### GET /app/status + +List details about an application status, including the list of individual resource states and the overall application state. + +```bash +GET http://replicated:3000/api/v1/app/status +``` + +Response: + +```json +{ + "appStatus": { + "appSlug": "my-app", + "resourceStates": [ + { + "kind": "deployment", + "name": "api", + "namespace": "default", + "state": "ready" + } + ], + "updatedAt": "2024-12-19T23:01:52.207162284Z", + "state": "ready", + "sequence": 268 + } +} +``` + +### GET /app/updates + +List details about the releases that are available to an application instance for upgrade, including the version label, created timestamp, and release notes. + +```bash +GET http://replicated:3000/api/v1/app/updates +``` + +Response: + +```json +[ + { + "versionLabel": "0.1.15", + "createdAt": "2023-05-12T15:48:45.000Z", + "releaseNotes": "Awesome new features!" + } +] +``` + +### GET /app/history + +List details about the releases that an application instance has installed previously. 
+
+```bash
+GET http://replicated:3000/api/v1/app/history
+```
+
+Response:
+
+```json
+{
+  "releases": [
+    {
+      "versionLabel": "0.1.70",
+      "channelID": "2CBDxNwDH1xyYiIXRTjiB7REjKX",
+      "channelName": "Stable",
+      "createdAt": "2023-05-12T17:43:51Z",
+      "releaseNotes": "",
+      "helmReleaseName": "echo-server",
+      "helmReleaseRevision": 2,
+      "helmReleaseNamespace": "echo-server-helm"
+    }
+  ]
+}
+```
+
+### POST /app/custom-metrics
+
+Send custom application metrics. For more information and examples, see [Configuring Custom Metrics](/vendor/custom-metrics).
+
+### PATCH /app/custom-metrics
+
+Send partial custom application metrics for upserting.
+
+```bash
+PATCH http://replicated:3000/api/v1/app/custom-metrics
+```
+Request:
+
+```json
+{
+  "data": {
+    "numProjects": 20
+  }
+}
+```
+
+Response: Status `200` OK
+
+### DELETE /app/custom-metrics/\{metric_name\}
+
+Delete an application custom metric.
+
+```bash
+DELETE http://replicated:3000/api/v1/app/custom-metrics/numProjects
+```
+
+Response: Status `204` No Content
+
+### POST /app/instance-tags
+
+Programmatically set new instance tags or overwrite existing tags. Instance tags are key-value pairs, where the key and the value are strings.
+
+Setting a tag with the `name` key will set the instance's name in the vendor portal.
+
+The `force` parameter defaults to `false`. If `force` is `false`, conflicting pre-existing tags will not be overwritten and the existing tags take precedence. If the `force` parameter is set to `true`, any conflicting pre-existing tags will be overwritten.
+
+To delete a particular tag, set the key's value to an empty string `""`.
+
+```bash
+POST http://replicated:3000/api/v1/app/instance-tags
+```
+Request:
+
+```json
+{
+  "data": {
+    "force": false,
+    "tags": {
+      "name": "my-instance-name",
+      "preExistingKey": "will-not-be-overwritten",
+      "cpuCores": "10",
+      "supportTier": "basic"
+    }
+  }
+}
+```
+
+Response: Status `200` OK
+
+## license
+
+### GET /license/info
+
+List details about the license that was used to install, including the license ID, type, the customer name, and the channel the customer is assigned to.
+
+```bash
+GET http://replicated:3000/api/v1/license/info
+```
+
+Response:
+
+```json
+{
+  "licenseID": "YiIXRTjiB7R...",
+  "appSlug": "my-app",
+  "channelID": "2CBDxNwDH1xyYiIXRTjiB7REjKX",
+  "channelName": "Stable",
+  "customerName": "Example Customer",
+  "customerEmail": "username@example.com",
+  "licenseType": "dev",
+  "licenseSequence": 1,
+  "isAirgapSupported": false,
+  "isGitOpsSupported": false,
+  "isIdentityServiceSupported": false,
+  "isGeoaxisSupported": false,
+  "isSnapshotSupported": false,
+  "isSupportBundleUploadSupported": false,
+  "isSemverRequired": true,
+  "endpoint": "https://replicated.app",
+  "entitlements": {
+    "expires_at": {
+      "title": "Expiration",
+      "description": "License Expiration",
+      "value": "",
+      "valueType": "String"
+    },
+    "numSeats": {
+      "title": "Number of Seats",
+      "value": 10,
+      "valueType": "Integer"
+    }
+  }
+}
+```
+
+### GET /license/fields
+
+List details about all the fields in the license that was used to install, including the field names, descriptions, values, and signatures.
+ +```bash +GET http://replicated:3000/api/v1/license/fields +``` + +Response: + +```json +{ + "expires_at": { + "name": "expires_at", + "title": "Expiration", + "description": "License Expiration", + "value": "2023-05-30T00:00:00Z", + "valueType": "String", + "signature": { + "v1": "Vs+W7+sF0RA6UrFEJcyHAbC5YCIT67hdsDdqtJTRBd4ZitTe4pr1D/SZg2k0NRIozrBP1mXuTgjQgeI8PyQJc/ctQwZDikIEKFW0sVv0PFPQV7Uf9fy7wRgadfUxkagcCS8O6Tpcm4WqlhEcgiJGvPBki3hZLnMO9Ol9yOepZ7UtrUMVsBUKwcTJWCytpFpvvOLfSNoHxMnPuSgpXumbHZjvdXrJoJagoRDXPiXXKGh02DOr58ncLofYqPzze+iXWbE8tqdFBZc72lLayT1am3MN0n3ejCNWNeX9+CiBJkqMqLLkjN4eugUmU/gBiDtJgFUB2gq8ejVVcohqos69WA==" + } + }, + "numSeats": { + "name": "numSeats", + "title": "Number of Seats", + "value": 10, + "valueType": "Integer", + "signature": { + "v1": "UmsYlVr4+Vg5TWsJV6goagWUM4imdj8EUUcdau7wIzfcU0MuZnv3UNVlwVE/tCuROCMcbei6ygjm4j5quBdkAGUyq86BCtohg/SqRsgVoNV6BN0S+tnqJ7w4/nqRVBc2Gsn7wTYNXiszLMkmfeNOrigLgsrtaGJmZ4IsczwI1V5Tr+AMAgrACL/UyLg78Y6EitKFW4qvJ9g5Q8B3uVmT+h9xTBxJFuKTQS6qFcDx9XCu+bKqoSmJDZ8lwgwpJDAiBzIhxiAd66lypHX9cpOg5A7cKEW+FLdaBKQdNRcPHQK2O9QwFj/NKEeCJEufuD3OeV8MSbN2PCehMzbj7tXSww==" + } + } +} +``` + +### GET /license/fields/\{field_name\} + +List details about one of the fields in the license that was used to install, including the field name, description, value, and signature. + +```bash +GET http://replicated:3000/api/v1/license/fields/\{field_name\} +``` + +Example request: + +```bash +curl replicated:3000/api/v1/license/fields/expires_at +``` + +Response: + +```json +{ + "name": "expires_at", + "title": "Expiration", + "description": "License Expiration", + "value": "2023-05-30T00:00:00Z", + "valueType": "String", + "signature": { + "v1": "c6rsImpilJhW0eK+Kk37jeRQvBpvWgJeXK2MD0YBlIAZEs1zXpmvwLdfcoTsZMOj0lZbxkPN5dPhEPIVcQgrzfzwU5HIwQbwc2jwDrLBQS4hGOKdxOWXnBUNbztsHXMqlAYQsmAhspRLDhBiEoYpFV/8oaaAuNBrmRu/IVAW6ahB4KtP/ytruVdBup3gn1U/uPAl5lhzuBifaW+NDFfJxAXJrhdTxMBxzfdKa6dGmlGu7Ou/xqDU1bNF3AuWoP3C78GzSBQrD1ZPnu/d+nuEjtakKSX3EK6VUisNucm8/TFlEVKUuX7hex7uZ9Of+UgS1GutQXOhXzfMZ7u+0zHXvQ==" + } +} +``` + +## Integration + +### GET /api/v1/integration/status + +Get status of Development Mode. When this mode is enabled, the `app` API will use mock data. This value cannot be set programmatically. It is controlled by the installed license. + +```json +{ + "isEnabled": true +} +``` + +### GET /api/v1/integration/mock-data + +Get mock data that is used when Development Mode is enabled. 
```json
{
  "appStatus": "ready",
  "helmChartURL": "oci://registry.replicated.com/dev-app/dev-channel/dev-parent-chart",
  "currentRelease": {
    "versionLabel": "0.1.3",
    "releaseNotes": "release notes 0.1.3",
    "createdAt": "2023-05-23T20:58:07Z",
    "deployedAt": "2023-05-23T21:58:07Z",
    "helmReleaseName": "dev-parent-chart",
    "helmReleaseRevision": 3,
    "helmReleaseNamespace": "default"
  },
  "deployedReleases": [
    {
      "versionLabel": "0.1.1",
      "releaseNotes": "release notes 0.1.1",
      "createdAt": "2023-05-21T20:58:07Z",
      "deployedAt": "2023-05-21T21:58:07Z",
      "helmReleaseName": "dev-parent-chart",
      "helmReleaseRevision": 1,
      "helmReleaseNamespace": "default"
    },
    {
      "versionLabel": "0.1.2",
      "releaseNotes": "release notes 0.1.2",
      "createdAt": "2023-05-22T20:58:07Z",
      "deployedAt": "2023-05-22T21:58:07Z",
      "helmReleaseName": "dev-parent-chart",
      "helmReleaseRevision": 2,
      "helmReleaseNamespace": "default"
    },
    {
      "versionLabel": "0.1.3",
      "releaseNotes": "release notes 0.1.3",
      "createdAt": "2023-05-23T20:58:07Z",
      "deployedAt": "2023-05-23T21:58:07Z",
      "helmReleaseName": "dev-parent-chart",
      "helmReleaseRevision": 3,
      "helmReleaseNamespace": "default"
    }
  ],
  "availableReleases": [
    {
      "versionLabel": "0.1.4",
      "releaseNotes": "release notes 0.1.4",
      "createdAt": "2023-05-24T20:58:07Z",
      "deployedAt": "2023-05-24T21:58:07Z",
      "helmReleaseName": "",
      "helmReleaseRevision": 0,
      "helmReleaseNamespace": ""
    },
    {
      "versionLabel": "0.1.5",
      "releaseNotes": "release notes 0.1.5",
      "createdAt": "2023-06-01T20:58:07Z",
      "deployedAt": "2023-06-01T21:58:07Z",
      "helmReleaseName": "",
      "helmReleaseRevision": 0,
      "helmReleaseNamespace": ""
    }
  ]
}
```

### POST /api/v1/integration/mock-data

Programmatically set the mock data that is used when Development Mode is enabled. The payload overwrites the existing mock data, and any data that is not included in the payload is removed. For example, to remove release data, include empty arrays:

```bash
POST http://replicated:3000/api/v1/integration/mock-data
```

Request:

```json
{
  "appStatus": "ready",
  "helmChartURL": "oci://registry.replicated.com/dev-app/dev-channel/dev-parent-chart",
  "currentRelease": {
    "versionLabel": "0.1.3",
    "releaseNotes": "release notes 0.1.3",
    "createdAt": "2023-05-23T20:58:07Z",
    "deployedAt": "2023-05-23T21:58:07Z",
    "helmReleaseName": "dev-parent-chart",
    "helmReleaseRevision": 3,
    "helmReleaseNamespace": "default"
  },
  "deployedReleases": [],
  "availableReleases": []
}
```

Response: Status `201` Created

## Examples

This section provides example use cases for the Replicated SDK API.

### Support Update Checks in Your Application

The `api/v1/app/updates` endpoint returns details about new releases that are available to an instance for upgrade. You could use this endpoint to allow your users to easily check for available updates from within your application.

Additionally, to make it easier for users to upgrade to new versions of your application, you could provide customer-specific upgrade instructions in your application by injecting values returned by the `/api/v1/license/info` and `/api/v1/app/info` endpoints.
The following examples show how you could include a page in your application that lists available updates and also provides customer-specific upgrade instructions:

![a user interface showing a list of available releases](/images/slackernews-update-page.png)
[View a larger version of this image](/images/slackernews-update-page.png)

![user-specific application upgrade instructions displayed in a dialog](/images/slackernews-update-instructions.png)
[View a larger version of this image](/images/slackernews-update-instructions.png)

To use the SDK API to check for available application updates and provide customer-specific upgrade instructions:

1. From your application, call the `api/v1/app/updates` endpoint to return available updates for the application instance. Use the response to display available upgrades for the customer.

    ```bash
    curl replicated:3000/api/v1/app/updates
    ```

    **Example response**:

    ```json
    [
      {
        "versionLabel": "0.1.15",
        "createdAt": "2023-05-12T15:48:45.000Z",
        "releaseNotes": "Awesome new features!"
      }
    ]
    ```

1. For each available release, add logic that displays the required upgrade commands with customer-specific values. To upgrade, users must first run `helm registry login` to authenticate to the Replicated registry. Then, they can run `helm upgrade`:

    1. Inject customer-specific values into the `helm registry login` command:

        ```bash
        helm registry login REGISTRY_DOMAIN --username EMAIL --password LICENSE_ID
        ```

        The `helm registry login` command requires the following values:

        * `REGISTRY_DOMAIN`: The domain for the registry where your Helm chart is pushed. The registry domain is either `registry.replicated.com` or a custom domain that you added.

        * `EMAIL`: The customer email address is available from the `/api/v1/license/info` endpoint in the `customerEmail` field.

        * `LICENSE_ID`: The customer license ID is available from the `/api/v1/license/info` endpoint in the `licenseID` field.

    1. Inject customer-specific values into the `helm upgrade` command:

        ```bash
        helm upgrade -n NAMESPACE RELEASE_NAME HELM_CHART_URL
        ```

        The following describes where the values in the `helm upgrade` command are available:

        * `NAMESPACE`: The release namespace is available from the `/api/v1/app/info` endpoint in the `currentRelease.helmReleaseNamespace` field.

        * `RELEASE_NAME`: The release name is available from the `/api/v1/app/info` endpoint in the `currentRelease.helmReleaseName` field.

        * `HELM_CHART_URL`: The URL of the Helm chart at the OCI registry is available from the `/api/v1/app/info` endpoint in the `helmChartURL` field.

### Revoke Access at Runtime When a License Expires

You can use the Replicated SDK API `/api/v1/license/fields/{field_name}` endpoint to revoke a customer's access to your application at runtime when their license expires.

To revoke access to your application when a license expires:

1. In the vendor portal, click **Customers**. Select the target customer and click the **Manage customer** tab. Alternatively, click **+ Create customer** to create a new customer.

1. Under **Expiration policy**:

   1. Enable **Customer's license has an expiration date**.

   1. For **When does this customer expire?**, use the calendar to set an expiration date for the license.

   <img alt="expiration policy field in the manage customer page" src="/images/customer-expiration-policy.png" width="500px"/>

   [View a larger version of this image](/images/customer-expiration-policy.png)

1. Install the Replicated SDK as a standalone component in your cluster. This is called _integration mode_. Installing in integration mode allows you to develop locally against the SDK API without needing to create releases for your application in the vendor portal. See [Developing Against the SDK API](/vendor/replicated-sdk-development).

1. In your application, use the `/api/v1/license/fields/expires_at` endpoint to get the `expires_at` field that you defined in the previous step.

   **Example:**

   ```bash
   curl replicated:3000/api/v1/license/fields/expires_at
   ```

   ```json
   {
     "name": "expires_at",
     "title": "Expiration",
     "description": "License Expiration",
     "value": "2023-05-30T00:00:00Z",
     "valueType": "String",
     "signature": {
       "v1": "c6rsImpilJhW0eK+Kk37jeRQvBpvWgJeXK2M..."
     }
   }
   ```

1. Add logic to your application to revoke access if the current date and time is more recent than the expiration date of the license. For one way to structure this check, see the sketch that follows this procedure.

1. (Recommended) Use signature verification in your application to ensure the integrity of the license field. See [Verifying License Field Signatures with the Replicated SDK API](/vendor/licenses-verify-fields-sdk-api).
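The following is a minimal sketch of that expiration check, not taken from the SDK documentation. It assumes the SDK API is reachable in-cluster at `replicated:3000`, and it treats an empty `value` as a license with no expiration date, matching the empty `expires_at` value shown in the `/license/info` example above:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

// licenseField models the subset of the /api/v1/license/fields/{field_name}
// response that this check needs.
type licenseField struct {
	Value string `json:"value"`
}

// licenseExpired reports whether the license expiration date has passed.
func licenseExpired() (bool, error) {
	resp, err := http.Get("http://replicated:3000/api/v1/license/fields/expires_at")
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()

	var field licenseField
	if err := json.NewDecoder(resp.Body).Decode(&field); err != nil {
		return false, err
	}
	// An empty value means no expiration date is set for the license.
	if field.Value == "" {
		return false, nil
	}
	expiresAt, err := time.Parse(time.RFC3339, field.Value)
	if err != nil {
		return false, err
	}
	return time.Now().After(expiresAt), nil
}

func main() {
	expired, err := licenseExpired()
	if err != nil {
		fmt.Println("could not check license:", err)
		return
	}
	if expired {
		fmt.Println("license expired: revoke access")
	}
}
```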
+ +<UseCases/> + +All functionality of the Go templating language, including if statements, loops, and variables, is supported with KOTS template functions. For more information about the Go library, see [text/template](https://golang.org/pkg/text/template/) in the Go documentation. + +### Supported File Types + +You can use KOTS template functions in Kubernetes manifest files for applications deployed by KOTS, such as: +* Custom resources in the `kots.io` API group like Application, Config, or HelmChart +* Custom resources in other API groups like Preflight, SupportBundle, or Backup +* Kubernetes objects like Deployments, Services, Secrets, or ConfigMaps +* Kubernetes Operators + +### Limitations + +* Not all fields in the Config and Application custom resources support templating. For more information, see [Application](/reference/custom-resource-application) and [Item Properties](/reference/custom-resource-config#item-properties) in _Config_. + +* Templating is not supported in the [Embedded Cluster Config](/reference/embedded-config) resource. + +* KOTS template functions are not directly supported in Helm charts. For more information, see [Helm Charts](#helm-charts) below. + +### Helm Charts + +KOTS template functions are _not_ directly supported in Helm charts. However, the HelmChart custom resource provides a way to map values rendered by KOTS template functions to Helm chart values. This allows you to use KOTS template functions with Helm charts without making changes to those Helm charts. + +For information about how to map values from the HelmChart custom resource to Helm chart `values.yaml` files, see [Setting Helm Chart Values with KOTS](/vendor/helm-optional-value-keys). + +### Template Function Rendering + +During application installation and upgrade, KOTS templates all Kubernetes manifest files in a release (except for the Config custom resource) at the same time during a single process. + +For the [Config](/reference/custom-resource-config) custom resource, KOTS templates each item separately so that config items can be used in templates for other items. For examples of this, see [Using Conditional Statements in Configuration Fields](/vendor/config-screen-conditional) and [Template Function Examples](/reference/template-functions-examples). + +## Syntax {#syntax} + +The KOTS template function syntax supports the following functionally equivalent delimiters: +* [`repl{{ ... }}`](#syntax-integer) +* [`{{repl ... }}`](#syntax-string) + +### Syntax Requirements + +KOTS template function syntax has the following requirements: +* In both the `repl{{ ... }}` and `{{repl ... }}` syntaxes, there must be no whitespace between `repl` and the `{{` delimiter. +* The manifests where KOTS template functions are used must be valid YAML. This is because the YAML manifests are linted before KOTS template functions are rendered. + +### `repl{{ ... }}` {#syntax-integer} + +This syntax is recommended for most use cases. + +Any quotation marks wrapped around this syntax are stripped during rendering. If you need the rendered value to be quoted, you can pipe into quote (`| quote`) or use the [`{{repl ... }}`](#syntax-string) syntax instead. 
+ +#### Integer Example + +```yaml +http: + port: repl{{ ConfigOption "load_balancer_port" }} +``` +```yaml +http: + port: 8888 +``` + +#### Example with `| quote` + +```yaml +customTag: repl{{ ConfigOption "tag" | quote }} +``` +```yaml +customTag: 'key: value' +``` + +#### If-Else Example + +```yaml +http: + port: repl{{ if ConfigOptionEquals "ingress_type" "load_balancer" }}repl{{ ConfigOption "load_balancer_port" }}repl{{ else }}8081repl{{ end }} +``` +```yaml +http: + port: 8081 +``` + +For more examples, see [Template Function Examples](/reference/template-functions-examples). + +### `{{repl ... }}` {#syntax-string} + +This syntax can be useful when having the delimiters outside the template function improves readability of the YAML, such as in multi-line statements or if-else statements. + +To use this syntax at the beginning of a value in YAML, it _must_ be wrapped in quotes because you cannot start a YAML value with the `{` character and manifests consumed by KOTS must be valid YAML. When this syntax is wrapped in quotes, the rendered value is also wrapped in quotes. + +#### Example With Quotes + +The following example is wrapped in quotes because it is used at the beginning of a statement in YAML: + +```yaml +customTag: '{{repl ConfigOption "tag" }}' +``` +```yaml +customTag: 'key: value' +``` + +#### If-Else Example +```yaml +my-service: + type: '{{repl if ConfigOptionEquals "ingress_type" "load_balancer" }}LoadBalancer{{repl else }}ClusterIP{{repl end }}' +``` +```yaml +my-service: + type: 'LoadBalancer' +``` + +For more examples, see [Template Function Examples](/reference/template-functions-examples). + +## Contexts {#contexts} + +KOTS template functions are grouped into different contexts, depending on the phase of the application lifecycle when the function is available and the context of the data that is provided. + +### Static Context + +The context necessary to render the static template functions is always available. + +The static context also includes the Masterminds Sprig function library. For more information, see [Sprig Function Documentation](http://masterminds.github.io/sprig/) on the sprig website. + +For a list of all KOTS template functions available in the static context, see [Static context](template-functions-static-context). + +### Config Context + +Template functions in the config context are available when rendering an application that includes a Config custom resource. +At execution time, template functions in the config context also can use the static context functions. + +For a list of all KOTS template functions available in the config context, see [Config context](template-functions-config-context). + +### License Context + +Template functions in the license context have access to customer license and version data. + +For a list of all KOTS template functions available in the license context, see [License context](template-functions-license-context). + +### kURL Context + +Template functions in the kURL context have access to information about applications installed in embedded clusters created by Replicated kURL. + +For a list of all KOTS template functions available in the kURL context, see [kURL context](template-functions-kurl-context). + +### Identity Context + +Template functions in the Identity context have access to Replicated identity service information. + +For a list of all KOTS template functions available in the identity context, see [Identity context](template-functions-identity-context). 
---


# Config Context

## ConfigOption

```go
func ConfigOption(optionName string) string
```

Returns the value of the specified config option as a string.

For information about the config screen and associated options, see [Config](custom-resource-config) in the _Custom Resources_ section.

```yaml
'{{repl ConfigOption "hostname" }}'
```

For `file` config options, `ConfigOption` returns the base64 **encoded** value of the file.

```yaml
'{{repl ConfigOption "ssl_key"}}'
```

To use files in a Secret, use `ConfigOption`:
```yaml
apiVersion: v1
kind: Secret
metadata:
  name: tls-secret
type: kubernetes.io/tls
data:
  tls.crt: '{{repl ConfigOption "tls_certificate_file" }}'
  tls.key: '{{repl ConfigOption "tls_private_key_file" }}'
```

For more information about using TLS certificates, see [Using TLS Certificates](../vendor/packaging-using-tls-certs).

## ConfigOptionData

```go
func ConfigOptionData(optionName string) string
```

`ConfigOptionData` returns the base64 **decoded** value of a `file` config option.

```yaml
'{{repl ConfigOptionData "ssl_key"}}'
```

To use files in a ConfigMap, use `ConfigOptionData`:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: tls-config
data:
  tls.crt: |
    repl{{- ConfigOptionData "tls_certificate_file" | nindent 4 }}

  tls.key: |
    repl{{- ConfigOptionData "tls_private_key_file" | nindent 4 }}
```

## ConfigOptionFilename

```go
func ConfigOptionFilename(optionName string) string
```

`ConfigOptionFilename` returns the filename associated with a `file` config option.
It returns an empty string if used with other config option types.

```yaml
'{{repl ConfigOptionFilename "pom_file"}}'
```

As an example, if you have the following Config spec defined:

```yaml
apiVersion: kots.io/v1beta1
kind: Config
metadata:
  name: my-application
spec:
  groups:
    - name: java_settings
      title: Java Settings
      description: Configures the Java Server build parameters
      items:
        - name: pom_file
          type: file
          required: true
```

You can use `ConfigOptionFilename` in a Pod spec to mount a file like so:
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: configmap-demo-pod
spec:
  containers:
    - name: some-java-app
      image: busybox
      command: ["sh"]
      args:
        - "-c"
        - 'cat /config/{{repl ConfigOptionFilename "pom_file" }}'
      volumeMounts:
        - name: config
          mountPath: "/config"
          readOnly: true
  volumes:
    - name: config
      configMap:
        name: demo-configmap
        items:
          - key: data_key_one
            path: repl{{ ConfigOptionFilename "pom_file" }}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: demo-configmap
data:
  data_key_one: repl{{ ConfigOptionData "pom_file" }}
```

## ConfigOptionEquals

```go
func ConfigOptionEquals(optionName string, expectedValue string) bool
```

Returns true if the configuration option value is equal to the supplied value.

```yaml
'{{repl ConfigOptionEquals "http_enabled" "1" }}'
```

## ConfigOptionNotEquals

```go
func ConfigOptionNotEquals(optionName string, expectedValue string) bool
```

Returns true if the configuration option value is not equal to the supplied value.

```yaml
'{{repl ConfigOptionNotEquals "http_enabled" "1" }}'
```

## LocalRegistryAddress

```go
func LocalRegistryAddress() string
```

Returns the local registry host or host/namespace that's configured.
This always returns everything before the image name and tag.
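For example, a sketch that uses the returned value to build a full image reference (the `my-app` image name and tag are placeholders):

```yaml
image: '{{repl LocalRegistryAddress }}/my-app:1.0.0'
```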
## LocalRegistryHost

```go
func LocalRegistryHost() string
```

Returns the host of the local registry that the user configured, including the port if one is specified. For air gap installations with Replicated Embedded Cluster or Replicated kURL, LocalRegistryHost returns the host of the built-in registry.

## LocalRegistryNamespace

```go
func LocalRegistryNamespace() string
```

Returns the namespace of the local registry that the user configured. For air gap installations with Embedded Cluster or kURL, LocalRegistryNamespace returns the namespace of the built-in registry.

## LocalImageName

```go
func LocalImageName(remoteImageName string) string
```

Given a `remoteImageName`, LocalImageName returns the image name rewritten so that the image can be pulled in the customer's environment.

A common use case for the `LocalImageName` function is to ensure that a Kubernetes Operator can determine the names of container images on Pods created at runtime. For more information, see [Referencing Images](/vendor/operator-referencing-images) in the _Packaging a Kubernetes Operator Application_ section.

`LocalImageName` rewrites the `remoteImageName` in one of the following ways, depending on whether a private registry is configured and whether the image must be proxied:

* If there is a private registry configured in the customer's environment, such as in air gap environments, rewrite `remoteImageName` to reference the private registry locally. For example, rewrite `elasticsearch:7.6.0` as `registry.somebigbank.com/my-app/elasticsearch:7.6.0`.

* If there is no private registry configured in the customer's environment, but the image must be proxied, rewrite `remoteImageName` so that the image can be pulled through the proxy registry. For example, rewrite `"quay.io/orgname/private-image:v1.2.3"` as `proxy.replicated.com/proxy/app-name/quay.io/orgname/private-image:v1.2.3`.

* If there is no private registry configured in the customer's environment and the image does not need to be proxied, return `remoteImageName` without changes.

For more information about the Replicated proxy registry, see [About the Proxy Registry](/vendor/private-images-about).
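For example, a sketch that resolves an image reference for a workload created at runtime (the `quay.io/my-org/worker:1.0.0` image is a placeholder):

```yaml
env:
  - name: WORKER_IMAGE
    value: repl{{ LocalImageName "quay.io/my-org/worker:1.0.0" }}
```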
## LocalRegistryImagePullSecret

```go
func LocalRegistryImagePullSecret() string
```

Returns the base64 encoded local registry image pull secret value.
This is often needed when an operator is deploying images to a namespace that is not managed by Replicated KOTS.
Image pull secrets must be present in the namespace of the pod.

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: my-image-pull-secret
  namespace: my-namespace
type: kubernetes.io/dockerconfigjson
data:
  .dockerconfigjson: '{{repl LocalRegistryImagePullSecret }}'
---
apiVersion: v1
kind: Pod
metadata:
  name: dynamic-pod
  namespace: my-namespace
spec:
  containers:
    - image: '{{repl LocalImageName "registry.replicated.com/my-app/my-image:abcdef" }}'
      name: my-container
  imagePullSecrets:
    - name: my-image-pull-secret
```

## ImagePullSecretName

```go
func ImagePullSecretName() string
```

Returns the name of the image pull secret that can be added to pod specs that use private images.
The secret is automatically created in all application namespaces.
It contains authentication information for any private registry used with the application.

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-deployment
spec:
  template:
    spec:
      imagePullSecrets:
        - name: repl{{ ImagePullSecretName }}
```

## HasLocalRegistry

```go
func HasLocalRegistry() bool
```

Returns true if the environment is configured to rewrite images to a local registry.
HasLocalRegistry is always true for air gap installations. HasLocalRegistry is true in online installations if the user pushed images to a local registry.

---


import IntegerComparison from "../partials/template-functions/_integer-comparison.mdx"
import StringComparison from "../partials/template-functions/_string-comparison.mdx"
import NeComparison from "../partials/template-functions/_ne-comparison.mdx"
import GoSprig from "../partials/template-functions/_go-sprig.mdx"
import UseCases from "../partials/template-functions/_use-cases.mdx"

# Template Function Examples

This topic provides examples of how to use Replicated KOTS template functions in various common use cases. For more information about working with KOTS template functions, including the supported syntax and the types of files where KOTS template functions can be used, see [About Template Functions](template-functions-about).

## Overview

<GoSprig/>

<UseCases/>

For examples demonstrating these use cases and more, see the sections below.

## Comparison Examples

This section includes examples of how to use KOTS template functions to compare different types of data.

### Boolean Comparison

Boolean values can be used in comparisons to evaluate if a given statement is true or false. Because many KOTS template functions return string values, comparing boolean values often requires using the KOTS [ParseBool](/reference/template-functions-static-context#parsebool) template function to return the boolean represented by the string.

One common use case for working with boolean values is to check that a given field is present in the customer's license. For example, you might need to show a configuration option on the KOTS Admin Console **Config** page only when the customer's license has a certain entitlement.

The following example creates a conditional statement in the KOTS Config custom resource that evaluates to true when a specified license field is present in the customer's license _and_ the customer enables a specified configuration option on the Admin Console **Config** page.

```yaml
# KOTS Config custom resource
apiVersion: kots.io/v1beta1
kind: Config
metadata:
  name: config-sample
spec:
  groups:
  - name: example_group
    title: Example Config
    items:
    - name: radio_example
      title: Select One
      type: radio
      items:
      - name: option_one
        title: Option One
      - name: option_two
        title: Option Two
    - name: conditional_item
      title: Conditional Item
      type: text
      # Display this item only when the customer enables the option_one config field *and*
      # has the feature-1 entitlement in their license
      when: repl{{ and (LicenseFieldValue "feature-1" | ParseBool) (ConfigOptionEquals "radio_example" "option_one")}}
```

This example uses the following KOTS template functions:
* [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue) to return the string value of a boolean type license field named `feature-1`
  :::note
  The LicenseFieldValue template function always returns a string, regardless of the license field type.
+ ::: +* [ParseBool](/reference/template-functions-static-context#parsebool) to convert the string returned by the LicenseFieldValue template function to a boolean +* [ConfigOptionEquals](/reference/template-functions-config-context#configoptionequals) to return a boolean that evaluates to true if the configuration option value is equal to the supplied value + +### Integer Comparison + +Integer values can be compared using operators such as greater than, less than, equal to, and so on. Because many KOTS template functions return string values, working with integer values often requires using another function to return the integer represented by the string, such as: +* KOTS [ParseInt](/reference/template-functions-static-context#parseint), which returns the integer value represented by the string with the option to provide a `base` other than 10 +* Sprig [atoi](https://masterminds.github.io/sprig/conversion.html), which is equivalent to ParseInt(s, 10, 0), converted to type integer + +A common use case for comparing integer values with KOTS template functions is to display different configuration options on the KOTS Admin Console **Config** page depending on integer values from the customer's license. For example, licenses might include an entitlement that defines the number of seats the customer is entitled to. In this case, it can be useful to conditionally display or hide certain fields on the **Config** page depending on the customer's team size. + +<IntegerComparison/> + +### String Comparison + +A common use case for string comparison is to compare the rendered value of a KOTS template function against a string to conditionally show or hide fields on the KOTS Admin Console **Config** page depending on details about the customer's environment. For example, a string comparison can be used to check the Kubernetes distribution of the cluster where an application is deployed. + +<StringComparison/> + +### Not Equal To Comparison + +It can be useful to compare the rendered value of a KOTS template function against another value to check if the two values are different. For example, you can conditionally show fields on the KOTS Admin Console **Config** page only when the Kubernetes distribution of the cluster where the application is deployed is _not_ [Replicated embedded cluster](/vendor/embedded-overview). + +<NeComparison/> + +### Logical AND Comparison + +Logical comparisons such as AND, OR, and NOT can be used with KOTS template functions. A common use case for logical AND comparisons is to construct more complex conditional statements where it is necessary that two different conditions are both true. + +The following example shows how to use an `and` operator that evaluates to true when two different configuration options on the Admin Console **Config** page are both enabled. This example uses the KOTS [ConfigOptionEquals](/reference/template-functions-config-context#configoptionequals) template function to return a boolean that evaluates to true if the configuration option value is equal to the supplied value. 
+ +```yaml +# KOTS Config custom resource +apiVersion: kots.io/v1beta1 +kind: Config +metadata: + name: config-sample +spec: + groups: + - name: example_group + title: Example Config + items: + - name: radio_example + title: Select One Example + type: radio + items: + - name: option_one + title: Option One + - name: option_two + title: Option Two + - name: boolean_example + title: Boolean Example + type: bool + default: "0" + - name: conditional_item + title: Conditional Item + type: text + # Display this item only when *both* specified config options are enabled + when: repl{{ and (ConfigOptionEquals "radio_example" "option_one") (ConfigOptionEquals "boolean_example" "1")}} +``` + +As shown below, when both `Option One` and `Boolean Example` are selected, the conditional statement evaluates to true and the `Conditional Item` field is displayed: + +<img alt="Conditional item displayed" src="/images/conditional-item-true.png" width="550px"/> + +[View a larger version of this image](/images/conditional-item-true.png) + +Alternatively, if either `Option One` or `Boolean Example` is not selected, then the conditional statement evaluates to false and the `Conditional Item` field is not displayed: + +<img alt="Option two selected" src="/images/conditional-item-false-option-two.png" width="550px"/> + +[View a larger version of this image](/images/conditional-item-false-option-two.png) + +<img alt="Boolean field deselected" src="/images/conditional-item-false-boolean.png" width="550px"/> + +[View a larger version of this image](/images/conditional-item-false-boolean.png) + +## Conditional Statement Examples + +This section includes examples of using KOTS template functions to construct conditional statements. Conditional statements can be used with KOTS template functions to render different values depending on a given condition. + +### If-Else Statements + +A common use case for if-else statements with KOTS template functions is to set values for resources or objects deployed by your application, such as custom annotations or service types, based on user-specific data. + +This section includes examples of both single line and multi-line if-else statements. Using multi-line formatting can be useful to improve the readability of YAML files when longer or more complex if-else statements are needed. + +Multi-line if-else statements can be constructed using YAML block scalars and block chomping characters to ensure the rendered result is valid YAML. A _folded_ block scalar style is denoted using the greater than (`>`) character. With the folded style, single line breaks in the string are treated as a space. Additionally, the block chomping minus (`-`) character is used to remove all the line breaks at the end of a string. For more information about working with these characters, see [Block Style Productions](https://yaml.org/spec/1.2.2/#chapter-8-block-style-productions) in the YAML documentation. + +:::note +For Helm-based applications that need to use more complex or nested if-else statements, you can alternatively use templating within your Helm chart `templates` rather than in the KOTS HelmChart custom resource. For more information, see [If/Else](https://helm.sh/docs/chart_template_guide/control_structures/#ifelse) in the Helm documentation. 
+::: + +#### Single Line + +The following example shows if-else statements used in the KOTS HelmChart custom resource `values` field to render different values depending on if the user selects a load balancer or an ingress controller as the ingress type for the application. This example uses the KOTS [ConfigOptionEquals](/reference/template-functions-config-context#configoptionequals) template function to return a boolean that evaluates to true if the configuration option value is equal to the supplied value. + +```yaml +# KOTS HelmChart custom resource +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: my-app +spec: + chart: + name: my-app + chartVersion: 0.23.0 + values: + services: + my-service: + enabled: true + appName: ["my-app"] + # Render the service type based on the user's selection + # '{{repl ...}}' syntax is used for `type` to improve readability of the if-else statement and render a string + type: '{{repl if ConfigOptionEquals "ingress_type" "load_balancer" }}LoadBalancer{{repl else }}ClusterIP{{repl end }}' + ports: + http: + enabled: true + # Render the HTTP port for the service depending on the user's selection + # repl{{ ... }} syntax is used for `port` to render an integer value + port: repl{{ if ConfigOptionEquals "ingress_type" "load_balancer" }}repl{{ ConfigOption "load_balancer_port" }}repl{{ else }}8081repl{{ end }} + protocol: HTTP + targetPort: 8081 +``` + +#### Multi-Line in KOTS HelmChart Values + +The following example uses a multi-line if-else statement in the KOTS HelmChart custom resource to render the path to the Replicated SDK image depending on if the user pushed images to a local private registry. + +This example uses the following KOTS template functions: +* [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry) to return true if the environment is configured to rewrite images to a local registry +* [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost) to return the local registry host configured by the user +* [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace) to return the local registry namespace configured by the user + +:::note +This example uses the `{{repl ...}}` syntax rather than the `repl{{ ... }}` syntax to improve readability in the YAML file. However, both syntaxes are supported for this use case. For more information, see [Syntax](/reference/template-functions-about#syntax) in _About Template Functions_. +::: + +```yaml +# KOTS HelmChart custom resource +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: samplechart +spec: + values: + images: + replicated-sdk: >- + {{repl if HasLocalRegistry -}} + {{repl LocalRegistryHost }}/{{repl LocalRegistryNamespace }}/replicated-sdk:1.0.0-beta.29 + {{repl else -}} + docker.io/replicated/replicated-sdk:1.0.0-beta.29 + {{repl end}} +``` + +Given the example above, if the user is _not_ using a local registry, then the `replicated-sdk` value in the Helm chart is set to the location of the image on the default docker registry, as shown below: + +```yaml +# Helm chart values file + +images: + replicated-sdk: 'docker.io/replicated/replicated-sdk:1.0.0-beta.29' +``` + +#### Multi-Line in Secret Object + +The following example uses multi-line if-else statements in a Secret object deployed by KOTS to conditionally set the database hostname, port, username, and password depending on if the customer uses the database embedded with the application or brings their own external database. 
This example uses the following KOTS template functions:
* [ConfigOptionEquals](/reference/template-functions-config-context#configoptionequals) to return a boolean that evaluates to true if the configuration option value is equal to the supplied value
* [ConfigOption](/reference/template-functions-config-context#configoption) to return the user-supplied value for the specified configuration option
* [Base64Encode](/reference/template-functions-static-context#base64encode) to encode the string with base64

:::note
This example uses the `{{repl ...}}` syntax rather than the `repl{{ ... }}` syntax to improve readability in the YAML file. However, both syntaxes are supported for this use case. For more information, see [Syntax](/reference/template-functions-about#syntax) in _About Template Functions_.
:::

```yaml
# Postgres Secret
apiVersion: v1
kind: Secret
metadata:
  name: postgres
data:
  # Render the value for the database hostname depending on if an embedded or
  # external db is used.
  # Also, base64 encode the rendered value.
  DB_HOST: >-
    {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}}
    {{repl Base64Encode "postgres" }}
    {{repl else -}}
    {{repl ConfigOption "external_postgres_host" | Base64Encode }}
    {{repl end}}
  DB_PORT: >-
    {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}}
    {{repl Base64Encode "5432" }}
    {{repl else -}}
    {{repl ConfigOption "external_postgres_port" | Base64Encode }}
    {{repl end}}
  DB_USER: >-
    {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}}
    {{repl Base64Encode "postgres" }}
    {{repl else -}}
    {{repl ConfigOption "external_postgres_user" | Base64Encode }}
    {{repl end}}
  DB_PASSWORD: >-
    {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}}
    {{repl ConfigOption "embedded_postgres_password" | Base64Encode }}
    {{repl else -}}
    {{repl ConfigOption "external_postgres_password" | Base64Encode }}
    {{repl end}}
```

### Ternary Operators

Ternary operators are useful for templating strings where certain values within the string must be rendered differently depending on a given condition. Ternary operators are a good fit when only a small portion of a string needs to be rendered conditionally, whereas if-else statements are better suited to rendering entirely different values. For example, a common use case for ternary operators is to template the path to an image repository based on user-supplied values.

The following example uses ternary operators to render the registry and repository for a private nginx image depending on whether a local image registry is used. This example uses the following KOTS template functions:
* [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry) to return true if the environment is configured to rewrite images to a local registry
* [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost) to return the local registry host configured by the user
* [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace) to return the local registry namespace configured by the user

```yaml
# KOTS HelmChart custom resource
apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: samplechart
spec:
  values:
    image:
      # If a local registry is configured, use the local registry host.
      # Otherwise, use proxy.replicated.com
      registry: repl{{ HasLocalRegistry | ternary LocalRegistryHost "proxy.replicated.com" }}
      # If a local registry is configured, use the local registry's namespace.
      # Otherwise, use proxy/my-app/quay.io/my-org
      repository: repl{{ HasLocalRegistry | ternary LocalRegistryNamespace "proxy/my-app/quay.io/my-org" }}/nginx
      tag: v1.0.1
```
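For example, if the user has not configured a local registry, the values above render as follows:

```yaml
# Helm chart values file

image:
  registry: proxy.replicated.com
  repository: proxy/my-app/quay.io/my-org/nginx
  tag: v1.0.1
```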
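## Formatting Examples

This section includes examples of how to format the rendered output of KOTS template functions.

In addition to the examples in this section, KOTS template functions in the static context include several options for formatting values, such as converting strings to upper or lower case and trimming leading and trailing space characters. For more information, see [Static Context](/reference/template-functions-static-context).

### Indentation

When using template functions within nested YAML, it is important that the rendered template functions are indented correctly so that the resulting YAML is valid. A common use case for adding indentation to KOTS template functions is when templating annotations in the metadata of resources or objects deployed by your application based on user-supplied values.

The [nindent](https://masterminds.github.io/sprig/strings.html) function can be used to prepend a new line to the beginning of the string and indent the string by a specified number of spaces.

#### Indent Templated Helm Chart Values

The following example shows templating a Helm chart value that sets annotations for an Ingress object. This example uses the KOTS [ConfigOption](/reference/template-functions-config-context#configoption) template function to return user-supplied annotations from the Admin Console **Config** page. It also uses [nindent](https://masterminds.github.io/sprig/strings.html) to indent the rendered value ten spaces.

```yaml
# KOTS HelmChart custom resource

apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: myapp
spec:
  values:
    services:
      myservice:
        annotations: repl{{ ConfigOption "additional_annotations" | nindent 10 }}
```

#### Indent Templated Annotations in Manifest Files

The following example shows templating annotations for an Ingress object. This example uses the KOTS [ConfigOption](/reference/template-functions-config-context#configoption) template function to return user-supplied annotations from the Admin Console **Config** page. It also uses [nindent](https://masterminds.github.io/sprig/strings.html) to indent the rendered value four spaces.

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example-ingress
  annotations:
    kots.io/placeholder: |-
      repl{{ ConfigOption "ingress_annotations" | nindent 4 }}
```

### Render Quoted Values

To wrap a rendered value in quotes, you can pipe the result from KOTS template functions with the `repl{{ ... }}` syntax into quotes using `| quote`. Or, you can use the `'{{repl ... }}'` syntax instead.

One use case for quoted values in YAML is when indicator characters are included in values. In YAML, indicator characters (`-`, `?`, `:`) have special semantics and must be escaped if used in values. For more information, see [Indicator Characters](https://yaml.org/spec/1.2.2/#53-indicator-characters) in the YAML documentation.

#### Example with `'{{repl ... 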
}}'` Syntax + +```yaml +customTag: '{{repl ConfigOption "tag" }}' +``` +#### Example with `| quote` + +```yaml +customTag: repl{{ ConfigOption "tag" | quote }} +``` + +The result for both examples is: + +```yaml +customTag: 'key: value' +``` + +## Variables Example + +This section includes an example of using variables with KOTS template functions. For more information, see [Variables](https://pkg.go.dev/text/template#hdr-Variables) in the Go documentation. + +### Using Variables to Generate TLS Certificates in JSON + +You can use the Sprig [genCA](https://masterminds.github.io/sprig/crypto.html) and [genSignedCert](https://masterminds.github.io/sprig/crypto.html) functions with KOTS template functions to generate certificate authorities (CAs) and signed certificates in JSON. One use case for this is to generate default CAs, certificates, and keys that users can override with their own values on the Admin Console **Config** page. + +The Sprig [genCA](https://masterminds.github.io/sprig/crypto.html) and [genSignedCert](https://masterminds.github.io/sprig/crypto.html) functions require the subject's common name and the certificate's validity duration in days. The `genSignedCert` function also requires the CA that will sign the certificate. You can use variables and KOTS template functions to provide the necessary parameters when calling these functions. + +The following example shows how to use variables and KOTS template functions in the `default` property of a [`hidden`](/reference/custom-resource-config#hidden) item to pass parameters to the `genCA` and `genSignedCert` functions and generate a CA, certificate, and key. This example uses a `hidden` item (which is an item that is not displayed on the **Config** page) to generate the certificate chain because variables used in the KOTS Config custom resource can only be accessed from the same item where they were declared. For this reason, `hidden` items can be useful for evaluating complex templates. + +This example uses the following: +* KOTS [ConfigOption](/reference/template-functions-config-context#configoption) template function to render the user-supplied value for the ingress hostname. This is passed as a parameter to the [genCA](https://masterminds.github.io/sprig/crypto.html) and [genSignedCert](https://masterminds.github.io/sprig/crypto.html) functions +* Sprig [genCA](https://masterminds.github.io/sprig/crypto.html) and [genSignedCert](https://masterminds.github.io/sprig/crypto.html) functions to generate a CA and a certificate signed by the CA +* Sprig [dict](https://masterminds.github.io/sprig/dicts.html), [set](https://masterminds.github.io/sprig/dicts.html), and [dig](https://masterminds.github.io/sprig/dicts.html) dictionary functions to create a dictionary with entries for both the CA and the certificate, then traverse the dictionary to return the values of the CA, certificate, and key. +* [toJson](https://masterminds.github.io/sprig/defaults.html) and [fromJson](https://masterminds.github.io/sprig/defaults.html) Sprig functions to encode the CA and certificate into a JSON string, then decode the JSON for the purpose of displaying the values on the **Config** page as defaults + +:::important +Default values are treated as ephemeral. The following certificate chain is recalculated each time the application configuration is modified. Before using this example with your application, be sure that your application can handle updating these parameters dynamically. 
+::: + +```yaml +apiVersion: kots.io/v1beta1 +kind: Config +metadata: + name: config-sample +spec: + groups: + - name: example_settings + title: My Example Config + items: + - name: ingress_hostname + title: Ingress Hostname + help_text: Enter a DNS hostname to use as the cert's CN. + type: text + - name: tls_json + title: TLS JSON + type: textarea + hidden: true + default: |- + repl{{ $ca := genCA (ConfigOption "ingress_hostname") 365 }} + repl{{ $tls := dict "ca" $ca }} + repl{{ $cert := genSignedCert (ConfigOption "ingress_hostname") (list ) (list (ConfigOption "ingress_hostname")) 365 $ca }} + repl{{ $_ := set $tls "cert" $cert }} + repl{{ toJson $tls }} + - name: tls_ca + title: Signing Authority + type: textarea + default: repl{{ fromJson (ConfigOption "tls_json") | dig "ca" "Cert" "" }} + - name: tls_cert + title: TLS Cert + type: textarea + default: repl{{ fromJson (ConfigOption "tls_json") | dig "cert" "Cert" "" }} + - name: tls_key + title: TLS Key + type: textarea + default: repl{{ fromJson (ConfigOption "tls_json") | dig "cert" "Key" "" }} +``` + +The following image shows how the default values for the CA, certificate, and key are displayed on the **Config** page: + +<img alt="Default values for CA, certificate, and key on the Config page" src="/images/certificate-chain-default-values.png" width="550px"/> + +[View a larger version of this image](/images/certificate-chain-default-values.png) + +## Additional Examples + +The following topics include additional examples of using KOTS template functions in Kubernetes manifests deployed by KOTS or in KOTS custom resources: + +* [Add Status Informers](/vendor/admin-console-display-app-status#add-status-informers) in _Adding Resource Status Informers_ +* [Conditionally Including or Excluding Resources](/vendor/packaging-include-resources) +* [Example: Including Optional Helm Charts](/vendor/helm-optional-charts) +* [Example: Adding Database Configuration Options](/vendor/tutorial-adding-db-config) +* [Templating Annotations](/vendor/resources-annotations-templating) +* [Tutorial: Set Helm Chart Values with KOTS](/vendor/tutorial-config-setup) + +--- + + +# Identity Context + +# Identity Context + +## IdentityServiceEnabled + +```go +func IdentityServiceEnabled() bool +``` + +Returns true if the Replicated identity service has been enabled and configured by the end customer. + +```yaml +apiVersion: apps/v1 +kind: Deployment +... + env: + - name: IDENTITY_ENABLED + value: repl{{ IdentityServiceEnabled }} +``` + + +## IdentityServiceClientID + +```go +func IdentityServiceClientID() string +``` + +Returns the client ID required for the application to connect to the identity service OIDC server. + +```yaml +apiVersion: apps/v1 +kind: Deployment +... + env: + - name: CLIENT_ID + value: repl{{ IdentityServiceClientID }} +``` + + +## IdentityServiceClientSecret + +```go +func IdentityServiceClientSecret() (string, error) +``` + +Returns the client secret required for the application to connect to the identity service OIDC server. + +```yaml +apiVersion: v1 +kind: Secret +... +data: + CLIENT_SECRET: repl{{ IdentityServiceClientSecret | b64enc }} +``` + + +## IdentityServiceRoles + +```go +func IdentityServiceRoles() map[string][]string +``` + +Returns a list of groups specified by the customer mapped to a list of roles as defined in the Identity custom resource manifest file. + +For more information about roles in the Identity custom resource, see [Identity](custom-resource-identity#roles) in the _Custom resources_ section. 
```yaml
apiVersion: apps/v1
kind: Deployment
...
      env:
        - name: RESTRICTED_GROUPS
          value: repl{{ IdentityServiceRoles | keys | toJson }}
```


## IdentityServiceName

```go
func IdentityServiceName() string
```

Returns the Service name for the identity service OIDC server.

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
...
      - path: /dex
        backend:
          service:
            name: repl{{ IdentityServiceName }}
            port:
              number: repl{{ IdentityServicePort }}
```


## IdentityServicePort

```go
func IdentityServicePort() string
```

Returns the Service port number for the identity service OIDC server.

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
...
      - path: /dex
        backend:
          service:
            name: repl{{ IdentityServiceName }}
            port:
              number: repl{{ IdentityServicePort }}
```


---


# kURL Context

## kURL Context Functions

For applications installed in embedded clusters created with Replicated kURL, you can use template functions to show all of the options that the cluster was installed with.

The Installer custom resource reflects both install script changes made by posting YAML to the kURL API and changes made with `-s` flags at runtime. These functions are not available on the config page.

KurlBool, KurlInt, KurlString, and KurlOption all take a string `yamlPath` as a parameter.
This path is the path from the manifest file, delimited between add-on and subfield by a period (`.`).
For example, the kURL Kubernetes version can be accessed as `{{repl KurlString "Kubernetes.Version" }}`.

KurlBool, KurlInt, and KurlString return a bool, an integer, and a string value, respectively.
If used on a valid field but with the wrong type, these return the falsy value for their type: `false`, `0`, and `""`, respectively.
The `KurlOption` function converts all bool, int, and string fields to string.
All functions return falsy values if there is nothing at the specified `yamlPath`, or if they are run in a cluster with no Installer custom resource (that is, a cluster not created by kURL).

## KurlBool

```go
func KurlBool(yamlPath string) bool
```

Returns the value at the yamlPath if there is a valid boolean there, or false if there is not.

```yaml
'{{repl KurlBool "Docker.NoCEonEE" }}'
```


## KurlInt

```go
func KurlInt(yamlPath string) int
```

Returns the value at the yamlPath if there is a valid integer there, or 0 if there is not.

```yaml
'{{repl KurlInt "Rook.CephReplicaCount" }}'
```


## KurlString

```go
func KurlString(yamlPath string) string
```

Returns the value at the yamlPath if there is a valid string there, or "" if there is not.

```yaml
'{{repl KurlString "Kubernetes.Version" }}'
```


## KurlOption

```go
func KurlOption(yamlPath string) string
```

Returns the value at the yamlPath if there is a valid string, int, or bool value there, or "" if there is not.
Int and bool values are converted to string values.

```yaml
'{{repl KurlOption "Rook.CephReplicaCount" }}'
```


## KurlAll

```go
func KurlAll() string
```

Returns all values in the Installer custom resource as key:value pairs, sorted by key.
```yaml
'{{repl KurlAll }}'
```


---


# License Context

## LicenseFieldValue
```go
func LicenseFieldValue(name string) string
```
LicenseFieldValue returns the value of the specified license field. LicenseFieldValue accepts custom license fields and all built-in license fields. For a list of all built-in fields, see [Built-In License Fields](/vendor/licenses-using-builtin-fields).

LicenseFieldValue always returns a string, regardless of the license field type. To return integer or boolean values, you need to use the [ParseInt](/reference/template-functions-static-context#parseint) or [ParseBool](/reference/template-functions-static-context#parsebool) template function to convert the string value.

#### String License Field

The following example returns the value of the built-in `customerName` license field:

```yaml
customerName: '{{repl LicenseFieldValue "customerName" }}'
```
#### Integer License Field

The following example returns the value of a custom integer license field named `numSeats`:

```yaml
numSeats: repl{{ LicenseFieldValue "numSeats" | ParseInt }}
```
This example uses [ParseInt](/reference/template-functions-static-context#parseint) to convert the returned value to an integer.

#### Boolean License Field

The following example returns the value of a custom boolean license field named `feature-1`:

```yaml
feature-1: repl{{ LicenseFieldValue "feature-1" | ParseBool }}
```
This example uses [ParseBool](/reference/template-functions-static-context#parsebool) to convert the returned value to a boolean.

## LicenseDockerCfg
```go
func LicenseDockerCfg() string
```
LicenseDockerCfg returns a value that can be written to a secret when you need to deploy one manually.
Replicated KOTS creates and injects this secret automatically in normal conditions, but some deployments (for example, those with static additional namespaces) might need to include it.

```yaml
apiVersion: v1
kind: Secret
type: kubernetes.io/dockerconfigjson
metadata:
  name: myapp-registry
  namespace: my-other-namespace
data:
  .dockerconfigjson: repl{{ LicenseDockerCfg }}
```

## Sequence

```go
func Sequence() int64
```
Sequence is the deployment sequence number of the application.
It starts at 0 for each installation and increases with every app update, config change, license update, and registry setting change.

```yaml
'{{repl Sequence }}'
```

## Cursor

```go
func Cursor() string
```
Cursor is the channel sequence of the app.
For instance, if 5 releases have been promoted to the channel that the app is running on, this returns the string `5`.

```yaml
'{{repl Cursor }}'
```

## ChannelName

```go
func ChannelName() string
```
ChannelName is the name of the deployed channel of the app.

```yaml
'{{repl ChannelName }}'
```

## VersionLabel

```go
func VersionLabel() string
```
VersionLabel is the semantic version of the app, as specified when promoting a release to a channel.

```yaml
'{{repl VersionLabel }}'
```

## ReleaseNotes

```go
func ReleaseNotes() string
```
ReleaseNotes is the release notes of the current version of the app.

```yaml
'{{repl ReleaseNotes }}'
```

## IsAirgap

```go
func IsAirgap() bool
```
IsAirgap is `true` when the app was installed by uploading an airgap package, and `false` otherwise.
+
+```yaml
+'{{repl IsAirgap }}'
+```
+
+
+---
+
+
+# Static Context
+
+## About Mastermind Sprig
+
+Many of the utility functions provided come from sprig, a third-party library of Go template functions.
+For more information, see [Sprig Function Documentation](https://masterminds.github.io/sprig/) on the sprig website.
+
+## Certificate Functions
+
+### PrivateCACert
+
+> Introduced in KOTS v1.117.0
+
+```go
+func PrivateCACert() string
+```
+
+PrivateCACert returns the name of a ConfigMap that contains private CA certificates provided by the end user. For Embedded Cluster installations, these certificates are provided with the `--private-ca` flag for the `install` command. For KOTS installations, the user provides the ConfigMap using the `--private-ca-configmap` flag for the `install` command.
+
+You can use this template function to mount the specified ConfigMap so your containers can access the internet through enterprise proxies that issue their own TLS certificates in order to inspect traffic.
+
+:::note
+This function will return the name of the ConfigMap even if the ConfigMap has no entries. If no ConfigMap exists, this function returns the empty string.
+:::
+
+## Cluster Information Functions
+
+### Distribution
+```go
+func Distribution() string
+```
+Distribution returns the Kubernetes distribution detected. The possible return values are:
+
+* aks
+* digitalOcean
+* dockerDesktop
+* eks
+* embedded-cluster
+* gke
+* ibm
+* k0s
+* k3s
+* kind
+* kurl
+* microk8s
+* minikube
+* oke
+* openShift
+* rke2
+
+:::note
+[IsKurl](#iskurl) can also be used to detect kURL instances.
+:::
+
+#### Detect the Distribution
+```yaml
+repl{{ Distribution }}
+```
+#### Equal To Comparison
+```yaml
+repl{{ eq Distribution "gke" }}
+```
+#### Not Equal To Comparison
+```yaml
+repl{{ ne Distribution "embedded-cluster" }}
+```
+See [Functions](https://pkg.go.dev/text/template#hdr-Functions) in the Go documentation.
+
+### IsKurl
+```go
+func IsKurl() bool
+```
+IsKurl returns `true` if running within a kURL-based installation.
+#### Detect kURL Installations
+```yaml
+repl{{ IsKurl }}
+```
+#### Detect Non-kURL Installations
+```yaml
+repl{{ not IsKurl }}
+```
+See [Functions](https://pkg.go.dev/text/template#hdr-Functions) in the Go documentation.
+
+### KotsVersion
+
+```go
+func KotsVersion() string
+```
+
+KotsVersion returns the current version of KOTS.
+
+```yaml
+repl{{ KotsVersion }}
+```
+
+You can compare the KOTS version as follows:
+```yaml
+repl{{KotsVersion | semverCompare ">= 1.19"}}
+```
+
+This returns `true` if the KOTS version is greater than or equal to `1.19`.
+
+For more complex comparisons, see [Semantic Version Functions](https://masterminds.github.io/sprig/semver.html) in the sprig documentation.
+
+### KubernetesMajorVersion
+
+> Introduced in KOTS v1.92.0
+
+```go
+func KubernetesMajorVersion() string
+```
+
+KubernetesMajorVersion returns the Kubernetes server *major* version.
+
+```yaml
+repl{{ KubernetesMajorVersion }}
+```
+
+You can compare the Kubernetes major version as follows:
+```yaml
+repl{{lt (KubernetesMajorVersion | ParseInt) 2 }}
+```
+
+This returns `true` if the Kubernetes major version is less than `2`.
+
+### KubernetesMinorVersion
+
+> Introduced in KOTS v1.92.0
+
+```go
+func KubernetesMinorVersion() string
+```
+
+KubernetesMinorVersion returns the Kubernetes server *minor* version. 
+
+```yaml
+repl{{ KubernetesMinorVersion }}
+```
+
+You can compare the Kubernetes minor version as follows:
+```yaml
+repl{{gt (KubernetesMinorVersion | ParseInt) 19 }}
+```
+
+This returns `true` if the Kubernetes minor version is greater than `19`.
+
+### KubernetesVersion
+
+> Introduced in KOTS v1.92.0
+
+```go
+func KubernetesVersion() string
+```
+
+KubernetesVersion returns the Kubernetes server version.
+
+```yaml
+repl{{ KubernetesVersion }}
+```
+
+You can compare the Kubernetes version as follows:
+```yaml
+repl{{KubernetesVersion | semverCompare ">= 1.19"}}
+```
+
+This returns `true` if the Kubernetes version is greater than or equal to `1.19`.
+
+For more complex comparisons, see [Semantic Version Functions](https://masterminds.github.io/sprig/semver.html) in the sprig documentation.
+
+### Namespace
+```go
+func Namespace() string
+```
+Namespace returns the Kubernetes namespace that the application belongs to.
+```yaml
+'{{repl Namespace}}'
+```
+
+### NodeCount
+```go
+func NodeCount() int
+```
+NodeCount returns the number of nodes detected within the Kubernetes cluster.
+```yaml
+repl{{ NodeCount }}
+```
+
+### Lookup
+
+> Introduced in KOTS v1.103.0
+
+```go
+func Lookup(apiversion string, resource string, namespace string, name string) map[string]interface{}
+```
+
+Lookup searches resources in a running cluster and returns a resource or resource list.
+
+Lookup uses the Helm lookup function to search resources and has the same functionality as the Helm lookup function. For more information, see [lookup](https://helm.sh/docs/chart_template_guide/functions_and_pipelines/#using-the-lookup-function) in the Helm documentation.
+
+```yaml
+repl{{ Lookup "API_VERSION" "KIND" "NAMESPACE" "NAME" }}
+```
+
+Both `NAME` and `NAMESPACE` are optional and can be passed as an empty string (`""`).
+
+The following combinations of parameters are possible:
+
+<table>
+  <tr>
+    <th>Behavior</th>
+    <th>Lookup function</th>
+  </tr>
+  <tr>
+    <td style={{ fontSize: 14 }}><code>kubectl get pod mypod -n mynamespace</code></td>
+    <td style={{ fontSize: 14 }}><code>repl{{ Lookup "v1" "Pod" "mynamespace" "mypod" }}</code></td>
+  </tr>
+  <tr>
+    <td style={{ fontSize: 14 }}><code>kubectl get pods -n mynamespace</code></td>
+    <td style={{ fontSize: 14 }}><code>repl{{ Lookup "v1" "Pod" "mynamespace" "" }}</code></td>
+  </tr>
+  <tr>
+    <td style={{ fontSize: 14 }}><code>kubectl get pods --all-namespaces</code></td>
+    <td style={{ fontSize: 14 }}><code>repl{{ Lookup "v1" "Pod" "" "" }}</code></td>
+  </tr>
+  <tr>
+    <td style={{ fontSize: 14 }}><code>kubectl get namespace mynamespace</code></td>
+    <td style={{ fontSize: 14 }}><code>repl{{ Lookup "v1" "Namespace" "" "mynamespace" }}</code></td>
+  </tr>
+  <tr>
+    <td style={{ fontSize: 14 }}><code>kubectl get namespaces</code></td>
+    <td style={{ fontSize: 14 }}><code>repl{{ Lookup "v1" "Namespace" "" "" }}</code></td>
+  </tr>
+</table>
+
+The following describes working with values returned by the Lookup function:
+
+* When Lookup finds an object, it returns a dictionary with the key-value pairs from the object. This dictionary can be navigated to extract specific values. For example, the following returns the annotations for the `mynamespace` object:
+
+  ```
+  repl{{ (Lookup "v1" "Namespace" "" "mynamespace").metadata.annotations }}
+  ```
+
+* When Lookup returns a list of objects, it is possible to access the object list through the `items` field. 
For example:
+
+  ```
+  services: |
+    repl{{- range $index, $service := (Lookup "v1" "Service" "mynamespace" "").items }}
+    - repl{{ $service.metadata.name }}
+    repl{{- end }}
+  ```
+
+  For an array value type, omit the `|`. For example:
+
+  ```
+  services:
+    repl{{- range $index, $service := (Lookup "v1" "Service" "mynamespace" "").items }}
+    - repl{{ $service.metadata.name }}
+    repl{{- end }}
+  ```
+
+* When no object is found, Lookup returns an empty value. This can be used to check for the existence of an object.
+
+## Date Functions
+
+### Now
+```go
+func Now() string
+```
+Returns the current timestamp as an RFC3339 formatted string.
+```yaml
+'{{repl Now }}'
+```
+
+### NowFmt
+```go
+func NowFmt(format string) string
+```
+Returns the current timestamp as a formatted string.
+For information about Go time formatting guidelines, see [Constants](https://golang.org/pkg/time/#pkg-constants) in the Go documentation.
+```yaml
+'{{repl NowFmt "20060102" }}'
+```
+
+## Encoding Functions
+
+### Base64Decode
+```go
+func Base64Decode(stringToDecode string) string
+```
+Returns the decoded string from a Base64 stored value.
+```yaml
+'{{repl ConfigOption "base_64_encoded_name" | Base64Decode }}'
+```
+
+### Base64Encode
+```go
+func Base64Encode(stringToEncode string) string
+```
+Returns a Base64 encoded string.
+```yaml
+'{{repl ConfigOption "name" | Base64Encode }}'
+```
+
+### UrlEncode
+```go
+func UrlEncode(stringToEncode string) string
+```
+Returns the string, URL-encoded.
+Equivalent to the `QueryEscape` function in the Go `net/url` package. For more information, see [func QueryEscape](https://godoc.org/net/url#QueryEscape) in the Go documentation.
+```yaml
+'{{repl ConfigOption "smtp_email" | UrlEncode }}:{{repl ConfigOption "smtp_password" | UrlEncode }}@smtp.example.com:587'
+```
+
+### UrlPathEscape
+
+```go
+func UrlPathEscape(stringToEncode string) string
+```
+Returns the string, URL *path* encoded.
+Equivalent to the `PathEscape` function in the Go `net/url` package. For more information, see [func PathEscape](https://godoc.org/net/url#PathEscape) in the Go documentation.
+```yaml
+'{{repl ConfigOption "smtp_email" | UrlPathEscape }}:{{repl ConfigOption "smtp_password" | UrlPathEscape }}@smtp.example.com:587'
+```
+
+## Encryption Functions
+
+### KubeSeal
+```go
+func KubeSeal(certData string, namespace string, name string, value string) string
+```
+KubeSeal encrypts ("seals") the provided value with the given SealedSecrets certificate, scoped to the specified namespace and name, so that the result can be used in a SealedSecret resource managed by the Bitnami sealed-secrets controller.
+
+## Integer and Float Functions
+
+### HumanSize
+```go
+func HumanSize(size interface{}) string
+```
+HumanSize returns a human-readable approximation of a size in bytes, capped at 4 significant digits (for example, "2.746 MB", "796 KB").
+The size must be an integer or floating point number.
+```yaml
+'{{repl ConfigOption "min_size_bytes" | HumanSize }}'
+```
+
+## Proxy Functions
+
+### HTTPProxy
+
+```go
+func HTTPProxy() string
+```
+HTTPProxy returns the address of the HTTP proxy that the Admin Console is configured to use.
+```yaml
+repl{{ HTTPProxy }}
+```
+
+### HTTPSProxy
+
+```go
+func HTTPSProxy() string
+```
+HTTPSProxy returns the address of the HTTPS proxy that the Admin Console is configured to use.
+```yaml
+repl{{ HTTPSProxy }}
+```
+
+### NoProxy
+
+```go
+func NoProxy() string
+```
+NoProxy returns the comma-separated list of no-proxy addresses that the Admin Console is configured to use.
+```yaml
+repl{{ NoProxy }}
+```
+
+## Math Functions
+### Add
+```go
+func Add(x interface{}, y interface{}) interface{}
+```
+Adds x and y. 
+ +If at least one of the operands is a floating point number, the result will be a floating point number. + +If both operands are integers, the result will be an integer. +```yaml +'{{repl Add (ConfigOption "maximum_users") 1}}' +``` + +### Div +```go +func Div(x interface{}, y interface{}) interface{} +``` +Divides x by y. + +If at least one of the operands is a floating point number, the result will be a floating point number. + +If both operands are integers, the result will be an integer and will be rounded down. +```yaml +'{{repl Div (ConfigOption "maximum_users") 2.0}}' +``` + +### Mult +```go +func Mult(x interface{}, y interface{}) interface{} +``` +Multiplies x and y. + +Both operands must be either an integer or a floating point number. + +If at least one of the operands is a floating point number, the result will be a floating point number. + +If both operands are integers, the result will be an integer. +```yaml +'{{repl Mult (NodePrivateIPAddressAll "DB" "redis" | len) 2}}' +``` + +If a template function returns a string, the value must be converted to an integer or a floating point number first: +```yaml +'{{repl Mult (ConfigOption "session_cookie_age" | ParseInt) 86400}}' +``` + +### Sub +```go +func Sub(x interface{}, y interface{}) interface{} +``` +Subtracts y from x. + +If at least one of the operands is a floating point number, the result will be a floating point number. + +If both operands are integers, the result will be an integer. +```yaml +'{{repl Sub (ConfigOption "maximum_users") 1}}' +``` + +## String Functions + +### ParseBool +```go +func ParseBool(str string) bool +``` +ParseBool returns the boolean value represented by the string. +```yaml +'{{repl ConfigOption "str_value" | ParseBool }}' +``` + +### ParseFloat +```go +func ParseFloat(str string) float64 +``` +ParseFloat returns the float value represented by the string. +```yaml +'{{repl ConfigOption "str_value" | ParseFloat }}' +``` + +### ParseInt +```go +func ParseInt(str string, args ...int) int64 +``` +ParseInt returns the integer value represented by the string with optional base (default 10). +```yaml +'{{repl ConfigOption "str_value" | ParseInt }}' +``` + +### ParseUint +```go +func ParseUint(str string, args ...int) uint64 +``` +ParseUint returns the unsigned integer value represented by the string with optional base (default 10). +```yaml +'{{repl ConfigOption "str_value" | ParseUint }}' +``` + +### RandomString +```go +func RandomString(length uint64, providedCharset ...string) string +``` +Returns a random string with the desired length and charset. +Provided charsets must be Perl formatted and match individual characters. +If no charset is provided, `[_A-Za-z0-9]` will be used. + +#### Examples + +The following example generates a 64-character random string: + +```yaml +'{{repl RandomString 64}}' +``` +The following example generates a 64-character random string that contains `a`s and `b`s: + +```yaml +'{{repl RandomString 64 "[ab]" }}' +``` +#### Generating Persistent and Ephemeral Strings + +When you assign the RandomString template function to a `value` key in the Config custom resource, you can use the `hidden` and `readonly` properties to control the behavior of the RandomString function each time it is called. The RandomString template function is called each time the user deploys a change to the configuration settings for the application. 
+
+Depending on whether the `hidden` and `readonly` properties are `true` or `false`, the random string generated by a RandomString template function in a `value` key is either ephemeral or persistent between configuration changes:
+
+* **Ephemeral**: The value of the random string _changes_ when the user deploys a change to the configuration settings for the application.
+* **Persistent**: The value of the random string does _not_ change when the user deploys a change to the configuration settings for the application.
+
+For more information about these properties, see [`hidden`](custom-resource-config#hidden) and [`readonly`](custom-resource-config#readonly) in _Config_.
+
+:::note
+If you assign the RandomString template function to a `default` key in the Config custom resource rather than a `value` key, then the `hidden` and `readonly` properties do _not_ affect the behavior of the RandomString template function. For more information about the behavior of the `default` key in the Config custom resource, see [`default`](custom-resource-config#default) in _Config_.
+:::
+
+The following table describes the behavior of the RandomString template function when it is assigned to a `value` key in the Config custom resource and the `hidden` and `readonly` properties are `true` or `false`:
+
+<table>
+  <tr>
+    <th width="15%">readonly</th>
+    <th width="15%">hidden</th>
+    <th width="15%">Outcome</th>
+    <th width="55%">Use Case</th>
+  </tr>
+  <tr>
+    <td>false</td>
+    <td>true</td>
+    <td>Persistent</td>
+    <td>
+      <p>Set <code>readonly</code> to <code>false</code> and <code>hidden</code> to <code>true</code> if:</p>
+      <ul>
+        <li>The random string must <em>not</em> change each time the user deploys a change to the application's configuration settings.</li>
+        <li>The user does <em>not</em> need to see or change, or must be prevented from seeing or changing, the value of the random string.</li>
+      </ul>
+    </td>
+  </tr>
+  <tr>
+    <td>true</td>
+    <td>false</td>
+    <td>Ephemeral</td>
+    <td>
+      <p>Set <code>readonly</code> to <code>true</code> and <code>hidden</code> to <code>false</code> if:</p>
+      <ul>
+        <li>The random string <em>must</em> change each time the user deploys a change to the application's configuration settings.</li>
+        <li>The user does <em>not</em> need to change, or must be prevented from changing, the value of the random string.</li>
+        <li>The user <em>must</em> be able to see the value of the random string.</li>
+      </ul>
+    </td>
+  </tr>
+  <tr>
+    <td>true</td>
+    <td>true</td>
+    <td>Ephemeral</td>
+    <td>
+      <p>Set <code>readonly</code> to <code>true</code> and <code>hidden</code> to <code>true</code> if:</p>
+      <ul>
+        <li>The random string <em>must</em> change each time the user deploys a change to the application's configuration settings.</li>
+        <li>The user does <em>not</em> need to see or change, or must be prevented from seeing or changing, the value of the random string.</li>
+      </ul>
+    </td>
+  </tr>
+  <tr>
+    <td>false</td>
+    <td>false</td>
+    <td>Persistent</td>
+    <td>
+      <p>Set <code>readonly</code> to <code>false</code> and <code>hidden</code> to <code>false</code> if:</p>
+      <ul>
+        <li>The random string must <em>not</em> change each time the user deploys a change to the application's configuration settings.</li>
+        <li>The user <em>must</em> be able to see and change the value of the random string.</li>
+      </ul>
+      <p>For example, set both <code>readonly</code> and <code>hidden</code> to <code>false</code> to generate a random password that users must be able to see and then change to a different 
value that they choose.</p>
+    </td>
+  </tr>
+</table>
+
+### Split
+```go
+func Split(s string, sep string) []string
+```
+Split slices s into all substrings separated by sep and returns an array of the substrings between those separators.
+```yaml
+'{{repl Split "A,B,C" "," }}'
+```
+
+Combining `Split` and `index`:
+Assuming the `github_url` param is set to `https://github.mycorp.internal:3131`, the following would set
+`GITHUB_HOSTNAME` to `github.mycorp.internal`.
+```yaml
+'{{repl index (Split (index (Split (ConfigOption "github_url") "/") 2) ":") 0}}'
+```
+
+### ToLower
+```go
+func ToLower(stringToAlter string) string
+```
+Returns the string, in lowercase.
+```yaml
+'{{repl ConfigOption "company_name" | ToLower }}'
+```
+
+### ToUpper
+```go
+func ToUpper(stringToAlter string) string
+```
+Returns the string, in uppercase.
+```yaml
+'{{repl ConfigOption "company_name" | ToUpper }}'
+```
+
+### Trim
+```go
+func Trim(s string, args ...string) string
+```
+Trim returns a string with all leading and trailing strings contained in the optional args removed (default space).
+```yaml
+'{{repl Trim (ConfigOption "str_value") "." }}'
+```
+
+### TrimSpace
+```go
+func TrimSpace(s string) string
+```
+TrimSpace returns a string with all leading and trailing spaces removed.
+```yaml
+'{{repl ConfigOption "str_value" | TrimSpace }}'
+```
+
+### YamlEscape
+```go
+func YamlEscape(input string) string
+```
+
+YamlEscape returns an escaped and quoted version of the input string, suitable for use within a YAML document.
+This can be useful when dealing with user-uploaded files that may include null bytes and other nonprintable characters. For more information about printable characters, see [Character Set](https://yaml.org/spec/1.2.2/#51-character-set) in the YAML documentation.
+
+```yaml
+repl{{ ConfigOptionData "my_file_upload" | YamlEscape }}
+```
+
+---
+
+
+import ApiAbout from "../partials/vendor-api/_api-about.mdx"
+
+# Using the Vendor API v3
+
+This topic describes how to use Replicated Vendor API authentication tokens to make API calls.
+
+## About the Vendor API
+
+<ApiAbout/>
+
+## API Token Requirement
+
+To use the Vendor API v3, you need a token for authorization. You provide the token as the value of the `Authorization` header of Vendor API calls. For example, to pass a token as the authorization header in a request:
+
+```
+curl --request GET \
+  --url https://api.replicated.com/vendor/v3/customers \
+  --header 'Accept: application/json' \
+  --header 'Authorization: my-token'
+```
+
+Generate a service account or user API token in the Vendor Portal. The token must have `Read/Write` access to create new releases. See [Generating API Tokens](/vendor/replicated-api-tokens).
+
+## Vendor API v3 Documentation
+
+For Vendor API documentation and an interactive API console, see [Vendor API v3 Reference](https://replicated-vendor-api.readme.io/v3/reference/createapp).
+
+For the Vendor API swagger specification, see [vendor-api-v3.json](https://api.replicated.com/vendor/v3/spec/vendor-api-v3.json).
+
+![vendor api documentation page](/images/vendor-api-docs.png)
+
+[View a larger version of this image](/images/vendor-api-docs.png)
+
+---
+
+
+# Adding Links to the Dashboard
+
+This topic describes how to use the Kubernetes SIG Application custom resource to add links to the Replicated KOTS Admin Console dashboard.
+
+## Overview
+
+Replicated recommends that every application include a Kubernetes SIG Application custom resource. 
The Kubernetes SIG Application custom resource provides a standard API for creating, viewing, and managing applications. For more information, see [Kubernetes Applications](https://github.com/kubernetes-sigs/application#kubernetes-applications) in the kubernetes-sigs GitHub repository. + +You can include the Kubernetes SIG Application custom resource in your releases to add links to the Admin Console dashboard. Common use cases include adding links to documentation, dashboards, or a landing page for the application. + +For example, the following shows an **Open App** button on the dashboard of the Admin Console for an application named Gitea: + +<img alt="Admin Console dashboard with Open App link" src="/images/gitea-open-app.png" width="700px"/> + +[View a larger version of this image](/images/gitea-open-app.png) + +:::note +KOTS uses the Kubernetes SIG Application custom resource as metadata and does not require or use an in-cluster controller to handle this custom resource. An application that follows best practices does not require cluster admin privileges or any cluster-wide components to be installed. +::: + +## Add a Link + +To add a link to the Admin Console dashboard, include a [Kubernetes SIG Application](https://github.com/kubernetes-sigs/application#kubernetes-applications) custom resource in the release with a `spec.descriptor.links` field. The `spec.descriptor.links` field is an array of links that are displayed on the Admin Console dashboard after the application is deployed. + +Each link in the `spec.descriptor.links` array contains two fields: +* `description`: The link text that will appear on the Admin Console dashboard. +* `url`: The target URL. + +For example: + +```yaml +# app.k8s.io/v1beta1 Application Custom resource + +apiVersion: app.k8s.io/v1beta1 +kind: Application +metadata: + name: "gitea" +spec: + descriptor: + links: + - description: About Wordpress + url: "https://wordpress.org/" +``` + +When the application is deployed, the "About Wordpress" link is displayed on the Admin Console dashboard as shown below: + +<img alt="About Wordpress link on the Admin Console dashboard" src="/images/dashboard-link-about-wordpress.png" width="450px"/> + +[View a larger version of this image](/images/dashboard-link-about-wordpress.png) + +For an additional example of a Kubernetes SIG Application custom resource, see [application.yaml](https://github.com/kubernetes-sigs/application/blob/master/docs/examples/wordpress/application.yaml) in the kubernetes-sigs GitHub repository. + +### Create URLs with User-Supplied Values Using KOTS Template Functions {#url-template} + +You can use KOTS template functions to template URLs in the Kubernetes SIG Application custom resource. This can be useful when all or some of the URL is a user-supplied value. For example, an application might allow users to provide their own ingress controller or load balancer. In this case, the URL can be templated to render the hostname that the user provides on the Admin Console Config screen. + +The following examples show how to use the KOTS [ConfigOption](/reference/template-functions-config-context#configoption) template function in the Kubernetes SIG Application custom resource `spec.descriptor.links.url` field to render one or more user-supplied values: + +* In the example below, the URL hostname is a user-supplied value for an ingress controller that the user configures during installation. 
+
+  ```yaml
+  apiVersion: app.k8s.io/v1beta1
+  kind: Application
+  metadata:
+    name: "my-app"
+  spec:
+    descriptor:
+      links:
+        - description: Open App
+          url: 'http://{{repl ConfigOption "ingress_host" }}'
+  ```
+* In the example below, both the URL hostname and a node port are user-supplied values. It might be necessary to include a user-provided node port if you are exposing NodePort services for installations on VMs or bare metal servers with [Replicated Embedded Cluster](/vendor/embedded-overview) or [Replicated kURL](/vendor/kurl-about).
+
+  ```yaml
+  apiVersion: app.k8s.io/v1beta1
+  kind: Application
+  metadata:
+    name: "my-app"
+  spec:
+    descriptor:
+      links:
+        - description: Open App
+          url: 'http://{{repl ConfigOption "hostname" }}:{{repl ConfigOption "node_port"}}'
+  ```
+
+For more information about working with KOTS template functions, see [About Template Functions](/reference/template-functions-about).
+
+---
+
+
+# Customizing the Application Icon
+
+You can add a custom application icon that displays in the Replicated Admin Console and the download portal. Adding a custom icon helps ensure that your brand is reflected for your customers.
+
+:::note
+You can also use a custom domain for the download portal. For more information, see [About Custom Domains](custom-domains).
+:::
+
+## Add a Custom Icon
+
+For information about how to choose an image file for your custom application icon that displays well in the Admin Console, see [Icon Image File Recommendations](#icon-image-file-recommendations) below.
+
+To add a custom application icon:
+
+1. In the [Vendor Portal](https://vendor.replicated.com/apps), click **Releases**. Click **Create release** to create a new release, or click **Edit YAML** to edit an existing release.
+1. Create or open the Application custom resource manifest file. An Application custom resource manifest file has `apiVersion: kots.io/v1beta1` and `kind: Application`.
+
+1. In the preview section of the Help pane:
+
+   1. If your Application manifest file is already populated with an `icon` key, the icon displays in the preview. Click **Preview a different icon** to access the preview options.
+
+   1. Drag and drop an icon image file to the drop zone. Alternatively, paste a link or Base64 encoded data URL in the text box. Click **Preview**.
+
+      ![Application icon preview](/images/app-icon-preview.png)
+
+   1. (Air gap only) If you paste a link to the image in the text box, click **Preview** and **Base64 encode icon** to convert the image to a Base64 encoded data URL. An encoded URL displays that you can copy and paste into the Application manifest. Base64 encoding is required for images used with air gap installations.
+
+      :::note
+      If you pasted a Base64 encoded data URL into the text box, the **Base64 encode icon** button does not display because the image is already encoded. If you drag and drop an icon, the icon is automatically encoded for you.
+      :::
+
+      ![Base64 encode image button](/images/app-icon-preview-base64.png)
+
+   1. Click **Preview a different icon** to preview a different icon if needed.
+
+1. In the Application manifest, under `spec`, add an `icon` key that includes a link or the Base64 encoded data URL to the desired image.
+
+   **Example**:
+
+   ```yaml
+   apiVersion: kots.io/v1beta1
+   kind: Application
+   metadata:
+     name: my-application
+   spec:
+     title: My Application
+     icon: https://kots.io/images/kotsadm-logo-large@2x.png
+   ```
+1. Click **Save Release**. 
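+
+For air gap installations, the `icon` key must contain a Base64 encoded data URL rather than a link. The following is a minimal sketch of what this looks like, with a truncated placeholder standing in for the encoded image data that the Vendor Portal generates:
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Application
+metadata:
+  name: my-application
+spec:
+  title: My Application
+  # Placeholder only: paste the full data URL produced by the
+  # Base64 encode icon option in the Vendor Portal
+  icon: data:image/png;base64,iVBORw0KGgoAAAANSUhEUg...
+```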
+
+
+## Icon Image File Recommendations
+
+For your custom application icon to look best in the Admin Console, consider the following recommendations:
+
+* Use a PNG or JPG file.
+* Use an image that is at least 250 by 250 pixels.
+* Export the image file at 2x.
+
+
+---
+
+
+# Creating and Editing Configuration Fields
+
+This topic describes how to use the KOTS Config custom resource manifest file to add and edit fields in the KOTS Admin Console configuration screen.
+
+## About the Config Custom Resource
+
+Applications distributed with Replicated KOTS can include a configuration screen in the Admin Console to collect required or optional values from your users that are used to run your application. For more information about the configuration screen, see [About the Configuration Screen](config-screen-about).
+
+To include a configuration screen in the Admin Console for your application, you add a Config custom resource manifest file to a release for the application.
+
+You define the fields that appear on the configuration screen as an array of `groups` and `items` in the Config custom resource:
+  * `groups`: A set of `items`. Each group must have a `name`, `title`, `description`, and `items`. For example, you can create a group of several user input fields that are all related to configuring an SMTP mail server.
+  * `items`: An array of user input fields. Each array under `items` must have a `name`, `title`, and `type`. You can also include several optional properties. For example, in a group for configuring an SMTP mail server, you can have user input fields under `items` for the SMTP hostname, port, username, and password.
+
+    There are several types of `items` supported in the Config manifest that allow you to collect different types of user inputs. For example, you can use the `password` input type to create a text field on the configuration screen that hides user input.
+
+For more information about the syntax of the Config custom resource manifest, see [Config](/reference/custom-resource-config).
+
+## About Regular Expression Validation
+
+You can use [RE2 regular expressions](https://github.com/google/re2/wiki/Syntax) (regex) to validate user input for config items, ensuring conformity to certain standards, such as valid email addresses, password complexity rules, IP addresses, and URLs. This prevents users from deploying an application with a verifiably invalid configuration.
+
+You add the `validation`, `regex`, `pattern`, and `message` fields to items in the Config custom resource. Validation is supported for `text`, `textarea`, `password`, and `file` config item types. For more information about regex validation fields, see [Item Validation](/reference/custom-resource-config#item-validation) in _Config_.
+
+The following example shows a common password complexity rule:
+
+```yaml
+- name: smtp-settings
+  title: SMTP Settings
+  items:
+  - name: smtp_password
+    title: SMTP Password
+    type: password
+    help_text: Set SMTP password
+    validation:
+      regex:
+        pattern: ^(?:[\w@#$%^&+=!*()_\-{}[\]:;"'<>,.?\/|]){8,16}$
+        message: The password must be between 8 and 16 characters long and can contain a combination of uppercase letters, lowercase letters, digits, and special characters.
+```
+
+## Add Fields to the Configuration Screen
+
+To add fields to the Admin Console configuration screen:
+
+1. In the [Vendor Portal](https://vendor.replicated.com/apps), click **Releases**. 
Then, either click **Create release** to create a new release, or click **Edit YAML** to edit an existing release.
+1. Create or open the Config custom resource manifest file in the desired release. A Config custom resource manifest file has `kind: Config`.
+1. In the Config custom resource manifest file, define custom user-input fields in an array of `groups` and `items`.
+
+   **Example**:
+
+   ```yaml
+   apiVersion: kots.io/v1beta1
+   kind: Config
+   metadata:
+     name: my-application
+   spec:
+     groups:
+     - name: smtp_settings
+       title: SMTP Settings
+       description: Configure SMTP Settings
+       items:
+       - name: enable_smtp
+         title: Enable SMTP
+         help_text: Enable SMTP
+         type: bool
+         default: "0"
+       - name: smtp_host
+         title: SMTP Hostname
+         help_text: Set SMTP Hostname
+         type: text
+       - name: smtp_port
+         title: SMTP Port
+         help_text: Set SMTP Port
+         type: text
+       - name: smtp_user
+         title: SMTP User
+         help_text: Set SMTP User
+         type: text
+       - name: smtp_password
+         title: SMTP Password
+         type: password
+         default: 'password'
+   ```
+
+   The example above includes a single group with the name `smtp_settings`.
+
+   The `items` array for the `smtp_settings` group includes the following user-input fields: `enable_smtp`, `smtp_host`, `smtp_port`, `smtp_user`, and `smtp_password`. Additional item properties are available, such as `affix` to make items appear horizontally on the same line. For more information about item properties, see [Item Properties](/reference/custom-resource-config#item-properties) in Config.
+
+   The following screenshot shows how the SMTP Settings group from the example YAML above displays in the Admin Console configuration screen during application installation:
+
+   ![User input fields on the configuration screen for the SMTP settings](/images/config-screen-smtp-example-large.png)
+
+1. (Optional) Add default values for the fields. You can add default values using one of the following properties:
+   * **With the `default` property**: When you include the `default` key, KOTS uses this value when rendering the manifest files for your application. The value then displays as a placeholder on the configuration screen in the Admin Console for your users. KOTS only uses the default value if the user does not provide a different value.
+
+     :::note
+     If you change the `default` value in a later release of your application, installed instances of your application receive the updated value only if your users did not change the default from what it was when they initially installed the application.
+
+     If a user did change a field from its default, the Admin Console does not overwrite the value they provided.
+     :::
+
+   * **With the `value` property**: When you include the `value` key, KOTS does not overwrite this value during an application update. The value that you provide for the `value` key is visually indistinguishable from other values that your user provides on the Admin Console configuration screen. KOTS treats user-supplied values and the value that you provide for the `value` key as the same.
+
+1. (Optional) Add regular expressions to validate user input for `text`, `textarea`, `password`, and `file` config item types. For more information, see [About Regular Expression Validation](#about-regular-expression-validation). 
+
+   **Example**:
+
+   ```yaml
+   - name: smtp_host
+     title: SMTP Hostname
+     help_text: Set SMTP Hostname
+     type: text
+     validation:
+       regex:
+         pattern: ^[a-zA-Z]([a-zA-Z0-9\-]+[\.]?)*[a-zA-Z0-9]$
+         message: Valid hostname starts with a letter (uppercase/lowercase), followed by zero or more groups of letters (uppercase/lowercase), digits, or hyphens, optionally followed by a period. Ends with a letter or digit.
+   ```
+1. (Optional) Mark fields as required by including `required: true`. When there are required fields, the user is prevented from proceeding with the installation until they provide a valid value for required fields.
+
+   **Example**:
+
+   ```yaml
+   - name: smtp_password
+     title: SMTP Password
+     type: password
+     required: true
+   ```
+
+1. Save and promote the release to a development environment to test your changes.
+
+## Next Steps
+
+After you add user input fields to the configuration screen, you use template functions to map the user-supplied values to manifest files in your release. If you use a Helm chart for your application, you map the values to the Helm chart `values.yaml` file using the HelmChart custom resource.
+
+For more information, see [Mapping User-Supplied Values](config-screen-map-inputs).
+
+---
+
+
+import StatusesTable from "../partials/status-informers/_statusesTable.mdx"
+import AggregateStatus from "../partials/status-informers/_aggregateStatus.mdx"
+import AggregateStatusIntro from "../partials/status-informers/_aggregate-status-intro.mdx"
+import SupportedResources from "../partials/instance-insights/_supported-resources-status.mdx"
+
+# Adding Resource Status Informers
+
+This topic describes how to add status informers for your application. Status informers apply only to applications installed with Replicated KOTS. For information about how to collect application status data for applications installed with Helm, see [Enabling and Understanding Application Status](insights-app-status).
+
+## About Status Informers
+
+_Status informers_ are a feature of KOTS that report on the status of supported Kubernetes resources deployed as part of your application. You enable status informers by listing the target resources under the `statusInformers` property in the Replicated Application custom resource. KOTS watches all of the resources that you add to the `statusInformers` property for changes in state.
+
+Possible resource statuses are Ready, Updating, Degraded, Unavailable, and Missing. For more information, see [Understanding Application Status](#understanding-application-status).
+
+When you add one or more status informers to your application, KOTS automatically does the following:
+
+* Displays application status for your users on the dashboard of the Admin Console. This can help users diagnose and troubleshoot problems with their instance. The following shows an example of how an Unavailable status displays on the Admin Console dashboard:
+
+  <img src="/images/kotsadm-dashboard-appstatus.png" alt="Unavailable status on the Admin Console dashboard" width="500px"/>
+
+* Sends application status data to the Vendor Portal. This is useful for viewing insights on instances of your application running in customer environments, such as the current status and the average uptime. For more information, see [Instance Details](instance-insights-details). 
+
+  The following shows an example of the Vendor Portal **Instance details** page with data about the status of an instance over time:
+
+  <img src="/images/instance-details.png" alt="Instance details full page" width="700px"/>
+
+  [View a larger version of this image](/images/instance-details.png)
+
+## Add Status Informers
+
+To create status informers for your application, add one or more supported resource types to the `statusInformers` property in the Application custom resource. See [`statusInformers`](/reference/custom-resource-application#statusinformers) in _Application_.
+
+<SupportedResources/>
+
+You can target resources of the supported types that are deployed in any of the following ways:
+
+* Deployed by KOTS.
+* Deployed by a Kubernetes Operator that is deployed by KOTS. For more information, see [About Packaging a Kubernetes Operator Application](operator-packaging-about).
+* Deployed by Helm. For more information, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about).
+
+### Examples
+
+Status informers are in the format `[namespace/]type/name`, where namespace is optional and defaults to the current namespace.
+
+**Example**:
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Application
+metadata:
+  name: my-application
+spec:
+  statusInformers:
+    - deployment/my-web-svc
+    - deployment/my-worker
+```
+
+The `statusInformers` property also supports template functions. Using template functions allows you to include or exclude a status informer based on a customer-provided configuration value:
+
+**Example**:
+
+```yaml
+statusInformers:
+  - deployment/my-web-svc
+  - '{{repl if ConfigOptionEquals "option" "value"}}deployment/my-worker{{repl else}}{{repl end}}'
+```
+
+In the example above, the `deployment/my-worker` status informer is excluded unless the statement in the `ConfigOptionEquals` template function evaluates to true.
+
+For more information about using template functions in application manifest files, see [About Template Functions](/reference/template-functions-about).
+
+## Understanding Application Status
+
+This section provides information about how Replicated interprets and aggregates the status of Kubernetes resources for your application to report an application status.
+
+### Resource Statuses
+
+Possible resource statuses are Ready, Updating, Degraded, Unavailable, and Missing. 
+
+The following table lists the supported Kubernetes resources and the conditions that contribute to each status:
+
+<StatusesTable/>
+
+### Aggregate Application Status
+
+<AggregateStatusIntro/>
+
+<AggregateStatus/>
+
+---
+
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import ServicePortNote from "../partials/custom-resource-application/_servicePort-note.mdx"
+import GiteaKotsApp from "../partials/getting-started/_gitea-kots-app-cr.mdx"
+import GiteaHelmChart from "../partials/getting-started/_gitea-helmchart-cr.mdx"
+import GiteaK8sApp from "../partials/getting-started/_gitea-k8s-app-cr.mdx"
+import PortsApplicationURL from "../partials/custom-resource-application/_ports-applicationURL.mdx"
+import NginxKotsApp from "../partials/application-links/_nginx-kots-app.mdx"
+import NginxK8sApp from "../partials/application-links/_nginx-k8s-app.mdx"
+import NginxService from "../partials/application-links/_nginx-service.mdx"
+import NginxDeployment from "../partials/application-links/_nginx-deployment.mdx"
+
+# Port Forwarding Services with KOTS
+
+This topic describes how to add one or more ports to the Replicated KOTS port forward tunnel by configuring the `ports` key in the KOTS Application custom resource.
+
+The information in this topic applies to existing cluster installations. For information about exposing services for Replicated kURL or Replicated Embedded Cluster installations, see [Exposing Services Using NodePorts](kurl-nodeport-services).
+
+## Overview
+
+For installations into existing clusters, KOTS automatically creates a port forward tunnel and exposes the Admin Console on port 8800 where it can be accessed by users. In addition to the 8800 Admin Console port, you can optionally add one or more extra ports to the port forward tunnel.
+
+Adding ports to the port forward tunnel allows you to port forward application services without needing to manually run the `kubectl port-forward` command. You can also add a link to the Admin Console dashboard that points to port-forwarded services.
+
+This can be particularly useful when developing and testing KOTS releases for your application, because it provides a quicker way to access an application after installation compared to setting up an ingress controller or adding a load balancer.
+
+## Port Forward a Service with the KOTS Application `ports` Key
+
+To port forward a service with KOTS for existing cluster installations:
+
+1. In a new release, configure the [`ports`](/reference/custom-resource-application#ports) key in the KOTS Application custom resource with details for the target service. For example:
+
+   ```yaml
+   apiVersion: kots.io/v1beta1
+   kind: Application
+   metadata:
+     name: my-application
+   spec:
+     ports:
+       - serviceName: my-service
+         servicePort: 3000
+         localPort: 8888
+   ```
+
+   1. For `ports.serviceName`, add the name of the service. KOTS can create a port forward to ClusterIP, NodePort, or LoadBalancer services. For more information about Kubernetes service types, see [Service](https://kubernetes.io/docs/concepts/services-networking/service/) in the Kubernetes documentation.
+
+   1. For `ports.servicePort`, add the `containerPort` of the Pod where the service is running. This is the port where KOTS forwards traffic.
+
+      <ServicePortNote/>
+
+   1. For `ports.localPort`, add the port to map on the local workstation.
+
+1. 
Promote the release to the channel that you use for internal testing, then install in a development environment to test your changes.
+
+   When the application is in a Ready state and the KOTS port forward is running, you will see output similar to the following:
+
+   ```bash
+   • Press Ctrl+C to exit
+   • Go to http://localhost:8800 to access the Admin Console
+   • Go to http://localhost:8888 to access the application
+   ```
+   Confirm that you can access the service at the URL provided in the KOTS CLI output.
+
+1. (Optional) Add a link to the service on the Admin Console dashboard. See [Add a Link to a Port-Forwarded Service on the Admin Console Dashboard](#add-link) below.
+
+## Add a Link to a Port-Forwarded Service on the Admin Console Dashboard {#add-link}
+
+After you add a service to the KOTS port forward tunnel, you can also optionally add a link to the port-forwarded service on the Admin Console dashboard.
+
+To add a link to a port-forwarded service, add the _same_ URL in the KOTS Application custom resource `ports.applicationUrl` and Kubernetes SIG Application custom resource `spec.descriptor.links.url` fields. When the URLs in these fields match, KOTS adds a link on the Admin Console dashboard where the given service can be accessed. This process automatically links to the hostname in the browser (where the Admin Console is being accessed) and appends the specified `localPort`.
+
+To add a link to a port-forwarded service on the Admin Console dashboard:
+
+1. In a new release, open the KOTS Application custom resource and add a URL to the `ports.applicationUrl` field. For example:
+
+   ```yaml
+   apiVersion: kots.io/v1beta1
+   kind: Application
+   metadata:
+     name: my-application
+   spec:
+     ports:
+       - serviceName: my-service
+         servicePort: 3000
+         localPort: 8888
+         applicationUrl: "http://my-service"
+   ```
+
+   Consider the following guidelines for this URL:
+   * Use HTTP instead of HTTPS unless TLS termination takes place in the application Pod.
+   * KOTS rewrites the URL with the hostname in the browser during deployment. So, you can use any hostname for the URL, such as the name of the service. For example, `http://my-service`.
+
+1. Add a Kubernetes SIG Application custom resource in the release. For example:
+
+   ```yaml
+   # app.k8s.io/v1beta1 Application Custom resource
+
+   apiVersion: app.k8s.io/v1beta1
+   kind: Application
+   metadata:
+     name: "my-application"
+   spec:
+     descriptor:
+       links:
+         - description: Open App
+           # url matches ports.applicationUrl in the KOTS Application custom resource
+           url: "http://my-service"
+   ```
+
+   1. For `spec.descriptor.links.description`, add the link text that will appear on the Admin Console dashboard. For example, `Open App`.
+
+   1. For `spec.descriptor.links.url`, add the _same_ URL that you used in the `ports.applicationUrl` in the KOTS Application custom resource.
+
+1. Promote the release to the channel that you use for internal testing, then install in a development environment to test your changes.
+
+   When the application is in a Ready state, confirm that you can access the service by clicking the link that appears on the dashboard. For example:
+
+   <img alt="Admin Console dashboard with Open App link" src="/images/gitea-open-app.png" width="700px"/>
+
+   [View a larger version of this image](/images/gitea-open-app.png)
+
+## Access Port-Forwarded Services
+
+This section describes how to access port-forwarded services. 
+
+### Command Line
+
+Run [`kubectl kots admin-console`](/reference/kots-cli-admin-console-index) to open the KOTS port forward tunnel.
+
+The `kots admin-console` command runs the equivalent of `kubectl port-forward svc/myapplication-service <local-port>:<remote-port>`, then prints a message with the URLs where the Admin Console and any port-forwarded services can be accessed. For more information about the `kubectl port-forward` command, see [port-forward](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#port-forward) in the Kubernetes documentation.
+
+For example:
+
+```bash
+kubectl kots admin-console --namespace gitea
+```
+```bash
+• Press Ctrl+C to exit
+• Go to http://localhost:8800 to access the Admin Console
+• Go to http://localhost:8888 to access the application
+```
+
+### Admin Console
+
+You can optionally add a link to a port-forwarded service from the Admin Console dashboard. This requires additional configuration. For more information, see [Add a Link to a Port-Forwarded Service on the Admin Console Dashboard](#add-link).
+
+The following example shows an **Open App** link on the dashboard of the Admin Console for an application named Gitea:
+
+<img alt="Admin Console dashboard with Open App link" src="/images/gitea-open-app.png" width="700px"/>
+
+[View a larger version of this image](/images/gitea-open-app.png)
+
+## Examples
+
+This section provides examples of how to configure the `ports` key to port-forward a service in existing cluster installations and add links to services on the Admin Console dashboard.
+
+### Example: Bitnami Gitea Helm Chart with LoadBalancer Service
+
+This example uses a KOTS Application custom resource and a Kubernetes SIG Application custom resource to configure port forwarding for the Bitnami Gitea Helm chart in existing cluster installations, and add a link to the port-forwarded service on the Admin Console dashboard. To view the Gitea Helm chart source, see [bitnami/gitea](https://github.com/bitnami/charts/blob/main/bitnami/gitea) in GitHub.
+
+To test this example:
+
+1. Pull version 1.0.6 of the Gitea Helm chart from Bitnami:
+
+   ```
+   helm pull oci://registry-1.docker.io/bitnamicharts/gitea --version 1.0.6
+   ```
+
+1. Add the `gitea-1.0.6.tgz` chart archive to a new, empty release in the Vendor Portal along with the `kots-app.yaml`, `k8s-app.yaml`, and `gitea.yaml` files provided below. Promote to the channel that you use for internal testing. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases).
+
+   <Tabs>
+   <TabItem value="kots-app" label="kots-app.yaml" default>
+    <h5>Description</h5>
+    <p>Based on the <a href="https://github.com/bitnami/charts/blob/main/bitnami/gitea/templates/svc.yaml">templates/svc.yaml</a> and <a href="https://github.com/bitnami/charts/blob/main/bitnami/gitea/values.yaml">values.yaml</a> files in the Gitea Helm chart, the following KOTS Application custom resource adds port 3000 to the port forward tunnel and maps local port 8888. Port 3000 is the container port of the Pod where the <code>gitea</code> service runs.</p>
+    <h5>YAML</h5>
+    <GiteaKotsApp/>
+   </TabItem>
+   <TabItem value="k8s-app" label="k8s-app.yaml" default>
+    <h5>Description</h5>
+    <p>The Kubernetes Application custom resource lists the same URL as the `ports.applicationUrl` field in the KOTS Application custom resource. This adds a link to the port-forwarded service from the Admin Console dashboard. 
It also triggers KOTS to rewrite the URL to use the hostname in the browser and append the specified `localPort`. The label to be used for the link in the Admin Console is "Open App".</p> + <h5>YAML</h5> + <GiteaK8sApp/> + </TabItem> + <TabItem value="helmchart" label="gitea.yaml" default> + <h5>Description</h5> + <p>The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart. The <code>name</code> and <code>chartVersion</code> listed in the HelmChart custom resource must match the name and version of a Helm chart archive in the release. Each Helm chart archive in a release requires a unique HelmChart custom resource.</p> + <h5>YAML</h5> + <GiteaHelmChart/> + </TabItem> + </Tabs> + +1. Install the release to confirm that the service was port-forwarded successfully. To test the port forward, click **Open App** on the Admin Console dashboard after the application reaches a Ready state. For more information, see [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster). + +### Example: NGINX Application with ClusterIP and NodePort Services + +The following example demonstrates how to link to a port-forwarded ClusterIP service for existing cluster installations. + +It also shows how to use the `ports` key to add a link to a NodePort service for kURL installations. Although the primary purpose of the `ports` key is to port forward services for existing cluster installations, it is also possible to use the `ports` key so that links to NodePort services for Embedded Cluster or kURL installations use the hostname in the browser. For information about exposing NodePort services for Embedded Cluster or kURL installations, see [Exposing Services Using NodePorts](kurl-nodeport-services). + +To test this example: + +1. Add the `example-service.yaml`, `example-deployment.yaml`, `kots-app.yaml`, and `k8s-app.yaml` files provided below to a new, empty release in the Vendor Portal. Promote to the channel that you use for internal testing. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases). + + <Tabs> + <TabItem value="service" label="example-service.yaml" default> + <h5>Description</h5> + <p>The YAML below contains ClusterIP and NodePort specifications for a service named <code>nginx</code>. Each specification uses the <code>kots.io/when</code> annotation with the Replicated IsKurl template function to conditionally include the service based on the installation type (existing cluster or kURL cluster). For more information, see <a href="/vendor/packaging-include-resources">Conditionally Including or Excluding Resources</a> and <a href="/reference/template-functions-static-context#iskurl">IsKurl</a>.</p> + <p>As shown below, both the ClusterIP and NodePort <code>nginx</code> services are exposed on port 80.</p> + <h5>YAML</h5> + <NginxService/> + </TabItem> + <TabItem value="deployment" label="example-deployment.yaml" default> + <h5>Description</h5> + <p>A basic Deployment specification for the NGINX application.</p> + <h5>YAML</h5> + <NginxDeployment/> + </TabItem> + <TabItem value="kots-app" label="kots-app.yaml" default> + <h5>Description</h5> + <p>The KOTS Application custom resource below adds port 80 to the KOTS port forward tunnel and maps port 8888 on the local machine. 
The specification also includes <code>applicationUrl: "http://nginx"</code> so that a link to the service can be added to the Admin Console dashboard.</p>
+    <h5>YAML</h5>
+    <NginxKotsApp/>
+   </TabItem>
+   <TabItem value="k8s-app" label="k8s-app.yaml" default>
+    <h5>Description</h5>
+    <p>The Kubernetes Application custom resource lists the same URL as the `ports.applicationUrl` field in the KOTS Application custom resource (`"http://nginx"`). This adds a link to the port-forwarded service on the Admin Console dashboard that uses the hostname in the browser and appends the specified `localPort`. The label to be used for the link in the Admin Console is "Open App".</p>
+    <h5>YAML</h5>
+    <NginxK8sApp/>
+   </TabItem>
+   </Tabs>
+
+1. Install the release into an existing cluster and confirm that the service was port-forwarded successfully by clicking **Open App** on the Admin Console dashboard. For more information, see [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster).
+
+1. If there is not already a kURL installer promoted to the channel, add a kURL installer to the release to support kURL installs. For more information, see [Creating a kURL Installer](/vendor/packaging-embedded-kubernetes).
+
+1. Install the release on a VM and confirm that the service was exposed successfully. To test the port forward, click **Open App** on the Admin Console dashboard after the application reaches a Ready state. For more information, see [Online Installation with kURL](/enterprise/installing-kurl).
+
+   :::note
+   Ensure that the VM where you install allows HTTP traffic.
+   :::
+
+---
+
+
+import OverviewProm from "../partials/monitoring/_overview-prom.mdx"
+import LimitationEc from "../partials/monitoring/_limitation-ec.mdx"
+
+# Adding Custom Graphs
+
+This topic describes how to customize the graphs that are displayed on the Replicated Admin Console dashboard.
+
+## Overview of Monitoring with Prometheus
+
+<OverviewProm/>
+
+## About Customizing Graphs
+
+If your application exposes Prometheus metrics, you can add custom graphs to the Admin Console dashboard to expose these metrics to your users. You can also modify or remove the default graphs.
+
+To customize the graphs that are displayed on the Admin Console, edit the [`graphs`](/reference/custom-resource-application#graphs) property in the KOTS Application custom resource manifest file. At a minimum, each graph in the `graphs` property must include the following fields:
+* `title`: Defines the graph title that is displayed on the Admin Console.
+* `query`: A valid PromQL Prometheus query. You can also include a list of multiple queries by using the `queries` property. For more information about querying Prometheus with PromQL, see [Querying Prometheus](https://prometheus.io/docs/prometheus/latest/querying/basics/) in the Prometheus documentation.
+
+:::note
+By default, a kURL cluster exposes the Prometheus expression browser at NodePort 30900. For more information, see [Expression Browser](https://prometheus.io/docs/visualization/browser/) in the Prometheus documentation.
+:::
+
+## Limitation
+
+<LimitationEc/>
+
+## Add and Modify Graphs
+
+To customize graphs on the Admin Console dashboard:
+
+1. In the [Vendor Portal](https://vendor.replicated.com/), click **Releases**. Then, either click **Create release** to create a new release, or click **Edit YAML** to edit an existing release.
+
+1. 
Create or open the [KOTS Application](/reference/custom-resource-application) custom resource manifest file. + +1. In the Application manifest file, under `spec`, add a `graphs` property. Edit the `graphs` property to modify or remove existing graphs or add a new custom graph. For more information, see [graphs](/reference/custom-resource-application#graphs) in _Application_. + + **Example**: + + The following example shows the YAML for adding a custom graph that displays the total number of user signups for an application. + + ```yaml + apiVersion: kots.io/v1beta1 + kind: Application + metadata: + name: my-application + spec: + graphs: + - title: User Signups + query: 'sum(user_signup_events_total)' + ``` + +1. (Optional) Under `graphs`, copy and paste the specs for the default Disk Usage, CPU Usage, and Memory Usage Admin Console graphs provided in the YAML below. + + Adding these default graphs to the Application custom resource manifest ensures that they are not overwritten when you add one or more custom graphs. When the default graphs are included in the Application custom resource, the Admin Console displays them in addition to any custom graphs. + + Alternatively, you can exclude the YAML specs for the default graphs to remove them from the Admin Console dashboard. + + ```yaml + apiVersion: kots.io/v1beta1 + kind: Application + metadata: + name: my-application + spec: + graphs: + - title: User Signups + query: 'sum(user_signup_events_total)' + # Disk Usage, CPU Usage, and Memory Usage below are the default graphs + - title: Disk Usage + queries: + - query: 'sum((node_filesystem_size_bytes{job="node-exporter",fstype!="",instance!=""} - node_filesystem_avail_bytes{job="node-exporter", fstype!=""})) by (instance)' + legend: 'Used: {{ instance }}' + - query: 'sum((node_filesystem_avail_bytes{job="node-exporter",fstype!="",instance!=""})) by (instance)' + legend: 'Available: {{ instance }}' + yAxisFormat: bytes + - title: CPU Usage + query: 'sum(rate(container_cpu_usage_seconds_total{namespace="{{repl Namespace}}",container!="POD",pod!=""}[5m])) by (pod)' + legend: '{{ pod }}' + - title: Memory Usage + query: 'sum(container_memory_usage_bytes{namespace="{{repl Namespace}}",container!="POD",pod!=""}) by (pod)' + legend: '{{ pod }}' + yAxisFormat: bytes + ``` +1. Save and promote the release to a development environment to test your changes. + + +--- + + +# About Integrating with CI/CD + +import TestRecs from "../partials/ci-cd/_test-recs.mdx" + +# About Integrating with CI/CD + +This topic provides an introduction to integrating Replicated CLI commands in your continuous integration and continuous delivery (CI/CD) pipelines, including Replicated's best practices and recommendations. + +## Overview + +Using CI/CD workflows to automatically compile code and run tests improves the speed at which teams can test, iterate on, and deliver releases to customers. When you integrate Replicated CLI commands into your CI/CD workflows, you can automate the process of deploying your application to clusters for testing, rather than needing to manually create and then archive channels, customers, and environments for testing. + +You can also include continuous delivery workflows to automatically promote a release to a shared channel in your Replicated team. This allows you to more easily share releases with team members for internal testing and iteration, and then to promote releases when they are ready to be shared with customers. 
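+
+For example, the following is a minimal sketch of the kinds of Replicated CLI commands that a CI job might run to create and promote a release and to provision a disposable test cluster with Compatibility Matrix. This is an outline rather than a complete workflow: it assumes that the Replicated CLI is installed on the runner, that the `REPLICATED_API_TOKEN` and `REPLICATED_APP` environment variables are set, and that `./manifests` contains the release files. The channel name and version label shown are placeholders.
+
+```shell
+# Create a release from local manifests and promote it to a
+# temporary channel for testing. The channel name and version
+# label below are placeholders; adjust to your own conventions.
+replicated release create \
+  --yaml-dir ./manifests \
+  --promote ci-test-channel \
+  --version "0.0.1-dev.1"
+
+# Provision a disposable cluster to deploy and test against.
+# The --ttl flag ensures the cluster is removed automatically.
+replicated cluster create \
+  --distribution kind \
+  --version 1.29.2 \
+  --ttl 1h
+```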
+
+## Best Practices and Recommendations
+
+The following are Replicated's best practices and recommendations for CI/CD:
+
+* Include unique workflows for development and for releasing your application. This allows you to run tests on every commit, and then to promote releases to internal and customer-facing channels only when ready. For more information about the workflows that Replicated recommends, see [Recommended CI/CD Workflows](ci-workflows).
+
+* Integrate Replicated Compatibility Matrix into your CI/CD workflows to quickly create multiple types of clusters where you can deploy and test your application. Supported distributions include OpenShift, GKE, EKS, and more. For more information, see [About Compatibility Matrix](testing-about).
+
+* If you use the GitHub Actions CI/CD platform, integrate the custom GitHub actions that Replicated maintains to replace repetitive tasks related to distributing your application with Replicated or using Compatibility Matrix. For more information, see [Integrating Replicated GitHub Actions](/vendor/ci-workflows-github-actions).
+
+* To help demonstrate a secure software supply chain, sign all commits and container images. Additionally, provide a verification mechanism for container images.
+
+* Use custom RBAC policies to control the actions that can be performed in your CI/CD workflows. For example, you can create a policy that blocks the ability to promote releases to your production channel. For more information about creating custom RBAC policies in the Vendor Portal, see [Configuring RBAC Policies](/vendor/team-management-rbac-configuring). For a full list of available RBAC resources, see [RBAC Resource Names](/vendor/team-management-rbac-resource-names).
+
+* Incorporating code tests into your CI/CD workflows is important for ensuring that developers receive quick feedback and can make updates in small iterations. Replicated recommends that you create and run all of the following test types as part of your CI/CD workflows:
+  <TestRecs/>
+
+---
+
+
+# Integrating Replicated GitHub Actions
+
+# Integrating Replicated GitHub Actions
+
+This topic describes how to integrate Replicated's custom GitHub actions into continuous integration and continuous delivery (CI/CD) workflows that use the GitHub Actions platform.
+
+## Overview
+
+Replicated maintains a set of custom GitHub actions that are designed to replace repetitive tasks related to distributing your application with Replicated and to using the Compatibility Matrix, such as:
+ * Creating and removing customers, channels, and clusters
+ * Promoting releases
+ * Creating a matrix of clusters for testing based on the Kubernetes distributions and versions where your customers are running application instances
+ * Reporting the success or failure of tests
+
+If you use GitHub Actions as your CI/CD platform, you can include these custom actions in your workflows rather than using Replicated CLI commands. Integrating the Replicated GitHub actions into your CI/CD pipeline helps you quickly build workflows with the required inputs and outputs, without needing to manually create the required CLI commands for each step.
+
+To view all the available GitHub actions that Replicated maintains, see the [replicatedhq/replicated-actions](https://github.com/replicatedhq/replicated-actions/) repository in GitHub. 
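+
+As a brief sketch, the following shows how one of these actions might be referenced as a step in a GitHub Actions job. The inputs shown (`api-token`, `kubernetes-distribution`, `kubernetes-version`, and `ttl`) are based on the `create-cluster` action's interface at the time of writing; check the action's README in the repository linked above for the authoritative list of inputs and available versions:
+
+```yaml
+# Sketch: provision a test cluster with the create-cluster action.
+# Input names are based on the replicated-actions repository and may
+# change; consult the action's README before use.
+- name: Create test cluster
+  id: create-cluster
+  uses: replicatedhq/replicated-actions/create-cluster@v1
+  with:
+    api-token: ${{ secrets.REPLICATED_API_TOKEN }}
+    kubernetes-distribution: kind
+    kubernetes-version: "1.29.2"
+    ttl: 1h
+```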
+
+## GitHub Actions Workflow Examples
+
+The [replicatedhq/replicated-actions](https://github.com/replicatedhq/replicated-actions#examples) repository in GitHub contains example workflows that use the Replicated GitHub actions. You can use these workflows as a template for your own GitHub Actions CI/CD workflows:
+
+* For a simplified development workflow, see [development-helm-prepare-cluster.yaml](https://github.com/replicatedhq/replicated-actions/blob/main/example-workflows/development-helm-prepare-cluster.yaml).
+* For a customizable development workflow for applications installed with the Helm CLI, see [development-helm.yaml](https://github.com/replicatedhq/replicated-actions/blob/main/example-workflows/development-helm.yaml).
+* For a customizable development workflow for applications installed with KOTS, see [development-kots.yaml](https://github.com/replicatedhq/replicated-actions/blob/main/example-workflows/development-kots.yaml).
+* For a release workflow, see [release.yaml](https://github.com/replicatedhq/replicated-actions/blob/main/example-workflows/release.yaml).
+
+## Integrate GitHub Actions
+
+The following table lists GitHub actions maintained by Replicated that you can integrate into your CI/CD workflows. The table also describes when to use the action in a workflow and indicates the related Replicated CLI command where applicable.
+
+:::note
+For an up-to-date list of the available custom GitHub actions, see the [replicatedhq/replicated-actions](https://github.com/replicatedhq/replicated-actions/) repository in GitHub.
+:::
+
+<table>
+  <tr>
+    <th width="25%">GitHub Action</th>
+    <th width="50%">When to Use</th>
+    <th width="25%">Related Replicated CLI Commands</th>
+  </tr>
+  <tr>
+    <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/archive-channel">archive-channel</a></td>
+    <td>
+      <p>In release workflows, a temporary channel is created to promote a release for testing. This action archives the temporary channel after tests complete.</p>
+      <p>See <a href="/vendor/ci-workflows#rel-cleanup">Archive the temporary channel and customer</a> in <em>Recommended CI/CD Workflows</em>.</p>
+    </td>
+    <td><a href="/reference/replicated-cli-channel-delete"><code>channel delete</code></a></td>
+  </tr>
+  <tr>
+    <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/archive-customer">archive-customer</a></td>
+    <td>
+      <p>In release workflows, a temporary customer is created so that a release can be installed for testing. 
This action archives the temporary customer after tests complete.</p> + <p>See <a href="/vendor/ci-workflows#rel-cleanup">Archive the temporary channel and customer</a> in <em>Recommended CI/CD Workflows</em>.</p> + </td> + <td>N/A</td> + </tr> + <tr> + <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/create-cluster">create-cluster</a></td> + <td> + <p>In release workflows, use this action to create one or more clusters for testing.</p> + <p>See <a href="/vendor/ci-workflows#rel-deploy">Create cluster matrix, deploy, and test</a> in <em>Recommended CI/CD Workflows</em>.</p> + </td> + <td><a href="/reference/replicated-cli-cluster-create"><code>cluster create</code></a></td> + </tr> + <tr> + <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/create-release">create-release</a></td> + <td> + <p>In release workflows, use this action to create a release to be installed and tested, and optionally to be promoted to a shared channel after tests complete.</p> + <p>See <a href="/vendor/ci-workflows#rel-release">Create a release and promote to a temporary channel</a> in <em>Recommended CI/CD Workflows</em>. </p> + </td> + <td><a href="/reference/replicated-cli-release-create"><code>release create</code></a></td> + </tr> + <tr> + <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/get-customer-instances">get-customer-instances</a></td> + <td> + <p>In release workflows, use this action to create a matrix of clusters for running tests based on the Kubernetes distributions and versions of active instances of your application running in customer environments.</p> + <p>See <a href="/vendor/ci-workflows#rel-deploy">Create cluster matrix, deploy, and test</a> in <em>Recommended CI/CD Workflows</em>.</p> + </td> + <td>N/A</td> + </tr> + <tr> + <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/helm-install">helm-install</a></td> + <td> + <p>In development or release workflows, use this action to install a release using the Helm CLI in one or more clusters for testing.</p> + <p>See <a href="/vendor/ci-workflows#rel-deploy">Create cluster matrix, deploy, and test</a> in <em>Recommended CI/CD Workflows</em>.</p> + </td> + <td>N/A</td> + </tr> + <tr> + <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/kots-install">kots-install</a></td> + <td> + <p>In development or release workflows, use this action to install a release with Replicated KOTS in one or more clusters for testing.</p> + <p>See <a href="/vendor/ci-workflows#rel-deploy">Create cluster matrix, deploy, and test</a> in <em>Recommended CI/CD Workflows</em>.</p> + </td> + <td>N/A</td> + </tr> + <tr> + <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/prepare-cluster">prepare-cluster</a></td> + <td> + <p>In development workflows, use this action to create a cluster, create a temporary customer of type <code>test</code>, and install an application in the cluster.</p> + <p>See <a href="/vendor/ci-workflows#dev-deploy">Prepare clusters, deploy, and test</a> in <em>Recommended CI/CD Workflows</em>.</p> + </td> + <td><a href="/reference/replicated-cli-cluster-prepare"><code>cluster prepare</code></a></td> + </tr> + <tr> + <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/promote-release">promote-release</a></td> + <td> + <p>In release workflows, use this action to promote a release to an internal or customer-facing channel (such as Unstable, Beta, or Stable) after tests pass.</p> + <p>See <a 
href="/vendor/ci-workflows#rel-promote">Promote to a shared channel</a> in <em>Recommended CI/CD Workflows</em>.</p> + </td> + <td><a href="/reference/replicated-cli-release-promote"><code>release promote</code></a></td> + </tr> + <tr> + <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/remove-cluster">remove-cluster</a></td> + <td> + <p>In development or release workflows, use this action to remove a cluster after running tests if no <code>ttl</code> was set for the cluster.</p> + <p>See <a href="/vendor/ci-workflows#dev-deploy">Prepare clusters, deploy, and test</a> and <a href="/vendor/ci-workflows#rel-deploy">Create cluster matrix, deploy, and test</a> in <em>Recommended CI/CD Workflows</em>.</p> + </td> + <td><a href="/reference/replicated-cli-cluster-rm"><code>cluster rm</code></a></td> + </tr> + <tr> + <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/report-compatibility-result">report-compatibility-result</a></td> + <td>In development or release workflows, use this action to report the success or failure of tests that ran in clusters provisioned by the Compatibility Matrix.</td> + <td><code>release compatibility</code></td> + </tr> + <tr> + <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/upgrade-cluster">upgrade-cluster</a></td> + <td>In release workflows, use this action to test your application's compatibility with Kubernetes API resource version migrations after upgrading.</td> + <td><a href="/reference/replicated-cli-cluster-upgrade"><code>cluster upgrade</code></a></td> + </tr> +</table> + + +--- + + +# Recommended CI/CD Workflows + +import Build from "../partials/ci-cd/_build-source-code.mdx" + +# Recommended CI/CD Workflows + +This topic provides Replicated's recommended development and release workflows for your continuous integration and continuous delivery (CI/CD) pipelines. + +## Overview + +Replicated recommends that you maintain unique CI/CD workflows for development (continuous integration) and for releasing your software (continuous delivery). The development and release workflows in this topic describe the recommended steps and jobs to include in your own workflows, including how to integrate Replicated Compatibility Matrix into your workflows for testing. For more information about Compatibility Matrix, see [About Compatibility Matrix](testing-about). + +For each step, the corresponding Replicated CLI command is provided. Additionally, for users of the GitHub Actions platform, a corresponding custom GitHub action that is maintained by Replicated is also provided. For more information about using the Replicated CLI, see [Installing the Replicated CLI](/reference/replicated-cli-installing). For more information about the Replicated GitHub actions, see [Integrating Replicated GitHub Actions](ci-workflows-github-actions). + +:::note +How you implement CI/CD workflows varies depending on the platform, such as GitHub, GitLab, CircleCI, TravisCI, or Jenkins. Refer to the documentation for your CI/CD platform for additional guidance on how to create jobs and workflows. +::: + +## About Creating RBAC Policies for CI/CD + +Replicated recommends using custom RBAC policies to control the actions that can be performed in your CI/CD workflows. For example, you can create a policy using the [`kots/app/[]/channel/[]/promote`](/vendor/team-management-rbac-resource-names#kotsappchannelpromote) resource that blocks the ability to promote releases to your production channel. 
This allows you to use CI/CD for testing without the risk of accidentally releasing to customers.
+
+For more information about creating custom RBAC policies in the Vendor Portal, including examples, see [Configuring RBAC Policies](/vendor/team-management-rbac-configuring).
+
+For a full list of available RBAC resources, see [RBAC Resource Names](/vendor/team-management-rbac-resource-names).
+
+## Development Workflow
+
+In a development workflow (which runs multiple times per day and is triggered by a commit to the application code repository), the source code is built and the application is deployed to clusters for testing. Additionally, for applications managed in the Replicated Vendor Portal, a release is created and promoted to a channel where it can be shared with internal teams.
+
+The following diagram shows the recommended development workflow, where a commit to the application code repository triggers the source code to be built and the application to be deployed to clusters for testing:
+
+![Development CI workflow](/images/ci-workflow-dev.png)
+
+[View a larger version of this image](/images/ci-workflow-dev.png)
+
+The following describes the recommended steps to include in development workflows, as shown in the diagram above:
+1. [Define workflow triggers](#dev-triggers)
+1. [Build source code](#dev-build)
+1. [Prepare clusters, deploy, and test](#dev-deploy)
+
+### Define workflow triggers {#dev-triggers}
+
+Run a development workflow on every commit to a branch in your code repository that is _not_ `main`.
+
+The following example shows defining a workflow trigger in GitHub Actions that runs the workflow when a commit is pushed to any branch other than `main`:
+
+```yaml
+name: development-workflow-example
+
+on:
+  push:
+    branches:
+      - '*' # matches every branch that doesn't contain a '/'
+      - '*/*' # matches every branch containing a single '/'
+      - '**' # matches every branch
+      - '!main' # excludes main
+
+jobs:
+  ...
+```
+
+### Build source code {#dev-build}
+
+<Build/>
+
+### Prepare clusters, deploy, and test {#dev-deploy}
+
+Add a job with the following steps to prepare clusters with Replicated Compatibility Matrix, deploy the application, and run tests:
+
+1. Use Replicated Compatibility Matrix to prepare one or more clusters and deploy the application. Consider the following recommendations:
+
+   * For development workflows, Replicated recommends that you use the `cluster prepare` command to provision one or more clusters with Compatibility Matrix. The `cluster prepare` command creates a cluster, creates a release, and installs the release in the cluster, without the need to promote the release to a channel or create a temporary customer. See the [`cluster prepare`](/reference/replicated-cli-cluster-prepare) Replicated CLI command. Or, for GitHub Actions workflows, see the [prepare-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/prepare-cluster) GitHub action.
+
+     :::note
+     The `cluster prepare` command is Beta. It is recommended for development only and is not recommended for production releases. For production releases, Replicated recommends that you use the `cluster create` command instead. For more information, see [Create cluster matrix, deploy, and test](#rel-deploy) in _Replicated Platform Release Workflow_ below.
+     :::
+
+   * The type and number of clusters that you choose to provision as part of a development workflow depends on how frequently you intend the workflow to run. 
For example, for workflows that run multiple times a day, you might prefer to provision cluster distributions that can be created quickly, such as kind clusters.
+
+1. Run tests, such as integration, smoke, and canary tests. For more information about recommended types of tests to run, see [Best Practices and Recommendations](/vendor/ci-overview#best-practices-and-recommendations) in _About Integrating with CI/CD_.
+
+1. After the tests complete, remove the cluster. Alternatively, if you used the `--ttl` flag with the `cluster prepare` command, the cluster is automatically removed when the time period provided is reached. See the [`cluster rm`](/reference/replicated-cli-cluster-rm) Replicated CLI command. Or, for GitHub Actions workflows, see the [remove-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/remove-cluster) action.
+
+## Compatibility Matrix-Only Development Workflow
+
+In a development workflow (which runs multiple times per day and is triggered by a commit to the application code repository), the source code is built and the application is deployed to clusters for testing.
+
+This example development workflow does _not_ create releases or customers in the Replicated vendor platform. This workflow is useful for applications that are not distributed or managed in the Replicated platform.
+
+The following describes the recommended steps to include in a development workflow using Compatibility Matrix:
+
+1. [Define workflow triggers](#dev-triggers)
+1. [Build source code](#dev-build)
+1. [Create cluster matrix, deploy, and test](#dev-deploy)
+
+### Define workflow triggers {#dev-triggers}
+
+Run a development workflow on every commit to a branch in your code repository that is _not_ `main`.
+
+The following example shows defining a workflow trigger in GitHub Actions that runs the workflow when a commit is pushed to any branch other than `main`:
+
+```yaml
+name: development-workflow-example
+
+on:
+  push:
+    branches:
+      - '*' # matches every branch that doesn't contain a '/'
+      - '*/*' # matches every branch containing a single '/'
+      - '**' # matches every branch
+      - '!main' # excludes main
+
+jobs:
+  ...
+```
+
+### Build source code {#dev-build}
+
+<Build/>
+
+
+### Create cluster matrix, deploy, and test {#dev-deploy}
+
+Add a job with the following steps to provision clusters with Compatibility Matrix, deploy your application to the clusters, and run tests:
+
+1. Use Compatibility Matrix to create a matrix of different Kubernetes cluster distributions and versions to run tests against. See the [cluster create](/reference/replicated-cli-cluster-create) Replicated CLI command. Or, for GitHub Actions workflows, see the [create-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/create-cluster) action.
+
+   The following example shows creating a matrix of clusters of different distributions and versions using GitHub Actions:
+
+   ```yaml
+   # github actions cluster matrix example
+
+   compatibility-matrix-example:
+     runs-on: ubuntu-22.04
+     strategy:
+       matrix:
+         cluster:
+           - {distribution: kind, version: "1.25"}
+           - {distribution: kind, version: "1.26"}
+           - {distribution: eks, version: "1.26"}
+           - {distribution: gke, version: "1.27"}
+           - {distribution: openshift, version: "4.13.0-okd"}
+   ```
+
+1. For each cluster created, use the cluster's kubeconfig to update the Kubernetes context and then install the target application in the cluster. 
For more information about accessing the kubeconfig for clusters created with Compatibility Matrix, see [cluster kubeconfig](/reference/replicated-cli-cluster-kubeconfig).
+
+1. Run tests, such as integration, smoke, and canary tests. For more information about recommended types of tests to run, see [Best Practices and Recommendations](/vendor/ci-overview#best-practices-and-recommendations) in _About Integrating with CI/CD_.
+
+1. Delete the cluster when the tests complete. See the [cluster rm](/reference/replicated-cli-cluster-rm) Replicated CLI command. Or, for GitHub Actions workflows, see the [remove-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/remove-cluster) action.
+
+## Replicated Platform Release Workflow
+
+In a release workflow (which is triggered by an action such as a commit to `main` or a tag being pushed to the repository), the source code is built, the application is deployed to clusters for testing, and then the application is made available to customers. In this example release workflow, a release is created and promoted to a channel in the Replicated vendor platform so that it can be installed by internal teams or by customers.
+
+The following diagram demonstrates a release workflow that promotes a release to the Beta channel when a tag with the format `"v*.*.*-beta.*"` is pushed:
+
+![Workflow that promotes to Beta channel](/images/ci-workflow-beta.png)
+
+[View a larger version of this image](/images/ci-workflow-beta.png)
+
+The following describes the recommended steps to include in release workflows, as shown in the diagram above:
+
+1. [Define workflow triggers](#rel-triggers)
+1. [Build source code](#rel-build)
+1. [Create a release and promote to a temporary channel](#rel-release)
+1. [Create cluster matrix, deploy, and test](#rel-deploy)
+1. [Promote to a shared channel](#rel-promote)
+1. [Archive the temporary channel and customer](#rel-cleanup)
+
+### Define workflow triggers {#rel-triggers}
+
+Create unique workflows for promoting releases to your team's internal-only, beta, and stable channels. Define unique event triggers for each of your release workflows so that releases are only promoted to a channel when a given condition is met:
+
+* On every commit to the `main` branch in your code repository, promote a release to the channel that your team uses for internal testing (such as the default Unstable channel).
+
+  The following example shows a workflow trigger in GitHub Actions that runs the workflow on commits to `main`:
+
+  ```yaml
+  name: unstable-release-example
+
+  on:
+    push:
+      branches:
+        - 'main'
+
+  jobs:
+    ...
+  ```
+
+* On pushing a tag that contains a version label with the semantic versioning format `x.y.z-beta.n` (such as `1.0.0-beta.1` or `v1.0.0-beta.2`), promote a release to your team's Beta channel.
+
+  The following example shows a workflow trigger in GitHub Actions that runs the workflow when a tag that matches the format `v*.*.*-beta.*` is pushed:
+
+  ```yaml
+  name: beta-release-example
+
+  on:
+    push:
+      tags:
+        - "v*.*.*-beta.*"
+
+  jobs:
+    ...
+  ```
+
+* On pushing a tag that contains a version label with the semantic versioning format `x.y.z` (such as `1.0.0` or `v1.0.1`), promote a release to your team's Stable channel.
+
+  The following example shows a workflow trigger in GitHub Actions that runs the workflow when a tag that matches the format `v*.*.*` is pushed:
+
+  ```yaml
+  name: stable-release-example
+
+  on:
+    push:
+      tags:
+        - "v*.*.*"
+
+  jobs:
+    ... 
+ ``` + +### Build source code {#rel-build} + +<Build/> + +### Create a release and promote to a temporary channel {#rel-release} + +Add a job that creates and promotes a release to a temporary channel. This allows the release to be installed for testing in the next step. See the [release create](/reference/replicated-cli-release-create) Replicated CLI command. Or, for GitHub Actions workflows, see [create-release](https://github.com/replicatedhq/replicated-actions/tree/main/create-release). + +Consider the following requirements and recommendations: + +* Use a consistent naming pattern for the temporary channels. Additionally, configure the workflow so that a new temporary channel with a unique name is created each time that the release workflow runs. + +* Use semantic versioning for the release version label. + + :::note + If semantic versioning is enabled on the channel where you promote the release, then the release version label _must_ be a valid semantic version number. See [Semantic Versioning](releases-about#semantic-versioning) in _About Channels and Releases_. + ::: + +* For Helm chart-based applications, the release version label must match the version in the `version` field of the Helm chart `Chart.yaml` file. To automatically update the `version` field in the `Chart.yaml` file, you can define a step in this job that updates the version label before packaging the Helm chart into a `.tgz` archive. + +* For releases that will be promoted to a customer-facing channel such as Beta or Stable, Replicated recommends that the version label for the release matches the tag that triggered the release workflow. For example, if the tag `1.0.0-beta.1` was used to trigger the workflow, then the version label for the release is also `1.0.0-beta.1`. + +### Create cluster matrix, deploy, and test {#rel-deploy} + +Add a job with the following steps to provision clusters with Compatibility Matrix, deploy the release to the clusters, and run tests: + +1. Create a temporary customer for installing the release. See the [customer create](/reference/replicated-cli-customer-create) Replicated CLI command. Or, for GitHub Actions workflows, see the [create-customer](https://github.com/replicatedhq/replicated-actions/tree/main/create-customer) action. + +1. Use Compatibility Matrix to create a matrix of different Kubernetes cluster distributions and versions to run tests against. See the [cluster create](/reference/replicated-cli-cluster-create) Replicated CLI command. Or, for GitHub Actions workflows, see the [create-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/create-cluster) action. + + Consider the following recommendations: + + * For release workflows, Replicated recommends that you run tests against multiple clusters of different Kubernetes distributions and versions. To help build the matrix, you can review the most common Kubernetes distributions and versions used by your customers on the **Customers > Reporting** page in the Replicated vendor portal. For more information, see [Customer Reporting](/vendor/customer-reporting). + + * When using the Replicated CLI, a list of representative customer instances can be obtained using the `api get` command. For example, `replicated api get /v3/app/[APP_ID]/cluster-usage | jq .` You can further filter these results by `channel_id`, `channel_sequence`, and `version_label`. 
+
+   * GitHub Actions users can also use the `get-customer-instances` action to automate the creation of a cluster matrix based on the distributions of clusters where instances of your application are installed and running. For more information, see the [example workflow](https://github.com/replicatedhq/replicated-actions/blob/main/example-workflows/development-dynamic.yaml) that makes use of [get-customer-instances](https://github.com/replicatedhq/replicated-actions/tree/main/get-customer-instances) in GitHub.
+
+   The following example shows creating a matrix of clusters of different distributions and versions using GitHub Actions:
+
+   ```yaml
+   # github actions cluster matrix example
+
+   compatibility-matrix-example:
+     runs-on: ubuntu-22.04
+     strategy:
+       matrix:
+         cluster:
+           - {distribution: kind, version: "1.25.3"}
+           - {distribution: kind, version: "1.26.3"}
+           - {distribution: eks, version: "1.26"}
+           - {distribution: gke, version: "1.27"}
+           - {distribution: openshift, version: "4.13.0-okd"}
+   ```
+
+1. For each cluster created, use the cluster's kubeconfig to update the Kubernetes context and then install the target application in the cluster. For more information about accessing the kubeconfig for clusters created with Compatibility Matrix, see [cluster kubeconfig](/reference/replicated-cli-cluster-kubeconfig).
+
+   For more information about installing in an existing cluster, see:
+   * [Installing with Helm](/vendor/install-with-helm)
+   * [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster)
+
+1. Run tests, such as integration, smoke, and canary tests. For more information about recommended types of tests to run, see [Best Practices and Recommendations](/vendor/ci-overview#best-practices-and-recommendations) in _About Integrating with CI/CD_.
+
+1. Delete the cluster when the tests complete. See the [cluster rm](/reference/replicated-cli-cluster-rm) Replicated CLI command. Or, for GitHub Actions workflows, see the [remove-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/remove-cluster) action.
+
+### Promote to a shared channel {#rel-promote}
+
+Add a job that promotes the release to a shared internal-only or customer-facing channel, such as the default Unstable, Beta, or Stable channel. See the [release promote](/reference/replicated-cli-release-promote) Replicated CLI command. Or, for GitHub Actions workflows, see the [promote-release](https://github.com/replicatedhq/replicated-actions/tree/main/promote-release) action.
+
+Consider the following requirements and recommendations:
+
+* Replicated recommends that you include the `--version` flag with the `release promote` command to explicitly declare the version label for the release. Use the same version label that was used when the release was created as part of [Create a release and promote to a temporary channel](#rel-release) above. Although the `--version` flag is not required, declaring the same release version label during promotion provides additional consistency that makes the releases easier to track.
+
+* The channel to which the release is promoted depends on the event triggers that you defined for the workflow. For example, if the workflow runs on every commit to the `main` branch, then promote the release to an internal-only channel, such as Unstable. For more information, see [Define Workflow Triggers](#rel-triggers) above.
+
+* Use the `--release-notes` flag to include detailed release notes in markdown, as shown in the sketch below. 
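+
+Putting these recommendations together, a promote step might run a command like the following sketch. The release sequence, channel ID, and version label are placeholders; the version label reuses the tag that triggered the workflow:
+
+```shell
+# Promote the tested release to the Beta channel. The sequence (42)
+# and channel ID (beta-channel-id) below are placeholders, and the
+# version label matches the tag that triggered the workflow.
+replicated release promote 42 beta-channel-id \
+  --version "1.0.0-beta.1" \
+  --release-notes "Beta release 1.0.0-beta.1."
+```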
+
+### Archive the temporary channel and customer {#rel-cleanup}
+
+Finally, add a job to archive the temporary channel and customer that you created. This ensures that these artifacts are removed from your Replicated team and that they do not have to be manually archived after the release is promoted.
+
+See the [channel rm](/reference/replicated-cli-channel-rm) Replicated CLI command and the [customer/\{customer_id\}/archive](https://replicated-vendor-api.readme.io/reference/archivecustomer) endpoint in the Vendor API v3 documentation. Or, for GitHub Actions workflows, see the [archive-channel](https://github.com/replicatedhq/replicated-actions/tree/main/archive-channel) and [archive-customer](https://github.com/replicatedhq/replicated-actions/tree/main/archive-customer) actions.
+
+
+---
+
+
+# Viewing Compatibility Matrix Usage History
+
+# Viewing Compatibility Matrix Usage History
+This topic describes using the Replicated Vendor Portal to understand
+Compatibility Matrix usage across your team.
+
+## View Historical Usage
+The **Compatibility Matrix > History** page provides
+historical information about both clusters and VMs, as shown below:
+
+![Compatibility Matrix History Page](/images/compatibility-matrix-history.png)
+[View a larger version of this image](/images/compatibility-matrix-history.png)
+
+Only _terminated_ clusters and VMs (that is, resources that were deleted or that errored) are displayed on the **History** page.
+
+The top of the **History** page displays the total number of terminated clusters and VMs
+in the selected time period as well as the total cost and usage time for
+the terminated resources.
+
+The table includes cluster and VM entries with the following columns:
+- **Name:** The name of the cluster or VM.
+- **By:** The actor that created the resource.
+- **Cost:** The cost of the resource. This is calculated at termination and is
+  based on the time the resource was running.
+- **Distribution:** The distribution and version of the resource. For example,
+  `kind 1.32.1`.
+- **Type:** The distribution type of the resource. Kubernetes clusters
+  are listed as `kubernetes` and VMs are listed as `vm`.
+- **Status:** The status of the resource. For example, `terminated` or `error`.
+- **Instance:** The instance type of the resource. For example, `r1.small`.
+- **Nodes:** The node count for `kubernetes` resources. VMs do not use this
+  field.
+- **Node Groups:** The node group count for `kubernetes` resources. VMs do not
+  use this field.
+- **Created At:** The time the resource was created.
+- **Running At:** The time the resource started running. For billing purposes,
+  this is the time when Replicated began charging for the resource.
+- **Terminated At:** The time the resource was terminated. For billing
+  purposes, this is the time when Replicated stopped charging for the resource.
+- **TTL:** The time-to-live for the resource. This is the maximum amount of
+  time the resource can run before it is automatically terminated.
+- **Duration:** The total time the resource was running. This is the time
+  between the `running` and `terminated` states.
+- **Tag:** Any tags that were applied to the resource.
+
+## Filter and Sort Usage History
+
+Each of the fields on the **History** page can be filtered and sorted. To sort by a specific field, click on the column header. 
+
+To filter by a specific field, click the filter icon in the column header, then use the filter inputs to refine the results, as shown below:
+
+![Compatibility Matrix History Page, filter input](/images/compatibility-matrix-column-filter-input.png)
+[View a larger version of this image](/images/compatibility-matrix-column-filter-input.png)
+
+## Get Usage History with the Vendor API v3
+
+For more information about using the Vendor API v3 to get Compatibility Matrix
+usage history information, see the following API endpoints within the
+Vendor API v3 documentation:
+
+* [/v3/cmx/stats](https://replicated-vendor-api.readme.io/reference/getcmxstats)
+* [/v3/vms](https://replicated-vendor-api.readme.io/reference/listvms)
+* [/v3/clusters](https://replicated-vendor-api.readme.io/reference/listclusters)
+* [/v3/cmx/history](https://replicated-vendor-api.readme.io/reference/listcmxhistory)
+
+For examples of using these endpoints, see the sections below.
+
+### Credit Balance and Summarized Usage
+You can use the `/v3/cmx/stats` endpoint to get summarized usage information in addition to your Compatibility Matrix
+credit balance.
+
+This endpoint returns:
+
+- **`cluster_count`:** The total number of terminated clusters.
+- **`vm_count`:** The total number of terminated VMs.
+- **`usage_minutes`:** The total number of billed usage minutes.
+- **`cost`:** The total cost of the terminated clusters and VMs in cents.
+- **`credit_balance`:** The remaining credit balance in cents.
+
+```shell
+curl --request GET \
+  --url https://api.replicated.com/vendor/v3/cmx/stats \
+  --header 'Accept: application/json' \
+  --header 'Authorization: $REPLICATED_API_TOKEN'
+{"cluster_count":2,"vm_count":4,"usage_minutes":152,"cost":276,"credit_balance":723}
+```
+
+The `v3/cmx/stats` endpoint also supports filtering by `start-time` and
+`end-time`. 
For example, the following request gets usage information for January 2025:
+
+```shell
+curl --request GET \
+  --url 'https://api.replicated.com/vendor/v3/cmx/stats?start-time=2025-01-01T00:00:00Z&end-time=2025-01-31T23:59:59Z' \
+  --header 'Authorization: $REPLICATED_API_TOKEN' \
+  --header 'accept: application/json'
+```
+
+### Currently Active Clusters
+To get a list of active clusters:
+
+```shell
+curl --request GET \
+  --url 'https://api.replicated.com/vendor/v3/clusters' \
+  --header 'Authorization: $REPLICATED_API_TOKEN' \
+  --header 'accept: application/json'
+```
+
+You can also use a tool such as `jq` to filter and iterate over the output:
+
+```shell
+curl --request GET \
+  --url 'https://api.replicated.com/vendor/v3/clusters' \
+  --header 'Authorization: $REPLICATED_API_TOKEN' \
+  --header 'accept: application/json' | \
+  jq '.clusters[] | {name: .name, ttl: .ttl, distribution: .distribution, version: .version}'
+
+{
+  "name": "friendly_brown",
+  "ttl": "1h",
+  "distribution": "kind",
+  "version": "1.32.1"
+}
+```
+
+### Currently Active Virtual Machines
+To get a list of active VMs:
+
+```shell
+curl --request GET \
+  --url 'https://api.replicated.com/vendor/v3/vms' \
+  --header 'Authorization: $REPLICATED_API_TOKEN' \
+  --header 'accept: application/json'
+```
+
+### Historical Usage
+To fetch historical usage information:
+
+```shell
+curl --request GET \
+  --url 'https://api.replicated.com/vendor/v3/cmx/history' \
+  --header 'Authorization: $REPLICATED_API_TOKEN' \
+  --header 'accept: application/json'
+```
+
+You can also filter the response from the `/v3/cmx/history` endpoint by `distribution-type`, which
+allows you to get a list of either clusters or VMs:
+
+- **For clusters, use `distribution-type=kubernetes`:**
+  ```shell
+  curl --request GET \
+    --url 'https://api.replicated.com/vendor/v3/cmx/history?distribution-type=kubernetes' \
+    --header 'Authorization: $REPLICATED_API_TOKEN' \
+    --header 'accept: application/json'
+  ```
+
+- **For VMs, use `distribution-type=vm`:**
+  ```shell
+  curl --request GET \
+    --url 'https://api.replicated.com/vendor/v3/cmx/history?distribution-type=vm' \
+    --header 'Authorization: $REPLICATED_API_TOKEN' \
+    --header 'accept: application/json'
+  ```
+
+### Filtering Endpoint Results
+Each of these endpoints supports pagination and filtering. You can use the
+following query parameters to filter the results.
+
+:::note
+Each of the examples below
+uses the `v3/cmx/history` endpoint, but the same query parameters can be used
+with the other endpoints as well.
+:::
+
+- **Pagination:** Use the `pageSize` and `currentPage` query parameters to
+  paginate through the results:
+
+  ```shell
+  curl --request GET \
+    --url 'https://api.replicated.com/vendor/v3/cmx/history?pageSize=10&currentPage=1' \
+    --header 'Authorization: $REPLICATED_API_TOKEN' \
+    --header 'accept: application/json'
+  ```
+
+- **Filter by date:** Use the `start-time` and `end-time` query parameters to
+  filter the results by a specific date range:
+
+  ```shell
+  curl --request GET \
+    --url 'https://api.replicated.com/vendor/v3/cmx/history?start-time=2025-01-01T00:00:00Z&end-time=2025-01-31T23:59:59Z' \
+    --header 'Authorization: $REPLICATED_API_TOKEN' \
+    --header 'accept: application/json'
+  ```
+
+- **Sort by:** Use the `tag-sort-key` query parameter to sort the results by a
+  specific field. The field can be any of the fields returned in the response. 
+
+  By default, the results are sorted in ascending order. Use
+  `sortDesc=true` to sort in descending order:
+
+  ```shell
+  curl --request GET \
+    --url 'https://api.replicated.com/vendor/v3/cmx/history?tag-sort-key=created_at&sortDesc=true' \
+    --header 'Authorization: $REPLICATED_API_TOKEN' \
+    --header 'accept: application/json'
+  ```
+
+- **Tag filters:** Use the `tag-filter` query parameter to filter the results by
+  a specific tag:
+
+  ```shell
+  curl --request GET \
+    --url 'https://api.replicated.com/vendor/v3/cmx/history?tag-filter=tag1' \
+    --header 'Authorization: $REPLICATED_API_TOKEN' \
+    --header 'accept: application/json'
+  ```
+
+- **Actor filters:** Use the `actor-filter` query parameter to filter by the actor
+  that created the resource, or by the type of actor such as `Web UI` or
+  `Replicated CLI`:
+
+  ```shell
+  curl --request GET \
+    --url 'https://api.replicated.com/vendor/v3/cmx/history?actor-filter=name' \
+    --header 'Authorization: $REPLICATED_API_TOKEN' \
+    --header 'accept: application/json'
+  ```
+
+  :::note
+  If a filter is passed for a value that does not exist, no warning is given.
+  For example, if you filter by `actor-filter=name` and there are no matching
+  results, the response is empty.
+  :::
+
+
+---
+
+
+# About the Configuration Screen
+
+# About the Configuration Screen
+
+This topic describes the configuration screen on the Config tab in the Replicated Admin Console.
+
+## About Collecting Configuration Values
+
+When you distribute your application with Replicated KOTS, you can include a configuration screen in the Admin Console. This configuration screen is used to collect required or optional values from your users that are used to run your application. You can use regular expressions to validate user input for some fields, such as passwords and email addresses. For more information about how to add custom fields to the configuration screen, see [Creating and Editing Configuration Fields](admin-console-customize-config-screen).
+
+If you use a Helm chart for your application, your users provide any values specific to their environment from the configuration screen, rather than in a Helm chart `values.yaml` file. This means that your users can provide configuration values through a user interface, rather than having to edit a YAML file or use `--set` CLI commands. The Admin Console configuration screen also allows you to control which options you expose to your users.
+
+For example, you can use the configuration screen to provide database configuration options for your application. Your users could connect your application to an external database by providing required values in the configuration screen, such as the host, port, and a username and password for the database.
+
+Alternatively, you can use the configuration screen to provide a database option that runs in the cluster as part of your application. For an example of this use case, see [Example: Adding Database Configuration Options](tutorial-adding-db-config).
+
+## Viewing the Configuration Screen
+
+If you include a configuration screen with your application, users of your application can access the configuration screen from the Admin Console:
+* During application installation.
+* At any time after application installation on the Admin Console Config tab.
+
+### Application Installation
+
+The Admin Console displays the configuration screen when the user installs the application, after they upload their license file. 
+ +The following shows an example of how the configuration screen displays during installation: + +![configuration screen that displays during application install](/images/config-screen-sentry-enterprise-app-install.png) + +[View a larger version of this image](/images/config-screen-sentry-enterprise-app-install.png) + +### Admin Console Config Tab + +Users can access the configuration screen any time after they install the application by going to the Config tab in the Admin Console. + +The following shows an example of how the configuration screen displays in the Admin Console Config tab: + +![configuration screen that displays in the Config tab](/images/config-screen-sentry-enterprise.png) + +[View a larger version of this image](/images/config-screen-sentry-enterprise.png) + + +--- + + +# Using Conditional Statements in Configuration Fields + +import IntegerComparison from "../partials/template-functions/_integer-comparison.mdx" +import PropertyWhen from "../partials/config/_property-when.mdx" +import DistroCheck from "../partials/template-functions/_string-comparison.mdx" +import NeComparison from "../partials/template-functions/_ne-comparison.mdx" + +# Using Conditional Statements in Configuration Fields + +This topic describes how to use Replicated KOTS template functions in the Config custom resource to conditionally show or hide configuration fields for your application on the Replicated KOTS Admin Console **Config** page. + +## Overview + +The `when` property in the Config custom resource denotes configuration groups or items that are displayed on the Admin Console **Config** page only when a condition evaluates to true. When the condition evaluates to false, the group or item is not displayed. + +<PropertyWhen/> + +For more information about the Config custom resource `when` property, see [when](/reference/custom-resource-config#when) in _Config_. + +## Conditional Statement Examples + +This section includes examples of common types of conditional statements used in the `when` property of the Config custom resource. + +For additional examples of using conditional statements in the Config custom resource, see [Applications](https://github.com/replicatedhq/platform-examples/tree/main/applications) in the platform-examples repository in GitHub. + +### Cluster Distribution Check + +It can be useful to show or hide configuration fields depending on the distribution of the cluster because different distributions often have unique requirements. + +In the following example, the `when` properties use the [Distribution](/reference/template-functions-static-context#distribution) template function to return the Kubernetes distribution of the cluster where Replicated KOTS is running. If the distribution of the cluster matches the specified distribution, then the `when` property evaluates to true. + +<DistroCheck/> + +### Embedded Cluster Distribution Check + +It can be useful to show or hide configuration fields if the distribution of the cluster is [Replicated Embedded Cluster](/vendor/embedded-overview) because you can include extensions in embedded cluster distributions to manage functionality such as ingress and storage. This means that embedded clusters frequently have fewer configuration options for the user. + +<NeComparison/> + +### kURL Distribution Check + +It can be useful to show or hide configuration fields if the cluster was provisioned by Replicated kURL because kURL distributions often include add-ons to manage functionality such as ingress and storage. 
This means that kURL clusters frequently have fewer configuration options for the user.
+
+In the following example, the `when` property of the `not_kurl` group uses the IsKurl template function to evaluate if the cluster was provisioned by kURL. For more information about the IsKurl template function, see [IsKurl](/reference/template-functions-static-context#iskurl) in _Static Context_.
+
+```yaml
+# Config custom resource
+apiVersion: kots.io/v1beta1
+kind: Config
+metadata:
+  name: config-sample
+spec:
+  groups:
+  - name: all_distributions
+    title: Example Group
+    description: This group always displays.
+    items:
+    - name: example_item
+      title: This item always displays.
+      type: text
+  - name: not_kurl
+    title: Non-kURL Cluster Group
+    description: This group displays only if the cluster is not provisioned by kURL.
+    when: 'repl{{ not IsKurl }}'
+    items:
+    - name: example_item_non_kurl
+      title: The cluster is not provisioned by kURL.
+      type: label
+```
+
+As shown in the image below, both the `all_distributions` and `not_kurl` groups are displayed on the **Config** page when KOTS is _not_ running in a kURL cluster:
+
+![Config page displays both groups from the example](/images/config-example-iskurl-false.png)
+
+[View a larger version of this image](/images/config-example-iskurl-false.png)
+
+However, when KOTS is running in a kURL cluster, only the `all_distributions` group is displayed, as shown below:
+
+![Config page displaying only the first group from the example](/images/config-example-iskurl-true.png)
+
+[View a larger version of this image](/images/config-example-iskurl-true.png)
+
+### License Field Value Equality Check
+
+You can show or hide configuration fields based on the values in a license to ensure that users only see configuration options for the features and entitlements granted by their license.
+
+In the following example, the `when` property of the `new_feature_config` item uses the LicenseFieldValue template function to determine if the user's license contains a `newFeatureEntitlement` field that is set to `true`. For more information about the LicenseFieldValue template function, see [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue) in _License Context_.
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Config
+metadata:
+  name: config-sample
+spec:
+  groups:
+  - name: example_settings
+    title: My Example Config
+    description: Example fields for using LicenseFieldValue template function
+    items:
+    - name: new_feature_config
+      type: label
+      title: "You have the new feature entitlement"
+      when: '{{repl (LicenseFieldValue "newFeatureEntitlement") }}'
+```
+
+As shown in the image below, the **Config** page displays the `new_feature_config` item when the user's license contains `newFeatureEntitlement: true`:
+
+![Config page displaying the text "You have the new feature entitlement"](/images/config-example-newfeature.png)
+
+[View a larger version of this image](/images/config-example-newfeature.png)
+
+### License Field Value Integer Comparison
+
+You can show or hide configuration fields based on the values in a license to ensure that users only see configuration options for the features and entitlements granted by their license. You can also compare integer values from license fields to control the configuration experience for your users. 
+
+<IntegerComparison/>
+
+### User-Supplied Value Check
+
+You can show or hide configuration fields based on user-supplied values on the **Config** page to ensure that users only see options that are relevant to their selections.
+
+In the following example, the `database_host` and `database_password` items use the ConfigOptionEquals template function to evaluate whether the user selected the `external` database option for the `db_type` item. For more information about the ConfigOptionEquals template function, see [ConfigOptionEquals](/reference/template-functions-config-context#configoptionequals) in _Config Context_.
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Config
+metadata:
+  name: config-sample
+spec:
+  groups:
+  - name: database_settings_group
+    title: Database Settings
+    items:
+    - name: db_type
+      title: Database Type
+      type: radio
+      default: external
+      items:
+      - name: external
+        title: External Database
+      - name: embedded
+        title: Embedded Database
+    - name: database_host
+      title: Database Hostname
+      type: text
+      when: '{{repl (ConfigOptionEquals "db_type" "external")}}'
+    - name: database_password
+      title: Database Password
+      type: password
+      when: '{{repl (ConfigOptionEquals "db_type" "external")}}'
+```
+As shown in the images below, when the user selects the external database option, the `database_host` and `database_password` items are displayed. Alternatively, when the user selects the embedded database option, the items are _not_ displayed:
+
+![Config page displaying the database host and password fields](/images/config-example-external-db.png)
+
+[View a larger version of this image](/images/config-example-external-db.png)
+
+![Config page with embedded database option selected](/images/config-example-embedded-db.png)
+
+[View a larger version of this image](/images/config-example-embedded-db.png)
+
+## Use Multiple Conditions in the `when` Property
+
+You can use more than one template function in the `when` property to create more complex conditional statements. This allows you to show or hide configuration fields based on multiple conditions being true.
+
+The following example includes `when` properties that use both the ConfigOptionEquals and IsKurl template functions:
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Config
+metadata:
+  name: config-sample
+spec:
+  groups:
+  - name: ingress_settings
+    title: Ingress Settings
+    description: Configure Ingress
+    items:
+    - name: ingress_type
+      title: Ingress Type
+      help_text: |
+        Select how traffic will ingress to the application.
+      type: radio
+      items:
+      - name: ingress_controller
+        title: Ingress Controller
+      - name: load_balancer
+        title: Load Balancer
+      default: "ingress_controller"
+      required: true
+      when: 'repl{{ not IsKurl }}'
+    - name: ingress_host
+      title: Hostname
+      help_text: Hostname used to access the application.
+      type: text
+      default: "hostname.example.com"
+      required: true
+      when: 'repl{{ and (not IsKurl) (ConfigOptionEquals "ingress_type" "ingress_controller") }}'
+    - name: ingress_annotations
+      type: textarea
+      title: Ingress Annotations
+      help_text: See your ingress controller’s documentation for the required annotations. 
+ when: 'repl{{ and (not IsKurl) (ConfigOptionEquals "ingress_type" "ingress_controller") }}' + - name: ingress_tls_type + title: Ingress TLS Type + type: radio + items: + - name: self_signed + title: Self Signed (Generate Self Signed Certificate) + - name: user_provided + title: User Provided (Upload a TLS Certificate and Key Pair) + required: true + default: self_signed + when: 'repl{{ and (not IsKurl) (ConfigOptionEquals "ingress_type" "ingress_controller") }}' + - name: ingress_tls_cert + title: TLS Cert + type: file + when: '{{repl and (ConfigOptionEquals "ingress_type" "ingress_controller") (ConfigOptionEquals "ingress_tls_type" "user_provided") }}' + required: true + - name: ingress_tls_key + title: TLS Key + type: file + when: '{{repl and (ConfigOptionEquals "ingress_type" "ingress_controller") (ConfigOptionEquals "ingress_tls_type" "user_provided") }}' + required: true + - name: load_balancer_port + title: Load Balancer Port + help_text: Port used to access the application through the Load Balancer. + type: text + default: "443" + required: true + when: 'repl{{ and (not IsKurl) (ConfigOptionEquals "ingress_type" "load_balancer") }}' + - name: load_balancer_annotations + type: textarea + title: Load Balancer Annotations + help_text: See your cloud provider’s documentation for the required annotations. + when: 'repl{{ and (not IsKurl) (ConfigOptionEquals "ingress_type" "load_balancer") }}' +``` + +As shown in the image below, the configuration fields that are specific to the ingress controller display only when the user selects the ingress controller option and KOTS is _not_ running in a kURL cluster: + +![Config page displaying the ingress controller options](/images/config-example-ingress-controller.png) + +[View a larger version of this image](/images/config-example-ingress-controller.png) + +Additionally, the options relevant to the load balancer display when the user selects the load balancer option and KOTS is _not_ running in a kURL cluster: + +![Config page displaying the load balancer options](/images/config-example-ingress-load-balancer.png) + +[View a larger version of this image](/images/config-example-ingress-load-balancer.png) + +--- + + +# Mapping User-Supplied Values + +# Mapping User-Supplied Values + +This topic describes how to map the values that your users provide in the Replicated Admin Console configuration screen to your application. + +This topic assumes that you have already added custom fields to the Admin Console configuration screen by editing the Config custom resource. For more information, see [Creating and Editing Configuration Fields](admin-console-customize-config-screen). + +## Overview of Mapping Values + +You use the values that your users provide in the Admin Console configuration screen to render YAML in the manifest files for your application. + +For example, if you provide an embedded database with your application, you might add a field on the Admin Console configuration screen where users input a password for the embedded database. You can then map the password that your user supplies in this field to the Secret manifest file for the database in your application. + +For an example of mapping database configuration options in a sample application, see [Example: Adding Database Configuration Options](tutorial-adding-db-config). + +You can also conditionally deploy custom resources depending on the user input for a given field. 
For example, if a customer chooses to use their own database with your application rather than an embedded database option, it is not desirable to deploy the optional database resources such as a StatefulSet and a Service.
+
+For more information about including optional resources conditionally based on user-supplied values, see [Conditionally Including or Excluding Resources](packaging-include-resources).
+
+## About Mapping Values with Template Functions
+
+To map user-supplied values, you use Replicated KOTS template functions. The template functions are based on the Go text/template libraries. To use template functions, you add them as strings in the custom resource manifest files in your application.
+
+For more information about template functions, including use cases and examples, see [About Template Functions](/reference/template-functions-about).
+
+For more information about the syntax of the template functions for mapping configuration values, see [Config Context](/reference/template-functions-config-context) in the _Template Functions_ section.
+
+## Map User-Supplied Values
+
+Follow one of these procedures to map user inputs from the configuration screen, depending on whether you use a Helm chart for your application:
+
+* **Without Helm**: See [Map Values to Manifest Files](#map-values-to-manifest-files).
+* **With Helm**: See [Map Values to a Helm Chart](#map-values-to-a-helm-chart).
+
+### Map Values to Manifest Files
+
+To map user-supplied values from the configuration screen to manifest files in your application:
+
+1. In the [Vendor Portal](https://vendor.replicated.com/apps), click **Releases**. Then, click **View YAML** next to the desired release.
+
+1. Open the Config custom resource manifest file that you created in the [Add Fields to the Configuration Screen](admin-console-customize-config-screen#add-fields-to-the-configuration-screen) procedure. The Config custom resource manifest file has `kind: Config`.
+
+1. In the Config manifest file, locate the name of the user-input field that you want to map.
+
+   **Example**:
+
+   ```yaml
+   apiVersion: kots.io/v1beta1
+   kind: Config
+   metadata:
+     name: my-application
+   spec:
+     groups:
+     - name: smtp_settings
+       title: SMTP Settings
+       description: Configure SMTP Settings
+       items:
+       - name: smtp_host
+         title: SMTP Hostname
+         help_text: Set SMTP Hostname
+         type: text
+   ```
+
+   In the example above, the field name to map is `smtp_host`.
+
+1. In the same release in the Vendor Portal, open the manifest file where you want to map the value for the field that you selected.
+
+1. In the manifest file, use the ConfigOption template function to map the user-supplied value as a key-value pair. For example:
+
+   ```yaml
+   hostname: '{{repl ConfigOption "smtp_host"}}'
+   ```
+
+   For more information about the ConfigOption template function, see [Config Context](../reference/template-functions-config-context#configoption) in the _Template Functions_ section.
+
+   **Example**:
+
+   The following example shows mapping user-supplied TLS certificate and TLS private key files to the `tls.crt` and `tls.key` keys in a Secret custom resource manifest file.
+
+   For more information about working with TLS secrets, including a strategy for re-using the certificates uploaded for the Admin Console itself, see the [Configuring Cluster Ingress](packaging-ingress) example.
+ + ```yaml + apiVersion: v1 + kind: Secret + metadata: + name: tls-secret + type: kubernetes.io/tls + data: + tls.crt: '{{repl ConfigOption "tls_certificate_file" }}' + tls.key: '{{repl ConfigOption "tls_private_key_file" }}' + ``` + +1. Save and promote the release to a development environment to test your changes. + +### Map Values to a Helm Chart + +The `values.yaml` file in a Helm chart defines parameters that are specific to each environment in which the chart will be deployed. With Replicated KOTS, your users provide these values through the configuration screen in the Admin Console. You customize the configuration screen based on the required and optional configuration fields that you want to expose to your users. + +To map the values that your users provide in the Admin Console configuration screen to your Helm chart `values.yaml` file, you create a HelmChart custom resource. + +For a tutorial that shows how to set values in a sample Helm chart during installation with KOTS, see [Set Helm Chart Values with KOTS](/vendor/tutorial-config-setup). + +To map user inputs from the configuration screen to the `values.yaml` file: + +1. In the [Vendor Portal](https://vendor.replicated.com/apps), click **Releases**. Then, click **View YAML** next to the desired release. + +1. Open the Config custom resource manifest file that you created in the [Add Fields to the Configuration Screen](admin-console-customize-config-screen#add-fields-to-the-configuration-screen) procedure. The Config custom resource manifest file has `kind: Config`. + +1. In the Config manifest file, locate the name of the user-input field that you want to map. + + **Example**: + + ```yaml + apiVersion: kots.io/v1beta1 + kind: Config + metadata: + name: my-application + spec: + groups: + - name: smtp_settings + title: SMTP Settings + description: Configure SMTP Settings + items: + - name: smtp_host + title: SMTP Hostname + help_text: Set SMTP Hostname + type: text + ``` + + In the example above, the field name to map is `smtp_host`. + +1. In the same release, create a HelmChart custom resource manifest file. A HelmChart custom resource manifest file has `kind: HelmChart`. + + For more information about the HelmChart custom resource, see [HelmChart](../reference/custom-resource-helmchart) in the _Custom Resources_ section. + +1. In the HelmChart manifest file, copy and paste the name of the property from your `values.yaml` file that corresponds to the field that you selected from the Config manifest file under `values`: + + ```yaml + values: + HELM_VALUE_KEY: + ``` + Replace `HELM_VALUE_KEY` with the property name from the `values.yaml` file. + +1. Use the ConfigOption template function to set the property from the `values.yaml` file equal to the corresponding configuration screen field: + + ```yaml + values: + HELM_VALUE_KEY: '{{repl ConfigOption "CONFIG_SCREEN_FIELD_NAME" }}' + ``` + Replace `CONFIG_SCREEN_FIELD_NAME` with the name of the field that you created in the Config custom resource. + + For more information about the KOTS ConfigOption template function, see [Config Context](../reference/template-functions-config-context#configoption) in the _Template Functions_ section. + + **Example:** + + ```yaml + apiVersion: kots.io/v1beta1 + kind: HelmChart + metadata: + name: samplechart + spec: + chart: + name: samplechart + chartVersion: 3.1.7 + helmVersion: v3 + useHelmInstall: true + values: + hostname: '{{repl ConfigOption "smtp_host" }}' + ``` + +1. 
Save and promote the release to a development environment to test your changes. + + +--- + + +# Using Custom Domains + +# Using Custom Domains + +This topic describes how to use the Replicated Vendor Portal to add and manage custom domains to alias the Replicated registry, the Replicated proxy registry, the Replicated app service, and the download portal. + +For information about adding and managing custom domains with the Vendor API v3, see the [customHostnames](https://replicated-vendor-api.readme.io/reference/createcustomhostname) section in the Vendor API v3 documentation. + +For an overview about custom domains and limitations, see [About Custom Domains](custom-domains). + +## Configure a Custom Domain + +Before you assign a custom domain for a registry or the download portal, you must first configure and verify the ownership and TLS certificate. + +To add and configure a custom domain: + +1. In the [Vendor Portal](https://vendor.replicated.com), go to **Custom Domains**. + +1. In the **Add custom domain** dropdown, select the target Replicated endpoint. + + The **Configure a custom domain** wizard opens. + + <img src="/images/custom-domains-download-configure.png" alt="custom domain wizard" width="500"/> + + [View a larger version of this image](/images/custom-domains-download-configure.png) + +1. For **Domain**, enter the custom domain. Click **Save & continue**. + +1. For **Create CNAME**, copy the text string and use it to create a CNAME record in your DNS account. Click **Continue**. + +1. For **Verify ownership**, copy the text string and use it to create a TXT record in your DNS account if displayed. If a TXT record is not displayed, ownership will be validated automatically using an HTTP token. Click **Validate & continue**. + + Your changes can take up to 24 hours to propagate. + +1. For **TLS cert creation verification**, copy the text string and use it to create a TXT record in your DNS account if displayed. If a TXT record is not displayed, ownership will be validated automatically using an HTTP token. Click **Validate & continue**. + + Your changes can take up to 24 hours to propagate. + + :::note + If you set up a [CAA record](https://letsencrypt.org/docs/caa/) for this hostname, you must include all Certificate Authorities (CAs) that Cloudflare partners with. The following CAA records are required to ensure proper certificate issuance and renewal: + + ```dns + @ IN CAA 0 issue "letsencrypt.org" + @ IN CAA 0 issue "pki.goog; cansignhttpexchanges=yes" + @ IN CAA 0 issue "ssl.com" + @ IN CAA 0 issue "amazon.com" + @ IN CAA 0 issue "cloudflare.com" + @ IN CAA 0 issue "google.com" + ``` + + Failing to include any of these CAs might prevent certificate issuance or renewal, which can result in downtime for your customers. For additional security, you can add an IODEF record to receive notifications about certificate requests: + + ```dns + @ IN CAA 0 iodef "mailto:your-security-team@example.com" + ``` + ::: + +1. For **Use Domain**, to set the new domain as the default, click **Yes, set as default**. Otherwise, click **Not now**. + + :::note + Replicated recommends that you do _not_ set a domain as the default until you are ready for it to be used by customers. + ::: + +The Vendor Portal marks the domain as **Configured** after the verification checks for ownership and TLS certificate creation are complete. 
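+
+For reference, the CNAME record that you create in this procedure typically takes the form shown below. This is a sketch only: `registry.example.com` is a placeholder custom domain, and the wizard provides the exact record name and target to use for your domain.
+
+```dns
+; Example CNAME record aliasing a custom registry domain to the Replicated registry
+registry.example.com. 3600 IN CNAME registry.replicated.com.
+```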
+
+## Use Custom Domains
+
+After you configure one or more custom domains in the Vendor Portal, you assign a custom domain by setting it as the default for all channels and customers or by assigning it to an individual release channel.
+
+### Set a Default Domain
+
+Setting a default domain is useful for ensuring that the same domain is used across channels for all your customers.
+
+When you set a custom domain as the default, it is used by default for all new releases promoted to any channel, as long as the channel does not have a different domain assigned in its channel settings.
+
+Only releases that are promoted to a channel _after_ you set a default domain use the new default domain. Any existing releases that were promoted before you set the default continue to use the same domain that they used previously.
+
+To set a custom domain as the default:
+
+1. In the Vendor Portal, go to **Custom Domains**.
+
+1. Next to the target domain, click **Set as default**.
+
+1. In the confirmation dialog that opens, click **Yes, set as default**.
+
+### Assign a Domain to a Channel {#channel-domain}
+
+You can assign a domain to an individual channel by editing the channel settings. When you specify a domain in the channel settings, new releases promoted to the channel use the selected domain even if there is a different domain set as the default on the **Custom Domains** page.
+
+Assigning a domain to a release channel is useful when you need to override either the default Replicated domain or a default custom domain for a specific channel. For example:
+* You need to use a different domain for releases promoted to your Beta and Stable channels.
+* You need to test a domain in a development environment before you set the domain as the default for all channels.
+
+To assign a custom domain to a channel:
+
+1. In the Vendor Portal, go to **Channels** and click the settings icon for the target channel.
+
+1. Under **Custom domains**, in the drop-down for the target Replicated endpoint, select the domain to use for the channel. For more information about channel settings, see [Settings](releases-about#settings) in _About Channels and Releases_.
+
+   <img alt="channel settings dialog" src="/images/channel-settings.png" width="500px"/>
+
+   [View a larger version of this image](/images/channel-settings.png)
+
+## Reuse a Custom Domain for Another Application
+
+If you have configured a custom domain for one application, you can reuse the custom domain for another application in the same team without going through the ownership and TLS certificate verification process again.
+
+To reuse a custom domain for another application:
+
+1. In the Vendor Portal, select the application from the dropdown list.
+
+1. Click **Custom Domains**.
+
+1. In the section for the target endpoint, click **Add your first custom domain** for your first domain, or click **Add new domain** for additional domains.
+
+   The **Configure a custom domain** wizard opens.
+
+1. In the text box, enter the custom domain name that you want to reuse. Click **Save & continue**.
+
+   The last page of the wizard opens because the custom domain was verified previously.
+
+1. Do one of the following:
+
+   - Click **Set as default**. In the confirmation dialog that opens, click **Yes, set as default**.
+
+   - Click **Not now**. You can come back later to set the domain as the default.
 The Vendor Portal shows that the domain has a Configured status because it was configured for a previous application, though it is not yet assigned as the default for this application.
+
+
+## Remove a Custom Domain
+
+You can remove a custom domain at any time, but you should plan the transition so that you do not break any existing installations or documentation.
+
+Removing a custom domain for the Replicated registry, proxy registry, or Replicated app service will break existing installations that use the custom domain. Existing installations need to be upgraded to a version that does not use the custom domain before it can be removed safely.
+
+If you remove a custom domain for the download portal, it is no longer accessible using the custom URL. You will need to point customers to an updated URL.
+
+To remove a custom domain:
+
+1. Log in to the [Vendor Portal](https://vendor.replicated.com) and click **Custom Domains**.
+
+1. Verify that the domain is neither set as the default nor in use on any channels. You can edit the domains in use on a channel in the channel settings. For more information, see [Settings](releases-about#settings) in _About Channels and Releases_.
+
+   :::important
+   When you remove a registry or Replicated app service custom domain, any installations that reference that custom domain will break. Ensure that the custom domain is no longer in use before you remove it from the Vendor Portal.
+   :::
+
+1. Click **Remove** next to the unused domain in the list, and then click **Yes, remove domain**.
+
+
+---
+
+
+# About Custom Domains
+
+# About Custom Domains
+
+This topic provides an overview and the limitations of using custom domains to alias the Replicated private registry, Replicated proxy registry, Replicated app service, and the Download Portal.
+
+For information about configuring and managing custom domains, see [Using Custom Domains](custom-domains-using).
+
+## Overview
+
+You can use custom domains to alias Replicated endpoints by creating Canonical Name (CNAME) records for your domains.
+
+Replicated domains are external to your domain and can require additional security reviews by your customer. Using custom domains as aliases can bring the domains inside an existing security review and reduce your exposure.
+
+TXT records must be created to verify:
+
+- Domain ownership: Domain ownership is verified when you initially add a record.
+- TLS certificate creation: Each new domain must have a new TLS certificate to be verified.
+
+The TXT records can be removed after the verification is complete.
+
+You can configure custom domains for the following services, so that customer-facing URLs reflect your company's brand:
+
+- **Replicated registry:** Images and Helm charts can be pulled from the Replicated registry. By default, this registry uses the domain `registry.replicated.com`. We suggest using a CNAME such as `registry.{your app name}.com`.
+
+- **Proxy registry:** Images can be proxied from external private registries using the Replicated proxy registry. By default, the proxy registry uses the domain `proxy.replicated.com`. We suggest using a CNAME such as `proxy.{your app name}.com`.
+
+- **Replicated app service:** Upstream application YAML and metadata, including a license ID, are pulled from replicated.app. By default, this service uses the domain `replicated.app`. We suggest using a CNAME such as `updates.{your app name}.com`.
+
+- **Download Portal:** The Download Portal can be used to share customer license files, air gap bundles, and so on.
 By default, the Download Portal uses the domain `get.replicated.com`. We suggest using a CNAME such as `portal.{your app name}.com` or `enterprise.{your app name}.com`.
+
+## Limitations
+
+Using custom domains has the following limitations:
+
+- A single custom domain cannot be used for multiple endpoints. For example, a single domain can map to `registry.replicated.com` for any number of applications, but cannot map to both `registry.replicated.com` and `proxy.replicated.com`, even if the applications are different.
+
+- Custom domains cannot be used to alias api.replicated.com (legacy customer-facing APIs) or kURL.
+
+- Multiple custom domains can be configured, but only one custom domain can be the default for each Replicated endpoint. All configured custom domains work whether or not they are the default.
+
+- A particular custom domain can only be used by one team.
+
+
+---
+
+
+# Configuring Custom Metrics (Beta)
+
+# Configuring Custom Metrics (Beta)
+
+This topic describes how to configure an application to send custom metrics to the Replicated Vendor Portal.
+
+## Overview
+
+In addition to the built-in insights displayed in the Vendor Portal by default (such as uptime and time to install), you can also configure custom metrics to measure instances of your application running in customer environments. Custom metrics can be collected for application instances running in online or air gap environments.
+
+Custom metrics can be used to generate insights on customer usage and adoption of new features, which can help your team to make more informed prioritization decisions. For example:
+* Decreased or plateaued usage for a customer can indicate a potential churn risk
+* Increased usage for a customer can indicate the opportunity to invest in growth, co-marketing, and upsell efforts
+* Low feature usage and adoption overall can indicate the need to invest in usability, discoverability, documentation, education, or in-product onboarding
+* High usage volume for a customer can indicate that the customer might need help in scaling their instance infrastructure to keep up with projected usage
+
+## How the Vendor Portal Collects Custom Metrics
+
+The Vendor Portal collects custom metrics through the Replicated SDK that is installed in the cluster alongside the application.
+
+The SDK exposes an in-cluster API where you can configure your application to POST metric payloads. When an application instance sends data to the API, the SDK sends the data (including any custom and built-in metrics) to the Replicated app service. The app service is located at `replicated.app` or at your custom domain.
+
+If any values in the metric payload are different from the current values for the instance, then a new event is generated and displayed in the Vendor Portal. For more information about how the Vendor Portal generates events, see [How the Vendor Portal Generates Events and Insights](/vendor/instance-insights-event-data#about-events) in _About Instance and Event Data_.
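+
+For example, a workload in the cluster might send a payload to the SDK service with a request like the following. This is a minimal sketch: the metric name and value are illustrative, and the endpoint is described in Configure Custom Metrics below.
+
+```bash
+# Minimal sketch: POST a custom metrics payload to the in-cluster SDK API.
+# The "activeUsers" key and its value are illustrative.
+curl --request POST \
+  --header "Content-Type: application/json" \
+  --data '{"data": {"activeUsers": 10}}' \
+  http://replicated:3000/api/v1/app/custom-metrics
+```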
+
+The following diagram demonstrates how a custom `activeUsers` metric is sent to the in-cluster API and ultimately displayed in the Vendor Portal, as described above:
+
+<img alt="Custom metrics flowing from customer environment to Vendor Portal" src="/images/custom-metrics-flow.png" width="800px"/>
+
+[View a larger version of this image](/images/custom-metrics-flow.png)
+
+## Requirements
+
+To support the collection of custom metrics in online and air gap environments, the Replicated SDK version 1.0.0-beta.12 or later must be running in the cluster alongside the application instance.
+
+The `PATCH` and `DELETE` methods described below are available in the Replicated SDK version 1.0.0-beta.23 or later.
+
+For more information about the Replicated SDK, see [About the Replicated SDK](/vendor/replicated-sdk-overview).
+
+If you have any customers running earlier versions of the SDK, Replicated recommends that you add logic to your application to gracefully handle a 404 from the in-cluster APIs.
+
+## Limitations
+
+Custom metrics have the following limitations:
+
+* The label that is used to display metrics in the Vendor Portal cannot be customized. Metrics are sent to the Vendor Portal with the same name that is sent in the `POST` or `PATCH` payload. The Vendor Portal then converts camel case to title case: for example, `activeUsers` is displayed as **Active Users**.
+
+* The in-cluster APIs accept only JSON scalar values for metrics. Any requests containing nested objects or arrays are rejected.
+
+* When using the `POST` method, any existing keys that are not included in the payload are removed from the instance summary. To create new metrics or update existing ones without sending the entire dataset, use the `PATCH` method.
+
+## Configure Custom Metrics
+
+You can configure your application to `POST` or `PATCH` a set of metrics as key-value pairs to the API that is running in the cluster alongside the application instance.
+
+To remove an existing custom metric, use the `DELETE` endpoint with the custom metric name.
+
+The Replicated SDK provides an in-cluster API custom metrics endpoint at `http://replicated:3000/api/v1/app/custom-metrics`.
+
+**Example:**
+
+```bash
+POST http://replicated:3000/api/v1/app/custom-metrics
+```
+
+```json
+{
+  "data": {
+    "num_projects": 5,
+    "weekly_active_users": 10
+  }
+}
+```
+
+```bash
+PATCH http://replicated:3000/api/v1/app/custom-metrics
+```
+
+```json
+{
+  "data": {
+    "num_projects": 54,
+    "num_error": 2
+  }
+}
+```
+
+```bash
+DELETE http://replicated:3000/api/v1/app/custom-metrics/num_projects
+```
+
+### POST vs PATCH
+
+The `POST` method will always replace the existing data with the most recent payload received. Any existing keys not included in the most recent payload will still be accessible in the instance events API, but they will no longer appear in the instance summary.
+
+The `PATCH` method will accept partial updates or add new custom metrics if a key:value pair that does not currently exist is passed.
+
+In most cases, using the `PATCH` method is recommended.
+
+For example, if a component of your application sends the following via the `POST` method:
+
+```json
+{
+  "numProjects": 5,
+  "activeUsers": 10
+}
+```
+
+Then, the component later sends the following also via the `POST` method:
+
+```json
+{
+  "activeUsers": 10,
+  "usingCustomReports": false
+}
+```
+
+The instance detail will show `Active Users: 10` and `Using Custom Reports: false`, which represents the most recent payload received.
 The previously-sent `numProjects` value is discarded from the instance summary and is available in the instance events payload. In order to preserve `numProjects` from the initial payload and upsert `usingCustomReports` and `activeUsers`, use the `PATCH` method instead of `POST` on subsequent calls to the endpoint.
+
+For example, if a component of your application initially sends the following via the `POST` method:
+
+```json
+{
+  "numProjects": 5,
+  "activeUsers": 10
+}
+```
+
+Then, the component later sends the following also via the `PATCH` method:
+```json
+{
+  "usingCustomReports": false
+}
+```
+
+The instance detail will show `Num Projects: 5`, `Active Users: 10`, `Using Custom Reports: false`, which represents the merged and upserted payload.
+
+### NodeJS Example
+
+The following example shows a NodeJS application that sends metrics on a weekly interval to the in-cluster API exposed by the SDK:
+
+```javascript
+async function sendMetrics(db) {
+
+  const projectsQuery = "SELECT COUNT(*) as num_projects from projects";
+  const numProjects = (await db.getConnection().queryOne(projectsQuery)).num_projects;
+
+  const usersQuery =
+    "SELECT COUNT(*) as active_users from users where DATEDIFF('day', last_active, CURRENT_TIMESTAMP) < 7";
+  const activeUsers = (await db.getConnection().queryOne(usersQuery)).active_users;
+
+  const metrics = { data: { numProjects, activeUsers }};
+
+  // POST the payload to the in-cluster API exposed by the Replicated SDK
+  const res = await fetch('http://replicated:3000/api/v1/app/custom-metrics', {
+    method: 'POST',
+    headers: {
+      "Content-Type": "application/json",
+    },
+    body: JSON.stringify(metrics),
+  });
+  if (res.status !== 200) {
+    throw new Error(`Failed to send metrics: ${res.statusText}`);
+  }
+}
+
+async function startMetricsLoop(db) {
+
+  const ONE_WEEK_IN_MS = 1000 * 60 * 60 * 24 * 7
+
+  // send metrics once on startup
+  await sendMetrics(db)
+    .catch((e) => { console.log("error sending metrics: ", e) });
+
+  // schedule weekly metrics payload
+  setInterval( () => {
+    sendMetrics(db)
+      .catch((e) => { console.log("error sending metrics: ", e) });
+  }, ONE_WEEK_IN_MS);
+}
+
+startMetricsLoop(getDatabase());
+```
+
+## View Custom Metrics
+
+You can view the custom metrics that you configure for each active instance of your application on the **Instance Details** page in the Vendor Portal.
+
+The following shows an example of an instance with custom metrics:
+
+<img alt="Custom Metrics section of Instance details page" src="/images/instance-custom-metrics.png" width="700px"/>
+
+[View a larger version of this image](/images/instance-custom-metrics.png)
+
+As shown in the image above, the **Custom Metrics** section of the **Instance Details** page includes the following information:
+* The timestamp when the custom metric data was last updated.
+* Each custom metric that you configured, along with the most recent value for the metric.
+* A time-series graph depicting the historical data trends for the selected metric.
+
+Custom metrics are also included in the **Instance activity** stream of the **Instance Details** page. For more information, see [Instance Activity](/vendor/instance-insights-details#instance-activity) in _Instance Details_.
+
+## Export Custom Metrics
+
+You can use the Vendor API v3 `/app/{app_id}/events` endpoint to programmatically access historical time series data containing instance level events, including any custom metrics that you have defined. For more information about the endpoint, see [Export Customer and Instance Data](/vendor/instance-data-export).
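+
+For example, the following is a minimal sketch of calling the endpoint, assuming a valid Vendor API v3 token in `$REPLICATED_API_TOKEN` and your application ID in `$APP_ID`:
+
+```bash
+# Minimal sketch: export instance events, including custom metrics.
+# The token and app ID environment variables are assumptions for illustration.
+curl --silent \
+  --header "Authorization: $REPLICATED_API_TOKEN" \
+  "https://api.replicated.com/vendor/v3/app/$APP_ID/events"
+```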
+
+
+---
+
+
+# Adoption Report
+
+# Adoption Report
+
+This topic describes the insights in the **Adoption** section on the Replicated Vendor Portal **Dashboard** page.
+
+## About Adoption Rate
+
+The **Adoption** section on the **Dashboard** provides insights about the rate at which your customers upgrade their instances and adopt the latest versions of your application. As an application vendor, you can use these adoption rate metrics to learn if your customers are completing upgrades regularly, which is a key indicator of the discoverability and ease of application upgrades.
+
+The Vendor Portal generates adoption rate data from all of your customers' application instances that have checked in during the selected time period. For more information about instance check-ins, see [How the Vendor Portal Collects Instance Data](instance-insights-event-data#about-reporting) in _About Instance and Event Data_.
+
+The following screenshot shows an example of the **Adoption** section on the **Dashboard**:
+
+![Adoption report section on dashboard](/images/customer_adoption_rates.png)
+
+[View a larger version of this image](/images/customer_adoption_rates.png)
+
+As shown in the screenshot above, the **Adoption** report includes a graph and key adoption rate metrics. For more information about how to interpret this data, see [Adoption Graph](#graph) and [Adoption Metrics](#metrics) below.
+
+The **Adoption** report also displays the number of customers assigned to the selected channel and a link to the report that you can share with other members of your team.
+
+You can filter the graph and metrics in the **Adoption** report by:
+* License type (Paid, Trial, Dev, or Community)
+* Time period (the previous month, three months, six months, or twelve months)
+* Release channel to which instance licenses are assigned, such as Stable or Beta
+
+## Adoption Graph {#graph}
+
+The **Adoption** report includes a graph that shows the percent of active instances that are running different versions of your application within the selected time period.
+
+The following shows an example of an adoption rate graph with three months of data:
+
+![Adoption report graph showing three months of data](/images/adoption_rate_graph.png)
+
+[View a larger version of this image](/images/adoption_rate_graph.png)
+
+As shown in the image above, the graph plots the number of active instances in each week in the selected time period, grouped by the version each instance is running. The key to the left of the graph shows the unique color that is assigned to each application version. You can use this color-coding to see at a glance the percent of active instances that were running different versions of your application across the selected time period.
+
+Newer versions will enter at the bottom of the area chart, with older versions shown higher up.
+
+You can also hover over a color-coded section in the graph to view the number and percentage of active instances that were running the version in a given period.
+
+If there are no active instances of your application, then the adoption rate graph displays a "No Instances" message.
+
+## Adoption Metrics {#metrics}
+
+The **Adoption** section includes metrics that show how frequently your customers discover and complete upgrades to new versions of your application. It is important that your users adopt new versions of your application so that they have access to the latest features and bug fixes.
 Additionally, when most of your users are on the latest versions, you can also reduce the number of versions for which you provide support and maintain documentation.
+
+The following shows an example of the metrics in the **Adoption** section:
+
+![Adoption rate metrics showing](/images/adoption_rate_metrics.png)
+
+[View a larger version of this image](/images/adoption_rate_metrics.png)
+
+As shown in the image above, the **Adoption** section displays the following metrics:
+* Instances on last three versions
+* Unique versions
+* Median relative age
+* Upgrades completed
+
+Based on the time period selected, each metric includes an arrow that shows the change in value compared to the previous period. For example, if the median relative age today is 68 days, the selected time period is three months, and three months ago the median relative age was 55 days, then the metric would show an upward-facing arrow with an increase of 13 days.
+
+The following table describes each metric in the **Adoption** section, including the formula used to calculate its value and the recommended trend for the metric over time:
+
+<table>
+  <tbody>
+    <tr>
+      <th width="25%">Metric</th>
+      <th width="45%">Description</th>
+      <th width="30%">Target Trend</th>
+    </tr>
+    <tr>
+      <td>Instances on last three versions</td>
+      <td>
+        <p>Percent of active instances that are running one of the latest three versions of your application.</p>
+        <p><strong>Formula</strong>: <code>count(instances on last 3 versions) / count(instances)</code></p>
+      </td>
+      <td>Increase towards 100%</td>
+    </tr>
+    <tr>
+      <td>Unique versions</td>
+      <td>
+        <p>Number of unique versions of your application running in active instances.</p>
+        <p><strong>Formula</strong>: <code>count(distinct instance_version)</code></p>
+      </td>
+      <td>Decrease towards less than or equal to three</td>
+    </tr>
+    <tr>
+      <td>Median relative age</td>
+      <td>
+        <p>The <em>relative age</em> of a single instance is the number of days between the date that the instance's version was promoted to the channel and the date when the latest available application version was promoted to the channel.</p>
+        <p><em>Median relative age</em> is the median value across all active instances for the selected time period and channel.</p>
+        <p><strong>Formula</strong>: <code>median(relative_age(instance_version))</code></p>
+      </td>
+      <td><p>Depends on release cadence. For vendors who ship every four to eight weeks, decrease the median relative age towards 60 days or fewer.</p></td>
+    </tr>
+    <tr>
+      <td>Upgrades completed</td>
+      <td>
+        <p>Total number of completed upgrades across active instances for the selected time period and channel.</p>
+        <p>An upgrade is a single version change for an instance. An upgrade is considered complete when the instance deploys the new application version.</p>
+        <p>The instance does <em>not</em> need to become available (as indicated by reaching a Ready state) after deploying the new version for the upgrade to be counted as complete.</p>
+        <p><strong>Formula</strong>: <code>sum(instance.upgrade_count) across all instances</code></p>
+      </td>
+      <td>Increase compared to any previous period, unless you reduce your total number of live instances.</td>
+    </tr>
+  </tbody>
+</table>
+
+
+---
+
+
+# Customer Reporting
+
+# Customer Reporting
+
+This topic describes the customer and instance data displayed in the **Customers > Reporting** page of the Replicated Vendor Portal.
+
+## About the Customer Reporting Page {#reporting-page}
+
+The **Customers > Reporting** page displays data about the active application instances associated with each customer. The following shows an example of the **Reporting** page:
+
+![Customer reporting page showing two active instances](/images/customer-reporting-page.png)
+
+[View a larger version of this image](/images/customer-reporting-page.png)
+
+As shown in the image above, the **Reporting** page has the following main sections:
+* [Manage Customer](#manage-customer)
+* [Time to Install](#time-to-install)
+* [Download Portal](#download-portal)
+* [Instances](#instances)
+
+### Manage Customer
+
+The **Manage customer** section displays the following information about the customer:
+
+* The customer name
+* The channel the customer is assigned to
+* Details about the customer license:
+  * The license type
+  * The date the license was created
+  * The expiration date of the license
+* The features the customer has enabled, including:
+  * GitOps
+  * Air gap
+  * Identity
+  * Snapshots
+
+In this section, you can also view the Helm CLI installation instructions for the customer and download the customer license.
+
+### Time to Install
+
+If the customer has one or more application instances that have reached a Ready status at least one time, then the **Time to install** section displays _License time to install_ and _Instance time to install_ metrics:
+
+* **License time to install**: The time between when you create the customer license in the Vendor Portal, and when the application instance reaches a Ready status in the customer environment.
+* **Instance time to install**: The time between when the Vendor Portal records the first event for the application instance in the customer environment, and when the instance reaches a Ready status.
+
+A _Ready_ status indicates that all Kubernetes resources for the application are Ready. For example, a Deployment resource is considered Ready when the number of Ready replicas equals the total desired number of replicas. For more information, see [Enabling and Understanding Application Status](insights-app-status).
+
+If the customer has no application instances that have ever reported a Ready status, or if you have not configured your application to deliver status data to the Vendor Portal, then the **Time to install** section displays a **No Ready Instances** message.
+
+If the customer has more than one application instance that has previously reported a Ready status, then the **Time to install** section displays metrics for the instance that most recently reported a Ready status for the first time.
+
+For example, Instance A reported its first Ready status at 9:00 AM today. Instance B reported its first Ready status at 8:00 AM today, moved to a Degraded status, then reported a Ready status again at 10:00 AM today. In this case, the Vendor Portal displays the time to install metrics for Instance A, which reported its _first_ Ready status most recently.
+
+For more information about how to interpret the time to install metrics, see [Time to Install](instance-insights-details#time-to-install) in _Instance Details_.
+
+### Download Portal
+
+From the **Download portal** section, you can:
+* Manage the password for the Download Portal
+* Access the unique Download Portal URL for the customer
+
+You can use the Download Portal to give your customers access to the files they need to install your application, such as their license file or air gap bundles.
 For more information, see [Downloading Assets from the Download Portal](releases-share-download-portal).
+
+### Instances
+
+The **Instances** section displays details about the active application instances associated with the customer.
+
+You can click any of the rows in the **Instances** section to open the **Instance details** page. The **Instance details** page displays additional event data and computed metrics to help you understand the performance and status of each active application instance. For more information, see [Instance Details](instance-insights-details).
+
+The following shows an example of a row for an active instance in the **Instances** section:
+
+![Row in the Instances section](/images/instance-row.png)
+[View a larger version of this image](/images/instance-row.png)
+
+The **Instances** section displays the following details about each active instance:
+* The first seven characters of the instance ID.
+* The status of the instance. Possible statuses are Missing, Unavailable, Degraded, Ready, and Updating. For more information, see [Enabling and Understanding Application Status](insights-app-status).
+* The application version.
+* Details about the cluster where the instance is installed, including:
+  * The Kubernetes distribution for the cluster, if applicable.
+  * The Kubernetes version running in the cluster.
+  * Whether the instance is installed in a Replicated kURL cluster.
+  * (kURL Clusters Only) The number of nodes ready in the cluster.
+  * (KOTS Only) The KOTS version running in the cluster.
+  * The Replicated SDK version running in the cluster.
+  * The cloud provider and region, if applicable.
+* Instance uptime data, including:
+  * The timestamp of the last recorded check-in for the instance. For more information about what triggers an instance check-in, see [How the Vendor Portal Collects Instance Data](instance-insights-event-data#about-reporting) in _About Instance and Event Data_.
+  * An uptime graph of the previous two weeks. For more information about how the Vendor Portal determines uptime, see [Instance Uptime](instance-insights-details#instance-uptime) in _Instance Details_.
+  * The uptime ratio in the previous two weeks.
+
+---
+
+
+# Data Availability and Continuity
+
+# Data Availability and Continuity
+
+Replicated uses redundancy and a cloud-native architecture in support of availability and continuity of vendor data.
+
+## Data Storage Architecture
+
+To ensure availability and continuity of necessary vendor data, Replicated uses a cloud-native architecture. This cloud-native architecture includes clustering and network redundancies to eliminate single points of failure.
+
+Replicated stores vendor data in various Amazon Web Services (AWS) S3 buckets and multiple databases. Data stored in the AWS S3 buckets includes registry images and air gap build data.
+
+The following diagram shows the flow of air gap build data and registry images from vendors to enterprise customers.
+
+![Architecture diagram of Replicated vendor data storage](/images/data-storage.png)
+
+[View a larger version of this image](/images/data-storage.png)
+
+As shown in the diagram above, vendors push application images to an image registry. Replicated stores this registry image data in AWS S3 buckets, which are logically isolated by Vendor Portal team. Instances of the vendor's application that are installed by enterprise customers pull data from the image registry.
+
+For more information about how Replicated secures images pushed to the Replicated registry, see [Replicated Registry Security](packaging-private-registry-security).
+
+The diagram also shows how enterprise customers access air gap build data from the customer download portal. Replicated stores this air gap build data in AWS S3 buckets.
+
+## Data Recovery
+
+Our service provider's platform automatically restores customer applications and databases in the case of an outage. The provider's platform is designed to dynamically deploy applications within its cloud, monitor for failures, and recover failed platform components including customer applications and databases.
+
+For more information, see the [Replicated Security White Paper](https://www.replicated.com/downloads/Replicated-Security-Whitepaper.pdf).
+
+## Data Availability
+
+Replicated availability is continuously monitored. For availability reports, see https://status.replicated.com.
+
+## Offsite Data Backup Add-on
+
+For additional data redundancy, an offsite data backup add-on is available to copy customer data to a separate cloud provider. This add-on mitigates the risk of data loss at our primary service provider. For more information, see [Offsite Data Backup](offsite-backup).
+
+
+---
+
+
+# About Managing Stateful Services
+
+# About Managing Stateful Services
+
+This topic provides recommendations for managing stateful services that you install into existing clusters.
+
+## Preflight Checks for Stateful Services
+
+If you expect to also install stateful services into existing clusters, you will likely want to expose [preflight analyzers that check for the existence of a storage class](https://troubleshoot.sh/reference/analyzers/storage-class/).
+
+If you are allowing end users to provide connection details for external databases, you can often use a troubleshoot.sh built-in [collector](https://troubleshoot.sh/docs/collect/) and [analyzer](https://troubleshoot.sh/docs/analyze/) to validate the connection details for [Postgres](https://troubleshoot.sh/docs/analyze/postgresql/), [Redis](https://troubleshoot.sh/docs/collect/redis/), and many other common datastores. These can be included in both `Preflight` and `SupportBundle` specifications.
+
+## About Adding Persistent Datastores
+
+You can integrate persistent stores, such as databases, queues, and caches. There are options you can give an end user, such as embedding an instance alongside the application or connecting the application to an external instance that they will manage.
+
+For an example of integrating persistent datastores, see [Example: Adding Database Configuration Options](tutorial-adding-db-config).
+
+
+---
+
+
+# Disaster Recovery for Embedded Cluster (Alpha)
+
+# Disaster Recovery for Embedded Cluster (Alpha)
+
+This topic describes the disaster recovery feature for Replicated Embedded Cluster, including how to enable disaster recovery for your application. It also describes how end users can configure disaster recovery in the Replicated KOTS Admin Console and restore from a backup.
+
+:::important
+Embedded Cluster disaster recovery is an Alpha feature. This feature is subject to change, including breaking changes. To get access to this feature, reach out to Alex Parker at [alexp@replicated.com](mailto:alexp@replicated.com).
+:::
+
+:::note
+Embedded Cluster does not support backup and restore with the KOTS snapshots feature. 
For more information about using snapshots for existing cluster installations with KOTS, see [About Backup and Restore with Snapshots](/vendor/snapshots-overview). +::: + +## Overview + +The Embedded Cluster disaster recovery feature allows your customers to take backups from the Admin Console and perform restores from the command line. Disaster recovery for Embedded Cluster is implemented with Velero. For more information about Velero, see the [Velero](https://velero.io/docs/latest/) documentation. + +The backups that your customers take from the Admin Console will include both the Embedded Cluster infrastructure and the application resources that you specify. + +The Embedded Cluster infrastructure that is backed up includes components such as the KOTS Admin Console and the built-in registry that is deployed for air gap installations. No configuration is required to include Embedded Cluster infrastructure in backups. Vendors specify the application resources to include in backups by configuring a Velero Backup resource in the application release. + +## Requirements + +Embedded Cluster disaster recovery has the following requirements: + +* The disaster recovery feature flag must be enabled for your account. To get access to disaster recovery, reach out to Alex Parker at [alexp@replicated.com](mailto:alexp@replicated.com). +* Embedded Cluster version 1.22.0 or later +* Backups must be stored in S3-compatible storage + +## Limitations and Known Issues + +Embedded Cluster disaster recovery has the following limitations and known issues: + +* During a restore, the version of the Embedded Cluster installation assets must match the version of the application in the backup. So if version 0.1.97 of your application was backed up, the Embedded Cluster installation assets for 0.1.97 must be used to perform the restore. Use `./APP_SLUG version` to check the version of the installation assets, where `APP_SLUG` is the unique application slug. For example: + + <img alt="version command" src="/images/ec-version-command.png" width="450px"/> + + [View a larger version of this image](/images/ec-version-command.png) + +* Any Helm extensions included in the `extensions` field of the Embedded Cluster Config are _not_ included in backups. Helm extensions are reinstalled as part of the restore process. To include Helm extensions in backups, configure the Velero Backup resource to include the extensions using namespace-based or label-based selection. For more information, see [Configure the Velero Custom Resources](#config-velero-resources) below. + +* Users can only restore from the most recent backup. + +* Velero is installed only during the initial installation process. Enabling the disaster recovery license field for customers after they have already installed will not do anything. + +* If the `--admin-console-port` flag was used during install to change the port for the Admin Console, note that during a restore the Admin Console port will be used from the backup and cannot be changed. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install). + +## Configure Disaster Recovery + +This section describes how to configure disaster recovery for Embedded Cluster installations. It also describes how to enable access to the disaster recovery feature on a per-customer basis. 
+ +### Configure the Velero Custom Resources {#config-velero-resources} + +This section describes how to set up Embedded Cluster disaster recovery for your application by configuring Velero [Backup](https://velero.io/docs/latest/api-types/backup/) and [Restore](https://velero.io/docs/latest/api-types/restore/) custom resources in a release. + +To configure Velero Backup and Restore custom resources for Embedded Cluster disaster recovery: + +1. In a new release containing your application files, add a Velero Backup resource. In the Backup resource, use namespace-based or label-based selection to indicate the application resources that you want to be included in the backup. For more information, see [Backup API Type](https://velero.io/docs/latest/api-types/backup/) in the Velero documentation. + + :::important + If you use namespace-based selection to include all of your application resources deployed in the `kotsadm` namespace, ensure that you exclude the Replicated resources that are also deployed in the `kotsadm` namespace. Because the Embedded Cluster infrastructure components are always included in backups automatically, this avoids duplication. + ::: + + **Example:** + + The following Backup resource uses namespace-based selection to include application resources deployed in the `kotsadm` namespace: + + ```yaml + apiVersion: velero.io/v1 + kind: Backup + metadata: + name: backup + spec: + # Back up the resources in the kotsadm namespace + includedNamespaces: + - kotsadm + orLabelSelectors: + - matchExpressions: + # Exclude Replicated resources from the backup + - { key: kots.io/kotsadm, operator: NotIn, values: ["true"] } + ``` + +1. In the same release, add a Velero Restore resource. In the `backupName` field of the Restore resource, include the name of the Backup resource that you created. For more information, see [Restore API Type](https://velero.io/docs/latest/api-types/restore/) in the Velero documentation. + + **Example**: + + ```yaml + apiVersion: velero.io/v1 + kind: Restore + metadata: + name: restore + spec: + # the name of the Backup resource that you created + backupName: backup + includedNamespaces: + - '*' + ``` + +1. For any image names that you include in your Backup and Restore resources, rewrite the image name using the Replicated KOTS [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry), [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost), and [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace) template functions. This ensures that the image name is rendered correctly during deployment, allowing the image to be pulled from the user's local image registry (such as in air gap installations) or through the Replicated proxy registry. 
+
+   **Example:**
+
+   ```yaml
+   apiVersion: velero.io/v1
+   kind: Restore
+   metadata:
+     name: restore
+   spec:
+     hooks:
+       resources:
+       - name: restore-hook-1
+         includedNamespaces:
+         - kotsadm
+         labelSelector:
+           matchLabels:
+             app: example
+         postHooks:
+         - init:
+             initContainers:
+             - name: restore-hook-init1
+               image:
+                 # Use HasLocalRegistry, LocalRegistryHost, and LocalRegistryNamespace
+                 # to template the image name
+                 registry: '{{repl HasLocalRegistry | ternary LocalRegistryHost "proxy.replicated.com" }}'
+                 repository: '{{repl HasLocalRegistry | ternary LocalRegistryNamespace "proxy/my-app/quay.io/my-org" }}/nginx'
+                 tag: 1.24-alpine
+   ```
+   For more information about how to rewrite image names using the KOTS [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry), [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost), and [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace) template functions, including additional examples, see [Task 1: Rewrite Image Names](helm-native-v2-using#rewrite-image-names) in _Configuring the HelmChart v2 Custom Resource_.
+
+1. If you support air gap installations, add any images that are referenced in your Backup and Restore resources to the `additionalImages` field of the KOTS Application custom resource. This ensures that the images are included in the air gap bundle for the release so they can be used during the backup and restore process in environments with limited or no outbound internet access. For more information, see [additionalImages](/reference/custom-resource-application#additionalimages) in _Application_.
+
+   **Example:**
+
+   ```yaml
+   apiVersion: kots.io/v1beta1
+   kind: Application
+   metadata:
+     name: my-app
+   spec:
+     additionalImages:
+     - elasticsearch:7.6.0
+     - quay.io/orgname/private-image:v1.2.3
+   ```
+
+1. (Optional) Use Velero functionality like [backup](https://velero.io/docs/main/backup-hooks/) and [restore](https://velero.io/docs/main/restore-hooks/) hooks to customize the backup and restore process as needed.
+
+   **Example:**
+
+   A Postgres database might be backed up using `pg_dump` to extract the database into a file as part of a backup hook. It can then be restored using the file in a restore hook:
+
+   ```yaml
+   podAnnotations:
+     backup.velero.io/backup-volumes: backup
+     pre.hook.backup.velero.io/command: '["/bin/bash", "-c", "PGPASSWORD=$POSTGRES_PASSWORD pg_dump -U {{repl ConfigOption "postgresql_username" }} -d {{repl ConfigOption "postgresql_database" }} -h 127.0.0.1 > /scratch/backup.sql"]'
+     pre.hook.backup.velero.io/timeout: 3m
+     post.hook.restore.velero.io/command: '["/bin/bash", "-c", "[ -f \"/scratch/backup.sql\" ] && PGPASSWORD=$POSTGRES_PASSWORD psql -U {{repl ConfigOption "postgresql_username" }} -h 127.0.0.1 -d {{repl ConfigOption "postgresql_database" }} -f /scratch/backup.sql && rm -f /scratch/backup.sql;"]'
+     post.hook.restore.velero.io/wait-for-ready: 'true' # waits for the pod to be ready before running the post-restore hook
+   ```
+
+1. Save and promote the release to a development channel for testing.
+
+### Enable the Disaster Recovery Feature for Your Customers
+
+After configuring disaster recovery for your application, you can enable it on a per-customer basis with the **Allow Disaster Recovery (Alpha)** license field.
+
+To enable disaster recovery for a customer:
+
+1. In the Vendor Portal, go to the [Customers](https://vendor.replicated.com/customers) page and select the target customer.
+
+1. 
On the **Manage customer** page, under **License options**, enable the **Allow Disaster Recovery (Alpha)** field.
+
+   When your customer installs with Embedded Cluster, Velero will be deployed if the **Allow Disaster Recovery (Alpha)** license field is enabled.
+
+## Take Backups and Restore
+
+This section describes how your customers can configure backup storage, take backups, and restore from backups.
+
+### Configure Backup Storage and Take Backups in the Admin Console
+
+Customers with the **Allow Disaster Recovery (Alpha)** license field can configure their backup storage location and take backups from the Admin Console.
+
+To configure backup storage and take backups:
+
+1. After installing the application and logging in to the Admin Console, click the **Disaster Recovery** tab at the top of the Admin Console.
+
+1. For the desired S3-compatible backup storage location, enter the bucket, prefix (optional), access key ID, access key secret, endpoint, and region. Click **Update storage settings**.
+
+   <img alt="backup storage settings" src="/images/dr-backup-storage-settings.png" width="400px"/>
+
+   [View a larger version of this image](/images/dr-backup-storage-settings.png)
+
+1. (Optional) From this same page, configure scheduled backups and a retention policy for backups.
+
+   <img src="/images/dr-scheduled-backups.png" width="400px" alt="scheduled backups"/>
+
+   [View a larger version of this image](/images/dr-scheduled-backups.png)
+
+1. In the **Disaster Recovery** submenu, click **Backups**. Backups can be taken from this screen.
+
+   <img src="/images/dr-backups.png" alt="backups page" width="600px"/>
+
+   [View a larger version of this image](/images/dr-backups.png)
+
+### Restore from a Backup
+
+To restore from a backup:
+
+1. SSH onto a new machine where you want to restore from a backup.
+
+1. Download the Embedded Cluster installation assets for the version of the application that was included in the backup. You can find the command for downloading Embedded Cluster installation assets in the **Embedded Cluster install instructions dialog** for the customer. For more information, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded).
+
+   :::note
+   The version of the Embedded Cluster installation assets must match the version that is in the backup. For more information, see [Limitations and Known Issues](#limitations-and-known-issues).
+   :::
+
+1. Run the restore command:
+
+   ```bash
+   sudo ./APP_SLUG restore
+   ```
+   Where `APP_SLUG` is the unique application slug.
+
+   Note the following requirements and guidance for the `restore` command:
+
+   * If the installation is behind a proxy, the same proxy settings provided during install must be provided to the restore command using `--http-proxy`, `--https-proxy`, and `--no-proxy`. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install).
+
+   * If the `--cidr` flag was used during install to set the IP address ranges for Pods and Services, this flag must be provided with the same CIDR during the restore. If this flag is not provided or is provided with a different CIDR, the restore will fail with an error message telling you to rerun with the appropriate value. However, it will take some time before that error occurs. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install).
+
+   * If the `--local-artifact-mirror-port` flag was used during install to change the port for the Local Artifact Mirror (LAM), you can optionally use the `--local-artifact-mirror-port` flag to choose a different LAM port during restore. For example, `restore --local-artifact-mirror-port=50000`. If no LAM port is provided during restore, the LAM port that was supplied during installation will be used. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install).
+
+   You will be guided through the process of restoring from a backup.
+
+1. When prompted, enter the information for the backup storage location.
+
+   ![Restore prompts on the command line](/images/dr-restore.png)
+   [View a larger version of this image](/images/dr-restore.png)
+
+1. When prompted, confirm that you want to restore from the detected backup.
+
+   ![Restore from detected backup prompt on the command line](/images/dr-restore-from-backup-confirmation.png)
+   [View a larger version of this image](/images/dr-restore-from-backup-confirmation.png)
+
+   After some time, the Admin Console URL is displayed:
+
+   ![Admin Console URL displayed on the command line](/images/dr-restore-admin-console-url.png)
+   [View a larger version of this image](/images/dr-restore-admin-console-url.png)
+
+1. (Optional) If the cluster should have multiple nodes, go to the Admin Console to get a join command and join additional nodes to the cluster. For more information, see [Managing Multi-Node Clusters with Embedded Cluster](/enterprise/embedded-manage-nodes).
+
+1. Type `continue` when you are ready to proceed with the restore process.
+
+   ![Type continue when you are done adding nodes](/images/dr-restore-continue.png)
+   [View a larger version of this image](/images/dr-restore-continue.png)
+
+   After some time, the restore process completes.
+
+   If the `restore` command is interrupted during the restore process, you can resume by rerunning the `restore` command and selecting to resume the previous restore. This is useful if your SSH session is interrupted during the restore.
+
+
+---
+
+
+# Embedded Cluster Overview
+
+import EmbeddedCluster from "../partials/embedded-cluster/_definition.mdx"
+import Requirements from "../partials/embedded-cluster/_requirements.mdx"
+import EmbeddedClusterPortRequirements from "../partials/embedded-cluster/_port-reqs.mdx"
+import HaArchitecture from "../partials/embedded-cluster/_multi-node-ha-arch.mdx"
+
+# Embedded Cluster Overview
+
+This topic provides an introduction to Replicated Embedded Cluster, including a description of the built-in extensions installed by Embedded Cluster, an overview of the Embedded Cluster single-node and multi-node architecture, and requirements and limitations.
+
+:::note
+If you are instead looking for information about creating Kubernetes Installers with Replicated kURL, see the [Replicated kURL](/vendor/packaging-embedded-kubernetes) section.
+:::
+
+## Overview
+
+<EmbeddedCluster/>
+
+## Architecture
+
+This section describes the Embedded Cluster architecture, including the built-in extensions deployed by Embedded Cluster.
+
+### Single-Node Architecture
+
+The following diagram shows the architecture of a single-node Embedded Cluster installation for an application named Gitea:
+
+![Embedded Cluster single-node architecture](/images/embedded-architecture-single-node.png)
+
+[View a larger version of this image](/images/embedded-architecture-single-node.png)
+
+As shown in the diagram above, the user downloads the Embedded Cluster installation assets as a `.tgz` in their installation environment. These installation assets include the Embedded Cluster binary, the user's license file, and (for air gap installations) an air gap bundle containing the images needed to install and run the release in an environment with limited or no outbound internet access.
+
+When the user runs the Embedded Cluster install command, the Embedded Cluster binary first installs the k0s cluster as a systemd service.
+
+After all the Kubernetes components for the cluster are available, the Embedded Cluster binary then installs the Embedded Cluster built-in extensions. For more information about these extensions, see [Built-In Extensions](#built-in-extensions) below.
+
+Any Helm extensions that were included in the [`extensions`](/reference/embedded-config#extensions) field of the Embedded Cluster Config are also installed. The namespace or namespaces where Helm extensions are installed are defined by the vendor in the Embedded Cluster Config.
+
+Finally, Embedded Cluster also installs Local Artifact Mirror (LAM). In air gap installations, LAM is used to store and update images.
+
+### Multi-Node Architecture
+
+The following diagram shows the architecture of a multi-node Embedded Cluster installation:
+
+![Embedded Cluster multi-node architecture](/images/embedded-architecture-multi-node.png)
+
+[View a larger version of this image](/images/embedded-architecture-multi-node.png)
+
+As shown in the diagram above, in multi-node installations, the Embedded Cluster Operator, KOTS, and the image registry for air gap installations are all installed on one controller node.
+
+For installations that include disaster recovery with Velero, the Velero Node Agent runs on each node in the cluster. The Node Agent is a Kubernetes DaemonSet that performs backup and restore tasks such as creating snapshots and transferring data during restores.
+
+Additionally, any Helm [`extensions`](/reference/embedded-config#extensions) that you include in the Embedded Cluster Config are installed in the cluster depending on the given chart and how it is configured to be deployed.
+
+### Multi-Node Architecture with High Availability
+
+:::note
+High availability (HA) for multi-node installations with Embedded Cluster is Alpha and is not enabled by default. For more information about enabling HA, see [Enable High Availability for Multi-Node Clusters (Alpha)](/enterprise/embedded-manage-nodes#ha).
+:::
+
+<HaArchitecture/>
+
+## Built-In Extensions {#built-in-extensions}
+
+Embedded Cluster includes several built-in extensions. The built-in extensions provide capabilities such as application management and storage. Each built-in extension is installed in its own namespace.
+
+The built-in extensions installed by Embedded Cluster include:
+
+* **Embedded Cluster Operator**: The Operator is used for reporting purposes as well as some cleanup operations.
+
+* **KOTS:** Embedded Cluster installs the KOTS Admin Console in the kotsadm namespace. End customers use the Admin Console to configure and install the application. Rqlite is also installed in the kotsadm namespace alongside KOTS. Rqlite is a distributed relational database that uses SQLite as its storage engine. KOTS uses rqlite to store information such as support bundles, version history, application metadata, and other small amounts of data needed to manage the application. For more information about rqlite, see the [rqlite](https://rqlite.io/) website.
+
+* **OpenEBS:** Embedded Cluster uses OpenEBS to provide local PersistentVolume (PV) storage, including the PV storage for rqlite used by KOTS. For more information, see the [OpenEBS](https://openebs.io/docs/) documentation.
+
+* **(Disaster Recovery Only) Velero:** If the installation uses the Embedded Cluster disaster recovery feature, Embedded Cluster installs Velero, which is an open-source tool that provides backup and restore functionality. For more information about Velero, see the [Velero](https://velero.io/docs/latest/) documentation. For more information about the disaster recovery feature, see [Disaster Recovery for Embedded Cluster (Alpha)](/vendor/embedded-disaster-recovery).
+
+* **(Air Gap Only) Image registry:** For air gap installations in environments with limited or no outbound internet access, Embedded Cluster installs an image registry where the images required to install and run the application are pushed. For more information about installing in air-gapped environments, see [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap).
+
+## Comparison to kURL
+
+Embedded Cluster is a successor to Replicated kURL. Compared to kURL, Embedded Cluster offers several improvements such as:
+* Significantly faster installation, updates, and node joins
+* A redesigned Admin Console UI for managing the cluster
+* Improved support for multi-node clusters
+* One-click updates of both the application and the cluster at the same time
+
+Additionally, Embedded Cluster automatically deploys several built-in extensions like KOTS and OpenEBS to provide capabilities such as application management and storage. This represents an improvement over kURL because vendors distributing their application with Embedded Cluster no longer need to choose and define various add-ons in the installer spec. For additional functionality that is not included in the built-in extensions, such as an ingress controller, vendors can provide their own [`extensions`](/reference/embedded-config#extensions) that will be deployed alongside the application.
+
+## Requirements
+
+### System Requirements
+
+<Requirements/>
+
+### Port Requirements
+
+<EmbeddedClusterPortRequirements/>
+
+## Limitations
+
+Embedded Cluster has the following limitations:
+
+* **Reach out about migrating from kURL**: We are helping several customers migrate from kURL to Embedded Cluster. Reach out to Alex Parker at alexp@replicated.com for more information.
+
+* **Multi-node support is in beta**: Support for multi-node embedded clusters is in beta, and enabling high availability for multi-node clusters is in alpha. Only single-node embedded clusters are generally available. For more information, see [Managing Multi-Node Clusters with Embedded Cluster](/enterprise/embedded-manage-nodes).
+
+* **Disaster recovery is in alpha**: Disaster Recovery for Embedded Cluster installations is in alpha. For more information, see [Disaster Recovery for Embedded Cluster (Alpha)](/vendor/embedded-disaster-recovery).
+
+* **Partial rollback support**: In Embedded Cluster 1.17.0 and later, rollbacks are supported only when rolling back to a version where there is no change to the [Embedded Cluster Config](/reference/embedded-config) compared to the currently-installed version. For example, users can roll back to release version 1.0.0 after upgrading to 1.1.0 only if both 1.0.0 and 1.1.0 use the same Embedded Cluster Config. For more information about how to enable rollbacks for your application in the KOTS Application custom resource, see [allowRollback](/reference/custom-resource-application#allowrollback) in _Application_.
+
+* **Changing node hostnames is not supported**: After a host is added to a Kubernetes cluster, Kubernetes assumes that the hostname and IP address of the host will not change. If you need to change the hostname or IP address of a node, you must first remove the node from the cluster. For more information about the requirements for naming nodes, see [Node name uniqueness](https://kubernetes.io/docs/concepts/architecture/nodes/#node-name-uniqueness) in the Kubernetes documentation.
+
+* **Automatic updates not supported**: Configuring automatic updates from the Admin Console so that new versions are automatically deployed is not supported for Embedded Cluster installations. For more information, see [Configuring Automatic Updates](/enterprise/updating-apps).
+
+* **Embedded Cluster installation assets not available through the Download Portal**: The assets required to install with Embedded Cluster cannot be shared with users through the Download Portal. Users can follow the Embedded Cluster installation instructions to download and extract the installation assets. For more information, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded).
+
+* **`minKotsVersion` and `targetKotsVersion` not supported**: The [`minKotsVersion`](/reference/custom-resource-application#minkotsversion-beta) and [`targetKotsVersion`](/reference/custom-resource-application#targetkotsversion) fields in the KOTS Application custom resource are not supported for Embedded Cluster installations. This is because each version of Embedded Cluster includes a particular version of KOTS. Setting `targetKotsVersion` or `minKotsVersion` to a version of KOTS that does not coincide with the version that is included in the specified version of Embedded Cluster will cause Embedded Cluster installations to fail with an error message like: `Error: This version of App Name requires a different version of KOTS from what you currently have installed`. To avoid installation failures, do not use `targetKotsVersion` or `minKotsVersion` in releases that support installation with Embedded Cluster.
+
+* **Support bundles over 100MB in the Admin Console**: Support bundles are stored in rqlite. Bundles over 100MB could cause rqlite to crash, causing errors in the installation. You can still generate a support bundle from the command line. For more information, see [Generating Support Bundles for Embedded Cluster](/vendor/support-bundle-embedded).
+
+* **Kubernetes version template functions not supported**: The KOTS [KubernetesVersion](/reference/template-functions-static-context#kubernetesversion), [KubernetesMajorVersion](/reference/template-functions-static-context#kubernetesmajorversion), and [KubernetesMinorVersion](/reference/template-functions-static-context#kubernetesminorversion) template functions do not provide accurate Kubernetes version information for Embedded Cluster installations. This is because these template functions are rendered before the Kubernetes cluster has been updated to the intended version. However, `KubernetesVersion` is not necessary for Embedded Cluster because vendors specify the Embedded Cluster version, which includes a known Kubernetes version.
+
+* **Custom domains not supported**: Embedded Cluster does not support the use of custom domains, even if custom domains are configured. We intend to add support for custom domains. For more information about custom domains, see [About Custom Domains](/vendor/custom-domains).
+
+* **KOTS Auto-GitOps workflow not supported**: Embedded Cluster does not support the KOTS Auto-GitOps workflow. If an end-user is interested in GitOps, consider the Helm install method instead. For more information, see [Installing with Helm](/vendor/install-with-helm).
+
+* **Downgrading Kubernetes not supported**: Embedded Cluster does not support downgrading Kubernetes. The Admin Console will not prevent end-users from attempting to downgrade Kubernetes if a more recent version of your application specifies a previous Embedded Cluster version. You must ensure that you do not promote new versions with previous Embedded Cluster versions.
+
+* **Templating not supported in Embedded Cluster Config**: The [Embedded Cluster Config](/reference/embedded-config) resource does not support the use of Go template functions, including [KOTS template functions](/reference/template-functions-about). This only applies to the Embedded Cluster Config. You can still use template functions in the rest of your release as usual.
+
+* **Policy enforcement on Embedded Cluster workloads is not supported**: The Embedded Cluster runs workloads that require higher levels of privilege. If your application installs a policy enforcement engine such as Gatekeeper or Kyverno, ensure that its policies are not enforced in the namespaces used by Embedded Cluster.
+
+* **Installing on STIG- and CIS-hardened OS images is not supported**: Embedded Cluster is not tested on these images, and issues have arisen when trying to install on them.
+
+
+---
+
+
+# Using Embedded Cluster
+
+import UpdateOverview from "../partials/embedded-cluster/_update-overview.mdx"
+import SupportBundleIntro from "../partials/support-bundles/_ec-support-bundle-intro.mdx"
+import EmbeddedClusterSupportBundle from "../partials/support-bundles/_generate-bundle-ec.mdx"
+import EcConfig from "../partials/embedded-cluster/_ec-config.mdx"
+
+# Using Embedded Cluster
+
+This topic provides information about using Replicated Embedded Cluster, including how to get started, configure Embedded Cluster, access the cluster using kubectl, and more. For an introduction to Embedded Cluster, see [Embedded Cluster Overview](embedded-overview).
+
+## Quick Start
+
+You can use the following steps to get started quickly with Embedded Cluster. More detailed documentation is available below.
+
+1. Create a new customer or edit an existing customer and select the **Embedded Cluster Enabled** license option. Save the customer.
+
+1. Create a new release that includes your application. In that release, create an Embedded Cluster Config that includes, at minimum, the Embedded Cluster version you want to use. See the Embedded Cluster [GitHub repo](https://github.com/replicatedhq/embedded-cluster/releases) to find the latest version.
+
+   Example Embedded Cluster Config:
+
+   <EcConfig/>
+
+1. Save the release and promote it to the channel the customer is assigned to.
+
+1. Return to the customer page where you enabled Embedded Cluster. At the top right, click **Install instructions** and choose **Embedded Cluster**. A dialog appears with instructions on how to download the Embedded Cluster installation assets and install your application.
+
+   ![Customer install instructions drop down button](/images/customer-install-instructions-dropdown.png)
+
+   [View a larger version of this image](/images/customer-install-instructions-dropdown.png)
+
+1. On your VM, run the commands in the **Embedded Cluster install instructions** dialog.
+
+   <img alt="Embedded cluster install instruction dialog" src="/images/embedded-cluster-install-dialog-latest.png" width="550px"/>
+
+   [View a larger version of this image](/images/embedded-cluster-install-dialog-latest.png)
+
+1. Enter an Admin Console password when prompted.
+
+   The Admin Console URL is printed when the installation finishes. Access the Admin Console to begin installing your application. During the installation process in the Admin Console, you have the opportunity to add nodes if you want a multi-node cluster. Then you can provide application config, run preflights, and deploy your application.
+
+## About Configuring Embedded Cluster
+
+To install an application with Embedded Cluster, an Embedded Cluster Config must be present in the application release. The Embedded Cluster Config lets you define several characteristics about the cluster that will be created.
+
+For more information, see [Embedded Cluster Config](/reference/embedded-config).
+
+## About Installing with Embedded Cluster
+
+This section provides an overview of installing applications with Embedded Cluster.
+
+### Installation Overview
+
+The following diagram demonstrates how Kubernetes and an application are installed into a customer environment using Embedded Cluster:
+
+![Embedded Cluster installs an app in a customer environment](/images/embedded-cluster-install.png)
+
+[View a larger version of this image](/images/embedded-cluster-install.png)
+
+As shown in the diagram above, the Embedded Cluster Config is included in the application release in the Replicated Vendor Portal and is used to generate the Embedded Cluster installation assets. Users can download these installation assets from the Replicated app service (`replicated.app`) on the command line, then run the Embedded Cluster installation command to install Kubernetes and the KOTS Admin Console. Finally, users access the Admin Console to optionally add nodes to the cluster and to configure and install the application.
+
+### Installation Options
+
+Embedded Cluster supports installations in online (internet-connected) environments and air gap environments with no outbound internet access.
+
+For online installations, Embedded Cluster also supports installing behind a proxy server.
+
+For more information about how to install with Embedded Cluster, see:
+* [Online Installation with Embedded Cluster](/enterprise/installing-embedded)
+* [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap)
+
+### Customer-Specific Installation Instructions
+
+To install with Embedded Cluster, you can follow the customer-specific instructions provided on the **Customer** page in the Vendor Portal. For example:
+
+<img alt="Embedded cluster install instruction dialog" src="/images/embedded-cluster-install-dialog.png" width="500px"/>
+
+[View a larger version of this image](/images/embedded-cluster-install-dialog.png)
+
+### (Optional) Serve Installation Assets Using the Vendor API
+
+To install with Embedded Cluster, you need to download the Embedded Cluster installer binary and a license. Air gap installations also require an air gap bundle. Some vendors already have a portal where their customers can log in to access documentation or download artifacts. In cases like this, you can serve the Embedded Cluster installation assets yourself using the Replicated Vendor API, rather than having customers download the assets from the Replicated app service using a curl command during installation.
+
+To serve Embedded Cluster installation assets with the Vendor API:
+
+1. If you have not done so already, create an API token for the Vendor API. See [Using the Vendor API v3](/reference/vendor-api-using#api-token-requirement).
+
+1. Call the [Get an Embedded Cluster release](https://replicated-vendor-api.readme.io/reference/getembeddedclusterrelease) endpoint to download the assets needed to install your application with Embedded Cluster. Your customers must take this binary and their license and copy them to the machine where they will install your application.
+
+   Note the following:
+
+   * (Recommended) Provide the `customerId` query parameter so that the customer’s license is included in the downloaded tarball. This mirrors what is returned when a customer downloads the binary directly using the Replicated app service and is the most useful option. Excluding the `customerId` is useful if you plan to distribute the license separately.
+
+   * If you do not provide any query parameters, this endpoint downloads the Embedded Cluster binary for the latest release on the specified channel. You can provide the `channelSequence` query parameter to download the binary for a particular release.
+
+### About Host Preflight Checks
+
+During installation, Embedded Cluster automatically runs a default set of _host preflight checks_. The default host preflight checks are designed to verify that the installation environment meets the requirements for Embedded Cluster, such as:
+* The system has sufficient disk space
+* The system has at least 2GB of memory and 2 CPU cores
+* The system clock is synchronized
+
+For Embedded Cluster requirements, see [Embedded Cluster Installation Requirements](/enterprise/installing-embedded-requirements). For the full default host preflight spec for Embedded Cluster, see [`host-preflight.yaml`](https://github.com/replicatedhq/embedded-cluster/blob/main/pkg/preflights/host-preflight.yaml) in the `embedded-cluster` repository in GitHub.
+
+If any of the host preflight checks fail, installation is blocked and a message describing the failure is displayed. For more information about host preflight checks for installations on VMs or bare metal servers, see [About Host Preflights](preflight-support-bundle-about#host-preflights).
+
+#### Limitations
+
+Embedded Cluster host preflight checks have the following limitations:
+
+* The default host preflight checks for Embedded Cluster cannot be modified, and vendors cannot provide their own custom host preflight spec for Embedded Cluster.
+* Host preflight checks do not check that any application-specific requirements are met. For more information about defining preflight checks for your application, see [Defining Preflight Checks](/vendor/preflight-defining).
+
+#### Skip Host Preflight Checks
+
+You can skip host preflight checks by passing the `--skip-host-preflights` flag with the Embedded Cluster `install` command. For example:
+
+```bash
+sudo ./my-app install --license license.yaml --skip-host-preflights
+```
+
+When you skip host preflight checks, the Admin Console still runs any application-specific preflight checks that are defined in the release before the application is deployed.
+
+:::note
+Skipping host preflight checks is _not_ recommended for production installations.
+:::
+
+## About Managing Multi-Node Clusters with Embedded Cluster
+
+This section describes managing nodes in multi-node clusters created with Embedded Cluster.
+
+### Defining Node Roles for Multi-Node Clusters
+
+You can optionally define node roles in the Embedded Cluster Config. For multi-node clusters, roles can be useful for the purpose of assigning specific application workloads to nodes. If node roles are defined, users access the Admin Console to assign one or more roles to a node when it is joined to the cluster.
+
+For more information, see [roles](/reference/embedded-config#roles) in _Embedded Cluster Config_.
+
+### Adding Nodes
+
+Users can add nodes to a cluster with Embedded Cluster from the Admin Console. The Admin Console provides the join command used to add nodes to the cluster.
+
+For more information, see [Managing Multi-Node Clusters with Embedded Cluster](/enterprise/embedded-manage-nodes).
+
+### High Availability for Multi-Node Clusters (Alpha)
+
+Multi-node clusters are not highly available by default. Enabling high availability (HA) requires that at least three controller nodes are present in the cluster. Users can enable HA when joining the third node.
+
+For more information about creating HA multi-node clusters with Embedded Cluster, see [Enable High Availability for Multi-Node Clusters (Alpha)](/enterprise/embedded-manage-nodes#ha) in _Managing Multi-Node Clusters with Embedded Cluster_.
+
+## About Performing Updates with Embedded Cluster
+
+<UpdateOverview/>
+
+For more information about updating, see [Performing Updates with Embedded Cluster](/enterprise/updating-embedded).
+
+## Access the Cluster
+
+With Embedded Cluster, end users should rarely need to use the CLI. Typical workflows, like updating the application and the cluster, are driven through the Admin Console.
+
+Nonetheless, there are times when vendors or their customers need to use the CLI for development or troubleshooting.
+
+To access the cluster and use other included binaries:
+
+1. SSH onto a controller node.
+
+1. Use the Embedded Cluster `shell` command to start a shell with access to the cluster:
+
+   ```
+   sudo ./APP_SLUG shell
+   ```
+
+   The output looks similar to the following:
+   ```
+      __4___
+   _  \ \ \ \   Welcome to APP_SLUG debug shell.
+  <'\ /_/_/_/   This terminal is now configured to access your cluster.
+   ((____!___/) Type 'exit' (or CTRL+d) to exit.
+    \0\0\0\0\/  Happy hacking.
+
+     ~~~~~~~~~~~
+   root@alex-ec-2:/home/alex# export KUBECONFIG="/var/lib/embedded-cluster/k0s/pki/admin.conf"
+   root@alex-ec-2:/home/alex# export PATH="$PATH:/var/lib/embedded-cluster/bin"
+   root@alex-ec-2:/home/alex# source <(kubectl completion bash)
+   root@alex-ec-2:/home/alex# source /etc/bash_completion
+   ```
+
+   The appropriate kubeconfig is exported, and the location of useful binaries like kubectl and Replicated’s preflight and support-bundle plugins is added to PATH.
+
+   :::note
+   You cannot run the `shell` command on worker nodes.
+   :::
+
+1. Use the available binaries as needed.
+
+   **Example**:
+
+   ```bash
+   kubectl version
+   ```
+   ```
+   Client Version: v1.29.1
+   Kustomize Version: v5.0.4-0.20230601165947-6ce0bf390ce3
+   Server Version: v1.29.1+k0s
+   ```
+
+1. Type `exit` or **Ctrl + D** to exit the shell.
+
+   :::note
+   If you encounter a typical workflow where your customers have to use the Embedded Cluster shell, reach out to Alex Parker at alexp@replicated.com. These workflows might be candidates for additional Admin Console functionality.
+   :::
+
+## Reset a Node
+
+Resetting a node removes the cluster and your application from that node. This is useful during iteration and development, and for recovering from mistakes: you can reset a machine and reuse it instead of having to procure another machine.
+
+If you want to completely remove a cluster, you need to reset each node individually.
+
+When resetting a node, OpenEBS PVCs on the node are deleted. Only PVCs created as part of a StatefulSet will be recreated automatically on another node. To recreate other PVCs, the application will need to be redeployed.
+
+To reset a node:
+
+1. SSH onto the machine. Ensure that the Embedded Cluster binary is still available on that machine.
+
+1. Run the following command to reset the node and automatically reboot the machine to ensure that transient configuration is also reset:
+
+   ```
+   sudo ./APP_SLUG reset
+   ```
+   Where `APP_SLUG` is the unique slug for the application.
+
+   :::note
+   Pass the `--no-prompt` flag to disable interactive prompts. Pass the `--force` flag to ignore any errors encountered during the reset.
+   :::
+
+## Additional Use Cases
+
+This section outlines some additional use cases for Embedded Cluster. These are not officially supported features from Replicated, but are ways of using Embedded Cluster that we or our customers have experimented with that might be useful to you.
+
+### NVIDIA GPU Operator
+
+The NVIDIA GPU Operator uses the operator framework within Kubernetes to automate the management of all NVIDIA software components needed to provision GPUs. For more information about this operator, see the [NVIDIA GPU Operator](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/overview.html) documentation.
+
+You can include the NVIDIA GPU Operator in your release either as an additional Helm chart or as an Embedded Cluster Helm extension. For information about adding Helm extensions, see [extensions](/reference/embedded-config#extensions) in _Embedded Cluster Config_.
+ +Using the NVIDIA GPU Operator with Embedded Cluster requires configuring the containerd options in the operator as follows: + +```yaml +# Embedded Cluster Config + + extensions: + helm: + repositories: + - name: nvidia + url: https://nvidia.github.io/gpu-operator + charts: + - name: gpu-operator + chartname: nvidia/gpu-operator + namespace: gpu-operator + version: "v24.9.1" + values: | + # configure the containerd options + toolkit: + env: + - name: CONTAINERD_CONFIG + value: /etc/k0s/containerd.d/nvidia.toml + - name: CONTAINERD_SOCKET + value: /run/k0s/containerd.sock +``` +When the containerd options are configured as shown above, the NVIDIA GPU Operator automatically creates the required configurations in the `/etc/k0s/containerd.d/nvidia.toml` file. It is not necessary to create this file manually, or modify any other configuration on the hosts. + +:::note +If you include the NVIDIA GPU Operator as a Helm extension, remove any existing containerd services that are running on the host (such as those deployed by Docker) before attempting to install the release with Embedded Cluster. If there are any containerd services on the host, the NVIDIA GPU Operator will generate an invalid containerd config, causing the installation to fail. +::: + +## Troubleshoot with Support Bundles + +<SupportBundleIntro/> + +<EmbeddedClusterSupportBundle/> + + +--- + + +# Using the Proxy Registry with Helm Installations + +import StepCreds from "../partials/proxy-service/_step-creds.mdx" +import StepCustomDomain from "../partials/proxy-service/_step-custom-domain.mdx" + +# Using the Proxy Registry with Helm Installations + +This topic describes how to use the Replicated proxy registry to proxy images for installations with the Helm CLI. For more information about the proxy registry, see [About the Replicated Proxy Registry](private-images-about). + +## Overview + +With the Replicated proxy registry, each customer's unique license can grant proxy access to images in an external private registry. To enable the proxy registry for Helm installations, you must create a Secret with `type: kubernetes.io/dockerconfigjson` to authenticate with the proxy registry. + +During Helm installations, after customers provide their license ID, a `global.replicated.dockerconfigjson` field that contains a base64 encoded Docker configuration file is automatically injected in the Helm chart values. You can use this `global.replicated.dockerconfigjson` field to create the required pull secret. + +For information about how Kubernetes uses the `kubernetes.io/dockerconfigjson` Secret type to authenticate to a private image registry, see [Pull an Image from a Private Registry](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) in the Kubernetes documentation. + +## Enable the Proxy Registry + +This section describes how to enable the proxy registry for applications deployed with Helm, including how to use the `global.replicated.dockerconfigjson` field that is injected during application deployment to create the required pull secret. + +To enable the proxy registry: + +1. <StepCreds/> + +1. <StepCustomDomain/> + +1. 
In your Helm chart templates, create a Kubernetes Secret to evaluate if the `global.replicated.dockerconfigjson` value is set, and then write the rendered value into a Secret on the cluster: + + ```yaml + # /templates/replicated-pull-secret.yaml + + {{ if .Values.global.replicated.dockerconfigjson }} + apiVersion: v1 + kind: Secret + metadata: + name: replicated-pull-secret + type: kubernetes.io/dockerconfigjson + data: + .dockerconfigjson: {{ .Values.global.replicated.dockerconfigjson }} + {{ end }} + ``` + + :::note + If you use the Replicated SDK, do not use `replicated` for the name of the image pull secret because the SDK automatically creates a Secret named `replicated`. Using the same name causes an error. + ::: + +1. Ensure that you have a field in your Helm chart values file for your image repository URL, and that any references to the image in your Helm chart access the field from your values file. + + **Example**: + + ```yaml + # values.yaml + ... + dockerconfigjson: '{{ .Values.global.replicated.dockerconfigjson }}' + images: + myapp: + # Add image URL in the values file + apiImageRepository: quay.io/my-org/api + apiImageTag: v1.0.1 + ``` + ```yaml + # /templates/deployment.yaml + + apiVersion: apps/v1 + kind: Deployment + metadata: + name: example + spec: + template: + spec: + containers: + - name: api + # Access the apiImageRepository field from the values file + image: {{ .Values.images.myapp.apiImageRepository }}:{{ .Values.images.myapp.apiImageTag }} + ``` + +1. In your Helm chart templates, add the image pull secret that you created to any manifests that reference the private image: + + ```yaml + # /templates/example.yaml + ... + {{ if .Values.global.replicated.dockerconfigjson }} + imagePullSecrets: + - name: replicated-pull-secret + {{ end }} + ``` + + **Example:** + + ```yaml + # /templates/deployment.yaml + ... + image: "{{ .Values.images.myapp.apiImageRepository }}:{{ .Values.images.myapp.apiImageTag }}" + {{ if .Values.global.replicated.dockerconfigjson }} + imagePullSecrets: + - name: replicated-pull-secret + {{ end }} + name: myapp + ports: + - containerPort: 3000 + name: http + ``` + +1. Package your Helm chart and add it to a release. Promote the release to a development channel. See [Managing Releases with Vendor Portal](releases-creating-releases). + +1. Install the chart in a development environment to test your changes: + + 1. Create a local `values.yaml` file to override the default external registry image URL with the URL for the image on `proxy.replicated.com`. + + The proxy registry URL has the following format: `proxy.replicated.com/proxy/APP_SLUG/EXTERNAL_REGISTRY_IMAGE_URL` + + Where: + * `APP_SLUG` is the slug of your Replicated application. + * `EXTERNAL_REGISTRY_IMAGE_URL` is the path to the private image on your external registry. + + **Example** + ```yaml + # A local values.yaml file + ... + images: + myapp: + apiImageRepository: proxy.replicated.com/proxy/my-app/quay.io/my-org/api + apiImageTag: v1.0.1 + + ``` + + :::note + If you configured a custom domain for the proxy registry, use the custom domain instead of `proxy.replicated.com`. For more information, see [Using Custom Domains](custom-domains-using). + ::: + + 1. Log in to the Replicated registry and install the chart, passing the local `values.yaml` file you created with the `--values` flag. See [Installing with Helm](install-with-helm). 
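+
+For reference, a minimal sketch of this final login and install sequence is shown below. The app slug (`my-app`), channel (`beta`), chart name (`my-chart`), email address, and license ID are placeholders for your own application and customer values:
+
+```bash
+# Log in to the Replicated registry, using the customer email address
+# as the username and the license ID as the password
+helm registry login registry.replicated.com \
+  --username user@example.com \
+  --password LICENSE_ID
+
+# Install the chart from the Replicated registry, passing the local
+# values file that overrides the default image URLs
+helm install my-app oci://registry.replicated.com/my-app/beta/my-chart \
+  --values values.yaml
+```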
+
+
+---
+
+
+# Installing and Updating with Helm in Air Gap Environments
+
+import Prerequisites from "../partials/helm/_helm-install-prereqs.mdx"
+
+# Installing and Updating with Helm in Air Gap Environments
+
+## Overview
+
+Replicated supports installing and updating Helm charts in air gap environments with no outbound internet access. In air gap Helm installations, customers are guided through the process with instructions provided in the [Replicated Download Portal](/vendor/releases-share-download-portal).
+
+When air gap Helm installations are enabled, an **Existing cluster with Helm** option is displayed in the Download Portal on the left nav. When selected, **Existing cluster with Helm** displays three tabs (**Install**, **Manual Update**, **Automate Updates**), as shown in the screenshot below:
+
+![download helm option](/images/download-helm.png)
+
+[View a larger version of this image](/images/download-helm.png)
+
+Each tab provides instructions for how to install, perform a manual update, or configure automatic updates, respectively.
+
+These installing and updating instructions assume that your customer is accessing the Download Portal from a workstation that can access the internet and their internal private registry. Direct access to the target cluster is not required.
+
+Each method assumes that your customer is familiar with `curl`, `docker`, `helm`, `kubernetes`, and a bit of `bash`, particularly for automating updates.
+
+## Prerequisites
+
+Before you install, complete the following prerequisites:
+
+* Reach out to your account rep to enable the Helm air gap installation feature.
+
+<Prerequisites/>
+
+## Install
+
+The installation instructions provided in the Download Portal are designed to walk your customer through the first installation of your chart in an air gap environment.
+
+To install with Helm in an air gap environment:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), go to **Customers > [Customer Name] > Reporting**.
+
+1. In the **Download portal** section, click **Visit download portal** to log in to the Download Portal for the customer.
+
+1. In the Download Portal left nav, click **Existing cluster with Helm**.
+
+   ![download helm option](/images/download-helm.png)
+
+   [View a larger version of this image](/images/download-helm.png)
+
+1. On the **Install** tab, in the **App version** dropdown, select the target application version to install.
+
+1. Run the first command to authenticate into the Replicated proxy registry with the customer's credentials (the `license_id`).
+
+1. Under **Get the list of images**, run the command provided to generate the list of images needed to install.
+
+1. For **(Optional) Specify registry URI**, provide the URI for an internal image registry where you want to push images. If a registry URI is provided, Replicated automatically updates the commands for tagging and pushing images with the URI.
+
+1. For **Pull, tag, and push each image to your private registry**, copy and paste the docker commands provided to pull, tag, and push each image to your internal registry.
+
+   :::note
+   If you did not provide a URI in the previous step, ensure that you manually replace the image names in the `tag` and `push` commands with the target registry URI.
+   :::
+
+1. Run the command to authenticate into the OCI registry that contains your Helm chart.
+
+1. Run the command to install the `preflight` plugin. This allows you to run preflight checks before installing to ensure that the installation environment meets the requirements for the application.
+
+1. For **Download a copy of the values.yaml file** and **Edit the values.yaml file**, run the `helm show values` command provided to download the values file for the Helm chart. Then, edit the values file as needed to customize the configuration of the given chart.
+
+   If you are installing a release that contains multiple Helm charts, repeat these steps to download and edit each values file.
+
+   :::note
+   For installations with multiple charts where two or more of the top-level charts in the release use the same name, ensure that each values file has a unique name to avoid an installation error. For more information, see [Installation Fails for Release With Multiple Helm Charts](helm-install-troubleshooting#air-gap-values-file-conflict) in _Troubleshooting Helm Installations_.
+   :::
+
+1. For **Determine install method**, select one of the options depending on your ability to access the internet and the cluster from your workstation.
+
+1. Use the commands provided and the values file or files that you edited to run preflight checks and then install the release.
+
+## Perform Updates
+
+This section describes the processes of performing manual and automatic updates with Helm in air gap environments using the instructions provided in the Download Portal.
+
+### Manual Updates
+
+The manual update instructions provided in the Download Portal are similar to the installation instructions.
+
+However, the first step prompts the customer to select their current version and the target version to install. This step takes [required releases](/vendor/releases-about#properties) into consideration, thereby guiding the customer to the versions that are upgradable from their current version.
+
+The remaining steps are consistent with the installation process until the `preflight` and `install` commands, where customers provide the existing values from the cluster with the `helm get values` command. Your customer will then need to edit the `values.yaml` to reference the new image tags.
+
+If the new version introduces new images or other values, Replicated recommends that you explain this at the top of your release notes so that customers know they will need to make additional edits to the `values.yaml` before installing.
+
+### Automate Updates
+
+The instructions in the Download Portal for automating updates use API endpoints that your customers can automate against.
+
+The instructions in the Download Portal provide customers with example commands that can be put into a script that they run periodically (nightly, weekly) using GitHub Actions, Jenkins, or other platforms.
+
+This method assumes that the customer has already done a successful manual installation, including the configuration of the appropriate `values`.
+
+After logging into the registry, the customer exports their current version and uses that to query an endpoint that provides the latest installable version number (either the next required release, or the latest release) and exports it as the target version. With the target version, they can now query an API for the list of images.
+
+With the list of images, the provided `bash` script automates the process of pulling updated images from the repository, tagging them with a name for an internal registry, and then pushing the newly tagged images to their internal registry.
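+
+A rough sketch of the pull, tag, and push loop that such a script performs is shown below. This is illustrative only: the real image list and registry authentication come from the commands and endpoints shown in the Download Portal, and `REGISTRY` and `images.txt` are placeholders.
+
+```bash
+#!/bin/bash
+set -euo pipefail
+
+REGISTRY=registry.internal.example.com   # internal registry (placeholder)
+
+# images.txt contains one image reference per line, as returned by the
+# "get the list of images" step in the Download Portal instructions
+while read -r image; do
+  docker pull "$image"
+  # Re-tag for the internal registry: drop the source registry hostname
+  # but keep the repository path and tag
+  target="$REGISTRY/${image#*/}"
+  docker tag "$image" "$target"
+  docker push "$target"
+done < images.txt
+```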
+
+Unless the customer has set up the `values` to preserve the updated tag (for example, by using the `latest` tag), they need to edit the `values.yaml` to reference the new image tags. After doing so, they can log in to the OCI registry and perform the commands to install the updated chart.
+
+## Use a Harbor or Artifactory Registry Proxy
+
+You can integrate the Replicated proxy registry with an existing Harbor or JFrog Artifactory instance to proxy and cache images on demand. For more information, see [Using a Registry Proxy for Helm Air Gap Installations](using-third-party-registry-proxy).
+
+
+---
+
+
+# About Helm Installations with Replicated
+
+import Helm from "../partials/helm/_helm-definition.mdx"
+
+# About Helm Installations with Replicated
+
+This topic provides an introduction to Helm installations for applications distributed with Replicated.
+
+## Overview
+
+<Helm/>
+
+Replicated strongly recommends that all applications are packaged using Helm because many enterprise users expect to be able to install an application with the Helm CLI.
+
+Existing releases in the Replicated Platform that already support installation with Replicated KOTS and Replicated Embedded Cluster (and that include one or more Helm charts) can also be installed with the Helm CLI; it is not necessary to create and manage separate releases or channels for each installation method.
+
+For information about how to install with Helm, see:
+* [Installing with Helm](/vendor/install-with-helm)
+* [Installing and Updating with Helm in Air Gap Environments (Alpha)](helm-install-airgap)
+
+The following diagram shows how Helm charts distributed with Replicated are installed with Helm in online (internet-connected) customer environments:
+
+<img src="/images/helm-install-diagram.png" alt="diagram of a helm chart in a custom environment" width="700px"/>
+
+[View a larger version of this image](/images/helm-install-diagram.png)
+
+As shown in the diagram above, when a release containing one or more Helm charts is promoted to a channel, the Replicated Vendor Portal automatically extracts any Helm charts included in the release. These charts are pushed as OCI objects to the Replicated registry. The Replicated registry is a private OCI registry hosted by Replicated at `registry.replicated.com`. For information about security for the Replicated registry, see [Replicated Registry Security](packaging-private-registry-security).
+
+For example, if your application in the Vendor Portal is named My App and you promote a release containing a Helm chart with `name: my-chart` to a channel with the slug `beta`, then the Vendor Portal pushes the chart to the following location: `oci://registry.replicated.com/my-app/beta/my-chart`.
+
+Customers can install your Helm chart by first logging in to the Replicated registry with their unique license ID. This step ensures that any customer who installs your chart from the registry has a valid, unexpired license. After the customer logs in to the Replicated registry, they can run `helm install` to install the chart from the registry.
+
+During installation, the Replicated registry injects values into the `global.replicated` key of the parent Helm chart's values file. For more information about the values schema, see [Helm global.replicated Values Schema](helm-install-values-schema).
+
+## Limitations
+
+Helm installations have the following limitations:
+
+* Installing with Helm in air gap environments is a Beta feature. For more information, see [Installing and Updating with Helm in Air Gap Environments](/vendor/helm-install-airgap).
+* Helm CLI installations do not provide access to any of the features of the Replicated KOTS installer, such as:
+   * The KOTS Admin Console
+   * Strict preflight checks that block installation
+   * Backup and restore with snapshots
+   * Required releases with the **Prevent this release from being skipped during upgrades** option
+
+
+---
+
+
+# Packaging a Helm Chart for a Release
+
+import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx"
+import RegistryLogout from "../partials/replicated-sdk/_registry-logout.mdx"
+import HelmPackage from "../partials/helm/_helm-package.mdx"
+
+# Packaging a Helm Chart for a Release
+
+This topic describes how to package a Helm chart and the Replicated SDK into a chart archive that can be added to a release.
+
+## Overview
+
+To add a Helm chart to a release, you first add the Replicated SDK as a dependency of the Helm chart and then package the chart and its dependencies as a `.tgz` chart archive.
+
+The Replicated SDK is a Helm chart that can be installed as a small service alongside your application. The SDK provides access to key Replicated features, such as support for collecting custom metrics on application instances. For more information, see [About the Replicated SDK](replicated-sdk-overview).
+
+## Requirements and Recommendations
+
+This section includes requirements and recommendations for Helm charts.
+
+### Chart Version Requirement
+
+The chart version in your Helm chart must comply with image tag format requirements. A valid tag can contain only lowercase and uppercase letters, digits, underscores, periods, and dashes.
+
+The chart version must also comply with the Semantic Versioning (SemVer) specification. When you run the `helm install` command without the `--version` flag, Helm retrieves the list of all available image tags for the chart from the registry and compares them using the SemVer comparison rules described in the SemVer specification. The version that is installed is the version with the largest tag value. For more information about the SemVer specification, see the [Semantic Versioning](https://semver.org) documentation.
+
+### Chart Naming
+
+For releases that contain more than one Helm chart, Replicated recommends that you use unique names for each top-level Helm chart in the release. This aligns with Helm best practices and also avoids potential conflicts in filenames during installation that could cause the installation to fail. For more information, see [Installation Fails for Release With Multiple Helm Charts](helm-install-troubleshooting#air-gap-values-file-conflict) in _Troubleshooting Helm Installations_.
+
+### Helm Best Practices
+
+Replicated recommends that you review the [Best Practices](https://helm.sh/docs/chart_best_practices/) guide in the Helm documentation to ensure that your Helm chart or charts follow the required and recommended conventions.
+
+## Package a Helm Chart {#release}
+
+This procedure shows how to create a Helm chart archive to add to a release. For more information about the Helm CLI commands in this procedure, see the [Helm Commands](https://helm.sh/docs/helm/helm/) section in the Helm documentation.
+
+To package a Helm chart so that it can be added to a release:
+
+1. In your application Helm chart `Chart.yaml` file, add the YAML below to declare the SDK as a dependency. If your application is installed as multiple charts, declare the SDK as a dependency of the chart that customers install first. Do not declare the SDK in more than one chart.
+
+   <DependencyYaml/>
+
+   For additional guidelines related to adding the SDK as a dependency, see [Install the SDK as a Subchart](replicated-sdk-installing#install-the-sdk-as-a-subchart) in _Installing the Replicated SDK_.
+
+1. Update dependencies and package the chart as a `.tgz` file:
+
+   <HelmPackage/>
+
+   :::note
+   <RegistryLogout/>
+   :::
+
+1. Add the `.tgz` file to a release. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Managing Releases with the CLI](releases-creating-cli).
+
+   After the release is promoted, your Helm chart is automatically pushed to the Replicated registry. For information about how to install a release with the Helm CLI, see [Installing with Helm](install-with-helm). For information about how to install Helm charts with KOTS, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about).
+
+
+---
+
+
+# Troubleshooting Helm Installations with Replicated
+
+# Troubleshooting Helm Installations with Replicated
+
+This topic provides troubleshooting information for common issues related to performing installations and upgrades with the Helm CLI.
+
+## Installation Fails for Release With Multiple Helm Charts {#air-gap-values-file-conflict}
+
+#### Symptom
+
+When installing a release with multiple Helm charts, the installation fails. You might also see the following error message:
+
+```
+Error: INSTALLATION FAILED: cannot re-use a name that is still in use
+```
+
+#### Cause
+
+In the Download Portal, each chart's values file is named according to the chart's name. For example, the values file for the Helm chart Gitea would be named `gitea-values.yaml`.
+
+If any top-level charts in the release use the same name, the associated values files will also be assigned the same name. This causes each new values file downloaded with the `helm show values` command to overwrite any previously-downloaded values file of the same name.
+
+#### Solution
+
+Replicated recommends that you use unique names for top-level Helm charts in the same release.
+
+Alternatively, if a release contains charts that must use the same name, convert one or both of the charts into subcharts and use Helm conditions to differentiate them. See [Conditions and Tags](https://helm.sh/docs/chart_best_practices/dependencies/#conditions-and-tags) in the Helm documentation.
+
+---
+
+
+# Helm global.replicated Values Schema
+
+import SdkValues from "../partials/replicated-sdk/_sdk-values.mdx"
+
+# Helm global.replicated Values Schema
+
+This topic describes the `global.replicated` values that are injected into the values file of an application's parent Helm chart during Helm installations with Replicated.
+
+## Overview
+
+When a user installs a Helm application with the Helm CLI, the Replicated registry injects a set of customer-specific values into the `global.replicated` key of the parent Helm chart's values file.
+
+The values in the `global.replicated` field include the following:
+
+* The fields in the customer's license, such as the field names, descriptions, signatures, values, and any custom license fields that you define. Vendors can use this license information to check entitlements before the application is installed. For more information, see [Checking Entitlements in Helm Charts Before Deployment](/vendor/licenses-reference-helm).
+ +* A base64 encoded Docker configuration file. To proxy images from an external private registry with the Replicated proxy registry, you can use the `global.replicated.dockerconfigjson` field to create an image pull secret for the proxy registry. For more information, see [Proxying Images for Helm Installations](/vendor/helm-image-registry). + +The following is an example of a Helm values file containing the `global.replicated` values: + +```yaml +# Helm values.yaml +global: + replicated: + channelName: Stable + customerEmail: username@example.com + customerName: Example Customer + dockerconfigjson: eyJhdXRocyI6eyJd1dIRk5NbEZFVGsxd2JGUmFhWGxYWm5scloyNVRSV1pPT2pKT2NGaHhUVEpSUkU1... + licenseFields: + expires_at: + description: License Expiration + name: expires_at + signature: + v1: iZBpESXx7fpdtnbMKingYHiJH42rP8fPs0x8izy1mODckGBwVoA... + title: Expiration + value: "2023-05-30T00:00:00Z" + valueType: String + licenseID: YiIXRTjiB7R... + licenseType: dev +``` + +## `global.replicated` Values Schema + +The `global.replicated` values schema contains the following fields: + +| Field | Type | Description | +| --- | --- | --- | +| `channelName` | String | The name of the release channel | +| `customerEmail` | String | The email address of the customer | +| `customerName` | String | The name of the customer | +| `dockerconfigjson` | String | Base64 encoded docker config json for pulling images | +| `licenseFields`| | A list containing each license field in the customer's license. Each element under `licenseFields` has the following properties: `description`, `signature`, `title`, `value`, `valueType`. `expires_at` is the default `licenseField` that all licenses include. Other elements under `licenseField` include the custom license fields added by vendors in the Vendor Portal. For more information, see [Managing Customer License Fields](/vendor/licenses-adding-custom-fields). | +| `licenseFields.[FIELD_NAME].description` | String | Description of the license field | +| `licenseFields.[FIELD_NAME].signature.v1` | Object | Signature of the license field | +| `licenseFields.[FIELD_NAME].title` | String | Title of the license field | +| `licenseFields.[FIELD_NAME].value` | String | Value of the license field | +| `licenseFields.[FIELD_NAME].valueType` | String | Type of the license field value | +| `licenseID` | String | The unique identifier for the license | +| `licenseType` | String | The type of license, such as "dev" or "prod". For more information, see [Customer Types](/vendor/licenses-about#customer-types) in _About Customers and Licensing_. 
| + +## Replicated SDK Helm Values + +<SdkValues/> + +--- + + +# About Distributing Helm Charts with KOTS + +import GitOpsLimitation from "../partials/helm/_gitops-limitation.mdx" +import GitOpsNotRecommended from "../partials/gitops/_gitops-not-recommended.mdx" +import TemplateLimitation from "../partials/helm/_helm-template-limitation.mdx" +import VersionLimitation from "../partials/helm/_helm-version-limitation.mdx" +import HooksLimitation from "../partials/helm/_hooks-limitation.mdx" +import HookWeightsLimitation from "../partials/helm/_hook-weights-limitation.mdx" +import Deprecated from "../partials/helm/_replicated-deprecated.mdx" +import KotsHelmCrDescription from "../partials/helm/_kots-helm-cr-description.mdx" +import ReplicatedHelmMigration from "../partials/helm/_replicated-helm-migration.mdx" +import Helm from "../partials/helm/_helm-definition.mdx" + +# About Distributing Helm Charts with KOTS + +This topic provides an overview of how Replicated KOTS deploys Helm charts, including an introduction to the KOTS HelmChart custom resource, limitations of deploying Helm charts with KOTS, and more. + +## Overview + +<Helm/> + +KOTS can install applications that include: +* One or more Helm charts +* More than a single instance of any chart +* A combination of Helm charts and Kubernetes manifests + +Replicated strongly recommends that all applications are packaged as Helm charts because many enterprise users expect to be able to install an application with the Helm CLI. + +Deploying Helm charts with KOTS provides additional functionality not directly available with the Helm CLI, such as: +* The KOTS Admin Console +* Backup and restore with snapshots +* Support for air gap installations +* Support for embedded cluster installations on VMs or bare metal servers + +Additionally, for applications packaged as Helm charts, you can support Helm CLI and KOTS installations from the same release without having to maintain separate sets of Helm charts and application manifests. The following diagram demonstrates how a single release containing one or more Helm charts can be installed using the Helm CLI and KOTS: + +<img src="/images/helm-kots-install-options.png" width="650px" alt="One release being installed into three different customer environments"/> + +[View a larger version of this image](/images/helm-kots-install-options.png) + +For a tutorial that demonstrates how to add a sample Helm chart to a release and then install the release using both KOTS and the Helm CLI, see [Install a Helm Chart with KOTS and the Helm CLI](/vendor/tutorial-kots-helm-setup). + +## How KOTS Deploys Helm Charts + +This section describes how KOTS uses the HelmChart custom resource to deploy Helm charts. + +### About the HelmChart Custom Resource + +<KotsHelmCrDescription/> + +The HelmChart custom resource with `apiVersion: kots.io/v1beta2` (HelmChart v2) is supported with KOTS v1.99.0 and later. For more information, see [About the HelmChart kots.io/v1beta2 Installation Method](#v2-install) below. + +KOTS versions earlier than v1.99.0 can install Helm charts with `apiVersion: kots.io/v1beta1` of the HelmChart custom resource. The `kots.io/v1beta1` HelmChart custom resource is deprecated. For more information, see [Deprecated HelmChart kots.io/v1beta1 Installation Methods](#deprecated-helmchart-kotsiov1beta1-installation-methods) below. 

### About the HelmChart v2 Installation Method {#v2-install}

When you include a HelmChart custom resource with `apiVersion: kots.io/v1beta2` in a release, KOTS v1.99.0 or later does a Helm install or upgrade of the associated Helm chart directly.

The `kots.io/v1beta2` HelmChart custom resource does _not_ modify the chart during installation. This results in Helm chart installations that are consistent, reliable, and easy to troubleshoot. For example, you can reproduce the exact installation outside of KOTS by downloading a copy of the application files from the cluster with `kots download`, then using those files to install with `helm install`. You can also use `helm get values` to view the values that were used to install.

The `kots.io/v1beta2` HelmChart custom resource requires additional configuration, such as rewriting image names and injecting image pull secrets. For more information, see [Configuring the HelmChart Custom Resource v2](helm-native-v2-using).

For information about the fields and syntax of the HelmChart custom resource, see [HelmChart v2](/reference/custom-resource-helmchart-v2).

### Limitations

The following limitations apply when deploying Helm charts with the `kots.io/v1beta2` HelmChart custom resource:

* Available only for Helm v3.

* Available only for KOTS v1.99.0 and later.

* The rendered manifests shown in the `rendered` directory might not reflect the final manifests that will be deployed to the cluster. This is because the manifests in the `rendered` directory are generated using `helm template`, which is not run with cluster context. As a result, values returned by the `lookup` function and the built-in `Capabilities` object might differ.

* When updating the HelmChart custom resource in a release from `kots.io/v1beta1` to `kots.io/v1beta2`, the diff viewer shows a large diff because the underlying file structure of the rendered manifests is different.

* Editing downstream Kustomization files to make changes to the application before deploying is not supported. This is because KOTS does not use Kustomize when installing Helm charts with the `kots.io/v1beta2` HelmChart custom resource. For more information about patching applications with Kustomize, see [Patching with Kustomize](/enterprise/updating-patching-with-kustomize).

* <GitOpsLimitation/>

  <GitOpsNotRecommended/>

  For more information, see [KOTS Auto-GitOps Workflow](/enterprise/gitops-workflow).

## Support for Helm Hooks {#hooks}

KOTS supports the following hooks for Helm charts:
* `pre-install`: Executes after resources are rendered but before any resources are installed.
* `post-install`: Executes after resources are installed.
* `pre-upgrade`: Executes after resources are rendered but before any resources are upgraded.
* `post-upgrade`: Executes after resources are upgraded.
* `pre-delete`: Executes before any resources are deleted.
* `post-delete`: Executes after resources are deleted.

The following limitations apply to using hooks with Helm charts deployed by KOTS:

* <HooksLimitation/>

* <HookWeightsLimitation/>

For more information about Helm hooks, see [Chart Hooks](https://helm.sh/docs/topics/charts_hooks/) in the Helm documentation.

## Air Gap Installations

KOTS supports installation of Helm charts into air gap environments with configuration of the HelmChart custom resource [`builder`](/reference/custom-resource-helmchart-v2#builder) key. The `builder` key specifies the Helm values to use when building the air gap bundle for the application.
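As a sketch, reusing the placeholder names shown elsewhere in this topic (`samplechart` and a hypothetical `postgres.enabled` value), a `builder` key that forces the images for a conditionally-deployed Postgres component to be rendered into the bundle might look like this:

```yaml
# Sketch only: postgres.enabled is a hypothetical value in the chart's values.yaml
apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: samplechart
spec:
  chart:
    name: samplechart
    chartVersion: 3.1.7
  builder:
    postgres:
      enabled: true
```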

For more information about how to configure the `builder` key to support air gap installations, see [Packaging Air Gap Bundles for Helm Charts](/vendor/helm-packaging-airgap-bundles).

## Resource Deployment Order

When installing an application that includes one or more Helm charts, KOTS always deploys standard Kubernetes manifests to the cluster _before_ deploying any Helm charts. For example, if your release contains a Helm chart, a CRD, and a ConfigMap, then the CRD and ConfigMap resources are deployed before the Helm chart.

For information about how to set the deployment order for Helm charts with KOTS, see [Orchestrating Resource Deployment](/vendor/orchestrating-resource-deployment).

## Deprecated HelmChart kots.io/v1beta1 Installation Methods

This section describes the deprecated Helm chart installation methods that use the HelmChart custom resource `apiVersion: kots.io/v1beta1`.

:::important
<Deprecated/>
:::

### useHelmInstall: true {#v1beta1}

:::note
This method was previously referred to as _Native Helm_.
:::

When you include version `kots.io/v1beta1` of the HelmChart custom resource with `useHelmInstall: true`, KOTS uses Kustomize to render the chart with configuration values, license field values, and rewritten image names. KOTS then packages the resulting manifests into a new Helm chart to install. For more information about Kustomize, see the [Kustomize documentation](https://kubectl.docs.kubernetes.io/).

The following diagram shows how KOTS processes Helm charts for deployment with the `kots.io/v1beta1` method:

![Flow chart of a v1beta1 Helm chart deployment to a cluster](/images/native-helm-flowchart.png)

[View a larger image](/images/native-helm-flowchart.png)

As shown in the diagram above, when given a Helm chart, KOTS:

- Uses Kustomize to merge instructions from KOTS and the end user into the chart resources (see steps 2 - 4 below)
- Packages the resulting manifest files into a new Helm chart (see step 5 below)
- Deploys the new Helm chart (see step 5 below)

To deploy Helm charts with version `kots.io/v1beta1` of the HelmChart custom resource, KOTS does the following:

1. **Checks for previous installations of the chart**: If the Helm chart has already been deployed with a HelmChart custom resource that has `useHelmInstall: false`, then KOTS does not attempt to install the chart. The following error message is displayed if this check fails: `Deployment method for chart <chart_name> has changed`. For more information, see [HelmChart kots.io/v1beta1 (useHelmInstall: false)](#v1beta1-false) below.

1. **Writes base files**: KOTS extracts Helm manifests, renders them with Replicated templating, and then adds all files from the original Helm tarball to a `base/charts/` directory.

   Under `base/charts/`, KOTS adds a Kustomization file named `kustomization.yaml` in the directories for each chart and subchart. KOTS uses these Kustomization files later in the deployment process to merge instructions into the chart resources. For more information about Kustomize, see the [Kustomize website](https://kustomize.io).

   The following screenshot from the Replicated Admin Console shows a `base/charts/` directory for a deployed application. 
The `base/charts/` directory contains a Helm chart named postgresql with one subchart: + + ![Base directory in the Admin Console](/images/native-helm-base.png) + + In the screenshot above, a Kustomization file that targets the resources from the postgresql Helm chart appears in the `base/charts/postgresql/` directory: + + ```yaml + apiVersion: kustomize.config.k8s.io/v1beta1 + kind: Kustomization + resources: + - secrets.yaml + - statefulset.yaml + - svc-headless.yaml + - svc.yaml + ``` + +1. **Writes midstream files with Kustomize instructions from KOTS**: KOTS then copies the directory structure from `base/charts/` to an `overlays/midstream/charts/` directory. The following screenshot shows an example of the midstream directory for the postgresql Helm chart: + + ![Midstream directory in the Admin Console UI](/images/native-helm-midstream.png) + + As shown in the screenshot above, the midstream directory also contains a Kustomization file with instructions from KOTS for all deployed resources, such as image pull secrets, image rewrites, and backup labels. For example, in the midstream Kustomization file, KOTS rewrites any private images to pull from the Replicated proxy registry. + + The following shows an example of a midstream Kustomization file for the postgresql Helm chart: + + ```yaml + apiVersion: kustomize.config.k8s.io/v1beta1 + bases: + - ../../../../base/charts/postgresql + commonAnnotations: + kots.io/app-slug: helm-test + images: + - name: gcr.io/replicated-qa/postgresql + newName: proxy.replicated.com/proxy/helm-test/gcr.io/replicated-qa/postgresql + kind: Kustomization + patchesStrategicMerge: + - pullsecrets.yaml + resources: + - secret.yaml + transformers: + - backup-label-transformer.yaml + ``` + + As shown in the example above, all midstream Kustomization files have a `bases` entry that references the corresponding Kustomization file from the `base/charts/` directory. + +1. **Writes downstream files for end user Kustomize instructions**: KOTS then creates an `overlays/downstream/this-cluster/charts` directory and again copies the directory structure of `base/charts/` to this downstream directory: + + ![Downstream directory in the Admin Console UI](/images/native-helm-downstream.png) + + As shown in the screenshot above, each chart and subchart directory in the downstream directory also contains a Kustomization file. These downstream Kustomization files contain only a `bases` entry that references the corresponding Kustomization file from the midstream directory. For example: + + ```yaml + apiVersion: kustomize.config.k8s.io/v1beta1 + bases: + - ../../../../midstream/charts/postgresql + kind: Kustomization + ``` + + End users can edit the downstream Kustomization files to make changes before deploying the application. Any instructions that users add to the Kustomization files in the downstream directory take priority over midstream and base Kustomization files. For more information about how users can make changes before deploying, see [Patching with Kustomize](/enterprise/updating-patching-with-kustomize). + +1. **Deploys the Helm chart**: KOTS runs `kustomize build` for any Kustomization files in the `overlays/downstream/charts` directory. KOTS then packages the resulting manifests into a new chart for Helm to consume. + + Finally, KOTS runs `helm upgrade -i <release-name> <chart> --timeout 3600s -n <namespace>`. 
The Helm binary processes hooks and weights, applies manifests to the Kubernetes cluster, and saves a release secret similar to `sh.helm.release.v1.chart-name.v1`. Helm uses this secret to track upgrades and rollbacks of applications.

### useHelmInstall: false {#v1beta1-false}

:::note
This method was previously referred to as _Replicated Helm_.
:::

When you use version `kots.io/v1beta1` of the HelmChart custom resource with `useHelmInstall: false`, KOTS renders the Helm templates and deploys them as standard Kubernetes manifests using `kubectl apply`. KOTS also has additional functionality for specific Helm hooks. For example, when KOTS encounters an upstream Helm chart with a `helm.sh/hook-delete-policy` annotation, it automatically adds the same `kots.io/hook-delete-policy` to the Job object.

The resulting deployment consists of standard Kubernetes manifests. Therefore, cluster operators can view the exact differences between what is currently deployed and what an update will deploy.

### Limitations {#replicated-helm-limitations}

This section lists the limitations for version `kots.io/v1beta1` of the HelmChart custom resource.

#### kots.io/v1beta1 (useHelmInstall: true) Limitations

The following limitations apply when using version `kots.io/v1beta1` of the HelmChart custom resource with `useHelmInstall: true`:

* <Deprecated/>

* Available only for Helm v3.

* <GitOpsLimitation/>

  For more information, see [KOTS Auto-GitOps Workflow](/enterprise/gitops-workflow).

* <HooksLimitation/>

* <HookWeightsLimitation/>

* <TemplateLimitation/>

* <VersionLimitation/>

  For more information, see [helmVersion](/reference/custom-resource-helmchart#helmversion) in _HelmChart_.

#### kots.io/v1beta1 (useHelmInstall: false) Limitations {#v1beta1-false-limitations}

The following limitations apply when using version `kots.io/v1beta1` of the HelmChart custom resource with `useHelmInstall: false`:

* <ReplicatedHelmMigration/>

* <TemplateLimitation/>

* <VersionLimitation/>

  For more information, see [helmVersion](/reference/custom-resource-helmchart#helmversion) in _HelmChart_.


---


# Configuring the HelmChart Custom Resource v2

import KotsHelmCrDescription from "../partials/helm/_kots-helm-cr-description.mdx"

# Configuring the HelmChart Custom Resource v2

This topic describes how to configure the Replicated HelmChart custom resource version `kots.io/v1beta2` to support Helm chart installations with Replicated KOTS.

## Workflow

To support Helm chart installations with the KOTS `kots.io/v1beta2` HelmChart custom resource, do the following:
1. Rewrite image names to use the Replicated proxy registry. See [Rewrite Image Names](#rewrite-image-names).
1. Inject a KOTS-generated image pull secret that grants proxy access to private images. See [Inject Image Pull Secrets](#inject-image-pull-secrets).
1. Add a pull secret for any Docker Hub images that could be rate limited. See [Add Pull Secret for Rate-Limited Docker Hub Images](#docker-secret).
1. Configure the `builder` key to allow your users to push images to their own local registries. See [Support Local Image Registries](#local-registries).
1. (KOTS Existing Cluster and kURL Installations Only) Add backup labels to your resources to support backup and restore with the KOTS snapshots feature. See [Add Backup Labels for Snapshots](#add-backup-labels-for-snapshots).
   :::note
   Snapshots are not supported for installations with Replicated Embedded Cluster. 
   For more information about configuring disaster recovery for Embedded Cluster, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery).
   :::

## Task 1: Rewrite Image Names {#rewrite-image-names}

Configure the KOTS HelmChart custom resource `values` key so that KOTS rewrites the names for both private and public images in your Helm values during deployment. This allows images to be accessed at one of the following locations, depending on where they were pushed:
* The [Replicated proxy registry](private-images-about) (`proxy.replicated.com` or your custom domain)
* A public image registry
* Your customer's local registry
* The built-in registry used in Replicated Embedded Cluster or Replicated kURL installations in air-gapped environments

You will use the following KOTS template functions to conditionally rewrite image names depending on where the given image should be accessed:
* [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry): Returns true if the installation environment is configured to use a local image registry. HasLocalRegistry is always true in air gap installations. HasLocalRegistry is also true in online installations if the user configured a local private registry.
* [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost): Returns the host of the local registry that the user configured. Alternatively, for air gap installations with Embedded Cluster or kURL, LocalRegistryHost returns the host of the built-in registry.
* [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace): Returns the namespace of the local registry that the user configured. Alternatively, for air gap installations with Embedded Cluster or kURL, LocalRegistryNamespace returns the namespace of the built-in registry.

  <details>
    <summary>What is the registry namespace?</summary>

    The registry namespace is the path between the registry and the image name. For example, `images.mycompany.com/namespace/image:tag`.
  </details>

### Task 1a: Rewrite Private Image Names

For any private images used by your application, configure the HelmChart custom resource so that image names are rewritten to either the Replicated proxy registry (for online installations) or to the local registry in the user's installation environment (for air gap installations or online installations where the user configured a local registry).

To rewrite image names to the location of the image in the proxy registry, use the format `<proxy-domain>/proxy/<app-slug>/<image>`, where:
* `<proxy-domain>` is `proxy.replicated.com` or your custom domain. For more information about configuring a custom domain for the proxy registry, see [Using Custom Domains](/vendor/custom-domains-using).
* `<app-slug>` is the unique application slug in the Vendor Portal.
* `<image>` is the path to the image in your registry.

For example, if the private image is `quay.io/my-org/nginx:v1.0.1` and `images.mycompany.com` is the custom proxy registry domain, then the image name should be rewritten to `images.mycompany.com/proxy/my-app-slug/quay.io/my-org/nginx:v1.0.1`.

For more information, see the example below. 

#### Example

The following HelmChart custom resource uses the KOTS [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry), [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost), and [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace) template functions to conditionally rewrite an image registry and repository depending on whether a local registry is used:

```yaml
# kots.io/v1beta2 HelmChart custom resource

apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: samplechart
spec:
  ...
  values:
    image:
      # If a registry is configured by the user or by Embedded Cluster/kURL, use that registry's hostname
      # Else use proxy.replicated.com or your custom proxy registry domain
      registry: '{{repl HasLocalRegistry | ternary LocalRegistryHost "images.mycompany.com" }}'
      # If a registry is configured by the user or by Embedded Cluster/kURL, use that registry namespace
      # Else use the image's namespace at the proxy registry domain
      repository: '{{repl HasLocalRegistry | ternary LocalRegistryNamespace "proxy/my-app/quay.io/my-org" }}/nginx'
      tag: v1.0.1
```

The `spec.values.image.registry` and `spec.values.image.repository` fields in the HelmChart custom resource above correspond to `image.registry` and `image.repository` fields in the Helm chart `values.yaml` file, as shown below:

```yaml
# Helm chart values.yaml file

image:
  registry: quay.io
  repository: my-org/nginx
  tag: v1.0.1
```

During installation, KOTS renders the template functions and sets the `image.registry` and `image.repository` fields in the Helm chart `values.yaml` file based on the value of the corresponding fields in the HelmChart custom resource.

Any templates in the Helm chart that access the `image.registry` and `image.repository` fields are updated to use the appropriate value, as shown in the example below:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
    - name: nginx
      image: {{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}
```

### Task 1b: Rewrite Public Image Names

For any public images used by your application, configure the HelmChart custom resource so that image names are rewritten to either the location of the image in the public registry (for online installations) or the local registry (for air gap installations or online installations where the user configured a local registry).

For more information, see the example below.

#### Example

The following HelmChart custom resource uses the KOTS [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry), [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost), and [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace) template functions to conditionally rewrite an image registry and repository depending on whether a local registry is used:

```yaml
# kots.io/v1beta2 HelmChart custom resource

apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: samplechart
spec:
  ... 
  values:
    image:
      # If a local registry is used, use that registry's hostname
      # Else, use the public registry host (ghcr.io)
      registry: '{{repl HasLocalRegistry | ternary LocalRegistryHost "ghcr.io" }}'
      # If a local registry is used, use the registry namespace provided
      # Else, use the path to the image in the public registry
      repository: '{{repl HasLocalRegistry | ternary LocalRegistryNamespace "cloudnative-pg" }}/cloudnative-pg'
      tag: catalog-1.24.0
```

The `spec.values.image.registry` and `spec.values.image.repository` fields in the HelmChart custom resource correspond to `image.registry` and `image.repository` fields in the Helm chart `values.yaml` file, as shown in the example below:

```yaml
# Helm chart values.yaml file

image:
  registry: ghcr.io
  repository: cloudnative-pg/cloudnative-pg
  tag: catalog-1.24.0
```

During installation, KOTS renders the template functions and sets the `image.registry` and `image.repository` fields in your Helm chart `values.yaml` file based on the value of the corresponding fields in the HelmChart custom resource. Any templates in the Helm chart that access the `image.registry` and `image.repository` fields are updated to use the appropriate value, as shown in the example below:

```yaml
apiVersion: v1
kind: Pod
spec:
  containers:
    - name: cloudnative-pg
      image: {{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}
```

## Task 2: Inject Image Pull Secrets {#inject-image-pull-secrets}

Kubernetes requires a Secret of type `kubernetes.io/dockerconfigjson` to authenticate with a registry and pull a private image. When you reference a private image in a Pod definition, you also provide the name of the Secret in an `imagePullSecrets` key in the Pod definition. For more information, see [Specifying imagePullSecrets on a Pod](https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod) in the Kubernetes documentation.

During installation, KOTS creates a `kubernetes.io/dockerconfigjson` type Secret that is based on the customer license. This pull secret grants access to private images through the Replicated proxy registry or the Replicated registry. Additionally, if the user configured a local image registry, then the pull secret contains the credentials for the local registry. You must provide the name of this KOTS-generated pull secret in any Pod definitions that reference the private image.

You can inject the name of this pull secret into a field in the HelmChart custom resource using the Replicated ImagePullSecretName template function. During installation, KOTS sets the value of the corresponding field in your Helm chart `values.yaml` file with the rendered value of the ImagePullSecretName template function. 

#### Example

The following example shows a `spec.values.image.pullSecrets` array in the HelmChart custom resource that uses the ImagePullSecretName template function to inject the name of the KOTS-generated pull secret:

```yaml
# kots.io/v1beta2 HelmChart custom resource

apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: samplechart
spec:
  values:
    image:
      # Note: Use proxy.replicated.com or your custom domain
      registry: '{{repl HasLocalRegistry | ternary LocalRegistryHost "proxy.replicated.com" }}'
      repository: '{{repl HasLocalRegistry | ternary LocalRegistryNamespace "proxy/my-app/ecr.us-east-1.amazonaws.com/my-org" }}/api'
      pullSecrets:
        - name: '{{repl ImagePullSecretName }}'
```

The `spec.values.image.pullSecrets` array in the HelmChart custom resource corresponds to an `image.pullSecrets` array in the Helm chart `values.yaml` file, as shown in the example below:

```yaml
# Helm chart values.yaml file

image:
  registry: ecr.us-east-1.amazonaws.com
  repository: my-org/api/nginx
  pullSecrets:
    - name: my-org-secret
```

During installation, KOTS renders the ImagePullSecretName template function and adds the rendered pull secret name to the `image.pullSecrets` array in the Helm chart `values.yaml` file.

Any templates in the Helm chart that access the `image.pullSecrets` field are updated to use the name of the KOTS-generated pull secret, as shown in the example below (`imagePullSecrets` is a Pod-level field, so it is set on the Pod `spec` rather than on the container):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
    - name: nginx
      image: {{ .Values.image.registry }}/{{ .Values.image.repository }}
  {{- with .Values.image.pullSecrets }}
  imagePullSecrets:
    {{- toYaml . | nindent 4 }}
  {{- end }}
```

## Task 3: Add Pull Secret for Rate-Limited Docker Hub Images {#docker-secret}

Docker Hub enforces rate limits for Anonymous and Free users. To avoid errors caused by reaching the rate limit, your users can run the `kots docker ensure-secret` command, which creates an `<app-slug>-kotsadm-dockerhub` secret for pulling Docker Hub images and applies the secret to Kubernetes manifests that have images. For more information, see [Avoiding Docker Hub Rate Limits](/enterprise/image-registry-rate-limits).

If you are deploying a Helm chart with Docker Hub images that could be rate limited, to support the use of the `kots docker ensure-secret` command, any Pod definitions in your Helm chart templates that reference the rate-limited image must be updated to access the `<app-slug>-kotsadm-dockerhub` pull secret, where `<app-slug>` is your application slug. For more information, see [Get the Application Slug](/vendor/vendor-portal-manage-app#slug).

You can do this by adding the `<app-slug>-kotsadm-dockerhub` pull secret to a field in the `values` key of the HelmChart custom resource, along with a matching field in your Helm chart `values.yaml` file. During installation, KOTS sets the value of the matching field in the `values.yaml` file with the `<app-slug>-kotsadm-dockerhub` pull secret, and any Helm chart templates that access the value are updated.

For more information about Docker Hub rate limiting, see [Understanding Docker Hub rate limiting](https://www.docker.com/increase-rate-limits) on the Docker website. 

#### Example

The following Helm chart `values.yaml` file includes `image.registry`, `image.repository`, and `image.pullSecrets` for a rate-limited Docker Hub image:

```yaml
# Helm chart values.yaml file

image:
  registry: docker.io
  repository: my-org/example-docker-hub-image
  pullSecrets: []
```

The following HelmChart custom resource includes `spec.values.image.registry`, `spec.values.image.repository`, and `spec.values.image.pullSecrets`, which correspond to those in the Helm chart `values.yaml` file above.

The `spec.values.image.pullSecrets` array lists the `<app-slug>-kotsadm-dockerhub` pull secret, where the slug for the application is `example-app-slug`:

```yaml
# kots.io/v1beta2 HelmChart custom resource

apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: samplechart
spec:
  values:
    image:
      registry: docker.io
      repository: my-org/example-docker-hub-image
      pullSecrets:
        - name: example-app-slug-kotsadm-dockerhub
```

During installation, KOTS adds the `example-app-slug-kotsadm-dockerhub` secret to the `image.pullSecrets` array in the Helm chart `values.yaml` file. Any templates in the Helm chart that access `image.pullSecrets` are updated to use `example-app-slug-kotsadm-dockerhub` (again with `imagePullSecrets` set at the Pod level):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: example
spec:
  containers:
    - name: example
      image: {{ .Values.image.registry }}/{{ .Values.image.repository }}
  {{- with .Values.image.pullSecrets }}
  imagePullSecrets:
    {{- toYaml . | nindent 4 }}
  {{- end }}
```

## Task 4: Support the Use of Local Image Registries {#local-registries}

Local image registries are required for KOTS installations in air-gapped environments with no outbound internet connection. Also, users in online environments can optionally use a local registry. For more information about how users configure a local image registry with KOTS, see [Configuring Local Image Registries](/enterprise/image-registry-settings).

To support the use of local registries, configure the `builder` key. For more information about how to configure the `builder` key, see [`builder`](/reference/custom-resource-helmchart-v2#builder) in _HelmChart v2_.

## Task 5: Add Backup Labels for Snapshots (KOTS Existing Cluster and kURL Installations Only) {#add-backup-labels-for-snapshots}

:::note
The Replicated [snapshots](snapshots-overview) feature for backup and restore is supported only for existing cluster installations with KOTS. Snapshots are not supported for installations with Embedded Cluster. For more information about disaster recovery for installations with Embedded Cluster, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery).
:::

The snapshots feature requires the following labels on all resources in your Helm chart that you want to be included in the backup:
* `kots.io/backup: velero`
* `kots.io/app-slug: APP_SLUG`, where `APP_SLUG` is the slug of your Replicated application.

For more information about snapshots, see [Understanding Backup and Restore](snapshots-overview).

To support backup and restore with snapshots, add the `kots.io/backup: velero` and `kots.io/app-slug: APP_SLUG` labels to fields under the HelmChart custom resource `optionalValues` key. Add a `when` statement that evaluates to true only when the customer license has the `isSnapshotSupported` entitlement.

The fields that you create under the `optionalValues` key must map to fields in your Helm chart `values.yaml` file. 
For more information about working with the `optionalValues` key, see [optionalValues](/reference/custom-resource-helmchart-v2#optionalvalues) in _HelmChart v2_. + +#### Example + +The following example shows how to add backup labels for snapshots in the `optionalValues` key of the HelmChart custom resource: + +```yaml +# kots.io/v1beta2 HelmChart custom resource + +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: samplechart +spec: + ... + optionalValues: + # add backup labels only if the license supports snapshots + - when: "repl{{ LicenseFieldValue `isSnapshotSupported` }}" + recursiveMerge: true + values: + mariadb: + commonLabels: + kots.io/backup: velero + kots.io/app-slug: repl{{ LicenseFieldValue "appSlug" }} + podLabels: + kots.io/backup: velero + kots.io/app-slug: repl{{ LicenseFieldValue "appSlug" }} +``` + +## Additional Information + +### About the HelmChart Custom Resource + + +<KotsHelmCrDescription/> + +For more information about the HelmChart custom resource, including the unique requirements and limitations for the keys described in this topic, see [HelmChart v2](/reference/custom-resource-helmchart-v2). + +### HelmChart v1 and v2 Differences + +To support the use of local registries with version `kots.io/v1beta2` of the HelmChart custom resource, provide the necessary values in the builder field to render the Helm chart with all of the necessary images so that KOTS knows where to pull the images from to push them into the local registry. + +For more information about how to configure the `builder` key, see [Packaging Air Gap Bundles for Helm Charts](/vendor/helm-packaging-airgap-bundles) and [`builder`](/reference/custom-resource-helmchart-v2#builder) in _HelmChart v2_. + +The `kots.io/v1beta2` HelmChart custom resource has the following differences from `kots.io/v1beta1`: + +<table> + <tr> + <th>HelmChart v1beta2</th> + <th>HelmChart v1beta1</th> + <th>Description</th> + </tr> + <tr> + <td><code>apiVersion: kots.io/v1beta2</code></td> + <td><code>apiVersion: kots.io/v1beta1</code></td> + <td><code>apiVersion</code> is updated to <code>kots.io/v1beta2</code></td> + </tr> + <tr> + <td><code>releaseName</code></td> + <td><code>chart.releaseName</code></td> + <td><code>releaseName</code> is a top level field under <code>spec</code></td> + </tr> + <tr> + <td>N/A</td> + <td><code>helmVersion</code></td> + <td><code>helmVersion</code> field is removed</td> + </tr> + <tr> + <td>N/A</td> + <td><code>useHelmInstall</code></td> + <td><code>useHelmInstall</code> field is removed</td> + </tr> +</table> + +### Migrate Existing KOTS Installations to HelmChart v2 + +Existing KOTS installations can be migrated to use the KOTS HelmChart v2 method, without having to reinstall the application. + +There are different steps for migrating to HelmChart v2 depending on the application deployment method used previously. For more information, see [Migrating Existing Installations to HelmChart v2](helm-v2-migrate). + + +--- + + +# Example: Including Optional Helm Charts + +# Example: Including Optional Helm Charts + +This topic describes using optional Helm charts in your application. It also provides an example of how to configure the Replicated HelmChart custom resource to exclude optional Helm charts from your application when a given condition is met. + +## About Optional Helm Charts + +By default, KOTS creates an instance of a Helm chart for every HelmChart custom resource manifest file in the upstream application manifests. 
However, you can configure your application so that KOTS excludes certain Helm charts based on a conditional statement. + +To create this conditional statement, you add a Replicated KOTS template function to an `exclude` field in the HelmChart custom resource file. For example, you can add a template function that evaluates to `true` or `false` depending on the user's selection for a configuration field on the KOTS Admin Console Config page. +KOTS renders the template function in the `exclude` field, and excludes the chart if the template function evaluates to `true`. + +For all optional components, Replicated recommends that you add a configuration option to allow the user to optionally enable or disable the component. +This lets you support enterprises that want everything to run in the cluster and those that want to bring their own services for stateful components. + +For more information about template functions, see [About Template Functions](/reference/template-functions-about). + +## Example + +This example uses an application that has a Postgres database. +The community-supported Postgres Helm chart is available at https://github.com/bitnami/charts/tree/main/bitnami/postgresql. + +In this example, you create a configuration field on the Admin Console Config page that lets the user provide their own Postgres instance or use a Postgres service that is embedded with the application. Then, you configure the HelmChart custom resource in a release for an application in the Replicated Vendor Portal to conditionally exclude the optional Postgres component. + +### Step 1: Create the Configuration Fields + +To start, define the Admin Console Config page that gives the user a choice of "Embedded Postgres" or "External Postgres", where "External Postgres" is user-supplied. + +1. Log in to the [Vendor Portal](https://vendor.replicated.com). Create a new application for this example, or open an existing application. Then, click **Releases > Create release** to create a new release for the application. + +1. 
In the Config custom resource manifest file in the release, add the following YAML to create the "Embedded Postgres" or "External Postgres" configuration options:

   ```yaml
   apiVersion: kots.io/v1beta1
   kind: Config
   metadata:
     name: example-application
   spec:
     groups:
       - name: database
         title: Database
         description: Database Options
         items:
           - name: postgres_type
             type: radio
             title: Postgres
             default: embedded_postgres
             items:
               - name: embedded_postgres
                 title: Embedded Postgres
               - name: external_postgres
                 title: External Postgres
           - name: embedded_postgres_password
             type: password
             value: "{{repl RandomString 32}}"
             hidden: true
           - name: external_postgres_uri
             type: text
             title: External Postgres Connection String
             help_text: Connection string for a Postgres 10.x server
             when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
   ```

   The YAML above does the following:
   * Creates a field with "Embedded Postgres" or "External Postgres" radio buttons
   * Uses the Replicated RandomString template function to generate a unique default password for the embedded Postgres instance at installation time
   * Creates a hidden field for the embedded Postgres password, and a field for the external connection string that displays only when the user selects the External Postgres option

   The following shows how this Config custom resource manifest file displays on the Admin Console Config page:

   ![Postgres Config Screen](/images/postgres-config-screen.gif)

### Step 2: Create a Secret for Postgres

The application has a few components that use Postgres, and they all mount the Postgres connection string from a single Secret.

Define a Secret for Postgres that renders differently if the user selects the Embedded Postgres or External Postgres option:

1. In the release, create a Secret file and add the following YAML:

   ```yaml
   apiVersion: v1
   kind: Secret
   metadata:
     name: postgresql-secret
   stringData:
     uri: postgres://username:password@postgresql:5432/database?sslmode=disable
   ```

1. Edit the `uri` field in the Secret to add a conditional statement that renders either a connection string to the embedded Postgres chart or to the user-supplied instance:

   ```yaml
   apiVersion: v1
   kind: Secret
   metadata:
     name: postgresql-secret
   stringData:
     uri: repl{{ if ConfigOptionEquals "postgres_type" "embedded_postgres" }}postgres://myapplication:repl{{ ConfigOption "embedded_postgres_password" }}@postgres:5432/mydatabase?sslmode=disablerepl{{ else }}repl{{ ConfigOption "external_postgres_uri" }}repl{{ end }}
   ```

   As shown above, you must use a single line for the conditional statement. Optionally, you can use the Replicated Base64Encode function to pipe a string through. See [Base64Encode](/reference/template-functions-static-context#base64encode) in _Static Context_.

### Step 3: Add the Helm Chart

Next, package the Helm chart and add it to the release in the Vendor Portal:

1. Run the following commands to generate a `.tgz` package of the Helm chart:

   ```
   helm repo add bitnami https://charts.bitnami.com/bitnami
   helm fetch bitnami/postgresql
   ```

1. Drag and drop the `.tgz` file into the file tree of the release. The Vendor Portal automatically creates a new HelmChart custom resource named `postgresql.yaml`, which references the `.tgz` file you uploaded.

   For more information about adding Helm charts to a release in the Vendor Portal, see [Managing Releases with the Vendor Portal](releases-creating-releases). 
+ +### Step 4: Edit the HelmChart Custom Resource + +Finally, edit the HelmChart custom resource: + +1. In the HelmChart custom resource, add a mapping to the `values` key so that it uses the password you created. Also, add an `exclude` field to specify that the Postgres Helm chart must only be included when the user selects the embedded Postgres option on the Config page: + + ```yaml + apiVersion: kots.io/v1beta2 + kind: HelmChart + metadata: + name: postgresql + spec: + exclude: 'repl{{ ConfigOptionEquals `postgres_type` `external_postgres` }}' + chart: + name: postgresql + chartVersion: 12.1.7 + + releaseName: samplechart-release-1 + + # values are used in the customer environment, as a pre-render step + # these values will be supplied to helm template + values: + auth: + username: username + password: "repl{{ ConfigOption `embedded_postgres_password` }}" + database: mydatabase + ``` + +1. Save and promote the release. Then, install the release in a development environment to test the embedded and external Postgres options. For more information, see [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster). + + +--- + + +# Setting Helm Values with KOTS + +import Values from "../partials/helm/_helm-cr-values.mdx" +import OptionalValues from "../partials/helm/_helm-cr-optional-values.mdx" +import OptionalValuesWhen from "../partials/helm/_helm-cr-optional-values-when.mdx" +import OptionalValuesRecursiveMerge from "../partials/helm/_helm-cr-optional-values-recursive-merge.mdx" +import ConfigExample from "../partials/helm/_set-values-config-example.mdx" +import LicenseExample from "../partials/helm/_set-values-license-example.mdx" +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Setting Helm Values with KOTS + +This topic describes how to use the Replicated KOTS HelmChart custom resource to set and delete values in `values.yaml` files for Helm charts deployed with Replicated KOTS. + +For a tutorial that demonstrates how to set Helm values in a sample Helm chart using the KOTS HelmChart custom resource, see [Tutorial: Set Helm Chart Values with KOTS](/vendor/tutorial-config-setup). + +## Overview + +The KOTS HelmChart custom resource [`values`](/reference/custom-resource-helmchart-v2#values) and [`optionalValues`](/reference/custom-resource-helmchart-v2#optionalvalues) keys create a mapping between KOTS and the `values.yaml` file for the corresponding Helm chart. This allows you to set or delete Helm values during installation or upgrade with KOTS, without having to make any changes to the Helm chart itself. + +You can create this mapping by adding a value under `values` or `optionalValues` that uses the exact same key name as a value in the corresponding Helm chart `values.yaml` file. During installation or upgrade, KOTS sets the Helm chart `values.yaml` file with any matching values from the `values` or `optionalValues` keys. + +The `values` and `optionalValues` keys also support the use of Replicated KOTS template functions. When you use KOTS template functions in the `values` and `optionalValues` keys, KOTS renders the template functions and then sets any matching values in the corresponding Helm chart `values.yaml` with the rendered values. For more information, see [About Template Functions](/reference/template-functions-about). 
+ +Common use cases for the HelmChart custom resource `values` and `optionalValues` keys include: +* Setting Helm values based on user-supplied values from the KOTS Admin Console configuration page +* Setting values based on the user's unique license entitlements +* Conditionally setting values when a given condition is met +* Deleting a default value key from the `values.yaml` file that should not be included for KOTS installations + +For more information about the syntax for these fields, see [`values`](/reference/custom-resource-helmchart-v2#values) and [`optionalValues`](/reference/custom-resource-helmchart-v2#optionalvalues) in _HelmChart v2_. + +## Set Values + +This section describes how to use KOTS template functions or static values in the HelmChart custom resource `values` key to set existing Helm values. + +### Using a Static Value + +You can use static values in the HelmChart custom resource `values` key when a given Helm value must be set the same for all KOTS installations. This allows you to set values for KOTS installations only, without affecting values for any installations that use the Helm CLI. + +For example, the following Helm chart `values.yaml` file contains `kotsOnlyValue.enabled`, which is set to `false` by default: + +```yaml +# Helm chart values.yaml +kotsOnlyValue: + enabled: false +``` + +The following HelmChart custom resource contains a mapping to `kotsOnlyValue.enabled` in its `values` key, which is set to `true`: + +```yaml +# KOTS HelmChart custom resource + +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: samplechart +spec: + chart: + name: samplechart + chartVersion: 3.1.7 + + releaseName: samplechart-release-1 + + values: + kotsOnlyValue: + enabled: true +``` + +During installation or upgrade with KOTS, KOTS sets `kotsOnlyValue.enabled` in the Helm chart `values.yaml` file to `true` so that the KOTS-only value is enabled for the installation. For installations that use the Helm CLI instead of KOTS, `kotsOnlyValue.enabled` remains `false`. + +### Using KOTS Template Functions + +You can use KOTS template functions in the HelmChart custom resource `values` key to set Helm values with the rendered template functions. For more information, see [About Template Functions](/reference/template-functions-about). 

<Tabs>
  <TabItem value="config" label="Config Context Example" default>
    <ConfigExample/>
  </TabItem>
  <TabItem value="license" label="License Context Example">
    <LicenseExample/>
  </TabItem>
</Tabs>

## Conditionally Set Values

<OptionalValues/>

For example, the following HelmChart custom resource uses the `optionalValues` key and the [ConfigOptionEquals](/reference/template-functions-config-context#configoptionequals) template function to set user-supplied values for an external MariaDB database:

```yaml
# KOTS HelmChart custom resource

apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: wordpress
spec:
  chart:
    name: wordpress
    chartVersion: 15.3.2

  releaseName: sample-release-1

  optionalValues:
    - when: "repl{{ ConfigOptionEquals `mariadb_type` `external`}}"
      recursiveMerge: false
      values:
        externalDatabase:
          host: "repl{{ ConfigOption `external_db_host`}}"
          user: "repl{{ ConfigOption `external_db_user`}}"
          password: "repl{{ ConfigOption `external_db_password`}}"
          database: "repl{{ ConfigOption `external_db_database`}}"
          port: "repl{{ ConfigOption `external_db_port`}}"
```

During installation, KOTS renders the template functions and sets the `externalDatabase` values in the Helm chart `values.yaml` file only when the user selects the `external` option for `mariadb_type` on the Admin Console configuration page.

### About Recursive Merge for optionalValues {#recursive-merge}

<OptionalValuesRecursiveMerge/>

For example, the following HelmChart custom resource has both `values` and `optionalValues`:

```yaml
values:
  favorite:
    drink:
      hot: tea
      cold: soda
    dessert: ice cream
    day: saturday

optionalValues:
  - when: '{{repl ConfigOptionEquals "example_config_option" "1" }}'
    recursiveMerge: false
    values:
      example_config_option:
        enabled: true
      favorite:
        drink:
          cold: lemonade
```

The `values.yaml` file for the associated Helm chart defines the following key-value pairs:

```yaml
favorite:
  drink:
    hot: coffee
    cold: soda
  dessert: pie
```

The `templates/configmap.yaml` file for the Helm chart maps these values to the following fields:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: test-configmap
data:
  favorite_day: {{ .Values.favorite.day }}
  favorite_dessert: {{ .Values.favorite.dessert }}
  favorite_drink_cold: {{ .Values.favorite.drink.cold }}
  favorite_drink_hot: {{ .Values.favorite.drink.hot }}
```

When `recursiveMerge` is set to `false`, the ConfigMap for the deployed application includes the following key-value pairs:

```yaml
favorite_day: null
favorite_dessert: pie
favorite_drink_cold: lemonade
favorite_drink_hot: coffee
```

In this case, the top-level keys in `optionalValues` override the top-level keys in `values`.

KOTS then uses the values from the Helm chart `values.yaml` to populate the remaining fields in the ConfigMap: `favorite_day`, `favorite_dessert`, and `favorite_drink_hot`.

When `recursiveMerge` is set to `true`, the ConfigMap for the deployed application includes the following key-value pairs:

```yaml
favorite_day: saturday
favorite_dessert: ice cream
favorite_drink_cold: lemonade
favorite_drink_hot: tea
```

In this case, all keys from `values` and `optionalValues` are merged. Because both include `favorite.drink.cold`, KOTS uses `lemonade` from `optionalValues`. 
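To make the `recursiveMerge: true` case concrete, the following sketch, derived from the example above, shows the effective merged values that result from combining the `values` and `optionalValues` keys before KOTS applies the chart's own defaults:

```yaml
# Effective merged values with recursiveMerge: true (derived from the example above)
example_config_option:
  enabled: true
favorite:
  drink:
    hot: tea        # from values
    cold: lemonade  # from optionalValues, which wins on conflict
  dessert: ice cream
  day: saturday
```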
+ +## Delete a Default Key + +If the Helm chart `values.yaml` contains a static value that must be deleted when deploying with KOTS, you can set the value to `"null"` (including the quotation marks) in the `values` key of the HelmChart custom resource. + +A common use case for deleting default value keys is when you include a community Helm chart as a dependency. Because you cannot control how the community chart is built and structured, you might want to change some of the default behavior. + +For example, the following HelmChart custom resource sets an `exampleKey` value to `"null"` when the chart is deployed with KOTS: + +```yaml +# KOTS HelmChart custom resource + +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: samplechart +spec: + chart: + name: samplechart + chartVersion: 3.1.7 + + releaseName: samplechart-release-1 + + values: + exampleKey: "null" +``` + +For more information about using a `null` value to delete a key, see [Deleting a Default Key](https://helm.sh/docs/chart_template_guide/values_files/#deleting-a-default-key) in the Helm documentation. + +--- + + +# Packaging Air Gap Bundles for Helm Charts + +import HelmBuilderRequirements from "../partials/helm/_helm-builder-requirements.mdx" +import BuilderAirgapIntro from "../partials/helm/_helm-cr-builder-airgap-intro.mdx" +import BuilderExample from "../partials/helm/_helm-cr-builder-example.mdx" +import AirGapBundle from "../partials/airgap/_airgap-bundle.mdx" + +# Packaging Air Gap Bundles for Helm Charts + +This topic describes how to package and build air gap bundles for releases that contain one or more Helm charts. This topic applies to applications deployed with Replicated KOTS. + +## Overview + +<AirGapBundle/> + +When building the `.airgap` bundle for a release that contains one or more Helm charts, the Vendor Portal renders the Helm chart templates in the release using values supplied in the KOTS HelmChart custom resource [`builder`](/reference/custom-resource-helmchart-v2#builder) key. + +## Configure the `builder` Key + +You should configure the `builder` key if you need to change any default values in your Helm chart so that the `.airgap` bundle for the release includes all images needed to successfully deploy the chart. For example, you can change the default Helm values so that images for any conditionally-deployed components are always included in the air gap bundle. Additionally, you can use the `builder` key to set any `required` values in your Helm chart that must be set for the chart to render. + +The values in the `builder` key map to values in the given Helm chart's `values.yaml` file. For example, `spec.builder.postgres.enabled` in the example HelmChart custom resource below would map to a `postgres.enabled` field in the `values.yaml` file for the `samplechart` chart: + +```yaml +# KOTS HelmChart custom resource + +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: samplechart +spec: + chart: + name: samplechart + chartVersion: 3.1.7 + builder: + postgres: + enabled: true +``` + +For requirements, recommendations, and examples of common use cases for the `builder` key, see the sections below. 
+ +### Requirements and Recommendations + +<HelmBuilderRequirements/> + +### Example: Set the Image Registry for Air Gap Installations + +For air gap installations, if the [Replicated proxy registry](/vendor/private-images-about) domain `proxy.replicated.com` is used as the default image name for any images, you need to rewrite the image to the upstream image name so that it can be processed and included in the air gap bundle. You can use the `builder` key to do this by hardcoding the upstream location of the image (image registry, repository, and tag), as shown in the example below: + +```yaml +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: samplechart +spec: + chart: + name: samplechart + chartVersion: 3.1.7 + builder: + my-service: + image: + registry: 12345.dkr.ecr.us-west-1.amazonaws.com + repository: my-app + tag: "1.0.2" +``` +When building the `.airgap` bundle for the release, the Vendor Portal uses the registry, repository, and tag values supplied in the `builder` key to template the Helm chart, rather than the default values defined in the Helm `values.yaml` file. This ensures that the image is pulled from the upstream registry using the credentials supplied in the Vendor Portal, without requiring any changes to the Helm chart directly. + +### Example: Include Conditional Images + +Many applications have images that are included or excluded based on a given condition. For example, enterprise users might have the option to deploy an embedded database with the application or bring their own database. To support this use case for air gap installations, the images for any conditionally-deployed components must always be included in the air gap bundle. + +<BuilderExample/> + +## Related Topics + +* [builder](/reference/custom-resource-helmchart-v2#builder) +* [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap) +* [Air Gap Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster-airgapped) + +--- + + +# Migrating Existing Installations to HelmChart v2 + +# Migrating Existing Installations to HelmChart v2 + +This topic describes how to migrate existing Replicated KOTS installations to the KOTS HelmChart `kots.io/v1beta2` (HelmChart v2) installation method, without having to reinstall the application. It also includes information about how to support both HelmChart v1 and HelmChart v2 installations from a single release, and lists frequently-asked questions (FAQs) related to migrating to HelmChart v2. + +## Migrate to HelmChart v2 + +### Requirements + +* The HelmChart v2 custom resource is supported with KOTS v1.99.0 and later. If any of your customers are running a version of KOTS earlier than v1.99.0, see [Support Customers on KOTS Versions Earlier Than v1.99.0](#support-both-v1-v2) below for more information about how to support both HelmChart v1 and HelmChart v2 installations from the same release. + +* The Helm `--take-ownership` flag is supported with KOTS v1.124.0 and later. + +* The `kots.io/keep` annotation is supported with KOTS v1.122.0 and later. + +### Migrate From HelmChart v1 with `useHelmInstall: true` + +To migrate existing installations from HelmChart v1 with `useHelmInstall: true` to HelmChart v2: + +1. In a development environment, install an application release using the KOTS HelmChart v1 with `useHelmInstall: true` method. You will use this installation to test the migration to HelmChart v2. + +1. Create a new release containing your application files. + +1. 
For each Helm chart in the release, find the corresponding HelmChart custom resource and update `apiVersion` to `kots.io/v1beta2`. Then update it to rewrite images, inject image pull secrets, and add backup labels. See [Configuring the HelmChart Custom Resource v2](helm-native-v2-using).

1. Promote the release to an internal-only channel that your team uses for testing.

1. In your development environment, log in to the Admin Console and confirm that you can upgrade to the new HelmChart v2 release.

1. When you are done testing, promote the release to one or more of your customer-facing channels. Customers can follow the standard upgrade process in the Admin Console to update their instance.

### Migrate From HelmChart v1 with `useHelmInstall: false`

This section describes how to migrate existing HelmChart v1 installations with `useHelmInstall: false`.

:::note
When the `useHelmInstall` field is _not_ set in the HelmChart custom resource, `false` is the default value.
:::

These migration steps ensure that KOTS does not uninstall any resources that were previously deployed without Helm, and that Helm takes ownership of these existing resources.

To migrate existing installations from HelmChart v1 with `useHelmInstall: false` to HelmChart v2:

1. Create a new release containing your application files:

   1. In the release, for any resources defined in Kubernetes manifests or in your Helm `templates` that were previously installed with HelmChart v1 and `useHelmInstall: false`, add the `kots.io/keep` annotation. The `kots.io/keep` annotation prevents KOTS from uninstalling these resources when upgrading using the HelmChart v2 method.

      **Example:**

      ```yaml
      apiVersion: apps/v1
      kind: StatefulSet
      metadata:
        name: postgresql
        # Add the kots.io/keep annotation
        annotations:
          kots.io/keep: "true"
      ```

   1. Save the release.

1. Create another new release:

   1. For each Helm chart in the release, find the corresponding HelmChart custom resource and update `apiVersion` to `kots.io/v1beta2`. Then update it to rewrite images, inject image pull secrets, and add backup labels. See [Configuring the HelmChart Custom Resource v2](helm-native-v2-using).

   1. In the HelmChart custom resource, under the `helmUpgradeFlags` field, add the `--take-ownership` flag:

      ```yaml
      # HelmChart v2
      apiVersion: kots.io/v1beta2
      kind: HelmChart
      metadata:
        name: samplechart
      spec:
        helmUpgradeFlags:
          - --take-ownership
      ```

      When the `--take-ownership` upgrade flag is enabled, Helm automatically takes ownership of resources that were previously deployed without Helm.

   1. Save the release.

1. Test the migration process:

   1. Promote the first release to an internal-only channel that your team uses for testing.

   1. In a development environment, install the first release.

   1. Promote the second release to the same channel.

   1. In your development environment, access the Admin Console to upgrade to the second release.

1. When you are done testing, promote the first release to one or more of your customer-facing channels. Replicated recommends that you mark the release as required by enabling **Prevent this release from being skipped during upgrades**. For more information about required releases, see [Properties](/vendor/releases-about#properties) in _About Channels and Releases_.

1. Promote the second release to the same customer-facing channel(s). 
Replicated recommends that you mark the release as required by enabling **Prevent this release from being skipped during upgrades**.
+
+1. Instruct customers to migrate by first upgrading to the release where the `kots.io/keep` annotation is applied to your resources, then upgrading to the release with HelmChart v2.
+
+1. In subsequent releases, remove the `--take-ownership` flag from the `helmUpgradeFlags` field and remove the `kots.io/keep` annotation from resources in your Helm templates.
+
+### Migrate From Standard Kubernetes Manifests
+
+This section describes how to migrate existing KOTS installations of applications that were previously packaged as standard Kubernetes manifests and are now packaged as one or more Helm charts. This migration path involves performing two upgrades to ensure that KOTS does not uninstall any resources that were adopted into Helm charts, and that Helm can take ownership of resources that were previously deployed without Helm.
+
+To migrate applications that were previously packaged as standard Kubernetes manifests:
+
+1. Create a new release containing the Kubernetes manifests for your application:
+
+   1. For each of the application manifests in the release, add the `kots.io/keep` annotation. The `kots.io/keep` annotation prevents KOTS from uninstalling resources that were previously installed without Helm when upgrading using the HelmChart v2 method.
+
+      **Example:**
+
+      ```yaml
+      apiVersion: apps/v1
+      kind: StatefulSet
+      metadata:
+        name: postgresql
+        annotations:
+          kots.io/keep: "true"
+      ```
+
+   1. Save the release.
+
+1. Create another new release:
+
+   1. In the release, add your application Helm chart(s). Remove the application manifests for resources that were adopted into the Helm chart(s).
+
+   1. For each Helm chart in the release, add a corresponding KOTS HelmChart custom resource with `apiVersion` set to `kots.io/v1beta2`. Configure the resource to rewrite images, inject image pull secrets, and add backup labels. See [Configuring the HelmChart Custom Resource v2](helm-native-v2-using).
+
+   1. In the HelmChart custom resource, under the `helmUpgradeFlags` field, add the `--take-ownership` flag:
+
+      ```yaml
+      # HelmChart v2
+      apiVersion: kots.io/v1beta2
+      kind: HelmChart
+      metadata:
+        name: samplechart
+      spec:
+        helmUpgradeFlags:
+          - --take-ownership
+      ```
+
+      When the `--take-ownership` upgrade flag is enabled, Helm automatically takes ownership of resources that were previously deployed without Helm.
+
+   1. Save the release.
+
+1. Test the migration process:
+
+   1. Promote the first release to an internal-only channel that your team uses for testing.
+
+   1. In a development environment, install the first release.
+
+   1. Promote the second release to the same channel.
+
+   1. In your development environment, access the Admin Console to upgrade to the second release. Upgrading to the second release migrates the installation to HelmChart v2.
+
+1. After you are done testing the migration process, promote the first release containing your application manifests with the `kots.io/keep` annotation to one or more customer-facing channels. Replicated recommends that you mark the release as required by enabling **Prevent this release from being skipped during upgrades**. For more information about required releases, see [Properties](/vendor/releases-about#properties) in _About Channels and Releases_.
+
+1. Promote the second release containing your Helm chart(s) to the same channels. 
Replicated recommends that you mark the release as required by enabling **Prevent this release from being skipped during upgrades**. + +1. Instruct customers to migrate by first upgrading to the release containing the standard manifests, then upgrading to the release packaged with Helm. + +1. In subsequent releases, remove the `--take-ownership` flag from the `helmUpgradeFlags` field and remove the `kots.io/keep` annotation from resources in your Helm templates. + +## Support Customers on KOTS Versions Earlier Than v1.99.0 {#support-both-v1-v2} + +The HelmChart v2 installation method requires KOTS v1.99.0 or later. If you have existing customers that have not yet upgraded to KOTS v1.99.0 or later, Replicated recommends that you support both the HelmChart v2 and v1 installation methods from the same release until all installations are running KOTS v1.99.0 or later. + +To support both installation methods from the same release, include both versions of the HelmChart custom resource for each Helm chart in your application releases (HelmChart `kots.io/v1beta2` and HelmChart `kots.io/v1beta1` with `useHelmInstall: true`). + +When you include both versions of the HelmChart custom resource for a Helm chart, installations with KOTS v1.98.0 or earlier use the v1 method, while installations with KOTS v1.99.0 or later use v2. + +After all customers are using KOTS v1.99.0 or later, you can remove the HelmChart v1 custom resources so that all customers are using the HelmChart v2 method. + +## HelmChart v2 Migration FAQs + +This section includes FAQs related to migrating existing installations to the KOTS HelmChart v2 method. + +### Which migration scenarios require the `kots.io/keep` annotation? + +When applied to a resource in a release, the `kots.io/keep` annotation prevents the given resource from being uninstalled. The `kots.io/keep` annotation can be used to prevent KOTS from deleting resources that were adopted into Helm charts or otherwise previously deployed without Helm. + +To prevent existing resources from being uninstalled during upgrade, the `kots.io/keep` annotation is required for the following types of migrations: + * Applications previously packaged as Kubernetes manifests migrating to HelmChart v2 + * HelmChart v1 with `useHelmInstall: false` migrating to HelmChart v2 + +`kots.io/keep` is _not_ needed when migrating from HelmChart v1 with `useHelmInstall: true` to HelmChart v2. + +### Which migration scenarios require the `--take-ownership` flag? + +When the `--take-ownership` flag is enabled, Helm automatically takes ownership of resources that were previously deployed to the cluster without Helm. + +The `--take-ownership` flag is required for the following types of migrations: + * Applications previously packaged as Kubernetes manifests migrating to HelmChart v2 + * HelmChart v1 with `useHelmInstall: false` migrating to HelmChart v2 + +`--take-ownership` is _not_ needed when migrating from HelmChart v1 with `useHelmInstall: true` to HelmChart v2. + +### What is the difference between HelmChart v1 with `useHelmInstall: false` and `useHelmInstall: true`? + +With HelmChart v1 and `useHelmInstall: false`, KOTS renders the Helm templates and deploys them as standard Kubernetes manifests using `kubectl apply`. This differs from both the HelmChart v1 with `useHelmInstall: true` and HelmChart v2 methods, where KOTS installs the application using Helm. 
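+
+For reference, the following is a minimal sketch of a HelmChart v1 custom resource with `useHelmInstall` set explicitly. The chart name and version shown are placeholders:
+
+```yaml
+# HelmChart v1
+apiVersion: kots.io/v1beta1
+kind: HelmChart
+metadata:
+  name: samplechart
+spec:
+  chart:
+    name: samplechart
+    chartVersion: 3.1.7
+  # false (the default) causes KOTS to render the chart and apply the
+  # resulting manifests with kubectl; true causes KOTS to install with Helm
+  useHelmInstall: false
+```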
+
+Because the HelmChart v1 with `useHelmInstall: false` method does not deploy resources with Helm, it is necessary to use the `kots.io/keep` annotation and the Helm `--take-ownership` flag when migrating to the HelmChart v2 installation method. These ensure that Helm can take ownership of existing resources and that the resources are not uninstalled during upgrade.
+
+For more information about how KOTS deploys Helm charts, including information about the deprecated HelmChart v1 installation methods, see [About Distributing Helm Charts with KOTS](helm-native-about).
+
+---
+
+
+# Enabling and Configuring Identity Service (Beta)
+
+:::important
+This topic has been removed from the product documentation because this Beta feature is deprecated.
+:::
+
+# Enabling and Configuring Identity Service (Beta)
+
+This topic describes how to enable the identity service (Beta) feature, and how to regulate access to application resources using role based access control (RBAC).
+
+## About Identity Service
+
+When you enable the identity service for an application, the Replicated app manager deploys [Dex](https://dexidp.io/) as an intermediary that can be configured to control access to the application. Dex implements an array of protocols for querying other user-management systems, known as connectors. For more information about connectors, see [Connectors](https://dexidp.io/docs/connectors/) in the Dex documentation.
+
+
+## Limitations and Requirements
+
+Identity service has the following limitations and requirements:
+
+* Requires that the identity service option be enabled in customer licenses.
+* Is available only for embedded cluster installations with the kURL installer.
+* Is available only through the Replicated Admin Console.
+
+## Enable and Configure Identity Service
+
+Use the Identity custom resource to enable and configure the identity service for your application. For an example application that demonstrates how to configure the identity service, see the [`kots-idp-example-app`](https://github.com/replicatedhq/kots-idp-example-app) on GitHub.
+
+To begin, create a new release in the [Vendor Portal](https://vendor.replicated.com). Add an Identity custom resource file and customize the file for your application. For more information about the Identity custom resource, see [Identity (Beta)](/reference/custom-resource-identity) in _Reference_.
+
+**Example:**
+
+```YAML
+apiVersion: kots.io/v1beta1
+kind: Identity
+metadata:
+  name: identity
+spec:
+  requireIdentityProvider: true
+  identityIssuerURL: https://{{repl ConfigOption "ingress_hostname"}}/oidcserver
+  oidcRedirectUris:
+  - https://{{repl ConfigOption "ingress_hostname"}}/callback
+  roles:
+  - id: access
+    name: Access
+    description: Restrict access to IDP Example App
+```
+
+Make the identity service accessible from the browser by configuring the service name and port. The app manager provides the service name and port to the application through the identity template functions so that the application can configure ingress for the identity service. For more information about the identity template functions, see [Identity Context](/reference/template-functions-identity-context) in _Reference_. 
+ +**Example:** + +```YAML +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: idp-app + annotations: + kubernetes.io/ingress.allow-http: 'false' + ingress.kubernetes.io/force-ssl-redirect: 'true' + kots.io/placeholder: repl{{ printf "'true'" }}repl{{ ConfigOption "annotations" | nindent 4 }} + labels: + app: idp-app +spec: + tls: + - hosts: + - repl{{ ConfigOption "ingress_hostname" }} + secretName: idp-ingress-tls + rules: + - host: repl{{ or (ConfigOption "ingress_hostname") "~" }} + http: + paths: + - path: / + backend: + serviceName: idp-app + servicePort: 80 + - path: /oidcserver + backend: + serviceName: repl{{ IdentityServiceName }} + servicePort: repl{{ IdentityServicePort }} +``` +In your Deployment manifest file, add environment variables to configure all of the information that your application needs to communicate and integrate with the identity service. + +**Example:** + +```YAML +apiVersion: apps/v1 +kind: Deployment +metadata: + name: idp-app + labels: + app: idp-app +spec: + replicas: 1 + selector: + matchLabels: + app: idp-app + template: + metadata: + labels: + app: idp-app + spec: + containers: + - name: idp-app + image: replicated/kots-idp-example-app:latest + imagePullPolicy: Always + ports: + - containerPort: 5555 + volumeMounts: + - name: tls-ca-volume + mountPath: /idp-example + readOnly: true + args: ["--issuer-root-ca=/idp-example/tls.ca"] + env: + - name: CERT_SHA + value: repl{{ sha256sum (ConfigOption "tls_cert") }} + - name: LISTEN + value: http://0.0.0.0:5555 + - name: ISSUER + value: https://{{repl ConfigOption "ingress_hostname"}}/oidcserver + - name: CLIENT_ID + value: repl{{ IdentityServiceClientID }} + - name: CLIENT_SECRET + value: repl{{ IdentityServiceClientSecret }} # TODO: secret + - name: REDIRECT_URI + value: https://{{repl ConfigOption "ingress_hostname"}}/callback + - name: EXTRA_SCOPES + value: groups + - name: RESTRICTED_GROUPS + value: | + {{repl IdentityServiceRoles | keys | toJson }} + hostAliases: + - ip: 172.17.0.1 + hostnames: + - myapp.kotsadmdevenv.com + volumes: + - name: tls-ca-volume + secret: + secretName: idp-app-ca +``` + +## Configuring Access with RBAC + +You can also regulate access to your application resources using role based access control (RBAC). + +In the Identity custom resource, provide a list of the available roles within your application in the `roles` section. For more information, see [`roles`](/reference/custom-resource-identity#roles) in _Reference_. + +**Example:** + +```YAML +apiVersion: kots.io/v1beta1 +kind: Identity +metadata: + name: identity +spec: + requireIdentityProvider: true + identityIssuerURL: https://{{repl ConfigOption "ingress_hostname"}}/oidcserver + oidcRedirectUris: + - https://{{repl ConfigOption "ingress_hostname"}}/callback + roles: + - id: access + name: Access + description: Restrict access to IDP Example App +``` + +Then, using the Admin Console, your customer has the ability to create groups and assign specific roles to each group. +This mapping of roles to groups is returned to your application through the `IdentityServiceRoles` template function that you configure in your Deployment manifest file under the environment variable `RESTRICTED_GROUPS`. For more information, see [`IdentityServiceRoles`](/reference/template-functions-identity-context#identityserviceroles) in _Reference_. 
+ +**Example:** + +```YAML +apiVersion: apps/v1 +kind: Deployment +metadata: + name: idp-app + labels: + app: idp-app +spec: + replicas: 1 + selector: + matchLabels: + app: idp-app + template: + metadata: + labels: + app: idp-app + spec: + containers: + - name: idp-app + image: replicated/kots-idp-example-app:latest + imagePullPolicy: Always + ports: + - containerPort: 5555 + volumeMounts: + - name: tls-ca-volume + mountPath: /idp-example + readOnly: true + args: ["--issuer-root-ca=/idp-example/tls.ca"] + env: + - name: CERT_SHA + value: repl{{ sha256sum (ConfigOption "tls_cert") }} + - name: LISTEN + value: http://0.0.0.0:5555 + - name: ISSUER + value: https://{{repl ConfigOption "ingress_hostname"}}/oidcserver + - name: CLIENT_ID + value: repl{{ IdentityServiceClientID }} + - name: CLIENT_SECRET + value: repl{{ IdentityServiceClientSecret }} # TODO: secret + - name: REDIRECT_URI + value: https://{{repl ConfigOption "ingress_hostname"}}/callback + - name: EXTRA_SCOPES + value: groups + - name: RESTRICTED_GROUPS + value: | + {{repl IdentityServiceRoles | keys | toJson }} + hostAliases: + - ip: 172.17.0.1 + hostnames: + - myapp.kotsadmdevenv.com + volumes: + - name: tls-ca-volume + secret: + secretName: idp-app-ca +``` + + +--- + + +# Enabling and Understanding Application Status + +import StatusesTable from "../partials/status-informers/_statusesTable.mdx" +import AggregateStatus from "../partials/status-informers/_aggregateStatus.mdx" +import AggregateStatusIntro from "../partials/status-informers/_aggregate-status-intro.mdx" +import SupportedResources from "../partials/instance-insights/_supported-resources-status.mdx" + +# Enabling and Understanding Application Status + +This topic describes how to configure your application so that you can view the status of application instances in the Replicated Vendor Portal. It also describes the meaning of the different application statuses. + +## Overview + +The Vendor Portal displays data on the status of instances of your application that are running in customer environments, including the current state (such as Ready or Degraded), the instance uptime, and the average amount of time it takes your application to reach a Ready state during installation. For more information about viewing instance data, see [Instance Details](instance-insights-details). + +To compute and display these insights, the Vendor Portal interprets and aggregates the state of one or more of the supported Kubernetes resources that are deployed to the cluster as part of your application. + +<SupportedResources/> + +For more information about how instance data is sent to the Vendor Portal, see [About Instance and Event Data](instance-insights-event-data). + +## Enable Application Status Insights + +To display insights on application status, the Vendor Portal requires that your application has one or more _status informers_. Status informers indicate the Kubernetes resources deployed as part of your application that are monitored for changes in state. + +To enable status informers for your application, do one of the following, depending on the installation method: +* [Helm Installations](#helm-installations) +* [KOTS Installations](#kots-installations) + +### Helm Installations + +To get instance status data for applications installed with Helm, the Replicated SDK must be installed alongside the application. For information about how to distribute and install the SDK with your application, see [Installing the Replicated SDK](/vendor/replicated-sdk-installing). 
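+
+To include the SDK, declare it as a dependency of your application Helm chart. The following is a minimal sketch of the `Chart.yaml` entry; the version shown is a placeholder:
+
+```yaml
+# Chart.yaml
+dependencies:
+  - name: replicated
+    repository: oci://registry.replicated.com/library
+    version: 1.0.0
+```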
+
+After you include the SDK as a dependency, the requirements for enabling status informers vary depending on how your application is installed:
+
+* For applications installed by running `helm install` or `helm upgrade`, the Replicated SDK automatically detects and reports the status of the resources that are part of the Helm release. No additional configuration is required to get instance status data.
+
+* For applications installed by running `helm template` then `kubectl apply`, the SDK cannot automatically detect and report the status of resources. You must configure custom status informers by overriding the `statusInformers` value in the Replicated SDK chart. For example:
+
+  ```yaml
+  # Helm chart values.yaml file
+
+  replicated:
+    statusInformers:
+      - deployment/nginx
+      - statefulset/mysql
+  ```
+
+  :::note
+  Applications installed by running `helm install` or `helm upgrade` can also use custom status informers. When the `replicated.statusInformers` field is set, the SDK detects and reports the status of only the resources included in the `replicated.statusInformers` field.
+  :::
+
+### KOTS Installations
+
+For applications installed with Replicated KOTS, configure one or more status informers in the KOTS Application custom resource. For more information, see [Adding Resource Status Informers](admin-console-display-app-status).
+
+When Helm-based applications that include the Replicated SDK are deployed by KOTS, the SDK inherits the status informers configured in the KOTS Application custom resource. In this case, the SDK does _not_ automatically report the status of the resources that are part of the Helm release. This prevents discrepancies in the instance data in the vendor platform.
+
+## View Resource Status Insights {#resource-status}
+
+For applications that include the Replicated SDK, the Vendor Portal also displays granular resource status insights in addition to the aggregate application status. For example, you can hover over the **App status** field on the **Instance details** page to view the statuses of the individual resources deployed by the application, as shown below:
+
+<img src="/images/resource-status-hover-current-state.png" alt="resource status pop up" width="400px"/>
+
+[View a larger version of this image](/images/resource-status-hover-current-state.png)
+
+Viewing these resource status details is helpful for understanding which resources are contributing to the aggregate application status. For example, when an application has an Unavailable status, that means that one or more resources are Unavailable. By viewing the resource status insights on the **Instance details** page, you can quickly understand which resource or resources are Unavailable for the purpose of troubleshooting.
+
+Granular resource status details are automatically available when the Replicated SDK is installed alongside the application. For information about how to distribute and install the SDK with your application, see [Installing the Replicated SDK](/vendor/replicated-sdk-installing).
+
+## Understanding Application Status
+
+This section provides information about how Replicated interprets and aggregates the status of Kubernetes resources for your application to report an application status.
+
+### About Resource Statuses {#resource-statuses}
+
+Possible resource statuses are Ready, Updating, Degraded, Unavailable, and Missing. 
+
+The following table lists the supported Kubernetes resources and the conditions that contribute to each status:
+
+<StatusesTable/>
+
+### Aggregate Application Status
+
+<AggregateStatusIntro/>
+
+<AggregateStatus/>
+
+---
+
+
+# Installing with Helm
+
+import Prerequisites from "../partials/helm/_helm-install-prereqs.mdx"
+import FirewallOpeningsIntro from "../partials/install/_firewall-openings-intro.mdx"
+
+# Installing with Helm
+
+This topic describes how to use Helm to install releases that contain one or more Helm charts. For more information about the `helm install` command, including how to override values in a chart during installation, see [Helm Install](https://helm.sh/docs/helm/helm_install/) in the Helm documentation.
+
+## Prerequisites
+
+Before you install, complete the following prerequisites:
+
+<Prerequisites/>
+
+## Firewall Openings for Online Installations with Helm {#firewall}
+
+<FirewallOpeningsIntro/>
+
+<table>
+  <tr>
+    <th width="50%">Domain</th>
+    <th>Description</th>
+  </tr>
+  <tr>
+    <td>`replicated.app` *</td>
+    <td><p>Upstream application YAML and metadata is pulled from `replicated.app`. The current running version of the application (if any), as well as a license ID and application ID to authenticate, are all sent to `replicated.app`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p>For the range of IP addresses for `replicated.app`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L60-L65) in GitHub.</p></td>
+  </tr>
+  <tr>
+    <td>`registry.replicated.com`</td>
+    <td><p>Some applications host private images in the Replicated registry at this domain. The on-prem docker client uses a license ID to authenticate to `registry.replicated.com`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p>For the range of IP addresses for `registry.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L20-L25) in GitHub.</p></td>
+  </tr>
+  <tr>
+    <td>`proxy.replicated.com`</td>
+    <td><p>Private Docker images are proxied through `proxy.replicated.com`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p>For the range of IP addresses for `proxy.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L52-L57) in GitHub.</p></td>
+  </tr>
+</table>
+
+* Required only if the [Replicated SDK](/vendor/replicated-sdk-overview) is included as a dependency of the application Helm chart.
+
+## Install
+
+To install a Helm chart:
+
+1. In the Vendor Portal, go to **Customers** and click on the target customer.
+
+1. Click **Helm install instructions**.
+
+   <img alt="Helm install button" src="/images/helm-install-button.png" width="700px"/>
+
+   [View a larger image](/images/helm-install-button.png)
+
+1. In the **Helm install instructions** dialog, run the first command to log in to the Replicated registry:
+
+   ```bash
+   helm registry login registry.replicated.com --username EMAIL_ADDRESS --password LICENSE_ID
+   ```
+   Where:
+   * `EMAIL_ADDRESS` is the customer's email address
+   * `LICENSE_ID` is the ID of the customer's license
+
+   :::note
+   You can safely ignore the following warning message: `WARNING: Using --password via the CLI is insecure.` This message is displayed because using the `--password` flag stores the password in bash history. This login method is not insecure. 
+ + Alternatively, to avoid the warning message, you can click **(show advanced)** in the **Helm install instructions** dialog to display a login command that excludes the `--password` flag. With the advanced login command, you are prompted for the password after running the command. + ::: + +1. (Optional) Run the second and third commands to install the preflight plugin and run preflight checks. If no preflight checks are defined, these commands are not displayed. For more information about defining and running preflight checks, see [About Preflight Checks and Support Bundles](preflight-support-bundle-about). + +1. Run the fourth command to install using Helm: + + ```bash + helm install RELEASE_NAME oci://registry.replicated.com/APP_SLUG/CHANNEL/CHART_NAME + ``` + Where: + * `RELEASE_NAME` is the name of the Helm release. + * `APP_SLUG` is the slug for the application. For information about how to find the application slug, see [Get the Application Slug](/vendor/vendor-portal-manage-app#slug). + * `CHANNEL` is the lowercased name of the channel where the release was promoted, such as `beta` or `unstable`. Channel is not required for releases promoted to the Stable channel. + * `CHART_NAME` is the name of the Helm chart. + + :::note + To install the SDK with custom RBAC permissions, include the `--set` flag with the `helm install` command to override the value of the `replicated.serviceAccountName` field with a custom service account. For more information, see [Customizing RBAC for the SDK](/vendor/replicated-sdk-customizing#customize-rbac-for-the-sdk). + ::: + +1. (Optional) In the Vendor Portal, click **Customers**. You can see that the customer you used to install is marked as **Active** and the details about the application instance are listed under the customer name. + + **Example**: + + ![example customer in the Vendor Portal with an active instance](/images/sdk-customer-active-example.png) + [View a larger version of this image](/images/sdk-customer-active-example.png) + +--- + + +# Installer History + +import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" + +# Installer History + +<KurlAvailability/> + +This topic describes how to access the installation commands for all active and inactive kURL installers promoted to a channel. + +## About Using Inactive Installers + +Each release channel in the Replicated Vendor Portal saves the history of kURL installers that were promoted to the channel. You can view the list of historical installers on the **kURL Installer History** page for each channel. For more information, see [About the Installer History Page](#about) below. + +It can be useful to access the installation commands for inactive installers to reproduce an issue that a user is experiencing for troubleshooting purposes. For example, if the user's cluster is running the inactive installer version 1.0.0, then you can install with version 1.0.0 in a test environment to troubleshoot. + +You can also send the installation commands for inactive installers to your users as needed. For example, a user might have unique requirements for specific versions of Kubernetes or add-ons. + +## About the Installer History Page {#about} + +The **kURL Installer History** page for each channel includes a list of all the kURL installers that have been promoted to the channel, including the active installer and any inactive installers. + +To access the **kURL Installer History** page, go to **Channels** and click the **Installer history** button on the target channel. 
+
+The following image shows an example **kURL Installer History** page with three installers listed:
+
+![Installer History page in the Vendor Portal](/images/installer-history-page.png)
+
+[View a larger version of this image](/images/installer-history-page.png)
+
+The installers are listed in the order in which they were promoted to the channel. The installer at the top of the list is the active installer for the channel.
+
+The **kURL Installer History** page includes the following information for each installer listed:
+
+* Version label, if provided when the installer was promoted
+* Sequence number
+* Installation command
+* Installer YAML content
+
+---
+
+
+# Export Customer and Instance Data
+
+import Download from "../partials/customers/_download.mdx"
+
+# Export Customer and Instance Data
+
+This topic describes how to download and export customer and instance data from the Replicated Vendor Portal.
+
+## Overview
+
+While you can always consume customer and instance insight data directly in the Replicated Vendor Portal, the data is also available in a CSV format so that it can be imported into any other system, such as:
+* Customer Relationship Management (CRM) systems like Salesforce or Gainsight
+* Data warehouses like Redshift, Snowflake, or BigQuery
+* Business intelligence (BI) tools like Looker, Tableau, or PowerBI
+
+By collecting and organizing this data wherever it is most visible and valuable, you can enable your team to make better decisions about where to focus efforts across product, sales, engineering, and customer success.
+
+## Bulk Export Instance Event Timeseries Data
+
+You can use the Vendor API v3 `/app/{app_id}/events` endpoint to programmatically access historical timeseries data containing instance level events, including any custom metrics that you have defined. For more information about the endpoint, see [Get instance events in either JSON or CSV format](https://replicated-vendor-api.readme.io/reference/listappinstanceevents) in the Vendor API v3 documentation.
+
+The `/app/{app_id}/events` endpoint returns data scoped to a given application identifier. It also allows filtering based on time periods, instance identifiers, customer identifiers, and event types. You must provide at least **one** query parameter to scope the query in order to receive a response.
+
+By bulk exporting this instance event data with the `/app/{app_id}/events` endpoint, you can:
+* Identify trends and potential problem areas
+* Demonstrate the impact, adoption, and usage of recent product features
+
+### Filter Bulk Data Exports
+
+You can use the following types of filters to filter timeseries data for bulk export:
+
+- **Filter by date**:
+  - Get instance events recorded _at or before_ the query date. For example:
+    ```bash
+    curl -H "Authorization: $REPLICATED_API_TOKEN" \
+      "https://api.replicated.com/vendor/v3/app/:appID/events?before=2023-10-15"
+    ```
+  - Get instance events recorded _at or after_ the query date. For example:
+    ```shell
+    curl -H "Authorization: $REPLICATED_API_TOKEN" \
+      "https://api.replicated.com/vendor/v3/app/:appID/events?after=2023-10-15"
+    ```
+  - Get instance events recorded within a range of dates [after, before]. For example:
+    ```shell
+    curl -H "Authorization: $REPLICATED_API_TOKEN" \
+      "https://api.replicated.com/vendor/v3/app/:appID/events?after=2023-05-02&before=2023-10-15"
+    ```
+- **Filter by customer**: Get instance events from one or more customers using a comma-separated list of customer IDs. 
For example:
+  ```bash
+  curl -H "Authorization: $REPLICATED_API_TOKEN" \
+    "https://api.replicated.com/vendor/v3/app/:appID/events?customerIDs=1b13241,2Rjk2923481"
+  ```
+- **Filter by event type**: Get instance events by event type using a comma-separated list of event types. For example:
+  ```bash
+  curl -H "Authorization: $REPLICATED_API_TOKEN" \
+    "https://api.replicated.com/vendor/v3/app/:appID/events?eventTypes=numUsers,numProjects"
+  ```
+
+:::note
+If any filter is passed for an object that does not exist, no warning is given. For example, if a `customerIDs` filter is passed for an ID that does not exist, or for an ID that the user does not have access to, then an empty array is returned.
+:::
+
+
+## Download Customer Instance Data CSVs
+<Download/>
+
+### Data Dictionary
+
+The following table lists the data fields that can be included in the customers and instances CSV downloads, including the label, data type, and description.
+
+<table>
+  <tr>
+    <th>Label</th>
+    <th>Type</th>
+    <th>Description</th>
+  </tr>
+  <tr>
+    <td>customer_id</td>
+    <td>string</td>
+    <td>Customer identifier</td>
+  </tr>
+  <tr>
+    <td>customer_name</td>
+    <td>string</td>
+    <td>The customer name</td>
+  </tr>
+  <tr>
+    <td>customer_created_date</td>
+    <td>timestamptz</td>
+    <td>The date the license was created</td>
+  </tr>
+  <tr>
+    <td>customer_license_expiration_date</td>
+    <td>timestamptz</td>
+    <td>The expiration date of the license</td>
+  </tr>
+  <tr>
+    <td>customer_channel_id</td>
+    <td>string</td>
+    <td>The ID of the channel the customer is assigned to</td>
+  </tr>
+  <tr>
+    <td>customer_channel_name</td>
+    <td>string</td>
+    <td>The name of the channel the customer is assigned to</td>
+  </tr>
+  <tr>
+    <td>customer_app_id</td>
+    <td>string</td>
+    <td>App identifier</td>
+  </tr>
+  <tr>
+    <td>customer_last_active</td>
+    <td>timestamptz</td>
+    <td>The date the customer was last active</td>
+  </tr>
+  <tr>
+    <td>customer_type</td>
+    <td>string</td>
+    <td>One of prod, trial, dev, or community</td>
+  </tr>
+  <tr>
+    <td>customer_status</td>
+    <td>string</td>
+    <td>The current status of the customer</td>
+  </tr>
+  <tr>
+    <td>customer_is_airgap_enabled</td>
+    <td>boolean</td>
+    <td>The feature the customer has enabled - Airgap</td>
+  </tr>
+  <tr>
+    <td>customer_is_geoaxis_supported</td>
+    <td>boolean</td>
+    <td>The feature the customer has enabled - GeoAxis</td>
+  </tr>
+  <tr>
+    <td>customer_is_gitops_supported</td>
+    <td>boolean</td>
+    <td>The feature the customer has enabled - KOTS Auto-GitOps</td>
+  </tr>
+  <tr>
+    <td>customer_is_embedded_cluster_download_enabled</td>
+    <td>boolean</td>
+    <td>The feature the customer has enabled - Embedded Cluster</td>
+  </tr>
+  <tr>
+    <td>customer_is_identity_service_supported</td>
+    <td>boolean</td>
+    <td>The feature the customer has enabled - Identity</td>
+  </tr>
+  <tr>
+    <td>customer_is_snapshot_supported</td>
+    <td>boolean</td>
+    <td>The feature the customer has enabled - Snapshot</td>
+  </tr>
+  <tr>
+    <td>customer_has_entitlements</td>
+    <td>boolean</td>
+    <td>Indicates the presence or absence of entitlements and entitlement_* columns</td>
+  </tr>
+  <tr>
+    <td>customer_entitlement__*</td>
+    <td>string/integer/boolean</td>
+    <td>The values of any custom license fields configured for the customer. 
For example, customer_entitlement__active-users.</td>
+  </tr>
+  <tr>
+    <td>customer_created_by_id</td>
+    <td>string</td>
+    <td>The ID of the actor that created this customer: user ID or a hashed value of a token.</td>
+  </tr>
+  <tr>
+    <td>customer_created_by_type</td>
+    <td>string</td>
+    <td>The type of the actor that created this customer: user or service-account.</td>
+  </tr>
+  <tr>
+    <td>customer_created_by_description</td>
+    <td>string</td>
+    <td>The description of the actor that created this customer. Includes username or token name depending on actor type.</td>
+  </tr>
+  <tr>
+    <td>customer_created_by_link</td>
+    <td>string</td>
+    <td>The link to the actor that created this customer.</td>
+  </tr>
+  <tr>
+    <td>customer_created_by_timestamp</td>
+    <td>timestamptz</td>
+    <td>The date the customer was created by this actor. When available, matches the value in the customer_created_date column</td>
+  </tr>
+  <tr>
+    <td>customer_updated_by_id</td>
+    <td>string</td>
+    <td>The ID of the actor that updated this customer: user ID or a hashed value of a token.</td>
+  </tr>
+  <tr>
+    <td>customer_updated_by_type</td>
+    <td>string</td>
+    <td>The type of the actor that updated this customer: user or service-account.</td>
+  </tr>
+  <tr>
+    <td>customer_updated_by_description</td>
+    <td>string</td>
+    <td>The description of the actor that updated this customer. Includes username or token name depending on actor type.</td>
+  </tr>
+  <tr>
+    <td>customer_updated_by_link</td>
+    <td>string</td>
+    <td>The link to the actor that updated this customer.</td>
+  </tr>
+  <tr>
+    <td>customer_updated_by_timestamp</td>
+    <td>timestamptz</td>
+    <td>The date the customer was updated by this actor.</td>
+  </tr>
+  <tr>
+    <td>instance_id</td>
+    <td>string</td>
+    <td>Instance identifier</td>
+  </tr>
+  <tr>
+    <td>instance_is_active</td>
+    <td>boolean</td>
+    <td>The instance has pinged within the last 24 hours</td>
+  </tr>
+  <tr>
+    <td>instance_first_reported_at</td>
+    <td>timestamptz</td>
+    <td>The timestamp of the first recorded check-in for the instance.</td>
+  </tr>
+  <tr>
+    <td>instance_last_reported_at</td>
+    <td>timestamptz</td>
+    <td>The timestamp of the last recorded check-in for the instance.</td>
+  </tr>
+  <tr>
+    <td>instance_first_ready_at</td>
+    <td>timestamptz</td>
+    <td>The timestamp of when the cluster was considered ready</td>
+  </tr>
+  <tr>
+    <td>instance_kots_version</td>
+    <td>string</td>
+    <td>The version of KOTS or the Replicated SDK that the instance is running. The version is displayed as a Semantic Versioning compliant string.</td>
+  </tr>
+  <tr>
+    <td>instance_k8s_version</td>
+    <td>string</td>
+    <td>The version of Kubernetes running in the cluster.</td>
+  </tr>
+  <tr>
+    <td>instance_is_airgap</td>
+    <td>boolean</td>
+    <td>The cluster is air gapped</td>
+  </tr>
+  <tr>
+    <td>instance_is_kurl</td>
+    <td>boolean</td>
+    <td>The instance is installed in a Replicated kURL cluster (embedded cluster)</td>
+  </tr>
+  <tr>
+    <td>instance_last_app_status</td>
+    <td>string</td>
+    <td>The instance's last reported app status</td>
+  </tr>
+  <tr>
+    <td>instance_client</td>
+    <td>string</td>
+    <td>Indicates whether this instance is managed by KOTS or if it's a Helm CLI deployed instance using the SDK.</td>
+  </tr>
+  <tr>
+    <td>instance_kurl_node_count_total</td>
+    <td>integer</td>
+    <td>Total number of nodes in the cluster. 
Applies only to kURL clusters.</td> + </tr> + <tr> + <td>instance_kurl_node_count_ready</td> + <td>integer</td> + <td>Number of nodes in the cluster that are in a healthy state and ready to run Pods. Applies only to kURL clusters.</td> + </tr> + <tr> + <td>instance_cloud_provider</td> + <td>string</td> + <td>The cloud provider where the instance is running. Cloud provider is determined by the IP address that makes the request.</td> + </tr> + <tr> + <td>instance_cloud_provider_region</td> + <td>string</td> + <td>The cloud provider region where the instance is running. For example, us-central1-b</td> + </tr> + <tr> + <td>instance_app_version</td> + <td>string</td> + <td>The current application version</td> + </tr> + <tr> + <td>instance_version_age</td> + <td>string</td> + <td>The age (in days) of the currently deployed release. This is relative to the latest available release on the channel.</td> + </tr> + <tr> + <td>instance_is_gitops_enabled</td> + <td>boolean</td> + <td>Reflects whether the end user has enabled KOTS Auto-GitOps for deployments in their environment</td> + </tr> + <tr> + <td>instance_gitops_provider</td> + <td>string</td> + <td>If KOTS Auto-GitOps is enabled, reflects the GitOps provider in use. For example, GitHub Enterprise.</td> + </tr> + <tr> + <td>instance_is_skip_preflights</td> + <td>boolean</td> + <td>Indicates whether an end user elected to skip preflight check warnings or errors</td> + </tr> + <tr> + <td>instance_preflight_status</td> + <td>string</td> + <td>The last reported preflight check status for the instance</td> + </tr> + <tr> + <td>instance_k8s_distribution</td> + <td>string</td> + <td>The Kubernetes distribution of the cluster.</td> + </tr> + <tr> + <td>instance_has_custom_metrics</td> + <td>boolean</td> + <td>Indicates the presence or absence of custom metrics and custom_metric__* columns</td> + </tr> + <tr> + <td>instance_custom_metrics_reported_at</td> + <td>timestamptz</td> + <td>Timestamp of latest custom_metric</td> + </tr> + <tr> + <td>custom_metric__*</td> + <td>string/integer/boolean</td> + <td>The values of any custom metrics that have been sent by the instance. For example, custom_metric__active_users</td> + </tr> + <tr> + <td>instance_has_tags</td> + <td>boolean</td> + <td>Indicates the presence or absence of instance tags and instance_tag__* columns</td> + </tr> + <tr> + <td>instance_tag__*</td> + <td>string/integer/boolean</td> + <td>The values of any instance tag that have been set by the vendor. For example, instance_tag__name</td> + </tr> +</table> + + +--- + + +# Instance Details + +# Instance Details + +This topic describes using the Replicated Vendor Portal to quickly understand the recent events and performance of application instances installed in your customers' environments. +## About the Instance Details Page {#about-page} + +The Vendor Portal provides insights about the health, status, and performance of the active application instances associated with each customer license on the **Instance details** page. You can use the insights on the **Instance details** page to more quickly troubleshoot issues with your customers' active instances, helping to reduce support burden. 
+
+For example, you can use the **Instance details** page to track the following events for each instance:
+
+* Recent performance degradation or downtime
+* Length of instance downtime
+* Recent changes to the cluster or infrastructure
+* Changes in the number of nodes, such as nodes lost or added
+* Changes in the cluster's Kubernetes version
+* Changes in the application version that the instance is running
+
+To access the **Instance details** page, go to **Customers** and click the **Customer reporting** button for the customer that you want to view:
+
+![Customer reporting button on the Customers page](/images/customer-reporting-button.png)
+
+From the **Reporting** page for the selected customer, click the **View details** button for the desired application instance.
+
+The following shows an example of the **Instance details** page:
+
+![Instance details full page](/images/instance-details.png)
+
+[View a larger version of this image](/images/instance-details.png)
+
+As shown in the image above, the **Instance details** page includes the following sections:
+
+* **Current State**: Information about the state of the instance, such as the current application version. See [Current State](#current-state) below.
+* **Instance Insights**: Key performance indicators (KPIs) related to health, performance, and adoption. See [Insights](#insights) below.
+* **Instance Information**: Information about the cluster where the instance is installed, such as the version of Kubernetes running on the cluster. See [Instance Information](#instance-information) below.
+* **Custom Metrics**: The values for any custom metrics that are configured for the application, from the most recent check-in. For more information about configuring custom metrics, see [Configuring Custom Metrics](/vendor/custom-metrics).
+* **Instance Uptime**: Details about instance uptime over time. See [Instance Uptime](#instance-uptime) below.
+* **Instance Activity**: Event data stream. See [Instance Activity](#instance-activity) below.
+
+### Current State
+
+The **Current State** section displays the following event data about the status and version of the instance:
+
+* **App status**: The status of the application. Possible statuses are Ready, Updating, Degraded, Unavailable, and Missing. For more information about enabling application status insights and how to interpret the different statuses, see [Enabling and Understanding Application Status](insights-app-status).
+
+  Additionally, for applications that include the [Replicated SDK](/vendor/replicated-sdk-overview), you can hover over the **App status** field to view the statuses of the individual resources deployed by the application, as shown in the example below:
+
+  <img src="/images/resource-status-hover-current-state.png" alt="resource status pop up" width="400px"/>
+
+  [View a larger version of this image](/images/resource-status-hover-current-state.png)
+
+* **App version**: The version label of the currently running release. You define the version label in the release properties when you promote the release. For more information about defining release properties, see [Properties](releases-about#properties) in _About Channels and Releases_.
+
+  If there is no version label for the release, then the Vendor Portal displays the release sequence in the **App version** field. You can find the sequence number associated with a release by running the `replicated release ls` command. 
See [release ls](/reference/replicated-cli-release-ls) in the _Replicated CLI_ documentation. + +* **Version age**: The absolute and relative ages of the instance: + + * **Absolute age**: `now - current_release.promoted_date` + + The number of days since the currently running application version was promoted to the channel. For example, if the instance is currently running version 1.0.0, and version 1.0.0 was promoted to the channel 30 days ago, then the absolute age is 30. + + * **Relative age (Days Behind Latest)**: `channel.latest_release.promoted_date - current_release.promoted_date` + + The number of days between when the currently running application version was promoted to the channel and when the latest available version on the channel was promoted. + + For example, the instance is currently running version 1.0.0, which was promoted to the Stable channel. The latest version available on the Stable channel is 1.5.0. If 1.0.0 was promoted 30 days ago and 1.5.0 was promoted 10 days ago, then the relative age of the application instance is 20 days. + +* **Versions behind**: The number of versions between the currently running version and the latest version available on the channel where the instance is assigned. + + For example, the instance is currently running version 1.0.0, which was promoted to the Stable channel. If the later versions 1.1.0, 1.2.0, 1.3.0, 1.4.0, and 1.5.0 were also promoted to the Stable channel, then the instance is five versions behind. + +* **Last check-in**: The timestamp when the instance most recently sent data to the Vendor Portal. + +### Instance Insights {#insights} + +The **Insights** section includes the following metrics computed by the Vendor Portal: + +* [Uptime](#uptime) +* [Time to Install](#time-to-install) + +#### Uptime + +The Vendor Portal computes the total uptime for the instance as the fraction of time that the instance spends with a Ready, Updating, or Degraded status. The Vendor Portal also provides more granular details about uptime in the **Instance Uptime** graph. See [Instance Uptime](#instance-uptime) below. + +High uptime indicates that the application is reliable and able to handle the demands of the customer environment. Low uptime might indicate that the application is prone to errors or failures. By measuring the total uptime, you can better understand the performance of your application. + +The following table lists the application statuses that are associated with an Up or Down state in the total uptime calculation: + +<table> + <tr> + <th>Uptime State</th> + <th>Application Statuses</th> + </tr> + <tr> + <td>Up</td> + <td>Ready, Updating, or Degraded</td> + </tr> + <tr> + <td>Down</td> + <td>Missing or Unavailable</td> + </tr> +</table> + +:::note +The Vendor Portal includes time spent in a Degraded status in the total uptime for an instance because an app may still be capable of serving traffic when some subset of desired replicas are available. Further, it is possible that a Degraded state is expected during upgrade. +::: + +#### Time to Install + +The Vendor Portal computes both _License time to install_ and _Instance time to install_ metrics to represent how quickly the customer was able to deploy the application to a Ready state in their environment. + +Replicated recommends that you use Time to Install as an indicator of the quality of the packaging, configuration, and documentation of your application. 
+
+If the installation process for your application is challenging, poorly documented, lacks appropriate preflight checks, or relies heavily on manual steps, then it can take days or weeks to deploy the application in customer environments. A longer Time to Install generally represents a significantly increased support burden and a degraded customer installation experience.
+
+The following describes the _License time to install_ and _Instance time to install_ metrics:
+
+* **License time to install**: The time between when you create the customer license in the Vendor Portal, and when the application instance reaches a Ready status in the customer environment.
+
+  License time to install represents the time that it takes for a customer to successfully deploy your application after you intend to distribute the application to the customer. Replicated uses the timestamp of when you create the customer license in the Vendor Portal to represent your intent to distribute the application because creating the license file is generally the final step before you share the installation materials with the customer.
+
+  License time to install includes several activities that are involved in deploying the application: receiving the necessary materials and documentation, downloading the assets, provisioning the required hardware, networking, and external systems, completing the preflight checks, and finally installing, configuring, and deploying the application.
+
+* **Instance time to install**: The time between when the Vendor Portal records the first event for the application instance in the customer environment, and when the instance reaches a Ready status.
+
+  Instance time to install is the length of time that it takes for the application to reach a Ready state after the customer starts a deployment attempt in their environment. Replicated considers a deployment attempt started when the Vendor Portal first records an event for the instance.
+
+  For more information about how the Vendor Portal generates events, see [About Events](instance-insights-event-data#about-events) in _Event Data_.
+
+  :::note
+  Instance time to install does _not_ include any deployment attempts that a customer might have made that did not generate an event. For example, time spent by the customer discarding the server used in a failed attempt before attempting to deploy the instance again on a new server.
+  :::
+
+### Instance Information
+
+The **Instance Information** section displays the following details about the cluster infrastructure where the application is installed as well as vendor-defined metadata about the instance:
+
+* The Kubernetes distribution for the cluster. For example, GKE or EKS.
+* The version of Kubernetes running in the cluster.
+* The version of KOTS or the Replicated SDK installed in the cluster.
+* For **First Seen**, the timestamp of the first event that the Vendor Portal generated for the instance. For more information about how the Vendor Portal generates events, see [About Events](instance-insights-event-data#about-events) in _Event Data_.
+* If detected, the cloud provider and region where the cluster is running. For example, `GCP: us-central1`.
+* An optional vendor-defined name for the instance.
+* Optional vendor-defined instance tags in the form of key-value pairs. Each instance can have a maximum of 10 tags. 
+
+In addition to the details listed above, the **Instance Information** section also displays the following for embedded clusters provisioned by Replicated kURL:
+* Node operating systems
+* Node operating system versions
+* Total number of cluster nodes
+* Number of cluster nodes in a Ready state
+* ID of the kURL installer specification
+
+### Instance Uptime
+
+The **Instance Uptime** graph shows the percentage of a given time period that the instance was in an Up, Degraded, or Down state.
+
+To determine if the instance is Up, Degraded, or Down, the Vendor Portal uses the application status. Possible application statuses are Ready, Updating, Degraded, Unavailable, and Missing. The following table lists the application statuses that are associated with each state in the **Instance Uptime** graph:
+
+<table>
+  <tr>
+    <th>Uptime State</th>
+    <th>Application Statuses</th>
+  </tr>
+  <tr>
+    <td>Up</td>
+    <td>Ready or Updating</td>
+  </tr>
+  <tr>
+    <td>Degraded</td>
+    <td>Degraded</td>
+  </tr>
+  <tr>
+    <td>Down</td>
+    <td>Missing or Unavailable</td>
+  </tr>
+</table>
+
+The following shows an example of an **Instance Uptime** graph:
+
+![Uptime Graph on the Instance details page](/images/instance-uptime-graph.png)
+
+You can hover over the bars in the **Instance Uptime** graph to view more detail about the percent of time that the instance was in each state during the given time period.
+
+![Uptime Graph with event markers on the Instance details page](/images/instance-uptime-graph-event-markers.png)
+
+You can hover over the event markers in the **Instance Uptime** graph to view more detail about the events that occurred during that given interval on the graph. If more than two events occurred in that period, the event marker displays the number of events that occurred during that period. If you click the event marker or the event in the tooltip, the **Instance Activity** section highlights the event or the first event in the group.
+
+### Instance Activity
+
+The **Instance Activity** section displays recent events for the instance. The data stream is updated each time an instance _check-in_ occurs. For more information about what triggers an instance check-in, see [How the Vendor Portal Collects Instance Data](instance-insights-event-data#about-reporting) in _About Instance and Event Data_.
+
+The timestamp of events displayed in the **Instance Activity** stream is the timestamp when the Replicated Vendor API received data from the instance. The timestamp of events does not necessarily reflect the timestamp of when the event occurred. 
+
+The following shows an example of the **Instance Activity** data stream:
+
+![Instance Activity section of Instance details page](/images/instance-activity.png)
+
+You can filter the **Instance Activity** stream by the following categories:
+
+* [App install/upgrade](#app-install-upgrade)
+* [App status](#app-status)
+* [Cluster status](#cluster)
+* [Custom metrics](#custom-metrics)
+* [Infrastructure status](#infrastructure)
+* [KOTS version](#kots)
+* [Replicated SDK version](#sdk)
+* [Upstream update](#upstream)
+
+The following tables describe the events that can be displayed in the **Instance Activity** stream for each of the categories above:
+
+#### App install/upgrade {#app-install-upgrade}
+
+<table>
+  <tr>
+    <th>Label</th>
+    <th>Description</th>
+  </tr>
+  <tr>
+    <td>App Channel</td>
+    <td>The ID of the channel the application instance is assigned.</td>
+  </tr>
+  <tr>
+    <td>App Version</td>
+    <td>The version label of the release that the instance is currently running. The version label is the version that you assigned to the release when promoting it to a channel.</td>
+  </tr>
+</table>
+
+#### App status {#app-status}
+
+<table>
+  <tr>
+    <th>Label</th>
+    <th>Description</th>
+  </tr>
+  <tr>
+    <td>App Status</td>
+    <td>
+      <p>A string that represents the status of the application. Possible values: Ready, Updating, Degraded, Unavailable, Missing. For applications that include the <a href="/vendor/replicated-sdk-overview">Replicated SDK</a>, hover over the application status to view the statuses of the individual resources deployed by the application.</p>
+      <p>For more information, see <a href="insights-app-status">Enabling and Understanding Application Status</a>.</p>
+    </td>
+  </tr>
+</table>
+
+#### Cluster status {#cluster}
+
+<table>
+  <tr>
+    <th>Label</th>
+    <th>Description</th>
+  </tr>
+  <tr>
+    <td>Cluster Type</td>
+    <td>
+      <p>Indicates if the cluster was provisioned by kURL.</p>
+      <p>Possible values:</p>
+      <ul>
+        <li><code>kURL</code>: The cluster is provisioned by kURL.</li>
+        <li><code>Existing</code>: The cluster is <em>not</em> provisioned by kURL.</li>
+      </ul>
+      <p>For more information about kURL clusters, see <a href="packaging-embedded-kubernetes">Creating a kURL installer</a>.</p>
+    </td>
+  </tr>
+  <tr>
+    <td>Kubernetes Version</td>
+    <td>The version of Kubernetes running in the cluster.</td>
+  </tr>
+  <tr>
+    <td>Kubernetes Distribution</td>
+    <td>
+      <p>The Kubernetes distribution of the cluster.</p>
+      <p>Possible values:</p>
+      <ul>
+        <li>EKS</li>
+        <li>GKE</li>
+        <li>K3S</li>
+        <li>RKE2</li>
+      </ul>
+    </td>
+  </tr>
+  <tr>
+    <td>kURL Nodes Total</td>
+    <td>
+      <p>Total number of nodes in the cluster.</p>
+      <p><strong>Note:</strong> Applies only to kURL clusters.</p>
+    </td>
+  </tr>
+  <tr>
+    <td>kURL Nodes Ready</td>
+    <td>
+      <p>Number of nodes in the cluster that are in a healthy state and ready to run Pods.</p>
+      <p><strong>Note:</strong> Applies only to kURL clusters.</p>
+    </td>
+  </tr>
+  <tr>
+    <td>New kURL Installer</td>
+    <td>
+      <p>The ID of the kURL installer specification that kURL used to provision the cluster. Indicates that a new Installer specification was added. An installer specification is a manifest file that has <code>apiVersion: cluster.kurl.sh/v1beta1</code> and <code>kind: Installer</code>. 
</p>
+      <p>For more information about installer specifications for kURL, see <a href="packaging-embedded-kubernetes">Creating a kURL Installer</a>.</p>
+      <p><strong>Note:</strong> Applies only to kURL clusters.</p>
+    </td>
+  </tr>
+</table>
+
+#### Custom metrics {#custom-metrics}
+
+You can filter the activity feed by any custom metrics that are configured for the application. The labels for the custom metrics vary depending on the custom key value pairs included in the data set that is sent to the Vendor Portal. For example, the key value pair `"num_projects": 5` is displayed as **Num Projects: 5** in the activity feed.
+
+For more information about configuring custom metrics, see [Configuring Custom Metrics](/vendor/custom-metrics).
+
+#### Infrastructure status {#infrastructure}
+
+<table>
+  <tr>
+    <th>Label</th>
+    <th>Description</th>
+  </tr>
+  <tr>
+    <td>Cloud Provider</td>
+    <td>
+      <p>The cloud provider where the instance is running. Cloud provider is determined by the IP address that makes the request.</p>
+      <p>Possible values:</p>
+      <ul>
+        <li>AWS</li>
+        <li>GCP</li>
+        <li>DigitalOcean</li>
+      </ul>
+    </td>
+  </tr>
+  <tr>
+    <td>Cloud Region</td>
+    <td>
+      <p>The cloud provider region where the instance is running. For example, <code>us-central1-b</code>.</p>
+    </td>
+  </tr>
+</table>
+
+#### KOTS version {#kots}
+
+<table>
+  <tr>
+    <th>Label</th>
+    <th>Description</th>
+  </tr>
+  <tr>
+    <td>KOTS Version</td>
+    <td>The version of KOTS that the instance is running. KOTS version is displayed as a Semantic Versioning compliant string.</td>
+  </tr>
+</table>
+
+#### Replicated SDK version {#sdk}
+
+<table>
+  <tr>
+    <th>Label</th>
+    <th>Description</th>
+  </tr>
+  <tr>
+    <td>Replicated SDK Version</td>
+    <td>The version of the Replicated SDK that the instance is running. SDK version is displayed as a Semantic Versioning compliant string.</td>
+  </tr>
+</table>
+
+#### Upstream update {#upstream}
+
+<table>
+  <tr>
+    <th>Label</th>
+    <th>Description</th>
+  </tr>
+  <tr>
+    <td>Versions Behind</td>
+    <td>
+      <p>The number of versions between the version that the instance is currently running and the latest version available on the channel.</p>
+      <p>Computed by the Vendor Portal each time it receives instance data.</p>
+    </td>
+  </tr>
+</table>
+
+
+---
+
+
+# About Instance and Event Data
+
+import AirGapTelemetry from "../partials/instance-insights/_airgap-telemetry.mdx"
+
+# About Instance and Event Data
+
+This topic provides an overview of the customer and instance insights that you can view in the Replicated Vendor Portal. It includes information about how the Vendor Portal accesses data as well as requirements and limitations.
+
+## How the Vendor Portal Collects Instance Data {#about-reporting}
+
+This section describes how the Vendor Portal collects instance data from online and air gap environments.
+
+### Online Instances
+
+For instances running in online (internet-connected) environments, either Replicated KOTS or the Replicated SDK periodically sends a small amount of data to the Vendor Portal, depending on which is installed in the cluster alongside the application. If both KOTS and the SDK are installed in the cluster (such as when a Helm chart that includes the SDK is installed by KOTS), then both send instance data.
+
+The data sent to the Vendor Portal includes properties such as the current version and status of the instance.
For a full overview of what data might be included, see the [Replicated Data Transmission Policy](https://docs.replicated.com/vendor/policies-data-transmission).
+
+The following diagram shows the flow of different types of data from customer environments to the Vendor Portal:
+
+![Telemetry sent from instances to vendor platform](/images/telemetry-diagram.png)
+
+[View a larger version of this image](/images/telemetry-diagram.png)
+
+As shown in the diagram above, application instance data, application status data, and details about the KOTS and the SDK instances running in the cluster are all sent to the Vendor Portal through the Replicated app service:
+* When both KOTS and the SDK are installed in the cluster, they both send application instance data, including information about the cluster where the instance is running.
+* KOTS and the SDK both send information about themselves, including the version of KOTS or the SDK running in the cluster.
+* Any custom metrics configured by the software vendor are sent to the Vendor Portal through the Replicated SDK API. For more information, see [Configuring Custom Metrics](/vendor/custom-metrics).
+* Application status data, such as if the instance is ready or degraded, is sent by KOTS. If KOTS is not installed in the cluster, then the SDK sends the application status data. For more information, see [Enabling and Understanding Application Status](/vendor/insights-app-status).
+
+### Air Gap Instances
+
+<AirGapTelemetry/>
+
+For more information, see [Collecting Telemetry for Air Gap Instances](/vendor/telemetry-air-gap).
+
+## Frequency of Data Sent to the Vendor Portal
+
+This section describes how frequently data is sent to the Vendor Portal for online and air gap instances.
+
+### From the Replicated SDK (Online Instances Only)
+
+When installed alongside the application in an online environment, the SDK automatically sends instance data to the Vendor Portal when any of the following occur:
+
+* Four hours have passed since the SDK last sent data. The SDK sends data on a regular four-hour interval.
+
+* The instance checks for updates. An update check occurs when the instance makes a request to the `/api/v1/app/updates` SDK API endpoint. See [app](/reference/replicated-sdk-apis#app) in _Replicated SDK API (Alpha)_. A sketch of this request is shown after the KOTS triggers below.
+
+* The instance completes a Helm update to a new application version. After the update completes, the SDK sends data when it restarts.
+
+* The status of an instance changes. For example, an instance can change from a Ready to a Degraded status. For more information, see [Enabling and Understanding Application Status](insights-app-status).
+
+### From KOTS (Online Instances Only)
+
+When installed alongside the application in an online environment, KOTS automatically sends instance data to the Vendor Portal when any of the following occur:
+
+* The instance checks for updates. By default, KOTS checks for updates every four hours. Additionally, an update check can occur when a user clicks the **Check for updates** button in the Replicated Admin Console.
+
+  :::note
+  KOTS users can modify or disable automatic update checks from the Admin Console. For more information, see [Configuring Automatic Updates](/enterprise/updating-apps).
+  :::
+
+* The status of an instance changes. For example, an instance can change from a Ready to a Degraded status. For more information, see [Enabling and Understanding Application Status](insights-app-status).
+
+* (KOTS v1.92 and later only) The instance deploys a new application version.
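+
+As an illustration of the update check triggers described above, the following is a minimal sketch of a request to the SDK's `/api/v1/app/updates` endpoint. The `replicated` service name and port `3000` are assumed SDK defaults, not taken from this document; adjust them for your environment:
+
+```bash
+# Minimal sketch: query the Replicated SDK in-cluster API for available
+# updates, which also triggers an instance check-in. The `replicated`
+# service name and port 3000 are assumed defaults; adjust as needed.
+curl -s http://replicated:3000/api/v1/app/updates
+```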
+
+### From Air Gap Instances
+
+For air gap instances, the frequency of data sent to the Vendor Portal depends on how frequently support bundles are collected in the customer environment and uploaded to the Vendor Portal.
+
+For more information, see [Collecting Telemetry for Air Gap Instances](/vendor/telemetry-air-gap).
+
+## How the Vendor Portal Generates Events and Insights {#about-events}
+
+When the Vendor Portal receives instance data, it evaluates each data field to determine if there was a change in its value. For each field that changes in value, the Vendor Portal creates an _event_ to record the change. For example, a change from Ready to Degraded in the application status generates an event.
+
+In addition to creating events for changes in data sent by the instance, the Vendor Portal also generates events for changes in values of computed metrics. The Vendor Portal updates the values of computed metrics each time it receives instance data. For example, the Vendor Portal computes a _Versions behind_ metric that tracks the number of versions behind the latest available version for the instance. When the instance checks for updates and a new update is available, the value of this metric changes and the Vendor Portal generates an event.
+
+The Vendor Portal uses events to display insights for each active instance in an **Instance details** dashboard. For more information about using the Vendor Portal **Instance details** page to monitor active instances of your application, see [Instance Details](instance-insights-details).
+
+## Requirements
+
+The following requirements apply to collecting instance telemetry:
+
+* Replicated KOTS or the Replicated SDK must be installed in the cluster where the application instance is running.
+
+* For KOTS installations and for Helm CLI installations that use `helm template` then `kubectl apply`, additional configuration is required to get application status data. For more information, see [Enabling and Understanding Application Status](/vendor/insights-app-status).
+
+* To view resource status details for an instance on the **Instance details** page, the Replicated SDK must be installed in the cluster alongside the application. For more information, see [View Resource Status Insights](insights-app-status#resource-status) in _Enabling and Understanding Application Status_.
+
+* There are additional requirements for collecting telemetry from air gap instances. For more information, see [Collecting Telemetry for Air Gap Instances](/vendor/telemetry-air-gap).
+
+## Limitations
+
+The Vendor Portal has the following limitations for reporting instance data and generating events:
+
+* **Active instances**: Instance data is available for _active_ instances. An instance is considered inactive when its most recent check-in was more than 24 hours ago. An instance can become inactive if it is decommissioned, stops checking for updates, or otherwise stops reporting.
+
+  The Vendor Portal continues to display data for an inactive instance from its most recently seen state. This means that data for an inactive instance might continue to show a Ready status after the instance becomes inactive. Replicated recommends that you use the timestamp in the **Last Check-in** field to understand if an instance might have become inactive, causing its data to be out of date.
+* **Air gap instances**: There are additional limitations for air gap telemetry. For more information, see [Collecting Telemetry for Air Gap Instances](/vendor/telemetry-air-gap).
+* **Instance data freshness**: The rate at which data is updated in the Vendor Portal varies depending on how often the Vendor Portal receives instance data.
+* **Event timestamps**: The timestamp of events displayed on the **Instance details** page is the timestamp when the Replicated Vendor API received the data from the instance. The timestamp of events does not necessarily reflect the timestamp of when the event occurred.
+* **Caching for kURL cluster data**: For clusters created with Replicated kURL (embedded clusters), KOTS stores the counts of total nodes and ready nodes in a cache for five minutes. If KOTS sends instance data to the Vendor Portal within the five-minute window, then the reported data for total nodes and ready nodes reflects the data in the cache. This means that events displayed on the **Instance details** page for the total nodes and ready nodes can show values that differ from the current values of these fields.
+
+
+---
+
+
+# Configuring Instance Notifications (Beta)
+
+import NotificationsAbout from "../partials/instance-insights/_notifications-about.mdx"
+
+
+# Configuring Instance Notifications (Beta)
+
+<NotificationsAbout/>
+
+This topic describes how to configure Slack or email notifications in the Replicated Vendor Portal for instances of your application.
+
+For information about creating and managing instance notifications with the Vendor API v3, see the [notifications](https://replicated-vendor-api.readme.io/reference/subscribeinstanceevents) section in the Vendor API v3 documentation.
+
+## Overview
+
+Teams can receive notifications about customer instances through a Slack channel. Individual users can also receive email notifications.
+
+Instance notifications can be disabled when they are no longer needed. For example, a team member can turn off their email notifications for a customer instance when they are no longer responsible for supporting that customer.
+
+## Prerequisite
+
+For Slack notifications, you must configure a Slack webhook in the Vendor Portal at the Team level before you can turn on instance notifications. For more information, see [Configuring a Slack Webhook (Beta)](team-management-slack-config).
+
+For email notifications, no prior configuration is required. The email address listed in your Vendor Portal account settings is used.
+
+## Configure Notifications
+
+Follow this procedure to configure Slack or email notifications for application instances. You can enable notifications for application status changes, system events such as Kubernetes upgrades, or changes in the values of any custom metrics configured for the application.
+
+To configure notifications:
+
+1. Go to **Applications > Customers**, and click an active customer instance that you want to receive notifications for.
+
+   <img src="/images/customer-instances.png" alt="Customer instances list in the Vendor Portal" width="600"/>
+
+1. On the Instance Details page, click **Notifications**.
+
+   <img width="600px" src="/images/instance-notifications.png" />
+
+1. From the **Configure Instance Notifications** dialog, select the types of notifications to enable.
+
+   ![Configure Instance Notifications dialog](/images/instance-notifications-dialog.png)
+
+   [View a larger version of this image](/images/instance-notifications-dialog.png)
+
+1. Click **Save**.
+
+1. Repeat these steps to configure notifications for other application instances.
+
+
+## Test Notifications
+
+After you enable notifications for a running development instance, test that your notifications are working as expected.
+
+Do this by forcing your application into a non-ready state. For example, you can delete one or more application Pods and wait for a ReplicationController to recreate them.
+
+Then, look for notifications in the assigned Slack channel. You also receive an email if you enabled email notifications.
+
+:::note
+There is a 30-second buffer between event detection and notifications being sent. This buffer provides better roll-ups and reduces noise.
+:::
+
+---
+
+
+# Replicated FAQs
+
+import SDKOverview from "../partials/replicated-sdk/_overview.mdx"
+import EmbeddedKubernetes from "../partials/kots/_embedded-kubernetes-definition.mdx"
+import Helm from "../partials/helm/_helm-definition.mdx"
+import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"
+
+# Replicated FAQs
+
+This topic lists frequently asked questions (FAQs) for different components of the Replicated Platform.
+
+## Getting Started FAQs
+
+### What are the supported application packaging options?
+
+Replicated strongly recommends that all applications are packaged using Helm.
+
+<Helm/>
+
+Many enterprise customers expect to be able to install an application with Helm in their own cluster. Packaging with Helm allows you to support installation with the Helm CLI and with the Replicated installers (Replicated Embedded Cluster and Replicated KOTS) from a single release in the Replicated Platform.
+
+For vendors that do not want to use Helm, applications distributed with Replicated can be packaged as Kubernetes manifest files.
+
+### How do I get started with Replicated?
+
+Replicated recommends that new users start by completing one or more labs or tutorials to get familiar with the processes of creating, installing, and iterating on releases for an application with the Replicated Platform.
+
+Then, when you are ready to begin onboarding your own application to the Replicated Platform, see [Replicated Onboarding](replicated-onboarding) for a list of Replicated features to begin integrating.
+
+#### Labs
+
+The following labs in Instruqt provide a hands-on introduction to working with Replicated features, without needing your own sample application or development environment:
+
+* [Distributing Your Application with Replicated](https://play.instruqt.com/embed/replicated/tracks/distributing-with-replicated?token=em_VHOEfNnBgU3auAnN): Learn how to quickly get value from the Replicated Platform for your application.
+* [Delivering Your Application as a Kubernetes Appliance](https://play.instruqt.com/embed/replicated/tracks/delivering-as-an-appliance?token=em_lUZdcv0LrF6alIa3): Use Embedded Cluster to distribute Kubernetes and an application together as a single appliance.
+* [Avoiding Installation Pitfalls](https://play.instruqt.com/embed/replicated/tracks/avoiding-installation-pitfalls?token=em_gJjtIzzTTtdd5RFG): Learn how to use preflight checks to avoid common installation issues and assure your customer is installing into a supported environment.
+* [Closing the Support Information Gap](https://play.instruqt.com/embed/replicated/tracks/closing-information-gap?token=em_MO2XXCz3bAgwtEca): Learn how to use support bundles to close the information gap between your customers and your support team.
+* [Protecting Your Assets](https://play.instruqt.com/embed/replicated/tracks/protecting-your-assets?token=em_7QjY34G_UHKoREBd): Assure your customers have the right access to your application artifacts and features using Replicated licensing.
+
+#### Tutorials
+
+The following getting started tutorials demonstrate how to integrate key Replicated features with a sample Helm chart application:
+* [Install a Helm Chart on a VM with Embedded Cluster](/vendor/tutorial-embedded-cluster-setup): Create a release that can be installed on a VM with the Embedded Cluster installer.
+* [Install a Helm Chart with KOTS and the Helm CLI](/vendor/tutorial-kots-helm-setup): Create a release that can be installed with both the KOTS installer and the Helm CLI.
+* [Set Helm Chart Values with KOTS](/vendor/tutorial-config-setup): Configure the Admin Console Config screen to collect user-supplied values.
+* [Add Preflight Checks to a Helm Chart](/vendor/tutorial-preflight-helm-setup): Create preflight checks for your application by adding a spec for preflight checks to a Secret in the Helm templates.
+
+### What are air gap installations?
+
+_Air gap_ refers to a computer or network that does not have outbound internet access. Air-gapped environments are common for enterprises that require high security, such as government agencies or financial institutions.
+
+Traditionally, air-gapped systems are physically isolated from the network. For example, an air-gapped server might be stored in a separate location away from network-connected servers. Physical access to air-gapped servers is often restricted as well.
+
+It is also possible to use _virtual_ or _logical_ air gaps, in which security controls such as firewalls, role-based access control (RBAC), and encryption are used to logically isolate a device from a network. In this way, network access is still restricted, but there is not a physical air gap that disconnects the device from the network.
+
+Replicated supports installations into air-gapped environments. In an air gap installation, users first download the images and other assets required for installation on an internet-connected device. These installation assets are usually provided in an _air gap bundle_ that ISVs can build in the Replicated Vendor Portal. Then, users transfer the installation assets to their air-gapped machine where they can push the images to an internal private registry and install.
+
+For more information, see:
+* [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap)
+* [Installing and Updating with Helm in Air Gap Environments](/vendor/helm-install-airgap)
+
+### What is the Commercial Software Distribution Lifecycle?
+
+Commercial software distribution is the business process that independent software vendors (ISVs) use to enable enterprise customers to self-host a fully private instance of the vendor's application in an environment controlled by the customer.
+
+Replicated has developed the Commercial Software Distribution Lifecycle to represent the stages that are essential for every company that wants to deliver their software securely and reliably to customer-controlled environments.
+
+This lifecycle was inspired by the DevOps lifecycle and the Software Development Lifecycle (SDLC), but it focuses on the unique requirements for successfully distributing commercial software to tens, hundreds, or thousands of enterprise customers.
+
+The phases are:
+* Develop
+* Test
+* Release
+* License
+* Install
+* Report
+* Support
+
+For more information about the Replicated features that enhance each phase of the lifecycle, see [Introduction to Replicated](../intro-replicated).
+
+## Compatibility Matrix FAQs
+
+### What types of clusters can I create with Compatibility Matrix?
+
+You can use Compatibility Matrix to get kubectl access to running clusters in minutes or less. Compatibility Matrix supports a variety of VM and cloud distributions, including Red Hat OpenShift, Replicated Embedded Cluster, and Oracle Container Engine for Kubernetes (OKE). For a complete list, see [Supported Compatibility Matrix Cluster Types](/vendor/testing-supported-clusters).
+
+### How does billing work?
+
+Clusters created with Compatibility Matrix are billed by the minute. Per-minute billing begins when the cluster reaches a running status and ends when the cluster is deleted. For more information, see [Billing and Credits](/vendor/testing-about#billing-and-credits).
+
+### How do I buy credits?
+
+To create clusters with Compatibility Matrix, you must have credits in your Vendor Portal account. If you have a contract, you can purchase credits by logging in to the Vendor Portal and going to **[Compatibility Matrix > Buy additional credits](https://vendor.replicated.com/compatibility-matrix)**. Otherwise, to request credits, log in to the Vendor Portal and go to **[Compatibility Matrix > Request more credits](https://vendor.replicated.com/compatibility-matrix)**.
+
+### How do I add Compatibility Matrix to my CI/CD pipelines?
+
+You can use Replicated CLI commands to integrate Compatibility Matrix into your CI/CD development and production workflows. This allows you to programmatically create multiple different types of clusters where you can deploy and test your application before releasing.
+
+For more information, see [About Integrating with CI/CD](/vendor/ci-overview).
+
+## KOTS and Embedded Cluster FAQs
+
+### What is the Admin Console?
+
+The Admin Console is the user interface deployed by the Replicated KOTS installer. Users log in to the Admin Console to configure and install the application. Users also access the Admin Console after installation to complete application management tasks such as performing updates, syncing their license, and generating support bundles. For installations with Embedded Cluster, the Admin Console also includes a **Cluster Management** tab where users can manage the nodes in the cluster.
+
+The Admin Console is available in installations with Replicated Embedded Cluster and Replicated KOTS.
+
+The following shows an example of the Admin Console dashboard for an Embedded Cluster installation of an application named "Gitea":
+
+<img src="/images/gitea-ec-ready.png" width="800px" alt="admin console dashboard"/>
+
+[View a larger version of this image](/images/gitea-ec-ready.png)
+
+### How do Embedded Cluster installations work?
+
+To install with Embedded Cluster, users first download and extract the Embedded Cluster installation assets for the target application release on their VM or bare metal server. Then, they run an Embedded Cluster installation command to provision the cluster. During installation, Embedded Cluster also installs Replicated KOTS in the cluster, which deploys the Admin Console.
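+
+For illustration, the following is a minimal sketch of the commands a user runs for an Embedded Cluster installation. The archive name, the `APP_SLUG` placeholder, and the `license.yaml` file name are illustrative and depend on how the assets were downloaded:
+
+```bash
+# Minimal sketch of an Embedded Cluster installation. APP_SLUG is a
+# placeholder for the application slug; the archive and license file
+# names are illustrative.
+tar -xvzf APP_SLUG-embedded.tgz
+sudo ./APP_SLUG install --license license.yaml
+```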
+
+After the installation command finishes, users log in to the Admin Console to provide application configuration values, optionally join more nodes to the cluster, run preflight checks, and deploy the application.
+
+Customer-specific Embedded Cluster installation instructions are provided in the Replicated Vendor Portal. For more information, see [Installing with Embedded Cluster](/enterprise/installing-embedded).
+
+### Does Replicated support installations into air gap environments?
+
+Yes. The Embedded Cluster and KOTS installers support installation in _air gap_ environments with no outbound internet access.
+
+To support air gap installations, vendors can build air gap bundles for their application in the Vendor Portal that contain all the required assets for a specific release of the application. Additionally, Replicated provides bundles that contain the assets for the Replicated installers.
+
+For more information about how to install with Embedded Cluster and KOTS in air gap environments, see [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap) and [Air Gap Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster-airgapped).
+
+### Can I deploy Helm charts with KOTS?
+
+Yes. An application deployed with KOTS can use one or more Helm charts, can include Helm charts as components, and can use more than a single instance of any Helm chart. Each Helm chart requires a unique HelmChart custom resource (`apiVersion: kots.io/v1beta2`) in the release.
+
+For more information, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about).
+
+### What's the difference between Embedded Cluster and kURL?
+
+Replicated Embedded Cluster is a successor to Replicated kURL. Compared to kURL, Embedded Cluster offers significantly faster installation, updates, and node joins, a redesigned Admin Console UI, improved support for multi-node clusters, one-click updates that update the application and the cluster at the same time, and more.
+
+<KurlAvailability/>
+
+For more information, see [Embedded Cluster Overview](/vendor/embedded-overview).
+
+### How do I enable Embedded Cluster and KOTS installations for my application?
+
+Releases that support installation with KOTS include the manifests required by KOTS to define the Admin Console experience and install the application.
+
+In addition to the KOTS manifests, releases that support installation with Embedded Cluster also include the Embedded Cluster Config. The Embedded Cluster Config defines aspects of the cluster that will be provisioned and also sets the version of KOTS that will be installed.
+
+For more information, see [Embedded Cluster Overview](/vendor/embedded-overview).
+
+### Can I use my own branding?
+
+The KOTS Admin Console and the Replicated Download Portal support the use of a custom logo. Additionally, software vendors can use custom domains to alias the endpoints for Replicated services.
+
+For more information, see [Customizing the Admin Console and Download Portal](/vendor/admin-console-customize-app-icon) and [About Custom Domains](custom-domains).
+
+## Replicated SDK FAQs
+
+### What is the SDK?
+
+<SDKOverview/>
+
+### Is the SDK supported in air gap environments?
+
+Yes. The Replicated SDK has an _air gap mode_ that allows it to run in environments with no outbound internet access. When installed in air gap mode, the SDK does not attempt to connect to the internet.
This avoids any failures that would occur when the SDK is unable to make outbound requests in air gap environments. + +For more information, see [Installing the SDK in Air Gap Environments](/vendor/replicated-sdk-airgap). + +### How do I develop against the SDK API? + +You can use the Replicated SDK in _integration mode_ to develop locally against the SDK API without needing to make real changes in the Replicated Vendor Portal or in your environment. + +For more information, see [Developing Against the SDK API](/vendor/replicated-sdk-development). + +### How does the Replicated SDK work with KOTS? + +The Replicated SDK is a Helm chart that can be installed as a small service alongside an application, or as a standalone component. The SDK can be installed using the Helm CLI or KOTS. + +Replicated recommends that all applications include the SDK because it provides access to key functionality not available through KOTS, such as support for sending custom metrics from application instances. When both the SDK and KOTS are installed in a cluster alongside an application, both send instance telemetry to the Vendor Portal. + +For more information about the SDK installation options, see [Installing the Replicated SDK](/vendor/replicated-sdk-installing). + +## Vendor Portal FAQs + +### How do I add and remove team members? + +Admins can add, remove, and manage team members from the Vendor Portal. For more information, see [Managing Team Members](/vendor/team-management). + +### How do I manage RBAC policies for my team members? + +By default, every team has two policies created automatically: Admin and Read Only. If you have an Enterprise plan, you will also have the Sales and Support policies created automatically. These default policies are not configurable. + +You can also configure custom RBAC policies if you are on the Enterprise pricing plan. Creating custom RBAC policies lets you limit which areas of the Vendor Portal are accessible to team members, and control read and read/write privileges to groups based on their role. + +For more information, see [Configuring RBAC Policies](/vendor/team-management-rbac-configuring). + +### Can I alias Replicated endpoints? + +Yes. Replicated supports the use of custom domains to alias the endpoints for Replicated services, such as the Replicated app service and the Replicated proxy registry. + +Replicated domains are external to your domain and can require additional security reviews by your customer. Using custom domains as aliases can bring the domains inside an existing security review and reduce your exposure. + +For more information, see [Using Custom Domains](/vendor/custom-domains-using). + +### How does Replicated collect telemetry from instances of my application? + +For instances running in online (internet-connected) customer environments, either Replicated KOTS or the Replicated SDK periodically sends a small amount of data to the Vendor Portal, depending on which is installed in the cluster alongside the application. If both KOTS and the SDK are installed in the cluster, then both send instance data. + +For air gap instances, Replicated KOTS and the Replicated SDK collect and store instance telemetry in a Kubernetes Secret in the customer environment. The telemetry stored in the Secret is collected when a support bundle is generated in the environment. 
When the support bundle is uploaded to the Vendor Portal, the telemetry is associated with the correct customer and instance ID, and the Vendor Portal updates the instance insights and event data accordingly.
+
+For more information, see [About Instance and Event Data](/vendor/instance-insights-event-data).
+
+
+---
+
+
+# Introduction to kURL
+
+import KurlDefinition from "../partials/kurl/_kurl-definition.mdx"
+import Installers from "../partials/kurl/_installers.mdx"
+import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"
+
+# Introduction to kURL
+
+<KurlAvailability/>
+
+This topic provides an introduction to the Replicated kURL installer, including information about kURL specifications and installations.
+
+:::note
+The Replicated KOTS entitlement is required to install applications with KOTS and kURL. For more information, see [Pricing](https://www.replicated.com/pricing) on the Replicated website.
+:::
+
+## Overview
+
+<KurlDefinition/>
+
+### kURL Installers
+
+<Installers/>
+
+To distribute a kURL installer alongside your application, you can promote the installer to a channel or include the installer as a manifest file within a given release. For more information about creating kURL installers, see [Creating a kURL Installer](/vendor/packaging-embedded-kubernetes).
+
+### kURL Installations
+
+To install with kURL, users run a kURL installation script on their VM or bare metal server to provision a cluster.
+
+When the KOTS add-on is included in the kURL installer spec, the kURL installation script installs the KOTS CLI and KOTS Admin Console in the cluster. After the installation script completes, users can access the Admin Console at the URL provided in the output of the command to configure and deploy the application with KOTS.
+
+The following shows an example of the output of the kURL installation script:
+
+```bash
+        Installation
+          Complete ✔
+
+Kotsadm: http://10.128.0.35:8800
+Login with password (will not be shown again): 3Hy8WYYid
+
+This password has been set for you by default. It is recommended that you change
+this password; this can be done with the following command:
+kubectl kots reset-password default
+```
+
+kURL installations are supported in online (internet-connected) and air gap environments.
+
+For information about how to install applications with kURL, see [Online Installation with kURL](/enterprise/installing-kurl).
+
+## About the Open Source kURL Documentation
+
+The open source documentation for the kURL project is available at [kurl.sh](https://kurl.sh/docs/introduction/).
+
+The open source kURL documentation contains additional information including kURL installation options, kURL add-ons, and procedural content such as how to add and manage nodes in kURL clusters. Software vendors can use the open source kURL documentation to find detailed reference information when creating kURL installer specs or testing installation.
+
+---
+
+
+# Exposing Services Using NodePorts
+
+import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"
+
+# Exposing Services Using NodePorts
+
+<KurlAvailability/>
+
+This topic describes how to expose NodePort services in [Replicated Embedded Cluster](/vendor/embedded-overview) or [Replicated kURL](/vendor/kurl-about) installations on VMs or bare metal servers.
+
+## Overview
+
+For installations into existing clusters, KOTS automatically creates a port forward tunnel to expose the Admin Console.
Unlike installations into existing clusters, KOTS does _not_ automatically open the port forward tunnel for installations in embedded clusters provisioned on virtual machines (VMs) or bare metal servers. This is because KOTS cannot verify that the ports are secure and authenticated. For more information about the KOTS port forward tunnel, see [Port Forwarding Services with KOTS](/vendor/admin-console-port-forward).
+
+Instead, to expose the Admin Console in installations with [Embedded Cluster](/vendor/embedded-overview) or [kURL](/vendor/kurl-about), KOTS creates the Admin Console as a NodePort service so it can be accessed at the node's IP address on a node port (port 8800 for kURL installations and port 30000 for Embedded Cluster installations). Additionally, for kURL installations, the UIs of Prometheus, Grafana, and Alertmanager are also exposed using NodePorts.
+
+For installations on VMs or bare metal servers where your application must be accessible from the user's local machine rather than from inside the cluster, you can expose application services as NodePorts to provide access to the application after installation.
+
+## Add a NodePort Service
+
+Services with `type: NodePort` can be contacted from outside the cluster by connecting to any node using the appropriate protocol and port. For more information about working with the NodePort service type, see [type: NodePort](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport) in the Kubernetes documentation.
+
+The following shows an example of a NodePort type service:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: sentry
+  labels:
+    app: sentry
+spec:
+  type: NodePort
+  ports:
+  - port: 9000
+    targetPort: 9000
+    nodePort: 9000
+    protocol: TCP
+    name: sentry
+  selector:
+    app: sentry
+    role: web
+```
+
+After configuring a NodePort service for your application, you can add a link to the service on the Admin Console dashboard where it can be accessed by users after the application is installed. For more information, see [About Accessing NodePort Services](#about-accessing-nodeport-services) below.
+
+### Use KOTS Annotations to Conditionally Deploy NodePort Services
+
+You can use the KOTS [`kots.io/when`](/vendor/packaging-include-resources#kotsiowhen) annotation to conditionally deploy a service. This is useful when you want to deploy a ClusterIP or LoadBalancer service for existing cluster installations, and deploy a NodePort service for Embedded Cluster or kURL installations.
+
+To conditionally deploy a service based on the installation method, you can use the following KOTS template functions in the `kots.io/when` annotation:
+* [IsKurl](/reference/template-functions-static-context#iskurl): Detects kURL installations. For example, `repl{{ IsKurl }}` returns true for kURL installations, and `repl{{ not IsKurl }}` returns true for non-kURL installations.
+* [Distribution](/reference/template-functions-static-context#distribution): Returns the distribution of the cluster where KOTS is running. For example, `repl{{ eq Distribution "embedded-cluster" }}` returns true for Embedded Cluster installations and `repl{{ ne Distribution "embedded-cluster" }}` returns true for non-Embedded Cluster installations.
+
+For example, the following `sentry` service with `type: NodePort` includes the annotation `kots.io/when: repl{{ eq Distribution "embedded-cluster" }}`.
This creates a NodePort service _only_ when installing with Embedded Cluster:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: sentry
+  labels:
+    app: sentry
+  annotations:
+    # This annotation ensures that the NodePort service
+    # is only created in Embedded Cluster installations
+    kots.io/when: repl{{ eq Distribution "embedded-cluster" }}
+spec:
+  type: NodePort
+  ports:
+  - port: 9000
+    targetPort: 9000
+    nodePort: 9000
+    protocol: TCP
+    name: sentry
+  selector:
+    app: sentry
+    role: web
+```
+
+Similarly, to ensure that a `sentry` service with `type: ClusterIP` is only created in existing cluster installations, add the `kots.io/when: repl{{ ne Distribution "embedded-cluster" }}` annotation to the ClusterIP service specification:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: sentry
+  labels:
+    app: sentry
+  annotations:
+    # This annotation ensures that the ClusterIP service
+    # is only created in existing cluster installations
+    kots.io/when: repl{{ ne Distribution "embedded-cluster" }}
+spec:
+  type: ClusterIP
+  ports:
+  - port: 9000
+    targetPort: 9000
+    protocol: TCP
+    name: sentry
+  selector:
+    app: sentry
+    role: web
+```
+
+## About Accessing NodePort Services
+
+This section describes providing access to NodePort services after installation.
+
+### VM Firewall Requirements
+
+To be able to access the Admin Console and any NodePort services for your application, the firewall for the VM where the user installs must allow HTTP traffic and allow inbound traffic to the port where the service is exposed from their workstation. Users can consult their cloud provider's documentation for more information about updating firewall rules.
+
+### Add a Link on the Admin Console Dashboard {#add-link}
+
+You can provide a link to a NodePort service on the Admin Console dashboard by configuring the `links` array in the Kubernetes SIG Application custom resource. This provides users with an easy way to access the application after installation. For more information, see [Adding Links to the Dashboard](admin-console-adding-buttons-links).
+
+For example:
+
+<img alt="Admin Console dashboard with Open App link" src="/images/gitea-open-app.png" width="700px"/>
+
+[View a larger version of this image](/images/gitea-open-app.png)
+
+---
+
+
+# Resetting a kURL Cluster
+
+import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"
+
+# Resetting a kURL Cluster
+
+<KurlAvailability/>
+
+This topic describes how to use the kURL `reset` command to reset a kURL cluster.
+
+## Overview
+
+If you need to reset a kURL installation, such as when you are testing releases with kURL, you can use the kURL `tasks.sh` `reset` command to remove Kubernetes from the system.
+
+Alternatively, you can discard your current VM (if you are using one) and recreate the VM with a new OS to reinstall with kURL.
+
+For more information about the `reset` command, see [Resetting a Node](https://kurl.sh/docs/install-with-kurl/managing-nodes#reset-a-node) in the kURL documentation.
+
+To reset a kURL installation:
+
+1. Access the machine where you installed with kURL.
+
+1. Run the following command to remove Kubernetes from the system:
+
+   ```
+   curl -sSL https://k8s.kurl.sh/latest/tasks.sh | sudo bash -s reset
+   ```
+
+1. Follow the instructions in the output of the command to manually remove any files that the `reset` command does not remove.
+
+If the `reset` command is unsuccessful, discard your current VM, and recreate the VM with a new OS to reinstall the Admin Console and an application.
+ +--- + + +# About Community Licenses + +# About Community Licenses + +This topic describes community licenses. For more information about other types of licenses, see [Customer Types](licenses-about#customer-types) in _About Customers_. + +## Overview + +Community licenses are intended for use with a free or low cost version of your application. For example, you could use community licenses for an open source version of your application. + +After installing an application with a community license, users can replace their community license with a new license of a different type without having to completely reinstall the application. This means that, if you have several community users who install with the same license, then you can upgrade a single community user without editing the license for all community users. + +Community licenses are supported for applications that are installed with Replicated KOTS or with the Helm CLI. + +For applications installed with KOTS, community license users can upload a new license file of a different type in the Replicated admin console. For more information, see [Upgrade from a Community License](/enterprise/updating-licenses#upgrade-from-a-community-license) in _Updating Licenses in the Admin Console_. + +## Limitations + +Community licenses function in the same way as the other types of licenses, with the following +exceptions: + +* Updating a community license to another type of license cannot be reverted. +* Community license users are not supported by the Replicated Support team. +* Community licenses cannot support air gapped installations. +* Community licenses cannot include an expiration date. + +## Community License Admin Console Branding + +For applications installed with KOTS, the branding in the admin console for community users differs in the following ways: + +* The license tile on the admin console **Dashboard** page is highlighted in yellow and with the words **Community Edition**. + + ![Community License Dashboard](/images/community-license-dashboard.png) + + [View a larger version of this image](/images/community-license-dashboard.png) + +* All support bundles and analysis in the admin console are clearly marked as **Community Edition**. + + ![Community License Support Bundle](/images/community-license-bundle.png) + + [View a larger version of this image](/images/community-license-bundle.png) + +--- + + +# About Customers and Licensing + +import ChangeChannel from "../partials/customers/_change-channel.mdx" + +# About Customers and Licensing + +This topic provides an overview of customers and licenses in the Replicated Platform. + +## Overview + +The licensing features of the Replicated Platform allow vendors to securely grant access to software, making license agreements available to the application in end customer environments at startup and runtime. + +The Replicated Vendor Portal also allows vendors to create and manage customer records. Each customer record includes several fields that uniquely identify the customer and the application, specify the customer's assigned release channel, and define the customer's entitlements. + +Vendors can use these licensing features to enforce entitlements such as license expiration dates, and to track and report on software usage for the purpose of surfacing insights to both internal teams and customers. 
+ +The following diagram provides an overview of licensing with the Replicated Platform: + +![App instance communicates with the Replicated licensing server](/images/licensing-overview.png) + +[View a larger version of this image](/images/licensing-overview.png) + +As shown in the diagram above, the Replicated license and update server manages and distributes customer license information. The license server retrieves this license information from customer records managed by vendors in the Vendor Portal. + +During installation or upgrade, the customer's license ID is used to authenticate with the license server. The license ID also provides authentication for the Replicated proxy registry, securely granting proxy access to images in the vendor's external registry. + +The license server is identified with a CNAME record where it can be accessed from end customer environments. When running alongside an application in a customer environment, the Replicated SDK retrieves up-to-date customer license information from the license server during runtime. The in-cluster SDK API `/license/` endpoints can be used to get customer license information on-demand, allowing vendors to programmatically enforce and report on license agreements. + +Vendors can also integrate internal Customer Relationship Management (CRM) tools such as Salesforce with the Replicated Platform so that any changes to a customer's entitlements are automatically reflected in the Vendor Portal. This ensures that updates to license agreements are reflected in the customer environment in real time. + +## About Customers + +Each customer that you create in the Replicated Vendor Portal has a unique license ID. Your customers use their license when they install or update your application. + +You assign customers to channels in the Vendor Portal to control their access to your application releases. Customers can install or upgrade to releases that are promoted to the channel they are assigned. For example, assigning a customer to your Beta channel allows that customer to install or upgrade to only releases promoted to the Beta channel. + +Each customer license includes several fields that uniquely identify the customer and the application, specify the customer's assigned release channel, and define the customer's entitlements, such as if the license has an expiration date or what application functionality the customer can access. Replicated securely delivers these entitlements to the application and makes them available at installation or at runtime. + +For more information about how to create and manage customers, see [Creating and Managing Customers](releases-creating-customer). + +### Customer Channel Assignment {#channel-assignment} + +<ChangeChannel/> + +For example, if the latest release promoted to the Beta channel is version 1.25.0 and version 1.10.0 is marked as required, when you edit an existing customer to assign them to the Beta channel, then the KOTS Admin Console always fetches 1.25.0, even though 1.10.0 is marked as required. The required release 1.10.0 is ignored and is not available to the customer for upgrade. + +For more information about how to mark a release as required, see [Properties](releases-about#properties) in _About Channels and Releases_. For more information about how to synchronize licenses in the Admin Console, see [Updating Licenses in the Admin Console](/enterprise/updating-licenses). 
+
+### Customer Types
+
+Each customer is assigned one of the following types:
+
+* **Development**: The Development type can be used internally by the development
+team for testing and integration.
+* **Trial**: The Trial type can be used for customers who are on 2-4 week trials
+of your software.
+* **Paid**: The Paid type identifies the customer as a paying customer for which
+additional information can be provided.
+* **Community**: The Community type is designed for a free or low cost version of your application. For more details about this type, see [Community Licenses](licenses-about-types).
+* (Beta) **Single Tenant Vendor Managed**: The Single Tenant Vendor Managed type is for customers for whom your team is operating the application in infrastructure you fully control and operate. Single Tenant Vendor Managed licenses are free to use, but come with limited support. The Single Tenant Vendor Managed type is a Beta feature. Reach out to your Replicated account representative to get access.
+
+Except for Community licenses, the license type is used solely for reporting purposes and a customer's access to your application is not affected by the type that you assign.
+
+You can change the type of a license at any time in the Vendor Portal. For example, if a customer upgraded from a trial to a paid account, then you could change their license type from Trial to Paid for reporting purposes.
+
+### About Managing Customers
+
+Each customer record in the Vendor Portal has built-in fields and also supports custom fields:
+* The built-in fields include values such as the customer name, customer email, and the license expiration date. You can optionally set initial values for the built-in fields so that each new customer created in the Vendor Portal starts with the same set of values.
+* You can also create custom fields to define entitlements for your application. For example, you can create a custom field to set the number of active users permitted.
+
+For more information, see [Managing Customer License Fields](/vendor/licenses-adding-custom-fields).
+
+You can make changes to a customer record in the Vendor Portal at any time. The license ID, which is the unique identifier for the customer, never changes. For more information about managing customers in the Vendor Portal, see [Creating and Managing Customers](releases-creating-customer).
+
+### About the Customers Page
+
+The following shows an example of the **Customers** page:
+
+![Customers page](/images/customers-page.png)
+
+[View a larger version of this image](/images/customers-page.png)
+
+From the **Customers** page, you can do the following:
+
+* Create new customers.
+
+* Download CSVs with customer and instance data.
+
+* Search and filter customers.
+
+* Click the **Manage customer** button to edit details such as the customer name and email, the custom license fields assigned to the customer, and the license expiration policy. For more information, see [Creating and Managing Customers](releases-creating-customer).
+
+* Download the license file for each customer.
+
+* Click the **Customer reporting** button to view data about the active application instances associated with each customer. For more information, see [Customer Reporting](customer-reporting).
+ +* View instance details for each customer, including the version of the application that this instance is running, the Kubernetes distribution of the cluster, the last check-in time, and more: + + <img width="800px" src="/images/customer-reporting-details.png" /> + + [View a larger version of this image](/images/customer-reporting-details.png) + +* Archive customers. For more information, see [Creating and Managing Customers](releases-creating-customer). + +* Click on a customer on the **Customers** page to access the following customer-specific pages: + * [Reporting](#about-the-customer-reporting-page) + * [Manage customer](#about-the-manage-customer-page) + * [Support bundles](#about-the-customer-support-bundles-page) + +### About the Customer Reporting Page + +The **Reporting** page for a customer displays data about the active application instances associated with each customer. The following shows an example of the **Reporting** page for a customer that has two active application instances: + +![Customer reporting page in the Vendor Portal](/images/customer-reporting-page.png) +[View a larger version of this image](/images/customer-reporting-page.png) + +For more information about interpreting the data on the **Reporting** page, see [Customer Reporting](customer-reporting). + +### About the Manage Customer Page + +The **Manage customer** page for a customer displays details about the customer license, including the customer name and email, the license expiration policy, custom license fields, and more. + +The following shows an example of the **Manage customer** page: + +![Manage customer page in the Vendor Portal](/images/customer-details.png) +[View a larger version of this image](/images/customer-details.png) + +From the **Manage customer** page, you can view and edit the customer's license fields or archive the customer. For more information, see [Creating and Managing Customers](releases-creating-customer). + +### About the Customer Support Bundles Page + +The **Support bundles** page for a customer displays details about the support bundles collected from the customer. Customers with the **Support Bundle Upload Enabled** entitlement can provide support bundles through the KOTS Admin Console, or you can upload support bundles manually in the Vendor Portal by going to **Troubleshoot > Upload a support bundle**. For more information about uploading and analyzing support bundles, see [Inspecting Support Bundles](support-inspecting-support-bundles). + +The following shows an example of the **Support bundles** page: + +![Support bundles page in the Vendor Portal](/images/customer-support-bundles.png) +[View a larger version of this image](/images/customer-support-bundles.png) + +As shown in the screenshot above, the **Support bundles** page lists details about the collected support bundles, such as the date the support bundle was collected and the debugging insights found. You can click on a support bundle to view it in the **Support bundle analysis** page. You can also click **Delete** to delete the support bundle, or click **Customer Reporting** to view the **Reporting** page for the customer. + +## About Licensing with Replicated + +### About Syncing Licenses + +When you edit customer licenses for an application installed with a Replicated installer (Embedded Cluster, KOTS, kURL), your customers can use the KOTS Admin Console to get the latest license details from the Vendor Portal, then deploy a new version that includes the license changes. 
Deploying a new version with the license changes ensures that any license fields that you have templated in your release using [KOTS template functions](/reference/template-functions-about) are rendered with the latest license details.
+
+For online instances, KOTS pulls license details from the Vendor Portal when:
+* A customer clicks **Sync license** in the Admin Console.
+* An automatic or manual update check is performed by KOTS.
+* An update is performed with Replicated Embedded Cluster. See [Performing Updates with Embedded Cluster](/enterprise/updating-embedded).
+* An application status changes. See [Current State](instance-insights-details#current-state) in _Instance Details_.
+
+For more information, see [Updating Licenses in the Admin Console](/enterprise/updating-licenses).
+
+### About Syncing Licenses in Air-Gapped Environments
+
+To update licenses in air gap installations, customers need to upload the updated license file to the Admin Console.
+
+After you update the license fields in the Vendor Portal, you can notify customers by either sending them a new license file or instructing them to log in to their Download Portal to download the new license.
+
+For more information, see [Updating Licenses in the Admin Console](/enterprise/updating-licenses).
+
+### Retrieving License Details with the SDK API
+
+The [Replicated SDK](replicated-sdk-overview) includes an in-cluster API that can be used to retrieve up-to-date customer license information from the Vendor Portal during runtime through the [`license`](/reference/replicated-sdk-apis#license) endpoints. This means that you can add logic to your application to get the latest license information without the customer needing to perform a license update. The SDK API polls the Vendor Portal for updated data every four hours.
+
+In KOTS installations that include the SDK, users need to update their licenses from the Admin Console as described in [About Syncing Licenses](#about-syncing-licenses) above. However, any logic in your application that uses the SDK API will update the user's license information without the customer needing to deploy a license update in the Admin Console.
+
+For information about how to use the SDK API to query license entitlements at runtime, see [Querying Entitlements with the Replicated SDK API](/vendor/licenses-reference-sdk).
+
+### License Expiration Handling {#expiration}
+
+The built-in `expires_at` license field defines the expiration date for a customer license. When you set an expiration date in the Vendor Portal, the `expires_at` field is encoded in ISO 8601 format (`2026-01-23T00:00:00Z`) and is set to midnight UTC at the beginning of the calendar day (`00:00:00`) on the date selected.
+
+Replicated enforces the following logic when a license expires:
+* By default, instances with expired licenses continue to run.
+  To change the behavior of your application when a license expires, you can add custom logic in your application that queries the `expires_at` field using the Replicated SDK in-cluster API (see the sketch after this list). For more information, see [Querying Entitlements with the Replicated SDK API](/vendor/licenses-reference-sdk).
+* Customers with expired licenses cannot log in to the Replicated registry to pull a Helm chart for installation or upgrade.
+* Expired licenses cannot be used to pull application images through the Replicated proxy registry or from the Replicated registry.
+* In Replicated KOTS installations, KOTS prevents instances with expired licenses from receiving updates.
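+
+For example, the following is a minimal sketch of reading the `expires_at` field from the SDK in-cluster API, which custom expiration logic in your application could build on. The `replicated` service name and port `3000` are assumed SDK defaults, not taken from this document; adjust them for your environment:
+
+```bash
+# Minimal sketch: read the license expiration date from the Replicated
+# SDK in-cluster API. The `replicated` service name and port 3000 are
+# assumed defaults; adjust the host and port for your environment.
+curl -s http://replicated:3000/api/v1/license/fields/expires_at
+```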
+ +### Replacing Licenses for Existing Installations + +Community licenses are the only license type that can be replaced with a new license without needing to reinstall the application. For more information, see [Community Licenses](licenses-about-types). + +Unless the existing customer is using a community license, it is not possible to replace one license with another license without reinstalling the application. When you need to make changes to a customer's entitlements, Replicated recommends that you edit the customer's license details in the Vendor Portal, rather than issuing a new license. + + +--- + + +# Managing Customer License Fields + +# Managing Customer License Fields + +This topic describes how to manage customer license fields in the Replicated Vendor Portal, including how to add custom fields and set initial values for the built-in fields. + +## Set Initial Values for Built-In License Fields (Beta) + +You can set initial values to populate the **Create Customer** form in the Vendor Portal when a new customer is created. This ensures that each new customer created from the Vendor Portal UI starts with the same set of built-in license field values. + +:::note +Initial values are not applied to new customers created through the Vendor API v3. For more information, see [Create a customer](https://replicated-vendor-api.readme.io/reference/createcustomer-1) in the Vendor API v3 documentation. +::: + +These _initial_ values differ from _default_ values in that setting initial values does not update the license field values for any existing customers. + +To set initial values for built-in license fields: + +1. In the Vendor Portal, go to **License Fields**. + +1. Under **Built-in license options**, click **Edit** next to each license field where you want to set an initial value. + + ![Edit Initial Value](/images/edit-initial-value.png) + + [View a larger version of this image](/images/edit-initial-value.png) + +## Manage Custom License Fields + +You can create custom license fields in the Vendor Portal. For example, you can create a custom license field to set the number of active users permitted. Or, you can create a field that sets the number of nodes a customer is permitted on their cluster. + +The custom license fields that you create are displayed in the Vendor Portal for all new and existing customers. If the custom field is not hidden, it is also displayed to customers under the **Licenses** tab in the Replicated Admin Console. + +### Limitation + +The maximum size for a license field value is 64KB. + +### Create Custom License Fields + +To create a custom license field: + +1. Log in to the Vendor Portal and select the application. + +1. On the **License Fields** page, click **Create license field**. + + <img width="500" alt="create a new License Field dialog" src="/images/license-add-custom-field.png"/> + + [View a larger version of this image](/images/license-add-custom-field.png) + +1. Complete the following fields: + + | Field | Description | + |-----------------------|------------------------| + | Field | The name used to reference the field. This value cannot be changed. | + | Title| The display name for the field. This is how the field appears in the Vendor Portal and the Admin Console. You can change the title in the Vendor Portal. | + | Type| The field type. Supported formats include integer, string, text (multi-line string), and boolean values. This value cannot be changed. | + | Default | The default value for the field for both existing and new customers. 
It is a best practice to provide a default value when possible. The maximum size for a license field value is 64KB. |
+   | Required | If checked, this prevents the creation of customers unless this field is explicitly defined with a value. |
+   | Hidden | If checked, the field is not visible to your customer in the Replicated Admin Console. The field is still visible to you in the Vendor Portal. **Note**: The Hidden field is displayed only for vendors with access to the Replicated installers (KOTS, kURL, Embedded Cluster). |
+
+### Update Custom License Fields
+
+To update a custom license field:
+
+1. Log in to the Vendor Portal and select the application.
+1. On the **License Fields** page, click **Edit Field** on the right side of the target row. Changing the default value for a field updates the value for each existing customer record that has not overridden the default value.
+
+   :::important
+   Enabling **Is this field required?** updates the license field to be required for all new and existing customers. If you enable **Is this field required?**, you must either set a default value for the field or manually update each existing customer to provide a value for the field.
+   :::
+
+### Set Customer-Specific Values for Custom License Fields
+
+To set a customer-specific value for a custom license field:
+
+1. Log in to the Vendor Portal and select the application.
+1. Click **Customers**.
+1. For the target customer, click the **Manage customer** button.
+1. Under **Custom fields**, enter values for the target custom license fields for the customer.
+
+   :::note
+   The maximum size for a license field value is 64KB.
+   :::
+
+   <img width="600" alt="Custom license fields section in the manage customer page" src="/images/customer-license-custom-fields.png"/>
+
+   [View a larger version of this image](/images/customer-license-custom-fields.png)
+
+### Delete Custom License Fields
+
+Deleted license fields and their values do not appear in the customer's license in any location, including your view in the Vendor Portal, the downloaded YAML version of the license, and the Admin Console **License** screen.
+
+By default, deleting a custom license field also deletes all of the values associated with the field in each customer record.
+
+Only administrators can delete license fields.
+
+:::important
+Replicated recommends that you take care when deleting license fields.
+
+Outages can occur for existing deployments if your application or the Admin Console **Config** page expects a license file to provide a required value.
+:::
+
+To delete a custom license field:
+
+1. Log in to the Vendor Portal and select the application.
+1. On the **License Fields** page, click **Edit Field** on the right side of the target row.
+1. Click **Delete** on the bottom left of the dialog.
+1. (Optional) Enable **Preserve License Values** to save values for the license field that were not set by the default in each customer record. Preserved license values are not visible to you or the customer.
+
+   :::note
+   If you enable **Preserve License Values**, you can create a new field with the same name and `type` as the deleted field to reinstate the preserved values.
+   :::
+
+1. Follow the instructions in the dialog and click **Delete**.
+
+---
+
+
+# Downloading Customer Licenses
+
+import AirGapLicenseDownload from "../partials/install/_airgap-license-download.mdx"
+
+# Downloading Customer Licenses
+
+This topic describes how to download a license file from the Replicated Vendor Portal.
+
+For information about how to download customer licenses with the Vendor API v3, see [Download a customer license file as YAML](https://replicated-vendor-api.readme.io/reference/downloadlicense) in the Vendor API v3 documentation.
+
+## Download Licenses
+
+You can download license files for your customers from the **Customers** page in the Vendor Portal.
+
+To download a license:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Customers** page.
+1. In the row for the target customer, click the **Download License** button.
+
+   ![Download license button](/images/download-license-button.png)
+
+   [View a larger version of this image](/images/download-license-button.png)
+
+## Enable and Download Air Gap Licenses {#air-gap-license}
+
+The **Airgap Download Enabled** license option allows KOTS to install an application without outbound internet access using the `.airgap` bundle.
+
+To enable the air gap entitlement and download the license:
+
+<AirGapLicenseDownload/>
+
+---
+
+
+# Managing Install Types for a License
+
+import InstallerOnlyAnnotation from "../partials/helm/_installer-only-annotation.mdx"
+
+# Managing Install Types for a License
+
+This topic describes how to manage which installation types and options are enabled for a license.
+
+## Overview
+
+You can control which installation methods are available to each of your customers by enabling or disabling **Install types** fields in the customer's license.
+
+The following shows an example of the **Install types** field in a license:
+
+![Install types license fields](/images/license-install-types.png)
+
+[View a larger version of this image](/images/license-install-types.png)
+
+The installation types that are enabled or disabled for a license determine the following:
+* The Replicated installers ([Replicated KOTS](../intro-kots), [Replicated Embedded Cluster](/vendor/embedded-overview), [Replicated kURL](/vendor/kurl-about)) that the customer's license entitles them to use
+* The installation assets and/or instructions provided in the Replicated Download Portal for the customer
+* The customer's KOTS Admin Console experience
+
+Setting the supported installation types on a per-customer basis gives you greater control over the installation method used by each customer. It also allows you to provide a more curated Download Portal experience, in that customers will only see the installation assets and instructions that are relevant to them.
+
+## Understanding Install Types {#install-types}
+
+In the customer license, under **Install types**, the **Available install types** field allows you to enable and disable different installation methods for the customer.
+
+You can enable one or more installation types for a license.
+
+The following describes each installation type available, as well as the requirements for enabling each type:
+
+<table>
+  <tr>
+    <th width="30%">Install Type</th>
+    <th width="35%">Description</th>
+    <th>Requirements</th>
+  </tr>
+  <tr>
+    <th>Existing Cluster (Helm CLI)</th>
+    <td><p>Allows the customer to install with Helm in an existing cluster. The customer does not have access to the Replicated installers (Embedded Cluster, KOTS, and kURL).</p><p>When the <strong>Helm CLI Air Gap Instructions (Helm CLI only)</strong> install option is also enabled, the Download Portal displays instructions on how to pull Helm installable images into a local repository.
See <a href="#install-options">Understanding Additional Install Options</a> below.</p></td> + <td> + <p>The latest release promoted to the channel where the customer is assigned must contain one or more Helm charts. It can also include Replicated custom resources, such as the Embedded Cluster Config custom resource, the KOTS HelmChart, Config, and Application custom resources, or the Troubleshoot Preflight and SupportBundle custom resources.</p> + <InstallerOnlyAnnotation/> + </td> + </tr> + <tr> + <th>Existing Cluster (KOTS install)</th> + <td>Allows the customer to install with Replicated KOTS in an existing cluster.</td> + <td> + <ul> + <li>Your Vendor Portal team must have the KOTS entitlement</li> + <li>The latest release promoted to the channel where the customer is assigned must contain KOTS custom resources, such as the KOTS HelmChart, Config, and Application custom resources. For more information, see [About Custom Resources](/reference/custom-resource-about).</li> + </ul> + </td> + </tr> + <tr> + <th>kURL Embedded Cluster (first generation product)</th> + <td> + <p>Allows the customer to install with Replicated kURL on a VM or bare metal server.</p> + <p><strong>Note:</strong> For new installations, enable Replicated Embedded Cluster (current generation product) instead of Replicated kURL (first generation product).</p> + </td> + <td> + <ul> + <li>Your Vendor Portal team must have the kURL entitlement</li> + <li>A kURL installer spec must be promoted to the channel where the customer is assigned. For more information, see <a href="/vendor/packaging-embedded-kubernetes">Creating a kURL Installer</a>.</li> + </ul> + </td> + </tr> + <tr> + <th>Embedded Cluster (current generation product)</th> + <td>Allows the customer to install with Replicated Embedded Cluster on a VM or bare metal server.</td> + <td> + <ul> + <li>Your Vendor Portal team must have the Embedded Cluster entitlement</li> + <li>The latest release promoted to the channel where the customer is assigned must contain an Embedded Cluster Config custom resource. For more information, see <a href="/reference/embedded-config">Embedded Cluster Config</a>.</li> + </ul> + </td> + </tr> +</table> + +## Understanding Additional Install Options {#install-options} + +After enabling installation types in the **Available install types** field, you can also enable the following options in the **Additional install options** field: + +<table> + <tr> + <th width="30%">Install Type</th> + <th>Description</th> + <th>Requirements</th> + </tr> + <tr> + <th>Helm CLI Air Gap Instructions (Helm CLI only)</th> + <td><p>When enabled, a customer will see instructions on the Download Portal on how to pull Helm installable images into their local repository.</p><p><strong>Helm CLI Air Gap Instructions</strong> is enabled by default when you select the <strong>Existing Cluster (Helm CLI)</strong> install type. 
For more information, see [Installing with Helm in Air Gap Environments](/vendor/helm-install-airgap).</p></td>
+    <td>The <strong>Existing Cluster (Helm CLI)</strong> install type must be enabled</td>
+  </tr>
+  <tr>
+    <th>Air Gap Installation Option (Replicated Installers only)</th>
+    <td><p>When enabled, new installations with this license have an option in their Download Portal to install from an air gap package or do a traditional online installation.</p></td>
+    <td>
+      <p>At least one of the following Replicated install types must be enabled:</p>
+      <ul>
+        <li>Existing Cluster (KOTS install)</li>
+        <li>kURL Embedded Cluster (first generation product)</li>
+        <li>Embedded Cluster (current generation product)</li>
+      </ul>
+    </td>
+  </tr>
+</table>
+
+## About Migrating Existing Licenses to Use Install Types
+
+By default, when an existing customer license is migrated to include the Beta **Install types** field, the Vendor Portal automatically enables certain install types so that the customer does not experience any interruptions or errors in their deployment.
+
+The Vendor Portal uses the following logic to enable install types for migrated licenses:
+
+If the existing license has the **KOTS Install Enabled** field enabled, then the Vendor Portal enables the following install types in the migrated license by default:
+* Existing Cluster (Helm CLI)
+* Existing Cluster (KOTS install)
+* kURL Embedded Cluster (first generation product)
+* Embedded Cluster (current generation product)
+
+Additionally, if the existing **KOTS Install Enabled** license also has the **Airgap Download Enabled** option enabled, then the Vendor Portal enables both of the air gap install options in the migrated license (**Helm CLI Air Gap Instructions (Helm CLI only)** and **Air Gap Installation Option (Replicated Installers only)**).
+
+Otherwise, if the **KOTS Install Enabled** field is disabled for the existing license, then the Vendor Portal enables only the **Existing Cluster (Helm CLI)** install type by default. All other install types will be disabled by default.
+
+
+---
+
+
+# Checking Entitlements in Helm Charts Before Deployment
+
+# Checking Entitlements in Helm Charts Before Deployment
+
+This topic describes how to check license entitlements before a Helm chart is installed or upgraded. The information in this topic applies to Helm charts installed with Replicated KOTS or Helm.
+
+The Replicated SDK API can be used to check entitlements at runtime. For more information, see [Querying Entitlements with the Replicated SDK API](licenses-reference-sdk).
+
+## Overview
+
+The Replicated registry automatically injects customer entitlement information in the `global.replicated.licenseFields` field of your Helm chart values. For example:
+
+```yaml
+# Helm chart values.yaml
+global:
+  replicated:
+    licenseFields:
+      expires_at:
+        description: License Expiration
+        name: expires_at
+        signature:
+          v1: iZBpESXx7fpdtnbMKingYHiJH42rP8fPs0x8izy1mODckGBwVoA...
+        title: Expiration
+        value: "2023-05-30T00:00:00Z"
+        valueType: String
+```
+
+You can access the values in the `global.replicated.licenseFields` field from your Helm templates to check customer entitlements before installation.
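+
+For example, the following is a minimal sketch of a named Helm template that fails an installation or upgrade when the injected `expires_at` value is in the past. The template name `myapp.enforceLicenseExpiry` is hypothetical; because both timestamps are ISO 8601 strings in UTC, a lexicographic comparison is sufficient. You would include this template from a rendered manifest so that `helm install` or `helm upgrade` aborts with the failure message.
+
+```yaml
+# templates/_helpers.tpl (hypothetical file name)
+# Minimal sketch: abort the release if the license has expired.
+{{- define "myapp.enforceLicenseExpiry" -}}
+{{- $expiry := dig "replicated" "licenseFields" "expires_at" "value" "" (.Values.global | default dict) -}}
+{{- if and $expiry (lt $expiry (now | date "2006-01-02T15:04:05Z")) -}}
+{{- fail (printf "License expired on %s" $expiry) -}}
+{{- end -}}
+{{- end -}}
+```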
+
+## Prerequisite
+
+Add the Replicated SDK to your application:
+* For Helm-based applications, see [Install the SDK as a Subchart](/vendor/replicated-sdk-installing#install-the-sdk-as-a-subchart) in _Installing the Replicated SDK_
+* For applications that use standard Kubernetes manifests, see [Install the SDK Alongside a Kubernetes Manifest-Based Application](/vendor/replicated-sdk-installing#manifest-app) in _Installing the Replicated SDK_
+
+## Check Entitlements Before Installation or Upgrade
+
+To check entitlements before installation:
+
+1. Create or edit a customer to use for testing:
+
+   1. In the Vendor Portal, click **Customers**. Select a customer and click the **Manage customer** tab. Alternatively, click **+ Create customer** to create a new customer. For more information, see [Creating and Managing Customers](/vendor/releases-creating-customer).
+
+   1. Edit the built-in license fields or add custom fields for the customer. For example, you can set a license expiration date in the **Expiration policy** field. Or, you can create a custom field that limits the number of nodes a user is permitted in their cluster. For more information, see [Managing Customer License Fields](/vendor/licenses-adding-custom-fields).
+
+1. In your Helm chart, update the Helm templates with one or more directives to access the license field. For example, you can access the built-in `expires_at` field with `{{ .Values.global.replicated.licenseFields.expires_at }}`. Add the desired logic to control application behavior based on the values of license fields.
+
+   For more information about accessing values files from Helm templates, see [Values Files](https://helm.sh/docs/chart_template_guide/values_files/) in the _Chart Template Guide_ section of the Helm documentation.
+
+1. Test your changes by promoting a new release and installing in a development environment:
+
+   1. Package your Helm chart and its dependencies into a `.tgz` chart archive. See [Packaging a Helm Chart for a Release](helm-install-release).
+
+   1. Add the `.tgz` archive to a release and promote to a development channel, such as Unstable. See [Managing Releases with the Vendor Portal](/vendor/releases-creating-releases).
+
+   1. Install in a development environment using the license ID for the test customer that you created. See [Installing with Helm](install-with-helm).
+
+1. Repeat these steps to add and test new license fields.
+
+---
+
+
+# Querying Entitlements with the KOTS License API
+
+# Querying Entitlements with the KOTS License API
+
+This topic describes how to use the Replicated KOTS License API to query license fields during runtime. The information in this topic applies to applications installed with KOTS.
+
+:::important
+Using the KOTS License API to check entitlements during runtime is _not_ recommended for new applications distributed with Replicated. Instead, Replicated recommends that you include the Replicated SDK with your application and query entitlements during runtime using the SDK in-cluster API. See [Querying Entitlements with the Replicated SDK API](licenses-reference-sdk).
+:::
+
+## Overview
+
+KOTS includes default logic to control access to features in the KOTS Admin Console and KOTS CLI based on the values for the built-in fields in the customer's license. For example, by default, KOTS uses the built-in `expires_at` field to prevent an instance from receiving updates when the customer license expires.
You can add custom logic to your application to control its behavior based on the built-in fields or any of the custom fields that you create.
+
+For information about creating custom license fields, see [Managing Customer License Fields](licenses-adding-custom-fields). For the list of built-in fields in customer licenses, see [Built-In License Fields](/vendor/licenses-using-builtin-fields).
+
+The KOTS Admin Console runs on the customer's cluster and provides entitlement information during application runtime. You can query the Admin Console `/license/v1/license` endpoint to enforce entitlements at runtime.
+
+## Query Fields
+
+To reference license fields at runtime, send an HTTP request to the Admin Console `/license/v1/license` endpoint at the following location:
+
+```
+http://kotsadm:3000/license/v1/license
+```
+
+The query returns a response in JSON format. For example:
+
+```json
+{"license_id":"WicPRaoCv1pJ57ZMf-iYRxTj25eZalw3",
+"installation_id":"a4r1s31mj48qw03b5vwbxvm5x0fqtdl6",
+"assignee":"FirstCustomer",
+"release_channel":"Unstable",
+"license_type":"trial",
+"expiration_time":"2026-01-23T00:00:00Z",
+"fields":[
+  {"field":"Customer ID","title":"Customer ID (Internal)","type":"Integer","value":121,"hide_from_customer":true},
+  {"field":"Modules","title":"Enabled Modules","type":"String","value":"Analytics, Integration"}]}
+```
+
+## Parse the API Response
+
+To return a license field value, parse the response using the name of the license
+field.
+
+For example, the following JavaScript parses the response for the value of a
+`seat_count` custom field:
+
+```javascript
+import * as rp from "request-promise";
+
+rp({
+  uri: "http://kotsadm:3000/license/v1/license",
+  json: true
+}).then(license => {
+  const seatCount = license.fields.find((field) => {
+    return field.field === "seat_count";
+  });
+  console.log(seatCount.value);
+}).catch(err => {
+  // Handle error response from `kotsadm`
+});
+```
+
+
+---
+
+
+# Querying Entitlements with the Replicated SDK API
+
+# Querying Entitlements with the Replicated SDK API
+
+This topic describes how to query license entitlements at runtime using the Replicated SDK in-cluster API. The information in this topic applies to applications installed with Replicated KOTS or Helm.
+
+## Overview
+
+The Replicated SDK retrieves up-to-date customer license information from the Vendor Portal during runtime. This means that any changes to customer licenses are reflected in real time in the customer environment. For example, you can revoke access to your application when a license expires, expose additional product functionality dynamically based on entitlements, and more. For more information about distributing the SDK with your application, see [About the Replicated SDK](replicated-sdk-overview).
+
+After the Replicated SDK is initialized and running in a customer environment, you can use the following SDK API endpoints to get information about the license:
+* `/api/v1/license/info`: List license details, including the license ID, the channel the customer is assigned, and the license type.
+* `/api/v1/license/fields`: List all the fields in the license.
+* `/api/v1/license/fields/{field_name}`: List details about a specific license field, including the field name, description, type, and the value.
+
+For more information about these endpoints, see [license](/reference/replicated-sdk-apis#license) in _Replicated SDK API_.
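+
+For example, assuming the default in-cluster service name and port for the SDK (`replicated` on port `3000`), an application component could call these endpoints as follows:
+
+```bash
+# List license details, such as the license ID, assigned channel, and license type
+curl -s http://replicated:3000/api/v1/license/info
+
+# List all fields in the license
+curl -s http://replicated:3000/api/v1/license/fields
+
+# Get a single license field, such as the built-in expires_at field
+curl -s http://replicated:3000/api/v1/license/fields/expires_at
+```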
+ +## Prerequisite + +Add the Replicated SDK to your application: +* For Helm-based applications, see [Install the SDK as a Subchart](/vendor/replicated-sdk-installing#install-the-sdk-as-a-subchart) in _Installing the Replicated SDK_ +* For applications that use standard Kubernetes manifests, see [Install the SDK Alongside a Standard Manifest-Based Application](/vendor/replicated-sdk-installing#manifest-app) in _Installing the Replicated SDK_ + +## Query License Entitlements at Runtime {#runtime} + +To use the SDK API to query entitlements at runtime: + +1. Create or edit a customer to use for testing: + + 1. In the Vendor Portal, click **Customers**. Select a customer and click the **Manage customer** tab. Alternatively, click **+ Create customer** to create a new customer. For more information, see [Creating and Managing Customers](/vendor/releases-creating-customer). + + 1. Edit the built-in fields and add custom fields for the customer. For example, you can set a license expiration date in the **Expiration policy** field. Or, you can create a custom field that limits the number of nodes a user is permitted in their cluster. For more information, see [Managing Customer License Fields](/vendor/licenses-adding-custom-fields). + +1. (Recommended) Develop against the SDK API `license` endpoints locally: + + 1. Install the Replicated SDK as a standalone component in your cluster. This is called _integration mode_. Installing in integration mode allows you to develop locally against the SDK API without needing to create releases for your application in the Vendor Portal. See [Developing Against the SDK API](/vendor/replicated-sdk-development). + + 1. In your application, add logic to control application behavior based on the customer license information returned by the SDK API service running in your cluster. See [license](/reference/replicated-sdk-apis#license) in _Replicated SDK API (Beta)_. + + **Example:** + + ```bash + curl replicated:3000/api/v1/license/fields/expires_at + ``` + + ```json + { + "name": "expires_at", + "title": "Expiration", + "description": "License Expiration", + "value": "2023-05-30T00:00:00Z", + "valueType": "String", + "signature": { + "v1": "c6rsImpilJhW0eK+Kk37jeRQvBpvWgJeXK2M..." + } + } + ``` + +1. When you are ready to test your changes outside of integration mode, do the following: + + 1. Package your Helm chart and its dependencies (including the Replicated SDK) into a `.tgz` chart archive. See [Packaging a Helm Chart for a Release](helm-install-release). + + 1. Add the `.tgz` archive to a release and promote to a development channel, such as Unstable. See [Managing Releases with the Vendor Portal](/vendor/releases-creating-releases). + + 1. Install in a development environment using the license ID for the test customer that you created. See [Installing with Helm](install-with-helm). + + 1. (Optional) As needed, verify the license information returned by the SDK API in your development environment using port forwarding to access the SDK service locally: + + 1. Use port forwarding to access the `replicated` service from the local development environment on port 3000: + + ```bash + kubectl port-forward service/replicated 3000 + ``` + + The output looks similar to the following: + + ```bash + Forwarding from 127.0.0.1:3000 -> 3000 + ``` + + For more information about `kubectl port-forward`, see [port-forward](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#port-forward) in the kubectl reference documentation. + + 1. 
With the port forward running, in another terminal, use the SDK API to return information about the license.
+
+      **Example:**
+
+      ```bash
+      curl localhost:3000/api/v1/license/fields/expires_at
+      ```
+
+1. Repeat these steps to add and test new license fields.
+
+1. (Recommended) Use signature verification in your application to ensure the integrity of the license field. See [Verifying License Field Signatures with the Replicated SDK API](/vendor/licenses-verify-fields-sdk-api).
+
+---
+
+
+# Checking Entitlements in Preflights with KOTS Template Functions
+
+# Checking Entitlements in Preflights with KOTS Template Functions
+
+This topic describes how to check custom entitlements before installation or upgrade using preflight checks and KOTS template functions in the License context. The information in this topic applies to applications installed with KOTS.
+
+## Overview
+
+KOTS includes default logic to control access to features in the Replicated Admin Console and KOTS CLI based on the values for the built-in fields in the customer's license. For example, by default, KOTS uses the built-in `expires_at` field to prevent an instance from receiving updates when the customer license expires. You can add custom logic to your application to control its behavior based on the built-in fields or any of the custom fields that you create.
+
+For more information, see [Managing Customer License Fields](licenses-adding-custom-fields). For the list of built-in fields in customer licenses, see [Built-In License Fields](/vendor/licenses-using-builtin-fields).
+
+## Add Preflights to Check Entitlements Before Installation or Upgrade {#install}
+
+To enforce entitlements when your customer installs or updates your application,
+you can use the Replicated LicenseFieldValue template function in your application to read the value of license fields. The LicenseFieldValue template function accepts the built-in license fields and any custom fields that you configure. For more information, see [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue) in _License Context_.
+
+For example, a license might limit how many nodes are permitted in a customer's
+cluster. You could define this limit by creating a `node_count` custom license field:
+
+| Name | Key | Type | Description |
+|------|-----|------|-------------|
+| Node Count | node_count | Integer | The maximum number of nodes permitted |
+
+To enforce the node count when a customer installs or updates your application,
+you can use LicenseFieldValue to create a preflight check that references the custom `node_count` field:
+
+```yaml
+apiVersion: troubleshoot.sh/v1beta2
+kind: Preflight
+metadata:
+  name: example-preflight-checks
+spec:
+  analyzers:
+    - nodeResources:
+        checkName: Node Count Check
+        outcomes:
+          - fail:
+              when: 'count() > {{repl LicenseFieldValue "node_count"}}'
+              message: The cluster has more nodes than the {{repl LicenseFieldValue "node_count"}} you are licensed for.
+          - pass:
+              message: The number of nodes matches your license ({{repl LicenseFieldValue "node_count"}})
+```
+
+In the example above, the preflight check uses the `nodeResources` analyzer and the value of the custom `node_count` field to determine if the customer has exceeded the maximum number of nodes permitted by their license. If the preflight check fails, a failure message is displayed to the user and KOTS prevents the installation or upgrade from continuing.
+
+For more information about this example, see [How Can I Use License Custom Fields Value in a Pre-Flight Check?](https://help.replicated.com/community/t/how-can-i-use-license-custom-fields-value-in-a-pre-flight-check/624) in Replicated Community.
+
+For more information about defining preflight checks, see [Defining Preflight Checks](preflight-defining).
+
+---
+
+
+# Built-In License Fields
+
+import GitOpsNotRecommended from "../partials/gitops/_gitops-not-recommended.mdx"
+
+# Built-In License Fields
+
+This topic describes the built-in license fields that appear in customer licenses for applications distributed with Replicated.
+
+## Overview
+
+The license associated with each customer record in the Replicated Vendor Portal includes several built-in fields. These built-in fields include customer properties (such as the customer name, customer email, and the Vendor Portal channel where the customer is assigned), the license expiration date, as well as the Replicated features that are enabled for the customer (such as the supported install types or Admin Console features).
+
+When a customer installs an application distributed with Replicated, the values for each built-in and custom field in their license can be accessed using the [Replicated SDK](/vendor/replicated-sdk-overview) in-cluster API [license](/reference/replicated-sdk-apis#license) endpoints. Applications installed with a Replicated installer (KOTS, kURL, Embedded Cluster) can also access license fields using the Replicated KOTS [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue) template function.
+
+The following shows an example of a customer license:
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: License
+metadata:
+  name: customertest
+spec:
+  appSlug: gitea
+  channelID: 2iy68JBTkvUqamgD...
+  channelName: Beta
+  channels:
+  - channelID: 2iy68JBTkvUqamgD...
+    channelName: Beta
+    channelSlug: beta
+    endpoint: https://replicated.app
+    isDefault: true
+    isSemverRequired: true
+    replicatedProxyDomain: proxy.replicated.com
+  customerEmail: example@replicated.com
+  customerName: Customer Test
+  endpoint: https://replicated.app
+  entitlements:
+    expires_at:
+      description: License Expiration
+      signature: {}
+      title: Expiration
+      value: ""
+      valueType: String
+  isAirgapSupported: true
+  isEmbeddedClusterDownloadEnabled: true
+  isKotsInstallEnabled: true
+  isSemverRequired: true
+  isSupportBundleUploadSupported: true
+  licenseID: 2sY6ZC2J9sO2...
+  licenseSequence: 4
+  licenseType: prod
+  replicatedProxyDomain: proxy.replicated.com
+  signature: eyJsaWNlbnNlRGF...
+```
+
+## License Field Names
+
+This section lists the built-in fields that are included in customer licenses for applications distributed with Replicated.
+
+:::note
+The built-in license fields are reserved field names.
+:::
+
+### General License Fields
+
+<table>
+  <tr>
+    <td>Field Name</td>
+    <td>Description</td>
+  </tr>
+  <tr>
+    <td>`appSlug`</td>
+    <td>The unique application slug that the customer is associated with. This value never changes.</td>
+  </tr>
+  <tr>
+    <td>`channelID`</td>
+    <td>The ID of the channel where the customer is assigned. When the customer's assigned channel changes, the latest release from that channel will be downloaded on the next update check.</td>
+  </tr>
+  <tr>
+    <td>`channelName`</td>
+    <td>The name of the channel where the customer is assigned.
When the customer's assigned channel changes, the latest release from that channel will be downloaded on the next update check.</td> + </tr> + <tr> + <td>`licenseID`, `licenseId`</td> + <td>Unique ID for the installed license. This value never changes.</td> + </tr> + <tr> + <td>`customerEmail`</td> + <td>The customer email address.</td> + </tr> + <tr> + <td>`endpoint`</td> + <td>For applications installed with a Replicated installer (KOTS, kURL, Embedded Cluster), this is the endpoint that the KOTS Admin Console uses to synchronize the licenses and download updates. This is typically `https://replicated.app`.</td> + </tr> + <tr> + <td>`entitlementValues`</td> + <td>Includes both the built-in `expires_at` field and any custom license fields. For more information about adding custom license fields, see [Managing Customer License Fields](licenses-adding-custom-fields).</td> + </tr> + <tr> + <td>`expires_at`</td> + <td><p>Defines the expiration date for the license. The date is encoded in ISO 8601 format (`2026-01-23T00:00:00Z`) and is set to midnight UTC at the beginning of the calendar day (`00:00:00`) on the date selected. If a license does not expire, this field is missing.</p><p>For information about the default behavior when a license expires, see [License Expiration Handling](licenses-about#expiration) in _About Customers_.</p></td> + </tr> + <tr> + <td>`licenseSequence`</td> + <td>Every time a license is updated, its sequence number is incremented. This value represents the license sequence that the client currently has.</td> + </tr> + <tr> + <td>`customerName`</td> + <td>The name of the customer.</td> + </tr> + <tr> + <td>`signature`</td> + <td>The base64-encoded license signature. This value will change when the license is updated.</td> + </tr> + <tr> + <td>`licenseType`</td> + <td>A string value that describes the type of the license, which is one of the following: `paid`, `trial`, `dev`, `single-tenant-vendor-managed` or `community`. For more information about license types, see [Managing License Type](licenses-about-types).</td> + </tr> +</table> + +### Install Types + +The table below describes the built-in license fields related to the supported install type(s). For more information, see [Managing Install Types for a License](/vendor/licenses-install-types). + +<table> + <tr> + <td>Field Name</td> + <td>Description</td> + </tr> + <tr> + <td>`isEmbeddedClusterDownloadEnabled`</td> + <td><p>If a license supports installation with Replicated Embedded Cluster, this field is set to `true` or missing. If Embedded Cluster installations are not supported, this field is `false`.</p><p>This field requires that the vendor has the Embedded Cluster entitlement and that the release at the head of the channel includes an [Embedded Cluster Config](/reference/embedded-config) custom resource. This field also requires that the "Install Types" feature is enabled for your Vendor Portal team. Reach out to your Replicated account representative to get access.</p></td> + </tr> + <tr> + <td>`isHelmInstallEnabled`</td> + <td><p>If a license supports Helm installations, this field is set to `true` or missing. If Helm installations are not supported, this field is set to `false`. This field requires that the vendor packages the application as Helm charts and, optionally, Replicated custom resources.</p><p> This field requires that the "Install Types" feature is enabled for your Vendor Portal team. 
Reach out to your Replicated account representative to get access.</p></td> + </tr> + <tr> + <td>`isKotsInstallEnabled`</td> + <td><p>If a license supports installation with Replicated KOTS, this field is set to `true`. If KOTS installations are not supported, this field is either `false` or missing.</p><p>This field requires that the vendor has the KOTS entitlement.</p></td> + </tr> + <tr> + <td>`isKurlInstallEnabled`</td> + <td><p>If a license supports installation with Replicated kURL, this field is set to `true` or missing. If kURL installations are not supported, this field is `false`. </p><p>This field requires that the vendor has the kURL entitlement and a promoted kURL installer spec. This field also requires that the "Install Types" feature is enabled for your Vendor Portal team. Reach out to your Replicated account representative to get access.</p></td> + </tr> +</table> + +### Install Options + +The table below describes the built-in license fields related to install options. + +<table> + <tr> + <td>Field Name</td> + <td>Description</td> + </tr> + <tr> + <td>`isAirgapSupported`</td> + <td><p>If a license supports air gap installations with the Replicated installers (KOTS, kURL, Embedded Cluster), then this field is set to `true`. If Replicated installer air gap installations are not supported, this field is missing.</p><p>When you enable this field for a license, the `license.yaml` file will have license metadata embedded in it and must be re-downloaded.</p></td> + </tr> + <tr> + <td>`isHelmAirgapEnabled`</td> + <td><p>If a license supports Helm air gap installations, then this field is set to `true` or missing. If Helm air gap is not supported, this field is missing.</p><p> When you enable this feature, the `license.yaml` file will have license metadata embedded in it and must be re-downloaded.</p><p>This field requires that the "Install Types" feature is enabled for your Vendor Portal team. Reach out to your Replicated account representative to get access.</p></td> + </tr> +</table> + +### Admin Console Feature Options + +The table below describes the built-in license fields related to the Admin Console feature options. The Admin Console feature options apply only to licenses that support installation with the Replicated installers (KOTS, kURL, Embedded Cluster). + +<table> + <tr> + <td>Field Name</td> + <td>Description</td> + </tr> + <tr> + <td>`isDisasterRecoverySupported`</td> + <td>If a license supports the Embedded Cluster disaster recovery feature, this field is set to `true`. If a license does not support disaster recovery for Embedded Cluster, this field is either missing or `false`. **Note**: Embedded Cluster Disaster Recovery is in Alpha. To get access to this feature, reach out to Alex Parker at [alexp@replicated.com](mailto:alexp@replicated.com). For more information, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery).</td> + </tr> + <tr> + <td>`isGeoaxisSupported`</td> + <td>(kURL Only) If a license supports integration with GeoAxis, this field is set to `true`. If GeoAxis is not supported, this field is either `false` or missing. **Note**: This field requires that the vendor has the GeoAxis entitlement. It also requires that the vendor has access to the Identity Service entitlement.</td> + </tr> + <tr> + <td>`isGitOpsSupported`</td> + <td><GitOpsNotRecommended/> If a license supports the KOTS AutoGitOps workflow in the Admin Console, this field is set to `true`. 
If Auto-GitOps is not supported, this field is either `false` or missing. See [KOTS Auto-GitOps Workflow](/enterprise/gitops-workflow).</td> + </tr> + <tr> + <td>`isIdentityServiceSupported`</td> + <td>If a license supports identity-service enablement for the Admin Console, this field is set to `true`. If identity service is not supported, this field is either `false` or missing. **Note**: This field requires that the vendor have access to the Identity Service entitlement.</td> + </tr> + <tr> + <td>`isSemverRequired`</td> + <td>If set to `true`, this field requires that the Admin Console orders releases according to Semantic Versioning. This field is controlled at the channel level. For more information about enabling Semantic Versioning on a channel, see [Semantic Versioning](releases-about#semantic-versioning) in _About Releases_.</td> + </tr> + <tr> + <td>`isSnapshotSupported`</td> + <td>If a license supports the snapshots backup and restore feature, this field is set to `true`. If a license does not support snapshots, this field is either missing or `false`. **Note**: This field requires that the vendor have access to the Snapshots entitlement.</td> + </tr> + <tr> + <td>`isSupportBundleUploadSupported`</td> + <td>If a license supports uploading a support bundle in the Admin Console, this field is set to `true`. If a license does not support uploading a support bundle, this field is either missing or `false`.</td> + </tr> +</table> + +--- + + +# Verifying License Field Signatures with the Replicated SDK API + +# Verifying License Field Signatures with the Replicated SDK API + +This topic describes how to verify the signatures of license fields when checking customer license entitlements with the Replicated SDK. + +## Overview + +To prevent man-in-the-middle attacks or spoofing by your customers, license fields are cryptographically signed with a probabilistic signature scheme (PSS) signature to ensure their integrity. The PSS signature for a license field is included in the response from the Replicated SDK API `/license/fields` and `/license/fields/{field-name}` endpoints as a Base64 encoded string. + +The following shows an example of a Base64 encoded PSS signature for an `expires_at` field returned by the SDK API: + +```json +{ + "name": "expires_at", + "title": "Expiration", + "description": "License Expiration", + "value": "2023-05-30T00:00:00Z", + "valueType": "String", + "signature": { + "v1": "c6rsImpilJhW0eK+Kk37jeRQvBpvWgJeXK2MD0YBlIAZEs1zXpmvwLdfcoTsZMOj0lZbxkPN5dPhEPIVcQgrzfzwU5HIwQbwc2jwDrLBQS4hGOKdxOWXnBUNbztsHXMqlAYQsmAhspRLDhBiEoYpFV/8oaaAuNBrmRu/IVAW6ahB4KtP/ytruVdBup3gn1U/uPAl5lhzuBifaW+NDFfJxAX..." + } +} +``` + +Replicated recommends that you use signature verification to ensure the integrity of each license field you use in your application. For more information about how to check entitlements in your application for Helm CLI installations, see [Checking Entitlements in Helm Charts Before Deployment](licenses-reference-helm). + +## Requirement + +Include the Replicated SDK as a dependency of your application Helm chart. For more information, see [Install the SDK as a Subchart](replicated-sdk-installing#install-the-sdk-as-a-subchart) in _Installing the Replicated SDK_. + +## Use Your Public Key to Verify License Field Signatures + +In your application, you can use your public key (available in the Vendor Portal) and the MD5 hash of a license field value to verify the PSS signature of the license field. 
+
+To use your public key to verify license field signatures:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Settings** page.
+
+1. Click the **Replicated SDK Signature Verification** tab.
+
+   ![signature verification page](/images/signature-verification.png)
+   [View a larger version of this image](/images/signature-verification.png)
+
+1. Under **Your public key**, copy the key and save it in a secure location.
+
+1. (Optional) Under **Verification**, select the tab for the necessary programming language, and copy the code sample provided.
+
+1. In your application, add logic that uses the public key to verify the integrity of license field signatures. If you copied one of the code samples from the Vendor Portal in the previous step, paste it into your application and make any additional edits as required.
+
+   If you are not using one of the code samples provided, consider the following requirements for verifying license field values:
+   * License field signatures included in the response from the SDK API `/license/fields` and `/license/fields/{field-name}` endpoints are Base64 encoded and must be decoded before they are verified.
+   * The MD5 hash of the license field value is required to verify the signature of the license field. The raw license field value is included in the response from the SDK API `/license/fields` and `/license/fields/{field-name}` endpoints. The MD5 hash of the value must be calculated and used for signature verification.
+
+
+---
+
+
+# Application Namespaces
+
+# Application Namespaces
+
+Replicated strongly recommends that applications are architected to deploy a single application into a single namespace when possible.
+
+If you are distributing your application with Replicated KOTS, you can implement an architecture in which a single application is deployed into a single namespace.
+
+To do this, omit the namespace from the `metadata.namespace` field in your application manifests. Do not use the Config custom resource object to make the namespace user-configurable.
+
+When you do not specify a namespace in application manifests, KOTS deploys to whatever namespace it is already running in. This gives the most flexibility when deploying to end user environments, as users already select the namespace where KOTS runs. Scoping to a single namespace also allows the app to run with minimal Kubernetes permissions, which can reduce friction when an application runs as a tenant in a large cluster. Overall, letting the end user manage namespaces is the easiest way to reduce friction.
+
+The following examples demonstrate the recommended approach of excluding the namespace from the application manifests, as well as the incorrect approaches of hardcoding the namespace or injecting the namespace as a user-supplied value:
+
+**Recommended**
+
+```yaml
+# good, namespace absent
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: spline-reticulator
+spec:
+```
+
+**Not Recommended**
+
+```yaml
+# bad, hardcoded
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: spline-reticulator
+  namespace: graphviz-pro
+spec:
+```
+
+```yaml
+# bad, configurable
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: spline-reticulator
+  namespace: repl{{ ConfigOption "gv_namespace" }}
+spec:
+```
+
+
+---
+
+
+# Offsite Data Backup
+
+# Offsite Data Backup
+
+Replicated stores customer data in multiple databases across Amazon Web
+Services (AWS) S3 buckets. Clustering and network redundancies help to avoid a
+single point of failure.
+ +The offsite data backup add-on provides additional redundancy by copying data to +an offsite Google Cloud Provider (GCP) storage location. This helps to mitigate +any potential data loss caused by an outage to AWS. + +:::note +The offsite data backup add-on is available only to [Replicated Enterprise](https://www.replicated.com/pricing/) customers at an additional cost. Please [open a product request](https://vendor.replicated.com/support?requestType=feature&productArea=vendor) if you are interested in this feature. +::: + +## Overview + +When the offsite data backup add-on is enabled, data is migrated from Replicated's existing AWS S3 buckets to a dedicated second set of AWS S3 buckets. These buckets are only used for vendors with this add-on enabled, and all vendor data remains logically isolated by vendor Team. After data is migrated from existing S3 buckets to the secondary buckets, +all data is deleted from the original S3 buckets. + +To ensure customer data in the offsite GCP storage remains up-to-date, the GCP +account uses the Google Storage Transfer service to synchronize at least daily with the +secondary dedicated S3 buckets. + +The offsite GCP data backup functions only as secondary data storage and does not serve customer +data. Customer data continues to be served from the AWS S3 buckets. In the case of an AWS outage, Replicated can use a manual +process to restore customer data from the GCP backups into a production-grade database. + +For more information, see [Architecture](#architecture) below. + +## Architecture + +The following diagram shows the flow of air gap build data and registry image data +when the offsite data backup add-on is enabled. The flow of data that is backed +up offsite in GCP is depicted with green arrows. + +![architecture of offsite data storage with the offsite data backup add-on](../../static/images/offsite-backup.png) + +[View a larger version of this image](../../static/images/offsite-backup.png) + +As shown in the diagram above, when the offsite data backup add-on is enabled, +registry and air gap data are stored in dedicated S3 buckets. Both of +these dedicated S3 buckets back up data to offsite storage in GCP. + +The diagram also shows how customer installations continue to pull data from the +vendor registry and the customer portal when offsite data backup is enabled. + + +--- + + +# Defining Additional Images + +import AirGapBundle from "../partials/airgap/_airgap-bundle.mdx" + +# Defining Additional Images + +This topic describes how to define additional images to be included in the `.airgap` bundle for a release. + +## Overview + +<AirGapBundle/> + +When building the `.airgap` bundle for a release, the Replicated Vendor Portal finds and includes all images defined in the Pod specs for the release. During installation or upgrade, KOTS retags images from the `.airgap` bundle and pushes them to the registry configured in KOTS. + +Any required images that are _not_ defined in your application manifests must be listed in the `additionalImages` attribute of the KOTS Application custom resource. This ensures that the images are included in the `.airgap` bundle for the release. + +## Define Additional Images for Air Gap Bundles + +KOTS supports including the following types of images in the `additionalImages` field: + +* Public images referenced by the docker pullable image name. +* Images pushed to a private registry that was configured in the Vendor Portal, referenced by the docker-pullable, upstream image name. 
For more information about configuring private registries, see [Connecting to an External Registry](/vendor/packaging-private-images).
+  :::note
+  If you use the [Replicated proxy registry](/vendor/private-images-about) for online (internet-connected) installations, be sure to use the _upstream_ image name in the `additionalImages` field, rather than referencing the location of the image at `proxy.replicated.com`.
+  :::
+* Images pushed to the Replicated registry referenced by the `registry.replicated.com` name.
+
+The following example demonstrates adding multiple images to `additionalImages`:
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Application
+metadata:
+  name: my-app
+spec:
+  additionalImages:
+    - elasticsearch:7.6.0
+    - quay.io/orgname/private-image:v1.2.3
+    - registry.replicated.com/my-operator/my-private-image:abd123f
+```
+
+---
+
+
+# Defining Additional Namespaces
+
+# Defining Additional Namespaces
+
+Operators often need to be able to manage resources in multiple namespaces in the cluster.
+When deploying an application to an existing cluster, Replicated KOTS creates a Kubernetes Role and RoleBinding that are limited to only accessing the namespace that the application is being installed into.
+
+In addition to RBAC policies, clusters running in air gap environments or clusters that are configured to use a local registry also need to ensure that image pull secrets exist in all namespaces that the operator will manage resources in.
+
+## Creating additional namespaces
+
+An application can identify additional namespaces to create during installation time.
+You can define these additional namespaces in the Application custom resource by adding an `additionalNamespaces` attribute to the Application custom resource manifest file. For more information, see [Application](../reference/custom-resource-application) in the _Custom Resources_ section.
+
+When these are defined, `kots install` will create the namespaces and ensure that the KOTS Admin Console has full access to manage resources in these namespaces.
+This is accomplished by creating a Role and RoleBinding per namespace, and setting the Subject to the Admin Console service account.
+If the current user account does not have access to create these additional namespaces, the installer will show an error and fail.
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Application
+metadata:
+  name: my-operator
+spec:
+  additionalNamespaces:
+    - namespace1
+    - namespace2
+```
+
+In addition to creating these namespaces, the Admin Console will ensure that the application pull secret exists in them, and that this secret has access to pull the application images. This includes both images that are used and additional images defined in the Application custom resource manifest. For more information, see [Defining Additional Images](operator-defining-additional-images).
+
+The pull secret name can be obtained using the [ImagePullSecretName](../reference/template-functions-config-context/#imagepullsecretname) template function.
+An operator can reliably depend on this secret existing in all installs (online and air gapped), and can use this secret name in any created `podspec` to pull private images.
+
+## Dynamic namespaces
+
+Some applications need access to dynamically created namespaces or even all namespaces.
+In this case, an application spec can list `"*"` as one of its `additionalNamespaces` in the Application manifest file.
+
+When KOTS encounters the wildcard, it will not create any namespaces, but it will ensure that the application image pull secret is copied to all namespaces.
+The Admin Console will run an informer internally to watch namespaces in the cluster, and when a new namespace is created, the secret will automatically be copied to it.
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Application
+metadata:
+  name: my-operator
+spec:
+  additionalNamespaces:
+    - "*"
+```
+
+When the wildcard (`"*"`) is listed in `additionalNamespaces`, KOTS will use a ClusterRole and ClusterRoleBinding for the Admin Console.
+This will ensure that the Admin Console will continue to have permissions to all newly created namespaces, even after the install has finished.
+
+
+---
+
+
+# About Packaging a Kubernetes Operator Application
+
+# About Packaging a Kubernetes Operator Application
+
+Kubernetes Operators can be packaged and delivered as an application using the same methods as other Kubernetes applications.
+
+Operators are good for [specific use cases](https://blog.replicated.com/operators-in-kots/). In general, we recommend thinking deeply about the problem space an application solves before going down the Operator path because, although powerful, Operators take a lot of time to build and maintain.
+
+Operators are generally defined using one or more `CustomResourceDefinition` manifests, and the controller is often a `StatefulSet`, along with other additional objects.
+These Kubernetes manifests can be included in an application by adding them to a release and promoting the release to a channel.
+
+Kubernetes Operators differ from traditional applications because they interact with the Kubernetes API to create and manage other objects at runtime.
+When a `CustomResource` is deployed to the cluster that has the operator running, the Operator may need to create new Kubernetes objects to fulfill the request.
+When an Operator creates an object that includes a `PodSpec`, the Operator should use locally-available images in order to remain compatible with air gapped environments and customers who have configured a local registry to push all images to.
+Even environments that aren't air gapped may need access to private images that are included as part of the application at runtime.
+
+To support this, an application can include a definition that lets the developer list the additional images required by the application, and KOTS exposes the local registry details (endpoint, namespace, and secrets) to the application so that they can be referenced when creating a `PodSpec` at runtime.
+
+
+---
+
+
+# Referencing Images
+
+# Referencing Images
+
+This topic explains how to support the use of private image registries for applications that are packaged with Kubernetes Operators.
+
+## Overview
+
+To support the use of private images in all environments, the Kubernetes Operator code must use KOTS functionality to determine the image name and image pull secrets for all pods when they are created.
+
+There are several template functions available to assist with this.
+This might require two new environment variables to be added to a manager to read these values.
+
+The steps to ensure that an Operator is using the correct image names and has the correct image pull secrets in dynamically created pods are:
+
+1. Add a new environment variable to the Manager Pod so that the Manager knows the location of the private registry, if one is set.
+2.
+
+## Step 1: Add a reference to the local registry
+
+The manager of an Operator is often a `StatefulSet`, but could be a `Deployment` or another kind.
+Regardless of where the spec is defined, the location of the private images can be read using the Replicated KOTS template functions. For more information about using template functions, see [About Template Functions](/reference/template-functions-about).
+
+#### Option 1: Define each image
+
+If an Operator only requires one additional image, the easiest way to determine this location is to use the `LocalImageName` function.
+This will always return the image name to use, whether the customer's environment is configured to use a local registry or not.
+
+**Example:**
+
+```yaml
+env:
+  - name: IMAGE_NAME_ONE
+    value: 'repl{{ LocalImageName "elasticsearch:7.6.0" }}'
+```
+
+For online installations (no local registry), this will be written with no changes -- the variable will contain `elasticsearch:7.6.0`.
+For installations that are air gapped or have a locally-configured registry, this will be rewritten as the locally referenceable image name. For example, `registry.somebigbank.com/my-app/elasticsearch:7.6.0`.
+
+**Example:**
+
+```yaml
+env:
+  - name: IMAGE_NAME_TWO
+    value: 'repl{{ LocalImageName "quay.io/orgname/private-image:v1.2.3" }}'
+```
+
+In the above example, the image is private and will always be rewritten. For online installations, this will return `proxy.replicated.com/proxy/app-name/quay.io/orgname/private-image:v1.2.3`, and for installations with a locally-configured registry it will return `registry.somebigbank.com/org/my-app-private-image:v1.2.3`.
+
+#### Option 2: Build image names manually
+
+For applications that have multiple images or dynamically construct the image name at runtime, the KOTS template functions can also return the elements that make up the local registry endpoint and secrets, and let the application developer construct the locally-referenceable image name.
+
+**Example:**
+
+```yaml
+env:
+  - name: REGISTRY_HOST
+    value: 'repl{{ LocalRegistryHost }}'
+  - name: REGISTRY_NAMESPACE
+    value: 'repl{{ LocalRegistryNamespace }}'
+```
+
+## Step 2: Determine the imagePullSecret
+
+Private, local images will need to reference an image pull secret to be pulled.
+The value of the secret's `.dockerconfigjson` is provided in a template function, and the application can write this pull secret as a new secret to the namespace.
+If the application is deploying the pod to the same namespace as the Operator, the pull secret will already exist in the namespace, and the secret name can be obtained using the [ImagePullSecretName](../reference/template-functions-config-context/#imagepullsecretname) template function.
+KOTS will create this secret automatically, but only in the namespace that the Operator is running in.
+It's the responsibility of the application developer (the Operator code) to ensure that this secret is present in any namespace that new pods will be deployed to.
+
+The `LocalRegistryImagePullSecret` template function returns the base64-encoded Docker auth config, which can be written directly to a secret and referenced in the `imagePullSecrets` attribute of the PodSpec.
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: myregistrykey
+  namespace: awesomeapps
+data:
+  .dockerconfigjson: '{{repl LocalRegistryImagePullSecret }}'
+type: kubernetes.io/dockerconfigjson
+```
+
+This will return an image pull secret for the locally configured registry.
+
+If your application has both public and private images, it is recommended that you also pass the image name to the image pull secret template function. This ensures that installs without a local registry can differentiate between private, proxied, and public images.
+
+**Example:**
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: my-pull-secret
+  namespace: awesomeapps
+data:
+  .dockerconfigjson: '{{repl LocalRegistryImagePullSecret "elasticsearch:7.6.0" }}'
+type: kubernetes.io/dockerconfigjson
+```
+
+In the above example, the `LocalRegistryImagePullSecret` function will return an empty auth array if the installation is not air gapped, does not have a local registry configured, and the `elasticsearch:7.6.0` image is public.
+If the image is private, the function will return the license-key derived pull secret.
+And finally, if the installation is using a local registry, the image pull secret will contain the credentials needed to pull from the local registry.
+
+**Example:**
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: my-pull-secret
+  namespace: awesomeapps
+data:
+  .dockerconfigjson: '{{repl LocalRegistryImagePullSecret "quay.io/orgname/private-image:v1.2.3" }}'
+type: kubernetes.io/dockerconfigjson
+```
+
+The above example will always return an image pull secret because the referenced image is private.
+For installations without a local registry, it will be the Replicated license secret, and for installations with a local registry, it will contain the credentials for the local registry.
+
+## Using the local registry at runtime
+
+The developer of the Operator should use these environment variables to set the image name in any deployed PodSpec to ensure that it will work in air gapped environments.
+
+
+---
+
+
+import WeightLimitation from "../partials/helm/_helm-cr-weight-limitation.mdx"
+import HooksLimitation from "../partials/helm/_hooks-limitation.mdx"
+import HookWeightsLimitation from "../partials/helm/_hook-weights-limitation.mdx"
+
+# Orchestrating Resource Deployment
+
+This topic describes how to orchestrate the deployment order of resources deployed as part of your application. The information in this topic applies to Helm chart- and standard manifest-based applications deployed with Replicated KOTS.
+
+## Overview
+
+Many applications require that certain resources are deployed and in a ready state before other resources can be deployed.
+
+When installing an application that includes one or more Helm charts, KOTS always deploys standard Kubernetes manifests to the cluster _before_ deploying any Helm charts. For example, if your release contains a Helm chart, a CRD, and a ConfigMap, then the CRD and ConfigMap resources are deployed before the Helm chart.
+
+For applications deployed with KOTS, you can manage the order in which resources are deployed using the following methods:
+
+* For Helm charts, set the `weight` property in the corresponding HelmChart custom resource. See [HelmChart `weight`](#weight).
+
+* For standard manifests, add KOTS annotations to the resources. See [Standard Manifest Deployment Order with KOTS Annotations](#manifests).
+ +## Helm Chart Deployment Order with `weight` {#weight} + +You can configure the [`weight`](/reference/custom-resource-helmchart-v2#weight) property of the Replicated HelmChart custom resource to define the order in which the Helm charts in your release are installed. + +KOTS directs Helm to install the Helm charts based on the value of `weight` in ascending order, deploying the chart with the lowest weight first. Any dependencies are installed along with the parent chart. For example, a chart with a `weight` of `-1` deploys before a chart with a `weight` of `0`. + +The value for the `weight` property can be any negative or positive integer or `0`. By default, when you do not provide a `weight` for a Helm chart, the `weight` is `0`. + +For example: + +```yaml +apiVersion: kots.io/v1beta2 +kind: HelmChart +metadata: + name: samplechart +spec: + chart: + name: samplechart + chartVersion: 3.1.7 + releaseName: samplechart-release-1 + # weight determines the order that charts are applied, with lower weights first. + weight: 4 +``` + +#### Limitations + +The `weight` field in the HelmChart custom resource has the following limitations: + +* <WeightLimitation/> + +* When installing a Helm chart-based application, KOTS always deploys standard Kubernetes manifests to the cluster _before_ deploying Helm charts. For example, if your release contains a Helm chart, a CRD, and a ConfigMap, then the CRD and ConfigMap resources are deployed before the Helm chart. The `weight` property does not allow Helm charts to be deployed before standard manifests. + +## Standard Manifest Deployment Order with KOTS Annotations {#manifests} + +You can use the KOTS annotations described in this section to control the order in which standard manifests are deployed. + +### Requirement + +You must quote the boolean or integer values in annotations because Kubernetes annotations must be strings. For more information about working with annotations in Kubernetes resources, see [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) in the Kubernetes documentation. + +### `kots.io/creation-phase` + +When the `kots.io/creation-phase: '<integer>'` annotation is present on a resource, KOTS groups the resource into the specified creation phase. KOTS deploys each phase in order from lowest to highest. Phases can be any positive or negative integer ranging from `'-9999'` to `'9999'`. + +Resources in the same phase are deployed in the same order that Helm installs resources. To view the order in which KOTS deploys resources of the same phase, see [Helm installs resources in the following order](https://helm.sh/docs/intro/using_helm/#:~:text=Helm%20installs%20resources%20in%20the,order) in the Helm documentation. + +The following example deploys the `CustomResourceDefinition` before the default creation phase: + +```yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: myresources.example.com + annotations: + kots.io/creation-phase: "-1" +... +``` + +### `kots.io/deletion-phase` + +When the `kots.io/deletion-phase: '<integer>'` annotation is present on a resource, KOTS groups the resource into the specified deletion phase. KOTS deletes each phase in order from lowest to highest. Resources within the same phase are deleted in the reverse order from which they were created. Phases can be any positive or negative integer ranging from `'-9999'` to `'9999'`. 
+
+The following example deploys the `CustomResourceDefinition` before the default creation phase and deletes the resource after the default deletion phase:
+
+```yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: myresources.example.com
+  annotations:
+    kots.io/creation-phase: "-1"
+    kots.io/deletion-phase: "1"
+...
+```
+
+### `kots.io/wait-for-ready`
+
+When the `kots.io/wait-for-ready: '<bool>'` annotation is present on a resource and evaluates to `'true'`, KOTS waits for the resource to be in a ready state before deploying any other resources. For most resource types, KOTS has existing logic to determine if a resource is ready. If there is no existing logic for the given resource type, then KOTS waits until the resource exists and is queryable from the Kubernetes API server.
+
+In the following example, KOTS waits for the Postgres `StatefulSet` to be ready before continuing to deploy other resources:
+
+```yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: postgresql
+  annotations:
+    kots.io/wait-for-ready: 'true'
+  labels:
+    app: postgresql
+spec:
+  selector:
+    matchLabels:
+      app: postgresql
+  template:
+    metadata:
+      labels:
+        app: postgresql
+    spec:
+      containers:
+      - name: postgresql
+        image: "postgres:9.6"
+        imagePullPolicy: ""
+...
+```
+
+### `kots.io/wait-for-properties`
+
+When the `kots.io/wait-for-properties: '<jsonpath>=<value>,<jsonpath>=<value>'` annotation is present on a resource, KOTS waits for one or more specified resource properties to match the desired values before deploying other resources. This annotation is useful when the `kots.io/wait-for-ready` annotation, which waits for a resource to exist, is not sufficient.
+
+The value for this annotation is a comma-separated list of key-value pairs, where the key is a JSONPath specifying the path to the property and the value is the desired value for the property. In the following example, KOTS waits for a resource to reach a desired state before deploying other resources. In this case, KOTS waits until each of the three status properties has the target value:
+
+```yaml
+kind: MyResource
+metadata:
+  name: my-resource
+  annotations:
+    kots.io/wait-for-properties: '.status.tasks.extract=true,.status.tasks.transform=true,.status.tasks.load=true'
+...
+status:
+  tasks:
+    extract: false
+    transform: false
+    load: false
+```
+
+---
+
+
+# Excluding MinIO from Air Gap Bundles (Beta)
+
+The Replicated KOTS Admin Console requires an S3-compatible object store to store application archives and support bundles. By default, KOTS deploys MinIO to satisfy the object storage requirement. For more information about the options for installing without MinIO in existing clusters, see [Installing KOTS in Existing Clusters Without Object Storage](/enterprise/installing-stateful-component-requirements).
+
+As a software vendor, you can exclude MinIO images from all Admin Console air gap distributions (`kotsadm.tar.gz`) in the download portal. Excluding MinIO from the `kotsadm.tar.gz` air gap bundle is useful if you want to prevent MinIO images from appearing in the air gap distribution that your end users download. It also reduces the file size of `kotsadm.tar.gz`.
+
+:::note
+You can still retrieve a bundle with MinIO images from the KOTS release page in GitHub when this feature is enabled. See [replicatedhq/kots](https://github.com/replicatedhq/kots/releases/) in GitHub.
+:::
+
+To exclude MinIO from the `kotsadm.tar.gz` Admin Console air gap bundle:
+
+1. Log in to your Vendor Portal account. Select **Support** > **Request a feature**, and submit a feature request for "Exclude MinIO image from air gap bundle". After this feature is enabled, all `kotsadm.tar.gz` files in the download portal will not include MinIO.
+
+1. Instruct your end users to set the flag `--with-minio=false` with the `kots install` command during an air gap installation. For more information about setting this runtime flag, see [Installing KOTS in Existing Clusters Without Object Storage](/enterprise/installing-stateful-component-requirements).
+
+   :::important
+   If you have this feature enabled in your Team account and the end user does not include `--with-minio=false` with the `kots install` command, then the installation fails.
+   :::
+
+
+---
+
+
+# Cleaning Up Kubernetes Jobs
+
+This topic describes how to use the Replicated KOTS `kots.io/hook-delete-policy` annotation to remove Kubernetes Job objects from the cluster after they complete.
+
+## About Kubernetes Jobs
+
+Kubernetes Jobs are designed to run and then terminate. However, they remain in the namespace after completion. Because Job objects are immutable, this can cause conflicts and errors when attempting to update the Job later.
+
+A common workaround is to include a content SHA from the Job object in its name. However, a user can update their application instance through various events (upstream update, license sync, config update, CLI upload). If the Job is already completed, it is an error to reapply the same Job to the cluster again.
+
+The built-in Replicated KOTS operator/controller can help by deleting Jobs upon completion.
+This allows the same Job to be deployed again without polluting the namespace with completed Jobs.
+
+For more information about Job objects, see [Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) in the Kubernetes documentation.
+
+## KOTS `hook-delete-policy` Annotation
+
+To enable the built-in KOTS operator/controller to automatically delete Jobs when they complete, specify a delete hook policy as an annotation on the Job object.
+
+The KOTS annotation key is `kots.io/hook-delete-policy` and there are two possible values (you can use both simultaneously): `hook-succeeded` and `hook-failed`.
+
+When this annotation is present and includes `hook-succeeded`, the Job is deleted when it completes successfully.
+If this annotation is present and includes `hook-failed`, the Job is deleted on failure.
+
+For Helm charts deployed with KOTS, KOTS automatically adds this `kots.io/hook-delete-policy` annotation to any Job objects in the Helm chart that include a `helm.sh/hook-delete-policy` annotation. This means that there is nothing extra to configure when deploying a Helm chart with Helm delete hooks.
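+
+For example, the following is a minimal sketch of a Job that a Helm chart might already include (the `db-migrate` Job name, image, and command are hypothetical). Because the Job specifies `helm.sh/hook-delete-policy`, KOTS adds the equivalent `kots.io/hook-delete-policy` annotation automatically when the chart is deployed:
+
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: db-migrate
+  annotations:
+    # Standard Helm hook annotations. When this chart is deployed with KOTS,
+    # the delete policy is mapped to kots.io/hook-delete-policy automatically.
+    "helm.sh/hook": post-install,post-upgrade
+    "helm.sh/hook-delete-policy": hook-succeeded,hook-failed
+spec:
+  template:
+    spec:
+      containers:
+      - name: db-migrate
+        image: example.registry.com/my-app/migrations:1.0.0
+        command: ["/bin/sh", "-c", "./migrate.sh"]
+      restartPolicy: Never
+  backoffLimit: 4
+```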
+
+The following example shows a Job object with the `kots.io/hook-delete-policy` annotation:
+
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: pi
+  annotations:
+    "kots.io/hook-delete-policy": "hook-succeeded, hook-failed"
+spec:
+  template:
+    spec:
+      containers:
+      - name: pi
+        image: perl
+        command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
+      restartPolicy: Never
+  backoffLimit: 4
+```
+
+---
+
+
+import Installers from "../partials/kurl/_installers.mdx"
+import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"
+
+# Creating a kURL Installer
+
+<KurlAvailability/>
+
+This topic describes how to create a kURL installer spec in the Replicated Vendor Portal to support installations with Replicated kURL.
+
+For information about creating kURL installers with the Replicated CLI, see [installer create](/reference/replicated-cli-installer-create).
+
+## Overview
+
+<Installers/>
+
+For more information about kURL, see [Introduction to kURL](kurl-about).
+
+## Create an Installer
+
+To distribute a kURL installer alongside your application, you can promote the installer to a channel or include the installer as a manifest file within a given release:
+
+<table>
+  <tr>
+    <th width="30%">Method</th>
+    <th width="70%">Description</th>
+  </tr>
+  <tr>
+    <td><a href="packaging-embedded-kubernetes#channel">Promote the installer to a channel</a></td>
+    <td><p>The installer is promoted to one or more channels. All releases on the channel use the kURL installer that is currently promoted to that channel. There can be only one active kURL installer on each channel at a time.</p><p>The benefit of promoting an installer to one or more channels is that you can create a single installer without needing to add a separate installer for each release. However, because all the releases on the channel will use the same installer, problems can occur if all releases are not tested with the given installer.</p></td>
+  </tr>
+  <tr>
+    <td><a href="packaging-embedded-kubernetes#release">Include the installer in a release (Beta)</a></td>
+    <td><p>The installer is included as a manifest file in a release. This makes it easier to test the installer and release together. It also makes it easier to know which installer spec customers are using based on the application version that they have installed.</p></td>
+  </tr>
+</table>
+
+### Promote the Installer to a Channel {#channel}
+
+To promote a kURL installer to a channel:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), click **kURL Installers**.
+
+1. On the **kURL Installers** page, click **Create kURL installer**.
+
+   <img alt="vendor portal kurl installers page" src="/images/kurl-installers-page.png" width="650px"/>
+
+   [View a larger version of this image](/images/kurl-installers-page.png)
+
+1. Edit the file to customize the installer. For guidance on which add-ons to choose, see [kURL Add-on Requirements and Recommendations](#requirements-and-recommendations) below.
+
+   You can also go to the landing page at [kurl.sh](https://kurl.sh/) to build an installer and then copy the provided YAML:
+
+   <img alt="kurl.sh landing page" src="/images/kurl-build-an-installer.png" width="650px"/>
+
+   [View a larger version of this image](/images/kurl-build-an-installer.png)
+
+1. Click **Save installer**. You can continue to edit your file until it is promoted.
+
+1. Click **Promote**. In the **Promote Installer** dialog that opens, edit the fields:
+
+   <img alt="promote installer dialog" src="/images/promote-installer.png" width="450px"/>
+
+   [View a larger version of this image](/images/promote-installer.png)
+
+   <table>
+     <tr>
+       <th width="30%">Field</th>
+       <th width="70%">Description</th>
+     </tr>
+     <tr>
+       <td>Channel</td>
+       <td>Select the channel or channels where you want to promote the installer.</td>
+     </tr>
+     <tr>
+       <td>Version label</td>
+       <td>Enter a version label for the installer.</td>
+     </tr>
+   </table>
+
+1. Click **Promote** again. The installer appears on the **kURL Installers** page.
+
+   To make changes after promoting, create and promote a new installer.
+
+### Include an Installer in a Release (Beta) {#release}
+
+To include the kURL installer in a release:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), click **Releases**. Then, either click **Create Release** to create a new release, or click **Edit YAML** to edit an existing release.
+
+   The YAML editor opens.
+
+1. Create a new file in the release with `apiVersion: cluster.kurl.sh/v1beta1` and `kind: Installer`:
+
+   ```yaml
+   apiVersion: cluster.kurl.sh/v1beta1
+   kind: Installer
+   metadata:
+     name: "latest"
+   spec:
+
+   ```
+
+1. Edit the file to customize the installer. For guidance on which add-ons to choose, see [kURL Add-on Requirements and Recommendations](#requirements-and-recommendations) below.
+
+   You can also go to the landing page at [kurl.sh](https://kurl.sh/) to build an installer and then copy the provided YAML:
+
+   <img alt="kurl.sh landing page" src="/images/kurl-build-an-installer.png" width="650px"/>
+
+   [View a larger version of this image](/images/kurl-build-an-installer.png)
+
+1. Click **Save**. This saves a draft that you can continue to edit until you promote it.
+
+1. Click **Promote**.
+
+   To make changes after promoting, create a new release.
+
+## kURL Add-on Requirements and Recommendations {#requirements-and-recommendations}
+
+kURL includes several add-ons for networking, storage, ingress, and more. The add-ons that you choose depend on the requirements for KOTS and the unique requirements for your application. For more information about each add-on, see the open source [kURL documentation](https://kurl.sh/docs/introduction/).
+
+When creating a kURL installer, consider the following requirements and guidelines for kURL add-ons:
+
+- You must include the KOTS add-on to support installation with KOTS and provision the KOTS Admin Console. See [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) in the kURL documentation.
+
+- To support the use of KOTS snapshots, Velero must be installed in the cluster. Replicated recommends that you include the Velero add-on in your kURL installer so that your customers do not have to manually install Velero.
+
+  :::note
+  During installation, the Velero add-on automatically deploys internal storage for backups. The Velero add-on requires the MinIO or Rook add-on to deploy this internal storage. If you include the Velero add-on without either the MinIO add-on or the Rook add-on, installation fails with the following error message: `Only Rook and Longhorn are supported for Velero Internal backup storage`.
+  :::
+
+- You must select storage add-ons based on the KOTS requirements and the unique requirements for your application. For more information, see [About Selecting Storage Add-ons](packaging-installer-storage).
+
+- kURL installers that are included in releases must pin specific add-on versions and cannot pin `latest` versions or x-ranges (such as 1.2.x). Pinning specific versions ensures the most testable and reproducible installations. For example, pin Kubernetes version `1.23.0` in your manifest to ensure that version 1.23.0 of Kubernetes is installed. For more information about pinning Kubernetes versions, see [Versions](https://kurl.sh/docs/create-installer/#versions) and [Versioned Releases](https://kurl.sh/docs/install-with-kurl/#versioned-releases) in the kURL open source documentation.
+
+  :::note
+  For kURL installers that are _not_ included in a release, pinning specific versions of Kubernetes and Kubernetes add-ons in the kURL installer manifest is not required, though it is highly recommended.
+  :::
+
+- After you configure a kURL installer, Replicated recommends that you customize host preflight checks to support the installation experience with kURL. Host preflight checks help ensure successful installation and the ongoing health of the cluster. For more information about customizing host preflight checks, see [Customizing Host Preflight Checks for Kubernetes Installers](preflight-host-preflights).
+
+- For installers included in a release, Replicated recommends that you define a preflight check in the release to ensure that the target kURL installer is deployed before the release is installed. For more information about how to define preflight checks, see [Defining Preflight Checks](preflight-defining).
+
+  For example, the following preflight check uses the `yamlCompare` analyzer with the `kots.io/installer: "true"` annotation to compare the target kURL installer that is included in the release against the kURL installer that is currently deployed in the customer's environment. For more information about the `yamlCompare` analyzer, see [`yamlCompare`](https://troubleshoot.sh/docs/analyze/yaml-compare/) in the open source Troubleshoot documentation.
+
+  ```yaml
+  apiVersion: troubleshoot.sh/v1beta2
+  kind: Preflight
+  metadata:
+    name: installer-preflight-example
+  spec:
+    analyzers:
+      - yamlCompare:
+          annotations:
+            kots.io/installer: "true"
+          checkName: Kubernetes Installer
+          outcomes:
+            - fail:
+                message: The kURL installer for this version differs from what you have installed. It is recommended that you run the updated kURL installer before deploying this version.
+                uri: https://kurl.sh/my-application
+            - pass:
+                message: The kURL installer for this version matches what is currently installed.
+  ```
+
+
+---
+
+
+# Conditionally Including or Excluding Resources
+
+This topic describes how to include or exclude optional application resources based on one or more conditional statements. The information in this topic applies to Helm chart- and standard manifest-based applications.
+
+## Overview
+
+Software vendors often need a way to conditionally deploy resources for an application depending on users' configuration choices. For example, a common use case is giving the user the choice to use an external database or an embedded database. In this scenario, when a user chooses to use their own external database, it is not desirable to deploy the embedded database resources.
+
+There are different options for creating conditional statements to include or exclude resources based on the application type (Helm chart- or standard manifest-based) and the installation method (Replicated KOTS or Helm CLI).
+
+### About Replicated Template Functions
+
+For applications deployed with KOTS, Replicated template functions are available for creating the conditional statements that control which optional resources are deployed for a given user. Replicated template functions can be used in standard manifest files such as Replicated custom resources or Kubernetes resources like StatefulSets, Secrets, and Services.
+
+For example, the Replicated ConfigOptionEquals template function returns true if the specified configuration option value is equal to a supplied value. This is useful for creating conditional statements that include or exclude a resource based on a user's application configuration choices.
+
+For more information about the available Replicated template functions, see [About Template Functions](/reference/template-functions-about).
+
+## Include or Exclude Helm Charts
+
+This section describes methods for including or excluding Helm charts from your application deployment.
+
+### Helm Optional Dependencies
+
+Helm supports adding a `condition` field to dependencies in the Helm chart `Chart.yaml` file to include subcharts based on one or more boolean values evaluating to true.
+
+For more information about working with dependencies and defining optional dependencies for Helm charts, see [Dependencies](https://helm.sh/docs/chart_best_practices/dependencies/) in the Helm documentation.
+
+### HelmChart `exclude` Field
+
+For Helm chart-based applications installed with KOTS, you can configure KOTS to exclude certain Helm charts from deployment using the HelmChart custom resource [`exclude`](/reference/custom-resource-helmchart#exclude) field. When the `exclude` field is set to a conditional statement, KOTS excludes the chart if the condition evaluates to `true`.
+
+The following example uses the `exclude` field and the ConfigOptionEquals template function to exclude a postgresql Helm chart when the `external_postgres` option is selected on the Replicated Admin Console **Config** page:
+
+```yaml
+apiVersion: kots.io/v1beta2
+kind: HelmChart
+metadata:
+  name: postgresql
+spec:
+  exclude: 'repl{{ ConfigOptionEquals `postgres_type` `external_postgres` }}'
+  chart:
+    name: postgresql
+    chartVersion: 12.1.7
+    releaseName: samplechart-release-1
+```
+
+## Include or Exclude Standard Manifests
+
+For standard manifest-based applications installed with KOTS, you can use the `kots.io/exclude` or `kots.io/when` annotations to include or exclude resources based on a conditional statement.
+
+By default, if neither `kots.io/exclude` nor `kots.io/when` is present on a resource, the resource is included.
+
+### Requirements
+
+The `kots.io/exclude` and `kots.io/when` annotations have the following requirements:
+
+* Only one of the `kots.io/exclude` or `kots.io/when` annotations can be present on a single resource. If both are present, the `kots.io/exclude` annotation is applied, and the `kots.io/when` annotation is ignored.
+
+* The values of the `kots.io/exclude` and `kots.io/when` annotations must be wrapped in quotes. This is because Kubernetes annotations must be strings. For more information about working with Kubernetes annotations, see [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) in the Kubernetes documentation.
+
+### `kots.io/exclude`
+
+When the `kots.io/exclude: '<bool>'` annotation is present on a resource and evaluates to true, the resource is excluded from the deployment.
+
+The following example uses the `kots.io/exclude` annotation and the ConfigOptionEquals template function to exclude the postgresql `StatefulSet` when an `install_postgres` checkbox on the Admin Console **Config** page is disabled:
+
+```yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: postgresql
+  annotations:
+    kots.io/exclude: '{{repl ConfigOptionEquals "install_postgres" "0" }}'
+  labels:
+    app: postgresql
+spec:
+  selector:
+    matchLabels:
+      app: postgresql
+  template:
+    metadata:
+      labels:
+        app: postgresql
+    spec:
+      containers:
+      - name: postgresql
+        image: "postgres:9.6"
+        imagePullPolicy: ""
+...
+```
+
+### `kots.io/when`
+
+When the `kots.io/when: '<bool>'` annotation is present on a resource and evaluates to true, the resource is included in the deployment.
+
+The following example uses the `kots.io/when` annotation and the ConfigOptionEquals template function to include the postgresql `StatefulSet` resource when the `install_postgres` checkbox on the Admin Console **Config** page is enabled:
+
+```yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: postgresql
+  annotations:
+    kots.io/when: '{{repl ConfigOptionEquals "install_postgres" "1" }}'
+  labels:
+    app: postgresql
+spec:
+  selector:
+    matchLabels:
+      app: postgresql
+  template:
+    metadata:
+      labels:
+        app: postgresql
+    spec:
+      containers:
+      - name: postgresql
+        image: "postgres:9.6"
+        imagePullPolicy: ""
+...
+```
+
+---
+
+
+# Adding Cluster Ingress Options
+
+When delivering a configurable application, ingress can be challenging because it is very cluster-specific.
+Below is an example of a flexible `ingress.yaml` file designed to work in most Kubernetes clusters, including embedded clusters created with Replicated kURL.
+
+## Example
+
+The following example includes an Ingress resource with a single host-based routing rule.
+The resource works in both existing clusters and kURL clusters.
+
+### Config
+
+A config option `enable_ingress` has been provided to allow the end user to choose whether or not to enable the Ingress resource.
+In some clusters the provided Ingress resource may not be desired. For example, when an ingress controller is not available, other means of exposing services may be preferred.
+
+An `annotations` text area has been made available for the end user to add additional annotations to the ingress.
+Here, cluster-specific annotations can be added to support a variety of ingress controllers.
+For example, when using the [ALB ingress controller](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html) in AWS, it is necessary to include the `kubernetes.io/ingress.class: alb` annotation on your Ingress resource.
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Config
+metadata:
+  name: example-application
+spec:
+  groups:
+  - name: ingress
+    title: Ingress
+    items:
+    - name: enable_ingress
+      type: bool
+      title: Enable Kubernetes Ingress
+      help_text: |
+        When checked, deploy the provided Kubernetes Ingress resource.
+      default: "1"
+    - name: hostname
+      type: text
+      title: Hostname
+      help_text: |
+        Use this field to provide a hostname for your Example Application installation.
+      required: true
+      when: repl{{ ConfigOptionEquals "enable_ingress" "1" }}
+    - name: allow_http
+      type: bool
+      title: Allow Unsecured Access through HTTP
+      help_text: |
+        Uncheck this box to disable HTTP traffic between the client and the load balancer.
+      default: "1"
+      when: repl{{ ConfigOptionEquals "enable_ingress" "1" }}
+    - name: annotations
+      type: textarea
+      title: Annotations
+      help_text: |
+        Use this textarea to provide annotations specific to your ingress controller.
+        For example, `kubernetes.io/ingress.class: alb` when using the ALB ingress controller.
+      when: repl{{ ConfigOptionEquals "enable_ingress" "1" }}
+```
+
+### Ingress
+
+For ingress, you must create two separate resources.
+The first is deployed to existing cluster installations, while the second is deployed only to embedded clusters.
+Both of these resources are selectively excluded with the [`exclude` annotation](packaging-include-resources).
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: example-application-ingress
+  annotations:
+    kots.io/exclude: '{{repl or (ConfigOptionEquals "enable_ingress" "1" | not) IsKurl }}'
+    kubernetes.io/ingress.allow-http: '{{repl ConfigOptionEquals "allow_http" "1" }}'
+    nginx.ingress.kubernetes.io/force-ssl-redirect: '{{repl ConfigOptionEquals "allow_http" "1" | not }}'
+    kots.io/placeholder: repl{{ printf "'true'" }}repl{{ ConfigOption "annotations" | nindent 4 }}
+spec:
+  rules:
+    - host: repl{{ or (ConfigOption "hostname") "~" }}
+      http:
+        paths:
+          - path: /
+            pathType: ImplementationSpecific
+            backend:
+              service:
+                name: nginx
+                port:
+                  number: 80
+```
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: example-application-ingress-embedded
+  annotations:
+    kots.io/exclude: '{{repl or (ConfigOptionEquals "enable_ingress" "1" | not) (not IsKurl) }}'
+    kubernetes.io/ingress.allow-http: '{{repl ConfigOptionEquals "allow_http" "1" }}'
+    nginx.ingress.kubernetes.io/force-ssl-redirect: '{{repl ConfigOptionEquals "allow_http" "1" | not }}'
+    kots.io/placeholder: repl{{ printf "'true'" }}repl{{ ConfigOption "annotations" | nindent 4 }}
+spec:
+  tls:
+    - hosts:
+        - repl{{ ConfigOption "hostname" }}
+      secretName: kotsadm-tls
+  rules:
+    - host: repl{{ ConfigOption "hostname" }}
+      http:
+        paths:
+          - path: /
+            pathType: ImplementationSpecific
+            backend:
+              service:
+                name: nginx
+                port:
+                  number: 80
+```
+
+
+---
+
+
+import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"
+
+# About Selecting Storage Add-ons
+
+<KurlAvailability/>
+
+This topic provides guidance for selecting Replicated kURL add-ons to provide highly available data storage in kURL clusters. For additional guidance, see [Choosing a PV Provisioner](https://kurl.sh/docs/create-installer/choosing-a-pv-provisioner) in the open source kURL documentation.
+
+## Overview
+
+kURL includes add-ons for object storage and for dynamic provisioning of PersistentVolumes (PVs) in clusters. You configure these add-ons in your kURL installer to define how data for your application and data for Replicated KOTS is managed in the cluster.
+
+The following lists the kURL add-ons for data storage:
+* **MinIO**: MinIO is an open source, S3-compatible object store. See [MinIO Add-on](https://kurl.sh/docs/add-ons/minio) in the kURL documentation.
+* **Rook**: Rook provides dynamic PV provisioning of distributed Ceph storage. Ceph is a distributed storage system that provides S3-compatible object storage. See [Rook Add-on](https://kurl.sh/docs/add-ons/rook) in the kURL documentation.
+* **OpenEBS**: OpenEBS Local PV creates a StorageClass to dynamically provision local PersistentVolumes (PVs) in a cluster. See [OpenEBS Add-on](https://kurl.sh/docs/add-ons/openebs) in the kURL documentation.
+* **Longhorn**: Longhorn is an open source distributed block storage system for Kubernetes. See [Longhorn Add-on](https://kurl.sh/docs/add-ons/longhorn) in the kURL documentation.
+
+  :::important
+  The Longhorn add-on is deprecated and not supported in production clusters. If you are currently using Longhorn, you must migrate data from Longhorn to either OpenEBS or Rook. For more information about migrating from Longhorn, see [Migrating to Change CSI Add-On](https://kurl.sh/docs/install-with-kurl/migrating-csi) in the kURL documentation.
+  :::
+
+## About Persistent Storage for KOTS
+
+This section describes the default storage requirements for KOTS. Each of the [Supported Storage Configurations](#supported-storage-configurations) described below satisfies these storage requirements for KOTS.
+
+### rqlite StatefulSet
+
+KOTS deploys a rqlite StatefulSet to store the version history, application metadata, and other small amounts of data needed to manage the application(s). No configuration is required to deploy rqlite.
+
+Rqlite is a distributed relational database that uses SQLite as its storage engine. For more information, see the [rqlite](https://rqlite.io/) website.
+
+### Object Storage or Local PV
+
+By default, KOTS requires an S3-compatible object store to store the following:
+* Support bundles
+* Application archives
+* Backups taken with Replicated snapshots that are configured to use NFS or host path storage destinations
+
+Both the Rook add-on and the MinIO add-on satisfy this object store requirement.
+
+Alternatively, you can configure KOTS to be deployed without object storage. This installs KOTS as a StatefulSet using a persistent volume (PV) for storage. When there is no object storage available, KOTS stores support bundles, application archives, and snapshots that have an NFS or host path storage destination in the local PV. In this case, the OpenEBS add-on can be included to provide the local PV storage. For more information, see [Installing Without Object Storage](/enterprise/installing-stateful-component-requirements).
+
+### Distributed Storage in KOTS v1.88 and Earlier
+
+KOTS v1.88 and earlier requires distributed storage. To support multi-node clusters, kURL installers that use a KOTS version earlier than v1.89 in the KOTS add-on must use the Rook add-on for distributed storage. For more information, see [Rook Ceph](#rook-ceph) below.
+
+## Factors to Consider When Choosing a Storage Configuration
+
+The object store and/or PV provisioner add-ons that you choose to include in your kURL installer depend on the following factors:
+* **KOTS storage requirements**: The storage requirements for the version of the KOTS add-on that you include in the spec. For example, KOTS v1.88 and earlier requires distributed storage.
+* **Other add-on storage requirements**: The storage requirements for the other add-ons that you include in the spec. For example, the Velero add-on requires object storage to deploy the default internal storage for snapshots during installation.
+* **Application storage requirements**: The storage requirements for your application. For example, you might include different add-ons depending on whether your application requires a single or multi-node cluster, or whether your application requires distributed storage.
+
+## Supported Storage Configurations
+
+This section describes the supported storage configurations for embedded clusters provisioned by kURL.
+
+### OpenEBS Without Object Storage (Single Node) {#single-node}
+
+If your application can be deployed to a single node cluster and does not require object storage, then you can choose to exclude object storage and instead use the OpenEBS add-on only to provide local storage on the single node in the cluster.
+
+When configured to use local PV storage instead of object storage, KOTS stores support bundles, application archives, and snapshots that have an NFS or host path storage destination in a PV on the single node in the cluster.
+
+#### Requirements
+
+To use the OpenEBS add-on without object storage, your kURL installer must meet the following requirements:
+
+* When neither the MinIO nor the Rook add-on is included in the kURL installer, you must set the `disableS3` field to `true` in the KOTS add-on. Setting `disableS3: true` in the KOTS add-on allows KOTS to use the local PV storage provided by OpenEBS instead of using object storage. For more information, see [Effects of the disableS3 Flag](https://kurl.sh/docs/add-ons/kotsadm#effects-of-the-disables3-flag) in _KOTS Add-on_ in the kURL documentation.
+
+* When neither the MinIO nor the Rook add-on is included in the kURL installer, the Velero add-on cannot be included. This is because, during installation, the Velero add-on automatically deploys internal storage for backups taken with the Replicated snapshots feature. The Velero add-on requires object storage to deploy this internal storage. If you include the Velero add-on without either the MinIO add-on or the Rook add-on, installation fails with the following error message: `Only Rook and Longhorn are supported for Velero Internal backup storage`.
+
+  When the Velero add-on is not included, your users must install and configure Velero on the cluster after installation in order to use Replicated snapshots for backup and restore. See [About Backup and Restore with Snapshots](/vendor/snapshots-overview).
+
+  For a storage configuration for single node clusters that supports the use of the Velero add-on, see [OpenEBS with MinIO (Single or Multi-Node)](#openebs-minio) below.
+
+#### Example
+
+The following is an example installer that uses OpenEBS v3.3.x with Local PV for local storage and disables object storage for KOTS:
+
+```yaml
+apiVersion: "cluster.kurl.sh/v1beta1"
+kind: "Installer"
+metadata:
+  name: "local"
+spec:
+  ...
+  openebs:
+    version: "3.3.x"
+    isLocalPVEnabled: true
+    localPVStorageClassName: "default"
+  kotsadm:
+    disableS3: true
+```
+
+For more information about properties for the OpenEBS add-on, see [OpenEBS](https://kurl.sh/docs/add-ons/openebs) in the kURL documentation.
+
+### OpenEBS with MinIO (Single or Multi-Node) {#openebs-minio}
+
+Using the OpenEBS add-on with the MinIO add-on provides a highly available data storage solution for multi-node clusters that is lighter weight than Rook Ceph. Replicated recommends that you use OpenEBS Local PV with MinIO for multi-node clusters if your application does _not_ require distributed storage. If your application requires distributed storage, see [Rook Ceph](#rook-ceph) below.
+
+When both the MinIO and OpenEBS add-ons are included, KOTS stores support bundles, application archives, and snapshots that have an NFS or host path storage destination in MinIO object storage. Additionally, KOTS uses OpenEBS Local PV to provision the PVs on each node that MinIO uses for local storage.
+
+#### Requirement
+
+To use both the OpenEBS add-on and the MinIO add-on, the KOTS add-on must use KOTS v1.89 or later.
+
+KOTS v1.88 and earlier requires distributed storage, which is not provided by OpenEBS Local PV. To support multi-node clusters, kURL installers that use a KOTS version earlier than v1.89 in the KOTS add-on must use the Rook add-on for distributed storage. See [Rook Ceph](#rook-ceph) below.
+
+#### Example
+
+The following is an example installer that uses both the OpenEBS add-on version 3.3.x and MinIO add-on version `2022-09-07T22-25-02Z`:
+
+```yaml
+apiVersion: "cluster.kurl.sh/v1beta1"
+kind: "Installer"
+metadata:
+  name: "openebs-with-minio"
+spec:
+  ...
+  openebs:
+    version: "3.3.x"
+    isLocalPVEnabled: true
+    localPVStorageClassName: "default"
+  minio:
+    version: "2022-09-07T22-25-02Z"
+```
+
+For more information about properties for the OpenEBS and MinIO add-ons, see [OpenEBS](https://kurl.sh/docs/add-ons/openebs) and [MinIO](https://kurl.sh/docs/add-ons/minio) in the kURL documentation.
+
+### Rook Ceph (Multi-Node) {#rook-ceph}
+
+If your application requires multiple nodes and distributed storage, Replicated recommends that you use the Rook add-on for storage. The Rook add-on creates an S3-compatible, distributed object store with Ceph and also creates a StorageClass for dynamically provisioning PVs.
+
+#### Requirement
+
+Rook versions 1.4.3 and later require a dedicated block device attached to each node in the cluster. The block device must be unformatted and dedicated for use by Rook only. The device cannot be used for other purposes, such as being part of a RAID configuration. If the device is used for purposes other than Rook, then the installer fails, indicating that it cannot find an available block device for Rook.
+
+For Rook Ceph versions earlier than 1.4.3, a dedicated block device is recommended in production clusters. Running distributed storage such as Rook on block devices is recommended for improved data stability and performance.
+
+#### Example
+
+The following is an example installer that uses the Rook add-on version 1.7.x:
+
+```yaml
+apiVersion: "cluster.kurl.sh/v1beta1"
+kind: "Installer"
+metadata:
+  name: "distributed"
+spec:
+  ...
+  rook:
+    version: "1.7.x"
+    storageClassName: "distributed"
+    isSharedFilesystemDisabled: true
+```
+
+For more information about properties for the Rook add-on, see [Rook](https://kurl.sh/docs/add-ons/rook) in the kURL documentation.
+
+
+---
+
+
+# Setting Minimum and Target Versions for KOTS
+
+This topic describes how to set minimum and target versions for Replicated KOTS in the KOTS [Application](/reference/custom-resource-application) custom resource.
+
+## Limitation
+
+Setting minimum and target versions for KOTS is not supported for installations with [Replicated Embedded Cluster](/vendor/embedded-overview).
+
+This is because each version of Embedded Cluster includes a particular version of KOTS. Setting `targetKotsVersion` or `minKotsVersion` to a version of KOTS that does not coincide with the version that is included in the specified version of Embedded Cluster will cause Embedded Cluster installations to fail with an error message like: `Error: This version of App Name requires a different version of KOTS from what you currently have installed.`
+
+To avoid installation failures, do not use `targetKotsVersion` or `minKotsVersion` in releases that support installation with Embedded Cluster.
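+
+For reference, the following is a minimal sketch of how these fields are set in the Application custom resource (the version values shown are placeholders):
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Application
+metadata:
+  name: my-app
+spec:
+  # Block installs and updates when the deployed KOTS version is earlier
+  # than this version (Beta).
+  minKotsVersion: "1.71.0"
+  # Block new installations that use a KOTS version later than this version.
+  targetKotsVersion: "1.92.0"
+```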
+ +## Using Minimum KOTS Versions (Beta) + +The `minKotsVersion` attribute in the Application custom resource defines the minimum version of Replicated KOTS that is required by the application release. This can be useful when you want to get users who are lagging behind to update to a more recent KOTS version, or if your application requires functionality that was introduced in a particular KOTS version. + +Including this attribute enforces compatibility checks for both new installations and application updates. An installation or update is blocked if the currently deployed KOTS version is earlier than the specified minimum KOTS version. Users must upgrade to at least the specified minimum version of KOTS before they can install or update the application. + +### How the Admin Console Handles minKotsVersion + +When you promote a new release specifying a minimum KOTS version that is later than what a user currently has deployed, and that user checks for updates, that application version appears in the version history of the Admin Console. However, it is not downloaded. + +The Admin Console temporarily displays an error message that informs the user that they must update KOTS before downloading the application version. This error also displays when the user checks for updates with the [`kots upstream upgrade`](/reference/kots-cli-upstream-upgrade) command. + +KOTS cannot update itself automatically, and users cannot update KOTS from the Admin Console. For more information on how to update KOTS in existing clusters or in kURL clusters, see [Performing Updates in Existing Clusters](/enterprise/updating-app-manager) and [Performing Updates in kURL Clusters](/enterprise/updating-kurl). + +After updating KOTS to the minimum version or later, users can use the Admin Console or the [`kots upstream download`](/reference/kots-cli-upstream-download) command to download the release and subsequently deploy it. + + +## Using Target KOTS Versions + +Including `targetKotsVersion` in the Application custom resource enforces compatibility checks for new installations. It blocks the installation if a user tries to install a version of KOTS that is later than the target version. For example, this can prevent users from installing a version of KOTS that you have not tested yet. + +If the latest release in a channel includes `targetKotsVersion`, the install command for existing clusters is modified to install that specific version of KOTS. The install command for existing clusters is on the channel card in the [Vendor Portal](https://vendor.replicated.com). + +### How the Admin Console Handles targetKotsVersion + +Specifying a `targetKotsVersion` does not prevent an end user from upgrading to a later version of KOTS after the initial installation. + +If a new version of the application specifies a later target KOTS version than what is currently installed, users are not prevented from deploying that version of the application. + +If a user's Admin Console is running a version of KOTS that is earlier than the target version specified in a new version of the application, the Admin Console displays a notification in the footer, indicating that a newer supported version of KOTS is available. + +### Using Target Versions with kURL + +For installations in a cluster created by Replicated kURL, the version of the KOTS add-on must not be later than the target KOTS version specified in the Application custom resource. 
If the KOTS add-on version is later than the version specified for `targetKotsVersion`, the initial installation fails.
+
+For more information about the KOTS add-on, see [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) in the open source kURL documentation.
+
+
+---
+
+
+# Connecting to an External Registry
+
+This topic describes how to add credentials for an external private registry using the Replicated Vendor Portal or Replicated CLI. Adding an external registry allows you to grant proxy access to private images using the Replicated proxy registry. For more information, see [About the Replicated Proxy Registry](private-images-about).
+
+For information about adding a registry with the Vendor API v3, see [Create an external registry with the specified parameters](https://replicated-vendor-api.readme.io/reference/createexternalregistry) in the Vendor API v3 documentation.
+
+## Supported Registries
+
+Replicated recommends that application vendors use one of the following external private registries:
+
+* Amazon Elastic Container Registry (ECR)
+* DockerHub
+* GitHub Container Registry
+* Google Artifact Registry
+* Google Container Registry (Deprecated)
+* Sonatype Nexus
+* Quay.io
+
+These registries have been tested for compatibility with KOTS.
+
+You can also configure access to most other external registries if the registry conforms to the Open Container Initiative (OCI) standard.
+
+## Add Credentials for an External Registry
+
+All applications in your team have access to the external registry that you add. This means that you can use the images in the external registry across multiple apps in the same team.
+
+### Using the Vendor Portal
+
+To add an external registry using the Vendor Portal:
+
+1. Log in to the [Vendor Portal](https://vendor.replicated.com) and go to the **Images** page.
+1. Click **Add External Registry**.
+
+   <img src="/images/add-external-registry.png" alt="add external registry dialog" width="400px"/>
+
+   [View a larger version of this image](/images/add-external-registry.png)
+
+1. In the **Provider** drop-down, select your registry provider.
+
+1. Complete the fields in the dialog, depending on the provider that you chose:
+
+   :::note
+   Replicated encrypts your credentials and stores them securely. Your credentials and the encryption key do not leave Replicated servers.
+   :::
+
+   * **Amazon ECR**
+
+     <table>
+       <tr>
+         <th width="30%">Field</th>
+         <th width="70%">Instructions</th>
+       </tr>
+       <tr>
+         <td>Hostname</td>
+         <td>Enter the host name for the registry, such as 123456689.dkr.ecr.us-east-1.amazonaws.com</td>
+       </tr>
+       <tr>
+         <td>Access Key ID</td>
+         <td>Enter the Access Key ID for a Service Account User that has pull access to the registry.
See <a href="tutorial-ecr-private-images#setting-up-the-service-account-user">Setting up the Service Account User</a>.</td>
+       </tr>
+       <tr>
+         <td>Secret Access Key</td>
+         <td>Enter the Secret Access Key for the Service Account User.</td>
+       </tr>
+     </table>
+
+   * **DockerHub**
+
+     <table>
+       <tr>
+         <th width="30%">Field</th>
+         <th width="70%">Instructions</th>
+       </tr>
+       <tr>
+         <td>Hostname</td>
+         <td>Enter the host name for the registry, such as index.docker.io.</td>
+       </tr>
+       <tr>
+         <td>Auth Type</td>
+         <td>Select the authentication type for a DockerHub account that has pull access to the registry.</td>
+       </tr>
+       <tr>
+         <td>Username</td>
+         <td>Enter the username for the account.</td>
+       </tr>
+       <tr>
+         <td>Password or Token</td>
+         <td>Enter the password or token for the account, depending on the authentication type you selected.</td>
+       </tr>
+     </table>
+
+   * **GitHub Container Registry**
+
+     <table>
+       <tr>
+         <th width="30%">Field</th>
+         <th width="70%">Instructions</th>
+       </tr>
+       <tr>
+         <td>Hostname</td>
+         <td>Enter the host name for the registry.</td>
+       </tr>
+       <tr>
+         <td>Username</td>
+         <td>Enter the username for an account that has pull access to the registry.</td>
+       </tr>
+       <tr>
+         <td>GitHub Token</td>
+         <td>Enter the token for the account.</td>
+       </tr>
+     </table>
+
+   * **Google Artifact Registry**
+
+     <table>
+       <tr>
+         <th width="30%">Field</th>
+         <th width="70%">Instructions</th>
+       </tr>
+       <tr>
+         <td>Hostname</td>
+         <td>Enter the host name for the registry, such as <br/>us-east1-docker.pkg.dev</td>
+       </tr>
+       <tr>
+         <td>Auth Type</td>
+         <td>Select the authentication type for a Google Cloud Platform account that has pull access to the registry.</td>
+       </tr>
+       <tr>
+         <td>Service Account JSON Key or Token</td>
+         <td>
+           <p>Enter the JSON Key from Google Cloud Platform assigned with the Artifact Registry Reader role, or the token for the account, depending on the authentication type you selected.</p>
+           <p>For more information about creating a Service Account, see <a href="https://cloud.google.com/container-registry/docs/access-control">Access Control with IAM</a> in the Google Cloud documentation.</p>
+         </td>
+       </tr>
+     </table>
+
+   * **Google Container Registry**
+
+     :::important
+     Google Container Registry is deprecated. For more information, see <a href="https://cloud.google.com/container-registry/docs/deprecations/container-registry-deprecation">Container Registry deprecation</a> in the Google documentation.
+     :::
+
+     <table>
+       <tr>
+         <th width="30%">Field</th>
+         <th width="70%">Instructions</th>
+       </tr>
+       <tr>
+         <td>Hostname</td>
+         <td>Enter the host name for the registry, such as gcr.io.</td>
+       </tr>
+       <tr>
+         <td>Service Account JSON Key</td>
+         <td><p>Enter the JSON Key for a Service Account in Google Cloud Platform that is assigned the Storage Object Viewer role.</p><p>For more information about creating a Service Account, see <a href="https://cloud.google.com/container-registry/docs/access-control">Access Control with IAM</a> in the Google Cloud documentation.</p></td>
+       </tr>
+     </table>
+
+   * **Quay.io**
+
+     <table>
+       <tr>
+         <th width="30%">Field</th>
+         <th width="70%">Instructions</th>
+       </tr>
+       <tr>
+         <td>Hostname</td>
+         <td>Enter the host name for the registry, such as quay.io.</td>
+       </tr>
+       <tr>
+         <td>Username and Password</td>
+         <td>Enter the username and password for an account that has pull access to the registry.</td>
+       </tr>
+     </table>
+
+   * **Sonatype Nexus**
+
+     <table>
+       <tr>
+         <th width="30%">Field</th>
+         <th width="70%">Instructions</th>
+       </tr>
+       <tr>
+         <td>Hostname</td>
+         <td>Enter the host name for the registry, such as nexus.example.net.</td>
+       </tr>
+       <tr>
+         <td>Username and Password</td>
+         <td>Enter the username and password for an account that has pull access to the registry.</td>
+       </tr>
+     </table>
+
+   * **Other**
+
+     <table>
+       <tr>
+         <th width="30%">Field</th>
+         <th width="70%">Instructions</th>
+       </tr>
+       <tr>
+         <td>Hostname</td>
+         <td>Enter the host name for the registry, such as example.registry.com.</td>
+       </tr>
+       <tr>
+         <td>Username and Password</td>
+         <td>Enter the username and password for an account that has pull access to the registry.</td>
+       </tr>
+     </table>
+
+1. For **Image name & tag**, enter the image name and image tag, and click **Test** to confirm that the Vendor Portal can access the image. For example, `api:v1.0.1` or `my-app/api:v1.0.1`.
+
+1. Click **Link registry**.
+
+### Using the CLI
+
+To configure access to private images in an external registry using the Replicated CLI:
+
+1. Install and configure the Replicated CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing).
+
+1. Run the `registry add` command for your external private registry. For more information about the `registry add` command, see [registry add](/reference/replicated-cli-registry-add) in _Replicated CLI_.
+
+   For example, to add a DockerHub registry:
+
+   ```bash
+   replicated registry add dockerhub --username USERNAME \
+     --password PASSWORD
+   ```
+
+   Where:
+   * `USERNAME` is the username for DockerHub credentials with access to the registry.
+   * `PASSWORD` is the password for DockerHub credentials with access to the registry.
+
+   :::note
+   To prevent the password from being saved in your shell history, Replicated recommends that you use the `--password-stdin` flag and enter the password when prompted.
+   :::
+
+## Test External Registry Credentials
+
+Replicated recommends that you test external registry credentials to ensure that the saved credentials on Replicated servers can pull the specified image.
+
+To validate that the configured registry can pull specific images:
+
+```bash
+replicated registry test HOSTNAME \
+  --image IMAGE_NAME
+```
+
+Where:
+* `HOSTNAME` is the name of the host, such as `index.docker.io`.
+* `IMAGE_NAME` is the name of the target image in the registry.
+
+For example:
+
+```bash
+replicated registry test index.docker.io --image my-company/my-image:v1.2.3
+```
+
+## Related Topic
+
+[Tutorial: Using ECR for Private Images](tutorial-ecr-private-images)
+
+
+---
+
+
+# Replicated Registry Security
+
+# Replicated Registry Security
+
+This document lists the security measures and processes in place to ensure that images pushed to the Replicated registry remain private. For more information about pushing images to the Replicated registry, see [Using the Replicated Registry for KOTS Installations](private-images-replicated).
+
+
+## Single Tenant Isolation
+
+The registry is deployed and managed as a multi-tenant application, but each tenant is completely isolated from data that is created and pulled by other tenants. Docker images have shared base layers, but the private registry does not share these between tenants. For example, if a tenant creates an image `FROM postgres:10.3` and pushes the image to Replicated, all of the layers are uploaded, even if other tenants have this base layer uploaded.
+
+A tenant in the private registry is a team on the Replicated [Vendor Portal](https://vendor.replicated.com). Licenses and customers created by the team are also granted some permissions to the registry data, as specified in the following sections. Cross-tenant access is never allowed in the private registry.
+
+
+## Authentication and Authorization
+
+The private registry supports several methods of authentication. Public access is never allowed because the registry only accepts authenticated requests.
+
+
+### Vendor Authentication
+
+All accounts with read/write access on the Vendor Portal have full access to all images pushed by the tenant to the registry. These users can push and pull images to and from the registry.
+
+
+### End Customer Authentication
+
+A valid (unexpired) license file has an embedded `registry_token` value. Replicated components shipped to customers use this value to authenticate to the registry. Only pull access is enabled when authenticating using a `registry_token`. A `registry_token` has pull access to all images in the tenant's account. All requests to pull images are denied when a license expires or the expiration date is changed to a past date.
+
+
+## Networking and Infrastructure
+
+A dedicated cluster is used to run the private registry and is not used for any other services.
+
+The registry metadata is stored in a shared database instance. This database contains information about each layer in an image, but not the image data itself.
+
+The registry image data is securely stored in an encrypted S3 bucket. Each layer is encrypted at rest, using a shared key stored in [Amazon Key Management Service](https://aws.amazon.com/kms/). Each tenant has a unique directory in the shared bucket and access is limited to the team or license making the request.
+
+The registry cluster runs on a hardened operating system image (CentOS-based), and all instances are on a private virtual private cloud (VPC). Public IP addresses are not assigned to the instances running the cluster and the registry images. Instead, only port 443 traffic is allowed from a layer 7 load balancer to these servers.
+
+There are no SSH public keys on these servers, and password-based SSH login is disallowed. The servers are not configured to have any remote access. All deployments to these servers are automated using tools such as Terraform and a custom-built CI/CD process. Only verified images are pulled and run.
+
+
+## Runtime Monitoring
+
+Replicated uses a Web Application Firewall (WAF) on the cluster that monitors and blocks any unusual activity. When unusual activity is detected, access from that endpoint is automatically blocked for a period of time, and a Replicated site reliability engineer (SRE) is alerted.
+
+
+## Penetration Testing
+
+Replicated completed a formal penetration test that included the private registry in the scope of the test. Replicated also runs a bug bounty program and encourages responsible disclosure of any vulnerabilities that are found.
+
+
+---
+
+
+# Connecting to a Public Registry through the Proxy Registry
+
+# Connecting to a Public Registry through the Proxy Registry
+
+This topic describes how to pull images from public registries using the Replicated proxy registry.
+
+For more information about the Replicated proxy registry, see [About the Replicated Proxy Registry](private-images-about).
+
+## Pull Public Images Through the Replicated Proxy Registry
+
+You can use the Replicated proxy registry to pull both public and private images. Using the Replicated proxy registry for public images can simplify network access requirements for your customers, as they only need to whitelist a single domain (either `proxy.replicated.com` or your custom domain) instead of multiple registry domains.
+
+For public images, you must first configure registry credentials.
+
+To pull public images through the Replicated proxy registry, use the following `docker` command:
+
+```bash
+docker pull REPLICATED_PROXY_DOMAIN/proxy/APPSLUG/UPSTREAM_REGISTRY_HOSTNAME/IMAGE:TAG
+```
+Where:
+* `REPLICATED_PROXY_DOMAIN` is `proxy.replicated.com` or your custom domain. For information about how to set a custom domain for the proxy registry, see [Using Custom Domains](/vendor/custom-domains-using).
+* `APPSLUG` is your Replicated app slug found on the [app settings page](https://vendor.replicated.com/settings).
+* `UPSTREAM_REGISTRY_HOSTNAME` is the hostname for the public registry where the image is located. If the image is located in a namespace within the registry, include the namespace after the hostname. For example, `quay.io/namespace`.
+* `IMAGE` is the image name.
+* `TAG` is the image tag.
+
+## Examples
+
+This section includes examples of pulling public images through the Replicated proxy registry.
+
+### Pull Images from DockerHub
+
+The following examples show how to pull public images from DockerHub:
+
+```bash
+# DockerHub is the default when no hostname is specified
+docker pull proxy.replicated.com/proxy/APPSLUG/busybox
+docker pull proxy.replicated.com/proxy/APPSLUG/nginx:1.16.0
+```
+
+```bash
+# You can also optionally specify docker.io
+docker pull proxy.replicated.com/proxy/APPSLUG/docker.io/replicated/replicated-sdk:1.0.0
+```
+
+### Pull Images from Other Registries
+
+The following example shows how to pull images from the Amazon ECR Public Gallery:
+
+```bash
+docker pull proxy.replicated.com/proxy/APPSLUG/public.ecr.aws/nginx/nginx:latest
+```
+
+### Pull Images Using a Custom Domain for the Proxy Registry
+
+The following example shows how to pull a public image when a custom domain is configured for the proxy registry:
+
+```bash
+docker pull my.customdomain.io/proxy/APPSLUG/public.ecr.aws/nginx/nginx:latest
+```
+For information about how to set a custom domain for the proxy registry, see [Using Custom Domains](/vendor/custom-domains-using).
+ +## Related Topic + +[Connecting to an External Registry](packaging-private-images) + + +--- + + +# Configuring KOTS RBAC + +# Configuring KOTS RBAC + +This topic describes role-based access control (RBAC) for Replicated KOTS in existing cluster installations. It includes information about how to change the default cluster-scoped RBAC permissions granted to KOTS. + +## Cluster-scoped RBAC + +When a user installs your application with KOTS in an existing cluster, Kubernetes RBAC resources are created to allow KOTS to install and manage the application. + +By default, the following ClusterRole and ClusterRoleBinding resources are created that grant KOTS access to all resources across all namespaces in the cluster: + +```yaml +apiVersion: "rbac.authorization.k8s.io/v1" +kind: "ClusterRole" +metadata: + name: "kotsadm-role" +rules: + - apiGroups: ["*"] + resources: ["*"] + verbs: ["*"] +``` + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kotsadm-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kotsadm-role +subjects: +- kind: ServiceAccount + name: kotsadm + namespace: appnamespace +``` + +Alternatively, if your application does not require access to resources across all namespaces in the cluster, then you can enable namespace-scoped RBAC for KOTS. For information, see [About Namespace-scoped RBAC](#min-rbac) below. + +## Namespace-scoped RBAC {#min-rbac} + +Rather than use the default cluster-scoped RBAC, you can configure your application so that the RBAC permissions granted to KOTS are limited to a target namespace or namespaces. By default, for namespace-scoped installations, the following Role and RoleBinding resources are created that grant KOTS permissions to all resources in a target namespace: + +```yaml +apiVersion: "rbac.authorization.k8s.io/v1" +kind: "Role" +metadata: + name: "kotsadm-role" +rules: + - apiGroups: ["*"] + resources: ["*"] + verbs: ["*"] +``` + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: kotsadm-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kotsadm-role +subjects: +- kind: ServiceAccount + name: kotsadm + namespace: appnamespace +``` + +Namespace-scoped RBAC is supported for applications that use Kubernetes Operators or multiple namespaces. During application installation, if there are `additionalNamespaces` specified in the Application custom resource manifest file, then Roles and RoleBindings are created to grant KOTS access to resources in all specified namespaces. + +### Enable Namespace-scoped RBAC {#enable} + +To enable namespace-scoped RBAC permissions for KOTS, specify one of the following options in the Application custom resource manifest file: + +* `supportMinimalRBACPrivileges`: Set to `true` to make namespace-scoped RBAC optional for existing cluster installations. When `supportMinimalRBACPrivileges` is `true`, cluster-scoped RBAC is used by default and users must pass the `--use-minimal-rbac` flag with the installation or upgrade command to use namespace-scoped RBAC. + +* `requireMinimalRBACPrivileges`: Set to `true` to require that all installations to existing clusters use namespace-scoped access. When `requireMinimalRBACPrivileges` is `true`, all installations use namespace-scoped RBAC automatically and users do not pass the `--use-minimal-rbac` flag. 
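+
+For example, the following is a minimal sketch of an Application custom resource that requires namespace-scoped RBAC for all existing cluster installations (the application name is illustrative):
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Application
+metadata:
+  name: my-app
+spec:
+  # All installations to existing clusters use namespace-scoped RBAC
+  requireMinimalRBACPrivileges: true
+```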
+
+For more information about these options, see [requireMinimalRBACPrivileges](/reference/custom-resource-application#requireminimalrbacprivileges) and [supportMinimalRBACPrivileges](/reference/custom-resource-application#supportminimalrbacprivileges) in _Application_.
+
+### About Installing with Minimal RBAC
+
+In some cases, it is not possible to grant the user `* * *` permissions in the target namespace. For example, an organization might have security policies that prevent this level of permissions.
+
+If the user installing or upgrading KOTS cannot be granted `* * *` permissions in the namespace, then they can instead request the following:
+* The minimum RBAC permissions required by KOTS
+* RBAC permissions for any CustomResourceDefinitions (CRDs) that your application includes
+
+Installing with the minimum KOTS RBAC permissions also requires that the user manually create a ServiceAccount, Role, and RoleBinding for KOTS, rather than allowing KOTS to automatically create a Role with `* * *` permissions.
+
+For more information about how users can install KOTS with minimal RBAC when namespace-scoped RBAC is enabled, see [Namespace-scoped RBAC Requirements](/enterprise/installing-general-requirements#namespace-scoped) in _Installation Requirements_.
+
+### Limitations
+
+The following limitations apply when using the `requireMinimalRBACPrivileges` or `supportMinimalRBACPrivileges` options to enable namespace-scoped RBAC for KOTS:
+
+* **Existing clusters only**: The `requireMinimalRBACPrivileges` and `supportMinimalRBACPrivileges` options apply only to installations in existing clusters.
+
+* **Preflight checks**: When namespace-scoped access is enabled, preflight checks cannot read resources outside the namespace where KOTS is installed. The preflight checks continue to function, but return less data. For more information, see [Defining Preflight Checks](/vendor/preflight-defining).
+
+* **Velero namespace access for KOTS snapshots**: Velero is required for enabling backup and restore with the KOTS snapshots feature. Namespace-scoped RBAC does not grant access to the namespace where Velero is installed in the cluster.
+
+  To set up snapshots when KOTS has namespace-scoped access, users can run the `kubectl kots velero ensure-permissions` command. This command creates additional Roles and RoleBindings to allow the necessary cross-namespace access. For more information, see [`velero ensure-permissions`](/reference/kots-cli-velero-ensure-permissions/) in the KOTS CLI documentation.
+
+  For more information about snapshots, see [About Backup and Restore with Snapshots](/vendor/snapshots-overview).
+
+* **Air Gap Installations**: For air gap installations, the `requireMinimalRBACPrivileges` and `supportMinimalRBACPrivileges` flags are supported only in automated, or _headless_, installations. In headless installations, the user passes all the required information to install both KOTS and the application with the `kots install` command. In non-headless installations, the user provides information to install the application through the Admin Console UI after KOTS is installed.
+
+  In non-headless installations in air gap environments, KOTS does not have access to the application's `.airgap` package during installation. This means that KOTS does not have the information required to determine whether namespace-scoped access is needed, so it defaults to the more permissive, default cluster-scoped RBAC policy.
+
+  For more information about how to do headless installations in air gap environments, see [Air Gap Installation](/enterprise/installing-existing-cluster-automation#air-gap) in _Installing with the KOTS CLI_.
+
+* **Changing RBAC permissions for installed instances**: The RBAC permissions for KOTS are set during its initial installation. KOTS runs using the assumed identity and cannot change its own authorization. When you update your application to add or remove the `requireMinimalRBACPrivileges` and `supportMinimalRBACPrivileges` flags in the Application custom resource, the RBAC permissions for KOTS are affected only for new installations. Existing KOTS installations continue to run with their current RBAC permissions.
+
+  To expand the scope of RBAC for KOTS from namespace-scoped to cluster-scoped in new installations, Replicated recommends that you include a preflight check to ensure the permission is available in the cluster.
+
+
+---
+
+
+# Using TLS Certificates
+
+import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"
+
+# Using TLS Certificates
+
+<KurlAvailability/>
+
+Replicated KOTS provides default self-signed certificates that renew automatically. For embedded clusters created with Replicated kURL, the self-signed certificate renews 30 days before expiration when you enable the kURL EKCO add-on version 0.7.0 and later.
+
+Custom TLS options are supported:
+
+- **Existing clusters:** The expectation is for the end customer to bring their own Ingress Controller such as Contour or Istio and upload their own `kubernetes.io/tls` secret. For an example, see [Ingress with TLS](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) in the Kubernetes documentation.
+
+- **Embedded kURL clusters:** End customers can upload a custom TLS certificate. Replicated kURL creates a TLS secret that can be reused by other Kubernetes resources, such as a Deployment or Ingress, which can be easier than providing and maintaining multiple certificates. As a vendor, you can enable the use of custom TLS certificates with these additional resources.
+
+For example, if your application does TLS termination, your deployment would need the TLS secret. Or, if the application connects to another deployment that is secured using the same SSL certificate (which might not be a trusted certificate), the custom TLS certificate can be used to do validation without relying on the trust chain.
+
+### Get the TLS Secret
+
+kURL sets up a Kubernetes secret called `kotsadm-tls`. The secret stores the TLS certificate, key, and hostname. Initially, the secret has an annotation set called `acceptAnonymousUploads`. This indicates that a new TLS certificate can be uploaded by the end customer during the installation process. For more information about installing with kURL, see [Online Installation with kURL](/enterprise/installing-kurl).
+
+Before you can reference the TLS certificate in other resources, you must get the `kotsadm-tls` secret output.
+
+To get the `kotsadm-tls` secret, run:
+
+```shell
+kubectl get secret kotsadm-tls -o yaml
+```
+
+In the output, the `tls.crt` and `tls.key` hold the certificate and key that can be referenced in other Kubernetes resources.
+
+**Example Output:**
+
+```yaml
+apiVersion: v1
+kind: Secret
+type: kubernetes.io/tls
+metadata:
+  name: kotsadm-tls
+data:
+  tls.crt: <base64_encoded>
+  tls.key: <base64_encoded>
+```
+
+### Use TLS in a Deployment Resource
+
+This procedure shows how to reference the `kotsadm-tls` secret using an example nginx Deployment resource (`kind: Deployment`).
+
+To use the `kotsadm-tls` secret in a Deployment resource:
+
+1. In the Deployment YAML file, configure SSL for volumeMounts and volumes, and add the `kotsadm-tls` secret to volumes:
+
+   **Example:**
+
+   ```yaml
+   apiVersion: apps/v1
+   kind: Deployment
+   metadata:
+     name: nginx
+   spec:
+     template:
+       spec:
+         containers:
+         - name: nginx
+           image: nginx
+           volumeMounts:
+           - mountPath: "/etc/nginx/ssl"
+             name: nginx-ssl
+             readOnly: true
+         volumes:
+         - name: nginx-ssl
+           secret:
+             secretName: kotsadm-tls
+   ```
+
+1. Deploy the release, and then verify the pod deployment using the `kubectl exec` command:
+
+   **Example:**
+
+   ```shell
+   export POD_NAME=nginx-<hash>
+   kubectl exec -it ${POD_NAME} -- bash
+   ```
+
+1. Run the `ls` and `cat` commands to verify that the certificate and key were deployed to the specified volumeMount:
+
+   **Example:**
+
+   ```shell
+   $ ls /etc/nginx/ssl
+   tls.crt tls.key
+
+   $ cat /etc/nginx/ssl/tls.crt
+   -----BEGIN CERTIFICATE-----
+   MIID8zCCAtugAwIBAgIUZF+NWHnpJCt2R1rDUhYjwgVv72UwDQYJKoZIhvcNAQEL
+
+   $ cat /etc/nginx/ssl/tls.key
+   -----BEGIN PRIVATE KEY-----
+   MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCyiGNuHw2LY3Rv
+   ```
+
+### Use TLS in an Ingress Resource
+
+You can add the `kotsadm-tls` secret to the Ingress resource to terminate TLS at the Contour layer. The following example shows how to configure `secretName: kotsadm-tls` under the TLS `hosts` field in an Ingress resource (`kind: Ingress`):
+
+**Example:**
+
+```yaml
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: nginx
+spec:
+  tls:
+  - hosts:
+    - 'tls.foo.com'
+    secretName: kotsadm-tls
+  rules:
+  - host: tls.foo.com
+    http:
+      paths:
+      - path: /
+        backend:
+          serviceName: nginx
+          servicePort: 80
+```
+:::note
+`tls.foo.com` must resolve to a valid IP, and also must match the Common Name (CN) or Subject Alternative Name (SAN) of the TLS certificate.
+:::
+
+
+---
+
+
+# Customer Application Deployment Questionnaire
+
+# Customer Application Deployment Questionnaire
+
+Before you package and distribute an application, Replicated recommends that you
+understand several key characteristics about the environments where your customers
+will deploy your application.
+
+To gather this information about your customers' environments:
+1. Copy and customize the [$APP Deployment Questionnaire](#app-deployment-questionnaire) below.
+1. Replace $APP with the name of your application.
+1. Send the questionnaire to your users.
+
+## $APP Deployment Questionnaire
+
+### Infrastructure
+
+This section includes questions about your infrastructure and how you deploy software.
+This includes both internally-written and Commercial Off The Shelf (COTS) applications.
+
+If it’s more convenient, limit answers to the scope of the target infrastructure for deploying $APP.
+
+- Do you use any IaaS like AWS, GCP, or Azure?
+
+- If you deploy to a physical datacenter, do you use a Hypervisor like vSphere?
+
+- Do you ever install on bare metal?
+
+- Do you have any restrictions on what operating systems are used?
+
+- Does the target infrastructure have a direct outbound internet connection? Can it connect out via a Proxy?
+
+- If the environment has no outbound network, do machines in a DMZ have direct network access to the air-gapped infrastructure, or do release artifacts need to be copied to physical media for installation?
+
+- If there is an issue causing downtime in the on-prem application, would you be willing to give the $APP team direct SSH access to the instance(s)?
+
+### Development and Deployment Processes
+
+- Do you require that applications be deployed by a configuration management framework like Chef, Ansible, or Puppet?
+
+- Do you run any container-based workloads today?
+
+- If you run container workloads, do you run any kind of orchestration like Kubernetes, Mesos, or Docker Swarm?
+
+- If you run container workloads, what tools do you use to host and serve container images?
+
+- If you run container workloads, what tools do you use to scan and secure container images?
+
+- If you are deploying $APP to your existing Kubernetes cluster, can your cluster nodes pull images from the public internet, or do you require images to be stored in an internal registry?
+
+### Change Management
+
+- How do you test new releases of COTS software? Do you have a UAT or Staging environment? Are there other change management requirements?
+
+- How often do you like to receive planned (non-critical) software updates? Quarterly? Monthly? As often as possible?
+
+- For critical updates, what is your target deployment time for new patches? Do you have a requirement for how quickly patches are made available after a vulnerability is announced?
+
+- Do you drive production deploys automatically from version control (“GitOps”)?
+
+
+### Application Usage and Policy Requirements
+
+- For applications that expose a web UI, how will you be connecting to the instance? As much as possible, include details about your workstation, any tunneling/VPN/proxy infrastructure, and what browsers you intend to use.
+
+- Do you require a disaster recovery strategy for deployed applications? If so, where are backups stored today? (SFTP? NAS? S3-compliant object store? Something else?)
+
+- Do you require deployed COTS applications to support logins with an internal identity provider like OpenLDAP, Windows AD, or SAML?
+
+- Do you require an audit log of all user activity performed in $APP? What are your needs around exporting / aggregating audit log data?
+
+- Do you anticipate the need to scale the capacity of $APP up and down during its lifetime?
+
+- What are your requirements around log aggregation? What downstream systems do you need system logs to be piped to?
+
+
+---
+
+
+# Data Transmission Policy
+
+# Data Transmission Policy
+
+A Replicated installation connects to a Replicated-hosted endpoint periodically to perform various tasks including checking for updates and synchronizing the installed license properties. During this time, some data is transmitted from an installed instance to the Replicated API. This data is limited to:
+
+- The IP address of the primary Replicated instance.
+- The ID of the installation.
+- [Resource statuses](/enterprise/status-viewing-details#resource-statuses).
+- Information about the installation, including data needed for [instance details](/vendor/instance-insights-details).
+- [Custom metrics](/vendor/custom-metrics), which the vendor may configure as part of the installation.
+- Date and timestamps of the data transmission.
+
+This data is required to provide the expected update and license services. The data is also used to provide telemetry and other reporting features.
+
+By default, no additional data is collected and transmitted from the instance to external servers.
+
+All data is encrypted in transit according to industry best practices. For more information about Replicated's security practices, see [Security at Replicated](https://www.replicated.com/security/) on the Replicated website.
+
+For more information about application instance data fields that the Replicated Vendor Portal uses to generate events for instances, see [About Instance and Event Data](/vendor/instance-insights-event-data).
+
+Last modified December 31, 2023
+
+
+---
+
+
+# Infrastructure and Subprocessor Providers
+
+
+# Infrastructure and Subprocessor Providers
+
+This list describes the infrastructure environment, subprocessors, and other entities material to the Replicated products and services.
+
+Prior to engaging any third party, Replicated performs due diligence to evaluate their privacy, security, and confidentiality practices. Whenever possible, Replicated uses encryption for data at rest and in motion so that information is not available to these third parties.
+
+Replicated does not engage in the business of selling or trading personal information. Any personally identifiable information Replicated might hold is data that a customer has provided to us.
+
+The fields that Replicated may possess that are identifiable to a physical person may include:
+- Name
+- Email
+- Phone Number
+- Job Title
+- Business Address
+- GitHub Username
+
+Note: This does not imply that all these fields are collected for each person. It also does not mean all these data points are used with each declared provider.
+
+
+## Replicated Infrastructure Providers
+
+Replicated might use the following entities to provide infrastructure that helps with delivery of our products:
+
+
+| Entity Name | Purpose | Country where Infrastructure Resides | Notes |
+|---------------------|----------------------------|-------|----|
+| Amazon Web Services | Various IaaS | United States | Vendor Portal, registry, API, and supporting infrastructure services. |
+| Cloudflare | Network security, DDoS mitigation, DNS | United States | |
+| Datadog | Performance monitoring | United States | |
+| DBT Labs | Data transformation or migration | United States | |
+| FiveTran | Data transformation or migration | United States | |
+| GitHub | Customer support | United States | Replicated's customers may engage with our customer support team using GitHub issues in a private repo. |
+| Google Looker | Product usage metrics | United States | |
+| Hex | Data transformation or migration | United States | |
+| Knock Labs, Inc. | Event notifications | United States | |
+| Postmark / Active Campaign | Transactional emails from the Vendor Portal. Marketing-related communications. | United States | Active Campaign and Postmark businesses merged. |
+| Salesforce | Customer and sales relationship management | United States | |
+| Snowflake | Usage data analysis and transformation | United States | |
+| Timescale | Time-series data of instance metrics | United States | See our [Data Transmission Policy](/vendor/policies-data-transmission) |
+
+Last modified January 4, 2024
+
+
+---
+
+
+# Support Lifecycle Policy
+
+# Support Lifecycle Policy
+
+Replicated will provide support for products per our terms and services until that product is noted as End of Life (EOL).
+
+<table>
+  <tr>
+    <th width="30%">Product Phase</th>
+    <th width="70%">Definition</th>
+  </tr>
+  <tr>
+    <td>Alpha</td>
+    <td>A product or feature that is exploratory or experimental.
Typically, access to alpha features and their documentation is limited to customers providing early feedback. While most alpha features progress to beta and general availability (GA), some are deprecated based on what is learned during the assessment.</td>
+  </tr>
+  <tr>
+    <td>Beta</td>
+    <td><p>A product or feature that is typically production-ready, but has not met Replicated’s definition of GA for one or more of the following reasons:</p><ul><li>Remaining gaps in intended functionality</li><li>Outstanding needs around testing</li><li>Gaps in documentation or sales enablement</li><li>In-progress customer value validation efforts</li></ul><p>Documentation for beta products and features is published on the Replicated Documentation site with a "(Beta)" label. Beta products or features follow the same build and test processes required for GA.</p><p>Please contact your Replicated account representative if you have questions about why a product or feature is beta.</p></td>
+  </tr>
+  <tr>
+    <td>“GA” - General Availability</td>
+    <td>A product or feature that has been validated as both production-ready and value-additive by a percentage of Replicated customers. Products in the GA phase are typically those that are available for purchase from Replicated.</td>
+  </tr>
+  <tr>
+    <td>“LA” - Limited Availability</td>
+    <td>A product has reached the Limited Availability phase when it is no longer available for new purchases from Replicated. Updates will be primarily limited to security patches, critical bug fixes, and features that enable migration to GA products.</td>
+  </tr>
+  <tr>
+    <td>“EOA” - End of Availability</td>
+    <td><p>A product has reached the End of Availability phase when it is no longer available for renewal purchase by existing customers. This date may coincide with the Limited Availability phase.</p><p>This product is considered deprecated, and will move to End of Life after a determined support window. Product maintenance is limited to critical security issues only.</p></td>
+  </tr>
+  <tr>
+    <td>“EOL” - End of Life</td>
+    <td><p>A product has reached its End of Life, and will no longer be supported, patched, or fixed by Replicated. Associated product documentation may no longer be available.</p><p>The Replicated team will continue to engage to migrate end customers to GA product-based deployments of your application.</p></td>
+  </tr>
+</table>
+
+<table>
+  <tr>
+    <th width="25%">Replicated Product</th>
+    <th width="15%">Product Phase</th>
+    <th width="25%">End of Availability</th>
+    <th width="25%">End of Life</th>
+  </tr>
+  <tr>
+    <td><a href="/vendor/testing-about">Compatibility Matrix</a></td>
+    <td>GA</td>
+    <td>N/A</td>
+    <td>N/A</td>
+  </tr>
+  <tr>
+    <td><a href="/vendor/replicated-sdk-overview">Replicated SDK</a></td>
+    <td>Beta</td>
+    <td>N/A</td>
+    <td>N/A</td>
+  </tr>
+  <tr>
+    <td><a href="/intro-kots">Replicated KOTS Installer</a></td>
+    <td>GA</td>
+    <td>N/A</td>
+    <td>N/A</td>
+  </tr>
+  <tr>
+    <td><a href="/vendor/kurl-about">Replicated kURL Installer</a></td>
+    <td>GA</td>
+    <td>N/A</td>
+    <td>N/A</td>
+  </tr>
+  <tr>
+    <td><a href="/vendor/embedded-overview">Replicated Embedded Cluster Installer</a></td>
+    <td>GA</td>
+    <td>N/A</td>
+    <td>N/A</td>
+  </tr>
+  <tr>
+    <td><a href="https://help.replicated.com/docs/native/getting-started/overview/">Replicated Classic Native Installer</a></td>
+    <td>EOL</td>
+    <td>2023-12-31*</td>
+    <td>2024-12-31*</td>
+  </tr>
+</table>
+
+*Except for customers who have specifically contracted different dates for the End of Availability and End of Life timelines.
+
+## Supported Replicated Installer Versions
+
+The following table lists the versions of Replicated Embedded Cluster, Replicated KOTS, and Replicated kURL that are supported on each Kubernetes version.
+
+The End of Replicated Support date is the End Of Life (EOL) date for the Kubernetes version. The EOL date for each Kubernetes version is published on the [Releases](https://kubernetes.io/releases/) page in the Kubernetes documentation.
+
+<table>
+  <tr>
+    <th>Kubernetes Version</th>
+    <th>Embedded Cluster Versions</th>
+    <th>KOTS Versions</th>
+    <th>kURL Versions</th>
+    <th>End of Replicated Support</th>
+  </tr>
+  <tr>
+    <td>1.32</td>
+    <td>N/A</td>
+    <td>N/A</td>
+    <td>N/A</td>
+    <td>2026-02-28</td>
+  </tr>
+  <tr>
+    <td>1.31</td>
+    <td>N/A</td>
+    <td>1.117.0 and later</td>
+    <td>v2024.08.26-0 and later</td>
+    <td>2025-10-28</td>
+  </tr>
+  <tr>
+    <td>1.30</td>
+    <td>1.16.0 and later</td>
+    <td>1.109.1 and later</td>
+    <td>v2024.05.03-0 and later</td>
+    <td>2025-06-28</td>
+  </tr>
+  <tr>
+    <td>1.29</td>
+    <td>1.0.0 and later</td>
+    <td>1.105.2 and later</td>
+    <td>v2024.01.02-0 and later</td>
+    <td>2025-02-28</td>
+  </tr>
+</table>
+
+Replicated support for end-customer installations is limited to those installs using a Replicated-provided installer product, such as KOTS, kURL, or Embedded Cluster, available with the [Business or Enterprise plans](https://www.replicated.com/pricing). Replicated support for direct Helm CLI installs or other vendor-provided installers is limited to the successful distribution of the software to the end customer, as well as any issues with the Replicated SDK if included with the installation.
+
+
+The information contained herein is believed to be accurate as of the date of publication, but updates and revisions may be posted periodically and without notice.
+
+Last modified January 2, 2025.
+
+
+---
+
+
+# Vulnerability Patch Policy
+
+
+# Vulnerability Patch Policy
+
+While it’s our goal to distribute vulnerability-free versions of all components, this isn’t always possible.
+Kubernetes and KOTS are made from many components, each authored by different vendors.
+
+The best way to stay ahead of vulnerabilities is to run the latest version and have a strategy to quickly update when a patch is available.
+
+## How We Scan
+
+Our build pipeline uses [Trivy](https://www.aquasec.com/products/trivy/) to scan for and detect known, published vulnerabilities in our images.
+It’s possible that other security scanners will detect a different set of results.
+We commit to patching vulnerabilities according to the timeline below based on the results of our internal scans.
+
+If you or your customer detects a different vulnerability using a different scanner, we encourage you to report it to us in a GitHub issue, a Slack message, or a support issue opened from the Replicated Vendor Portal.
+Our team will evaluate the vulnerability and determine the best course of action.
+
+## Base Images
+
+KOTS images are built on top of Chainguard's open source [Wolfi](https://edu.chainguard.dev/open-source/wolfi/overview/) base image. Wolfi is a Linux undistro that is focused on supply chain security.
+
+KOTS has automation that uses the Chainguard [melange](https://edu.chainguard.dev/open-source/melange/overview/) and [apko](https://edu.chainguard.dev/open-source/apko/overview/) projects to build packages and assemble images on Wolfi. Building and assembling images in this way helps to ensure that any CVEs can be resolved quickly and efficiently.
+
+## Upstream CVE Disclosure
+
+Replicated KOTS, kURL, and Embedded Cluster deliver many upstream Kubernetes and ecosystem components.
+We do not build these packages and rely on the upstream software vendor to distribute patches.
+Our intent is to make any patches available as soon as possible, but we guarantee the following timeline to make upstream patches available after we learn about the vulnerability and a patch is available to us:
+
+| CVE Level | Time to release |
+|-----------|-----------------|
+| Critical | Within 2 weeks |
+| High | Within 60 days |
+| Medium | Within 90 days |
+| Low | Best effort unless risk accepted |
+
+## Notable Upstream CVEs
+
+This section lists CVEs that have yet to be resolved by the upstream maintainers and therefore are not patched in Replicated. This is not an exhaustive list of unpatched upstream CVEs; instead, these are noteworthy CVEs that we have evaluated and on which we offer our opinion to help with your own security reviews. When available, we will apply upstream patches in accordance with our policy described in [Upstream CVE Disclosure](#upstream-cve-disclosure) above. We will update this list after applying any upstream patches.
+
+| CVE ID | Explanation |
+|--------|------------|
+| None | N/A |
+
+## Vulnerability Management Exception Policy
+There might be instances where policy exceptions are required to continue using third-party software with known vulnerabilities in our on-premises products. Some reasons for an exception include:
+
+- Feature breakage or bugs in patched versions
+- Performance issues in patched versions
+- Patched version contains higher severity vulnerabilities
+
+Regardless of the reason, an exception is vetted from a business impact and security standpoint. The business review assesses the overall impact to the product created by the patched, but otherwise problematic, piece of software. The security portion determines if the CVE is applicable in this specific context and if that CVE's impact on the product’s overall security posture is acceptable.
+
+In the event of a vulnerability management exception, a notice is posted containing:
+
+- The impacted product(s)
+- The rationale for the exception
+- The relevant CVE(s)
+- A risk assessment in the product context for each CVE
+
+As subsequent versions of the vulnerable software are released, Replicated continues to research to find a solution that satisfies the business and security requirements of the original exception.
+
+## Known Disclosed Vulnerabilities in our On-Premises Products
+
+| CVE | CVE Summary | Rationale | Additional Reading |
+|-----|-------------|-----------|--------------------|
+| None | N/A | N/A | N/A |
+
+Last modified January 29, 2025.
+
+
+---
+
+
+# Defining Preflight Checks
+
+# Defining Preflight Checks
+
+This topic describes how to define preflight checks in Helm and Kubernetes manifest-based applications. For more information about preflight checks, see [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about).
+
+The information in this topic applies to applications that are installed with Helm or with Replicated KOTS.
+
+## Step 1: Create the Manifest File
+
+You can define preflight checks in a Kubernetes Secret or in a Preflight custom resource. The type of manifest file that you use depends on your application type (Helm or Kubernetes manifest-based) and the installation methods that your application supports (Helm, KOTS v1.101.0 or later, or KOTS v1.100.3 or earlier).
+
+* **Helm Applications**: For Helm applications, see the following guidance:
+
+  * **(Recommended) Helm or KOTS v1.101.0 or Later**: For Helm applications installed with Helm or KOTS v1.101.0 or later, define the preflight checks in a Kubernetes Secret in your Helm chart `templates`. See [Kubernetes Secret](#secret).
+
+  * **KOTS v1.100.3 or Earlier**: For Helm applications installed with KOTS v1.100.3 or earlier, define the preflight checks in a Preflight custom resource. See [Preflight Custom Resource](#preflight-cr).
+
+* **Kubernetes Manifest-Based Applications**: For Kubernetes manifest-based applications, define the preflight checks in a Preflight custom resource. See [Preflight Custom Resource](#preflight-cr).
+
+### Kubernetes Secret {#secret}
+
+For Helm applications installed with Helm or KOTS v1.101.0 or later, define preflight checks in a Kubernetes Secret in your Helm chart `templates`. This allows you to define the preflight spec only once to support running preflight checks in both Helm and KOTS installations.
+
+For a tutorial that demonstrates how to define preflight checks in a Secret in chart `templates` and then run the preflight checks in both Helm and KOTS installations, see [Tutorial: Add Preflight Checks to a Helm Chart](/vendor/tutorial-preflight-helm-setup).
+
+Add the following YAML to a Kubernetes Secret in your Helm chart `templates` directory:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  labels:
+    troubleshoot.sh/kind: preflight
+  name: "{{ .Release.Name }}-preflight-config"
+stringData:
+  preflight.yaml: |
+    apiVersion: troubleshoot.sh/v1beta2
+    kind: Preflight
+    metadata:
+      name: preflight-sample
+    spec:
+      collectors: []
+      analyzers: []
+```
+
+As shown above, the Secret must include the following:
+
+* The label `troubleshoot.sh/kind: preflight`
+* A `stringData` field with a key named `preflight.yaml` so that the preflight binary can use this Secret when it runs from the CLI
+
+### Preflight Custom Resource {#preflight-cr}
+
+Define preflight checks in a Preflight custom resource for the following installation types:
+* Kubernetes manifest-based applications installed with any version of KOTS
+* Helm applications installed with KOTS v1.100.3 and earlier
+  :::note
+  For Helm charts installed with KOTS v1.101.0 and later, Replicated recommends that you define preflight checks in a Secret in the Helm chart `templates` instead of using the Preflight custom resource. See [Kubernetes Secret](#secret) above.
+
+  In KOTS v1.101.0 and later, preflights defined in the Helm chart override the Preflight custom resource used by KOTS. During installation, if KOTS v1.101.0 and later cannot find preflights specified in the Helm chart archive, then KOTS searches for `kind: Preflight` in the root of the release.
+  :::
+
+Add the following YAML to a new file in a release:
+
+```yaml
+apiVersion: troubleshoot.sh/v1beta2
+kind: Preflight
+metadata:
+  name: preflights
+spec:
+  collectors: []
+  analyzers: []
+```
+
+For more information about the Preflight custom resource, see [Preflight and Support Bundle](/reference/custom-resource-preflight).
+
+## Step 2: Define Collectors and Analyzers
+
+This section describes how to define collectors and analyzers for preflight checks based on your application needs. You add the collectors and analyzers that you want to use in the `spec.collectors` and `spec.analyzers` keys in the manifest file that you created.
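+
+For example, the following is a minimal sketch with one analyzer that checks the Kubernetes version using the default `clusterInfo` collector (the version thresholds and messages are illustrative); the sections below describe collectors and analyzers in more detail:
+
+```yaml
+spec:
+  analyzers:
+    - clusterVersion:
+        outcomes:
+          - fail:
+              when: "< 1.25.0"
+              message: The application requires Kubernetes 1.25.0 or later, and recommends 1.27.0.
+          - warn:
+              when: "< 1.27.0"
+              message: The application recommends Kubernetes 1.27.0 or later.
+          - pass:
+              message: The cluster is running a supported version of Kubernetes.
+```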
+
+### Collectors
+
+Collectors gather information from the cluster, the environment, the application, or other sources. Collectors generate output that is then used by the analyzers that you define to generate results for the preflight checks.
+
+The following default collectors are included automatically to gather information about the cluster and cluster resources:
+* [clusterInfo](https://troubleshoot.sh/docs/collect/cluster-info/)
+* [clusterResources](https://troubleshoot.sh/docs/collect/cluster-resources/)
+
+You do not need to manually include the `clusterInfo` or `clusterResources` collectors in the specification. To use only the `clusterInfo` and `clusterResources` collectors, delete the `spec.collectors` key from the preflight specification.
+
+The Troubleshoot open source project includes several additional collectors that you can include in the specification to gather more information from the installation environment. To view all the available collectors, see [All Collectors](https://troubleshoot.sh/docs/collect/all/) in the Troubleshoot documentation.
+
+### Analyzers
+
+Analyzers use the output from the collectors to generate results for the preflight checks, including the criteria for pass, fail, and warn outcomes and custom messages for each outcome.
+
+For example, in a preflight check that checks the version of Kubernetes running in the target cluster, the analyzer can define a fail outcome when the cluster is running a version of Kubernetes earlier than 1.25, with a custom message to the user such as: `The application requires Kubernetes 1.25.0 or later, and recommends 1.27.0`.
+
+The Troubleshoot open source project includes several analyzers that you can include in your preflight check specification. The following are some of the analyzers in the Troubleshoot project that use the default `clusterInfo` or `clusterResources` collectors:
+* [clusterPodStatuses](https://troubleshoot.sh/docs/analyze/cluster-pod-statuses/)
+* [clusterVersion](https://troubleshoot.sh/docs/analyze/cluster-version/)
+* [deploymentStatus](https://troubleshoot.sh/docs/analyze/deployment-status/)
+* [distribution](https://troubleshoot.sh/docs/analyze/distribution/)
+* [nodeResources](https://troubleshoot.sh/docs/analyze/node-resources/)
+* [statefulsetStatus](https://troubleshoot.sh/docs/analyze/stateful-set-status/)
+* [storageClass](https://troubleshoot.sh/docs/analyze/storage-class/)
+
+To view all the available analyzers, see the [Analyze](https://troubleshoot.sh/docs/analyze/) section of the Troubleshoot documentation.
+
+### Block Installation with Required (Strict) Preflights {#strict}
+
+For applications installed with KOTS, you can set any preflight analyzer to `strict: true`. When `strict: true` is set, any `fail` outcomes for the analyzer block the deployment of the release. For a minimal example of where `strict: true` sits on an analyzer, see the sketch at the end of this topic.
+
+:::note
+Strict preflight analyzers are ignored if the `exclude` property is also included and evaluates to `true`. See [exclude](https://troubleshoot.sh/docs/analyze/#exclude) in the Troubleshoot documentation.
+:::
+
+### Examples
+
+For common examples of collectors and analyzers used in preflight checks, see [Examples of Preflight Specs](/vendor/preflight-examples).
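+
+As referenced in [Block Installation with Required (Strict) Preflights](#strict) above, the following minimal sketch shows where `strict: true` sits on an analyzer (the CPU threshold and messages are illustrative):
+
+```yaml
+spec:
+  analyzers:
+    - nodeResources:
+        checkName: Total CPU cores
+        # A fail outcome on a strict analyzer blocks deployment of the release
+        strict: true
+        outcomes:
+          - fail:
+              when: "sum(cpuCapacity) < 4"
+              message: The cluster must have at least 4 CPU cores.
+          - pass:
+              message: The cluster has at least 4 CPU cores.
+```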
+ +--- + + +# Example Preflight Specs + +import HttpSecret from "../partials/preflights/_http-requests-secret.mdx" +import HttpCr from "../partials/preflights/_http-requests-cr.mdx" +import MySqlSecret from "../partials/preflights/_mysql-secret.mdx" +import MySqlCr from "../partials/preflights/_mysql-cr.mdx" +import K8sVersionSecret from "../partials/preflights/_k8s-version-secret.mdx" +import K8sVersionCr from "../partials/preflights/_k8s-version-cr.mdx" +import K8sDistroSecret from "../partials/preflights/_k8s-distro-secret.mdx" +import K8sDistroCr from "../partials/preflights/_k8s-distro-cr.mdx" +import NodeReqSecret from "../partials/preflights/_node-req-secret.mdx" +import NodeReqCr from "../partials/preflights/_node-req-cr.mdx" +import NodeCountSecret from "../partials/preflights/_node-count-secret.mdx" +import NodeCountCr from "../partials/preflights/_node-count-cr.mdx" +import NodeMemSecret from "../partials/preflights/_node-mem-secret.mdx" +import NodeMemCr from "../partials/preflights/_node-mem-cr.mdx" +import NodeStorageClassSecret from "../partials/preflights/_node-storage-secret.mdx" +import NodeStorageClassCr from "../partials/preflights/_node-storage-cr.mdx" +import NodeEphemStorageSecret from "../partials/preflights/_node-ephem-storage-secret.mdx" +import NodeEphemStorageCr from "../partials/preflights/_node-ephem-storage-cr.mdx" +import NodeCpuSecret from "../partials/preflights/_node-cpu-secret.mdx" +import NodeCpuCr from "../partials/preflights/_node-cpu-cr.mdx" +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Example Preflight Specs + +This section includes common examples of preflight check specifications. For more examples, see the [Troubleshoot example repository](https://github.com/replicatedhq/troubleshoot/tree/main/examples/preflight) in GitHub. + +## Check HTTP or HTTPS Requests from the Cluster + +The examples below use the `http` collector and the `textAnalyze` analyzer to check that an HTTP request to the Slack API at `https://api.slack.com/methods/api.test` made from the cluster returns a successful response of `"status": 200,`. + +For more information, see [HTTP](https://troubleshoot.sh/docs/collect/http/) and [Regular Expression](https://troubleshoot.sh/docs/analyze/regex/) in the Troubleshoot documentation. + +<Tabs> + <TabItem value="secret" label="Kubernetes Secret" default> + <HttpSecret/> + </TabItem> + <TabItem value="custom-resource" label="Preflight Custom Resource"> + <HttpCr/> + <p>The following shows how the <code>pass</code> outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:</p> + <img alt="Preflight checks in Admin Console showing pass message" src="/images/preflight-http-pass.png"/> + <a href="/images/preflight-http-pass.png">View a larger version of this image</a> + </TabItem> +</Tabs> + +## Check Kubernetes Version + +The examples below use the `clusterVersion` analyzer to check the version of Kubernetes running in the cluster. The `clusterVersion` analyzer uses data from the default `clusterInfo` collector. The `clusterInfo` collector is automatically included. + +For more information, see [Cluster Version](https://troubleshoot.sh/docs/analyze/cluster-version/) and [Cluster Info](https://troubleshoot.sh/docs/collect/cluster-info/) in the Troubleshoot documentation. 
+
+<Tabs>
+  <TabItem value="secret" label="Kubernetes Secret" default>
+    <K8sVersionSecret/>
+  </TabItem>
+  <TabItem value="custom-resource" label="Preflight Custom Resource">
+    <K8sVersionCr/>
+    <p>The following shows how the <code>warn</code> outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:</p>
+    <img alt="Preflight checks in Admin Console showing warning message" src="/images/preflight-k8s-version-warn.png"/>
+    <a href="/images/preflight-k8s-version-warn.png">View a larger version of this image</a>
+  </TabItem>
+</Tabs>
+
+## Check Kubernetes Distribution
+
+The examples below use the `distribution` analyzer to check the Kubernetes distribution of the cluster. The `distribution` analyzer uses data from the default `clusterInfo` collector. The `clusterInfo` collector is automatically included.
+
+For more information, see [Cluster Info](https://troubleshoot.sh/docs/collect/cluster-info/) and [Distribution](https://troubleshoot.sh/docs/analyze/distribution/) in the Troubleshoot documentation.
+
+<Tabs>
+  <TabItem value="secret" label="Kubernetes Secret" default>
+    <K8sDistroSecret/>
+  </TabItem>
+  <TabItem value="custom-resource" label="Preflight Custom Resource">
+    <K8sDistroCr/>
+    <p>The following shows how the <code>pass</code> outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:</p>
+    <img alt="Preflight checks in Admin Console showing pass message" src="/images/preflight-k8s-distro.png"/>
+    <a href="/images/preflight-k8s-distro.png">View a larger version of this image</a>
+  </TabItem>
+</Tabs>
+
+## Check MySQL Version Using Template Functions
+
+The examples below use the `mysql` collector and the `mysql` analyzer to check the version of MySQL running in the cluster.
+
+For more information, see [Collect > MySQL](https://troubleshoot.sh/docs/collect/mysql/) and [Analyze > MySQL](https://troubleshoot.sh/docs/analyze/mysql/) in the Troubleshoot documentation.
+
+<Tabs>
+  <TabItem value="secret" label="Kubernetes Secret" default>
+    <p>This example uses Helm template functions to render the credentials and connection details for the MySQL server that were supplied by the user. Additionally, it uses Helm template functions to create a conditional statement so that the MySQL collector and analyzer are included in the preflight checks only when MySQL is deployed, as indicated by a <code>.Values.global.mysql.enabled</code> field evaluating to true.</p>
+    <p>For more information about using Helm template functions to access values from the values file, see <a href="https://helm.sh/docs/chart_template_guide/values_files/">Values Files</a>.</p>
+    <MySqlSecret/>
+  </TabItem>
+  <TabItem value="custom-resource" label="Preflight Custom Resource">
+    <p>This example uses KOTS template functions in the Config context to render the credentials and connection details for the MySQL server that were supplied by the user in the Replicated Admin Console <strong>Config</strong> page. Replicated recommends using a template function for the URI, as shown in the example below, to avoid exposing sensitive information.
For more information about template functions, see <a href="/reference/template-functions-about">About Template Functions</a>.</p>
+    <p>This example also uses an analyzer with <code>strict: true</code>, which prevents installation from continuing if the preflight check fails.</p>
+    <MySqlCr/>
+    <p>The following shows how a <code>fail</code> outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade when <code>strict: true</code> is set for the analyzer:</p>
+    <img alt="Strict preflight checks in Admin Console showing fail message" src="/images/preflight-mysql-fail-strict.png"/>
+    <a href="/images/preflight-mysql-fail-strict.png">View a larger version of this image</a>
+  </TabItem>
+</Tabs>
+
+## Check Node Memory
+
+The examples below use the `nodeResources` analyzer to check the amount of memory available on the nodes in the cluster. The `nodeResources` analyzer uses data from the default `clusterResources` collector. The `clusterResources` collector is automatically included.
+
+For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) in the Troubleshoot documentation.
+
+<Tabs>
+  <TabItem value="secret" label="Kubernetes Secret" default>
+    <NodeMemSecret/>
+  </TabItem>
+  <TabItem value="custom-resource" label="Preflight Custom Resource">
+    <NodeMemCr/>
+    <p>The following shows how a <code>warn</code> outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:</p>
+    <img alt="Preflight checks in Admin Console showing warn message" src="/images/preflight-node-memory-warn.png"/>
+    <a href="/images/preflight-node-memory-warn.png">View a larger version of this image</a>
+  </TabItem>
+</Tabs>
+
+## Check Node Storage Class Availability
+
+The examples below use the `storageClass` analyzer to check that a required storage class is available in the nodes in the cluster. The `storageClass` analyzer uses data from the default `clusterResources` collector. The `clusterResources` collector is automatically included.
+
+For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Storage Class](https://troubleshoot.sh/docs/analyze/storage-class/) in the Troubleshoot documentation.
+
+<Tabs>
+  <TabItem value="secret" label="Kubernetes Secret" default>
+    <NodeStorageClassSecret/>
+  </TabItem>
+  <TabItem value="custom-resource" label="Preflight Custom Resource">
+    <NodeStorageClassCr/>
+    <p>The following shows how a <code>fail</code> outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:</p>
+    <img alt="Preflight checks in Admin Console showing fail message" src="/images/preflight-storageclass-fail.png"/>
+    <a href="/images/preflight-storageclass-fail.png">View a larger version of this image</a>
+  </TabItem>
+</Tabs>
+
+## Check Node Ephemeral Storage
+
+The examples below use the `nodeResources` analyzer to check the ephemeral storage available in the nodes in the cluster. The `nodeResources` analyzer uses data from the default `clusterResources` collector. The `clusterResources` collector is automatically included.
+
+For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) in the Troubleshoot documentation.
+
+<Tabs>
+  <TabItem value="secret" label="Kubernetes Secret" default>
+    <NodeEphemStorageSecret/>
+  </TabItem>
+  <TabItem value="custom-resource" label="Preflight Custom Resource">
+    <NodeEphemStorageCr/>
+    <p>The following shows how a <code>pass</code> outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:</p>
+    <img alt="Preflight checks in Admin Console showing pass message" src="/images/preflight-ephemeral-storage-pass.png"/>
+    <a href="/images/preflight-ephemeral-storage-pass.png">View a larger version of this image</a>
+  </TabItem>
+</Tabs>
+
+## Check Requirements Are Met By At Least One Node
+
+The examples below use the `nodeResources` analyzer with filters to check that the requirements for memory, CPU cores, and architecture are met by at least one node in the cluster. The `nodeResources` analyzer uses data from the default `clusterResources` collector. The `clusterResources` collector is automatically included.
+
+For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) in the Troubleshoot documentation.
+
+<Tabs>
+  <TabItem value="secret" label="Kubernetes Secret" default>
+    <NodeReqSecret/>
+  </TabItem>
+  <TabItem value="custom-resource" label="Preflight Custom Resource">
+    <NodeReqCr/>
+    <p>The following shows how the <code>fail</code> outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:</p>
+    <img alt="Preflight checks in Admin Console showing fail message" src="/images/preflight-node-filters-faill.png"/>
+    <a href="/images/preflight-node-filters-faill.png">View a larger version of this image</a>
+  </TabItem>
+</Tabs>
+
+## Check Total CPU Cores Across Nodes
+
+The examples below use the `nodeResources` analyzer to check the total number of CPU cores across the nodes in the cluster. The `nodeResources` analyzer uses data from the default `clusterResources` collector. The `clusterResources` collector is automatically included.
+
+For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) in the Troubleshoot documentation.
+
+<Tabs>
+  <TabItem value="secret" label="Kubernetes Secret" default>
+    <NodeCpuSecret/>
+  </TabItem>
+  <TabItem value="custom-resource" label="Preflight Custom Resource">
+    <NodeCpuCr/>
+    <p>The following shows how the <code>pass</code> outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:</p>
+    <img alt="Preflight checks in Admin Console showing pass message" src="/images/preflight-cpu-pass.png"/>
+    <a href="/images/preflight-cpu-pass.png">View a larger version of this image</a>
+  </TabItem>
+</Tabs>
+
+---
+
+
+# Customizing Host Preflight Checks for kURL
+
+# Customizing Host Preflight Checks for kURL
+
+This topic provides information about how to customize host preflight checks for installations with Replicated kURL. For information about the default host preflight checks that run for installations with Replicated Embedded Cluster, see [About Host Preflight Checks](/vendor/embedded-using#about-host-preflight-checks) in _Using Embedded Cluster_.
+ +## About Host Preflight Checks +You can include host preflight checks with kURL to verify that infrastructure requirements are met for: + +- Kubernetes +- kURL add-ons +- Your application + +This helps to ensure successful installation and the ongoing health of the cluster. + +While host preflights are intended to ensure requirements are met for running the cluster, you can also use them to codify some of your application requirements so that users get feedback even earlier in the installation process, rather than waiting to run preflights after the cluster is already installed. For more information about application checks, collectors, and analyzers, see [About Preflight Checks and Support Bundles](preflight-support-bundle-about). + +Default host preflight checks verify conditions such as operating system and disk usage. Default host preflight failures block the installation from continuing and exit with a non-zero return code. Users can then update their environment and run the kURL installation script again to re-run the host preflight checks. + +Host preflight checks run automatically. The default host preflight checks that run can vary, depending on whether the installation is new, an upgrade, joining a node, or an air gap installation. Additionally, some checks only run when certain add-ons are enabled in the installer. For a complete list of default host preflight checks, see [Default Host Preflights](https://kurl.sh/docs/install-with-kurl/host-preflights#default-host-preflights) in the kURL documentation. + +There are general kURL host preflight checks that run with all installers, as well as host preflight checks included with certain add-ons. Customizations include the ability to: + + - Bypass failures + - Block an installation for warnings + - Exclude certain preflights under specific conditions, such as when a particular license entitlement is enabled + - Skip the default host preflight checks and run only custom checks + - Add custom checks to the default host preflight checks + +For more information about customizing host preflights, see [Customize Host Preflight Checks](#customize-host-preflight-checks). + +## Customize Host Preflight Checks + +The default host preflights run automatically as part of your kURL installation. You can disable the default host preflight checks entirely, add more restrictive custom checks that run alongside the defaults, or replace the defaults with a fully custom specification. You can also customize the outcomes to enforce warnings or ignore failures. + +### Add Custom Preflight Checks to the Defaults + +To run customized host preflight checks in addition to the default host preflight checks, add a `hostPreflights` field to the `kurl` field in your Installer manifest. Under the `hostPreflights` field, add a host preflight specification (`kind: HostPreflight`) with your customizations. You only need to specify your customizations because the default host preflights run automatically. + +Customized host preflight checks run in addition to the default host preflight checks, if the default host preflight checks are enabled. + +If you only want to make the default host preflight checks more restrictive, add your more restrictive host preflight checks to `kurl.hostPreflights`, and do not set `excludeBuiltinHostPreflights`.
For example, if your application requires 6 CPUs but the default host preflight check requires 4 CPUs, you can simply add a custom host preflight check for 6 CPUs, since the default host preflight must pass if the more restrictive custom check passes. + +The following example shows customized `kurl` host preflight checks for: + + - An application that requires more CPUs than the default + - Accessing a website that is critical to the application + +```yaml +apiVersion: "cluster.kurl.sh/v1beta1" +kind: "Installer" +metadata: + name: "latest" +spec: + kurl: + hostPreflights: + apiVersion: troubleshoot.sh/v1beta2 + kind: HostPreflight + spec: + collectors: + - cpu: {} + - http: + collectorName: Can Access A Website + get: + url: https://myFavoriteWebsite.com + analyzers: + - cpu: + checkName: Number of CPU check + outcomes: + - fail: + when: "count < 4" + message: This server has less than 4 CPU cores + - warn: + when: "count < 6" + message: This server has less than 6 CPU cores + - pass: + message: This server has at least 6 CPU cores + - http: + checkName: Can Access A Website + collectorName: Can Access A Website + outcomes: + - warn: + when: "error" + message: Error connecting to https://myFavoriteWebsite.com + - pass: + when: "statusCode == 200" + message: Connected to https://myFavoriteWebsite.com +``` + +### Customize the Default Preflight Checks + +To customize the default host preflights: + +1. Disable the default host preflight checks using `excludeBuiltinHostPreflights: true`. +1. Copy the default `host-preflights.yaml` specification for kURL from [host-preflights.yaml](https://github.com/replicatedhq/kURL/blob/main/pkg/preflight/assets/host-preflights.yaml) in the kURL repository. +1. Copy the default `host-preflight.yaml` specification for any and all add-ons that are included in your specification and have default host preflights. For links to the add-on YAML files, see [Finding the Add-on Host Preflight Checks](https://kurl.sh/docs/create-installer/host-preflights/#finding-the-add-on-host-preflight-checks) in the kURL documentation. +1. Merge the copied host preflight specifications into one host preflight specification, and paste it to the `kurl.hostPreflights` field in the Installer YAML in the Vendor Portal. +1. Edit the defaults as needed. + +### Ignore or Enforce Warnings and Failures + +Set either of the following flags to customize the outcome of your host preflight checks: + +<table> +<tr> + <th width="30%">Flag: Value</th> + <th width="70%">Description</th> +</tr> +<tr> + <td><code>hostPreflightIgnore: true</code></td> + <td>Ignores host preflight failures and warnings. The installation proceeds regardless of host preflight outcomes.</td> +</tr> +<tr> + <td><code>hostPreflightEnforceWarnings: true</code></td> + <td>Blocks an installation if the results include a warning.</td> +</tr> +</table> + +### Disable Host Preflight Checks + +To disable the default host preflight checks for Kubernetes and all included add-ons, add the `kurl` field to your Installer manifest and add `kurl.excludeBuiltinHostPreflights: true`. In this case, no host preflight checks are run. + +`excludeBuiltinHostPreflights` is an aggregate flag, so setting it to `true` disables the default host preflights for Kubernetes and all included add-ons. 
+ +**Example:** + + ```yaml + apiVersion: "cluster.kurl.sh/v1beta1" + kind: "Installer" + metadata: + name: "latest" + spec: + kurl: + excludeBuiltinHostPreflights: true + ``` + +## Example of Customized Host Preflight Checks + +The following example shows: + +- Default host preflight checks are disabled +- Customized host preflight checks run +- The installation is blocked if there is a warning + +```yaml +apiVersion: "cluster.kurl.sh/v1beta1" +kind: "Installer" +metadata: + name: "latest" +spec: + kurl: + excludeBuiltinHostPreflights: true + hostPreflightEnforceWarnings: true + hostPreflights: + apiVersion: troubleshoot.sh/v1beta2 + kind: HostPreflight + spec: + collectors: + - cpu: {} + - http: + collectorName: Can Access A Website + get: + url: https://myFavoriteWebsite.com + analyzers: + - cpu: + checkName: Number of CPU check + outcomes: + - fail: + when: "count < 4" + message: This server has less than 4 CPU cores + - warn: + when: "count < 6" + message: This server has less than 6 CPU cores + - pass: + message: This server has at least 6 CPU cores + - http: + checkName: Can Access A Website + collectorName: Can Access A Website + outcomes: + - warn: + when: "error" + message: Error connecting to https://myFavoriteWebsite.com + - pass: + when: "statusCode == 200" + message: Connected to https://myFavoriteWebsite.com + ``` + +--- + + +# Running Preflight Checks for Helm Installations + +# Running Preflight Checks for Helm Installations + +This topic describes how to use the preflight kubectl plugin to run preflight checks for applications installed with the Helm CLI. + +## Overview + +For applications installed with the Helm CLI, your users can optionally run preflight checks using the open source preflight kubectl plugin before they run `helm install`. + +The preflight plugin requires a preflight check specification as input. For Helm chart-based applications, the specification is defined in a Secret in the Helm chart `templates` directory. For information about how to configure preflight checks for your application, see [Defining Preflight Checks](preflight-defining). + +To run preflight checks that are defined in your application Helm chart templates, your users run `helm template` to render the Helm chart templates and then provide the result to the preflight plugin as stdin. The preflight plugin automatically filters the stream of stdout from the `helm template` command to find and run any preflight specifications. + +## Prerequisite + +The preflight kubectl plugin is required to run preflight checks for Helm CLI installations. The preflight plugin is a client-side utility that adds a single binary to the path. + +To install the preflight plugin using krew, run the following command: + +``` +curl https://krew.sh/preflight | bash +``` +For information about the preflight plugin, including additional installation options, see [Getting Started](https://troubleshoot.sh/docs/) in the open source Troubleshoot documentation. + +## Command + +``` +helm template HELM_CHART | kubectl preflight - +``` + +Where `HELM_CHART` is the Helm chart that contains the preflight specification. + +For all available options with this command, see [Run Preflight Checks using the CLI](https://troubleshoot.sh/docs/preflight/cli-usage/#options) in the open source Troubleshoot documentation.
+ +**Examples:** + +``` +helm template gitea-1.0.6.tgz | kubectl preflight - +``` +``` +helm template gitea | kubectl preflight - +``` +``` +helm template oci://myregistry.io/org/examplechart | kubectl preflight - +``` + +## Run Preflight Checks from a Release + +When you promote a release that contains one or more Helm charts, the Helm charts are automatically pushed to the Replicated registry. To run preflight checks before installing a release, your users must first log in to the Replicated registry where they can access your application Helm chart containing the preflight specification. + +To run preflight checks from a release before installation: + +1. In the [Vendor Portal](https://vendor.replicated.com/apps/gitea-boxer/customers), go to the **Customers** page. Click on the name of the target customer. + +1. On the landing page for the customer, click **Helm install instructions**. + + The **Helm install instructions** dialog opens: + + <img alt="Helm install instructions dialog with preflight checks" src="/images/helm-install-preflights.png" width="550px"/> + + [View a larger version of this image](/images/helm-install-preflights.png) + +1. Run the commands provided in the dialog: + + 1. Run the first command to log in to the Replicated registry: + + ``` + helm registry login registry.replicated.com --username USERNAME --password PASSWORD + ``` + + Where: + - `USERNAME` is the customer's email address. + - `PASSWORD` is the customer's license ID. + + **Example:** + ``` + helm registry login registry.replicated.com --username example@companyname.com --password 1234abcd + ``` + + 1. Run the second command to install the preflight kubectl plugin with krew: + + ``` + curl https://krew.sh/preflight | bash + ``` + + 1. Run the third command to run preflight checks: + + ``` + helm template oci://registry.replicated.com/APP_SLUG/CHANNEL/CHART | kubectl preflight - + ``` + + Where: + - `APP_SLUG` is the name of the application. + - `CHANNEL` is the lowercased name of the release channel. + - `CHART` is the name of the Helm chart. + + **Examples:** + + ``` + helm template oci://registry.replicated.com/gitea-app/unstable/gitea | kubectl preflight - + ``` + ``` + helm template oci://registry.replicated.com/gitea-app/unstable/gitea --values values.yaml | kubectl preflight - + ``` + + For all available options with this command, see [Run Preflight Checks using the CLI](https://troubleshoot.sh/docs/preflight/cli-usage/#options) in the open source Troubleshoot documentation. + + 1. (Optional) Run the fourth command to install the application. For more information, see [Installing with Helm](install-with-helm). + +## (Optional) Save Preflight Check Results + +The output of the preflight plugin shows the success, warning, or fail message for each preflight check, depending on how the checks were configured. You can ask your users to send you the results of the preflight checks if needed.
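+ +In addition to pressing `s` in the interactive results view (described below), users can capture the results non-interactively and redirect them to a file. The following is a sketch that assumes the Troubleshoot plugin's `--interactive=false` flag; check the Troubleshoot CLI documentation for the options available in your version: + +```bash +# Render the chart, run preflight checks without the interactive UI, +# and save the plain-text results to a file to share with the vendor +helm template gitea-1.0.6.tgz | kubectl preflight --interactive=false - > preflight-results.txt +```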
+ +To save the results of preflight checks to a `.txt` file, users can press `s` when viewing the results from the CLI, as shown in the example below: + +![Save output dialog](/images/helm-preflight-save-output.png) + +[View a larger version of this image](/images/helm-preflight-save-output.png) + + +--- + + +# About Preflight Checks and Support Bundles + +import Overview from "../partials/preflights/_preflights-sb-about.mdx" + +# About Preflight Checks and Support Bundles + +This topic provides an introduction to preflight checks and support bundles, which are provided by the [Troubleshoot](https://troubleshoot.sh/) open source project. + +## Overview + +<Overview/> + +Preflight checks and support bundles consist of _collectors_, _redactors_, and _analyzers_ that are defined in a YAML specification. When preflight checks or support bundles are executed, data is collected, redacted, then analyzed to provide insights to users, as illustrated in the following diagram: + +![Troubleshoot Workflow Diagram](/images/troubleshoot-workflow-diagram.png) + +[View a larger version of this image](/images/troubleshoot-workflow-diagram.png) + +For more information about each step in this workflow, see the sections below. + +### Collect + +During the collection phase, _collectors_ gather information from the cluster, the environment, the application, and other sources. + +The data collected depends on the types of collectors that are included in the preflight or support bundle specification. For example, the Troubleshoot project provides collectors that can gather information about the Kubernetes version that is running in the cluster, information about database servers, logs from pods, and more. + +For more information, see the [Collect](https://troubleshoot.sh/docs/collect/) section in the Troubleshoot documentation. + +### Redact + +During the redact phase, _redactors_ censor sensitive customer information from the data before analysis. By default, the following information is automatically redacted: + +- Passwords +- API token environment variables in JSON +- AWS credentials +- Database connection strings +- URLs that include usernames and passwords + +For Replicated KOTS installations, it is also possible to add custom redactors to redact additional data. For more information, see the [Redact](https://troubleshoot.sh/docs/redact/) section in the Troubleshoot documentation. + +### Analyze + +During the analyze phase, _analyzers_ use the redacted data to provide insights to users. + +For preflight checks, analyzers define the pass, fail, and warning outcomes, and can also display custom messages to the user. For example, you can define a preflight check that fails if the cluster's Kubernetes version does not meet the minimum version that your application supports. + +For support bundles, analyzers can be used to identify potential problems and share relevant troubleshooting guidance with users. Additionally, when a support bundle is uploaded to the Vendor Portal, it is extracted and automatically analyzed. The goal of analyzers in support bundles is to surface known issues or hints of what might be a problem to make troubleshooting easier. + +For more information, see the [Analyze](https://troubleshoot.sh/docs/analyze/) section in the Troubleshoot documentation. + +## Preflight Checks + +This section provides an overview of preflight checks, including how preflights are defined and run.
+ +### Overview + +Preflight checks let you define requirements for the cluster where your application is installed. When run, preflight checks provide clear feedback to your customer about any missing requirements or incompatibilities in the cluster before they install or upgrade your application. For KOTS installations, preflight checks can also be used to block the deployment of the application if one or more requirements are not met. + +Thorough preflight checks provide increased confidence that an installation or upgrade will succeed and help prevent support escalations. + +### About Host Preflights {#host-preflights} + +_Host preflight checks_ automatically run during [Replicated Embedded Cluster](/vendor/embedded-overview) and [Replicated kURL](/vendor/kurl-about) installations on a VM or bare metal server. The purpose of host preflight checks is to verify that the user's installation environment meets the requirements of the Embedded Cluster or kURL installer, such as checking the number of CPU cores in the system, available disk space, and memory usage. If any of the host preflight checks fail, installation is blocked and a message describing the failure is displayed. + +Host preflight checks are separate from any application-specific preflight checks that are defined in the release, which run in the Admin Console before the application is deployed with KOTS. Both Embedded Cluster and kURL have default host preflight checks that are specific to the requirements of the given installer. For kURL installations, it is possible to customize the default host preflight checks. + +For more information about the default Embedded Cluster host preflight checks, see [Host Preflight Checks](/vendor/embedded-using#about-host-preflight-checks) in _Using Embedded Cluster_. + +For more information about kURL host preflight checks, including information about how to customize the defaults, see [Customizing Host Preflight Checks for kURL](/vendor/preflight-host-preflights). + +### Defining Preflights + +To add preflight checks for your application, create a Preflight YAML specification that defines the collectors and analyzers that you want to include. + +For information about how to add preflight checks to your application, including examples, see [Defining Preflight Checks](preflight-defining). + +### Blocking Installation with Required (Strict) Preflights + +For applications installed with KOTS, it is possible to block the deployment of a release if a preflight check fails. This is helpful when it is necessary to prevent an installation or upgrade from continuing unless a given requirement is met. + +You can add required preflight checks for an application by including `strict: true` for the target analyzer in the preflight specification. For more information, see [Block Installation with Required Preflights](preflight-defining#strict) in _Defining Preflight Checks_. + +### Running Preflights + +This section describes how users can run preflight checks for KOTS and Helm installations. + +#### Replicated Installations + +For Replicated installations with Embedded Cluster, KOTS, or kURL, preflight checks run automatically as part of the installation process. The results of the preflight checks are displayed either in the KOTS Admin Console or in the KOTS CLI, depending on the installation method. + +Additionally, users can access preflight checks from the Admin Console after installation to view their results and optionally re-run the checks. 
+ +The following shows an example of the results of preflight checks displayed in the Admin Console during installation: + +![Preflight results in Admin Console](/images/preflight-warning.png) + +[View a larger version of this image](/images/preflight-warning.png) + +#### Helm Installations + +For installations with Helm, the preflight kubectl plugin is required to run preflight checks. The preflight plugin is a client-side utility that adds a single binary to the path. For more information, see [Getting Started](https://troubleshoot.sh/docs/) in the Troubleshoot documentation. + +Users can optionally run preflight checks before they run `helm install`. The results of the preflight checks are then displayed through the CLI, as shown in the example below: + +![Save output dialog](/images/helm-preflight-save-output.png) + +[View a larger version of this image](/images/helm-preflight-save-output.png) + +For more information, see [Running Preflight Checks for Helm Installations](preflight-running). + +## Support Bundles + +This section provides an overview of support bundles, including how support bundles are customized and generated. + +### Overview + +Support bundles collect and analyze troubleshooting data from customer environments, helping both users and support teams diagnose problems with application deployments. + +Support bundles can collect a variety of important cluster-level data from customer environments, such as: +* Pod logs +* Node resources and status +* The status of replicas in a Deployment +* Cluster information +* Resources deployed to the cluster +* The history of Helm releases installed in the cluster + +Support bundles can also be used for more advanced use cases, such as checking that a command successfully executes in a pod in the cluster, or that an HTTP request returns a successful response. + +Support bundles then use the data collected to provide insights to users on potential problems or suggested troubleshooting steps. The troubleshooting data collected and analyzed by support bundles not only helps users to self-resolve issues with their application deployment, but also reduces the time support teams need to resolve requests by ensuring they have access to all the information they need up front. + +### About Host Support Bundles + +For installations on VMs or bare metal servers with [Replicated Embedded Cluster](/vendor/embedded-overview) or [Replicated kURL](/vendor/kurl-about), it is possible to generate a support bundle that includes host-level information to help troubleshoot failures related to host configuration like DNS, networking, or storage problems. + +For Embedded Cluster installations, a default spec can be used to generate support bundles that include cluster- and host-level information. See [Generating Host Bundles for Embedded Cluster](/vendor/support-bundle-embedded). + +For kURL installations, vendors can customize a host support bundle spec for their application. See [Generating Host Bundles for kURL](/vendor/support-host-support-bundles). + +### Customizing Support Bundles + +To enable support bundles for your application, add a support bundle YAML specification to a release. An empty support bundle specification automatically includes several default collectors and analyzers. You can also optionally customize the support bundle specification by adding, removing, or editing collectors and analyzers. + +For more information, see [Adding and Customizing Support Bundles](support-bundle-customizing).
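+ +For reference, the following is a minimal sketch of a support bundle specification using the Troubleshoot `SupportBundle` custom resource. Even with an empty `spec`, the default collectors and analyzers are included when the bundle is generated (the metadata name is illustrative): + +```yaml +apiVersion: troubleshoot.sh/v1beta2 +kind: SupportBundle +metadata: +  name: example +spec: {} +```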
+ +### Generating Support Bundles + +Users generate support bundles as `tar.gz` files from the command line, using the support-bundle kubectl plugin. Your customers can share their support bundles with your team by sending you the resulting `tar.gz` file. + +KOTS users can also generate and share support bundles from the KOTS Admin Console. + +For more information, see [Generating Support Bundles](support-bundle-generating). + +--- + + +# About the Replicated Proxy Registry + +# About the Replicated Proxy Registry + +This topic describes how the Replicated proxy registry can be used to grant proxy access to your application's private images or allow pull-through access to public images. + +## Overview + +If your application images are available in a private image registry exposed to the internet such as Docker Hub or Amazon Elastic Container Registry (ECR), then the Replicated proxy registry can grant proxy, or _pull-through_, access to the images without exposing registry credentials to your customers. When you use the proxy registry, you do not have to modify the process that you already use to build and push images to deploy your application. + +To grant proxy access, the proxy registry uses the customer licenses that you create in the Replicated Vendor Portal. This allows you to revoke a customer’s ability to pull private images by editing their license, rather than having to manage image access through separate identity or authentication systems. For example, when a trial license expires, the customer's ability to pull private images is automatically revoked. + +The following diagram demonstrates how the proxy registry pulls images from your external registry, and how deployed instances of your application pull images from the proxy registry: + +![Proxy registry workflow diagram](/images/private-registry-diagram.png) + +[View a larger version of this image](/images/private-registry-diagram-large.png) + +## About Enabling the Proxy Registry + +The proxy registry requires read-only credentials to your private registry to access your application images. See [Connecting to an External Registry](/vendor/packaging-private-images). + +After connecting your registry, the steps to enable the proxy registry vary depending on your application deployment method. For more information, see: +* [Using the Proxy Registry with KOTS Installations](/vendor/private-images-kots) +* [Using the Proxy Registry with Helm Installations](/vendor/helm-image-registry) + +## About Allowing Pull-Through Access of Public Images + +Using the Replicated proxy registry to grant pull-through access to public images can simplify network access requirements for your customers, as they only need to whitelist a single domain (either `proxy.replicated.com` or your custom domain) instead of multiple registry domains. + +For more information about how to pull public images through the proxy registry, see [Connecting to a Public Registry through the Proxy Registry](/vendor/packaging-public-images). + +--- + + +# Using the Proxy Registry with KOTS Installations + +import Deprecated from "../partials/helm/_replicated-deprecated.mdx" +import StepCreds from "../partials/proxy-service/_step-creds.mdx" +import StepCustomDomain from "../partials/proxy-service/_step-custom-domain.mdx" + +# Using the Proxy Registry with KOTS Installations + +This topic describes how to use the Replicated proxy registry with applications deployed with Replicated KOTS.
+ +## Overview + +Replicated KOTS automatically creates the required image pull secret for accessing the Replicated proxy registry during application deployment. When possible, KOTS also automatically rewrites image names in the application manifests to the location of the image at `proxy.replicated.com` or your custom domain. + +### Image Pull Secret + +During application deployment, KOTS automatically creates an `imagePullSecret` with `type: kubernetes.io/dockerconfigjson` that is based on the customer license. This secret is used to authenticate with the proxy registry and grant proxy access to private images. + +For information about how Kubernetes uses the `kubernetes.io/dockerconfigjson` Secret type to authenticate to a private image registry, see [Pull an Image from a Private Registry](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) in the Kubernetes documentation. + +### Image Location Patching (Standard Manifests and HelmChart v1) + +For applications packaged with standard Kubernetes manifests (or Helm charts deployed with the [HelmChart v1](/reference/custom-resource-helmchart) custom resource), KOTS automatically patches image names to the location of the image at `proxy.replicated.com` or your custom domain during deployment. If KOTS receives a 401 response when attempting to load image manifests using the image reference from the PodSpec, it assumes that this is a private image that must be proxied through the proxy registry. + +KOTS uses Kustomize to patch the `midstream/kustomization.yaml` file to change the image name during deployment to reference the proxy registry. For example, a PodSpec for a Deployment references a private image hosted at `quay.io/my-org/api:v1.0.1`: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: example +spec: + template: + spec: + containers: + - name: api + image: quay.io/my-org/api:v1.0.1 +``` + +When this application is deployed, KOTS detects that it cannot access +the image at quay.io. So, it creates a patch in the `midstream/kustomization.yaml` +file that changes the image name in all manifest files for the application. This causes the container runtime in the cluster to use the proxy registry to pull the images, using the license information provided to KOTS for authentication. + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +bases: +- ../../base +images: +- name: quay.io/my-org/api:v1.0.1 + newName: proxy.replicated.com/proxy/my-kots-app/quay.io/my-org/api +``` + +## Enable the Proxy Registry + +This section describes how to enable the proxy registry for applications deployed with KOTS, including how to ensure that image names are rewritten and that the required image pull secret is provided. + +To enable the proxy registry: + +1. <StepCreds/> + +1. <StepCustomDomain/> + +1. Rewrite image names to the location of the image at `proxy.replicated.com` or your custom domain. Also, ensure that the correct image pull secret is provided for all private images. The steps required to configure image names and add the image pull secret vary depending on your application type: + + * **HelmChart v2**: For Helm charts deployed with the [HelmChart v2](/reference/custom-resource-helmchart-v2) custom resource, configure the HelmChart v2 custom resource to dynamically update image names in your Helm chart and to inject the image pull secret that is automatically created by KOTS. For instructions, see [Configuring the HelmChart Custom Resource v2](/vendor/helm-native-v2-using).
+ + * **Standard Manifests or HelmChart v1**: For standard manifest-based applications or Helm charts deployed with the [HelmChart v1](/reference/custom-resource-helmchart) custom resource, no additional configuration is required. KOTS automatically rewrites image names and injects image pull secrets during deployment for these application types. + + :::note + <Deprecated/> + ::: + + * **Kubernetes Operators**: For applications packaged with Kubernetes Operators, KOTS cannot modify pods that are created at runtime by the Operator. To support the use of private images in all environments, the Operator code should use KOTS functionality to determine the image name and image pull secrets for all pods when they are created. For instructions, see [Referencing Images](/vendor/operator-referencing-images) in the _Packaging Kubernetes Operators_ section. + +1. If you are deploying Pods to namespaces other than the application namespace, add the namespace to the `additionalNamespaces` attribute of the KOTS Application custom resource. This ensures that KOTS can provision the `imagePullSecret` in the namespace to allow the Pod to pull the image. For instructions, see [Defining Additional Namespaces](operator-defining-additional-namespaces). + +--- + + +# Using the Replicated Registry for KOTS Installations + +import TeamTokenNote from "../partials/vendor-api/_team-token-note.mdx" + +# Using the Replicated Registry for KOTS Installations + +This topic describes how to push images to the Replicated private registry. + +## Overview + +For applications installed with KOTS, you can host private images on the Replicated registry. Hosting your images on the Replicated registry is useful if you do not already have your images in an existing private registry. It is also useful for testing purposes. + +Images pushed to the Replicated registry are displayed on the **Images** page in the Vendor Portal: + +![Replicated Private Registry section of the vendor portal Images page](/images/images-replicated-registry.png) + +[View a larger version of this image](/images/images-replicated-registry.png) + +For information about security for the Replicated registry, see [Replicated Registry Security](packaging-private-registry-security). + +## Limitations + +The Replicated registry has the following limitations: + +* You cannot delete images from the Replicated registry. As a workaround, you can push a new, empty image to the registry using the same tags as the target image. Replicated does not recommend removing tags from the registry because it could break older releases of your application. + +* When using Docker Build to build and push images to the Replicated registry, provenance attestations are not supported. To avoid a 400 error, include the `--provenance=false` flag to disable all provenance attestations. For more information, see [docker buildx build](https://docs.docker.com/engine/reference/commandline/buildx_build/#provenance) and [Provenance Attestations](https://docs.docker.com/build/attestations/slsa-provenance/) in the Docker documentation. + +* You might encounter a timeout error when pushing images with layers close to or exceeding 2GB in size, such as: "received unexpected HTTP status: 524." To work around this, reduce the size of the image layers and push the image again. If the 524 error persists, continue decreasing the layer sizes until the push is successful. + +## Push Images to the Replicated Registry + +This procedure describes how to tag and push images to the Replicated registry. 
For more information about building, tagging, and pushing Docker images, see the +[Docker CLI documentation](https://docs.docker.com/engine/reference/commandline/cli/). + +To push images to the Replicated registry: + +1. Do one of the following to connect with the `registry.replicated.com` container registry: + * **(Recommended) Log in with a user token**: Use `docker login registry.replicated.com` with your Vendor Portal email as the username and a Vendor Portal user token as the password. For more information, see [User API Tokens](replicated-api-tokens#user-api-tokens) in _Generating API Tokens_. + * **Log in with a service account token:** Use `docker login registry.replicated.com` with a Replicated Vendor Portal service account token as the password. If you have an existing team token, you can use that instead. You can use any string as the username. For more information, see [Service Accounts](replicated-api-tokens#service-accounts) in _Generating API Tokens_. + + <TeamTokenNote/> + + * **Log in with your credentials**: Use `docker login registry.replicated.com` with your Vendor Portal email and password as the credentials. + +1. Tag your private image with the Replicated registry hostname in the standard +Docker format: + + ``` + docker tag IMAGE_NAME registry.replicated.com/APPLICATION_SLUG/TARGET_IMAGE_NAME:TAG + ``` + + Where: + * `IMAGE_NAME` is the name of the existing private image for your application. + * `APPLICATION_SLUG` is the unique slug for the application. You can find the application slug on the **Application Settings** page in the Vendor Portal. For more information, see [Get the Application Slug](/vendor/vendor-portal-manage-app#slug) in _Managing Applications_. + * `TARGET_IMAGE_NAME` is a name for the image. Replicated recommends that the `TARGET_IMAGE_NAME` is the same as the `IMAGE_NAME`. + * `TAG` is a tag for the image. + + For example: + + ```bash + docker tag worker registry.replicated.com/myapp/worker:1.0.1 + ``` + +1. Push your private image to the Replicated registry using the following format: + + ``` + docker push registry.replicated.com/APPLICATION_SLUG/TARGET_IMAGE_NAME:TAG + ``` + Where: + * `APPLICATION_SLUG` is the unique slug for the application. + * `TARGET_IMAGE_NAME` is a name for the image. Use the same name that you used when tagging the image in the previous step. + * `TAG` is a tag for the image. Use the same tag that you used when tagging the image in the previous step. + + For example: + + ```bash + docker push registry.replicated.com/myapp/worker:1.0.1 + ``` + +1. In the [Vendor Portal](https://vendor.replicated.com/), go to **Images** and scroll down to the **Replicated Private Registry** section to confirm that the image was pushed. + + +--- + + +# Using Image Tags and Digests + +# Using Image Tags and Digests + +This topic describes using image tags and digests with your application images. It includes information about when image tags and digests are supported, and how to enable support for image digests in air gap bundles.
+ +## Support for Image Tags and Digests + +The following table describes the use cases in which image tags and digests are supported: + +<table> + <tr> + <th width="10%">Installation</th> + <th width="30%">Support for Image Tags</th> + <th width="30%">Support for Image Digests</th> + </tr> + <tr> + <td>Online</td> + <td>Supported by default</td> + <td>Supported by default</td> + </tr> + <tr> + <td>Air Gap</td> + <td>Supported by default for Replicated KOTS installations</td> + <td> + <p>Supported for applications on KOTS v1.82.0 and later when the <b>Enable new air gap bundle format</b> toggle is enabled on the channel.</p> + <p>For more information, see <a href="#digests-air-gap">Using Image Digests in Air Gap Installations</a> below.</p> + </td> + </tr> +</table> + +:::note +You can use image tags and image digests together in any case where both are supported. +::: + +## Using Image Digests in Air Gap Installations {#digests-air-gap} + +For applications installed with KOTS v1.82.0 or later, you can enable a format for air gap bundles that supports the use of image digests. This air gap bundle format also ensures that identical image layers are not duplicated, resulting in a smaller air gap bundle size. + +You can enable or disable this air gap bundle format using the **Enable new air gap bundle format** toggle in the settings for any channel in the Vendor Portal. The **Enable new air gap bundle format** toggle is enabled by default. + +When you enable **Enable new air gap bundle format** on a channel, all air gap bundles that you build or rebuild on that channel use the updated air gap bundle format. + +If users on a version of KOTS earlier than v1.82.0 attempt to install or upgrade an application with an air gap bundle that uses the new air gap bundle format, then the Admin Console displays an error message when they attempt to upload the bundle. + +To enable the new air gap bundle format on a channel: + +1. In the Replicated [Vendor Portal](https://vendor.replicated.com/channels), go to the Channels page and click the edit icon in the top right of the channel where you want to use the new air gap bundle format. +1. Enable the **Enable new air gap bundle format** toggle. +1. (Recommended) To prevent users on a version of KOTS earlier than v1.82.0 from attempting to upgrade with an air gap bundle that uses the new air gap bundle format, set `minKotsVersion` to "1.82.0" in the Application custom resource manifest file. + + `minKotsVersion` defines the minimum version of KOTS required by the application release. Including `minKotsVersion` displays a warning in the Admin Console when users attempt to install or upgrade the application if they are not on the specified minimum version or later. For more information, see [Setting Minimum and Target Versions for KOTS](packaging-kots-versions). + + **Example**: + + ```yaml + apiVersion: kots.io/v1beta1 + kind: Application + metadata: + name: my-application + spec: + ... + minKotsVersion: "1.82.0" + ... + ``` + +1. Test your changes: + 1. Save and promote the release to a development environment. + 1. On the channel where you enabled **Enable new air gap bundle format**, click **Release history**. On the Release History page, click **Build** next to the latest release to create an air gap bundle with the new format. + + ![Vendor portal release history page](../../static/images/airgap-download-bundle.png) + + 1. Click **Download Airgap Bundle**. + 1. 
Install or upgrade the application with version 1.82.0 or later of the Admin Console or the KOTS CLI. Upload the new air gap bundle to confirm that the installation or upgrade completes successfully. + +--- + + +# Replicated Quick Start + +import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" +import HelmPackage from "../partials/helm/_helm-package.mdx" +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import HelmChartCr from "../partials/getting-started/_gitea-helmchart-cr-ec.mdx" +import KotsCr from "../partials/getting-started/_gitea-kots-app-cr-ec.mdx" +import K8sCr from "../partials/getting-started/_gitea-k8s-app-cr.mdx" +import EcCr from "../partials/embedded-cluster/_ec-config.mdx" +import Requirements from "../partials/embedded-cluster/_requirements.mdx" + +# Replicated Quick Start + +Welcome! This topic provides a quick start workflow to help new users learn about the Replicated Platform. Complete this quick start before you onboard your application to the platform. + +## Introduction + +This quick start shows how to create, install, and update releases for a sample Helm chart in the Replicated Platform. You will repeat these same basic steps to create and test releases throughout the onboarding process to integrate Replicated features with your own application. + +The goals of this quick start are to introduce new Replicated users to the following common tasks for the purpose of preparing to onboard to the Replicated Platform: + +* Working with _applications_, _channels_, _releases_, and _customers_ in the Replicated Vendor Portal + +* Working with the Replicated CLI + +* Installing and updating applications on a VM with Replicated Embedded Cluster + +* Managing an installation with the Replicated KOTS Admin Console + +## Set Up the Environment + +Before you begin, ensure that you have access to a VM that meets the requirements for Embedded Cluster: + +<Requirements/> + +## Quick Start + +1. Create an account in the Vendor Portal. You can either create a new team or join an existing team. For more information, see [Creating a Vendor Account](vendor-portal-creating-account). + +1. Create an application using the Replicated CLI: + + 1. On your local machine, install the Replicated CLI: + + ```bash + brew install replicatedhq/replicated/cli + ``` + For more installation options, see [Installing the Replicated CLI](/reference/replicated-cli-installing). + + 1. Authorize the Replicated CLI: + + ```bash + replicated login + ``` + In the browser window that opens, complete the prompts to log in to your Vendor Portal account and authorize the CLI. + + 1. Create an application named `Gitea`: + + ```bash + replicated app create Gitea + ``` + + 1. Set the `REPLICATED_APP` environment variable to the application that you created: + + ```bash + export REPLICATED_APP=APP_SLUG + ``` + Where `APP_SLUG` is the unique application slug provided in the output of the `app create` command. For example, `export REPLICATED_APP=gitea-kite`. + + This allows you to interact with the application using the Replicated CLI without needing to use the `--app` flag with every command. + +1. Get the sample Bitnami Gitea Helm chart and add the Replicated SDK as a dependency: + + 1. 
Run the following command to pull and untar version 1.0.6 of the Bitnami Gitea Helm chart: + + ``` + helm pull --untar oci://registry-1.docker.io/bitnamicharts/gitea --version 1.0.6 + ``` + For more information about this chart, see the [bitnami/gitea](https://github.com/bitnami/charts/tree/main/bitnami/gitea) repository in GitHub. + + 1. Change to the new `gitea` directory that was created: + + ```bash + cd gitea + ``` + + 1. In the Helm chart `Chart.yaml`, add the Replicated SDK as a dependency: + + <DependencyYaml/> + + The Replicated SDK is a Helm chart that provides access to Replicated features and can be installed as a small service alongside your application. For more information, see [About the Replicated SDK](/vendor/replicated-sdk-overview). + + 1. Update dependencies and package the Helm chart to a `.tgz` chart archive: + + ```bash + helm package -u . + ``` + Where `-u` or `--dependency-update` is an option for the helm package command that updates chart dependencies before packaging. For more information, see [Helm Package](https://helm.sh/docs/helm/helm_package/) in the Helm documentation. + +1. Add the chart archive to a release: + + 1. In the `gitea` directory, create a subdirectory named `manifests`: + + ``` + mkdir manifests + ``` + + You will add the files required to support installation with Replicated KOTS and Replicated Embedded Cluster to this subdirectory. + + 1. Move the Helm chart archive that you created to `manifests`: + + ``` + mv gitea-1.0.6.tgz manifests + ``` + + 1. In `manifests`, create the following YAML files: + ``` + cd manifests + ``` + ``` + touch gitea.yaml kots-app.yaml k8s-app.yaml embedded-cluster.yaml + ``` + + 1. In each of the files that you created, paste the corresponding YAML provided in the tabs below: + + <Tabs> + <TabItem value="helmchart" label="gitea.yaml" default> + <h5>Description</h5> + <p>The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart. The <code>name</code> and <code>chartVersion</code> listed in the HelmChart custom resource must match the name and version of a Helm chart archive in the release. The <a href="/vendor/helm-optional-value-keys#conditionally-set-values"><code>optionalValues</code></a> field sets the specified Helm values when a given conditional statement evaluates to true. In this case, if the application is installed with Embedded Cluster, then the Gitea service type is set to `NodePort` and the node port is set to `"32000"`. 
This will allow Gitea to be accessed from the local machine after deployment for the purpose of this quick start.</p> + <h5>YAML</h5> + <HelmChartCr/> + </TabItem> + <TabItem value="kots-app" label="kots-app.yaml"> + <h5>Description</h5> + <p>The KOTS Application custom resource enables features in the Replicated Admin Console such as branding, release notes, application status indicators, and custom graphs.</p><p>The YAML below provides a name for the application to display in the Admin Console, adds a custom <em>status informer</em> that displays the status of the <code>gitea</code> Deployment resource in the Admin Console dashboard, adds a custom application icon, and adds the port where the Gitea service can be accessed so that the user can open the application after installation.</p> + <h5>YAML</h5> + <KotsCr/> + </TabItem> + <TabItem value="k8s-app" label="k8s-app.yaml"> + <h5>Description</h5> + <p>The Kubernetes SIG Application custom resource supports functionality such as including buttons and links on the Replicated Admin Console dashboard. The YAML below adds an <strong>Open App</strong> button to the Admin Console dashboard that opens the application using the service port defined in the KOTS Application custom resource.</p> + <h5>YAML</h5> + <K8sCr/> + </TabItem> + <TabItem value="ec" label="embedded-cluster.yaml"> + <h5>Description</h5> + <p>To install your application with Embedded Cluster, an Embedded Cluster Config must be present in the release. At minimum, the Embedded Cluster Config sets the version of Embedded Cluster that will be installed. You can also define several characteristics about the cluster.</p> + <h5>YAML</h5> + <EcCr/> + </TabItem> + </Tabs> + + 1. Lint the YAML files: + + ```bash + replicated release lint --yaml-dir . + ``` + **Example output:** + ```bash + RULE TYPE FILENAME LINE MESSAGE + config-spec warn Missing config spec + preflight-spec warn Missing preflight spec + troubleshoot-spec warn Missing troubleshoot spec + nonexistent-status-informer-object warn kots-app.yaml 8 Status informer points to a nonexistent kubernetes object. If this is a Helm resource, this warning can be ignored. + ``` + :::note + You can ignore any warning messages for the purpose of this quick start. + ::: + + 1. Create the release and promote it to the Unstable channel: + + ```bash + replicated release create --yaml-dir . --promote Unstable + ``` + **Example output**: + ```bash + • Reading manifests from . ✓ + • Creating Release ✓ + • SEQUENCE: 1 + • Promoting ✓ + • Channel 2kvjwEj4uBaCMoTigW5xty1iiw6 successfully set to release 1 + ``` + +1. Create a customer so that you can install the release on your VM with Embedded Cluster: + + 1. In the [Vendor Portal](https://vendor.replicated.com), under the application drop down, select the Gitea application that you created. + + <img alt="App drop down" src="/images/quick-start-select-gitea-app.png" width="250px"/> + + [View a larger version of this image](/images/quick-start-select-gitea-app.png) + + 1. Click **Customers > Create customer**. + + The **Create a new customer** page opens: + + ![Create a new customer page in the Vendor Portal](/images/create-customer.png) + + [View a larger version of this image](/images/create-customer.png) + + 1. For **Customer name**, enter a name for the customer. For example, `Example Customer`. + + 1. For **Channel**, select **Unstable**. This allows the customer to install releases promoted to the Unstable channel. + + 1. For **License type**, select **Development**. + + 1. 
For **License options**, enable the following entitlements: + * **KOTS Install Enabled** + * **Embedded Cluster Enabled** + + 1. Click **Save Changes**. + +1. Install the application with Embedded Cluster: + + 1. On the page for the customer that you created, click **Install instructions > Embedded Cluster**. + + ![Customer install instructions dropdown](/images/customer-install-instructions-dropdown.png) + + [View a larger image](/images/customer-install-instructions-dropdown.png) + + 1. On the command line, SSH onto your VM and run the commands in the **Embedded cluster install instructions** dialog to download the latest release, extract the installation assets, and install. + + <img width="500px" src="/images/embedded-cluster-install-dialog-latest.png" alt="embedded cluster install instructions dialog"/> + + [View a larger version of this image](/images/embedded-cluster-install-dialog-latest.png) + + 1. When prompted, enter a password for accessing the Admin Console. + + The installation command takes a few minutes to complete. + + **Example output:** + + ```bash + ? Enter an Admin Console password: ******** + ? Confirm password: ******** + ✔ Host files materialized! + ✔ Running host preflights + ✔ Node installation finished! + ✔ Storage is ready! + ✔ Embedded Cluster Operator is ready! + ✔ Admin Console is ready! + ✔ Additional components are ready! + Visit the Admin Console to configure and install gitea-kite: http://104.155.145.60:30000 + ``` + + At this point, the cluster is provisioned and the Admin Console is deployed, but the application is not yet installed. + + 1. Go to the URL provided in the output to access the Admin Console. + + 1. On the Admin Console landing page, click **Start**. + + 1. On the **Secure the Admin Console** screen, review the instructions and click **Continue**. In your browser, follow the instructions that were provided on the **Secure the Admin Console** screen to bypass the warning. + + 1. On the **Certificate type** screen, either select **Self-signed** to continue using the self-signed Admin Console certificate or click **Upload your own** to upload your own private key and certificate. + + By default, a self-signed TLS certificate is used to secure communication between your browser and the Admin Console. You will see a warning in your browser every time you access the Admin Console unless you upload your own certificate. + + 1. On the login page, enter the Admin Console password that you created during installation and click **Log in**. + + 1. On the **Configure the cluster** screen, you can view details about the VM where you installed, including its node role, status, CPU, and memory. You can also optionally add more nodes on this page before deploying the application. Click **Continue**. + + The Admin Console dashboard opens. + + 1. On the Admin Console dashboard, next to the version, click **Deploy** and then **Yes, Deploy**. + + The application status changes from Missing to Unavailable while the `gitea` Deployment is being created. + + 1. After a few minutes, when the application status is Ready, click **Open App** to view the Gitea application in a browser. + + For example: + + ![Admin console dashboard showing ready status](/images/gitea-ec-ready.png) + + [View a larger version of this image](/images/gitea-ec-ready.png) + + <img alt="Gitea app landing page" src="/images/gitea-app.png" width="600px"/> + + [View a larger version of this image](/images/gitea-app.png) + +1. Return to the Vendor Portal and go to **Customers**. 
Under the name of the customer, confirm that you can see an active instance. + + This instance telemetry is automatically collected and sent back to the Vendor Portal by both KOTS and the Replicated SDK. For more information, see [About Instance and Event Data](/vendor/instance-insights-event-data). + +1. Under **Instance ID**, click on the ID to view additional insights including the versions of Kubernetes and the Replicated SDK running in the cluster where you installed the application. For more information, see [Instance Details](/vendor/instance-insights-details). + +1. Create a new release that adds preflight checks to the application: + + 1. In your local filesystem, go to the `gitea` directory. + + 1. Create a `gitea-preflights.yaml` file in the `templates` directory: + + ``` + touch templates/gitea-preflights.yaml + ``` + + 1. In the `gitea-preflights.yaml` file, add the following YAML to create a Kubernetes Secret with a simple preflight spec: + + ```yaml + apiVersion: v1 + kind: Secret + metadata: + labels: + troubleshoot.sh/kind: preflight + name: "{{ .Release.Name }}-preflight-config" + stringData: + preflight.yaml: | + apiVersion: troubleshoot.sh/v1beta2 + kind: Preflight + metadata: + name: preflight-sample + spec: + collectors: + - http: + collectorName: slack + get: + url: https://api.slack.com/methods/api.test + analyzers: + - textAnalyze: + checkName: Slack Accessible + fileName: slack.json + regex: '"status": 200,' + outcomes: + - pass: + when: "true" + message: "Can access the Slack API" + - fail: + when: "false" + message: "Cannot access the Slack API. Check that the server can reach the internet and check [status.slack.com](https://status.slack.com)." + ``` + The YAML above defines a preflight check that confirms that an HTTP request to the Slack API at `https://api.slack.com/methods/api.test` made from the cluster returns a successful response of `"status": 200,`. + + 1. In the `Chart.yaml` file, increment the version to 1.0.7: + + ```yaml + # Chart.yaml + version: 1.0.7 + ``` + + 1. Update dependencies and package the chart to a `.tgz` chart archive: + + ```bash + helm package -u . + ``` + + 1. Move the chart archive to the `manifests` directory: + + ```bash + mv gitea-1.0.7.tgz manifests + ``` + + 1. In the `manifests` directory, open the KOTS HelmChart custom resource (`gitea.yaml`) and update the `chartVersion`: + + ```yaml + # gitea.yaml KOTS HelmChart + chartVersion: 1.0.7 + ``` + + 1. Remove the chart archive for version 1.0.6 of the Gitea chart from the `manifests` directory: + + ``` + rm gitea-1.0.6.tgz + ``` + + 1. From the `manifests` directory, create and promote a new release, setting the version label of the release to `0.0.2`: + + ```bash + replicated release create --yaml-dir . --promote Unstable --version 0.0.2 + ``` + **Example output**: + ```bash + • Reading manifests from . ✓ + • Creating Release ✓ + • SEQUENCE: 2 + • Promoting ✓ + • Channel 2kvjwEj4uBaCMoTigW5xty1iiw6 successfully set to release 2 + ``` + +1. On your VM, update the application instance to the new version that you just promoted: + + 1. In the Admin Console, go to the **Version history** tab. + + The new version is displayed automatically. + + 1. Click **Deploy** next to the new version. + + The Embedded Cluster upgrade wizard opens. + + 1. In the Embedded Cluster upgrade wizard, on the **Preflight checks** screen, note that the "Slack Accessible" preflight check that you added was successful. Click **Next: Confirm and deploy**. 
+ + ![preflight page of the embedded cluster upgrade wizard](/images/quick-start-ec-upgrade-wizard-preflight.png) + + [View a larger version of this image](/images/quick-start-ec-upgrade-wizard-preflight.png) + + :::note + The **Config** screen in the upgrade wizard is bypassed because this release does not contain a KOTS Config custom resource. The KOTS Config custom resource is used to set up the Config screen in the KOTS Admin Console. + ::: + + 1. On the **Confirm and Deploy** page, click **Deploy**. + +1. Reset and reboot the VM to remove the installation: + + ```bash + sudo ./APP_SLUG reset + ``` + Where `APP_SLUG` is the unique slug for the application. + + :::note + You can find the application slug by running `replicated app ls` on your local machine. + ::: + +## Next Steps + +Congratulations! As part of this quick start, you: +* Added the Replicated SDK to a Helm chart +* Created a release with the Helm chart +* Installed the release on a VM with Embedded Cluster +* Viewed telemetry for the installed instance in the Vendor Portal +* Created a new release to add preflight checks to the application +* Updated the application from the Admin Console + +Now that you are familiar with the workflow of creating, installing, and updating releases, you can begin onboarding your own application to the Replicated Platform. + +To get started, see [Replicated Onboarding](replicated-onboarding). + +## Related Topics + +For more information about the Replicated Platform features mentioned in this quick start, see: + +* [About Distributing Helm Charts with KOTS](/vendor/helm-native-about) +* [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about) +* [About the Replicated SDK](/vendor/replicated-sdk-overview) +* [Introduction to KOTS](/intro-kots) +* [Managing Releases with the CLI](/vendor/releases-creating-cli) +* [Packaging a Helm Chart for a Release](/vendor/helm-install-release) +* [Using Embedded Cluster](/vendor/embedded-overview) + +## Related Tutorials + +For additional tutorials related to this quick start, see: + +* [Deploying a Helm Chart on a VM with Embedded Cluster](/vendor/tutorial-embedded-cluster-setup) +* [Adding Preflight Checks to a Helm Chart](/vendor/tutorial-preflight-helm-setup) +* [Deploying a Helm Chart with KOTS and the Helm CLI](/vendor/tutorial-kots-helm-setup) + +--- + + +# About Channels and Releases + +import ChangeChannel from "../partials/customers/_change-channel.mdx" +import RequiredReleasesLimitations from "../partials/releases/_required-releases-limitations.mdx" +import RequiredReleasesDescription from "../partials/releases/_required-releases-description.mdx" +import VersionLabelReqsHelm from "../partials/releases/_version-label-reqs-helm.mdx" + +# About Channels and Releases + +This topic describes channels and releases, including information about the **Releases** and **Channels** pages in the Replicated Vendor Portal. + +## Overview + +A _release_ represents a single version of your application. Each release is promoted to one or more _channels_. Channels provide a way to progress releases through the software development lifecycle: from internal testing, to sharing with early-adopters, and finally to making the release generally available. + +Channels also control which customers are able to install a release. You assign each customer to a channel to define the releases that the customer can access. 
For example, a customer assigned to the Stable channel can only install releases that are promoted to the Stable channel, and cannot see any releases promoted to other channels. For more information about assigning customers to channels, see [Channel Assignment](licenses-about#channel-assignment) in _About Customers_. + +Using channels and releases helps you distribute versions of your application to the right customer segments, without needing to manage different release workflows. + +You can manage channels and releases with the Vendor Portal, the Replicated CLI, or the Vendor API v3. For more information about creating and managing releases or channels, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Creating and Editing Channels](releases-creating-channels). + +## About Channels + +This section provides additional information about channels, including details about the default channels in the Vendor Portal and channel settings. + +### Unstable, Beta, and Stable Channels + +Replicated includes the following channels by default: + +* **Unstable**: The Unstable channel is designed for internal testing and development. You can create and assign an internal test customer to the Unstable channel to install in a development environment. Replicated recommends that you do not license any of your external users against the Unstable channel. +* **Beta**: The Beta channel is designed for release candidates and early-adopting customers. Replicated recommends that you promote a release to the Beta channel after it has passed automated testing in the Unstable channel. You can also choose to license early-adopting customers against this channel. +* **Stable**: The Stable channel is designed for releases that are generally available. Replicated recommends that you assign most of your customers to the Stable channel. Customers licensed against the Stable channel only receive application updates when you promote a new release to the Stable channel. + +You can archive or edit any of the default channels, and create new channels. For more information, see [Creating and Editing Channels](releases-creating-channels). + +### Settings + +Each channel has settings. You can customize the settings for a channel to control some of the behavior of releases promoted to the channel. + +The following shows the **Channel Settings** dialog, accessed by clicking the settings icon on a channel: + +<img src="/images/channel-settings.png" alt="Channel Settings dialog in the Vendor Portal" width="500"/> + +[View a larger version of this image](/images/channel-settings.png) + +The following describes each of the channel settings: + +* **Channel name**: The name of the channel. You can change the channel name at any time. Each channel also has a unique ID listed below the channel name. +* **Description**: Optionally, add a description of the channel. +* **Set this channel to default**: When enabled, sets the channel as the default channel. The default channel cannot be archived. +* **Custom domains**: Select the customer-facing domains that releases promoted to this channel use for the Replicated registry, Replicated proxy registry, Replicated app service, or Replicated Download Portal endpoints. If a default custom domain exists for any of these endpoints, choosing a different domain in the channel settings overrides the default. If no custom domains are configured for an endpoint, the drop-down for the endpoint is disabled. 
+ + For more information about configuring custom domains and assigning default domains, see [Using Custom Domains](custom-domains-using). +* The following channel settings apply only to applications that support KOTS: + * **Automatically create airgap builds for newly promoted releases in this channel**: When enabled, the Vendor Portal automatically builds an air gap bundle when a new release is promoted to the channel. When disabled, you can generate an air gap bundle manually for a release on the **Release History** page for the channel. + * **Enable semantic versioning**: When enabled, the Vendor Portal verifies that the version label for any releases promoted to the channel uses a valid semantic version. For more information, see [Semantic Versioning](releases-about#semantic-versioning) in _About Releases_. + * **Enable new airgap bundle format**: When enabled, air gap bundles built for releases promoted to the channel use a format that supports image digests. This air gap bundle format also ensures that identical image layers are not duplicated, resulting in a smaller air gap bundle size. For more information, see [Using Image Digests in Air Gap Installations](private-images-tags-digests#digests-air-gap) in _Using Image Tags and Digests_. + + :::note + The new air gap bundle format is supported for applications installed with KOTS v1.82.0 or later. + ::: + +## About Releases + +This section provides additional information about releases, including details about release promotion, properties, sequencing, and versioning. + +### Release Files + +A release contains your application files as well as the manifests required to install the application with the Replicated installers ([Replicated Embedded Cluster](/vendor/embedded-overview) and [Replicated KOTS](../intro-kots)). + +The application files in releases can be Helm charts and/or Kubernetes manifests. Replicated strongly recommends that all applications are packaged as Helm charts because many enterprise customers will expect to be able to install with Helm. + +### Promotion + +Each release is promoted to one or more channels. While you are developing and testing releases, Replicated recommends promoting to a channel that does not have any real customers assigned, such as the default Unstable channel. When the release is ready to be shared externally with customers, you can then promote to a channel that has the target customers assigned, such as the Beta or Stable channel. + +A release cannot be edited after it is promoted to a channel. This means that you can test a release on an internal development channel, and know with confidence that the same release will be available to your customers when you promote it to a channel where real customers are assigned. + +### Properties + +Each release has properties. You define release properties when you promote a release to a channel. You can edit release properties at any time from the channel **Release History** page in the Vendor Portal. For more information, see [Edit Release Properties](releases-creating-releases#edit-release-properties) in _Managing Releases with the Vendor Portal_. 
+
+The following shows an example of the release properties dialog:
+
+<img src="/images/release-properties.png" width="500px" alt="release properties dialog for a release with version label 0.1.22"/>
+
+[View a larger version of this image](/images/release-properties.png)
+
+As shown in the screenshot above, the release has the following properties:
+
+* **Version label**: The version label for the release. Version labels have the following requirements:
+
+    * If semantic versioning is enabled for the channel, you must use a valid semantic version. For more information, see [Semantic Versioning](#semantic-versioning).
+
+    <VersionLabelReqsHelm/>
+
+* **Requirements**: Select **Prevent this release from being skipped during upgrades** to mark the release as required.
+
+    <RequiredReleasesDescription/>
+
+    <RequiredReleasesLimitations/>
+
+* **Release notes (supports markdown)**: Detailed release notes for the release. The release notes support markdown and are shown to your customer.
+
+### Sequencing
+
+By default, Replicated uses release sequence numbers to organize and order releases, and uses instance sequence numbers in an instance's internal version history.
+
+#### Release Sequences
+
+In the Vendor Portal, each release is automatically assigned a unique, monotonically increasing sequence number. You can use this number as a fallback to identify a promoted or draft release if you do not set the `Version label` field during promotion. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases).
+
+The following graphic shows release sequence numbers in the Vendor Portal:
+
+<img alt="Release sequence numbers" src="/images/release-sequences.png" width="750px"/>
+
+[View a larger version of this image](/images/release-sequences.png)
+
+#### Instance Sequences
+
+When a new version is available for upgrade, such as when KOTS checks for upstream updates or when the user syncs their license or makes a config change, the KOTS Admin Console assigns a unique instance sequence number to that version. The instance sequence in the Admin Console starts at 0 and increments for each identifier that is returned when a new version is available.
+
+This instance sequence is unrelated to the release sequence displayed in the Vendor Portal, and it is likely that the instance sequence will differ from the release sequence. Instance sequences are only tracked by KOTS instances, and the Vendor Portal has no knowledge of these numbers.
+
+The following graphic shows instance sequence numbers on the Admin Console dashboard:
+
+<img alt="Instance sequence numbers" src="/images/instance-sequences.png" width="550px"/>
+
+[View a larger version of this image](/images/instance-sequences.png)
+
+#### Channel Sequences
+
+When a release is promoted to a channel, a channel sequence number is assigned. This unique sequence number increments by one and tracks the order in which releases were promoted to a channel. You can view the channel sequence on the **Release History** page in the Vendor Portal, as shown in the image below:
+
+<img alt="Channel sequence on Release History page" src="/images/release-history-channel-sequence.png" width="750px"/>
+
+[View a larger version of this image](/images/release-history-channel-sequence.png)
+
+The channel sequence is also used in certain URLs. For example, a release with a *release sequence* of `170` can have a *channel sequence* of `125`.
The air gap download URL for that release can contain `125`, even though the release sequence is `170`.
+
+Ordering is more complex if some or all of the releases in a channel have a semantic version label and semantic versioning is enabled for the channel. For more information, see [Semantic Versioning Sequence](#semantic-versioning-sequence).
+
+#### Semantic Versioning Sequence
+
+For channels with semantic versioning enabled, the Admin Console sequences instance releases by their semantic versions instead of their promotion dates.
+
+If releases without a valid semantic version are already promoted to a channel, the Admin Console sorts the releases that do have semantic versions starting with the earliest version and proceeding to the latest. The releases with non-semantic versioning stay in the order of their promotion dates. For example, assume that you promote these releases in the following order to a channel:
+
+- 1.0.0
+- abc
+- 0.1.0
+- xyz
+- 2.0.0
+
+Then, you enable semantic versioning on that channel. The Admin Console sequences the version history for the channel as follows:
+
+- 0.1.0
+- 1.0.0
+- abc
+- xyz
+- 2.0.0
+
+### Semantic Versioning
+
+Semantic versioning is available with Replicated KOTS v1.58.0 and later. Note the following:
+
+- For applications created in the Vendor Portal on or after February 23, 2022, semantic versioning is enabled by default on the Stable and Beta channels. Semantic versioning is disabled on the Unstable channel by default.
+
+- For existing applications created before February 23, 2022, semantic versioning is disabled by default on all channels.
+
+Semantic versioning is recommended because it makes versioning more predictable for users and lets you enforce that every release promoted to the channel has a valid semantic version label.
+
+To use semantic versioning:
+
+1. Enable semantic versioning on a channel, if it is not enabled by default. Click the **Edit channel settings** icon, and turn on the **Enable semantic versioning** toggle.
+1. Assign a semantic version number when you promote a release.
+
+Releases promoted to a channel with semantic versioning enabled are verified to ensure that the release version label is a valid semantic version. For more information about valid semantic versions, see [Semantic Versioning 2.0.0](https://semver.org).
+
+If you enable semantic versioning for a channel and then promote releases to it, Replicated recommends that you do not later disable semantic versioning for that channel.
+
+You can enable semantic versioning on a channel that already has releases promoted to it without semantic versioning. Any subsequently promoted releases must use semantic versioning. In this case, the channel will have releases with and without semantic version numbers. For information about how Replicated organizes these release sequences, see [Semantic Versioning Sequence](#semantic-versioning-sequence).
+
+### Demotion
+
+A channel release can be demoted from a channel. When a channel release is demoted, the release is no longer available for download, but is not withdrawn from environments where it was already downloaded or installed.
+
+The demoted release's channel sequence and version are not reused. For customers, the release will appear to have been skipped. Un-demoting a release will restore its place in the channel sequence, making it available again for download and installation.
+
+For information about how to demote a release, see [Demote a Release](/vendor/releases-creating-releases#demote-a-release) in _Managing Releases with the Vendor Portal_.
+
+## Vendor Portal Pages
+
+This section provides information about the channels and releases pages in the Vendor Portal.
+
+### Channels Page
+
+The **Channels** page in the Vendor Portal includes information about each channel. From the **Channels** page, you can edit and archive your channels. You can also edit the properties of the releases promoted to each channel, and view and edit the customers assigned to each channel.
+
+The following shows an example of a channel in the Vendor Portal **Channels** page:
+
+<img src="/images/channel-card.png" alt="Channel card in the Vendor Portal" width="400"/>
+
+[View a larger version of this image](/images/channel-card.png)
+
+As shown in the image above, you can do the following from the **Channels** page:
+
+* Edit the channel settings by clicking the settings icon, or archive the channel by clicking the trash can icon. For information about channel settings, see [Settings](#settings).
+
+* In the **Adoption rate** section, view data on the adoption rate of releases promoted to the channel among customers assigned to the channel.
+
+* In the **Customers** section, view the number of active and inactive customers assigned to the channel. Click **Details** to go to the **Customers** page, where you can view details about the customers assigned to the channel.
+
+* In the **Latest release** section, view the properties of the latest release, and get information about any warnings or errors in the YAML files for the latest release.
+
+  Click **Release history** to access the history of all releases promoted to the channel. From the **Release History** page, you can view the version labels and files in each release that has been promoted to the selected channel.
+
+  From the **Release History** page, you can also build and download air gap bundles to be used in air gap installations with the Replicated installers (Embedded Cluster, KOTS, kURL), edit the release properties for each release promoted to the channel, and demote a release from the channel.
+
+  The following shows an example of the **Release History** page:
+
+  <img src="/images/channels-release-history.png" alt="Release history page in the Vendor Portal" width="750"/>
+
+  [View a larger version of this image](/images/channels-release-history.png)
+
+* For applications that support KOTS, you can also do the following from the **Channels** page:
+
+  * In the **kURL installer** section, view the current kURL installer promoted to the channel. Click **Installer history** to view the history of kURL installers promoted to the channel. For more information about creating kURL installers, see [Creating a kURL Installer](packaging-embedded-kubernetes).
+
+  * In the **Install** section, view and copy the installation commands for the latest release on the channel.
+
+### Draft Release Page
+
+For applications that support installation with KOTS, the **Draft** page provides a YAML editor to add, edit, and delete your application files and Replicated custom resources. To open the **Draft** page, click **Releases > Create Release** in the Vendor Portal.
+
+The following shows an example of the **Draft** page in the Vendor Portal:
+
+   <img alt="Draft release page" src="/images/guides/kots/default-yaml.png" width="700px"/>
+
+   [View a larger version of this image](/images/guides/kots/default-yaml.png)
+
+You can do the following tasks on the **Draft** page:
+
+- Manage the file directory structure. Replicated custom resource files are grouped together above the white line of the file directory, and application files are grouped together underneath the white line.
+
+  Delete files using the trash icon that displays when you hover over a file. Create a new file or folder using the corresponding icons at the bottom of the file directory pane. You can also drag and drop files in and out of the folders.
+
+  ![Manage File Directory](/images/new-file-and-trash.png)
+
+- Edit the YAML files by selecting a file in the directory and making changes in the YAML editor.
+
+- In the **Help** or **Config help** pane, check the linter for errors. If there are no errors, you get an **Everything looks good!** message. If an error displays, you can click the **Learn how to configure** link. For more information, see [Linter Rules](/reference/linter).
+
+- Select the Config custom resource to preview how your application's Config page will look to your customers. The **Config preview** pane only appears when you select that file. For more information, see [About the Configuration Screen](config-screen-about).
+
+- Select the Application custom resource to preview how your application icon will look in the Admin Console. The **Application icon preview** only appears when you select that file. For more information, see [Customizing the Application Icon](admin-console-customize-app-icon).
+
+
+---
+
+
+# Creating and Editing Channels
+
+# Creating and Editing Channels
+
+This topic describes how to create and edit channels using the Replicated Vendor Portal. For more information about channels, see [About Channels and Releases](releases-about).
+
+For information about creating channels with the Replicated CLI, see [channel create](/reference/replicated-cli-channel-create).
+
+For information about creating and managing channels with the Vendor API v3, see the [channels](https://replicated-vendor-api.readme.io/reference/createchannel) section in the Vendor API v3 documentation.
+
+## Create a Channel
+
+To create a channel:
+
+1. From the Replicated [Vendor Portal](https://vendor.replicated.com), select **Channels** from the left menu.
+1. Click **Create Channel**.
+
+   The Create a new channel dialog opens. For example:
+
+   <img src="/images/channels-create.png" alt="Create channel dialog" width="400px"/>
+
+1. Enter a name and description for the channel.
+1. (Recommended) If semantic versioning is not enabled by default, enable it on the channel by turning on **Enable semantic versioning**. For more information about semantic versioning and defaults, see [Semantic Versioning](releases-about#semantic-versioning).
+
+1. (Recommended) Enable an air gap bundle format that supports image digests and deduplication of image layers by turning on **Enable new air gap bundle format**. For more information, see [Using Image Tags and Digests](private-images-tags-digests).
+
+1. Click **Create Channel**.
+
+## Edit a Channel
+
+To edit the settings of an existing channel:
+
+1. In the Vendor Portal, select **Channels** from the left menu.
+1. Click the gear icon on the top right of the channel that you want to modify.
+
+   The Channel settings dialog opens. For example:
+
+   <img src="/images/channel-settings.png" alt="Channel Settings dialog in the Vendor Portal" width="500"/>
+
+1. Edit the fields and click **Save**.
+
+   For more information about channel settings, see [Settings](releases-about#settings) in _About Channels and Releases_.
+
+## Archive a Channel
+
+You can archive an existing channel to prevent any new releases from being promoted to the channel.
+
+:::note
+You cannot archive a channel if:
+* There are customers assigned to the channel.
+* The channel is set as the default channel.
+
+Assign customers to a different channel and set a different channel as the default before archiving.
+:::
+
+To archive a channel with the Vendor Portal or the Replicated CLI:
+
+* **Vendor Portal**: In the Vendor Portal, go to the **Channels** page and click the trash can icon in the top right corner of the card for the channel that you want to archive.
+* **Replicated CLI**:
+  1. Run the following command to find the ID for the channel that you want to archive:
+     ```
+     replicated channel ls
+     ```
+     The output of this command includes the ID and name for each channel, as well as information about the latest release version on each channel.
+
+  1. Run the following command to archive the channel:
+     ```
+     replicated channel rm CHANNEL_ID
+     ```
+     Replace `CHANNEL_ID` with the channel ID that you retrieved in the previous step.
+
+     For more information, see [channel rm](/reference/replicated-cli-channel-rm) in the Replicated CLI documentation.
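+
+You can also create channels from automation. The following is a minimal sketch of creating a channel with the Replicated CLI; the flag names are assumptions, so verify them against [channel create](/reference/replicated-cli-channel-create) or `replicated channel create --help`:
+
+```bash
+# Create a channel for early preview builds
+# (flag names are assumptions; confirm with `replicated channel create --help`)
+replicated channel create \
+  --name Preview \
+  --description "Early preview builds for internal testing"
+```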
+
+
+---
+
+
+# Managing Releases with the CLI
+
+# Managing Releases with the CLI
+
+This topic describes how to use the Replicated CLI to create and promote releases.
+
+For information about creating and managing releases with the Vendor Portal, see [Managing Releases with the Vendor Portal](/vendor/releases-creating-releases).
+
+For information about creating and managing releases with the Vendor API v3, see the [releases](https://replicated-vendor-api.readme.io/reference/createrelease) section in the Vendor API v3 documentation.
+
+## Prerequisites
+
+Before you create a release using the Replicated CLI, complete the following prerequisites:
+
+* Install the Replicated CLI and then log in to authorize the CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing).
+
+* Create a new application using the `replicated app create APP_NAME` command. You only need to do this one time for each application that you want to deploy. See [`app create`](/reference/replicated-cli-app-create) in _Reference_.
+
+* Set the `REPLICATED_APP` environment variable to the slug of the target application. See [Set Environment Variables](/reference/replicated-cli-installing#env-var) in _Installing the Replicated CLI_.
+
+  **Example**:
+
+  ```bash
+  export REPLICATED_APP=my-app-slug
+  ```
+
+## Create a Release From a Local Directory {#dir}
+
+You can use the Replicated CLI to create a release from a local directory that contains the release files.
+
+To create and promote a release:
+
+1. (Helm Charts Only) If your release contains any Helm charts:
+
+   1. Package each Helm chart as a `.tgz` file. See [Packaging a Helm Chart for a Release](/vendor/helm-install-release).
+
+   1. Move the `.tgz` file or files to the local directory that contains the release files:
+
+      ```bash
+      mv CHART_TGZ PATH_TO_RELEASE_DIR
+      ```
+      Where:
+      * `CHART_TGZ` is the `.tgz` Helm chart archive.
+      * `PATH_TO_RELEASE_DIR` is the path to the directory that contains the release files.
+
+      **Example**
+
+      ```bash
+      mv wordpress-1.3.5.tgz manifests
+      ```
+
+   1. In the same directory that contains the release files, add a HelmChart custom resource for each Helm chart in the release. See [Configuring the HelmChart Custom Resource](helm-native-v2-using).
+
+1. Lint the application manifest files and ensure that there are no errors in the YAML:
+
+   ```bash
+   replicated release lint --yaml-dir=PATH_TO_RELEASE_DIR
+   ```
+
+   Where `PATH_TO_RELEASE_DIR` is the path to the directory with the release files.
+
+   For more information, see [release lint](/reference/replicated-cli-release-lint) and [Linter Rules](/reference/linter).
+
+1. Do one of the following:
+
+   * **Create and promote the release with one command**:
+
+     ```bash
+     replicated release create --yaml-dir PATH_TO_RELEASE_DIR --lint --promote CHANNEL
+     ```
+     Where:
+     * `PATH_TO_RELEASE_DIR` is the path to the directory with the release files.
+     * `CHANNEL` is the channel ID or the case-sensitive name of the channel.
+
+   * **Create and edit the release before promoting**:
+
+     1. Create the release:
+
+        ```bash
+        replicated release create --yaml-dir PATH_TO_RELEASE_DIR
+        ```
+        Where `PATH_TO_RELEASE_DIR` is the path to the directory with the release files.
+
+        For more information, see [release create](/reference/replicated-cli-release-create).
+
+     1. Edit and update the release as desired:
+
+        ```
+        replicated release update SEQUENCE --yaml-dir PATH_TO_RELEASE_DIR
+        ```
+        Where:
+
+        - `SEQUENCE` is the release sequence number. This identifies the existing release to be updated.
+        - `PATH_TO_RELEASE_DIR` is the path to the directory with the release files.
+
+        For more information, see [release update](/reference/replicated-cli-release-update).
+
+     1. Promote the release when you are ready to test it. Releases cannot be edited after they are promoted. To make changes after promotion, create a new release.
+
+        ```
+        replicated release promote SEQUENCE CHANNEL
+        ```
+
+        Where:
+
+        - `SEQUENCE` is the release sequence number.
+        - `CHANNEL` is the channel ID or the case-sensitive name of the channel.
+
+        For more information, see [release promote](/reference/replicated-cli-release-promote).
+
+1. Verify that the release was promoted to the target channel:
+
+   ```
+   replicated release ls
+   ```
+
+---
+
+
+# Creating and Managing Customers
+
+import ChangeChannel from "../partials/customers/_change-channel.mdx"
+import Download from "../partials/customers/_download.mdx"
+import GitOpsNotRecommended from "../partials/gitops/_gitops-not-recommended.mdx"
+
+# Creating and Managing Customers
+
+This topic describes how to create and manage customers in the Replicated Vendor Portal. For more information about customer licenses, see [About Customers](licenses-about).
+
+## Create a Customer
+
+This procedure describes how to create a new customer in the Vendor Portal. You can edit customer details at any time.
+
+For information about creating a customer with the Replicated CLI, see [customer create](/reference/replicated-cli-customer-create).
+
+For information about creating and managing customers with the Vendor API v3, see the [customers](https://replicated-vendor-api.readme.io/reference/getcustomerentitlements) section in the Vendor API v3 documentation.
+
+To create a customer:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers > Create customer**.
+
+    The **Create a new customer** page opens:
+
+    ![Create a new customer page in the Vendor Portal](/images/create-customer.png)
+
+    [View a larger version of this image](/images/create-customer.png)
+
+1. For **Customer name**, enter a name for the customer.
+
+1. For **Customer email**, enter the email address for the customer.
+
+    :::note
+    A customer email address is required for Helm installations. This email address is never used to send emails to customers.
+    :::
+
+1. For **Assigned channel**, assign the customer to one of your channels. You can select any channel that has at least one release. The channel a customer is assigned to determines the application releases that they can install. For more information, see [Channel Assignment](licenses-about#channel-assignment) in _About Customers_.
+
+    :::note
+    <ChangeChannel/>
+    :::
+
+1. For **Custom ID**, you can enter a custom ID for the customer. Setting a custom ID allows you to easily associate this Replicated customer record with your own internal customer data systems during data exports. Replicated recommends using an alphanumeric value such as your Salesforce ID or Hubspot ID.
+
+    :::note
+    Replicated does _not_ require that the custom ID is unique. The custom ID is for vendor data reconciliation purposes, and is not used by Replicated for any functionality.
+    :::
+
+1. For **Expiration policy**, by default, **Customer's license does not expire** is enabled. To set an expiration date for the license, enable **Customer's license has an expiration date** and specify an expiration date in the **When does this customer expire?** calendar.
+
+1. For **Customer type**, set the customer type. Customer type is used only for reporting purposes. Customer access to your application is not affected by the type you assign to them. By default, **Trial** is selected. For more information, see [About Customer License Types](licenses-about-types).
+
+1. Enable any of the available options for the customer. For more information about the license options, see [Built-in License Fields](/vendor/licenses-using-builtin-fields). For more information about enabling install types, see [Managing Install Types for a License (Beta)](/vendor/licenses-install-types).
+
+1. For **Custom fields**, configure any custom fields that you have added for your application. For more information about how to create custom fields for your application, see [Managing Customer License Fields](licenses-adding-custom-fields).
+
+1. Click **Save Changes**.
+
+## Edit a Customer
+
+You can edit the built-in and custom license fields for a customer at any time by going to the **Manage customer** page for that customer. For more information, see [Manage Customer Page](licenses-about#about-the-manage-customer-page) in _About Customers and Licensing_.
+
+Replicated recommends that you test any license changes in a development environment. If needed, install the application using a developer license matching the current customer's entitlements before editing the developer license. Then validate the updated license.
+
+:::important
+For online environments, changing license entitlements can trigger changes to the customer's installed application instance during runtime. Replicated recommends that you verify the logic your application uses to query and enforce the target entitlement before making any changes.
+:::
+
+To edit license fields:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers**.
+
+1. Select the target customer and click the **Manage customer** tab.
+
+1. On the **Manage customer** page, edit the desired fields and click **Save**.
+
+    ![Full manage customer page for a customer named Prestige Financial](/images/customer-details.png)
+
+1. Test the changes by installing or updating in a development environment. Do one of the following, depending on the installation method for your application:
+    * For applications installed with Helm that use the Replicated SDK, you can add logic to your application to enforce entitlements before installation or during runtime using the Replicated SDK API license endpoints. See [Checking Entitlements in Helm Charts Before Deployment](licenses-reference-helm).
+    * For applications installed with Replicated KOTS, update the license in the Admin Console. See [Update Online Licenses](/enterprise/updating-licenses#update-online-licenses) and [Update Air Gap Licenses](/enterprise/updating-licenses#update-air-gap-licenses) in _Updating Licenses in the Admin Console_.
+
+## Archive a Customer
+
+When you archive a customer in the Vendor Portal, the customer is hidden from search by default and becomes read-only. Archiving a customer does not invalidate license files that were downloaded before the customer was archived.
+
+To expire a license, set an expiration date and policy in the **Expiration policy** field before you archive the customer.
+
+To archive a customer:
+
+1. In the Vendor Portal, click **Customers**. Select the target customer, then click the **Manage customer** tab.
+
+1. Click **Archive Customer**. In the confirmation dialog, click **Archive Customer** again.
+
+You can unarchive a customer by clicking **Unarchive Customer** on the customer's **Manage customer** page.
+
+## Export Customer and Instance Data {#export}
+
+<Download/>
+
+For more information about the data fields in the CSV downloads, see [Data Dictionary](/vendor/instance-data-export#data-dictionary) in _Export Customers and Instance Data_.
+
+## Filter and Search Customers
+
+The **Customers** page provides a search box and filters that help you find customers:
+
+<img alt="search box and filters on the customers page" src="/images/customers-filter.png" width="400px"/>
+
+[View a larger version of this image](/images/customers-filter.png)
+
+You can filter customers based on whether they are active, by license type, and by channel name. You can filter using more than one criterion, such as Active, Paid, and Stable. However, you can select only one license type and one channel at a time.
+
+If there is adoption rate data available for the channel that you are filtering by, you can also filter by current version, previous version, and older versions.
+
+You can also filter customers by custom ID or email address. To filter customers by custom ID or email, use the search box and prepend your search term with "customId:" (for example, `customId:1234`) or "email:" (for example, `email:bob@replicated.com`).
+
+If you want to filter information using multiple license types or channels, you can download a CSV file instead. For more information, see [Export Customer and Instance Data](#export) above.
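+
+For automated customer management, the same operations are available from the Replicated CLI. The following is a minimal sketch of creating a customer and assigning it to a channel; the flag names are assumptions, so verify them against the [customer create](/reference/replicated-cli-customer-create) reference or `replicated customer create --help`:
+
+```bash
+# Create a customer assigned to the Stable channel
+# (flag names are assumptions; confirm with `replicated customer create --help`)
+replicated customer create \
+  --name "Example Co" \
+  --channel Stable
+
+# List customers to confirm the new record
+replicated customer ls
+```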
+
+
+---
+
+
+# Managing Releases with the Vendor Portal
+
+import RequiredReleasesLimitations from "../partials/releases/_required-releases-limitations.mdx"
+import RequiredReleasesDescription from "../partials/releases/_required-releases-description.mdx"
+
+# Managing Releases with the Vendor Portal
+
+This topic describes how to use the Replicated Vendor Portal to create and promote releases, edit releases, edit release properties, and archive releases.
+
+For information about creating and managing releases with the CLI, see [Managing Releases with the CLI](/vendor/releases-creating-cli).
+
+For information about creating and managing releases with the Vendor API v3, see the [releases](https://replicated-vendor-api.readme.io/reference/createrelease) and [channelReleases](https://replicated-vendor-api.readme.io/reference/channelreleaseairgapbundleurl) sections in the Vendor API v3 documentation.
+
+## Create a Release
+
+To create and promote a release in the Vendor Portal:
+
+1. From the **Applications** dropdown list, select **Create an app** or select an existing application to update.
+
+1. Click **Releases > Create release**.
+
+    ![Create Release](/images/release-create-new.png)
+
+    [View a larger version of this image](/images/release-create-new.png)
+
+1. Add your files to the release. You can do this by dragging and dropping files to the file directory in the YAML editor or clicking the plus icon to add a new, untitled YAML file.
+
+1. For any Helm charts that you add to the release, in the **Select Installation Method** dialog, select the version of the HelmChart custom resource that KOTS will use to install the chart. `kots.io/v1beta2` is recommended. For more information about the HelmChart custom resource, see [Configuring the HelmChart Custom Resource](helm-native-v2-using).
+
+    <img src="/images/helm-select-install-method.png" alt="select installation method dialog" width="550px"/>
+
+    [View a larger version of this image](/images/helm-select-install-method.png)
+
+1. Click **Save release**. This saves a draft that you can continue to edit until you promote it.
+
+1. Click **Promote**. In the **Promote Release** dialog, edit the fields:
+
+    For more information about the requirements and limitations of each field, see <a href="releases-about#properties">Properties</a> in _About Channels and Releases_.
+
+    <table>
+    <tr>
+      <th width="30%">Field</th>
+      <th width="70%">Description</th>
+    </tr>
+    <tr>
+      <td>Channel</td>
+      <td>
+        <p>Select the channel where you want to promote the release. If you are not sure which channel to use, use the default Unstable channel.</p>
+      </td>
+    </tr>
+    <tr>
+      <td>Version label</td>
+      <td>
+        <p>Enter a version label.</p>
+        <p>If you have one or more Helm charts in your release, the Vendor Portal automatically populates this field. You can change the version label to any <code>version</code> specified in any of the <code>Chart.yaml</code> files included in the release.</p>
+      </td>
+    </tr>
+    <tr>
+      <td>Requirements</td>
+      <td>
+        Select <strong>Prevent this release from being skipped during upgrades</strong> to mark the release as required for KOTS installations. This option does not apply to installations with Helm.
+      </td>
+    </tr>
+    <tr>
+      <td>Release notes</td>
+      <td>Add release notes. The release notes support markdown and are shown to your customer.</td>
+    </tr>
+    </table>
+
+1. Click **Promote**.
+
+    The release appears in an **Active** state on the Releases page.
+
+## Edit a Draft Release
+
+To edit a draft release:
+
+1. 
From the **Applications** dropdown list, select an existing application to update. +1. On the **Releases** page, find the draft release you want to edit and click **Edit YAML**. + + <img src="/images/releases-edit-draft.png" alt="Edit YAML button for a draft release in the Vendor Portal" width="400"/> + + [View a larger image](/images/releases-edit-draft.png) + +1. Click **Save** to save your updated draft. +1. (Optional) Click **Promote**. + +## Edit Release Properties + +You can edit the properties of a release at any time. For more information about release properties, see [Properties](releases-about#properties) in _About Channels and Releases_. + +To edit release properties: + +1. Go to **Channels**. +1. In the channel where the release was promoted, click **Release History**. +1. For the release sequence that you want to edit, open the dot menu and click **Edit release**. +1. Edit the properties as needed. + <img src="/images/release-properties.png" alt="Release Properties dialog in the Vendor Portal" width="300"/> + + [View a larger image](/images/release-properties.png) +1. Click **Update Release**. + +## Archive a Release + +You can archive releases to remove them from view on the **Releases** page. Archiving a release that has been promoted does _not_ remove the release from the channel's **Release History** page or prevent KOTS from downloading the archived release. + +To archive one or more releases: + +1. From the **Releases** page, click the trash can icon in the upper right corner. +1. Select one or more releases. +1. Click **Archive Releases**. +1. Confirm the archive action when prompted. + +## Demote a Release + +A channel release can be demoted from a channel. When a channel release is demoted, the release is no longer available for download, but is not withdrawn from environments where it was already downloaded or installed. For more information, see [Demotion](/vendor/releases-about#demotion) in _About Channels and Releases_. + +For information about demoting and un-demoting releases with the Replicated CLI, see [channel demote](/reference/replicated-cli-channel-demote) and [channel un-demote](/reference/replicated-cli-channel-un-demote). + +To demote a release in the Vendor Portal: + +1. Go to **Channels**. +1. In the channel where the release was promoted, click **Release History**. +1. For the release sequence that you want to demote, open the dot menu and select **Demote Release**. + + ![Release history page](/images/channels-release-history.png) + [View a larger version of this image](/images/channels-release-history.png) + + After the release is demoted, the given release sequence is greyed out and a **Demoted** label is displayed next to the release on the **Release History** page. + +--- + + +# Downloading Assets from the Download Portal + +import DownloadPortal from "../partials/kots/_download-portal-about.mdx" + +# Downloading Assets from the Download Portal + +This topic describes how to download customer license files, air gap bundles, and other assets from the Replicated Download Portal. 
+
+For information about downloading air gap bundles and licenses with the Vendor API v3, see the following pages in the Vendor API v3 documentation:
+* [Download a customer license file as YAML](https://replicated-vendor-api.readme.io/reference/downloadlicense)
+* [Trigger airgap build for a channel's release](https://replicated-vendor-api.readme.io/reference/channelreleaseairgapbuild)
+* [Get airgap bundle download URL for the active release on the channel](https://replicated-vendor-api.readme.io/reference/channelreleaseairgapbundleurl)
+
+## Overview
+
+<DownloadPortal/>
+
+The most common use case for the Download Portal is for customers installing into air gap environments who need to download both their license file and multiple air gap bundles.
+
+The following is an example of the Download Portal for an air gap customer installing in their own existing cluster:
+
+![Download Portal for existing cluster air gap installs](/images/download-portal-existing-cluster.png)
+
+[View a larger version of this image](/images/download-portal-existing-cluster.png)
+
+## Limitations
+
+* Installation assets for [Replicated Embedded Cluster](/vendor/embedded-overview) are not available for download in the Download Portal.
+
+* Sessions in the Download Portal are valid for 72 hours. After the session expires, your customer must log in again. The Download Portal session length is not configurable.
+
+## Download Assets from the Download Portal
+
+To log in to the Download Portal and download assets:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), on the **Customers** page, click the name of the customer.
+
+1. (Optional) On the **Manage customer** tab, enable the **Airgap Download Enabled** option. This makes air gap bundles available in the Download Portal.
+
+    ![airgap download enabled license option](/images/airgap-download-enabled.png)
+
+    [View a larger version of this image](/images/airgap-download-enabled.png)
+
+1. On the **Reporting** tab, in the **Download portal** section, click **Manage customer password**.
+
+    ![download portal section](/images/download-portal-link.png)
+
+    [View a larger version of this image](/images/download-portal-link.png)
+
+1. In the pop-up window, enter a password or click **Generate**.
+
+    <img alt="download portal password pop-up" src="/images/download-portal-password-popup.png" width="450px"/>
+
+    [View a larger version of this image](/images/download-portal-password-popup.png)
+
+1. Click **Copy** to copy the password to your clipboard.
+
+    After the password is saved, it cannot be retrieved again. If you lose the password, you can generate a new one.
+
+1. Click **Save** to set the password.
+
+1. Click **Visit download portal** to log in to the Download Portal and preview your customer's experience.
+
+    :::note
+    By default, the Download Portal uses the domain `get.replicated.com`. You can optionally use a custom domain for the Download Portal. For more information, see [Using Custom Domains](/vendor/custom-domains-using).
+    :::
+
+1. In the Download Portal, on the left side of the screen, select one of the following:
+    * **Bring my own Kubernetes**: View the downloadable assets for existing cluster installations with KOTS.
+    * **Embedded Kubernetes**: View the downloadable assets for Replicated kURL installations.
+
+    :::note
+    Installation assets for [Replicated Embedded Cluster](/vendor/embedded-overview) are not available for download in the Download Portal.
+ ::: + + The following is an example of the Download Portal for an air gap customer: + + ![download portal for existing cluster air gap installs](/images/download-portal-existing-cluster.png) + + [View a larger version of this image](/images/download-portal-existing-cluster.png) + +1. Under **Select application version**, use the dropdown to select the target application release version. The Download Portal automatically makes the correct air gap bundles available for download based on the selected application version. + +1. Click the download button to download each asset. + +1. To share installation files with a customer, send the customer their unique link and password for the Download Portal. + + +--- + + +# Finding Installation Commands for a Release + +import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; + +# Finding Installation Commands for a Release + +This topic describes where to find the installation commands and instructions for releases in the Replicated Vendor Portal. + +For information about getting installation commands with the Replicated CLI, see [channel inspect](/reference/replicated-cli-channel-inspect). For information about getting installation commands with the Vendor API v3, see [Get install commands for a specific channel release](https://replicated-vendor-api.readme.io/reference/getchannelreleaseinstallcommands) in the Vendor API v3 documentation. + +## Get Commands for the Latest Release + +Every channel in the Vendor Portal has an **Install** section where you can find installation commands for the latest release on the channel. + +To get the installation commands for the latest release: + +1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Channels** page. + +1. On the target channel card, under **Install**, click the tab for the type of installation command that you want to view: + + <Tabs> + <TabItem value="kots" label="KOTS" default> + <p>View the command for installing with Replicated KOTS in existing clusters.</p> + + <img alt="Install section of the channel card" src="/images/channel-card-install-kots.png" width="400px"/> + [View a larger version of this image](/images/channel-card-install-kots.png) + </TabItem> + <TabItem value="embedded" label="Embedded K8s" default> + <p>View the commands for installing with Replicated Embedded Cluster or Replicated kURL on VMs or bare metal servers.</p> + + <p>In the dropdown, choose **kURL** or **Embedded Cluster** to view the command for the target installer:</p> + + <img alt="Install section of the channel card" src="/images/channel-card-install-kurl.png" width="400px"/> + [View a larger version of this image](/images/channel-card-install-kurl.png) + + <img alt="Install section of the channel card" src="/images/channel-card-install-ec.png" width="400px"/> + [View a larger version of this image](/images/channel-card-install-ec.png) + + :::note + The Embedded Cluster installation instructions are customer-specific. Click **View customer list** to navigate to the page for the target customer. For more information, see [Get Customer-Specific Installation Instructions for Helm or Embedded Cluster](#customer-specific) below. 
+ ::: + </TabItem> + <TabItem value="helm" label="Helm" default> + <p>View the command for installing with the Helm CLI in an existing cluster.</p> + + <img alt="Install section of the channel card" src="/images/channel-card-install-helm.png" width="400px"/> + [View a larger version of this image](/images/channel-card-install-helm.png) + + :::note + The Helm installation instructions are customer-specific. Click **View customer list** to navigate to the page for the target customer. For more information, see [Get Customer-Specific Installation Instructions for Helm or Embedded Cluster](#customer-specific) below. + ::: + </TabItem> + </Tabs> + +## Get Commands for a Specific Release + +Every channel in the Vendor Portal has a **Release history** page where you can find the installation commands for specific release versions. + +To get the command for a specific release version: + +1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Channels** page. + +1. On the channel card, click **Release history**. + + <img alt="Release history link on channel card" src="/images/release-history-link.png" width="500px"/> + + [View a larger version of this image](/images/release-history-link.png) + +1. For the target release version, open the dot menu and click **Install Commands**. + + ![Release history page](/images/channels-release-history.png) + + [View a larger version of this image](/images/channels-release-history.png) + +1. In the **Install Commands** dialog, click the tab for the type of installation command that you want to view: + + <Tabs> + <TabItem value="kots" label="KOTS" default> + <p>View the command for installing with Replicated KOTS in existing clusters.</p> + + <img alt="Install section of the channel card" src="/images/release-history-install-kots.png" width="500px"/> + [View a larger version of this image](/images/release-history-install-kots.png) + </TabItem> + <TabItem value="embedded" label="Embedded K8s" default> + <p>View the commands for installing with Replicated Embedded Cluster or Replicated kURL on VMs or bare metal servers.</p> + + <p>In the dropdown, choose **kURL** or **Embedded Cluster** to view the command for the target installer:</p> + + <img alt="Install section of the channel card" src="/images/release-history-install-kurl.png" width="500px"/> + [View a larger version of this image](/images/release-history-install-kurl.png) + + <img alt="Install section of the channel card" src="/images/release-history-install-embedded-cluster.png" width="500px"/> + [View a larger version of this image](/images/release-history-install-embedded-cluster.png) + + :::note + The Embedded Cluster installation instructions are customer-specific. Click **View customer list** to navigate to the page for the target customer. For more information, see [Get Customer-Specific Installation Instructions for Helm or Embedded Cluster](#customer-specific) below. + ::: + </TabItem> + <TabItem value="helm" label="Helm" default> + <p>View the command for installing with the Helm CLI in an existing cluster.</p> + + <img alt="Install section of the channel card" src="/images/release-history-install-helm.png" width="500px"/> + [View a larger version of this image](/images/release-history-install-helm.png) + + :::note + The Helm installation instructions are customer-specific. Click **View customer list** to navigate to the page for the target customer. For more information, see [Get Customer-Specific Installation Instructions for Helm or Embedded Cluster](#customer-specific) below. 
+
+    :::
+    </TabItem>
+  </Tabs>
+
+## Get Customer-Specific Installation Instructions for Helm or Embedded Cluster {#customer-specific}
+
+Installation instructions for the Helm CLI and Replicated Embedded Cluster are customer-specific. You can find installation instructions on the page for the target customer.
+
+To get customer-specific Helm or Embedded Cluster installation instructions:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Customers** page and click the target customer.
+
+1. At the top of the page, click the **Install instructions** dropdown, then click **Helm** or **Embedded cluster**.
+
+    ![Install instructions button](/images/customer-install-instructions-dropdown.png)
+
+    [View a larger version of this image](/images/customer-install-instructions-dropdown.png)
+
+1. In the dialog that opens, follow the installation instructions to install.
+
+    <Tabs>
+    <TabItem value="helm" label="Helm" default>
+    <p>View the customer-specific Helm CLI installation instructions. For more information about installing with the Helm CLI, see [Installing with Helm](/vendor/install-with-helm).</p>
+    <img alt="Helm install button" src="/images/helm-install-instructions-dialog.png" width="500px"/>
+    [View a larger version of this image](/images/helm-install-instructions-dialog.png)
+    </TabItem>
+    <TabItem value="ec" label="Embedded Cluster" default>
+    <p>View the customer-specific Embedded Cluster installation instructions. For more information about installing with Embedded Cluster, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded).</p>
+    <img alt="Embedded cluster install instructions" src="/images/embedded-cluster-install-dialog-latest.png" width="500px"/>
+    [View a larger version of this image](/images/embedded-cluster-install-dialog-latest.png)
+    </TabItem>
+    </Tabs>
+
+---
+
+
+# Generating API Tokens
+
+import TeamTokenNote from "../partials/vendor-api/_team-token-note.mdx"
+
+# Generating API Tokens
+
+This topic describes the available types of API tokens and how to generate them for use with the Replicated CLI and Replicated Vendor API v3.
+
+## About API Tokens
+
+The Vendor API v3 is the API that manages applications in the Replicated Vendor Portal. The Replicated CLI is an implementation of the Vendor API v3.
+
+Using the Replicated CLI and Vendor API v3 requires an API token for authorization. Tokens are primarily used for automated customer, channel, and release management. You create tokens in the Vendor Portal.
+
+The following types of tokens are available:
+
+- [Service Accounts](#service-accounts)
+- [User API Tokens](#user-api-tokens)
+
+<TeamTokenNote/>
+
+### Service Accounts
+
+Service accounts are assigned a token and associated with an RBAC policy. Users with the proper permissions can create, retrieve, or revoke service account tokens. Admin users can assign any RBAC policy to a service account. Non-admin users can only assign their own RBAC policy when they create a service account.
+
+Service accounts are useful for operations that are not tied to a particular user, such as CI/CD or integrations.
+
+Updates to a service account's RBAC policy are automatically applied to its associated token. When a service account is removed, its tokens are also invalidated.
+
+### User API Tokens
+
+User API tokens are private to the user creating the token. User tokens assume the user's account when used, including any RBAC permissions.
+
+Updates to a user's RBAC role are applied to all of the tokens belonging to that user.
+
+Revoking a user token immediately invalidates that token. When a user account is deleted, its user tokens are also deleted.
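+
+When you call the Vendor API v3 directly, you pass the token in a request header. The following is a minimal sketch; the header format and endpoint are assumptions, so verify them in the Vendor API v3 documentation:
+
+```bash
+# List your applications with a service account or user API token
+# (header and endpoint are assumptions; see the Vendor API v3 docs)
+curl --header "Authorization: $REPLICATED_API_TOKEN" \
+  https://api.replicated.com/vendor/v3/apps
+```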
+
+## Generate Tokens
+
+To use the Replicated CLI or the Vendor API v3, you need a User API token or a Service Account token. Existing team API tokens also continue to work.
+
+### Generate a Service Account
+
+To generate a service account:
+
+1. Log in to the Vendor Portal, and select [**Team > Service Accounts**](https://vendor.replicated.com/team/serviceaccounts).
+1. Select **New Service Account**.
+
+1. Edit the fields in the **New Service Account** dialog:
+
+    <img alt="New Service Accounts Dialog" src="/images/service-accounts.png" width="400px"/>
+
+    [View a larger version of this image](/images/service-accounts.png)
+
+    1. For **Nickname**, enter a name for the token. Names for service accounts must be unique within a given team.
+
+    1. For **RBAC**, select the RBAC policy from the dropdown list. The token must have `Admin` access to create new releases.
+
+        This list includes the Vendor Portal default policies `Admin` and `Read Only`. Any custom policies also display in this list. For more information, see [Configuring RBAC Policies](team-management-rbac-configuring).
+
+        Users with a non-admin RBAC role cannot select any other RBAC role when creating a token. They are restricted to creating a token with their same level of access to avoid permission elevation.
+
+    1. (Optional) For custom RBAC policies, select the **Limit to read-only version of above policy** checkbox if you want to use a policy that has Read/Write permissions but limit this service account to read-only access. This option lets you maintain one version of a custom RBAC policy and use it two ways: as read/write and as read-only.
+
+1. Select **Create Service Account**.
+
+1. Copy the service account token and save it in a secure location. The token will not be available to view again.
+
+    :::note
+    To remove a service account, select **Remove** for the service account that you want to delete.
+    :::
+
+### Generate a User API Token
+
+To generate a user API token:
+
+1. Log in to the Vendor Portal and go to the [Account Settings](https://vendor.replicated.com/account-settings) page.
+1. Under **User API Tokens**, select **Create a user API token**. If one or more tokens already exist, you can add another by selecting **New user API token**.
+
+    <img alt="User API Token Page" src="/images/user-token-list.png" width="600px"/>
+
+    [View a larger version of this image](/images/user-token-list.png)
+
+1. In the **New user API token** dialog, enter a name for the token in the **Nickname** field. Names for user API tokens must be unique per user.
+
+    <img alt="Create New User Token Dialog" src="/images/user-token-create.png" width="400px"/>
+
+    [View a larger version of this image](/images/user-token-create.png)
+
+1. Select the required permissions or use the default **Read and Write** permissions. Then select **Create token**.
+
+    :::note
+    The token must have `Read and Write` access to create new releases.
+    :::
+
+1. Copy the user API token that displays and save it in a secure location. The token will not be available to view again.
+
+    :::note
+    To revoke a token, select **Revoke token** for the token that you want to delete.
+
+---
+
+
+# Replicated Onboarding
+
+import CreateRelease from "../partials/getting-started/_create-promote-release.mdx"
+import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx"
+import EcCr from "../partials/embedded-cluster/_ec-config.mdx"
+import HelmPackage from "../partials/helm/_helm-package.mdx"
+import Requirements from "../partials/embedded-cluster/_requirements.mdx"
+import SDKOverview from "../partials/replicated-sdk/_overview.mdx"
+import TestYourChanges from "../partials/getting-started/_test-your-changes.mdx"
+import UnauthorizedError from "../partials/replicated-sdk/_401-unauthorized.mdx"
+
+# Replicated Onboarding
+
+This topic describes how to onboard applications to the Replicated Platform.
+
+## Before You Begin
+
+This section includes guidance and prerequisites to review before you begin onboarding your application.
+
+### Best Practices and Recommendations
+
+The following are some best practices and recommendations for successfully onboarding with Replicated:
+
+* When integrating new Replicated features with an application, make changes in small iterations and test frequently by installing or upgrading the application in a development environment. This will help you to more easily identify issues and troubleshoot. This onboarding workflow will guide you through the process of integrating features in small iterations.
+
+* Use the Replicated CLI to create and manage your application and releases. Getting familiar with the Replicated CLI will also help later on when integrating Replicated workflows into your CI/CD pipelines. For more information, see [Installing the Replicated CLI](/reference/replicated-cli-installing).
+
+* These onboarding tasks assume that you will test the installation of each release on a VM with the Replicated Embedded Cluster installer _and_ in a cluster with the Replicated KOTS installer. If you do not intend to offer existing cluster installations with KOTS (for example, if you intend to support only Embedded Cluster and Helm installations for your users), then you can choose to test with Embedded Cluster only.
+
+* Ask for help from the Replicated community. For more information, see [Getting Help from the Community](#community) below.
+
+### Getting Help from the Community {#community}
+
+The [Replicated community site](https://community.replicated.com/) is a forum where Replicated team members and users can post questions and answers related to working with the Replicated Platform. It is designed to help Replicated users troubleshoot and learn more about common tasks involved with distributing, installing, observing, and supporting their application.
+
+Before posting in the community site, use the search to find existing knowledge base articles related to your question. If you are not able to find an existing article that addresses your question, create a new topic or add a reply to an existing topic so that a member of the Replicated community or team can respond.
+
+To search and participate in the Replicated community, see https://community.replicated.com/.
+
+### Prerequisites
+
+* Create an account in the Vendor Portal. You can either create a new team or join an existing team. For more information, see [Creating a Vendor Account](vendor-portal-creating-account).
+
+* Install the Replicated CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing).
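+
+  For example, one documented install option is Homebrew (see the linked topic for all options):
+
+  ```bash
+  brew install replicatedhq/replicated/cli
+  ```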
+
+* Complete a basic quick start workflow to create an application with a sample Helm chart and then promote and install releases in a development environment. This helps you get familiar with the process of creating, installing, and updating releases in the Replicated Platform. See [Replicated Quick Start](/vendor/quick-start).
+
+* Ensure that you have access to a VM that meets the requirements for the Replicated Embedded Cluster installer. You will use this VM to test installation with Embedded Cluster.
+
+  Embedded Cluster has the following requirements:
+
+  <Requirements/>
+
+* (Optional) Ensure that you have kubectl access to a Kubernetes cluster. You will use this cluster to test installation with KOTS. If you do not intend to offer existing cluster installations with KOTS (for example, if you intend to support only Embedded Cluster and Helm installations for your users), then you do not need access to a cluster for the main onboarding tasks.
+
+  You can use any cloud provider or tool that you prefer to create a cluster, such as [Replicated Compatibility Matrix](/vendor/testing-how-to), Google Kubernetes Engine (GKE), or minikube.
+
+## Onboard
+
+Complete the tasks in this section to onboard your application. When you are done, you can continue to [Next Steps](#next-steps) to integrate other Replicated features with your application.
+
+### Task 1: Create an Application
+
+To get started with onboarding, first create a new application. This will be the official Vendor Portal application used by your team to create and promote both internal and customer-facing releases.
+
+To create an application:
+
+1. Create a new application using the Replicated CLI or the Vendor Portal. Use an official name for your application. See [Create an Application](/vendor/vendor-portal-manage-app#create-an-application).
+
+   <details>
+   <summary>Can I change the application name in the future?</summary>
+
+   You can change the application name, but you cannot change the application _slug_.
+
+   The Vendor Portal automatically generates and assigns a unique slug for each application based on the application's name. For example, the slug for "Example App" would be `example-app`.
+
+   Application slugs are unique across all of Replicated. This means that, if necessary, the Vendor Portal will append a random word to the end of the slug to ensure uniqueness. For example, `example-app-flowers`.
+   </details>
+
+1. Set the `REPLICATED_APP` environment variable to the unique slug of the application that you created. This will allow you to interact with the application from the Replicated CLI throughout onboarding. See [Set Environment Variables](/reference/replicated-cli-installing#replicated_app) in _Installing the Replicated CLI_.
+
+   For example:
+
+   ```bash
+   export REPLICATED_APP=my-app
+   ```
+
+### Task 2: Connect Your Image Registry
+
+Add credentials for your image registry to the Vendor Portal. This will allow you to use the Replicated proxy registry in a later step so that you can grant proxy access to application images without exposing registry credentials to your customers.
+
+For more information, see [Connecting to an External Registry](/vendor/packaging-private-images).
+
+### Task 3: Add the Replicated SDK and Package Your Chart
+
+Next, add the Replicated SDK as a dependency of your Helm chart and package the chart as a `.tgz` archive.
+
+The Replicated SDK is a Helm chart that can be installed as a small service alongside your application. 
The SDK provides access to key Replicated functionality, including an in-cluster API and automatic access to insights and operational telemetry for instances running in customer environments. For more information, see [About the Replicated SDK](/vendor/replicated-sdk-overview). + +To package your Helm chart with the Replicated SDK: + +1. Go to the local directory where your Helm chart is. + +1. In your application Helm chart `Chart.yaml` file, add the YAML below to declare the SDK as a dependency. + + If your application is installed as multiple charts, declare the SDK as a dependency of the chart that customers install first. Do not declare the SDK in more than one chart. For more information, see [Packaging a Helm Chart for a Release](helm-install-release). + + <DependencyYaml/> + +1. Update dependencies and package the chart as a `.tgz` file: + + <HelmPackage/> + + <UnauthorizedError/> + +1. If your application is deployed as multiple Helm charts, package each chart as a separate `.tgz` archive using the `helm package -u PATH_TO_CHART` command. Do not declare the SDK in more than one chart. + +### Task 4: Create the Initial Release with KOTS HelmChart and Embedded Cluster Config {#first-release} + +After packaging your Helm chart, you can create a release. The initial release for your application will include the minimum files required to install a Helm chart with the Embedded Cluster installer: +* The Helm chart `.tgz` archive +* [KOTS HelmChart custom resource](/reference/custom-resource-helmchart-v2) +* [Embedded Cluster Config](/reference/embedded-config) + +If you have multiple charts, you will add each chart archive to the release, plus a corresponding KOTS HelmChart custom resource for each archive. + +:::note +Configuring the KOTS HelmChart custom resource includes several tasks, and involves the use of KOTS template functions. Depending on how many Helm charts your application uses, Replicated recommends that you allow about two to three hours for configuring the HelmChart custom resource and creating and testing your initial release. +::: + +To create the first release for your application: + +1. In the local directory for your Helm chart, create a subdirectory named `manifests` where you will add the files for the release. + +1. In the `manifests` directory: + + 1. Move the `.tgz` chart archive that you packaged. If your application is deployed as multiple Helm charts, move each `.tgz` archive to `manifests`. + + 1. Create an `embedded-cluster.yaml` file with the following default Embedded Cluster Config: + + <EcCr/> + + <details> + <summary>What is the Embedded Cluster Config?</summary> + + The Embedded Cluster Config is required to install with Embedded Cluster. + </details> + + For more information, see [Using Embedded Cluster](/vendor/embedded-overview). + + 1. Create a new YAML file. In this file, configure the KOTS HelmChart custom resource by completing the workflow in [Configuring the HelmChart Custom Resource](helm-native-v2-using). + + <details> + <summary>What is the KOTS HelmChart custom resource?</summary> + + The KOTS HelmChart custom resource is required to install Helm charts with KOTS and Embedded Cluster. As part of configuring the KOTS HelmChart custom resource, you will rewrite image names and add image pull secrets to allow your application images to be accessed through the Replicated proxy registry. + </details> + + 1. 
If your application is deployed as multiple Helm charts, repeat the step above to add a separate HelmChart custom resource for each Helm chart archive in the release. + + 1. If there are values in any of your Helm charts that need to be set for the installation to succeed, you can set those values using the `values` key in the corresponding HelmChart custom resource. See [Setting Helm Values with KOTS](/vendor/helm-optional-value-keys). + + This is a temporary measure to ensure the values get passed to the Helm chart during installation until you configure the Admin Console Config screen in a later onboarding task. If your default Helm values are sufficient for installation, you can skip this step. + + 1. If your application requires that certain components are deployed before the application and as part of the Embedded Cluster itself, then update the Embedded Cluster Config to add [extensions](/reference/embedded-config#extensions). Extensions allow you to provide Helm charts that are deployed before your application. For example, one situation where this is useful is if you want to ship an ingress controller because Embedded Cluster does not include one. + + For more information, see [extensions](/reference/embedded-config#extensions) in _Embedded Cluster Config_. + +1. From the `manifests` directory, create a release and promote it to the Unstable channel. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Managing Releases with the CLI](releases-creating-cli). + + ```bash + replicated release create --yaml-dir . --promote Unstable + ``` + +1. Install the release in your development environment to test: + + 1. Install with Embedded Cluster on a VM. See [Online Installation with Embedded Cluster](/enterprise/installing-embedded). + + 1. (Optional) Install in an existing cluster with KOTS. See [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster). + +After successfully installing the initial release on a VM with Embedded Cluster (and optionally in an existing cluster with KOTS), go to the next task. You will continue to iterate throughout the rest of the onboarding process by creating and promoting new releases, then upgrading to the new version in your development environment. + +### Task 5: Customize the KOTS Admin Console {#admin-console} + +Configure the KOTS Application custom resource to add an application name, icon, and status informers. The name and icon will be displayed in the Admin Console and the Replicated Download Portal. The status informers will be used to display the application status on the Admin Console dashboard. + +To configure the KOTS Application custom resource: + +1. In your `manifests` directory, create a new `kots-app.yaml` file. + +1. In the `kots-app.yaml` file, add the [KOTS Application](/reference/custom-resource-application) custom resource YAML and set the `title`, `icon`, and `statusInformers` fields. 
+
+   **Example:**
+
+   ```yaml
+   apiVersion: kots.io/v1beta1
+   kind: Application
+   metadata:
+     name: gitea
+   spec:
+     title: Gitea
+     # Base64 encoded image string
+     icon: fyJINrigNkt5VsRiub9nXICdsYyVd2NcVvA3ScE5t2rb5JuEeyZnAhmLt9NK63vX1O
+     statusInformers:
+       - deployment/gitea
+   ```
+   For more information, see:
+   * [Customizing the Application Icon](/vendor/admin-console-customize-app-icon)
+   * [Enabling and Understanding Application Status](/vendor/insights-app-status)
+   * [Application](/reference/custom-resource-application)
+   <br/>
+   <details>
+   <summary>Can I preview the icon before installing the release?</summary>
+
+   Yes. The Vendor Portal includes an **Application icon preview** in the **Help** pane on the **Edit release** page.
+
+   ![Icon preview](/images/icon-preview.png)
+
+   [View a larger version of this image](/images/icon-preview.png)
+
+   </details>
+
+1. <CreateRelease/>
+
+1. <TestYourChanges/>
+
+### Task 6: Set Up the Admin Console Config Screen and Map to Helm Values
+
+The KOTS Admin Console Config screen is used to collect required and optional application configuration values from your users. User-supplied values provided on the Config screen can be mapped to your Helm values.
+
+Before you begin this task, you can complete the [Set Helm Values with KOTS](/vendor/tutorial-config-setup) tutorial to learn how to map user-supplied values from the Admin Console Config screen to a Helm chart.
+
+:::note
+Setting up the Admin Console Config screen can include the use of various types of input fields, conditional statements, and KOTS template functions. Depending on your application's configuration options, Replicated recommends that you allow about two to three hours for configuring the Config custom resource and testing the Admin Console Config screen.
+:::
+
+To set up the Admin Console Config screen for your application:
+
+1. In your `manifests` directory, create a new file named `kots-config.yaml`.
+
+1. In `kots-config.yaml`, add the KOTS Config custom resource. Configure the KOTS Config custom resource based on the values that you need to collect from users.
+
+   **Example:**
+
+   ```yaml
+   apiVersion: kots.io/v1beta1
+   kind: Config
+   metadata:
+     name: my-application
+   spec:
+     groups:
+     - name: example_group
+       title: Example Group
+       items:
+       - name: example_item
+         title: Example Item
+         type: text
+         default: "Hello World"
+   ```
+
+   For more information, see:
+   * [Creating and Editing Configuration Fields](/vendor/admin-console-customize-config-screen)
+   * [Using Conditional Statements in Configuration Fields](/vendor/config-screen-conditional)
+   * [Config](/reference/custom-resource-config)
+
+   <br/>
+
+   <details>
+   <summary>Can I preview the Admin Console config screen before installing the release?</summary>
+
+   Yes. The Vendor Portal includes a **Config preview** in the **Help** pane on the **Edit release** page.
+
+   For example:
+
+   ![Config preview](/images/config-preview.png)
+
+   [View a larger version of this image](/images/config-preview.png)
+   </details>
+
+1. <CreateRelease/>
+
+1. <TestYourChanges/>
+
+1. In `manifests`, open the KOTS HelmChart custom resource that you configured in a previous step. Configure the `values` key of the HelmChart custom resource to map the fields in the KOTS Config custom resource to your Helm values.
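+
+   For example, the following sketch (with hypothetical chart and value names) uses the KOTS `ConfigOption` template function to pass the user-supplied value of the `example_item` field shown above to a Helm value:
+
+   ```yaml
+   apiVersion: kots.io/v1beta2
+   kind: HelmChart
+   metadata:
+     name: my-application
+   spec:
+     chart:
+       name: my-application
+       chartVersion: 1.0.0
+     values:
+       # Renders the value that the user entered for the example_item field
+       exampleText: repl{{ ConfigOption "example_item" }}
+   ```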
+ + For more information, see: + * [Mapping User-Supplied Values](/vendor/config-screen-map-inputs) + * [Tutorial: Set Helm Chart Values with KOTS](/vendor/tutorial-config-setup) + * [Setting Helm Values with KOTS](/vendor/helm-optional-value-keys) + * [`values`](/reference/custom-resource-helmchart-v2#values) in _HelmChart v2_ + +1. <CreateRelease/> + +1. <TestYourChanges/> + +1. Continue to create and test new releases with new config fields until you are ready to move on to the next task. + +### Task 7: Define Preflight Checks + +In the next two tasks, you will add specs for _preflight checks_ and _support bundles_. + +Preflight checks and support bundles are provided by the Troubleshoot open source project, which is maintained by Replicated. Troubleshoot is a kubectl plugin that provides diagnostic tools for Kubernetes applications. For more information, see the open source [Troubleshoot](https://troubleshoot.sh/docs/) documentation. + +Preflight checks and support bundles analyze data from customer environments to provide insights that help users to avoid or troubleshoot common issues with an application: +* **Preflight checks** run before an application is installed to check that the customer environment meets the application requirements. +* **Support bundles** collect troubleshooting data from customer environments to help users diagnose problems with application deployments. + +:::note +Before you begin this task, you can complete the [Add Preflight Checks to a Helm Chart](/vendor/tutorial-preflight-helm-setup) tutorial to learn how to add a preflight spec to a Helm chart in a Kubernetes secret and run the preflight checks before installation. +::: + +To define preflight checks for your application: + +1. In your Helm chart `templates` directory, add a Kubernetes Secret that includes a preflight spec. For more information, see [Defining Preflight Checks](/vendor/preflight-defining). For examples, see [Example Preflight Specs](/vendor/preflight-examples). + :::note + If your application is deployed as multiple Helm charts, add the Secret to the `templates` directory for the chart that is installed first. + ::: + +1. Update dependencies and package the chart as a `.tgz` file: + + <HelmPackage/> + +1. Move the `.tgz` file to the `manifests` directory. + +1. <CreateRelease/> + +1. <TestYourChanges/> + + Preflight checks run automatically during installation. + +1. Continue to create and test new releases with additional preflight checks until you are ready to move on to the next task. + +### Task 8: Add a Support Bundle Spec + +To add the default support bundle spec to your application: + +1. In your Helm chart `templates` directory, add the following YAML to a Kubernetes Secret to enable the default support bundle spec for your application: + + ```yaml + apiVersion: v1 + kind: Secret + metadata: + labels: + troubleshoot.sh/kind: support-bundle + name: example + stringData: + support-bundle-spec: | + apiVersion: troubleshoot.sh/v1beta2 + kind: SupportBundle + metadata: + name: support-bundle + spec: + collectors: [] + analyzers: [] + ``` + :::note + If your application is installed as multiple Helm charts, you can optionally create separate support bundle specs in each chart. The specs are automatically merged when a support bundle is generated. Alternatively, continue with a single support bundle spec and then optionally revisit how you organize your support bundle specs after you finish onboarding. + ::: + +1. 
(Recommended) At a minimum, Replicated recommends that all support bundle specs include the `logs` collector. This collects logs from running Pods in the cluster.
+
+   **Example:**
+
+   ```yaml
+   apiVersion: v1
+   kind: Secret
+   metadata:
+     name: example
+     labels:
+       troubleshoot.sh/kind: support-bundle
+   stringData:
+     support-bundle-spec: |-
+       apiVersion: troubleshoot.sh/v1beta2
+       kind: SupportBundle
+       metadata:
+         name: example
+       spec:
+         collectors:
+           - logs:
+               selector:
+                 - app.kubernetes.io/name=myapp
+               namespace: {{ .Release.Namespace }}
+               limits:
+                 maxAge: 720h
+                 maxLines: 10000
+   ```
+
+   For more information, see:
+   * [Adding and Customizing Support Bundles](/vendor/support-bundle-customizing)
+   * [Example Support Bundle Specs](/vendor/support-bundle-examples)
+   * [Pod Logs](https://troubleshoot.sh/docs/collect/logs/) in the Troubleshoot documentation.
+
+1. (Recommended) Ensure that any preflight checks that you added are also included in your support bundle spec. This ensures that support bundles collect at least the same information collected when running preflight checks.
+
+1. Update dependencies and package the chart as a `.tgz` file:
+
+   <HelmPackage/>
+
+1. Move the `.tgz` file to the `manifests` directory.
+
+1. <CreateRelease/>
+
+1. <TestYourChanges/>
+
+   For information about how to generate support bundles, see [Generating Support Bundles](/vendor/support-bundle-generating).
+
+1. (Optional) Customize the support bundle spec by adding additional collectors and analyzers.
+
+### Task 9: Alias Replicated Endpoints with Your Own Domains
+
+Your customers are exposed to several Replicated domains by default. Replicated recommends that you use custom domains to unify the customer's experience with your brand and simplify security reviews.
+
+For more information, see [Using Custom Domains](/vendor/custom-domains-using).
+
+## Next Steps
+
+After completing the main onboarding tasks, Replicated recommends that you also complete the following additional tasks to integrate other Replicated features with your application. You can complete these next recommended tasks in any order and at your own pace.
+
+### Add Support for Helm Installations
+
+Existing KOTS releases that include one or more Helm charts can be installed with the Helm CLI; it is not necessary to create and manage separate releases or channels for each installation method.
+
+To enable Helm installations for Helm charts distributed with Replicated, the only extra step is to add a Secret to your chart to authenticate with the Replicated proxy registry.
+
+This is the same secret that is passed to KOTS in the HelmChart custom resource using `'{{repl ImagePullSecretName }}'`, as configured in [Task 4: Create the Initial Release with KOTS HelmChart and Embedded Cluster Config](#first-release). So, whereas this Secret is created automatically for KOTS and Embedded Cluster installations, you need to create it and add it to your Helm chart for Helm installations.
+
+:::note
+Before you test Helm installations for your application, you can complete the [Deploy a Helm Chart with KOTS and the Helm CLI](tutorial-kots-helm-setup) tutorial to learn how to install a single release with both KOTS and Helm.
+:::
+
+To support and test Helm installations:
+
+1. Follow the steps in [Using the Proxy Registry with Helm Installations](/vendor/helm-image-registry) to authenticate with the Replicated proxy registry by creating a Secret with `type: kubernetes.io/dockerconfigjson` in your Helm chart.
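+
+   For example, a minimal sketch of such a Secret template, following the pattern in the linked topic (the file and Secret names are hypothetical; the `global.replicated.dockerconfigjson` value is injected when a customer pulls the chart from the Replicated registry):
+
+   ```yaml
+   # templates/replicated-pull-secret.yaml
+   apiVersion: v1
+   kind: Secret
+   metadata:
+     name: replicated-pull-secret
+   type: kubernetes.io/dockerconfigjson
+   data:
+     # Injected registry credentials for the customer's license
+     .dockerconfigjson: {{ .Values.global.replicated.dockerconfigjson }}
+   ```
+
+   Reference the Secret name in the `imagePullSecrets` of your Pod templates so that application images can be pulled through the proxy registry.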
+
+1. Update dependencies and package the chart as a `.tgz` file:
+
+   <HelmPackage/>
+
+1. Add the `.tgz` file to a release. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Managing Releases with the CLI](releases-creating-cli).
+
+1. Install the release in a cluster with the Helm CLI to test your changes. For more information, see [Installing with Helm](/vendor/install-with-helm).
+
+### Add Support for Air Gap Installations
+
+Replicated Embedded Cluster and KOTS support installations in _air gap_ environments with no outbound internet access. Users can install with Embedded Cluster and KOTS in air gap environments by providing air gap bundles that contain the required images for the installers and for your application.
+
+:::note
+Replicated also offers Alpha support for air gap installations with Helm. If you are interested in trying Helm air gap installations and providing feedback, please reach out to your account rep to enable this feature.
+:::
+
+To add support for air gap installations:
+
+1. If there are any images for your application that are not listed in your Helm chart, list these images in the `additionalImages` attribute of the KOTS Application custom resource. This ensures that the images are included in the air gap bundle for the release. One common use case for this is applications that use Kubernetes Operators. See [Define Additional Images](/vendor/operator-defining-additional-images).
+
+1. In the KOTS HelmChart custom resource `builder` key, pass any values that are required in order for `helm template` to yield all the images needed to successfully install your application. See [Packaging Air Gap Bundles for Helm Charts](/vendor/helm-packaging-airgap-bundles).
+
+   :::note
+   If the default values in your Helm chart already enable all the images needed to successfully deploy, then you do not need to configure the `builder` key.
+   :::
+
+   <details>
+   <summary>How do I know if I need to configure the `builder` key?</summary>
+
+   When building an air gap bundle, the Vendor Portal templates the Helm charts in a release with `helm template` in order to detect the images that need to be included in the bundle. Images yielded by `helm template` are included in the bundle for the release.
+
+   For many applications, running `helm template` with the default values would not yield all the images required to install. In these cases, vendors can pass the additional values in the `builder` key to ensure that the air gap bundle includes all the necessary images.
+   </details>
+
+1. If you have not done so already as part of [Task 4: Create the Initial Release with KOTS HelmChart and Embedded Cluster Config](#first-release), ensure that the `values` key in the KOTS HelmChart custom resource correctly rewrites image names for air gap installations. This is done using the KOTS `HasLocalRegistry`, `LocalRegistryHost`, and `LocalRegistryNamespace` template functions to render the location of the given image in the user's own local registry.
+
+   For more information, see [Rewrite Image Names](/vendor/helm-native-v2-using#rewrite-image-names) in _Configuring the HelmChart Custom Resource v2_.
+
+1. Create and promote a new release with your changes. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Managing Releases with the CLI](releases-creating-cli).
+
+1. In the [Vendor Portal](https://vendor.replicated.com), go to the channel where the release was promoted to build the air gap bundle. Do one of the following:
+   * If the **Automatically create airgap builds for newly promoted releases in this channel** setting is enabled on the channel, watch for the build status to complete.
+   * If automatic air gap builds are not enabled, go to the **Release history** page for the channel and build the air gap bundle manually.
+
+1. Create a customer with the **Airgap Download Enabled** entitlement enabled so that you can test air gap installations. See [Creating and Managing Customers](/vendor/releases-creating-customer).
+
+1. Download the Embedded Cluster air gap installation assets, then install with Embedded Cluster on an air gap VM to test. See [Installing in Air Gap Environments with Embedded Cluster](/enterprise/installing-embedded-air-gap).
+
+1. (Optional) Download the `.airgap` bundle for the release and the air gap bundle for the KOTS Admin Console. You can also download both bundles from the Download Portal for the target customer. Then, install in an air gap existing cluster to test. See [Air Gap Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster-airgapped).
+
+1. (Optional) Follow the steps in [Installing and Updating with Helm in Air Gap Environments (Alpha)](/vendor/helm-install-airgap) to test air gap installation with Helm.
+
+   :::note
+   Air gap Helm installations are an Alpha feature. If you are interested in trying Helm air gap installations and providing feedback, please reach out to your account rep to enable this feature.
+   :::
+
+### Add Roles for Multi-Node Clusters in Embedded Cluster Installations
+
+The Embedded Cluster Config supports roles for multi-node clusters. One or more roles can be selected and assigned to a node when it is joined to the cluster. Node roles can be used to determine which nodes run the Kubernetes control plane, and to assign application workloads to particular nodes.
+
+For more information, see [roles](/reference/embedded-config#roles) in _Embedded Cluster Config_.
+
+### Add and Map License Entitlements
+
+You can add custom license entitlements for your application in the Vendor Portal. Custom license fields are useful when there is entitlement information that applies to a subset of customers. For example, you can use entitlements to:
+* Limit the number of active users permitted
+* Limit the number of nodes a customer is permitted to run in their cluster
+* Identify a customer on a "Premium" plan that has access to additional features or functionality not available with your base plan
+
+For more information about how to create and assign custom entitlements in the Vendor Portal, see [Managing Customer License Fields](/vendor/licenses-adding-custom-fields) and [Creating and Managing Customers](/vendor/releases-creating-customer).
+
+#### Map Entitlements to Helm Values
+
+You can map license entitlements to your Helm values using KOTS template functions. This can be useful when you need to set certain values based on the user's license information. For more information, see [Using KOTS Template Functions](/vendor/helm-optional-value-keys#using-kots-template-functions) in _Setting Helm Values with KOTS_.
+
+#### Query Entitlements Before Installation and at Runtime
+
+You can add logic to your application to query license entitlements both before deployment and at runtime. For example, you might want to add preflight checks that verify a user's entitlements before installing. Or, you can expose additional product functionality dynamically at runtime based on a customer's entitlements.
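+
+For example, a sketch of querying a hypothetical `num_seats` entitlement at runtime through the in-cluster Replicated SDK API:
+
+```bash
+# The SDK API is exposed in-cluster at replicated:3000
+curl http://replicated:3000/api/v1/license/fields/num_seats
+```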
+
+For more information, see:
+* [Querying Entitlements with the Replicated SDK API](/vendor/licenses-reference-sdk)
+* [Checking Entitlements in Preflights with KOTS Template Functions](/vendor/licenses-referencing-fields)
+
+### Add Application Links to the Admin Console Dashboard
+
+You can add the Kubernetes SIG Application custom resource to your release to add a link to your application from the Admin Console dashboard. This makes it easier for users to access your application after installation.
+
+You can also configure the Kubernetes SIG Application resource to add links to other resources like documentation or dashboards.
+
+For more information, see [Adding Application Links to the Dashboard](/vendor/admin-console-adding-buttons-links).
+
+### Update the Preflight and Support Bundle Specs
+
+After adding basic specs for preflights and support bundles, you can continue to add more collectors and analyzers as needed.
+
+Consider the following recommendations and best practices:
+
+* Revisit your preflight and support bundle specs when new support issues arise that are not covered by your existing specs.
+
+* Your support bundles should include all of the same collectors and analyzers that are in your preflight checks. This ensures that support bundles include all the necessary troubleshooting information, including any failures in preflight checks.
+
+* Your support bundles will most likely need to include other collectors and analyzers that are not in your preflight checks. This is because some of the information used for troubleshooting (such as logs) is not necessary when running preflight checks before installation.
+
+* If your application is installed as multiple Helm charts, you can optionally add separate support bundle specs in each chart. This can make it easier to keep the specs up-to-date and to avoid merge conflicts that can be caused when multiple team members contribute to a single, large support bundle spec. When an application has multiple support bundle specs, the specs are automatically merged when generating a support bundle so that only a single support bundle is provided to the user.
+
+The documentation for the open-source Troubleshoot project includes the full list of available collectors and analyzers that you can use. See [All Collectors](https://troubleshoot.sh/docs/collect/all/) and the [Analyze](https://troubleshoot.sh/docs/analyze/) section in the Troubleshoot documentation.
+
+You can also view common examples of collectors and analyzers used in preflight checks and support bundles in [Preflight Spec Examples](preflight-examples) and [Support Bundle Spec Examples](support-bundle-examples).
+
+### Configure Backup and Restore
+
+Enable backup and restore with Velero for your application so that users can back up and restore their KOTS Admin Console and application data.
+
+There are different steps to configure backup and restore for Embedded Cluster and for existing cluster installations with KOTS:
+* To configure the disaster recovery feature for Embedded Cluster, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery).
+* To configure the snapshots feature for existing cluster KOTS installations, see [Configuring Snapshots](snapshots-configuring-backups).
+
+### Add Custom Metrics
+
+In addition to the built-in insights displayed in the Vendor Portal by default (such as uptime and time to install), you can also configure custom metrics to measure instances of your application running in customer environments. Custom metrics can be collected for application instances running in online or air gap environments using the Replicated SDK.
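+
+For example, a sketch of sending metrics from your application to the in-cluster SDK API (the metric names shown are hypothetical, vendor-defined keys):
+
+```bash
+curl -X POST http://replicated:3000/api/v1/app/custom-metrics \
+  -H "Content-Type: application/json" \
+  -d '{"data": {"num_projects": 5, "weekly_active_users": 10}}'
+```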
+
+For more information, see [Configuring Custom Metrics](/vendor/custom-metrics).
+
+### Integrate with CI/CD
+
+Replicated recommends that teams integrate the Replicated Platform into their existing development and production CI/CD workflows. This can be useful for automating the processes of creating new releases, promoting releases, and testing releases with the Replicated Compatibility Matrix.
+
+For more information, see:
+* [About Integrating with CI/CD](/vendor/ci-overview)
+* [About Compatibility Matrix](/vendor/testing-about)
+* [Recommended CI/CD Workflows](/vendor/ci-workflows)
+
+### Customize Release Channels
+
+By default, the Vendor Portal includes Unstable, Beta, and Stable channels. You can customize the channels in the Vendor Portal based on your application needs.
+
+Consider the following recommendations:
+* Use the Stable channel for your primary release cadence. Releases should be promoted to the Stable channel only as frequently as your average customer can consume new releases. Typically, this is no more than monthly. However, this cadence varies depending on the customer base.
+* If you have a SaaS product, you might want to create an "Edge" channel where you promote the latest SaaS releases.
+* You can consider a "Long Term Support" channel where you promote new releases less frequently and support those releases for longer.
+* It can be useful to create channels for each feature branch so that internal teams reviewing a PR can easily get the installation artifacts as well as review the code. You can automate channel creation as part of a pipeline or Makefile.
+
+For more information, see:
+* [About Channels and Releases](/vendor/releases-about)
+* [Creating and Editing Channels](/vendor/releases-creating-channels)
+
+### Write Your Documentation
+
+Before distributing your application to customers, ensure that your documentation is up-to-date. In particular, be sure to update the installation documentation to include the procedures and requirements for installing with Embedded Cluster, Helm, and any other installation methods that you support.
+
+For guidance on how to get started with documentation for applications distributed with Replicated, including key considerations, examples, and templates, see [Writing Great Documentation for On-Prem Software Distributed with Replicated](https://www.replicated.com/blog/writing-great-documentation-for-on-prem-software-distributed-with-replicated) in the Replicated blog.
+
+---
+
+
+# Installing the SDK in Air Gap Environments
+
+# Installing the SDK in Air Gap Environments
+
+This topic explains how to install the Replicated SDK in air gap environments by enabling air gap mode.
+
+## Overview
+
+The Replicated SDK has an _air gap mode_ that allows it to run in environments with no outbound internet access. When installed in air gap mode, the SDK does not attempt to connect to the internet. This avoids any failures that would occur when the SDK is unable to make outbound requests in air gap environments.
+
+Air gap mode is enabled when `isAirgap: true` is set in the values for the SDK Helm chart. For more information, see [Install the SDK in Air Gap Mode](#install) below. Allowing air gap mode to be controlled with the `isAirgap` value means that vendors and enterprise customers do not need to rely on air gap environments being automatically detected, which is unreliable and error-prone. The `isAirgap` value also allows the SDK to be installed in air gap mode even if the instance can access the internet.
+
+## Differences in Air Gap Mode
+
+Air gap mode differs from non-air gap installations of the SDK in the following ways:
+* The SDK stores instance telemetry and custom metrics in a Kubernetes Secret in the customer environment, rather than attempting to send telemetry and custom metrics back to the Replicated Vendor Portal. The telemetry and custom metrics stored in the Secret are collected whenever a support bundle is generated in the environment, and are reported when the support bundle is uploaded to the Vendor Portal. For more information about telemetry for air gap instances, see [Collecting Telemetry for Air Gap Instances](/vendor/telemetry-air-gap).
+* The SDK returns an empty array (`[]`) for any requests to check for updates using the [`/api/v1/app/updates`](/reference/replicated-sdk-apis#get-appupdates) SDK API endpoint. This is because the SDK is not able to receive updates from the Vendor Portal when running in air gap environments.
+* Instance tags cannot be updated with the [`/app/instance-tags`](/reference/replicated-sdk-apis#post-appinstance-tags) SDK API endpoint.
+
+In air gap mode, the SDK can still make requests to SDK API endpoints that do not require outbound internet access, such as the [`license`](/reference/replicated-sdk-apis#license) endpoints and the [`/app/info`](/reference/replicated-sdk-apis#get-appinfo) endpoint. However, these endpoints will return whatever values were injected into the SDK when the chart was most recently pulled. These values might not match the latest information available in the Vendor Portal because the SDK cannot receive updates when running in air gap environments.
+
+## Install the SDK in Air Gap Mode {#install}
+
+This section describes how to install the Replicated SDK in air gap mode with the Helm CLI and with Replicated KOTS.
+
+### Helm CLI
+
+When the SDK is installed with the Helm CLI, air gap mode can be enabled by passing `--set replicated.isAirgap=true` with the Helm CLI installation command.
+
+For example:
+
+```
+helm install gitea oci://registry.replicated.com/my-app/gitea --set replicated.isAirgap=true
+```
+
+For more information about Helm CLI installations with Replicated, see [Installing with Helm](/vendor/install-with-helm). For more information about setting Helm values with the `helm install` command, see [Helm Install](https://helm.sh/docs/helm/helm_install/) in the Helm documentation.
+
+:::note
+Replicated does not provide air gap bundles for applications installed with the Helm CLI. Air gap bundles are a feature of KOTS.
+:::
+
+### KOTS
+
+When the SDK is installed by KOTS in an air gap environment, KOTS automatically sets `isAirgap: true` in the SDK Helm chart values to enable air gap mode. No additional configuration is required.
+
+---
+
+
+# Customizing the Replicated SDK
+
+# Customizing the Replicated SDK
+
+This topic describes various ways to customize the Replicated SDK, including customizing RBAC, setting environment variables, adding tolerations, and more.
+
+## Customize RBAC for the SDK
+
+This section describes role-based access control (RBAC) for the Replicated SDK, including the default RBAC, minimum RBAC requirements, and how to install the SDK with custom RBAC.
+
+### Default RBAC
+
+The SDK creates default Role, RoleBinding, and ServiceAccount objects during installation. The default Role allows the SDK to get, list, and watch all resources in the namespace, to create Secrets, and to update the `replicated`, `replicated-instance-report`, and `replicated-custom-app-metrics-report` Secrets:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  labels:
+    {{- include "replicated.labels" . | nindent 4 }}
+  name: replicated-role
+rules:
+- apiGroups:
+  - '*'
+  resources:
+  - '*'
+  verbs:
+  - 'get'
+  - 'list'
+  - 'watch'
+- apiGroups:
+  - ''
+  resources:
+  - 'secrets'
+  verbs:
+  - 'create'
+- apiGroups:
+  - ''
+  resources:
+  - 'secrets'
+  verbs:
+  - 'update'
+  resourceNames:
+  - replicated
+  - replicated-instance-report
+  - replicated-custom-app-metrics-report
+```
+
+### Minimum RBAC Requirements
+
+The SDK requires the following minimum RBAC permissions:
+* Create Secrets.
+* Get and update Secrets named `replicated`, `replicated-instance-report`, and `replicated-custom-app-metrics-report`.
+* The following permissions for status informers:
+  * If you defined custom status informers, then the SDK must have permissions to get, list, and watch all the resources listed in the `replicated.statusInformers` array in your Helm chart `values.yaml` file.
+  * If you did _not_ define custom status informers, then the SDK must have permissions to get, list, and watch the following resources:
+    * Deployments
+    * Daemonsets
+    * Ingresses
+    * PersistentVolumeClaims
+    * Statefulsets
+    * Services
+  * For any Ingress resources used as status informers, the SDK requires `get` permissions for the Service resources listed in the `backend.Service.Name` field of the Ingress resource.
+  * For any Daemonset and Statefulset resources used as status informers, the SDK requires `list` permissions for pods in the namespace.
+  * For any Service resources used as status informers, the SDK requires `get` permissions for Endpoint resources with the same name as the service.
+
+  The Replicated Vendor Portal uses status informers to provide application status data. For more information, see [Helm Installations](/vendor/insights-app-status#helm-installations) in _Enabling and Understanding Application Status_.
+
+### Install the SDK with Custom RBAC
+
+#### Custom ServiceAccount
+
+To use the SDK with custom RBAC permissions, provide the name for a custom ServiceAccount object during installation. When a service account is provided, the SDK uses the RBAC permissions granted to the service account and does not create the default Role, RoleBinding, or ServiceAccount objects.
+
+To install the SDK with custom RBAC:
+
+1. Create custom Role, RoleBinding, and ServiceAccount objects. The Role must meet the minimum requirements described in [Minimum RBAC Requirements](#minimum-rbac-requirements) above.
+1. During installation, provide the name of the service account that you created by including `--set replicated.serviceAccountName=CUSTOM_SERVICEACCOUNT_NAME`.
+
+   **Example**:
+
+   ```
+   helm install wordpress oci://registry.replicated.com/my-app/beta/wordpress --set replicated.serviceAccountName=mycustomserviceaccount
+   ```
+
+   For more information about installing with Helm, see [Installing with Helm](/vendor/install-with-helm).
+
+#### Custom ClusterRole
+
+To use the SDK with an existing ClusterRole, provide the name for a custom ClusterRole object during installation. When a cluster role is provided, the SDK uses the RBAC permissions granted to the cluster role and does not create the default RoleBinding. Instead, the SDK creates a ClusterRoleBinding as well as a ServiceAccount object.
+
+To install the SDK with a custom ClusterRole:
+
+1. Create a custom ClusterRole object. The ClusterRole must meet at least the minimum requirements described in [Minimum RBAC Requirements](#minimum-rbac-requirements) above. However, it can also provide additional permissions that can be used by the SDK, such as listing cluster Nodes.
+1. During installation, provide the name of the cluster role that you created by including `--set replicated.clusterRole=CUSTOM_CLUSTERROLE_NAME`.
+
+   **Example**:
+
+   ```
+   helm install wordpress oci://registry.replicated.com/my-app/beta/wordpress --set replicated.clusterRole=mycustomclusterrole
+   ```
+
+   For more information about installing with Helm, see [Installing with Helm](/vendor/install-with-helm).
+
+## Set Environment Variables {#env-var}
+
+The Replicated SDK provides a `replicated.extraEnv` value that allows users to set additional environment variables for the deployment that are not exposed as Helm values.
+
+This ensures that users can set the environment variables that they require without the SDK Helm chart needing to be modified to expose the values. For example, if the SDK is running behind an HTTP proxy server, then the user could set `HTTP_PROXY` or `HTTPS_PROXY` environment variables to provide the hostname or IP address of their proxy server.
+
+To add environment variables to the Replicated SDK deployment, include the `replicated.extraEnv` array in your Helm chart `values.yaml` file. The `replicated.extraEnv` array accepts a list of environment variables in the following format:
+
+```yaml
+# Helm chart values.yaml
+
+replicated:
+  extraEnv:
+  - name: ENV_VAR_NAME
+    value: ENV_VAR_VALUE
+```
+
+:::note
+If the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` variables are configured with the [kots install](/reference/kots-cli-install) command, these variables will also be set automatically in the Replicated SDK.
+:::
+
+**Example**:
+
+```yaml
+# Helm chart values.yaml
+
+replicated:
+  extraEnv:
+  - name: MY_ENV_VAR
+    value: my-value
+  - name: MY_ENV_VAR_2
+    value: my-value-2
+```
+
+## Custom Certificate Authority
+
+When installing the Replicated SDK behind a proxy server that terminates TLS and injects a custom certificate, you must provide the CA to the SDK. This can be done by storing the CA in a ConfigMap or a Secret prior to installation and providing appropriate values during installation.
+
+### Using a ConfigMap
+
+To use a CA stored in a ConfigMap:
+
+1. Create a ConfigMap with the CA as the data value. Note that the name of the ConfigMap and the data key can be anything.
+   ```bash
+   kubectl -n <NAMESPACE> create configmap private-ca --from-file=ca.crt=./ca.crt
+   ```
+1. Add the name of the config map to the values file:
+   ```yaml
+   replicated:
+     privateCAConfigmap: private-ca
+   ```
+
+:::note
+If the `--private-ca-configmap` flag is used with the [kots install](/reference/kots-cli-install) command, this value will be populated in the Replicated SDK automatically.
+:::
+
+### Using a Secret
+
+To use a CA stored in a Secret:
+
+1. Create a Secret with the CA as a data value. Note that the name of the Secret and the key can be anything.
+   ```bash
+   kubectl -n <NAMESPACE> create secret generic private-ca --from-file=ca.crt=./ca.crt
+   ```
+1. Add the name of the secret and the key to the values file:
+   ```yaml
+   replicated:
+     privateCASecret:
+       name: private-ca
+       key: ca.crt
+   ```
+
+## Add Tolerations
+
+The Replicated SDK provides a `replicated.tolerations` value that allows users to add custom tolerations to the deployment. For more information about tolerations, see [Taints and Tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) in the Kubernetes documentation.
+
+To add tolerations to the Replicated SDK deployment, include the `replicated.tolerations` array in your Helm chart `values.yaml` file. The `replicated.tolerations` array accepts a list of tolerations in the following format:
+
+```yaml
+# Helm chart values.yaml
+
+replicated:
+  tolerations:
+  - key: "key"
+    operator: "Equal"
+    value: "value"
+    effect: "NoSchedule"
+```
+
+## Add Affinity
+
+The Replicated SDK provides a `replicated.affinity` value that allows users to add custom affinity to the deployment. For more information about affinity, see [Affinity and anti-affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) in the Kubernetes documentation.
+
+To add affinity to the Replicated SDK deployment, include the `replicated.affinity` map in your Helm chart `values.yaml` file. The `replicated.affinity` map accepts a standard Kubernetes affinity object in the following format:
+
+```yaml
+# Helm chart values.yaml
+
+replicated:
+  affinity:
+    nodeAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        nodeSelectorTerms:
+        - matchExpressions:
+          - key: production/node-pool
+            operator: In
+            values:
+            - private-node-pool
+```
+
+## Add Custom Labels
+
+With the Replicated SDK version 1.1.0 and later, you can pass custom labels to the Replicated SDK Helm chart by setting the `replicated.commonLabels` and `replicated.podLabels` Helm values in your Helm chart.
+
+### Requirement
+
+The `replicated.commonLabels` and `replicated.podLabels` values are available with the Replicated SDK version 1.1.0 and later.
+
+### commonLabels
+
+The `replicated.commonLabels` value allows you to add one or more labels to all resources created by the SDK chart.
+
+For example:
+
+```yaml
+# Helm chart values.yaml
+
+replicated:
+  commonLabels:
+    environment: production
+    team: platform
+```
+
+### podLabels
+
+The `replicated.podLabels` value allows you to add pod-specific labels to the pod template.
+
+For example:
+
+```yaml
+# Helm chart values.yaml
+
+replicated:
+  podLabels:
+    monitoring: enabled
+    custom.company.io/pod-label: value
+```
+
+---
+
+
+# Developing Against the SDK API
+
+import IntegrationMode from "../partials/replicated-sdk/_integration-mode-install.mdx"
+
+# Developing Against the SDK API
+
+This topic describes how to develop against the SDK API to test changes locally. It includes information about installing the SDK in integration mode and port forwarding the SDK API service to your local machine. For more information about the SDK API, see [Replicated SDK API](/reference/replicated-sdk-apis).
+
+## Install the SDK in Integration Mode
+
+<IntegrationMode/>
+
+## Port Forwarding the SDK API Service {#port-forward}
+
+After the Replicated SDK is installed and initialized in a cluster, the Replicated SDK API is exposed at `replicated:3000`. You can access the SDK API for testing by forwarding port 3000 to your local machine.
+
+To port forward the SDK API service to your local machine:
+
+1. Run the following command to port forward to the SDK API service:
+
+   ```bash
+   kubectl port-forward service/replicated 3000
+   ```
+   ```
+   Forwarding from 127.0.0.1:3000 -> 3000
+   Forwarding from [::1]:3000 -> 3000
+   ```
+
+1. With the port forward running, test the SDK API endpoints as desired. For example:
+
+   ```bash
+   curl localhost:3000/api/v1/license/fields/expires_at
+   curl localhost:3000/api/v1/license/fields/{field}
+   ```
+
+   For more information, see [Replicated SDK API](/reference/replicated-sdk-apis).
+
+   :::note
+   When the SDK is installed in integration mode, requests to the `license` endpoints use your actual development license data, while requests to the `app` endpoints use the default mock data.
+   :::
+
+---
+
+
+# Installing the Replicated SDK
+
+import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx"
+import KotsVerReq from "../partials/replicated-sdk/_kots-version-req.mdx"
+import RegistryLogout from "../partials/replicated-sdk/_registry-logout.mdx"
+import IntegrationMode from "../partials/replicated-sdk/_integration-mode-install.mdx"
+
+# Installing the Replicated SDK
+
+This topic describes the methods for distributing and installing the Replicated SDK.
+
+It includes information about how to install the SDK alongside Helm charts or Kubernetes manifest-based applications using the Helm CLI or a Replicated installer (Replicated KOTS, kURL, Embedded Cluster). It also includes information about installing the SDK as a standalone component in integration mode.
+
+For information about installing the SDK in air gap mode, see [Installing the SDK in Air Gap Environments](replicated-sdk-airgap).
+
+## Requirement
+
+<KotsVerReq/>
+
+## Install the SDK as a Subchart
+
+When included as a dependency of your application Helm chart, the SDK is installed as a subchart alongside the application.
+
+To install the SDK as a subchart:
+
+1. In your application Helm chart `Chart.yaml` file, add the YAML below to declare the SDK as a dependency. If your application is installed as multiple charts, declare the SDK as a dependency of the chart that customers install first. Do not declare the SDK in more than one chart.
+
+   <DependencyYaml/>
+
+1. Update the `charts/` directory:
+
+   ```
+   helm dependency update
+   ```
+   :::note
+   <RegistryLogout/>
+   :::
+
+1. Package the Helm chart into a `.tgz` archive:
+
+   ```
+   helm package .
+   ```
+
+1. Add the chart archive to a new release. For more information, see [Managing Releases with the CLI](/vendor/releases-creating-cli) or [Managing Releases with the Vendor Portal](/vendor/releases-creating-releases).
+
+1. (Optional) Add a KOTS HelmChart custom resource to the release to support installation with Embedded Cluster, KOTS, or kURL. For more information, see [Configuring the HelmChart Custom Resource v2](/vendor/helm-native-v2-using).
+
+1. Save and promote the release to an internal-only channel used for testing, such as the default Unstable channel.
+
+1. Install the release using Helm or a Replicated installer. For more information, see:
+   * [Online Installation with Embedded Cluster](/enterprise/installing-embedded)
+   * [Installing with Helm](/vendor/install-with-helm)
+   * [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster)
+   * [Online Installation with kURL](/enterprise/installing-kurl)
+
+1. Confirm that the SDK was installed by checking that the `replicated` Deployment was created:
+
+   ```
+   kubectl get deploy --namespace NAMESPACE
+   ```
+   Where `NAMESPACE` is the namespace in the cluster where the application and the SDK are installed.
+
+   **Example output**:
+
+   ```
+   NAME         READY   UP-TO-DATE   AVAILABLE   AGE
+   my-app       1/1     1            1           35s
+   replicated   1/1     1            1           35s
+   ```
+
+## Install the SDK Alongside a Kubernetes Manifest-Based Application {#manifest-app}
+
+For applications that use Kubernetes manifest files instead of Helm charts, the SDK Helm chart can be added to a release and then installed by KOTS alongside the application.
+
+<KotsVerReq/>
+
+To add the SDK Helm chart to a release for a Kubernetes manifest-based application:
+
+1. Install the Helm CLI using Homebrew:
+
+   ```
+   brew install helm
+   ```
+   For more information, including alternative installation options, see [Install Helm](https://helm.sh/docs/intro/install/) in the Helm documentation.
+
+1. Download the `.tgz` chart archive for the SDK Helm chart:
+
+   ```
+   helm pull oci://registry.replicated.com/library/replicated --version SDK_VERSION
+   ```
+   Where `SDK_VERSION` is the version of the SDK to install. For a list of available SDK versions, see the [replicated-sdk repository](https://github.com/replicatedhq/replicated-sdk/tags) in GitHub.
+
+   The output of this command is a `.tgz` file with the naming convention `CHART_NAME-CHART_VERSION.tgz`. For example, `replicated-1.1.1.tgz`.
+
+   For more information and additional options, see [Helm Pull](https://helm.sh/docs/helm/helm_pull/) in the Helm documentation.
+
+1. Add the SDK `.tgz` chart archive to a new release. For more information, see [Managing Releases with the CLI](/vendor/releases-creating-cli) or [Managing Releases with the Vendor Portal](/vendor/releases-creating-releases).
+
+   The following shows an example of the SDK Helm chart added to a draft release for a standard manifest-based application:
+
+   ![SDK Helm chart in a draft release](/images/sdk-kots-release.png)
+
+   [View a larger version of this image](/images/sdk-kots-release.png)
+
+1. If one was not created automatically, add a KOTS HelmChart custom resource to the release. HelmChart custom resources have `apiVersion: kots.io/v1beta2` and `kind: HelmChart`.
+
+   **Example:**
+
+   ```yaml
+   apiVersion: kots.io/v1beta2
+   kind: HelmChart
+   metadata:
+     name: replicated
+   spec:
+     # chart identifies a matching chart from a .tgz
+     chart:
+       # for name, enter replicated
+       name: replicated
+       # for chartversion, enter the version of the
+       # SDK Helm chart in the release
+       chartVersion: 1.1.1
+   ```
+
+   As shown in the example above, the HelmChart custom resource requires the name and version of the SDK Helm chart that you added to the release:
+   * **`chart.name`**: The name of the SDK Helm chart is `replicated`. You can find the chart name in the `name` field of the SDK Helm chart `Chart.yaml` file.
+   * **`chart.chartVersion`**: The chart version varies depending on the version of the SDK that you pulled and added to the release. You can find the chart version in the `version` field of the SDK Helm chart `Chart.yaml` file.
+
+   For more information about configuring the HelmChart custom resource to support KOTS installations, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about) and [HelmChart v2](/reference/custom-resource-helmchart-v2).
+
+1. Save and promote the release to an internal-only channel used for testing, such as the default Unstable channel.
+
+1. Install the release using a Replicated installer. For more information, see:
   For more information, see:
   * [Online Installation with Embedded Cluster](/enterprise/installing-embedded)
   * [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster)
   * [Online Installation with kURL](/enterprise/installing-kurl)

1. Confirm that the SDK was installed by checking that the `replicated` Deployment was created:

   ```
   kubectl get deploy --namespace NAMESPACE
   ```

   Where `NAMESPACE` is the namespace in the cluster where the application, the Admin Console, and the SDK are installed.

   **Example output**:

   ```
   NAME         READY   UP-TO-DATE   AVAILABLE   AGE
   kotsadm      1/1     1            1           112s
   my-app       1/1     1            1           28s
   replicated   1/1     1            1           27s
   ```

## Install the SDK in Integration Mode

<IntegrationMode/>

## Troubleshoot

### 401 Unauthorized Error When Updating Helm Dependencies {#401}

#### Symptom

You see an error message similar to the following after adding the Replicated SDK as a dependency in your Helm chart and then running `helm dependency update`:

```
Error: could not download oci://registry.replicated.com/library/replicated-sdk: failed to authorize: failed to fetch oauth token: unexpected status from GET request to https://registry.replicated.com/v2/token?scope=repository%3Alibrary%2Freplicated-sdk%3Apull&service=registry.replicated.com: 401 Unauthorized
```

#### Cause

When you run `helm dependency update`, Helm attempts to pull the Replicated SDK chart from the Replicated registry. An error can occur if you are already logged in to the Replicated registry with a license that has expired, such as when testing application releases.

#### Solution

To solve this issue:

1. Run the following command to remove login credentials for the Replicated registry:

   ```
   helm registry logout registry.replicated.com
   ```

1. Re-run `helm dependency update` for your Helm chart.

---

# About the Replicated SDK

import SDKOverview from "../partials/replicated-sdk/_overview.mdx"
import SdkValues from "../partials/replicated-sdk/_sdk-values.mdx"

This topic provides an introduction to using the Replicated SDK with your application.

## Overview

<SDKOverview/>

For more information about the Replicated SDK API, see [Replicated SDK API](/reference/replicated-sdk-apis). For information about developing against the SDK API locally, see [Developing Against the SDK API](replicated-sdk-development).

## Limitations

The Replicated SDK has the following limitations:

* Some popular enterprise continuous delivery tools, such as ArgoCD and Pulumi, deploy Helm charts by running `helm template` then `kubectl apply` on the generated manifests, rather than running `helm install` or `helm upgrade`. The following limitations apply to applications installed by running `helm template` then `kubectl apply`:

  * The `/api/v1/app/history` SDK API endpoint always returns an empty array because there is no Helm history in the cluster. See [GET /app/history](/reference/replicated-sdk-apis#get-apphistory) in _Replicated SDK API_.

  * The SDK does not automatically generate status informers to report status data for installed instances of the application. To get instance status data, you must enable custom status informers by overriding the `replicated.statusInformers` Helm value, as shown in the sketch below. See [Enable Application Status Insights](/vendor/insights-app-status#enable-application-status-insights) in _Enabling and Understanding Application Status_.
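For reference, a custom status informers override passed at install time might look like the following minimal sketch, where `deployment/my-app` and `service/my-app` are illustrative resource names rather than values from your application:

```yaml
# Helm values override (a sketch; resource names are illustrative)
replicated:
  statusInformers:
    - deployment/my-app
    - service/my-app
```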
## SDK Resiliency

At startup and when serving requests, the SDK retrieves and caches the latest information from the upstream Replicated APIs, including customer license information.

If the upstream APIs are not available at startup, the SDK does not accept connections or serve requests until it is able to communicate with the upstream APIs. If communication fails, the SDK retries every 10 seconds and the SDK pod reports `0/1` ready.

When serving requests, if the upstream APIs become unavailable, the SDK serves from the memory cache and sets the `X-Replicated-Served-From-Cache` header to `true`. Additionally, rapid successive requests to the same SDK endpoint with the same request properties are rate-limited, returning the last cached payload and status code without reaching out to the upstream APIs. In this case, the `X-Replicated-Rate-Limited` header is set to `true`.

## Replicated SDK Helm Values

<SdkValues/>

---

# SLSA Provenance Validation Process for the Replicated SDK

This topic describes the process to perform provenance validation on the Replicated SDK.

## About Supply Chain Levels for Software Artifacts (SLSA)

[Supply Chain Levels for Software Artifacts (SLSA)](https://slsa.dev/), pronounced “salsa,” is a security framework that comprises standards and controls designed to prevent tampering, enhance integrity, and secure software packages and infrastructure.

## Purpose of Attestations

Attestations enable the inspection of an image to determine its origin, the identity of its creator, the creation process, and its contents. When building software using the Replicated SDK, the image’s Software Bill of Materials (SBOM) and SLSA-based provenance attestations empower your customers to make informed decisions regarding the impact of an image on the supply chain security of your application. This process ultimately enhances the security and assurances provided to both vendors and end customers.

## Prerequisite

Before you perform these tasks, you must install [slsa-verifier](https://github.com/slsa-framework/slsa-verifier) and [crane](https://github.com/google/go-containerregistry/blob/main/cmd/crane/doc/crane.md).

## Validate the SDK SLSA Attestations

The Replicated SDK build process utilizes Wolfi-based images to minimize the number of CVEs. The build process automatically generates SBOMs and attestations, and then publishes the image along with these metadata components. For instance, you can find all the artifacts readily available on [DockerHub](https://hub.docker.com/r/replicated/replicated-sdk/tags). The following shell script is a tool to easily validate the SLSA attestations for a given Replicated SDK image.

```
#!/bin/bash

# This script verifies the SLSA metadata of a container image
#
# Requires
# - slsa-verifier (https://github.com/slsa-framework/slsa-verifier)
# - crane (https://github.com/google/go-containerregistry/blob/main/cmd/crane/doc/crane.md)
#

# Define the image and version to verify
VERSION=v1.0.0-beta.20
IMAGE=replicated/replicated-sdk:${VERSION}

# expected source repository that should have produced the artifact, e.g. github.com/some/repo
SOURCE_REPO=github.com/replicatedhq/replicated-sdk

# Use `crane` to retrieve the digest of the image without pulling the image
IMAGE_WITH_DIGEST="${IMAGE}@"$(crane digest "${IMAGE}")

echo "Verifying artifact"
echo "Image: ${IMAGE_WITH_DIGEST}"
echo "Source Repo: ${SOURCE_REPO}"

slsa-verifier verify-image "${IMAGE_WITH_DIGEST}" \
  --source-uri ${SOURCE_REPO} \
  --source-tag ${VERSION}
```

---

# Templating Annotations

This topic describes how to use Replicated KOTS template functions to template annotations for resources and objects based on user-supplied values.

## Overview

It is common for users to need to set custom annotations for a resource or object deployed by your application. For example, you might need to allow your users to provide annotations to apply to a Service or Ingress object in public cloud environments.

For applications installed with Replicated KOTS, you can apply user-supplied annotations to resources or objects by first adding a field to the Replicated Admin Console **Config** page where users can enter one or more annotations. For information about how to add fields on the **Config** page, see [Creating and Editing Configuration Fields](/vendor/admin-console-customize-config-screen).

You can then map these user-supplied values from the **Config** page to resources and objects in your release using KOTS template functions. KOTS template functions are a set of custom template functions based on the Go text/template library that can be used to generate values specific to customer environments. The template functions in the Config context return user-supplied values on the **Config** page.

For more information about KOTS template functions in the Config context, see [Config Context](/reference/template-functions-config-context). For more information about the Go library, see [text/template](https://pkg.go.dev/text/template) in the Go documentation.

## About `kots.io/placeholder`

For applications installed with KOTS that use standard Kubernetes manifests, the `kots.io/placeholder` annotation allows you to template annotations in resources and objects without breaking the base YAML or needing to include the annotation key.

The `kots.io/placeholder` annotation uses the format `kots.io/placeholder 'bool' 'string'`. For example:

```yaml
# Example manifest file

annotations:
  kots.io/placeholder: |-
    repl{{ ConfigOption "additional_annotations" | nindent 4 }}
```

:::note
For Helm chart-based applications installed with KOTS, Replicated recommends that you map user-supplied annotations to the Helm chart `values.yaml` file using the Replicated HelmChart custom resource, rather than using `kots.io/placeholder`. This allows you to access user-supplied values in your Helm chart without needing to include KOTS template functions directly in the Helm chart templates.

For an example, see [Map User-Supplied Annotations to Helm Chart Values](#map-user-supplied-annotations-to-helm-chart-values) below.
:::

## Annotation Templating Examples

This section includes common examples of templating annotations in resources and objects to map user-supplied values.

For additional examples of how to map values to Helm chart-based applications, see [Applications](https://github.com/replicatedhq/platform-examples/tree/main/applications) in the platform-examples repository in GitHub.
### Map Multiple Annotations from a Single Configuration Field

You can map one or more annotations from a single `textarea` field on the **Config** page. The `textarea` type defines multi-line text input and supports properties such as `rows` and `cols`. For more information, see [textarea](/reference/custom-resource-config#textarea) in _Config_.

For example, the following Config custom resource adds an `ingress_annotations` field of type `textarea`:

```yaml
# Config custom resource

apiVersion: kots.io/v1beta1
kind: Config
metadata:
  name: config
spec:
  groups:
  - name: ingress_settings
    title: Ingress Settings
    description: Configure Ingress
    items:
    - name: ingress_annotations
      type: textarea
      title: Ingress Annotations
      help_text: See your cloud provider’s documentation for the required annotations.
```

On the **Config** page, users can enter one or more key-value pairs in the `ingress_annotations` field, as shown in the example below:

![Config page with custom annotations in a Ingress Annotations field](/images/config-map-annotations.png)

[View a larger version of this image](/images/config-map-annotations.png)

The following Ingress object then maps the user-supplied values from the `ingress_annotations` field using the `kots.io/placeholder` annotation:

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example-annotation
  annotations:
    kots.io/placeholder: |-
      repl{{ ConfigOption "ingress_annotations" | nindent 4 }}
```

During installation, KOTS renders the YAML with the multi-line input from the configuration field as shown below:

```yaml
# Rendered Ingress object
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example-annotation
  annotations:
    kots.io/placeholder: |-

    key1: value1
    key2: value2
    key3: value3
```

### Map Annotations from Multiple Configuration Fields

You can specify multiple annotations using the same `kots.io/placeholder` annotation.

For example, the following Ingress object includes ConfigOption template functions that render the user-supplied values for the `ingress_annotation` and `ingress_hostname` fields:

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example-annotation
  annotations:
    kots.io/placeholder: |-
      repl{{ ConfigOption "ingress_annotation" | nindent 4 }}
      repl{{ printf "my.custom/annotation.ingress.hostname: %s" (ConfigOption "ingress_hostname") | nindent 4 }}
```

During installation, KOTS renders the YAML as shown below:

```yaml
# Rendered Ingress object

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example-annotation
  annotations:
    kots.io/placeholder: |-

    key1: value1
    my.custom/annotation.ingress.hostname: example.hostname.com
```

### Map User-Supplied Value to a Key

You can map a user-supplied value from the **Config** page to a pre-defined annotation key.

For example, in the following Ingress object, `my.custom/annotation.ingress.hostname` is the key for the templated annotation.
The annotation also uses the ConfigOption template function to map the user-supplied value from an `ingress_hostname` configuration field:

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example-annotation
  annotations:
    kots.io/placeholder: |-
      repl{{ printf "my.custom/annotation.ingress.hostname: %s" (ConfigOption "ingress_hostname") | nindent 4 }}
```

During installation, KOTS renders the YAML as shown below:

```yaml
# Rendered Ingress object

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example-annotation
  annotations:
    kots.io/placeholder: |-

    my.custom/annotation.ingress.hostname: example.hostname.com
```

### Include Conditional Statements in Templated Annotations

You can include or exclude templated annotations based on a conditional statement.

For example, the following Ingress object includes a conditional statement for `kots.io/placeholder` that renders `my.custom/annotation.class: somevalue` if the user enables a `custom_annotation` field on the **Config** page:

```yaml
apiVersion: v1
kind: Ingress
metadata:
  name: myapp
  labels:
    app: myapp
  annotations:
    kots.io/placeholder: |-
      repl{{if ConfigOptionEquals "custom_annotation" "1" }}repl{{ printf "my.custom/annotation.class: somevalue" | nindent 4 }}repl{{end}}
spec:
...
```

During installation, if the user enables the `custom_annotation` configuration field, KOTS renders the YAML as shown below:

```yaml
# Rendered Ingress object

apiVersion: v1
kind: Ingress
metadata:
  name: myapp
  labels:
    app: myapp
  annotations:
    kots.io/placeholder: |-
      my.custom/annotation.class: somevalue
spec:
...
```

Alternatively, if the condition evaluates to false, the annotation does not appear in the rendered YAML:

```yaml
# Rendered Ingress object

apiVersion: v1
kind: Ingress
metadata:
  name: myapp
  labels:
    app: myapp
  annotations:
    kots.io/placeholder: |-
spec:
...
```

### Map User-Supplied Annotations to Helm Chart Values

For Helm chart-based applications installed with KOTS, Replicated recommends that you map user-supplied annotations to the Helm chart `values.yaml` file, rather than using `kots.io/placeholder`. This allows you to access user-supplied values in your Helm chart without needing to include KOTS template functions directly in the Helm chart templates.

To map user-supplied annotations from the **Config** page to the Helm chart `values.yaml` file, you use the `values` field of the Replicated HelmChart custom resource. For more information, see [values](/reference/custom-resource-helmchart-v2#values) in _HelmChart v2_.
For example, the following HelmChart custom resource uses a ConfigOption template function in `values.services.myservice.annotations` to map the value of a configuration field named `additional_annotations`:

```yaml
# HelmChart custom resource

apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: myapp
spec:
  values:
    services:
      myservice:
        annotations: repl{{ ConfigOption "additional_annotations" | nindent 10 }}
```

The `values.services.myservice.annotations` field in the HelmChart custom resource corresponds to a `services.myservice.annotations` field in the `values.yaml` file of the application Helm chart, as shown in the example below:

```yaml
# Helm chart values.yaml

services:
  myservice:
    annotations: {}
```

During installation, the ConfigOption template function in the HelmChart custom resource renders the user-supplied values from the `additional_annotations` configuration field.

Then, KOTS replaces the value in the corresponding field in the `values.yaml` in the chart archive, as shown in the example below.

```yaml
# Rendered Helm chart values.yaml

services:
  myservice:
    annotations:
      key1: value1
```

In your Helm chart templates, you can access these values from the `values.yaml` file to apply the user-supplied annotations to the target resources or objects. For information about how to access values from a `values.yaml` file, see [Values Files](https://helm.sh/docs/chart_template_guide/values_files/) in the Helm documentation.

---

# Configuring Snapshots

This topic provides information about how to configure the Velero Backup resource to enable Replicated KOTS snapshots for an application.

For more information about snapshots, see [About Backup and Restore with Snapshots](/vendor/snapshots-overview).

## Configure Snapshots

Add a Velero Backup custom resource (`kind: Backup`, `apiVersion: velero.io/v1`) to your release and configure it as needed. After configuring the Backup resource, add annotations for each volume that you want to be included in backups.

To configure snapshots for your application:

1. In a new release containing your application files, add a Velero Backup resource (`kind: Backup` and `apiVersion: velero.io/v1`):

   ```yaml
   apiVersion: velero.io/v1
   kind: Backup
   metadata:
     name: backup
   spec: {}
   ```

1. Configure the Backup resource to specify the resources that will be included in backups.

   For more information about the Velero Backup resource, including limitations, the list of supported fields for snapshots, and an example, see [Velero Backup Resource for Snapshots](/reference/custom-resource-backup).

1. (Optional) Configure backup and restore hooks. For more information, see [Configuring Backup and Restore Hooks for Snapshots](snapshots-hooks).

1. For each volume that requires a backup, add the `backup.velero.io/backup-volumes` annotation. The annotation name is `backup.velero.io/backup-volumes` and the value is a comma-separated list of volumes to include in the backup.

   <details>
   <summary>Why do I need to use the backup annotation?</summary>
   <p>By default, no volumes are included in the backup. If any pods mount a volume that should be backed up, you must configure the backup with an annotation listing the specific volumes to include in the backup.</p>
   </details>

   **Example:**

   In the following Deployment manifest file, `pvc-volume` is the only volume that is backed up.
The `scratch` volume is not included in the backup because it is not listed in the annotation on the pod specification.

   ```yaml
   apiVersion: apps/v1
   kind: Deployment
   metadata:
     name: sample
     labels:
       app: foo
   spec:
     replicas: 1
     selector:
       matchLabels:
         app: foo
     template:
       metadata:
         labels:
           app: foo
         annotations:
           backup.velero.io/backup-volumes: pvc-volume
       spec:
         containers:
         - image: k8s.gcr.io/test-webserver
           name: test-webserver
           volumeMounts:
           - name: pvc-volume
             mountPath: /volume-1
           - name: scratch
             mountPath: /volume-2
         volumes:
         - name: pvc-volume
           persistentVolumeClaim:
             claimName: test-volume-claim
         - name: scratch
           emptyDir: {}
   ```

1. (Optional) Configure manifest exclusions. By default, Velero also includes all of the Kubernetes objects in the namespace in the backup.

   To exclude any manifest file, add a [`velero.io/exclude-from-backup=true`](https://velero.io/docs/v1.5/resource-filtering/#veleroioexclude-from-backuptrue) label to the manifest to be excluded. The following example shows the Secret manifest file with the `velero.io/exclude-from-backup` label:

   ```yaml
   apiVersion: v1
   kind: Secret
   metadata:
     name: sample
     labels:
       velero.io/exclude-from-backup: "true"
   stringData:
     uri: Secret To Not Include
   ```

1. If you distribute multiple applications with Replicated, repeat these steps for each application. Each application must have its own Backup resource to be included in a full backup with snapshots.

1. (kURL Only) If your application supports installation with Replicated kURL, Replicated recommends that you include the kURL Velero add-on so that customers do not have to manually install Velero in the kURL cluster. For more information, see [Creating a kURL Installer](packaging-embedded-kubernetes).

---

# Configuring Backup and Restore Hooks for Snapshots

This topic describes the use of custom backup and restore hooks and demonstrates a common example.

## About Backup and Restore Hooks

Velero supports the use of backup hooks and restore hooks.

Your application workload might require additional processing or scripts to be run before or after creating a backup to prepare the system for a backup. Many application workloads also require additional processing or scripts to run during or after the restore process.

Some common examples of how to use a hook to create backups are:
- Run `pg_dump` to export a Postgres database prior to backup
- Lock a file before running a backup, and unlock immediately after (see the sketch at the end of this section)
- Delete TMP files that should not be backed up
- Restore a database file only if that file exists
- Perform required setup tasks in a restored Pod before the application containers can start

Additionally, for embedded clusters created by Replicated kURL, you must write custom backup and restore hooks to enable backups for any object-stored data that is not KOTS-specific and does not use persistentVolumeClaims (PVCs). For more information about object-stored data, see [Other Object Stored Data](snapshots-overview#other-object-stored-data) in _Backup and Restore_.

For more information about backup and restore hooks, see [Backup Hooks](https://velero.io/docs/v1.10/backup-hooks/) and [Restore Hooks](https://velero.io/docs/v1.10/restore-hooks) in the Velero documentation.
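For instance, the file-locking pattern from the list above could be expressed as Velero hook annotations on a pod template, as in the following minimal sketch. The lock path and commands are illustrative, not from a real application:

```yaml
# Pod template annotations (a sketch; paths and commands are illustrative)
template:
  metadata:
    annotations:
      # Create a lock file before the backup starts
      pre.hook.backup.velero.io/command: '["/bin/sh", "-c", "touch /data/.backup-lock"]'
      # Remove the lock file after the backup completes
      post.hook.backup.velero.io/command: '["/bin/sh", "-c", "rm -f /data/.backup-lock"]'
      pre.hook.backup.velero.io/timeout: 1m
```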
## Example

The following example demonstrates how to include Velero backup and restore hooks for a Postgres database in a Replicated HelmChart custom resource manifest file.

The use case for this example is an application packaged with a Helm chart that includes a Postgres database. A description of key fields from the YAML follows the example.

```yaml
apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: postgresql
spec:
  exclude: 'repl{{ ConfigOptionEquals `postgres_type` `external_postgres` }}'

  chart:
    name: postgresql
    chartVersion: 8.7.4

  values:

    master:
      podAnnotations:
        backup.velero.io/backup-volumes: backup
        pre.hook.backup.velero.io/command: '["/bin/bash", "-c", "PGPASSWORD=$POSTGRES_PASSWORD pg_dump -U username -d dbname -h 127.0.0.1 > /scratch/backup.sql"]'
        pre.hook.backup.velero.io/timeout: 3m
        post.hook.restore.velero.io/command: '["/bin/bash", "-c", "[ -f \"/scratch/backup.sql\" ] && PGPASSWORD=$POSTGRES_PASSWORD psql -U username -h 127.0.0.1 -d dbname -f /scratch/backup.sql && rm -f /scratch/backup.sql;"]'

      extraVolumes:
        - name: backup
          emptyDir:
            sizeLimit: 1Gi
      extraVolumeMounts:
        - name: backup
          mountPath: /scratch

    global:
      postgresql:
        postgresqlUsername: username
        postgresqlPassword: "repl{{ ConfigOption `embedded_postgres_password` }}"
        postgresqlDatabase: dbname
```

The following describes key fields from the example above:

* `spec.exclude`: A common and recommended pattern for applications. The customer can choose to bring an external Postgres instance instead of running it in-cluster. The Replicated KOTS template function in `spec.exclude` evaluates to true when the user specifies the external database option in the Admin Console **Config** page. This means that the internal Postgres database is not included in the deployment.

* `spec.values.master.podAnnotations`: Adds podAnnotations to the postgres master PodSpec. Velero backup and restore hooks are included in the podAnnotations. The following table describes the podAnnotations:

  :::note
  Run backup hooks inside the container that contains the data to back up.
  :::

  <table>
    <tr>
      <th>podAnnotation</th>
      <th>Description</th>
    </tr>
    <tr>
      <td><code>backup.velero.io/backup-volumes</code></td>
      <td>A comma-separated list of volumes from the Pod to include in the backup. The primary data volume is not included in this field because data is exported using the backup hook.</td>
    </tr>
    <tr>
      <td><code>pre.hook.backup.velero.io/command</code></td>
      <td>A stringified JSON array containing the command for the backup hook.
      This command is a <code>pg_dump</code> from the running database to the backup volume.</td>
    </tr>
    <tr>
      <td><code>pre.hook.backup.velero.io/timeout</code></td>
      <td>A duration for the maximum time to let this script run.</td>
    </tr>
    <tr>
      <td><code>post.hook.restore.velero.io/command</code></td>
      <td>A Velero exec restore hook that runs a script to check if the database file exists, and restores only if it exists. Then, the script deletes the file after the operation is complete.</td>
    </tr>
  </table>

* `spec.values.master.extraVolumes`: A new volume that is injected into the postgres Pod. The new volume is an empty volume that uses ephemeral storage. The ephemeral storage must have enough space to accommodate the size of the exported data.
The `extraVolumeMounts` field mounts the volume into the `/scratch` directory of the master Pod. The volume is used as a destination when the backup hook command described above runs `pg_dump`. This is the only volume that is backed up.
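To confirm that hooks executed during a backup, enterprise users can check the backup logs with the Velero CLI. A minimal sketch, assuming the Velero CLI is installed and `BACKUP_NAME` is the name of an existing backup:

```bash
# Hook executions are recorded in the Velero backup logs
velero backup logs BACKUP_NAME | grep -i hook
```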
---

# About Backup and Restore with Snapshots

import RestoreTable from "../partials/snapshots/_restoreTable.mdx"
import NoEcSupport from "../partials/snapshots/_limitation-no-ec-support.mdx"
import RestoreTypes from "../partials/snapshots/_restore-types.mdx"
import Dr from "../partials/snapshots/_limitation-dr.mdx"
import Os from "../partials/snapshots/_limitation-os.mdx"
import InstallMethod from "../partials/snapshots/_limitation-install-method.mdx"
import CliRestores from "../partials/snapshots/_limitation-cli-restores.mdx"

This topic provides an introduction to the Replicated KOTS snapshots feature for backup and restore. It describes how vendors enable snapshots, the type of data that is backed up, and how to troubleshoot issues for enterprise users.

:::note
<NoEcSupport/>
:::

## Overview

An important part of the lifecycle of an application is backup and restore. You can enable Replicated KOTS snapshots to support backup and restore for existing cluster installations with KOTS and Replicated kURL installations.

When the snapshots feature is enabled for your application, your customers can manage and perform backup and restore from the Admin Console or KOTS CLI.

Snapshots uses the Velero open source project as the backend to back up Kubernetes manifests and persistent volumes. Velero is a mature, fully-featured application. For more information, see the [Velero documentation](https://velero.io/docs/).

In addition to the default functionality that Velero provides, KOTS exposes hooks that let you inject scripts that can execute both before and after a backup, and before and after a restore. For more information, see [Configuring Backup and Restore Hooks for Snapshots](/vendor/snapshots-hooks).

### Limitations and Considerations

- <NoEcSupport/>

- The snapshots feature is available only for licenses with the **Allow Snapshots** option enabled. For more information, see [Creating and Managing Customers](/vendor/releases-creating-customer).

- Snapshots are useful for rollback and disaster recovery scenarios. They are not intended to be used for application migration.

- <Dr/>

- <Os/>

- <InstallMethod/>

- <CliRestores/>

- Removing data from the snapshot storage itself results in data corruption and the loss of snapshots. Instead, use the **Snapshots** tab in the Admin Console to clean up and remove snapshots.

- Snapshots does not support Amazon Simple Storage Service (Amazon S3) buckets that have a bucket policy requiring the server-side encryption header. If you want to require server-side encryption for objects, you can enable default encryption on the bucket instead. For more information about Amazon S3, see the [Amazon S3](https://docs.aws.amazon.com/s3/?icmpid=docs_homepage_featuredsvcs) documentation.

### Velero Version Compatibility

The following table lists which versions of Velero are compatible with each version of KOTS. For more information, see the [Velero documentation](https://velero.io/docs/).

| KOTS version | Velero version |
|------|-------------|
| 1.15 to 1.20.2 | 1.2.0 |
| 1.20.3 to 1.94.0 | 1.5.1 through 1.9.x |
| 1.94.1 and later | 1.6.x through 1.12.x |

## About Backups

This section describes the types of backups that are supported with snapshots. For information about how to configure backup storage destinations for snapshots, see the [Configuring Backup Storage](/enterprise/snapshots-velero-cli-installing) section.
### Application and Admin Console (Full) Backups

Full backups (also referred to as _instance_ backups) include the KOTS Admin Console and all application data, including application volumes and manifest files.

For clusters created with Replicated kURL, full backups also back up the Docker registry, which is required for air gapped installations.

If you manage multiple applications with the Admin Console, data from all applications that support backups is included in a full backup. To be included in full backups, each application must include a manifest file with `kind: Backup` and `apiVersion: velero.io/v1`, which you can check for in the Admin Console.

Full backups are recommended because they support all types of restores. For example, you can restore both the Admin Console and application from a full backup to a new cluster in disaster recovery scenarios. Or, you can use a full backup to restore only application data for the purpose of rolling back after deploying a new version of an application.

### Application-Only (Partial) Backups

Partial backups back up the application volumes and manifest files only. Partial backups do not back up the KOTS Admin Console.

Partial backups can be useful if you need to roll back after deploying a new application version. Partial backups of the application only _cannot_ be restored to a new cluster, and are therefore not usable for disaster recovery scenarios.

### Backup Storage Destinations

For disaster recovery, backups should be configured to use a storage destination that exists outside of the cluster. This is especially true for installations in clusters created with Replicated kURL, because the default storage location on these clusters is internal.

You can use a storage provider that is compatible with Velero as the storage destination for backups created with the Replicated snapshots feature. For a list of the compatible storage providers, see [Providers](https://velero.io/docs/v1.9/supported-providers/) in the Velero documentation.

You initially configure backups on a supported storage provider backend using the KOTS CLI. If you want to change the storage destination after the initial configuration, you can use the **Snapshots** page in the Admin Console, which has built-in support for the following storage destinations:

- Amazon Web Services (AWS)
- Google Cloud Provider (GCP)
- Microsoft Azure
- S3-Compatible
- Network File System (NFS)
- Host Path

kURL installers that include the Velero add-on also include a locally-provisioned object store. By default, kURL clusters are preconfigured in the Admin Console to store backups in the locally-provisioned object store. This object store is sufficient for only rollbacks and downgrades and is not a suitable configuration for disaster recovery. Replicated recommends that you configure a snapshots storage destination that is external to the cluster in the Admin Console for kURL clusters.

For information about how to configure backup storage destinations for snapshots, see the [Configuring Backup Storage](/enterprise/snapshots-velero-cli-installing) section.

### What Data is Backed Up?

Full backups include the Admin Console and all application data, including KOTS-specific object-stored data.
For Replicated kURL installations, this also backs up the Docker registry, which is required for air gapped installations.

#### Other Object-Stored Data

For kURL clusters, you might be using object-stored data that is not specific to the kURL KOTS add-on.

For object-stored data that is not KOTS-specific and does not use persistentVolumeClaims (PVCs), you must write custom backup and restore hooks to enable backups for that object-stored data. For example, Rook and Ceph do not use PVCs and so require custom backup and restore hooks. For more information about writing custom hooks, see [Configuring Backup and Restore Hooks for Snapshots](snapshots-hooks).

#### Pod Volume Data

Replicated supports only the restic backup program for pod volume data.

By default, Velero requires that you opt in to have pod volumes backed up. In the Backup resource that you configure to enable snapshots, you must annotate each specific volume that you want to back up. For more information about including and excluding pod volumes, see [Configuring Snapshots](/vendor/snapshots-configuring-backups).

## About Restores {#restores}

<RestoreTypes/>

When you restore an application with snapshots, KOTS first deletes the selected application. All existing application manifests are removed from the cluster, and all `PersistentVolumeClaims` are deleted. This action is not reversible.

Then, the restore process redeploys all of the application manifests. All Pods are given an extra `initContainer` and an extra directory named `.velero`, which are used for restore hooks. For more information about the restore process, see [Restore Reference](https://velero.io/docs/v1.9/restore-reference/) in the Velero documentation.

When you restore the Admin Console only, no changes are made to the application.

For information about how to restore using the Admin Console or the KOTS CLI, see [Restoring from Backups](/enterprise/snapshots-restoring-full).

## Using Snapshots

This section provides an overview of how vendors and enterprise users can configure and use the snapshots feature.

### How to Enable Snapshots for Your Application

To enable the snapshots backup and restore feature for your users, you must:

- Have the snapshots entitlement enabled in your Replicated vendor account. For account entitlements, contact the Replicated TAM team.
- Define a manifest for creating backups. See [Configuring Snapshots](snapshots-configuring-backups).
- When needed, configure backup and restore hooks. See [Configuring Backup and Restore Hooks for Snapshots](snapshots-hooks).
- Enable the **Allow Snapshots** option in customer licenses. See [Creating and Managing Customers](releases-creating-customer).

### Understanding Backup and Restore for Users {#how-users}

After vendors enable backup and restore, enterprise users install Velero and configure a storage destination in the Admin Console. Then users can create backups manually or schedule automatic backups.

Replicated recommends advising your users to make full backups for disaster recovery purposes. Additionally, full backups give users the flexibility to do a full restore, a partial restore (application only), or restore just the Admin Console.

From a full backup, users restore using the KOTS CLI or the Admin Console as indicated in the following table:

<RestoreTable/>

Partial backups are not recommended as they are a legacy feature and only back up the application volumes and manifests. Partial backups can be restored only from the Admin Console.
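For example, a full restore from a full backup might be run from the KOTS CLI as in the following sketch, where `BACKUP_NAME` is the name of an existing full backup:

```bash
# Restore both the Admin Console and the application from a full backup
kubectl kots restore --from-backup BACKUP_NAME
```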
### Troubleshooting Snapshots

To support end users with backup and restore, use the following resources:

- To help troubleshoot error messages, see [Troubleshooting Snapshots](/enterprise/snapshots-troubleshooting-backup-restore).

- Review the Limitations and Considerations section to make sure an end user's system is compliant.

- Check that the installed Velero version and KOTS version are compatible.

---

# Adding and Customizing Support Bundles

This topic describes how to add a default support bundle spec to a release for your application. It also describes how to customize the default support bundle spec based on your application's needs. For more information about support bundles, see [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about).

The information in this topic applies to Helm applications and Kubernetes manifest-based applications installed with Helm or with Replicated KOTS.

## Step 1: Add the Default Spec to a Manifest File

You can add the support bundle spec to a Kubernetes Secret or a SupportBundle custom resource. The type of manifest file that you use depends on your application type (Helm or manifest-based) and installation method (Helm or KOTS).

Use the following guidance to determine which type of manifest file to use for creating a support bundle spec:

* **Helm Applications**: For Helm applications, see the following guidance:

  * **(Recommended) Helm or KOTS v1.94.2 and Later**: For Helm applications installed with Helm or KOTS v1.94.2 or later, create the support bundle spec in a Kubernetes Secret in your Helm chart `templates`. See [Kubernetes Secret](#secret).

  * **KOTS v1.94.1 and Earlier**: For Helm applications installed with KOTS v1.94.1 or earlier, create the support bundle spec in a SupportBundle custom resource. See [SupportBundle Custom Resource](#sb-cr).

* **Kubernetes Manifest-Based Applications**: For Kubernetes manifest-based applications, create the support bundle spec in a SupportBundle custom resource. See [SupportBundle Custom Resource](#sb-cr).

### Kubernetes Secret {#secret}

You can define support bundle specs in a Kubernetes Secret for the following installation types:
* Installations with Helm
* Helm applications installed with KOTS v1.94.2 and later

In your Helm chart `templates` directory, add the following YAML to a Kubernetes Secret:

```yaml
apiVersion: v1
kind: Secret
metadata:
  labels:
    troubleshoot.sh/kind: support-bundle
  name: example
stringData:
  support-bundle-spec: |
    apiVersion: troubleshoot.sh/v1beta2
    kind: SupportBundle
    metadata:
      name: support-bundle
    spec:
      collectors: []
      analyzers: []
```

As shown above, the Secret must include the following:

* The label `troubleshoot.sh/kind: support-bundle`
* A `stringData` field with a key named `support-bundle-spec`

This empty support bundle spec includes the following collectors by default:
* [clusterInfo](https://troubleshoot.sh/docs/collect/cluster-info/)
* [clusterResources](https://troubleshoot.sh/docs/collect/cluster-resources/)

You do not need to manually include the `clusterInfo` or `clusterResources` collectors in the spec.

:::note
If your application is deployed as multiple Helm charts, Replicated recommends that you create separate support bundle specs for each subchart.
This allows you to make specs that are specific to different components of your application. When a support bundle is generated, all the specs are combined to provide a single bundle.
:::

After you create this empty support bundle spec, you can test the support bundle by following the instructions in [Generating a Support Bundle](/vendor/support-bundle-generating). You can customize the support bundle spec by adding collectors and analyzers or editing the default collectors. For more information, see [Step 2: Customize the Spec](/vendor/support-bundle-customizing#customize-the-spec) below.

### SupportBundle Custom Resource {#sb-cr}

You can define support bundle specs in a SupportBundle custom resource for the following installation types:
* Kubernetes manifest-based applications installed with KOTS
* Helm applications installed with KOTS v1.94.1 and earlier

In a release for your application, add the following YAML to a new `support-bundle.yaml` manifest file:

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: SupportBundle
metadata:
  name: example
spec:
  collectors: []
  analyzers: []
```

For more information about the SupportBundle custom resource, see [Preflight and Support Bundle](/reference/custom-resource-preflight).

This empty support bundle spec includes the following collectors by default:
* [clusterInfo](https://troubleshoot.sh/docs/collect/cluster-info/)
* [clusterResources](https://troubleshoot.sh/docs/collect/cluster-resources/)

You do not need to manually include the `clusterInfo` or `clusterResources` collectors in the spec.

After you create this empty support bundle spec, you can test the support bundle by following the instructions in [Generating a Support Bundle](/vendor/support-bundle-generating). You can customize the support bundle spec by adding collectors and analyzers or editing the default collectors. For more information, see [Step 2: Customize the Spec](/vendor/support-bundle-customizing#customize-the-spec) below.

## Step 2: Customize the Spec {#customize-the-spec}

You can customize the support bundles for your application by:
* Adding collectors and analyzers
* Editing or excluding the default `clusterInfo` and `clusterResources` collectors

### Add Collectors

Collectors gather information from the cluster, the environment, the application, or other sources. Collectors generate output that is then used by the analyzers that you define.

In addition to the default `clusterInfo` and `clusterResources` collectors, the Troubleshoot open source project includes several collectors that you can include in the spec to gather more information from the installation environment. To view all the available collectors, see [All Collectors](https://troubleshoot.sh/docs/collect/all/) in the Troubleshoot documentation.

The following are some recommended collectors:

- [logs](https://troubleshoot.sh/docs/collect/logs/)
- [secret](https://troubleshoot.sh/docs/collect/secret/) and [configMap](https://troubleshoot.sh/docs/collect/configmap/)
- [postgresql](https://troubleshoot.sh/docs/collect/postgresql/), [mysql](https://troubleshoot.sh/docs/collect/mysql/), and [redis](https://troubleshoot.sh/docs/collect/redis/)
- [runPod](https://troubleshoot.sh/docs/collect/run-pod/)
- [copy](https://troubleshoot.sh/docs/collect/copy/) and [copyFromHost](https://troubleshoot.sh/docs/collect/copy-from-host/)
- [http](https://troubleshoot.sh/docs/collect/http/)
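For example, a `logs` collector that gathers logs from Pods matching a label might look like the following sketch, where the label, namespace, and limits are illustrative:

```yaml
spec:
  collectors:
    - logs:
        # Collect logs from Pods with this label (illustrative)
        selector:
          - app.kubernetes.io/name=my-app
        namespace: my-app-namespace
        limits:
          maxLines: 10000
```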
### Add Analyzers

Analyzers use the data from the collectors to generate output for the support bundle. Good analyzers clearly identify failure modes and provide troubleshooting guidance for the user. For example, if you can identify a log message from your database component that indicates a problem, you should write an analyzer that checks for that log and provides a description of the error to the user.

The Troubleshoot open source project includes several analyzers that you can include in the spec. To view all the available analyzers, see the [Analyze](https://troubleshoot.sh/docs/analyze/) section of the Troubleshoot documentation.

The following are some recommended analyzers:

- [textAnalyze](https://troubleshoot.sh/docs/analyze/regex/)
- [deploymentStatus](https://troubleshoot.sh/docs/analyze/deployment-status/)
- [clusterPodStatuses](https://troubleshoot.sh/docs/analyze/cluster-pod-statuses/)
- [replicasetStatus](https://troubleshoot.sh/docs/analyze/replicaset-status/)
- [statefulsetStatus](https://troubleshoot.sh/docs/analyze/statefulset-status/)
- [postgresql](https://troubleshoot.sh/docs/analyze/postgresql/), [mysql](https://troubleshoot.sh/docs/analyze/mysql/), and [redis](https://troubleshoot.sh/docs/analyze/redis/)

### Customize the Default `clusterResources` Collector

You can edit the default `clusterResources` collector using the following properties:

* `namespaces`: The list of namespaces where the resources and information are collected. If the `namespaces` key is not specified, then the `clusterResources` collector defaults to collecting information from all namespaces. The `default` namespace cannot be removed, but you can specify additional namespaces.

* `ignoreRBAC`: When true, the `clusterResources` collector does not check for RBAC authorization before collecting resource information from each namespace. This is useful when your cluster uses authorization webhooks that do not support SelfSubjectRuleReviews. Defaults to false.

For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) in the Troubleshoot documentation.
The following example shows how to specify the namespaces where the `clusterResources` collector collects information:

```yaml
spec:
  collectors:
    - clusterResources:
        namespaces:
          - default
          - my-app-namespace
        ignoreRBAC: true
```

The following example shows how to use Helm template functions to set the namespace:

```yaml
spec:
  collectors:
    - clusterResources:
        namespaces: {{ .Release.Namespace }}
        ignoreRBAC: true
```

The following example shows how to use the Replicated Namespace template function to set the namespace:

```yaml
spec:
  collectors:
    - clusterResources:
        namespaces: '{{repl Namespace }}'
        ignoreRBAC: true
```

For more information, see [Namespace](/reference/template-functions-static-context#namespace) in _Static Context_.

### Exclude the Default Collectors

Although Replicated recommends including the default `clusterInfo` and `clusterResources` collectors because they collect a large amount of data to help with installation and debugging, you can optionally exclude them.

The following example shows how to exclude both the `clusterInfo` and `clusterResources` collectors from your support bundle spec:

```yaml
spec:
  collectors:
    - clusterInfo:
        exclude: true
    - clusterResources:
        exclude: true
```

### Examples

For common examples of collectors and analyzers used in support bundle specs, see [Examples of Support Bundle Specs](/vendor/support-bundle-examples).

---

# Generating Support Bundles for Embedded Cluster

import EmbeddedClusterSupportBundle from "../partials/support-bundles/_generate-bundle-ec.mdx"
import SupportBundleIntro from "../partials/support-bundles/_ec-support-bundle-intro.mdx"

This topic describes how to generate a support bundle that includes cluster- and host-level information for [Replicated Embedded Cluster](/vendor/embedded-overview) installations.

For information about generating host support bundles for Replicated kURL installations, see [Generating Host Bundles for kURL](/vendor/support-host-support-bundles).
## Overview

<SupportBundleIntro/>

## Generate a Support Bundle

<EmbeddedClusterSupportBundle/>

---

# Example Support Bundle Specs

import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import HttpSecret from "../partials/support-bundles/_http-requests-secret.mdx"
import HttpCr from "../partials/support-bundles/_http-requests-cr.mdx"
import NodeStatusSecret from "../partials/support-bundles/_node-status-secret.mdx"
import NodeStatusCr from "../partials/support-bundles/_node-status-cr.mdx"
import K8sVersionSecret from "../partials/support-bundles/_k8s-version-secret.mdx"
import K8sVersionCr from "../partials/support-bundles/_k8s-version-cr.mdx"
import DeployStatusSecret from "../partials/support-bundles/_deploy-status-secret.mdx"
import DeployStatusCr from "../partials/support-bundles/_deploy-status-cr.mdx"
import NodeResourcesSecret from "../partials/support-bundles/_node-resources-secret.mdx"
import NodeResourcesCr from "../partials/support-bundles/_node-resources-cr.mdx"
import LogsSelectorsSecret from "../partials/support-bundles/_logs-selectors-secret.mdx"
import LogsSelectorsCr from "../partials/support-bundles/_logs-selectors-cr.mdx"
import LogsLimitsSecret from "../partials/support-bundles/_logs-limits-secret.mdx"
import LogsLimitsCr from "../partials/support-bundles/_logs-limits-cr.mdx"
import RedisMysqlSecret from "../partials/support-bundles/_redis-mysql-secret.mdx"
import RedisMysqlCr from "../partials/support-bundles/_redis-mysql-cr.mdx"
import RunPodsSecret from "../partials/support-bundles/_run-pods-secret.mdx"
import RunPodsCr from "../partials/support-bundles/_run-pods-cr.mdx"

This topic includes common examples of support bundle specifications. For more examples, see the [Troubleshoot example repository](https://github.com/replicatedhq/troubleshoot/tree/main/examples/support-bundle) in GitHub.

## Check API Deployment Status

The examples below use the `deploymentStatus` analyzer to check the status of an API Deployment running in the cluster. The `deploymentStatus` analyzer uses data from the default `clusterResources` collector.

For more information, see [Deployment Status](https://troubleshoot.sh/docs/analyze/deployment-status/) and [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) in the Troubleshoot documentation.

<Tabs>
  <TabItem value="secret" label="Kubernetes Secret" default>
    <DeployStatusSecret/>
  </TabItem>
  <TabItem value="custom-resource" label="SupportBundle Custom Resource">
    <DeployStatusCr/>
  </TabItem>
</Tabs>

## Check HTTP Requests

If your application has its own API that serves status, metrics, performance data, and so on, this information can be collected and analyzed.

The examples below use the `http` collector and the `textAnalyze` analyzer to check that an HTTP request to the Slack API at `https://api.slack.com/methods/api.test` made from the cluster returns a successful response of `"status": 200,`.

For more information, see [HTTP](https://troubleshoot.sh/docs/collect/http/) and [Regular Expression](https://troubleshoot.sh/docs/analyze/regex/) in the Troubleshoot documentation.
<Tabs>
  <TabItem value="secret" label="Kubernetes Secret" default>
    <HttpSecret/>
  </TabItem>
  <TabItem value="custom-resource" label="SupportBundle Custom Resource">
    <HttpCr/>
  </TabItem>
</Tabs>

## Check Kubernetes Version

The examples below use the `clusterVersion` analyzer to check the version of Kubernetes running in the cluster. The `clusterVersion` analyzer uses data from the default `clusterInfo` collector.

For more information, see [Cluster Version](https://troubleshoot.sh/docs/analyze/cluster-version/) and [Cluster Info](https://troubleshoot.sh/docs/collect/cluster-info/) in the Troubleshoot documentation.

<Tabs>
  <TabItem value="secret" label="Kubernetes Secret" default>
    <K8sVersionSecret/>
  </TabItem>
  <TabItem value="custom-resource" label="SupportBundle Custom Resource">
    <K8sVersionCr/>
  </TabItem>
</Tabs>

## Check Node Resources

The examples below use the `nodeResources` analyzer to check that the minimum requirements are met for memory, CPU cores, number of nodes, and ephemeral storage. The `nodeResources` analyzer uses data from the default `clusterResources` collector.

For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) in the Troubleshoot documentation.

<Tabs>
  <TabItem value="secret" label="Kubernetes Secret" default>
    <NodeResourcesSecret/>
  </TabItem>
  <TabItem value="custom-resource" label="SupportBundle Custom Resource">
    <NodeResourcesCr/>
  </TabItem>
</Tabs>

## Check Node Status

The following examples use the `nodeResources` analyzer to check the status of the nodes in the cluster. The `nodeResources` analyzer uses data from the default `clusterResources` collector.

For more information, see [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) and [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) in the Troubleshoot documentation.

<Tabs>
  <TabItem value="secret" label="Kubernetes Secret" default>
    <NodeStatusSecret/>
  </TabItem>
  <TabItem value="custom-resource" label="SupportBundle Custom Resource">
    <NodeStatusCr/>
  </TabItem>
</Tabs>

## Collect Logs Using Multiple Selectors

The examples below use the `logs` collector to collect logs from various Pods where application workloads are running. They also use the `textAnalyze` analyzer to analyze the logs for a known error.

For more information, see [Pod Logs](https://troubleshoot.sh/docs/collect/logs/) and [Regular Expression](https://troubleshoot.sh/docs/analyze/regex/) in the Troubleshoot documentation.

You can use the `selector` attribute of the `logs` collector to find Pods that have the specified labels. Depending on the complexity of an application's labeling schema, you might need a few different declarations of the logs collector, as shown in the examples below. You can include the `logs` collector as many times as needed.

<Tabs>
  <TabItem value="secret" label="Kubernetes Secret" default>
    <LogsSelectorsSecret/>
  </TabItem>
  <TabItem value="custom-resource" label="SupportBundle Custom Resource">
    <LogsSelectorsCr/>
  </TabItem>
</Tabs>

## Collect Logs Using `limits`

The examples below use the `logs` collector to collect Pod logs from the Pod where the application is running. These specifications use the `limits` field to set a `maxAge` and `maxLines` to limit the output provided.
For more information, see [Pod Logs](https://troubleshoot.sh/docs/collect/logs/) in the Troubleshoot documentation.

<Tabs>
  <TabItem value="secret" label="Kubernetes Secret" default>
    <LogsLimitsSecret/>
  </TabItem>
  <TabItem value="custom-resource" label="SupportBundle Custom Resource">
    <LogsLimitsCr/>
  </TabItem>
</Tabs>

## Collect Redis and MySQL Server Information

The following examples use the `redis` and `mysql` collectors to collect information about Redis and MySQL servers running in the cluster.

For more information, see [Redis](https://troubleshoot.sh/docs/collect/redis/) and [MySQL](https://troubleshoot.sh/docs/collect/mysql/) in the Troubleshoot documentation.

<Tabs>
  <TabItem value="secret" label="Kubernetes Secret" default>
    <RedisMysqlSecret/>
  </TabItem>
  <TabItem value="custom-resource" label="SupportBundle Custom Resource">
    <RedisMysqlCr/>
  </TabItem>
</Tabs>

## Run and Analyze a Pod

The examples below use the `textAnalyze` analyzer to check that a command successfully executes in a Pod running in the cluster. The Pod specification is defined in the `runPod` collector.

For more information, see [Run Pods](https://troubleshoot.sh/docs/collect/run-pod/) and [Regular Expression](https://troubleshoot.sh/docs/analyze/regex/) in the Troubleshoot documentation.

<Tabs>
  <TabItem value="secret" label="Kubernetes Secret" default>
    <RunPodsSecret/>
  </TabItem>
  <TabItem value="custom-resource" label="SupportBundle Custom Resource">
    <RunPodsCr/>
  </TabItem>
</Tabs>

---

# Generating Support Bundles

import InstallPlugin from "../partials/support-bundles/_install-plugin.mdx"
import GenerateBundle from "../partials/support-bundles/_generate-bundle.mdx"

This topic describes how to generate support bundles from the command line using the kubectl support-bundle plugin. For more information about support bundles, see [About Preflights and Support Bundles](/vendor/preflight-support-bundle-about).

The information in this topic applies to generating support bundles in clusters where you have kubectl access. For information about generating support bundles that include cluster- and host-level information for Replicated Embedded Cluster installations, see [Generating Support Bundles for Embedded Cluster](support-bundle-embedded).

## Prerequisite: Install the support-bundle Plugin

<InstallPlugin/>

## Generate a Bundle

<GenerateBundle/>

## Generate a Bundle when a Helm Installation Fails

If a Helm installation fails and you want to collect a support bundle to assist with diagnostics, you can use a Replicated default specification to generate the support bundle.

Run the following command:

```bash
kubectl support-bundle https://raw.githubusercontent.com/replicatedhq/troubleshoot-specs/main/in-cluster/default.yaml
```

---

# Enabling Support Bundle Uploads (Beta)

:::note
Direct bundle uploads are in beta. The functionality, requirements, and limitations of direct bundle uploads are subject to change.
:::

When this feature is enabled, customers using online KOTS installations can upload support bundles directly through the Admin Console UI, eliminating the need to share the generated bundle with you manually.

When enabled, your customers can use the **Send bundle to vendor** button in the Admin Console to upload a generated support bundle.
+

<img alt="Send bundle to vendor screen" src="/images/send-bundle-to-vendor.png" width="600px"/>

After clicking this button, the bundle will be immediately available under the Troubleshoot tab in the Vendor Portal team account associated with this customer.

For more information on how your customer can use this feature, see [Generating Support Bundles from the Admin Console](/enterprise/troubleshooting-an-app).

### How to Enable Direct Bundle Uploads

Direct bundle uploads are disabled by default. To enable this feature for your customer:

1. Log in to the Vendor Portal and navigate to your customer's **Manage Customer** page.
1. Under the **License options** section, make sure your customer has **KOTS Install Enabled** checked, and then check the **Support Bundle Upload Enabled (Beta)** option.
   <img alt="Customer license options: configure direct support bundle upload" src="/images/configure-direct-support-bundle-upload.png" width="400px"/>

   [View a larger version of this image](/images/configure-direct-support-bundle-upload.png)
1. Click **Save**.

### Limitations

- You will not receive a notification when a customer sends a support bundle to the Vendor Portal. To avoid overlooking these uploads, activate this feature only if there is a reliable escalation process already in place for the customer license.
- This feature supports only online KOTS installations. If the feature is enabled but the application is installed in air gap mode, the upload button does not appear.
- There is a 500 MB limit for support bundles uploaded directly through the Admin Console.


---


# Generating Host Bundles for kURL

import GenerateBundleHost from "../partials/support-bundles/_generate-bundle-host.mdx"

This topic describes how to configure a host support bundle spec for Replicated kURL installations. For information about generating host support bundles for Replicated Embedded Cluster installations, see [Generating Host Bundles for Embedded Cluster](/vendor/support-bundle-embedded).

## Overview

Host support bundles can be used to collect information directly from the host where a kURL cluster is running, such as CPU, memory, available block devices, and the operating system. Host support bundles can also be used for testing network connectivity and gathering the output of provided commands.

Host bundles for kURL are useful when:
- The kURL cluster is offline
- The kURL installer failed before the control plane was initialized
- The Admin Console is not working
- You want to debug host-specific performance and configuration problems even when the cluster is running

You can create a YAML spec to allow users to generate host support bundles for kURL installations. For information, see [Create a Host Support Bundle Spec](#create-a-host-support-bundle-spec) below.

Replicated also provides a default support bundle spec to collect host-level information for installations with the Embedded Cluster installer. For more information, see [Generating Host Bundles for Embedded Cluster](/vendor/support-bundle-embedded).

## Create a Host Support Bundle Spec

To allow users to generate host support bundles for kURL installations, create a host support bundle spec in a YAML manifest that is separate from your application release and then share the file with customers to run on their hosts. This spec is separate from your application release because host collectors and analyzers are intended to run directly on the host and not with Replicated KOTS.
If KOTS runs host collectors, the collectors are unlikely to produce the desired results because they run in the context of the kotsadm Pod.

To configure a host support bundle spec for kURL:

1. Create a SupportBundle custom resource manifest file (`kind: SupportBundle`).

1. Configure all of your host collectors and analyzers in one manifest file. You can use the following resources to help create your specification:

   - Access sample specifications in the Replicated troubleshoot-specs repository, which provides specifications for supporting your customers. See [troubleshoot-specs/host](https://github.com/replicatedhq/troubleshoot-specs/tree/main/host) in GitHub.

   - View a list and details of the available host collectors and analyzers. See [All Host Collectors and Analyzers](https://troubleshoot.sh/docs/host-collect-analyze/all/) in the Troubleshoot documentation.

   **Example:**

   The following example shows host collectors and analyzers for the number of CPUs and the amount of memory.

   ```yaml
   apiVersion: troubleshoot.sh/v1beta2
   kind: SupportBundle
   metadata:
     name: host-collectors
   spec:
     hostCollectors:
     - cpu: {}
     - memory: {}
     hostAnalyzers:
     - cpu:
         checkName: "Number of CPUs"
         outcomes:
         - fail:
             when: "count < 2"
             message: At least 2 CPU cores are required, and 4 CPU cores are recommended.
         - pass:
             message: This server has at least 4 CPU cores.
     - memory:
         checkName: "Amount of Memory"
         outcomes:
         - fail:
             when: "< 4G"
             message: At least 4G of memory is required, and 8G is recommended.
         - pass:
             message: The system has at least 8G of memory.
   ```

1. Share the file with your customers to run on their hosts.

:::important
Do not store support bundles on public shares, as they may still contain information that could be used to infer private data about the installation, even if some values are redacted.
:::

## Generate a Host Bundle for kURL

<GenerateBundleHost/>

---


# Inspecting Support Bundles

You can use the Vendor Portal to get a visual analysis of customer support bundles and use the file inspector to drill down into the details and log files. Use this information to get insights and help troubleshoot your customer issues.

To inspect a support bundle:

1. In the Vendor Portal, go to the [**Troubleshoot**](https://vendor.replicated.com/troubleshoot) page and click **Add support bundle > Upload a support bundle**.

1. In the **Upload a support bundle** dialog, drag and drop or use the file selector to upload a support bundle file to the Vendor Portal.

   <img alt="Upload a support bundle dialog" src="/images/support-bundle-analyze.png" width="500px"/>

   [View a larger version of this image](/images/support-bundle-analyze.png)

1. (Optional) If the support bundle relates to an open support issue, select the support issue from the dropdown to share the bundle with Replicated.

1. Click **Upload support bundle**.

   The **Support bundle analysis** page opens. The **Support bundle analysis** page includes information about the bundle, any available instance reporting data from the point in time when the bundle was collected, an analysis overview that can be filtered to show errors and warnings, and a file inspector.

   ![Support bundle analysis overview](/images/support-bundle-analysis-overview.png)

   [View a larger version of this image](/images/support-bundle-analysis-overview.png)

1.
On the **File inspector** tab, select any files from the directory tree to inspect the details of any files included in the support bundle, such as log files.

1. (Optional) Click **Download bundle** to download the bundle. This can be helpful if you want to access the bundle from another system or if other team members want to access the bundle and use other tools to examine the files.

1. (Optional) Navigate back to the [**Troubleshoot**](https://vendor.replicated.com/troubleshoot) page and click **Create cluster** to provision a cluster with Replicated Compatibility Matrix. This can be helpful for creating customer-representative environments for troubleshooting. For more information about creating clusters with Compatibility Matrix, see [Using Compatibility Matrix](testing-how-to).

   <img alt="Cluster configuration dialog" src="/images/cmx-cluster-configuration.png" width="400px"/>

   [View a larger version of this image](/images/cmx-cluster-configuration.png)

1. If you cannot resolve your customer's issue and need to submit a support request, go to the [**Support**](https://vendor.replicated.com/) page and click **Open a support request**. For more information, see [Submitting a Support Request](support-submit-request).

   :::note
   The **Share with Replicated** button on the support bundle analysis page does _not_ open a support request. You might be directed to use the **Share with Replicated** option when you are already interacting with a Replicated team member.
   :::

   ![Submit a Support Request](/images/support.png)

   [View larger version of this image](/images/support.png)


---


# About Creating Modular Support Bundle Specs

This topic describes how to use a modular approach to creating support bundle specs.

## Overview

Support bundle specifications can be designed using a modular approach. This refers to creating multiple different specs that are scoped to individual components or microservices, rather than creating a single, large spec. For example, for applications that are deployed as multiple Helm charts, vendors can create a separate support bundle spec in the `templates` directory in the parent chart as well as in each subchart.

This modular approach helps teams develop specs that are easier to maintain and helps teams avoid the merge conflicts that are more likely to occur when making changes to a large spec. When generating support bundles for an application that includes multiple modular specs, the specs are merged so that only one support bundle archive is generated.

## Example: Support Bundle Specifications by Component {#component}

Using a modular approach for an application that ships MySQL, NGINX, and Redis, your team can add collectors and analyzers using a separate support bundle specification for each component.
+

`manifests/nginx/troubleshoot.yaml`

This collector and analyzer checks compliance for the minimum number of replicas for the NGINX component:

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: SupportBundle
metadata:
  name: nginx
spec:
  collectors:
    - logs:
        selector:
          - app=nginx
  analyzers:
    - deploymentStatus:
        name: nginx
        outcomes:
          - fail:
              when: replicas < 2
```

`manifests/mysql/troubleshoot.yaml`

This collector and analyzer checks compliance for the minimum version of the MySQL component:

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: SupportBundle
metadata:
  name: mysql
spec:
  collectors:
    - mysql:
        uri: 'dbuser:**REDACTED**@tcp(db-host)/db'
  analyzers:
    - mysql:
        checkName: Must be version 8.x or later
        outcomes:
          - fail:
              when: version < 8.x
```

`manifests/redis/troubleshoot.yaml`

This collector checks that the Redis server is responding:

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: SupportBundle
metadata:
  name: redis
spec:
  collectors:
    - redis:
        collectorName: redis
        uri: rediss://default:password@hostname:6379
```

A single support bundle archive can be generated from a combination of these manifests using the `kubectl support-bundle --load-cluster-specs` command. For more information and additional options, see [Generating Support Bundles](support-bundle-generating).

---


# Making Support Bundle Specs Available Online

This topic describes how to make your application's support bundle specs available online, as well as how to link to online specs.

## Overview

You can make the definition of one or more support bundle specs available online in a source repository and link to it from the specs in the cluster. This approach lets you update collectors and analyzers outside of the application release and notify customers of potential problems and fixes in between application updates.

The schema supports a `uri:` field that, when set, causes the support bundle generation to use the online specification. If the URI is unreachable or unparseable, any collectors or analyzers in the in-cluster specification are used as a fallback.

You update collectors and analyzers in the online specification to manage bug fixes. When a customer generates a support bundle, the online specification can detect those potential problems in the cluster and let the customer know how to fix them. Without the URI link option, you must wait until your customers next update their applications or Kubernetes versions to notify them of potential problems. The URI link option is particularly useful for customers that do not update their application routinely.

If you are using a modular approach to designing support bundles, you can use multiple online specs. Each specification supports one URI link. For more information about modular specs, see [About Creating Modular Support Bundle Specs](support-modular-support-bundle-specs).

## Example: URI Linking to a Source Repository

This example shows how Replicated could set up a URI link for one of its own components. You can follow a similar process to link to your own online repository for your support bundles.

Replicated kURL includes an EKCO add-on for maintenance on embedded clusters, such as automating certificate rotation or data migration tasks.
Replicated can ship this component with a support bundle manifest that warns users if they do not have this add-on installed or if it is not running in the cluster.

**Example: Release v1.0.0**

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: SupportBundle
metadata:
  name: ekco
spec:
  collectors:
  analyzers:
    - deploymentStatus:
        checkName: Check EKCO is operational
        name: ekc-operator
        namespace: kurl
        outcomes:
          - fail:
              when: absent
              message: EKCO is not installed - please add the EKCO component to your kURL spec and re-run the installer script
          - fail:
              when: "< 1"
              message: EKCO does not have any ready replicas
          - pass:
              message: EKCO has at least 1 replica
```

If a bug is discovered at any time after the release of the specification above, Replicated can write an analyzer for it in an online specification. By adding a URI link to the online specification, the support bundle uses the assets hosted in the online repository, which is kept current.

The `uri` field is added to the specification as a raw file link. Replicated hosts the online specification on [GitHub](https://github.com/replicatedhq/troubleshoot-specs/blob/main/in-cluster/default.yaml).

**Example: Release v1.1.0**

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: SupportBundle
metadata:
  name: ekco
spec:
  uri: https://raw.githubusercontent.com/replicatedhq/troubleshoot-specs/main/in-cluster/default.yaml
  collectors: [...]
  analyzers: [...]
```

Using the `uri:` property, the support bundle gets the latest online specification if it can, or falls back to the collectors and analyzers listed in the specification that is in the cluster.

Note that because the release version 1.0.0 did not contain the URI, Replicated would have to wait until existing users upgrade a cluster before getting the benefit of the new analyzer. Then, going forward, those users get any future online analyzers without having to upgrade. New users who install the version containing the URI as their initial installation automatically get any online analyzers when they generate a support bundle.

For more information about the URI, see [Troubleshoot schema supports a `uri://` field](https://troubleshoot.sh/docs/support-bundle/supportbundle/#uri) in the Troubleshoot documentation. For a complete example, see [Debugging Kubernetes: Enhancements to Troubleshoot](https://www.replicated.com/blog/debugging-kubernetes-enhancements-to-troubleshoot/#Using-online-specs-for-support-bundles) in The Replicated Blog.


---


# Submitting a Support Request

You can submit a support request and a support bundle using the Replicated Vendor Portal. Uploading a support bundle is secure and helps the Replicated support team troubleshoot your application faster. Severity 1 issues are resolved three times faster when you submit a support bundle with your support request.

### Prerequisites

The following prerequisites must be met to submit support requests:

* Your Vendor Portal account must be configured for access to support before you can submit support requests. Contact your administrator to ensure that you are added to the correct team.

* Your team must have a replicated-collab repository configured. If you are a team administrator and need information about getting a collab repository set up and adding users, see [Adding Users to the Collab Repository](team-management-github-username#add).


### Submit a Support Request

To submit a support request:

1.
From the [Vendor Portal](https://vendor.replicated.com), click **Support > Submit a Support Request** or go directly to the [Support page](https://vendor.replicated.com/support).

1. In section 1 of the Support Request form, complete the fields with information about your issue.

1. In section 2, do _one_ of the following actions:
   - Use your pre-selected support bundle or select a different bundle in the pick list
   - Select **Upload and attach a new support bundle** and attach a bundle from your file browser

1. Click **Submit Support Request**. You receive a link to your support issue, where you can interact with the support team.

   :::note
   Click **Back** to exit without submitting a support request.
   :::


---


# Managing Collab Repository Access

import CollabRepoAbout from "../partials/collab-repo/_collab-repo-about.mdx"
import CollabRbacResourcesImportant from "../partials/collab-repo/_collab-rbac-resources-important.mdx"
import CollabExistingUser from "../partials/collab-repo/_collab-existing-user.mdx"

This topic describes how to add users to the Replicated collab GitHub repository automatically through the Replicated Vendor Portal. It also includes information about managing user roles in this repository using Vendor Portal role-based access control (RBAC) policies.

## Overview {#overview}

<CollabRepoAbout/>

To get access to the collab repository, members of a Vendor Portal team can add their GitHub username to the [Account Settings](https://vendor.replicated.com/account-settings) page in the Vendor Portal. The Vendor Portal then automatically provisions the team member as a user in the collab repository in GitHub. The RBAC policy that the member is assigned in the Vendor Portal determines the GitHub role that they have in the collab repository.

Replicated recommends that Vendor Portal admins manage user access to the collab repository through the Vendor Portal, rather than manually managing users through GitHub. Managing access through the Vendor Portal has the following benefits:
* Users are automatically added to the collab repository when they add their GitHub username in the Vendor Portal.
* Users are automatically removed from the collab repository when they are removed from the Vendor Portal team.
* Vendor Portal and collab repository RBAC policies are managed from a single location.

## Add Users to the Collab Repository {#add}

This procedure describes how to use the Vendor Portal to access the collab repository for the first time as an Admin, then automatically add new and existing users to the repository. This allows you to use the Vendor Portal to manage the GitHub roles for users in the collab repository, rather than manually adding, managing, and removing users from the repository through GitHub.

### Prerequisite

Your team must have a replicated-collab repository configured to add users to the repository and to manage repository access through the Vendor Portal. To get a collab support repository configured in GitHub for your team, complete the onboarding instructions in the email you received from Replicated. You can also access the [Replicated community help forum](https://community.replicated.com/) for assistance.

### Procedure

To add new and existing users to the collab repository through the Vendor Portal:

1. As a Vendor Portal admin, log in to your Vendor Portal account.
In the [Account Settings](https://vendor.replicated.com/account-settings) page, add your GitHub username and click **Save Changes**. + + <img src="/images/account-info.png" alt="Account info in the Vendor Portal" width="600"/> + + The Vendor Portal automatically adds your GitHub username to the collab repository and assigns it the Admin role. You receive an email with details about the collab repository when you are added. + +1. Follow the collab repository link from the email that you receive to log in to your GitHub account and access the repository. + +1. (Recommended) Manually remove any users in the collab repository that were previously added through GitHub. + + :::note + <CollabExistingUser/> + ::: + +1. (Optional) In the Vendor Portal, go to the [Team](https://vendor.replicated.com/team/members) page. For each team member, click **Edit permissions** as necessary to specify their GitHub role in the collab repository. + + For information about which policies to select, see [About GitHub Roles](#about-github-roles). + +1. Instruct each Vendor Portal team member to add their GitHub username to the [Account Settings](https://vendor.replicated.com/account-settings) page in the Vendor Portal. + + The Vendor Portal adds the username to the collab repository and assigns a GitHub role to the user based on their Vendor Portal policy. + + Users receive an email when they are added to the collab repository. + +## About GitHub Roles + +When team members add a GitHub username to their Vendor Portal account, the Vendor Portal determines how to assign the user a default GitHub role in the collab repository based on the following criteria: +* If the GitHub username already exists in the collab repository +* The RBAC policy assigned to the member in the Vendor Portal + +You can also update any custom RBAC policies in the Vendor Portal to change the default GitHub roles for those policies. + +### Default Roles for Existing Users {#existing-username} + +<CollabExistingUser/> + +### Default Role Mapping {#role-mapping} + +When team members add a GitHub username to their Vendor Portal account, the Vendor Portal assigns them to a GitHub role in the collab repository that corresponds to their Vendor Portal policy. For example, users with the default Read Only policy in the Vendor Portal are assigned the Read GitHub role in the collab repository. + +For team members assigned custom RBAC policies in the Vendor Portal, you can edit the custom policy to change their GitHub role in the collab repository. For more information, see [About Changing the Default GitHub Role](#custom) below. + +The table below describes how each default and custom Vendor Portal policy corresponds to a role in the collab repository in GitHub. For more information about each of the GitHub roles described in this table, see [Permissions for each role](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/repository-roles-for-an-organization#permissions-for-each-role) in the GitHub documentation. 
+

<table>
  <tr>
    <th width="25%">Vendor Portal Role</th>
    <th width="25%">GitHub collab Role</th>
    <th width="50%">Description</th>
  </tr>
  <tr>
    <td>Admin</td>
    <td>Admin</td>
    <td><p>Members assigned the default Admin role in the Vendor Portal are assigned the GitHub Admin role in the collab repository.</p></td>
  </tr>
  <tr>
    <td>Support Engineer</td>
    <td>Triage</td>
    <td><p>Members assigned the custom Support Engineer role in the Vendor Portal are assigned the GitHub Triage role in the collab repository.</p><p>For information about creating a custom Support Engineer policy in the Vendor Portal, see <a href="team-management-rbac-configuring#support-engineer">Support Engineer</a> in <em>Configuring RBAC Policies</em>.</p><p>For information about editing custom RBAC policies to change this default GitHub role, see <a href="#custom">Change the Default Role</a> below.</p></td>
  </tr>
  <tr>
    <td>Read Only</td>
    <td>Read</td>
    <td>Members assigned the default Read Only role in the Vendor Portal are assigned the GitHub Read role in the collab repository.</td>
  </tr>
  <tr>
    <td>Sales</td>
    <td>N/A</td>
    <td><p>Users assigned the custom Sales role in the Vendor Portal do not have access to the collab repository.</p><p>For information about creating a custom Sales policy in the Vendor Portal, see <a href="team-management-rbac-configuring#sales">Sales</a> in <em>Configuring RBAC Policies</em>.</p><p>For information about editing custom RBAC policies to change this default GitHub role, see <a href="#custom">Change the Default Role</a> below.</p></td>
  </tr>
  <tr>
    <td>Custom policies with <code>**/admin</code> under <code>allowed:</code></td>
    <td>Admin</td>
    <td>
      <p>By default, members assigned to a custom RBAC policy that specifies <code>**/admin</code> under <code>allowed:</code> are assigned the GitHub Admin role in the collab repository.</p>
      <p>For information about editing custom RBAC policies to change this default GitHub role, see <a href="#custom">Change the Default Role</a> below.</p>
    </td>
  </tr>
  <tr>
    <td>Custom policies <em>without</em> <code>**/admin</code> under <code>allowed:</code></td>
    <td>Read</td>
    <td>
      <p>By default, members assigned to any custom RBAC policies that do not specify <code>**/admin</code> under <code>allowed:</code> are assigned the GitHub Read role in the collab repository.</p>
      <p>For information about editing custom RBAC policies to change this default GitHub role, see <a href="#custom">Change the Default Role</a> below.</p>
    </td>
  </tr>
</table>

### Change the Default Role {#custom}

You can update any custom RBAC policies that you create in the Vendor Portal to change the default GitHub roles for those policies. For example, by default, any team members assigned a custom policy with `**/admin` under `allowed:` are assigned the Admin role in the collab repository in GitHub. You can update the custom policy to specify a more restrictive GitHub role.

To edit a custom policy to change the default GitHub role assigned to users with that policy, add one of the following RBAC resources to the `allowed:` or `denied:` list in the custom policy:

* `team/support-issues/read`
* `team/support-issues/write`
* `team/support-issues/triage`
* `team/support-issues/admin`

For more information about each of these RBAC resources, see [Team](team-management-rbac-resource-names#team) in _RBAC Resource Names_.
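
For illustration, the following is a sketch of a hypothetical custom policy (the policy name is a placeholder) that keeps members read-only in the Vendor Portal while granting them the Triage role in the collab repository by adding `team/support-issues/triage` to the `allowed:` list:

```json
{
  "v1": {
    "name": "Support Triage",
    "resources": {
      "allowed": [
        "**/read",
        "**/list",
        "team/support-issues/triage"
      ],
      "denied": [
        "**/*"
      ]
    }
  }
}
```

Members assigned a policy like this keep read-only access in the Vendor Portal, but receive Triage permissions in the collab repository instead of the default Read role.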
+

For more information about how to edit the `allowed:` or `denied:` lists for custom policies in the Vendor Portal, see [Configuring RBAC Policies](team-management-rbac-configuring).

<CollabRbacResourcesImportant/>


---


# Managing Google Authentication

This topic describes the Google authentication options that you can configure to control access to the Replicated Vendor Portal.

## Manage Google Authentication Options

As a team administrator, you can enable, disable, or require Google authentication for all accounts in the team.

A core benefit of using Google authentication is that when a user's Google account is suspended or deleted, Replicated logs that user out of all Google-authenticated Vendor Portal sessions within 10 minutes. The user remains in the team list, but they cannot log in to the Vendor Portal unless username and password authentication is also allowed. Requiring Google authentication is an effective way of centrally removing access to the Vendor Portal.

To manage Google authentication settings:

1. Click **Team Settings > [Google Authentication](https://vendor.replicated.com/team/google-authentication)**.

   ![Google Auth Settings](/images/team-mgmt-google-auth.png)

1. Enable or disable the settings:

   | Field | Instructions |
   |-----------------------|------------------------|
   | Allow Google authentication for team members | Enables team members to log in using a Google account. |
   | Restrict login to only allow Google authentication | Requires new users to accept an invitation and sign up with a Google account that exactly matches the email address that was invited to the team. The email address can be a gmail.com address or an address from another domain, but it must match the email address from the invitation exactly. Disabling this setting requires users to accept the invitation by creating a username and password (or using the SAML workflow). |


## Migrating Existing Accounts
Except for teams that restrict end users to Security Assertion Markup Language (SAML) logins or require two-factor authentication (2FA), existing end users can seamlessly sign in to an account that exactly matches their Google Workspace (formerly G Suite) email address. However, Google authentication only matches existing user accounts, so users who signed up using task-based email addresses (such as name+news@domain.com) can continue to sign in with email/password, invite their normal email address to the team, or contact support to change their email address. For more information about task-based email addresses, see [Create task-specific email addresses](https://support.google.com/a/users/answer/9308648?hl=en) in the Google Support site.

Migrated accounts maintain the same role-based access control (RBAC) permissions that were previously assigned. After signing in with Google, users can choose to disable username/password-based authentication on their account or maintain both authentication methods using the Vendor Portal [account settings page](https://vendor.replicated.com/account-settings).

## Limitations

Using distribution lists to send invitations to join a team is not supported. The invitations are sent, but they are invalid and cannot be used to join a team using Google authentication.
+

## Compatibility with Two-Factor Authentication

Google authentication is not entirely compatible with the Replicated two-factor authentication (2FA) implementation because Google authentication bypasses account-based 2FA, relying on your Google account's authentication instead. However, the Vendor Portal continues to enforce 2FA on all email/password-based authentication, even for the same user, if both options are enabled.

## Related Topic

[Managing Team Members](team-management)


---


# Configuring RBAC Policies

import CollabRbacResourcesImportant from "../partials/collab-repo/_collab-rbac-resources-important.mdx"

This topic describes how to use role-based access control (RBAC) policies to grant or deny team members permissions to use Replicated services in the Replicated Vendor Portal.

## About RBAC Policies

By default, every team has two policies created automatically: **Admin** and **Read Only**. If you have an Enterprise plan, you will also have the **Sales** and **Support Engineer** policies created automatically. These default policies are not configurable. For more information, see [Default RBAC Policies](#default-rbac) below.

You can configure custom RBAC policies if you are on the Enterprise pricing plan. Creating custom RBAC policies lets you limit which areas of the Vendor Portal are accessible to team members, and control read and read/write privileges to groups based on their role. For example, you can limit access for the sales team to one application and to specific channels. Or, you can grant only certain users permission to promote releases to your production channels.

You can also create custom RBAC policies in the Vendor Portal to manage user access and permissions in the Replicated collab repository in GitHub. For more information, see [Managing Collab Repository Access](team-management-github-username).

## Default RBAC Policies {#default-rbac}

This section describes the default RBAC policies that are included for Vendor Portal teams, depending on the team's Replicated pricing plan.

### Admin

The Admin policy grants read/write permissions to all resources on the team.

:::note
This policy is automatically created for all plans.
:::

```json
{
  "v1": {
    "name": "Admin",
    "resources": {
      "allowed": [
        "**/*"
      ],
      "denied": []
    }
  }
}
```

### Read Only

The Read Only policy grants read permission to all resources on the team except for API tokens.

:::note
This policy is automatically created for all plans.
:::

```json
{
  "v1": {
    "name": "Read Only",
    "resources": {
      "allowed": [
        "**/list",
        "**/read"
      ],
      "denied": [
        "**/*"
      ]
    }
  }
}
```

### Support Engineer

The Support Engineer policy grants read access to releases, channels, and application data, and read-write access to customer and license details. It also grants permission to open Replicated support issues and upload support bundles.

:::note
This policy is automatically created for teams with the Enterprise plan only.
:::

```json
{
  "v1": {
    "name": "Support Engineer",
    "resources": {
      "allowed": [
        "**/read",
        "**/list",
        "kots/app/*/license/**",
        "team/support-issues/read",
        "team/support-issues/write"
      ],
      "denied": [
        "**/*"
      ]
    }
  }
}
```

### Sales

The Sales policy grants read-write access to customers and license details and read-only access to resources necessary to manage licenses (applications, channels, and license fields). No additional access is granted.
+

:::note
This policy is automatically created for teams with the Enterprise plan only.
:::

```json
{
  "v1": {
    "name": "Sales",
    "resources": {
      "allowed": [
        "kots/app/*/read",
        "kots/app/*/channel/*/read",
        "kots/app/*/licensefields/read",
        "kots/app/*/license/**"
      ],
      "denied": [
        "**/*"
      ]
    }
  }
}
```

## Configure a Custom RBAC Policy

To configure a custom RBAC policy:

1. From the Vendor Portal [Team page](https://vendor.replicated.com/team), select **RBAC** from the left menu.

1. Do _one_ of the following:

   - Click **Create Policy** from the RBAC page to create a new policy.
   - Click **View policy** to edit an existing custom policy in the list.

   <CollabRbacResourcesImportant/>

1. Edit the fields in the policy dialog. In the **Definition** pane, specify the `allowed` and `denied` arrays in the `resources` key to create limits for the role.

   The default policy allows everything and the **Config help** pane displays any errors.

   ![Create RBAC Policy](/images/policy-create.png)

   - For more information, see [Policy Definition](#policy-definition).
   - For more information about and examples of rule order, see [Rule Order](#rule-order).
   - For a list of resource names, see [RBAC Resource Names](team-management-rbac-resource-names).

1. Click **Create Policy** to create a new policy, or click **Update Policy** to update an existing policy.

   :::note
   Click **Cancel** to exit without saving changes.
   :::

1. To apply RBAC policies to Vendor Portal team members, you can:

   - Assign policies to existing team members
   - Specify a policy when inviting new team members
   - Set a default policy for auto-joining a team

   See [Managing Team Members](team-management).

## Policy Definition

A policy is defined in a single JSON document:

```json
{
  "v1": {
    "name": "Read Only",
    "resources": {
      "allowed": [
        "**/read",
        "**/list"
      ],
      "denied": [
        "**/*"
      ]
    }
  }
}
```

The primary content of a policy document is the `resources` key. The `resources` key should contain two arrays, identified as `allowed` and `denied`. Resources specified in the `allowed` list are allowed for users assigned to the policy, and resources specified in the `denied` list are denied.

Resource names are hierarchical, and support wildcards and globs. For a complete list of resource names that can be defined in a policy document, see [RBAC Resource Names](team-management-rbac-resource-names).

When a policy document has conflicting rules, the behavior is predictable. For more information about conflicting rules, see [Rule Order](#rule-order).

### Example: View Specific Application and Channel

The following policy definition example limits any user with this role to viewing a specific application and a specific channel for that application:

```json
{
  "v1": {
    "name": "Policy Name",
    "resources": {
      "allowed": [
        "kots/app/appID/list",
        "kots/app/appID/read",
        "kots/app/appID/channel/channelID/list",
        "kots/app/appID/channel/channelID/read"
      ],
      "denied": []
    }
  }
}
```

The example above uses an application ID and a channel ID to scope the permissions of the RBAC policy. To find your application and channel IDs, do the following:

- To get the application ID, click **Settings > Show Application ID (Advanced)** in the Vendor Portal.

- To get the channel ID, click **Channels** in the Vendor Portal. Then click the **Release History** link for the channel that you want to limit access to.
The channel ID displays in your browser URL.

## Rule Order

When a resource name is specified in both the `allowed` and the `denied` lists of a policy, defined rules determine which rule is applied.

If `denied` is left empty, it is implied as a `**/*` rule, unless a `**/*` rule is specified in the `allowed` resources. If a rule exactly conflicts with another rule, the `denied` rule takes precedence.

### Defining Precedence Using Rule Specificity

The most specific rule definition is always applied, when compared with less specific rules. Specificity of a rule is calculated by the number of asterisks (`**` and `*`) in the definition. A `**` in the rule definition is the least specific, followed by rules with `*`, and finally rules with no wildcards as the most specific.

### Example: No Access To Stable Channel

In the following example, a policy grants access to promote releases to any channel except the Stable channel. It uses the rule pattern `kots/app/[:appId]/channel/[:channelId]/promote`. Note that you specify the channel ID, rather than the channel name. To find the channel ID, go to the Vendor Portal **Channels** page and click the **Settings** icon for the target channel.

```json
{
  "v1": {
    "name": "No Access To Stable Channel",
    "resources": {
      "allowed": [
        "**/*"
      ],
      "denied": [
        "kots/app/*/channel/1eg7CyEofYSmVAnK0pEKUlv36Y3/promote"
      ]
    }
  }
}
```

### Example: View Customers Only

In the following example, a policy grants access to viewing all customers, but not to creating releases, promoting releases, or creating new customers.

```json
{
  "v1": {
    "name": "View Customers Only",
    "resources": {
      "allowed": [
        "kots/app/*/license/*/read",
        "kots/app/*/license/*/list",
        "kots/app/*/read",
        "kots/app/*/list"
      ],
      "denied": [
        "**/*"
      ]
    }
  }
}
```


---


# RBAC Resource Names

import CollabRbacResourcesImportant from "../partials/collab-repo/_collab-rbac-resources-important.mdx"

This is a list of all available resource names for Replicated vendor role-based access control (RBAC) policies:

## Integration Catalog

### integration/catalog/list

Grants the holder permission to view the catalog events and triggers available for integrations.

## kots

### kots/app/create

Grants the holder permission to create new applications.

### kots/app/[:appId]/read

Grants the holder permission to view the application. If the holder does not have permissions to view an application, it will not appear in lists.

### kots/externalregistry/list

Grants the holder the ability to list external Docker registries for application(s).

### kots/externalregistry/create

Grants the holder the ability to link a new external Docker registry to application(s).

### kots/externalregistry/[:registryName]/delete

Grants the holder the ability to delete the specified linked external Docker registry in application(s).

### kots/app/[:appId]/channel/create

Grants the holder the ability to create a new channel in the specified application(s).

### kots/app/[:appId]/channel/[:channelId]/archive

Grants the holder permission to archive the specified channel(s) of the specified application(s).

### kots/app/[:appId]/channel/[:channelId]/promote

Grants the holder the ability to promote a new release to the specified channel(s) of the specified application(s).
+

### kots/app/[:appId]/channel/[:channelId]/update

Grants the holder permission to update the specified channel of the specified application(s).

### kots/app/[:appId]/channel/[:channelId]/read

Grants the holder permission to view information about the specified channel of the specified application(s).

### kots/app/[:appId]/enterprisechannel/[:channelId]/read

Grants the holder permission to view information about the specified enterprise channel of the specified application(s).

### kots/app/[:appId]/channel/[:channelId]/releases/airgap

Grants the holder permission to trigger airgap builds for the specified channel.

### kots/app/[:appId]/channel/[:channelId]/releases/airgap/download-url

Grants the holder permission to get an airgap bundle download URL for any release on the specified channel.

### kots/app/[:appId]/installer/create

Grants the holder permission to create kURL installers. For more information, see [Creating a kURL installer](packaging-embedded-kubernetes).

### kots/app/[:appId]/installer/update

Grants the holder permission to update kURL installers. For more information, see [Creating a kURL installer](packaging-embedded-kubernetes).

### kots/app/[:appId]/installer/read

Grants the holder permission to view kURL installers. For more information, see [Creating a kURL installer](packaging-embedded-kubernetes).

### kots/app/[:appId]/installer/promote

Grants the holder permission to promote kURL installers to a channel. For more information, see [Creating a kURL installer](packaging-embedded-kubernetes).

:::note
The `kots/app/[:appId]/installer/promote` policy does not grant the holder permission to view and create installers. To have permissions to view, create, and promote installers, users must also be assigned the `kots/app/[:appId]/installer/read` and `kots/app/[:appId]/installer/create` policies.
:::

### kots/app/[:appId]/license/create

Grants the holder permission to create a new license in the specified application(s).

### kots/app/[:appId]/license/[:customerId]/read

Grants the holder permission to view the license specified by ID. If this is denied, the licenses will not show up in search, CSV export, or on the Vendor Portal, and the holder will not be able to subscribe to this license's instance notifications.

### kots/app/[:appId]/license/[:customerId]/update

Grants the holder permission to edit the license specified by ID for the specified application(s).

### kots/app/[:appId]/license/[:customerId]/slack-notifications/read

Grants the holder permission to view the team's Slack notification subscriptions for instances associated with the specified license.

### kots/app/[:appId]/license/[:customerId]/slack-notifications/update

Grants the holder permission to edit the team's Slack notification subscriptions for instances associated with the specified license.

### kots/app/[:appId]/builtin-licensefields/update

Grants the holder permission to edit the builtin license field override values for the specified application(s).

### kots/app/[:appId]/builtin-licensefields/delete

Grants the holder permission to delete the builtin license field override values for the specified application(s).

### kots/license/[:customerId]/airgap/password

Grants the holder permission to generate a new download portal password for the license specified (by ID) for the specified application(s).
+

### kots/license/[:customerId]/archive

Grants the holder permission to archive the specified license (by ID).

### kots/license/[:customerId]/unarchive

Grants the holder permission to unarchive the specified license (by ID).

### kots/app/[:appId]/licensefields/create

Grants the holder permission to create new license fields in the specified application(s).

### kots/app/[:appId]/licensefields/read

Grants the holder permission to view the license fields in the specified application(s).

### kots/app/[:appId]/licensefields/update

Grants the holder permission to edit the license fields for the specified application(s).

### kots/app/[:appId]/licensefields/delete

Grants the holder permission to delete the license fields for the specified application(s).

### kots/app/[:appId]/release/create

Grants the holder permission to create a new release in the specified application(s).

### kots/app/[:appId]/release/[:sequence]/update

Grants the holder permission to update the files saved in release sequence `[:sequence]` in the specified application(s). Once a release is promoted to a channel, it's not editable by anyone.

### kots/app/[:appId]/release/[:sequence]/read

Grants the holder permission to read the files at release sequence `[:sequence]` in the specified application(s).

### kots/app/[:appId]/customhostname/list

Grants the holder permission to view custom hostnames for the team.

### kots/app/[:appId]/customhostname/create

Grants the holder permission to create custom hostnames for the team.

### kots/app/[:appId]/customhostname/delete

Grants the holder permission to delete custom hostnames for the team.

### kots/app/[:appId]/customhostname/default/set

Grants the holder permission to set default custom hostnames.

### kots/app/[:appId]/customhostname/default/unset

Grants the holder permission to unset the default custom hostnames.

### kots/app/[:appId]/supportbundle/read

Grants the holder permission to view and download support bundles.

## Registry

### registry/namespace/:namespace/pull

Grants the holder permission to pull images from the Replicated registry.

### registry/namespace/:namespace/push

Grants the holder permission to push images into the Replicated registry.

## Compatibility Matrix

### kots/cluster/create

Grants the holder permission to create new clusters.

### kots/cluster/list

Grants the holder permission to list running and terminated clusters.

### kots/cluster/[:clusterId]

Grants the holder permission to get cluster details.

### kots/cluster/[:clusterId]/upgrade

Grants the holder permission to upgrade a cluster.

### kots/cluster/tag/update

Grants the holder permission to update cluster tags.

### kots/cluster/ttl/update

Grants the holder permission to update the cluster TTL.

### kots/cluster/[:clusterId]/nodegroup

Grants the holder permission to update nodegroup details.

### kots/cluster/[:clusterId]/kubeconfig

Grants the holder permission to get the kubeconfig for a cluster.

### kots/cluster/[:clusterId]/delete

Grants the holder permission to delete a cluster.

### kots/cluster/[:clusterId]/addon/list

Grants the holder permission to list addons for a cluster.

### kots/cluster/[:clusterId]/addon/[:addonId]/read

Grants the holder permission to read the addon for a cluster.

### kots/cluster/[:clusterId]/addon/[:addonId]/delete

Grants the holder permission to delete the addon for a cluster.
+

### kots/cluster/[:clusterId]/addon/create/objectStore

Grants the holder permission to create an object store for a cluster.

### kots/cluster/[:clusterId]/port/expose

Grants the holder permission to expose a port for a cluster.

### kots/cluster/[:clusterId]/port/delete

Grants the holder permission to delete a port for a cluster.

### kots/cluster/[:clusterId]/port/list

Grants the holder permission to list exposed ports for a cluster.

### kots/cluster/list-quotas

Grants the holder permission to list the quotas.

### kots/cluster/increase-quota

Grants the holder permission to request an increase in the quota.

### kots/vm/tag/update

Grants the holder permission to update VM tags.

### kots/vm/ttl/update

Grants the holder permission to update the VM TTL.

### kots/vm/[:vmId]/port/expose

Grants the holder permission to expose a port for a VM.

### kots/vm/[:vmId]/port/list

Grants the holder permission to list exposed ports for a VM.

### kots/vm/[:vmId]/addon/[:addonId]/delete

Grants the holder permission to delete the addon for a VM.

## Team

### team/auditlog/read

Grants the holder permission to view the audit log for the team.

### team/authentication/update

Grants the holder permission to manage the following team authentication settings: Google authentication, Auto-join, and SAML authentication.

### team/authentication/read

Grants the holder permission to read the following authentication settings: Google authentication, Auto-join, and SAML authentication.

### team/integration/list

Grants the holder permission to view the team's integrations.

### team/integration/create

Grants the holder permission to create an integration.

### team/integration/[:integrationId]/delete

Grants the holder permission to delete specified integration(s).

### team/integration/[:integrationId]/update

Grants the holder permission to update specified integration(s).

### team/members/list

Grants the holder permission to list team members and invitations.

### team/member/invite

Grants the holder permission to invite additional people to the team.

### team/members/delete

Grants the holder permission to delete other team members.

### team/notifications/slack-webhook/read

Grants the holder permission to view the team's Slack webhook for instance notifications.

### team/notifications/slack-webhook/update

Grants the holder permission to edit the team's Slack webhook for instance notifications.

### team/policy/read

Grants the holder permission to view RBAC policies for the team.

### team/policy/update

Grants the holder permission to update RBAC policies for the team.

### team/policy/delete

Grants the holder permission to delete RBAC policies for the team.

### team/policy/create

Grants the holder permission to create RBAC policies for the team.

### team/security/update

Grants the holder permission to manage team password requirements, including two-factor authentication and password complexity requirements.

### team/serviceaccount/list

Grants the holder permission to list service accounts.

### team/serviceaccount/create

Grants the holder permission to create new service accounts.

### team/serviceaccount/[:name]/delete

Grants the holder permission to delete the service account identified by the name specified.

### team/support-issues/read

Grants the holder Read permissions in the Replicated collab repository in GitHub for the Vendor Portal team.
Applies after the user adds their GitHub username to the Vendor Portal [Account Settings](https://vendor.replicated.com/account-settings) page.

To prevent access to the collab repository for an RBAC policy, add `team/support-issues/read` to the `denied:` list in the policy. For example:

```json
{
  "v1": {
    "name": "Policy Name",
    "resources": {
      "allowed": [],
      "denied": [
        "team/support-issues/read"
      ]
    }
  }
}
```

For more information about the Read role in GitHub, see [Permissions for each role](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/repository-roles-for-an-organization#permissions-for-each-role) in the GitHub documentation.

<CollabRbacResourcesImportant/>

### team/support-issues/write

Grants the holder Write permissions in the Replicated collab repository in GitHub for the Vendor Portal team. Applies after the user adds their GitHub username to the Vendor Portal [Account Settings](https://vendor.replicated.com/account-settings) page.

For more information about the Write role in GitHub, see [Permissions for each role](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/repository-roles-for-an-organization#permissions-for-each-role) in the GitHub documentation.

<CollabRbacResourcesImportant/>

### team/support-issues/triage

Grants the holder Triage permissions in the Replicated collab repository in GitHub for the Vendor Portal team. Applies after the user adds their GitHub username to the Vendor Portal [Account Settings](https://vendor.replicated.com/account-settings) page.

For more information about the Triage role in GitHub, see [Permissions for each role](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/repository-roles-for-an-organization#permissions-for-each-role) in the GitHub documentation.

<CollabRbacResourcesImportant/>

### team/support-issues/admin

Grants the holder Admin permissions in the Replicated collab repository in GitHub for the Vendor Portal team. Applies after the user adds their GitHub username to the Vendor Portal [Account Settings](https://vendor.replicated.com/account-settings) page.

For more information about the Admin role in GitHub, see [Permissions for each role](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/repository-roles-for-an-organization#permissions-for-each-role) in the GitHub documentation.

<CollabRbacResourcesImportant/>

## User

### user/token/list

Grants the holder permission to list user tokens.

### user/token/create

Grants the holder permission to create new user tokens.

### user/token/delete

Grants the holder permission to delete user tokens.


---


# Managing SAML Authentication

This topic describes how to enable or disable SAML authentication for the Replicated Vendor Portal.

## About Using SAML with the Vendor Portal

After starting out with Replicated, most teams grow, adding more developers, support engineers, and sales engineers. Eventually, managing access to the Vendor Portal can become difficult. Replicated supports logging in using SAML, which lets you manage access (provisioning and deprovisioning accounts) through your SAML identity provider.

Using SAML, everyone on your team logs in with their existing usernames and passwords through your identity provider's dashboard.
Users do not need to sign up through the Vendor Portal or log in with a separate Vendor Portal account, simplifying their experience.

### Enabling SAML in Your Vendor Account

To enable SAML in your Vendor Portal account, you must have an Enterprise plan. For access to SAML, you can contact Replicated through [Support](https://vendor.replicated.com/support). For information about the Enterprise plan, see [pricing](https://www.replicated.com/pricing/).

### SCIM

Replicated does not implement System for Cross-domain Identity Management (SCIM). Instead, we use SAML to authenticate and create just-in-time user identities in our system. We resolve the username (email address) as the actor and use this to ensure that audit log events follow these dynamically provisioned users. If a user's email address is already associated with a Replicated account, then when they use your SAML integration to access the Vendor Portal, they automatically leave their current team and join the team associated with the SAML login.

### Compatibility with Two-Factor Authentication

If SAML authentication is configured for your team, Replicated two-factor authentication (2FA) is bypassed. You can leave 2FA enabled, but you are not prompted to enter a code when logging in.

### Role Based Access Control

Replicated supports role-based access control (RBAC) in the Vendor Portal. To use RBAC with SAML, you must configure policies and add users to the policies by their username. Usernames are the identity of the user in your identity provider (IdP). Typically, this username is the full email address. For more information about configuring RBAC, see [Configuring RBAC Policies](team-management-rbac-configuring).

## Downloading Certificates from Supported SAML Providers

You must retrieve the metadata and x.509 public certificate files from your SAML provider before configuring SAML in the Vendor Portal. The certificate file must be in PEM format.

Replicated tests several SAML providers, but the service should be compatible with any SAML 2.0 compliant identity provider. We provide full support for the following SAML providers:

* Okta. For more information about integrating Okta with Replicated, see [Configure Okta](#configure-okta).

* OneLogin


## Configure Okta

The first part of the Vendor Portal and Okta integration is configured in the Okta dashboard. This configuration lets you download the XML Metadata file and x.509 public certificate in PEM format required for the SAML authentication.

This procedure outlines the basic configuration steps, recommended settings, and the specific fields to configure in Okta. For more information about using Okta, see the [Okta](https://help.okta.com/en/prod/Content/index.htm) documentation.

To configure Okta and download the required files:

1. Log in to your Okta Admin dashboard, and click **Applications**.

1. Select **Create new app integration**, and create a new application as a SAML 2.0 application.

1. Provide a name and icon for the application, such as Replicated Vendor Portal. You can download a high-quality Replicated icon [here](https://help.replicated.com/images/guides/vendor-portal-saml/replicated-application-icon.png).

1. Click **Next**.

   The Configuring SAML page opens.

1. Click **Download Okta Certificate**. This downloads your x.509 certificate to provide to Replicated. Save this file to a safe location.

1.
+1. On this same page, edit the following fields:
+
+   | Field Name | Description |
+   | :---------------------- | ----------------------------------------------------------------------------------------------- |
+   | Single Sign On URL | Set this to `https://id.replicated.com/v1/saml`. |
+   | Audience URI (SP Entity ID) | Displays on the Vendor Portal [SAML authentication](https://vendor.replicated.com/team/saml-authentication) tab, and is unique to your team. |
+   | Name ID Format | Change this to `EmailAddress`. |
+
+1. Click **Next**.
+
+1. Select **I’m an Okta customer adding an internal app** on the final screen, and click **Finish**.
+
+1. Click **Identity provider metadata** to download the Metadata.xml file. This likely opens an XML download that you can right-click and select **Save Link As…** to download this file.
+
+### Next Step
+
+Configure and enable SAML in the Vendor Portal. For more information, see [Configure SAML](#configure-saml).
+
+## Configure SAML
+
+When you initially configure SAML, we do not recommend that you disable username/password access at the same time. It is possible, and recommended during testing, to support both SAML and non-SAML authentication on your account simultaneously.
+
+**Prerequisite**
+
+- Download your XML Metadata file and x.509 public certificate PEM file from your SAML provider. For more information on supported SAML providers and how to find these files, see [Supported SAML providers](#downloading-certificates-from-supported-saml-providers).
+
+To configure SAML:
+
+1. Log in to the Vendor Portal [Team Members page](https://vendor.replicated.com/team/members) as a user with Admin access.
+1. Click [SAML Authentication](https://vendor.replicated.com/team/saml-authentication) from the left menu. If you do not see these options, contact [Support](https://vendor.replicated.com/support).
+
+   The SAML Authentication page opens.
+
+   ![SAML Authentication](/images/team-mgmt-saml-authentication.png)
+
+   [View a larger version of this image](/images/team-mgmt-saml-authentication.png)
+
+1. Browse for, or drag and drop, your XML Metadata file and x.509 PEM file from your SAML provider.
+
+1. Click **Upload Metadata & Cert**.
+
+### Next Step
+
+At this point, SAML is configured, but not enabled. The next step is to enable SAML enforcement options. For more information, see [Enable SAML Enforcement](#enable-saml-enforcement).
+
+## Enable SAML Enforcement
+
+After you have uploaded the metadata and x.509 public certificate PEM file, you must enable SAML enforcement options. Replicated provides options that can be enabled or disabled at any time. You can also change the IdP metadata if needed.
+
+To enable SAML enforcement:
+
+1. From the Vendor Portal, select **Team > [SAML Authentication](https://vendor.replicated.com/team/saml-authentication)**.
+
+1. Select either or both login method options in the Manage your SAML authentication pane. Allowing both login methods is a good way to test SAML without risking any interruption for the rest of your team.
+
+   **Enable SAML for team logins** - Allows members of your team to log in to the Vendor Portal through your identity provider. This option does not remove, change, or restrict any other authentication methods that you have configured in the Vendor Portal. If you enable SAML and your team is already logging in with accounts provisioned in the Vendor Portal, they will be able to continue logging in with those accounts.
+
+   **Only allow SAML logins** - Requires members of your team to log in to the Vendor Portal through your identity provider. Prevents any non-SAML accounts from logging in. Replicated does not delete the existing accounts. If you turn on this option and then later disable it, accounts that never logged in using SAML will be able to log in again. If an account exists outside of SAML and then is authenticated with SAML, the account is converted and cannot authenticate using a password again.
+
+   ![SAML Authentication](/images/team-mgmt-saml-manage-auth.png)
+
+   [View a larger version of this image](/images/team-mgmt-saml-manage-auth.png)
+
+1. (Optional) Set a default policy for new accounts from the drop-down list.
+1. (Optional) Click **Change IdP Metadata** and follow the prompts to upload any changes to your metadata.
+
+SAML is now enabled on your account. For your team to use the SAML login option, you must enable access through your SAML identity provider’s dashboard. For example, if you use Okta, assign the application to users or groups. When a user clicks through to use the application, they are granted access as described in [SCIM](#scim).
+
+## Disable SAML Enforcement
+
+You can disable SAML authentication options at any time and re-enable them later if needed.
+
+To disable SAML enforcement:
+
+1. From the Vendor Portal, select **Team > SAML Authentication**.
+
+1. Click **Deprovision SAML** in the Manage your SAML authentication pane.
+
+   ![SAML Authentication](/images/team-mgmt-saml-manage-auth.png)
+
+   [View a larger version of this image](/images/team-mgmt-saml-manage-auth.png)
+
+
+---
+
+
+# Configuring a Slack Webhook (Beta)
+
+import NotificationsAbout from "../partials/instance-insights/_notifications-about.mdx"
+
+
+# Configuring a Slack Webhook (Beta)
+
+As a vendor, anyone on your team can set up Slack notifications, which are sent to a shared Slack channel. Notifications give your team visibility into customer instance statuses and changes.
+
+<NotificationsAbout/>
+
+While email notifications are specific to each user, Slack notification settings are shared, viewable, and editable by the entire team. Any changes made by a team member impact the entire team.
+
+## Limitations
+
+As a Beta feature, the following limitations apply:
+
+- Only one Slack channel per team is supported.
+
+- RBAC policies are not supported for configuring granular permissions.
+
+## Prerequisite
+
+Create a Slack webhook URL. For more information, see [Sending Messages using Incoming Webhooks](https://api.slack.com/messaging/webhooks) in the Slack API documentation. For an optional way to check that the webhook URL works, see the test example at the end of this topic.
+
+Make sure to keep the URL secure because it contains a secret that allows write access to one or more channels in your Slack Workspace.
+
+## Configure the Webhook in the Vendor Portal
+
+When you enable Slack notifications for a team, you must first configure the Slack webhook in the Vendor Portal. Typically, you only do this once. Then you can configure notifications for individual customer instances.
+
+To configure the Slack webhook:
+
+1. From the **[Team Vendor Portal](https://vendor.replicated.com/team/members)** page, click **Slack Notifications**.
+
+1. On the **Slack Notifications Setup** page, paste the Slack webhook URL. Click **Save**.
+
+## Next Step
+
+[Configure Slack notifications for customer instances](instance-notifications-config).
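+## Test the Webhook (Optional)
+
+As referenced in the prerequisite above, you can verify that a webhook URL posts to the intended channel by sending a test message with `curl` before saving it in the Vendor Portal. The following is a minimal sketch based on the Slack incoming webhooks documentation; the URL shown is a placeholder that you replace with your own webhook URL:
+
+```bash
+# Placeholder webhook URL: replace with the URL generated for your Slack workspace.
+curl -X POST \
+  -H 'Content-type: application/json' \
+  --data '{"text": "Test: Vendor Portal instance notifications webhook"}' \
+  https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX
+```
+
+If the message appears in the expected channel, the URL is ready to paste into the **Slack Notifications Setup** page.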
+
+
+---
+
+
+# Managing Two-Factor Authentication
+
+# Managing Two-Factor Authentication
+
+This topic describes how to enable and disable Replicated two-factor authentication for individual and team accounts in the Replicated Vendor Portal.
+
+Alternatively, you can use Google Authentication or SAML Authentication to access the Vendor Portal. For more information about those options, see [Managing Google Authentication](team-management-google-auth) and [Managing SAML Authentication](team-management-saml-auth).
+
+## About Two-Factor Authentication
+
+Two-factor authentication (2FA) provides additional security by requiring two methods of authentication to access resources and data. When you enable the 2FA option in the Vendor Portal, you are asked to provide an authentication code and your password during authentication. Replicated uses the open algorithm known as the Time-based One-time Password (TOTP), which is specified by the Internet Engineering Task Force (IETF) under RFC 6238.
+
+## Limitation
+
+If SAML Authentication or Google Authentication is configured and 2FA is also enabled, then 2FA is bypassed. You can leave 2FA enabled, but you are not prompted to enter a code when logging in.
+
+## Enable 2FA on Individual Accounts
+
+If you are an administrator or if 2FA is enabled for your team, you can enable 2FA on your individual account.
+
+To enable two-factor authentication on your individual account:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), click **Account Settings** from the dropdown list in the upper right corner of the screen.
+
+   <img src="/images/vendor-portal-account-settings.png" alt="Vendor portal account settings" width="200"/>
+
+   [View a larger version of this image](/images/vendor-portal-account-settings.png)
+
+1. In the **Two-Factor Authentication** pane, click **Turn on two-factor authentication**.
+
+   <img src="/images/vendor-portal-password-2fa.png" alt="Turn on 2FA in the Vendor Portal" width="600"/>
+
+   [View a larger version of this image](/images/vendor-portal-password-2fa.png)
+
+1. In the **Confirm password** dialog, enter your Vendor Portal account password. Click **Confirm password**.
+
+1. Using a supported two-factor authentication application on your mobile device, such as Google Authenticator, scan the QR code that displays. Alternatively, click **Use this text code** in the Vendor Portal to generate an alphanumeric code that you enter in the mobile application.
+
+   <img src="/images/vendor-portal-scan-qr.png" alt="Turn on 2FA in the Vendor Portal" width="400"/>
+
+   [View a larger version of this image](/images/vendor-portal-scan-qr.png)
+
+   Your mobile application displays an authentication code.
+
+1. Enter the authentication code in the Vendor Portal.
+
+   Two-factor authentication is enabled and a list of recovery codes is displayed at the bottom of the **Two-Factor Authentication** pane.
+
+1. Save the recovery codes in a secure location. These codes can be used at any time (one time per code) if you lose your mobile device.
+
+1. Log out of your account, then log back in to test that 2FA is enabled. You are prompted to enter a one-time code generated by the application on your mobile device.
+
+
+## Disable 2FA on Individual Accounts
+
+To disable two-factor authentication on your individual account:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), click **Account Settings** from the dropdown list in the upper right corner of the screen.
+
+   <img src="/images/vendor-portal-account-settings.png" alt="Vendor portal account settings" width="200"/>
+
+   [View a larger version of this image](/images/vendor-portal-account-settings.png)
+
+1. In the **Two-Factor Authentication** pane, click **Turn off two-factor authentication**.
+
+1. In the **Confirm password** dialog, enter your Vendor Portal account password. Click **Confirm password**.
+
+## Enable or Disable 2FA for a Team
+
+As an administrator, you can enable and disable 2FA for teams. You must first enable 2FA on your individual account before you can enable 2FA for teams. After you enable 2FA for your team, team members can enable 2FA on their individual accounts.
+
+To enable or disable 2FA for a team:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), select the **Team** tab, then select **Multifactor Auth**.
+
+   <img src="/images/team-2fa-auth.png" alt="Multifactor authentication for teams in the Vendor Portal" width="600"/>
+
+   [View a larger image](/images/team-2fa-auth.png)
+
+1. On the **Multifactor Authentication** page, do one of the following with the **Require Two-Factor Authentication for all Username/Password authenticating users** toggle:
+
+   - Turn on the toggle to enable 2FA
+   - Turn off the toggle to disable 2FA
+
+1. Click **Save changes**.
+
+
+
+
+---
+
+
+# Managing Team Members
+
+import CollabRepoAbout from "../partials/collab-repo/_collab-repo-about.mdx"
+import CollabRbacImportant from "../partials/collab-repo/_collab-rbac-important.mdx"
+
+# Managing Team Members
+
+This topic describes how to manage team members in the Replicated Vendor Portal, such as inviting and removing members, and editing permissions. For information about managing user access to the Replicated collab repository in GitHub, see [Managing Collab Repository Access](team-management-github-username).
+
+## Viewing Team Members
+The [Team](https://vendor.replicated.com/team/members) page provides a list of all accounts currently associated with or invited to your team. Each row contains information about the user, including their two-factor authentication (2FA) status and role-based access control (RBAC) role, and lets administrators take additional actions, such as remove, re-invite, and edit permissions.
+
+<img src="/images/teams-view.png" alt="View team members list in the Vendor Portal" width="700"/>
+
+[View a larger image](/images/teams-view.png)
+
+All users, including read-only users, can see the name of the RBAC role assigned to each team member. The exception is that, when SAML authentication is enabled, users with the built-in read-only policy cannot see the RBAC role assigned to team members.
+
+## Invite Members
+By default, team administrators can invite more team members to collaborate. Invited users receive an email to activate their account. The activation link in the email is unique to the invited user. Following the activation link in the email also ensures that the invited user joins the team from which the invitation originated.
+
+:::note
+Teams that have enforced SAML-only authentication do not use the email invitation flow described in this procedure. These teams and their users must log in through their SAML provider.
+:::
+
+To invite a new team member:
+
+1. From the [Team Members](https://vendor.replicated.com/team/members) page, click **Invite team member**.
+
+   The Invite team member dialog opens.
+
+   <img src="/images/teams-invite-member.png" alt="Invite team member dialog in the Vendor Portal" width="500"/>
+
+   [View a larger version of this image](/images/teams-invite-member.png)
+
+1. Enter the email address of the member.
+
+1. In the **Permissions** field, assign an RBAC policy from the dropdown list.
+
+   <CollabRbacImportant/>
+
+1. Click **Invite member**.
+
+   People invited to join your team receive an email notification to accept the invitation. They must follow the link in the email to accept the invitation and join the team. If they do not have a Replicated account already, they can create one that complies with your password policies, 2FA, and Google authentication requirements. If an invited user's email address is already associated with a Replicated account, by accepting your invitation, they automatically leave their current team and join the team that you have invited them to.
+
+## Managing Invitations
+
+Invitations expire after 7 days. If a prospective member has not accepted their invitation in this time frame, you can re-invite them without having to reenter their details. You can also remove the prospective member from the list.
+
+You must be an administrator to perform this action.
+
+To re-invite or remove a prospective member, do one of the following on the **Team Members** page:
+
+* Click **Reinvite** from the row with the user's email address, and then click **Reinvite** in the confirmation dialog.
+
+* Click **Remove** from the row with the user's email address, and then click **Delete Invitation** in the confirmation dialog.
+
+## Edit Policy Permissions
+
+You can edit the RBAC policy that is assigned to a member at any time.
+
+<CollabRbacImportant/>
+
+To edit policy permissions for individual team members:
+
+1. From the Team Members list, click **Edit permissions** next to a member's name.
+
+   :::note
+   The two-factor authentication (2FA) status displays on the **Team members** page, but it is not configured on this page. For more information about configuring 2FA, see [Managing Two-Factor Authentication](team-management-two-factor-auth).
+   :::
+
+1. Select an RBAC policy from the **Permissions** dropdown list, and click **Save**. For information about configuring the RBAC policies that display in this list, see [Configuring RBAC Policies](team-management-rbac-configuring).
+
+   <img src="/images/teams-edit-permissions.png" alt="Edit team member permissions in the Vendor Portal" width="400"/>
+
+## Enable Users to Auto-join Your Team
+By default, users must be invited to your team. Team administrators can use the auto-join feature to allow users from the same email domain to join their team automatically. This applies to users registering with an email address, or with Google authentication if it is enabled for the team. The auto-join feature does not apply to SAML authentication because SAML users log in using their SAML provider's application portal instead of the Vendor Portal.
+
+To add, edit, or delete custom RBAC policies, see [Configuring RBAC Policies](team-management-rbac-configuring).
+
+To enable users to auto-join your team:
+
+1. From the Team Members page, click **Auto-join** from the left navigation.
+1. Enable the **Allow all users from my domain to be added to my team** toggle.
+
+   <img src="/images/teams-auto-join.png" alt="Auto join dialog in the Vendor Portal" width="600"/>
+
+   [View a larger image](/images/teams-auto-join.png)
+
+1. For **Default RBAC policy level for new accounts**, you can use the default Read Only policy or select another policy from the list. This RBAC policy is applied to all users who join the team with the auto-join feature.
+
+   <CollabRbacImportant/>
+
+
+## Remove Members and End Sessions
+As a Vendor Portal team admin, you can remove team members, except for the account you are currently logged in with.
+
+If the team member that you remove added their GitHub username to their Account Settings page in the Vendor Portal to access the Replicated collab repository, then the Vendor Portal also automatically removes their username from the collab repository. For more information, see [Managing Collab Repository Access](team-management-github-username).
+
+SAML-created users must be removed using this method to expire their existing sessions because Replicated does not support System for Cross-domain Identity Management (SCIM).
+
+To remove a member:
+
+1. From the Team Members page, click **Remove** on the right side of a user's row.
+
+1. Click **Remove** in the confirmation dialog.
+
+   The member is removed. All of their current user sessions are deleted and their next attempt at communicating with the server logs them out of their browser's session.
+
+   If the member added their GitHub username to the Vendor Portal to access the collab repository, then the Vendor Portal also removes their GitHub username from the collab repository.
+
+   For Google-authenticated users, if the user's Google account is suspended or deleted, Replicated logs that user out of all Google-authenticated Vendor Portal sessions within 10 minutes. The user remains in the team list, but they cannot log in to the Vendor Portal unless username and password authentication is allowed for the team.
+
+## Update Email Addresses
+
+:::important
+Changing team member email addresses has security implications. Replicated advises that you avoid changing team member email addresses if possible.
+:::
+
+Updating the email address for a team member requires creating a new account with the updated email address, and then deactivating the previous account.
+
+To update the email address for a team member:
+
+1. From the Team Members page, click **Invite team member**.
+
+1. Assign the required RBAC policies to the new user.
+
+1. Deactivate the previous team member account.
+
+---
+
+
+# Collecting Telemetry for Air Gap Instances
+
+import AirGapTelemetry from "../partials/instance-insights/_airgap-telemetry.mdx"
+
+# Collecting Telemetry for Air Gap Instances
+
+This topic describes how to collect telemetry for instances in air gap environments.
+
+## Overview
+
+Air gap instances run in environments without outbound internet access. This limitation prevents these instances from periodically sending telemetry to the Replicated Vendor Portal through the Replicated SDK or Replicated KOTS. For more information about how the Vendor Portal collects telemetry from online (internet-connected) instances, see [About Instance and Event Data](/vendor/instance-insights-event-data#about-reporting).
+
+<AirGapTelemetry/>
+
+The following diagram demonstrates how air gap telemetry is collected and stored by the Replicated SDK in a customer environment, and then shared with the Vendor Portal in a support bundle:
+
+<img alt="Air gap telemetry collected by the SDK in a support bundle" src="/images/airgap-telemetry.png" width="800px"/>
+
+[View a larger version of this image](/images/airgap-telemetry.png)
+
+All support bundles uploaded to the Vendor Portal from air gap customers contribute to a comprehensive dataset, providing parity in the telemetry for air gap and online instances. Replicated recommends that you collect support bundles from air gap customers regularly (monthly or quarterly) to improve the completeness of the dataset. The Vendor Portal handles any overlapping event archives idempotently, ensuring data integrity.
+
+## Requirement
+
+Air gap telemetry has the following requirements:
+
+* To collect telemetry from air gap instances, one of the following must be installed in the cluster where the instance is running:
+
+   * The Replicated SDK installed in air gap mode. See [Installing the SDK in Air Gap Environments](/vendor/replicated-sdk-airgap).
+
+   * KOTS v1.92.1 or later
+
+   :::note
+   When both the Replicated SDK and KOTS v1.92.1 or later are installed in the cluster (such as when a Helm chart that includes the SDK is installed by KOTS), both collect and store instance telemetry in their own dedicated secret, subject to the size limitation noted below. In the case of any overlapping data points, the Vendor Portal will report these data points chronologically based on their timestamp.
+   :::
+
+* To collect custom metrics from air gap instances, the Replicated SDK must be installed in the cluster in air gap mode. See [Installing the SDK in Air Gap Environments](/vendor/replicated-sdk-airgap).
+
+   For more information about custom metrics, see [Configuring Custom Metrics](https://docs.replicated.com/vendor/custom-metrics).
+
+Replicated strongly recommends that all applications include the Replicated SDK because it enables access to both standard instance telemetry and custom metrics for air gap instances.
+
+## Limitation
+
+Telemetry data is capped at 4,000 events or 1 MB per secret, whichever limit is reached first.
+
+When a limit is reached, the oldest events are purged until the payload is within the limit. For optimal use, consider collecting support bundles regularly (monthly or quarterly) from air gap customers.
+
+## Collect and View Air Gap Telemetry
+
+To collect telemetry from air gap instances:
+
+1. Ask your customer to collect a support bundle. See [Generating Support Bundles](/vendor/support-bundle-generating).
+
+1. After receiving the support bundle from your customer, go to the Vendor Portal **Customers**, **Customer Reporting**, or **Instance Details** page and upload the support bundle:
+
+   ![upload new bundle button on instance details page](/images/airgap-upload-telemetry.png)
+
+   The telemetry collected from the support bundle appears in the instance data shortly. Allow a few minutes for all data to be processed.
+
+
+---
+
+
+# About Compatibility Matrix
+
+import Overview from "../partials/cmx/_overview.mdx"
+import SupportedClusters from "../partials/cmx/_supported-clusters-overview.mdx"
+
+# About Compatibility Matrix
+
+This topic describes Replicated Compatibility Matrix, including use cases, billing, limitations, and more.
+
+## Overview
+
+<Overview/>
+
+You can use Compatibility Matrix with the Replicated CLI or the Replicated Vendor Portal.
For more information about how to use Compatibility Matrix, see [Using Compatibility Matrix](testing-how-to).
+
+### Supported Clusters
+
+<SupportedClusters/>
+
+### Billing and Credits
+
+Clusters created with Compatibility Matrix are billed by the minute. Per-minute billing begins when the cluster reaches a `running` status and ends when the cluster is deleted. Compatibility Matrix marks a cluster as `running` when a working kubeconfig for the cluster is accessible.
+
+You are billed only for the time that the cluster is in a `running` status. You are _not_ billed for the time that it takes Compatibility Matrix to create and tear down clusters, including when the cluster is in an `assigned` status.
+
+For more information about pricing, see [Compatibility Matrix Pricing](testing-pricing).
+
+To create clusters with Compatibility Matrix, you must have credits in your Vendor Portal account.
+If you have a contract, you can purchase credits by logging in to the Vendor Portal and going to [**Compatibility Matrix > Buy additional credits**](https://vendor.replicated.com/compatibility-matrix).
+Otherwise, to request credits, log in to the Vendor Portal and go to [**Compatibility Matrix > Request more credits**](https://vendor.replicated.com/compatibility-matrix).
+
+### Quotas and Capacity
+
+By default, Compatibility Matrix sets quotas for the capacity that can be used concurrently by each Vendor Portal team. These quotas are designed to ensure that Replicated maintains a minimum amount of capacity for provisioning both VM and cloud-based clusters.
+
+By default, the quota for cloud-based cluster distributions (AKS, GKE, EKS) is three clusters running concurrently.
+
+VM-based cluster distributions (such as kind, OpenShift, and Replicated Embedded Cluster) have the following default quotas:
+* 32 vCPUs
+* 128 GiB memory
+* 800 GiB disk size
+
+You can request increased quotas at any time with no additional cost. To view your team's current quota and capacity usage, or to request a quota increase, go to [**Compatibility Matrix > Settings**](https://vendor.replicated.com/compatibility-matrix/settings) in the Vendor Portal:
+
+![Compatibility matrix settings page](/images/compatibility-matrix-settings.png)
+
+[View a larger version of this image](/images/compatibility-matrix-settings.png)
+
+### Cluster Status
+
+Clusters created with Compatibility Matrix can have the following statuses:
+
+* `assigned`: The cluster resources were requested and Compatibility Matrix is provisioning the cluster. You are not billed for the time that a cluster spends in the `assigned` status.
+
+* `running`: A working kubeconfig for the cluster is accessible. Billing begins when the cluster reaches a `running` status.
+
+   Additionally, clusters are verified prior to transitioning to a `running` status. Verification includes checking that the cluster is healthy and running with the correct number of nodes, as well as passing [sonobuoy](https://sonobuoy.io/) tests in `--quick` mode.
+
+* `terminated`: The cluster is deleted. Billing ends when the cluster status is changed from `running` to `terminated`.
+
+* `error`: An error occurred when attempting to provision the cluster.
+
+You can view the status of clusters using the `replicated cluster ls` command. For more information, see [cluster ls](/reference/replicated-cli-cluster-ls).
+
+### Cluster Add-ons
+
+The Replicated Compatibility Matrix enables you to extend your cluster with add-ons for use by your application, such as an AWS S3 object store.
+This allows you to more easily provision dependencies required by your application.
+
+For more information about how to use the add-ons, see [Compatibility Matrix Cluster Add-ons](testing-cluster-addons).
+
+## Limitations
+
+Compatibility Matrix has the following limitations:
+
+- Clusters cannot be resized. Create another cluster if you want to make changes, such as adding another node.
+- Clusters cannot be rebooted. Create another cluster if you need to reset/reboot the cluster.
+- On cloud clusters, node groups are not available for every distribution. For distribution-specific details, see [Supported Compatibility Matrix Cluster Types](/vendor/testing-supported-clusters).
+- Multi-node support is not available for every distribution. For distribution-specific details, see [Supported Compatibility Matrix Cluster Types](/vendor/testing-supported-clusters).
+- ARM instance types are only supported on Cloud Clusters. For distribution-specific details, see [Supported Compatibility Matrix Cluster Types](/vendor/testing-supported-clusters).
+- GPU instance types are only supported on Cloud Clusters. For distribution-specific details, see [Supported Compatibility Matrix Cluster Types](/vendor/testing-supported-clusters).
+- There is no support for IPv6 as a single stack. Dual stack support is available on kind clusters.
+- There is no support for air gap testing.
+- The `cluster upgrade` feature is available only for kURL distributions. See [cluster upgrade](/reference/replicated-cli-cluster-upgrade).
+- Cloud clusters do not allow for the configuration of CNI, CSI, CRI, Ingress, or other plugins, add-ons, services, and interfaces.
+- The node operating systems for clusters created with Compatibility Matrix cannot be configured or replaced with different operating systems.
+- The Kubernetes scheduler for clusters created with Compatibility Matrix cannot be replaced with a different scheduler.
+- Each team has a quota limit on the amount of resources that can be used simultaneously. This limit can be raised by messaging your account representative.
+- Team actions with Compatibility Matrix (for example, creating and deleting clusters and requesting quota increases) are not logged or displayed in the [Vendor Team Audit Log](https://vendor.replicated.com/team/audit-log).
+
+For additional distribution-specific limitations, see [Supported Compatibility Matrix Cluster Types](testing-supported-clusters).
+
+
+---
+
+
+# Compatibility Matrix Cluster Add-ons (Alpha)
+
+# Compatibility Matrix Cluster Add-ons (Alpha)
+
+This topic describes the supported cluster add-ons for Replicated Compatibility Matrix.
+
+## Overview
+
+Replicated Compatibility Matrix enables you to extend your cluster with add-ons for use by your application, such as an AWS S3 object store.
+This allows you to more easily provision dependencies required by your application.
+
+## CLI
+
+The Replicated CLI can be used to [create](/reference/replicated-cli-cluster-addon-create), [manage](/reference/replicated-cli-cluster-addon-ls), and [remove](/reference/replicated-cli-cluster-addon-rm) cluster add-ons.
+
+## Supported Add-ons
+
+This section lists the supported cluster add-ons for clusters created with Compatibility Matrix.
+
+### object-store (Alpha)
+
+The Replicated cluster object store add-on can be used to create S3-compatible object store buckets for clusters (currently only AWS S3 is supported for EKS clusters).
+
+Assuming you already have a cluster, run the following command with the cluster ID to create an object store bucket:
+
+```bash
+$ replicated cluster addon create object-store 4d2f7e70 --bucket-prefix mybucket
+05929b24    Object Store    pending    {"bucket_prefix":"mybucket"}
+$ replicated cluster addon ls 4d2f7e70
+ID          TYPE            STATUS     DATA
+05929b24    Object Store    ready      {"bucket_prefix":"mybucket","bucket_name":"mybucket-05929b24-cmx","service_account_namespace":"cmx","service_account_name":"mybucket-05929b24-cmx","service_account_name_read_only":"mybucket-05929b24-cmx-ro"}
+```
+
+This creates two service accounts in a namespace: one with read-write access and the other with read-only access to the object store bucket.
+
+Additional service accounts can be created in any namespace with access to the object store by annotating the new service account with the same `eks.amazonaws.com/role-arn` annotation found in the predefined ones (`service_account_name` and `service_account_name_read_only`).
+
+<table>
+  <tr>
+    <th width="35%">Type</th>
+    <th width="65%">Description</th>
+  </tr>
+  <tr>
+    <th>Supported Kubernetes Distributions</th>
+    <td>EKS (AWS S3)</td>
+  </tr>
+  <tr>
+    <th>Cost</th>
+    <td>Flat fee of $0.50 per bucket.</td>
+  </tr>
+  <tr>
+    <th>Options</th>
+    <td>
+      <ul>
+        <li><strong>bucket_prefix (string):</strong> A prefix for the bucket name to be created (required)</li>
+      </ul>
+    </td>
+  </tr>
+  <tr>
+    <th>Data</th>
+    <td>
+      <ul>
+        <li><strong>bucket_prefix:</strong> The prefix specified by the user for the bucket name</li>
+        <li><strong>bucket_name:</strong> The actual bucket name</li>
+        <li><strong>service_account_namespace:</strong> The namespace in which the service accounts (`service_account_name` and `service_account_name_read_only`) have been created.</li>
+        <li><strong>service_account_name:</strong> The service account name for read-write access to the bucket.</li>
+        <li><strong>service_account_name_read_only:</strong> The service account name for read-only access to the bucket.</li>
+      </ul>
+    </td>
+  </tr>
+</table>
+
+
+
+---
+
+
+# Using Compatibility Matrix
+
+import TestRecs from "../partials/ci-cd/_test-recs.mdx"
+import Prerequisites from "../partials/cmx/_prerequisites.mdx"
+
+# Using Compatibility Matrix
+
+This topic describes how to use Replicated Compatibility Matrix to create ephemeral clusters.
+
+## Prerequisites
+
+Before you can use Compatibility Matrix, you must complete the following prerequisites:
+
+<Prerequisites/>
+
+* Existing accounts must accept the TOS for the trial on the [**Compatibility Matrix**](https://vendor.replicated.com/compatibility-matrix) page in the Replicated Vendor Portal.
+
+## Create and Manage Clusters
+
+This section explains how to use Compatibility Matrix to create and manage clusters with the Replicated CLI or the Vendor Portal.
+
+For information about creating and managing clusters with the Vendor API v3, see the [clusters](https://replicated-vendor-api.readme.io/reference/listclusterusage) section in the Vendor API v3 documentation.
+
+### Create Clusters
+
+You can create clusters with Compatibility Matrix using the Replicated CLI or the Vendor Portal.
+
+#### Replicated CLI
+
+To create a cluster using the Replicated CLI:
+
+1. (Optional) View the available cluster distributions, including the supported Kubernetes versions, instance types, and maximum nodes for each distribution:
+
+   ```bash
+   replicated cluster versions
+   ```
+   For command usage, see [cluster versions](/reference/replicated-cli-cluster-versions).
+
+1. Run the following command to create a cluster:
+
+   ```
+   replicated cluster create --name NAME --distribution K8S_DISTRO --version K8S_VERSION --disk DISK_SIZE --instance-type INSTANCE_TYPE [--license-id LICENSE_ID]
+   ```
+   Where:
+   * `NAME` is any name for the cluster. If `--name` is excluded, a name is automatically generated for the cluster.
+   * `K8S_DISTRO` is the Kubernetes distribution for the cluster.
+   * `K8S_VERSION` is the Kubernetes version for the cluster if creating a standard Cloud or VM-based cluster. If creating an Embedded Cluster or kURL cluster type, `--version` is optional:
+       * For Embedded Cluster types, `--version` is the latest available release on the channel by default. Otherwise, to specify a different release, set `--version` to the `Channel release sequence` value for the release.
+       * For kURL cluster types, `--version` is the `"latest"` kURL Installer ID by default. Otherwise, to specify a different kURL Installer, set `--version` to the kURL Installer ID.
+   * `DISK_SIZE` is the disk size (GiB) to request per node.
+   * `INSTANCE_TYPE` is the instance type to use for each node.
+   * (Embedded Cluster Only) `LICENSE_ID` is a valid customer license. Required to create an Embedded Cluster.
+
+   For command usage and additional optional flags, see [cluster create](/reference/replicated-cli-cluster-create).
+
+   **Example:**
+
+   The following example creates a kind cluster with Kubernetes version 1.27.0, a disk size of 100 GiB, and an instance type of `r1.small`.
+
+   ```bash
+   replicated cluster create --name kind-example --distribution kind --version 1.27.0 --disk 100 --instance-type r1.small
+   ```
+
+1. Verify that the cluster was created:
+
+   ```bash
+   replicated cluster ls CLUSTER_NAME
+   ```
+   Where `CLUSTER_NAME` is the name of the cluster that you created.
+
+   In the output of the command, you can see that the `STATUS` of the cluster is `assigned`. When the kubeconfig for the cluster is accessible, the cluster's status is changed to `running`. For more information about cluster statuses, see [Cluster Status](testing-about#cluster-status) in _About Compatibility Matrix._
+
+#### Vendor Portal
+
+To create a cluster using the Vendor Portal:
+
+1. Go to [**Compatibility Matrix > Create cluster**](https://vendor.replicated.com/compatibility-matrix/create-cluster).
+
+   <img alt="Create a cluster page" src="/images/create-a-cluster.png" width="650px"/>
+
+   [View a larger version of this image](/images/create-a-cluster.png)
+
+1. On the **Create a cluster** page, complete the following fields:
+
+   <table>
+     <tr>
+       <th>Field</th>
+       <th>Description</th>
+     </tr>
+     <tr>
+       <td>Kubernetes distribution</td>
+       <td>Select the Kubernetes distribution for the cluster.</td>
+     </tr>
+     <tr>
+       <td>Version</td>
+       <td>Select the Kubernetes version for the cluster. The options available are specific to the distribution selected.</td>
+     </tr>
+     <tr>
+       <td>Name (optional)</td>
+       <td>Enter an optional name for the cluster.</td>
+     </tr>
+     <tr>
+       <td>Tags</td>
+       <td>Add one or more tags to the cluster as key-value pairs.</td>
+     </tr>
+     <tr>
+       <td>Set TTL</td>
+       <td>Select the Time to Live (TTL) for the cluster. When the TTL expires, the cluster is automatically deleted.
TTL can be adjusted after cluster creation with [cluster update ttl](/reference/replicated-cli-cluster-update-ttl).</td>
+     </tr>
+   </table>
+
+1. For **Nodes & Node Groups**, complete the following fields to configure nodes and node groups for the cluster:
+
+   <table>
+     <tr>
+       <th>Field</th>
+       <th>Description</th>
+     </tr>
+     <tr>
+       <td>Instance type</td>
+       <td>Select the instance type to use for the nodes in the node group. The options available are specific to the distribution selected.</td>
+     </tr>
+     <tr>
+       <td>Disk size</td>
+       <td>Select the disk size in GiB to use per node.</td>
+     </tr>
+     <tr>
+       <td>Nodes</td>
+       <td>Select the number of nodes to provision in the node group. The options available are specific to the distribution selected.</td>
+     </tr>
+   </table>
+
+1. (Optional) Click **Add node group** to add additional node groups.
+
+1. Click **Create cluster**.
+
+   The cluster is displayed in the list of clusters on the **Compatibility Matrix** page with a status of Assigned. When the kubeconfig for the cluster is accessible, the cluster's status is changed to Running.
+
+   :::note
+   If the cluster is not automatically displayed, refresh your browser window.
+   :::
+
+   <img alt="Cluster configuration dialog" src="/images/cmx-assigned-cluster.png" width="700px"/>
+
+   [View a larger version of this image](/images/cmx-assigned-cluster.png)
+
+### Prepare Clusters
+
+For applications distributed with the Replicated Vendor Portal, the [`cluster prepare`](/reference/replicated-cli-cluster-prepare) command reduces the number of steps required to provision a cluster and then deploy a release to the cluster for testing. This is useful in continuous integration (CI) workflows that run multiple times a day. For an example workflow that uses the `cluster prepare` command, see [Recommended CI/CD Workflows](/vendor/ci-workflows).
+
+The `cluster prepare` command does the following:
+* Creates a cluster
+* Creates a release for your application based on either a Helm chart archive or a directory containing the application YAML files
+* Creates a temporary customer of type `test`
+   :::note
+   Test customers created by the `cluster prepare` command are not saved in your Vendor Portal team.
+   :::
+* Installs the release in the cluster using either the Helm CLI or Replicated KOTS
+
+The `cluster prepare` command requires either a Helm chart archive or a directory containing the application YAML files to be installed:
+
+* **Install a Helm chart with the Helm CLI**:
+
+   ```bash
+   replicated cluster prepare \
+    --distribution K8S_DISTRO \
+    --version K8S_VERSION \
+    --chart HELM_CHART_TGZ
+   ```
+   The following example creates a kind cluster and installs a Helm chart in the cluster using the `nginx-chart-0.0.14.tgz` chart archive:
+   ```bash
+   replicated cluster prepare \
+    --distribution kind \
+    --version 1.27.0 \
+    --chart nginx-chart-0.0.14.tgz \
+    --set key1=val1,key2=val2 \
+    --set-string s1=val1,s2=val2 \
+    --set-json j1='{"key1":"val1","key2":"val2"}' \
+    --set-literal l1=val1,l2=val2 \
+    --values values.yaml
+   ```
+
+* **Install with KOTS from a YAML directory**:
+
+   ```bash
+   replicated cluster prepare \
+    --distribution K8S_DISTRO \
+    --version K8S_VERSION \
+    --yaml-dir PATH_TO_YAML_DIR
+   ```
+   The following example creates a k3s cluster and installs an application in the cluster using the manifest files in a local directory named `config-validation`:
+   ```bash
+   replicated cluster prepare \
+    --distribution k3s \
+    --version 1.26 \
+    --namespace config-validation \
+    --shared-password password \
+    --app-ready-timeout 10m \
+    --yaml-dir config-validation \
+    --config-values-file config-values.yaml \
+    --entitlements "num_of_queues=5"
+   ```
+
+For command usage, including additional options, see [cluster prepare](/reference/replicated-cli-cluster-prepare).
+
+### Access Clusters
+
+Compatibility Matrix provides the kubeconfig for clusters so that you can access clusters with the kubectl command line tool. For more information, see [Command line tool (kubectl)](https://kubernetes.io/docs/reference/kubectl/) in the Kubernetes documentation.
+
+To access a cluster from the command line:
+
+1. Verify that the cluster is in a Running state:
+
+   ```bash
+   replicated cluster ls
+   ```
+   In the output of the command, verify that the `STATUS` for the target cluster is `running`. For command usage, see [cluster ls](/reference/replicated-cli-cluster-ls).
+
+1. Run the following command to open a new shell session with the kubeconfig configured for the cluster:
+
+   ```bash
+   replicated cluster shell CLUSTER_ID
+   ```
+   Where `CLUSTER_ID` is the unique ID for the running cluster that you want to access.
+
+   For command usage, see [cluster shell](/reference/replicated-cli-cluster-shell).
+
+1. Verify that you can interact with the cluster through kubectl by running a command. For example:
+
+   ```bash
+   kubectl get ns
+   ```
+
+1. Press Ctrl-D or type `exit` when done to end the shell and the connection to the server.
+
+### Upgrade Clusters (kURL Only)
+
+For kURL clusters provisioned with Compatibility Matrix, you can use the `cluster upgrade` command to upgrade the version of the kURL installer specification used to provision the cluster. A recommended use case for the `cluster upgrade` command is for testing your application's compatibility with Kubernetes API resource version migrations after upgrade.
+
+The following example upgrades a kURL cluster from its previous version to version `9d5a44c`:
+
+```bash
+replicated cluster upgrade cabb74d5 --version 9d5a44c
+```
+
+For command usage, see [cluster upgrade](/reference/replicated-cli-cluster-upgrade).
+
+### Delete Clusters
+
+You can delete clusters using the Replicated CLI or the Vendor Portal.
+
+#### Replicated CLI
+
+To delete a cluster using the Replicated CLI:
+
+1. Get the ID of the target cluster:
+
+   ```
+   replicated cluster ls
+   ```
+   In the output of the command, copy the ID for the cluster.
+
+   **Example:**
+
+   ```
+   ID          NAME                           DISTRIBUTION   VERSION   STATUS    CREATED                       EXPIRES
+   1234abc     My Test Cluster                eks            1.27      running   2023-10-09 17:08:01 +0000 UTC -
+   ```
+
+   For command usage, see [cluster ls](/reference/replicated-cli-cluster-ls).
+
+1. Run the following command:
+
+   ```
+   replicated cluster rm CLUSTER_ID
+   ```
+   Where `CLUSTER_ID` is the ID of the target cluster.
+
+   For command usage, see [cluster rm](/reference/replicated-cli-cluster-rm).
+
+1. Confirm that the cluster was deleted:
+
+   ```
+   replicated cluster ls CLUSTER_ID --show-terminated
+   ```
+   Where `CLUSTER_ID` is the ID of the target cluster.
+
+   In the output of the command, you can see that the `STATUS` of the cluster is `terminated`. For command usage, see [cluster ls](/reference/replicated-cli-cluster-ls).
+
+#### Vendor Portal
+
+To delete a cluster using the Vendor Portal:
+
+1. Go to **Compatibility Matrix**.
+
+1. Under **Clusters**, in the vertical dots menu for the target cluster, click **Delete cluster**.
+
+   <img alt="Delete cluster button" src="/images/cmx-delete-cluster.png" width="700px"/>
+
+   [View a larger version of this image](/images/cmx-delete-cluster.png)
+
+## About Using Compatibility Matrix with CI/CD
+
+Replicated recommends that you integrate Compatibility Matrix into your existing CI/CD workflow to automate the process of creating clusters to install your application and run tests. For more information, including additional best practices and recommendations for CI/CD, see [About Integrating with CI/CD](/vendor/ci-overview).
+
+### Replicated GitHub Actions
+
+Replicated maintains a set of custom GitHub actions that are designed to replace repetitive tasks related to using Compatibility Matrix and distributing applications with Replicated.
+
+If you use GitHub Actions as your CI/CD platform, you can include these custom actions in your workflows rather than using Replicated CLI commands. Integrating the Replicated GitHub actions into your CI/CD pipeline helps you quickly build workflows with the required inputs and outputs, without needing to manually create the required CLI commands for each step.
+
+To view all the available GitHub actions that Replicated maintains, see the [replicatedhq/replicated-actions](https://github.com/replicatedhq/replicated-actions/) repository in GitHub.
+
+For more information, see [Integrating Replicated GitHub Actions](/vendor/ci-workflows-github-actions).
+
+### Recommended Workflows
+
+Replicated recommends that you maintain unique CI/CD workflows for development (continuous integration) and for releasing your software (continuous delivery). For example development and release workflows that integrate Compatibility Matrix for testing, see [Recommended CI/CD Workflows](/vendor/ci-workflows).
+
+### Test Script Recommendations
+
+Incorporating code tests into your CI/CD workflows is important for ensuring that developers receive quick feedback and can make updates in small iterations. Replicated recommends that you create and run all of the following test types as part of your CI/CD workflows:
+
+<TestRecs/>
+
+
+---
+
+
+# Accessing Your Application
+
+# Accessing Your Application
+
+This topic describes the networking options for accessing applications deployed on clusters created with Replicated Compatibility Matrix.
It also describes how to use and manage Compatibility Matrix tunnels.
+
+## Networking Options
+
+After deploying your application into Compatibility Matrix clusters, you will want to execute your tests using your own test runner.
+In order to do this, you need to access your application.
+Compatibility Matrix offers several methods to access your application.
+
+Some standard Kubernetes networking options are available, but vary based on the distribution.
+For VM-based distributions, there is no default network route into the cluster, making inbound connections challenging to create.
+
+### Port Forwarding
+Port forwarding is a low-cost and portable mechanism to access your application.
+Port forwarding works on all clusters supported by Compatibility Matrix because the connection is initiated from the client, over the Kubernetes API server port.
+If you have a single service or pod and are not worried about complex routing, this is a good mechanism.
+The basic steps are to connect the port-forward, execute your tests against localhost, and then shut down the port-forward.
+
+### LoadBalancer
+If your application is only running on cloud services (EKS, GKE, AKS), you can create a service of type `LoadBalancer`.
+This will provision the cloud-provider-specific load balancer.
+The `LoadBalancer` service will be fulfilled by the in-tree Kubernetes functionality that's integrated with the underlying cloud provider.
+You can then query the service definition using `kubectl` and connect to and execute your tests over the `LoadBalancer` IP address.
+
+### Ingress
+Ingress is a good way to recreate customer-representative environments, but the problem remains of how to get inbound access to the IP address that the ingress controller allocates.
+Ingress is also not perfectly portable; each ingress controller might require different annotations in the ingress resource to work properly.
+Supported ingress controllers vary based on the distribution.
+Compatibility Matrix supports ingress controllers that are running as a `NodePort` service.
+
+### Compatibility Matrix Tunnels
+All VM-based Compatibility Matrix clusters support tunneling traffic into a `NodePort` service.
+When this option is used, Replicated is responsible for creating the DNS record and TLS certs.
+Replicated will route traffic from `:443` and/or `:80` into the `NodePort` service you defined. For more information about using tunnels, see [Managing Compatibility Matrix Tunnels](#manage-nodes) below.
+
+The following diagram shows how the traffic is routed into the service using Compatibility Matrix tunnels:
+
+<img src="/images/compatibility-matrix-ingress.png" alt="Compatibility Matrix ingress"></img>
+
+[View a larger version of this image](/images/compatibility-matrix-ingress.png)
+
+## Managing Compatibility Matrix Tunnels {#manage-nodes}
+
+Tunnels are viewed, created, and removed using the Compatibility Matrix UI within the Vendor Portal, the Replicated CLI, GitHub Actions, or directly with the Vendor API v3. There is no limit to the number of tunnels you can create for a cluster and multiple tunnels can connect to a single service, if desired.
+
+### Limitations
+
+Compatibility Matrix tunnels have the following limitations:
+* One tunnel can only connect to one service. If you need fanout routing into different services, consider installing the nginx ingress controller as a `NodePort` service and exposing it (see the sketch after this list).
+* Tunnels are not supported for cloud distributions (EKS, GKE, AKS).
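+
+For the fanout case noted above, the following is a minimal sketch (not an official Replicated procedure) of installing the community ingress-nginx controller as a `NodePort` service and then exposing its node port through a tunnel.
+The namespace, release name, and node port numbers (`32456`, `32443`) are arbitrary choices, and the `--set` values follow the upstream ingress-nginx Helm chart rather than anything Replicated-specific:
+
+```bash
+# Install ingress-nginx as a NodePort service with deterministic node ports.
+helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
+helm repo update
+helm install ingress-nginx ingress-nginx/ingress-nginx \
+  --namespace ingress-nginx --create-namespace \
+  --set controller.service.type=NodePort \
+  --set controller.service.nodePorts.http=32456 \
+  --set controller.service.nodePorts.https=32443
+
+# Route tunnel traffic into the controller's node port (see Exposing Ports below).
+replicated cluster port expose CLUSTER_ID \
+  --port 32456 \
+  --protocol http \
+  --protocol https
+```
+
+Individual Ingress resources can then route hostnames or paths to different backend services behind the single tunnel.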
+
+### Supported Protocols
+
+A tunnel can support one or more protocols.
+The supported protocols are HTTP, HTTPS, WS, and WSS.
+gRPC and other protocols are not routed into the cluster.
+
+### Exposing Ports
+Once you have a node port available on the cluster, you can use the Replicated CLI to expose the node port to the public internet.
+This can be used multiple times on a single cluster.
+
+Optionally, you can specify the `--wildcard` flag to expose this port with a wildcard DNS record and TLS certificate.
+This feature adds extra time to provision the port, so it should only be used if necessary.
+
+```bash
+replicated cluster port expose \
+    [cluster id] \
+    --port [node port] \
+    --protocol [protocol] \
+    --wildcard
+```
+
+For example, if you have the nginx ingress controller installed and the node port is 32456:
+
+```bash
+% replicated cluster ls
+ID          NAME            DISTRIBUTION    VERSION     STATUS
+1e616c55    tender_ishizaka k3s             1.29.2      running
+
+% replicated cluster port expose \
+    1e616c55 \
+    --port 32456 \
+    --protocol http \
+    --protocol https \
+    --wildcard
+```
+
+:::note
+You can expose a node port that does not yet exist in the cluster.
+This is useful if you have a deterministic node port, but need the DNS name as a value in your Helm chart.
+:::
+
+### Viewing Ports
+To view all exposed ports, use the Replicated CLI `port ls` subcommand with the cluster ID:
+
+```bash
+% replicated cluster port ls 1e616c55
+ID          CLUSTER PORT    PROTOCOL        EXPOSED PORT                                           WILDCARD    STATUS
+d079b2fc    32456           http            http://happy-germain.ingress.replicatedcluster.com    true        ready
+
+d079b2fc    32456           https           https://happy-germain.ingress.replicatedcluster.com   true        ready
+```
+
+### Removing Ports
+Exposed ports are automatically deleted when a cluster terminates.
+If you want to remove a port (and the associated DNS records and TLS certs) prior to cluster termination, run the `port rm` subcommand with the cluster ID:
+
+```bash
+% replicated cluster port rm 1e616c55 --id d079b2fc
+```
+
+You can remove just one protocol, or all.
+Removing all protocols also removes the DNS record and TLS cert.
+
+
+---
+
+
+# Compatibility Matrix Pricing
+
+# Compatibility Matrix Pricing
+
+This topic describes the pricing for Replicated Compatibility Matrix.
+
+## Pricing Overview
+
+Compatibility Matrix usage-based pricing includes a $0.50 per-cluster startup cost, plus per-minute pricing based on instance size and count (starting at the time the cluster state changed to "running" and ending when the cluster is either expired (TTL) or removed). Minutes will be rounded up, so there will be a minimum charge of $0.50 plus 1 minute for all running clusters. Each cluster's cost will be rounded up to the nearest cent and subtracted from the available credits in the team account. For a worked example, see the calculation preceding the pricing tables below. Remaining credit balance is viewable on the Replicated Vendor Portal [Cluster History](https://vendor.replicated.com/compatibility-matrix/history) page or with the Vendor API v3 [/vendor/v3/cluster/stats](https://replicated-vendor-api.readme.io/reference/getclusterstats) endpoint. Cluster [add-ons](/vendor/testing-cluster-addons) may incur additional charges.
+
+If the team's available credits are insufficient to run the cluster for the full duration of the TTL, the cluster creation will be rejected.
+
+## Cluster Quotas
+
+Each team is limited in the number of clusters that it can run concurrently. To increase the quota, reach out to your account manager.
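+
+As a worked example of the pricing model described in Pricing Overview above, assume a single-node r1.medium VM cluster, listed below at $0.192 per hour ($0.0032 per minute), that runs for 44 minutes and 30 seconds: the runtime is rounded up to 45 minutes, so the total is $0.50 + (45 × $0.0032) = $0.644, which is rounded up to $0.65 and deducted from the team's credits.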
+
+## VM Cluster Pricing (OpenShift, RKE2, K3s, Kind, Embedded Cluster, kURL)
+
+VM-based clusters approximately match AWS m6i instance type pricing.
+
+<table>
+  <tr>
+    <th width="25%">Instance Type</th>
+    <th width="25%">VCPUs</th>
+    <th width="25%">Memory (GiB)</th>
+    <th width="25%">USD/Credit per hour</th>
+  </tr>
+  <tr>
+    <td>r1.small</td>
+    <td>2</td>
+    <td>8</td>
+    <td>$0.096</td>
+  </tr>
+  <tr>
+    <td>r1.medium</td>
+    <td>4</td>
+    <td>16</td>
+    <td>$0.192</td>
+  </tr>
+  <tr>
+    <td>r1.large</td>
+    <td>8</td>
+    <td>32</td>
+    <td>$0.384</td>
+  </tr>
+  <tr>
+    <td>r1.xlarge</td>
+    <td>16</td>
+    <td>64</td>
+    <td>$0.768</td>
+  </tr>
+  <tr>
+    <td>r1.2xlarge</td>
+    <td>32</td>
+    <td>128</td>
+    <td>$1.536</td>
+  </tr>
+</table>
+
+## Cloud Cluster Pricing
+
+### AWS EKS Cluster Pricing
+
+AWS clusters will be charged AWS pricing plus a markup of 20%. Note that the markup will be calculated at the rounded price per hour in order to make hourly prices fixed. Pricing for Extended Support EKS versions (those Kubernetes versions considered deprecated by upstream Kubernetes) will have additional charges applied.
+
+<table>
+  <tr>
+    <th width="25%">Instance Type</th>
+    <th width="25%">VCPUs</th>
+    <th width="25%">Memory (GiB)</th>
+    <th width="25%">USD/Credit per hour</th>
+  </tr>
+  <tr>
+    <td>m6i.large</td>
+    <td>2</td>
+    <td>8</td>
+    <td>$0.115</td>
+  </tr>
+  <tr>
+    <td>m6i.xlarge</td>
+    <td>4</td>
+    <td>16</td>
+    <td>$0.230</td>
+  </tr>
+  <tr>
+    <td>m6i.2xlarge</td>
+    <td>8</td>
+    <td>32</td>
+    <td>$0.461</td>
+  </tr>
+  <tr>
+    <td>m6i.4xlarge</td>
+    <td>16</td>
+    <td>64</td>
+    <td>$0.922</td>
+  </tr>
+  <tr>
+    <td>m6i.8xlarge</td>
+    <td>32</td>
+    <td>128</td>
+    <td>$1.843</td>
+  </tr>
+  <tr>
+    <td>m7i.large</td>
+    <td>2</td>
+    <td>8</td>
+    <td>$0.121</td>
+  </tr>
+  <tr>
+    <td>m7i.xlarge</td>
+    <td>4</td>
+    <td>16</td>
+    <td>$0.242</td>
+  </tr>
+  <tr>
+    <td>m7i.2xlarge</td>
+    <td>8</td>
+    <td>32</td>
+    <td>$0.484</td>
+  </tr>
+  <tr>
+    <td>m7i.4xlarge</td>
+    <td>16</td>
+    <td>64</td>
+    <td>$0.968</td>
+  </tr>
+  <tr>
+    <td>m7i.8xlarge</td>
+    <td>32</td>
+    <td>128</td>
+    <td>$1.935</td>
+  </tr>
+  <tr>
+    <td>m5.large</td>
+    <td>2</td>
+    <td>8</td>
+    <td>$0.115</td>
+  </tr>
+  <tr>
+    <td>m5.xlarge</td>
+    <td>4</td>
+    <td>16</td>
+    <td>$0.230</td>
+  </tr>
+  <tr>
+    <td>m5.2xlarge</td>
+    <td>8</td>
+    <td>32</td>
+    <td>$0.461</td>
+  </tr>
+  <tr>
+    <td>m5.4xlarge</td>
+    <td>16</td>
+    <td>64</td>
+    <td>$0.922</td>
+  </tr>
+  <tr>
+    <td>m5.8xlarge</td>
+    <td>32</td>
+    <td>128</td>
+    <td>$1.843</td>
+  </tr>
+  <tr>
+    <td>m7g.large</td>
+    <td>2</td>
+    <td>8</td>
+    <td>$0.098</td>
+  </tr>
+  <tr>
+    <td>m7g.xlarge</td>
+    <td>4</td>
+    <td>16</td>
+    <td>$0.195</td>
+  </tr>
+  <tr>
+    <td>m7g.2xlarge</td>
+    <td>8</td>
+    <td>32</td>
+    <td>$0.392</td>
+  </tr>
+  <tr>
+    <td>m7g.4xlarge</td>
+    <td>16</td>
+    <td>64</td>
+    <td>$0.784</td>
+  </tr>
+  <tr>
+    <td>m7g.8xlarge</td>
+    <td>32</td>
+    <td>128</td>
+    <td>$1.567</td>
+  </tr>
+  <tr>
+    <td>c5.large</td>
+    <td>2</td>
+    <td>4</td>
+    <td>$0.102</td>
+  </tr>
+  <tr>
+    <td>c5.xlarge</td>
+    <td>4</td>
+    <td>8</td>
+    <td>$0.204</td>
+  </tr>
+  <tr>
+    <td>c5.2xlarge</td>
+    <td>8</td>
+    <td>16</td>
+    <td>$0.408</td>
+  </tr>
+  <tr>
+    <td>c5.4xlarge</td>
+    <td>16</td>
+    <td>32</td>
+    <td>$0.816</td>
+  </tr>
+  <tr>
+    <td>c5.9xlarge</td>
+    <td>36</td>
+    <td>72</td>
+    <td>$1.836</td>
+  </tr>
+  <tr>
+    <td>g4dn.xlarge</td>
+    <td>4</td>
+    <td>16</td>
+    <td>$0.631</td>
+  </tr>
+  <tr>
+    <td>g4dn.2xlarge</td>
+    <td>8</td>
+    <td>32</td>
+    <td>$0.902</td>
+  </tr>
+  <tr>
+    <td>g4dn.4xlarge</td>
+ <td>16</td> + <td>64</td> + <td>$1.445</td> + </tr> + <tr> + <td>g4dn.8xlarge</td> + <td>32</td> + <td>128</td> + <td>$2.611</td> + </tr> + <tr> + <td>g4dn.12xlarge</td> + <td>48</td> + <td>192</td> + <td>$4.964</td> + </tr> + <tr> + <td>g4dn.16xlarge</td> + <td>64</td> + <td>256</td> + <td>$5.222</td> + </tr> +</table> + +### GCP GKE Cluster Pricing + +GCP clusters will be charged GCP list pricing plus a markup of 20%. Note that the markup will be calculated at the rounded price per hour in order to make hourly prices fixed. + +<table> + <tr> + <th width="25%">Instance Type</th> + <th width="25%">VCPUs</th> + <th width="25%">Memory (GiB)</th> + <th width="25%">USD/Credit per hour</th> + </tr> + <tr> + <td>n2-standard-2</td> + <td>2</td> + <td>8</td> + <td>$0.117</td> + </tr> + <tr> + <td>n2-standard-4</td> + <td>4</td> + <td>16</td> + <td>$0.233</td> + </tr> + <tr> + <td>n2-standard-8</td> + <td>8</td> + <td>32</td> + <td>$0.466</td> + </tr> + <tr> + <td>n2-standard-16</td> + <td>16</td> + <td>64</td> + <td>$0.932</td> + </tr> + <tr> + <td>n2-standard-32</td> + <td>32</td> + <td>128</td> + <td>$1.865</td> + </tr> + <tr> + <td>t2a-standard-2</td> + <td>2</td> + <td>8</td> + <td>$0.092</td> + </tr> + <tr> + <td>t2a-standard-4</td> + <td>4</td> + <td>16</td> + <td>$0.185</td> + </tr> + <tr> + <td>t2a-standard-8</td> + <td>8</td> + <td>32</td> + <td>$0.370</td> + </tr> + <tr> + <td>t2a-standard-16</td> + <td>16</td> + <td>64</td> + <td>$0.739</td> + </tr> + <tr> + <td>t2a-standard-32</td> + <td>32</td> + <td>128</td> + <td>$1.478</td> + </tr> + <tr> + <td>t2a-standard-48</td> + <td>48</td> + <td>192</td> + <td>$2.218</td> + </tr> + <tr> + <td>e2-standard-2</td> + <td>2</td> + <td>8</td> + <td>$0.081</td> + </tr> + <tr> + <td>e2-standard-4</td> + <td>4</td> + <td>16</td> + <td>$0.161</td> + </tr> + <tr> + <td>e2-standard-8</td> + <td>8</td> + <td>32</td> + <td>$0.322</td> + </tr> + <tr> + <td>e2-standard-16</td> + <td>16</td> + <td>64</td> + <td>$0.643</td> + </tr> + <tr> + <td>e2-standard-32</td> + <td>32</td> + <td>128</td> + <td>$1.287</td> + </tr> + <tr> + <td>n1-standard-1+nvidia-tesla-t4+1</td> + <td>1</td> + <td>3.75</td> + <td>$0.321</td> + </tr> + <tr> + <td>n1-standard-1+nvidia-tesla-t4+2</td> + <td>1</td> + <td>3.75</td> + <td>$0.585</td> + </tr> + <tr> + <td>n1-standard-1+nvidia-tesla-t4+4</td> + <td>1</td> + <td>3.75</td> + <td>$1.113</td> + </tr> + <tr> + <td>n1-standard-2+nvidia-tesla-t4+1</td> + <td>2</td> + <td>7.50</td> + <td>$0.378</td> + </tr> + <tr> + <td>n1-standard-2+nvidia-tesla-t4+2</td> + <td>2</td> + <td>7.50</td> + <td>$0.642</td> + </tr> + <tr> + <td>n1-standard-2+nvidia-tesla-t4+4</td> + <td>2</td> + <td>7.50</td> + <td>$1.170</td> + </tr> + <tr> + <td>n1-standard-4+nvidia-tesla-t4+1</td> + <td>4</td> + <td>15</td> + <td>$0.492</td> + </tr> + <tr> + <td>n1-standard-4+nvidia-tesla-t4+2</td> + <td>4</td> + <td>15</td> + <td>$0.756</td> + </tr> + <tr> + <td>n1-standard-4+nvidia-tesla-t4+4</td> + <td>4</td> + <td>15</td> + <td>$1.284</td> + </tr> + <tr> + <td>n1-standard-8+nvidia-tesla-t4+1</td> + <td>8</td> + <td>30</td> + <td>$0.720</td> + </tr> + <tr> + <td>n1-standard-8+nvidia-tesla-t4+2</td> + <td>8</td> + <td>30</td> + <td>$0.984</td> + </tr> + <tr> + <td>n1-standard-8+nvidia-tesla-t4+4</td> + <td>8</td> + <td>30</td> + <td>$1.512</td> + </tr> + <tr> + <td>n1-standard-16+nvidia-tesla-t4+1</td> + <td>16</td> + <td>60</td> + <td>$1.176</td> + </tr> + <tr> + <td>n1-standard-16+nvidia-tesla-t4+2</td> + <td>16</td> + <td>60</td> + <td>$1.440</td> + </tr> + <tr> + 
<td>n1-standard-16+nvidia-tesla-t4+4</td> + <td>16</td> + <td>60</td> + <td>$1.968</td> + </tr> + <tr> + <td>n1-standard-32+nvidia-tesla-t4+1</td> + <td>32</td> + <td>120</td> + <td>$2.088</td> + </tr> + <tr> + <td>n1-standard-32+nvidia-tesla-t4+2</td> + <td>32</td> + <td>120</td> + <td>$2.352</td> + </tr> + <tr> + <td>n1-standard-32+nvidia-tesla-t4+4</td> + <td>32</td> + <td>120</td> + <td>$2.880</td> + </tr> + <tr> + <td>n1-standard-64+nvidia-tesla-t4+1</td> + <td>64</td> + <td>240</td> + <td>$3.912</td> + </tr> + <tr> + <td>n1-standard-64+nvidia-tesla-t4+2</td> + <td>64</td> + <td>240</td> + <td>$4.176</td> + </tr> + <tr> + <td>n1-standard-64+nvidia-tesla-t4+4</td> + <td>64</td> + <td>240</td> + <td>$4.704</td> + </tr> + <tr> + <td>n1-standard-96+nvidia-tesla-t4+1</td> + <td>96</td> + <td>360</td> + <td>$5.736</td> + </tr> + <tr> + <td>n1-standard-96+nvidia-tesla-t4+2</td> + <td>96</td> + <td>360</td> + <td>$6.000</td> + </tr> + <tr> + <td>n1-standard-96+nvidia-tesla-t4+4</td> + <td>96</td> + <td>360</td> + <td>$6.528</td> + </tr> +</table> + +### Azure AKS Cluster Pricing + +Azure clusters will be charged Azure list pricing plus a markup of 20%. Note that the markup will be calculated at the rounded price per hour in order to make hourly prices fixed. + +<table> + <tr> + <th width="25%">Instance Type</th> + <th width="15%">VCPUs</th> + <th width="15%">Memory (GiB)</th> + <th width="15%">Rate</th> + <th width="15%">List Price</th> + <th width="15%">USD/Credit per hour</th> + </tr> + <tr> + <td>Standard_B2ms</td> + <td>2</td> + <td>8</td> + <td>8320</td> + <td>$0.083</td> + <td>$0.100</td> + </tr> + <tr> + <td>Standard_B4ms</td> + <td>4</td> + <td>16</td> + <td>16600</td> + <td>$0.166</td> + <td>$0.199</td> + </tr> + <tr> + <td>Standard_B8ms</td> + <td>8</td> + <td>32</td> + <td>33300</td> + <td>$0.333</td> + <td>$0.400</td> + </tr> + <tr> + <td>Standard_B16ms</td> + <td>16</td> + <td>64</td> + <td>66600</td> + <td>$0.666</td> + <td>$0.799</td> + </tr> + <tr> + <td>Standard_DS2_v2</td> + <td>2</td> + <td>7</td> + <td>14600</td> + <td>$0.146</td> + <td>$0.175</td> + </tr> + <tr> + <td>Standard_DS3_v2</td> + <td>4</td> + <td>14</td> + <td>29300</td> + <td>$0.293</td> + <td>$0.352</td> + </tr> + <tr> + <td>Standard_DS4_v2</td> + <td>8</td> + <td>28</td> + <td>58500</td> + <td>$0.585</td> + <td>$0.702</td> + </tr> + <tr> + <td>Standard_DS5_v2</td> + <td>16</td> + <td>56</td> + <td>117000</td> + <td>$1.170</td> + <td>$1.404</td> + </tr> + <tr> + <td>Standard_D2ps_v5</td> + <td>2</td> + <td>8</td> + <td>14600</td> + <td>$0.077</td> + <td>$0.092</td> + </tr> + <tr> + <td>Standard_D4ps_v5</td> + <td>4</td> + <td>16</td> + <td>7700</td> + <td>$0.154</td> + <td>$0.185</td> + </tr> + <tr> + <td>Standard_D8ps_v5</td> + <td>8</td> + <td>32</td> + <td>15400</td> + <td>$0.308</td> + <td>$0.370</td> + </tr> + <tr> + <td>Standard_D16ps_v5</td> + <td>16</td> + <td>64</td> + <td>30800</td> + <td>$0.616</td> + <td>$0.739</td> + </tr> + <tr> + <td>Standard_D32ps_v5</td> + <td>32</td> + <td>128</td> + <td>61600</td> + <td>$1.232</td> + <td>$1.478</td> + </tr> + <tr> + <td>Standard_D48ps_v5</td> + <td>48</td> + <td>192</td> + <td>23200</td> + <td>$1.848</td> + <td>$2.218</td> + </tr> + <tr> + <td>Standard_NC4as_T4_v3</td> + <td>4</td> + <td>28</td> + <td>52600</td> + <td>$0.526</td> + <td>$0.631</td> + </tr> + <tr> + <td>Standard_NC8as_T4_v3</td> + <td>8</td> + <td>56</td> + <td>75200</td> + <td>$0.752</td> + <td>$0.902</td> + </tr> + <tr> + <td>Standard_NC16as_T4_v3</td> + <td>16</td> + <td>110</td> + 
<td>120400</td>
+    <td>$1.204</td>
+    <td>$1.445</td>
+  </tr>
+  <tr>
+    <td>Standard_NC64as_T4_v3</td>
+    <td>64</td>
+    <td>440</td>
+    <td>435200</td>
+    <td>$4.352</td>
+    <td>$5.222</td>
+  </tr>
+  <tr>
+    <td>Standard_D2S_v5</td>
+    <td>2</td>
+    <td>8</td>
+    <td>9600</td>
+    <td>$0.096</td>
+    <td>$0.115</td>
+  </tr>
+  <tr>
+    <td>Standard_D4S_v5</td>
+    <td>4</td>
+    <td>16</td>
+    <td>19200</td>
+    <td>$0.192</td>
+    <td>$0.230</td>
+  </tr>
+  <tr>
+    <td>Standard_D8S_v5</td>
+    <td>8</td>
+    <td>32</td>
+    <td>38400</td>
+    <td>$0.384</td>
+    <td>$0.461</td>
+  </tr>
+  <tr>
+    <td>Standard_D16S_v5</td>
+    <td>16</td>
+    <td>64</td>
+    <td>76800</td>
+    <td>$0.768</td>
+    <td>$0.922</td>
+  </tr>
+  <tr>
+    <td>Standard_D32S_v5</td>
+    <td>32</td>
+    <td>128</td>
+    <td>153600</td>
+    <td>$1.536</td>
+    <td>$1.843</td>
+  </tr>
+  <tr>
+    <td>Standard_D64S_v5</td>
+    <td>64</td>
+    <td>192</td>
+    <td>230400</td>
+    <td>$2.304</td>
+    <td>$2.765</td>
+  </tr>
+</table>
+
+### Oracle OKE Cluster Pricing
+
+Oracle-based clusters will be charged Oracle list pricing plus a markup of 20%. Note that the markup will be calculated at the rounded price per hour in order to make hourly prices fixed.
+
+<table>
+  <tr>
+    <th width="25%">Instance Type</th>
+    <th width="25%">VCPUs</th>
+    <th width="25%">Memory (GiB)</th>
+    <th width="25%">USD/Credit per hour</th>
+  </tr>
+  <tr>
+    <td>VM.Standard2.1</td>
+    <td>1</td>
+    <td>15</td>
+    <td>$0.076</td>
+  </tr>
+  <tr>
+    <td>VM.Standard2.2</td>
+    <td>2</td>
+    <td>30</td>
+    <td>$0.153</td>
+  </tr>
+  <tr>
+    <td>VM.Standard2.4</td>
+    <td>4</td>
+    <td>60</td>
+    <td>$0.306</td>
+  </tr>
+  <tr>
+    <td>VM.Standard2.8</td>
+    <td>8</td>
+    <td>120</td>
+    <td>$0.612</td>
+  </tr>
+  <tr>
+    <td>VM.Standard2.16</td>
+    <td>16</td>
+    <td>240</td>
+    <td>$1.225</td>
+  </tr>
+  <tr>
+    <td>VM.Standard3Flex.1</td>
+    <td>1</td>
+    <td>4</td>
+    <td>$0.055</td>
+  </tr>
+  <tr>
+    <td>VM.Standard3Flex.2</td>
+    <td>2</td>
+    <td>8</td>
+    <td>$0.110</td>
+  </tr>
+  <tr>
+    <td>VM.Standard3Flex.4</td>
+    <td>4</td>
+    <td>16</td>
+    <td>$0.221</td>
+  </tr>
+  <tr>
+    <td>VM.Standard3Flex.8</td>
+    <td>8</td>
+    <td>32</td>
+    <td>$0.442</td>
+  </tr>
+  <tr>
+    <td>VM.Standard3Flex.16</td>
+    <td>16</td>
+    <td>64</td>
+    <td>$0.883</td>
+  </tr>
+  <tr>
+    <td>VM.Standard.A1.Flex.1</td>
+    <td>1</td>
+    <td>4</td>
+    <td>$0.019</td>
+  </tr>
+  <tr>
+    <td>VM.Standard.A1.Flex.2</td>
+    <td>2</td>
+    <td>8</td>
+    <td>$0.038</td>
+  </tr>
+  <tr>
+    <td>VM.Standard.A1.Flex.4</td>
+    <td>4</td>
+    <td>16</td>
+    <td>$0.077</td>
+  </tr>
+  <tr>
+    <td>VM.Standard.A1.Flex.8</td>
+    <td>8</td>
+    <td>32</td>
+    <td>$0.154</td>
+  </tr>
+  <tr>
+    <td>VM.Standard.A1.Flex.16</td>
+    <td>16</td>
+    <td>64</td>
+    <td>$0.309</td>
+  </tr>
+</table>
+
+
+---
+
+
+# Supported Compatibility Matrix Cluster Types
+
+import Pool from "../partials/cmx/_openshift-pool.mdx"
+
+This topic describes the supported Kubernetes distributions, Kubernetes versions, instance types, nodes, limitations, and common use cases for clusters created with Replicated Compatibility Matrix.
+
+Compatibility Matrix provisions cloud-based or virtual machine (VM) clusters.
+
+## VM Clusters
+
+This section lists the supported VM cluster distributions for clusters created with Compatibility Matrix.
+
+### kind
+
+Compatibility Matrix supports creating [kind](https://kind.sigs.k8s.io/) clusters.
+ +<table> + <tr> + <th width="35%">Type</th> + <th width="65%">Description</th> + </tr> + <tr> + <th>Supported Kubernetes Versions</th> + <td>{/* START_kind_VERSIONS */}1.26.15, 1.27.16, 1.28.15, 1.29.14, 1.30.10, 1.31.6, 1.32.2{/* END_kind_VERSIONS */}</td> + </tr> + <tr> + <th>Supported Instance Types</th> + <td>See <a href="#types">Replicated Instance Types</a></td> + </tr> + <tr> + <th>Node Groups</th> + <td>No</td> + </tr> + <tr> + <th>Node Auto Scaling</th> + <td>No</td> + </tr> + <tr> + <th>Nodes</th> + <td>Supports a single node.</td> + </tr> + <tr> + <th>IP Family</th> + <td>Supports `ipv4` or `dual`.</td> + </tr> + <tr> + <th>Limitations</th> + <td>See <a href="testing-about#limitations">Limitations</a></td> + </tr> + <tr> + <th>Common Use Cases</th> + <td>Smoke tests</td> + </tr> +</table> + +### k3s + +Compatibility Matrix supports creating [k3s](https://k3s.io) clusters. + +<table> + <tr> + <th width="35%">Type</th> + <th width="65%">Description</th> + </tr> + <tr> + <th>Supported k3s Versions</th> + <td>The upstream k8s version that matches the Kubernetes version requested.</td> + </tr> + <tr> + <th>Supported Kubernetes Versions</th> + <td>{/* START_k3s_VERSIONS */}1.24.1, 1.24.2, 1.24.3, 1.24.4, 1.24.6, 1.24.7, 1.24.8, 1.24.9, 1.24.10, 1.24.11, 1.24.12, 1.24.13, 1.24.14, 1.24.15, 1.24.16, 1.24.17, 1.25.0, 1.25.2, 1.25.3, 1.25.4, 1.25.5, 1.25.6, 1.25.7, 1.25.8, 1.25.9, 1.25.10, 1.25.11, 1.25.12, 1.25.13, 1.25.14, 1.25.15, 1.25.16, 1.26.0, 1.26.1, 1.26.2, 1.26.3, 1.26.4, 1.26.5, 1.26.6, 1.26.7, 1.26.8, 1.26.9, 1.26.10, 1.26.11, 1.26.12, 1.26.13, 1.26.14, 1.26.15, 1.27.1, 1.27.2, 1.27.3, 1.27.4, 1.27.5, 1.27.6, 1.27.7, 1.27.8, 1.27.9, 1.27.10, 1.27.11, 1.27.12, 1.27.13, 1.27.14, 1.27.15, 1.27.16, 1.28.1, 1.28.2, 1.28.3, 1.28.4, 1.28.5, 1.28.6, 1.28.7, 1.28.8, 1.28.9, 1.28.10, 1.28.11, 1.28.12, 1.28.13, 1.28.14, 1.28.15, 1.29.0, 1.29.1, 1.29.2, 1.29.3, 1.29.4, 1.29.5, 1.29.6, 1.29.7, 1.29.8, 1.29.9, 1.29.10, 1.29.11, 1.29.12, 1.29.13, 1.29.14, 1.30.0, 1.30.1, 1.30.2, 1.30.3, 1.30.4, 1.30.5, 1.30.6, 1.30.7, 1.30.8, 1.30.9, 1.30.10, 1.31.0, 1.31.1, 1.31.2, 1.31.3, 1.31.4, 1.31.5, 1.31.6, 1.32.0, 1.32.1, 1.32.2{/* END_k3s_VERSIONS */}</td> + </tr> + <tr> + <th>Supported Instance Types</th> + <td>See <a href="#types">Replicated Instance Types</a></td> + </tr> + <tr> + <th>Node Groups</th> + <td>Yes</td> + </tr> + <tr> + <th>Node Auto Scaling</th> + <td>No</td> + </tr> + <tr> + <th>Nodes</th> + <td>Supports multiple nodes.</td> + </tr> + <tr> + <th>IP Family</th> + <td>Supports `ipv4`.</td> + </tr> + <tr> + <th>Limitations</th> + <td>For additional limitations that apply to all distributions, see <a href="testing-about#limitations">Limitations</a>.</td> + </tr> + <tr> + <th>Common Use Cases</th> + <td><ul><li>Smoke tests</li><li>Customer release tests</li></ul></td> + </tr> +</table> + +### RKE2 (Beta) + +Compatibility Matrix supports creating [RKE2](https://docs.rke2.io/) clusters. 
+
+<table>
+  <tr>
+    <th width="35%">Type</th>
+    <th width="65%">Description</th>
+  </tr>
+  <tr>
+    <th>Supported RKE2 Versions</th>
+    <td>The upstream k8s version that matches the Kubernetes version requested.</td>
+  </tr>
+  <tr>
+    <th>Supported Kubernetes Versions</th>
+    <td>{/* START_rke2_VERSIONS */}1.24.1, 1.24.2, 1.24.3, 1.24.4, 1.24.6, 1.24.7, 1.24.8, 1.24.9, 1.24.10, 1.24.11, 1.24.12, 1.24.13, 1.24.14, 1.24.15, 1.24.16, 1.24.17, 1.25.0, 1.25.2, 1.25.3, 1.25.4, 1.25.5, 1.25.6, 1.25.7, 1.25.8, 1.25.9, 1.25.10, 1.25.11, 1.25.12, 1.25.13, 1.25.14, 1.25.15, 1.25.16, 1.26.0, 1.26.1, 1.26.2, 1.26.3, 1.26.4, 1.26.5, 1.26.6, 1.26.7, 1.26.8, 1.26.9, 1.26.10, 1.26.11, 1.26.12, 1.26.13, 1.26.14, 1.26.15, 1.27.1, 1.27.2, 1.27.3, 1.27.4, 1.27.5, 1.27.6, 1.27.7, 1.27.8, 1.27.9, 1.27.10, 1.27.11, 1.27.12, 1.27.13, 1.27.14, 1.27.15, 1.27.16, 1.28.2, 1.28.3, 1.28.4, 1.28.5, 1.28.6, 1.28.7, 1.28.8, 1.28.9, 1.28.10, 1.28.11, 1.28.12, 1.28.13, 1.28.14, 1.28.15, 1.29.0, 1.29.1, 1.29.2, 1.29.3, 1.29.4, 1.29.5, 1.29.6, 1.29.7, 1.29.8, 1.29.9, 1.29.10, 1.29.11, 1.29.12, 1.29.13, 1.29.14, 1.30.0, 1.30.1, 1.30.2, 1.30.3, 1.30.4, 1.30.5, 1.30.6, 1.30.7, 1.30.8, 1.30.9, 1.30.10, 1.31.0, 1.31.1, 1.31.2, 1.31.3, 1.31.4, 1.31.5, 1.31.6, 1.32.0, 1.32.1, 1.32.2{/* END_rke2_VERSIONS */}</td>
+  </tr>
+  <tr>
+    <th>Supported Instance Types</th>
+    <td>See <a href="#types">Replicated Instance Types</a></td>
+  </tr>
+  <tr>
+    <th>Node Groups</th>
+    <td>Yes</td>
+  </tr>
+  <tr>
+    <th>Node Auto Scaling</th>
+    <td>No</td>
+  </tr>
+  <tr>
+    <th>Nodes</th>
+    <td>Supports multiple nodes.</td>
+  </tr>
+  <tr>
+    <th>IP Family</th>
+    <td>Supports `ipv4`.</td>
+  </tr>
+  <tr>
+    <th>Limitations</th>
+    <td>For additional limitations that apply to all distributions, see <a href="testing-about#limitations">Limitations</a>.</td>
+  </tr>
+  <tr>
+    <th>Common Use Cases</th>
+    <td><ul><li>Smoke tests</li><li>Customer release tests</li></ul></td>
+  </tr>
+</table>
+
+### OpenShift OKD
+
+Compatibility Matrix supports creating [Red Hat OpenShift OKD](https://www.okd.io/) clusters, the community distribution of OpenShift, using CodeReady Containers (CRC).
+
+OpenShift clusters are provisioned with two users:
+
+- (Default) A `kubeadmin` user with `cluster-admin` privileges. Use the `kubeadmin` user only for administrative tasks such as creating new users or setting roles.
+- A `developer` user with namespace-scoped privileges. The `developer` user can be used to better simulate access in end-customer environments.
+
+By default, the kubeconfig context is set to the `kubeadmin` user. To switch to the `developer` user, run the command `oc login --username developer`.
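+
+As a quick check, you can confirm which user the current context is using before running tests. A minimal sketch, assuming the `oc` CLI is installed locally and pointed at the cluster's kubeconfig:
+
+```
+# Print the user for the current kubeconfig context
+oc whoami
+
+# Switch to the namespace-scoped developer user
+oc login --username developer
+```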
+ +<table> + <tr> + <th width="35%">Type</th> + <th width="65%">Description</th> + </tr> + <tr> + <th>Supported OpenShift Versions</th> + <td>{/* START_openshift_VERSIONS */}4.10.0-okd, 4.11.0-okd, 4.12.0-okd, 4.13.0-okd, 4.14.0-okd, 4.15.0-okd, 4.16.0-okd, 4.17.0-okd{/* END_openshift_VERSIONS */}</td> + </tr> + <tr> + <th>Supported Instance Types</th> + <td>See <a href="#types">Replicated Instance Types</a></td> + </tr> + <tr> + <th>Node Groups</th> + <td>Yes</td> + </tr> + <tr> + <th>Node Auto Scaling</th> + <td>No</td> + </tr> + <tr> + <th>Nodes</th> + <td>Supports multiple nodes for versions 4.13.0-okd and later.</td> + </tr> + <tr> + <th>IP Family</th> + <td>Supports `ipv4`.</td> + </tr> + <tr> + <th>Limitations</th> + <td> + <ul> + <li>OpenShift does not support r1.small instance types.</li> + <li>OpenShift versions earlier than 4.13-okd do not have a registry mirror and so may be subject to rate limiting from Docker Hub. For information about Docker Hub rate limiting, see <a href="https://docs.docker.com/docker-hub/download-rate-limit/">Docker Hub rate limit</a>. To increase limits, Replicated recommends that you configure an image pull secret to pull public Docker Hub images as an authenticated user. For more information about how to configure image pull secrets, see <a href="https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/">Pull an Image from a Private Registry</a> in the Kubernetes documentation.</li> + <li> + <p>OpenShift builds take approximately 17 minutes.</p> + <p><Pool/></p> + </li> + </ul> + <p>For additional limitations that apply to all distributions, see <a href="testing-about#limitations">Limitations</a>.</p> + </td> + </tr> + <tr> + <th>Common Use Cases</th> + <td>Customer release tests</td> + </tr> +</table> + +### Embedded Cluster + +Compatibility Matrix supports creating clusters with Replicated Embedded Cluster. For more information, see [Embedded Cluster Overview](/vendor/embedded-overview). + +<table> + <tr> + <th width="35%">Type</th> + <th width="65%">Description</th> + </tr> + <tr> + <th>Supported Embedded Cluster Versions</th> + <td> + Any valid release sequence that has previously been promoted to the channel where the customer license is assigned. + Version is optional and defaults to the latest available release on the channel. + </td> + </tr> + <tr> + <th>Supported Instance Types</th> + <td>See <a href="#types">Replicated Instance Types</a></td> + </tr> + <tr> + <th>Node Groups</th> + <td>Yes</td> + </tr> + <tr> + <th>Nodes</th> + <td>Supports multiple nodes (alpha).</td> + </tr> + <tr> + <th>IP Family</th> + <td>Supports `ipv4`.</td> + </tr> + <tr> + <th>Limitations</th> + <td> + <ul> + <li>The Admin Console UI is not exposed publicly and must be exposed via `kubectl -n kotsadm port-forward svc/kurl-proxy-kotsadm 38800:8800`. The password for the Admin Console is `password`.</li> + <li><strong>A valid customer license is required to create an Embedded Cluster.</strong></li> + <li>The [cluster prepare](/vendor/testing-how-to#prepare-clusters) command is not supported.</li> + </ul> + <p>For additional limitations that apply to all distributions, see <a href="testing-about#limitations">Limitations</a>.</p> + </td> + </tr> + <tr> + <th>Common Use Cases</th> + <td>Customer release tests</td> + </tr> +</table> + +### kURL + +Compatibility Matrix supports creating [kURL](https://kurl.sh) clusters. 
+ +<table> + <tr> + <th width="35%">Type</th> + <th width="65%">Description</th> + </tr> + <tr> + <th>Supported kURL Versions</th> + <td>Any promoted kURL installer. Version is optional. For an installer version other than "latest", you can find the specific Installer ID for a previously promoted installer under the relevant **Install Command** (ID after kurl.sh/) on the **Channels > kURL Installer History** page in the Vendor Portal. For more information about viewing the history of kURL installers promoted to a channel, see [Installer History](/vendor/installer-history).</td> + </tr> + <tr> + <th>Supported Instance Types</th> + <td>See <a href="#types">Replicated Instance Types</a></td> + </tr> + <tr> + <th>Node Groups</th> + <td>Yes</td> + </tr> + <tr> + <th>Node Auto Scaling</th> + <td>No</td> + </tr> + <tr> + <th>Nodes</th> + <td>Supports multiple nodes.</td> + </tr> + <tr> + <th>IP Family</th> + <td>Supports `ipv4`.</td> + </tr> + <tr> + <th>Limitations</th> + <td><p>Does not work with the <a href="https://kurl.sh/docs/add-ons/longhorn">Longhorn add-on</a>.</p><p>For additional limitations that apply to all distributions, see <a href="testing-about#limitations">Limitations</a>.</p></td> + </tr> + <tr> + <th>Common Use Cases</th> + <td>Customer release tests</td> + </tr> +</table> + +## Cloud Clusters + +This section lists the supported cloud clusters for compatibility testing. + +### EKS + +Compatibility Matrix supports creating [AWS EKS](https://aws.amazon.com/eks/?nc2=type_a) clusters. + +<table> + <tr> + <th width="35%">Type</th> + <th width="65%">Description</th> + </tr> + <tr> + <th>Supported Kubernetes Versions</th> + <td><p>{/* START_eks_VERSIONS */}1.25, 1.26, 1.27, 1.28, 1.29, 1.30, 1.31, 1.32{/* END_eks_VERSIONS */}</p><p>Extended Support Versions: 1.25, 1.26, 1.27, 1.28</p></td> + </tr> + <tr> + <th>Supported Instance Types</th> + <td><p>m6i.large, m6i.xlarge, m6i.2xlarge, m6i.4xlarge, m6i.8xlarge, m7i.large, m7i.xlarge, m7i.2xlarge, m7i.4xlarge, m7i.8xlarge, m5.large, m5.xlarge, m5.2xlarge, + m5.4xlarge, m5.8xlarge, m7g.large (arm), m7g.xlarge (arm), m7g.2xlarge (arm), m7g.4xlarge (arm), m7g.8xlarge (arm), c5.large, c5.xlarge, c5.2xlarge, c5.4xlarge, + c5.9xlarge, g4dn.xlarge (gpu), g4dn.2xlarge (gpu), g4dn.4xlarge (gpu), g4dn.8xlarge (gpu), g4dn.12xlarge (gpu), g4dn.16xlarge (gpu)</p><p>g4dn instance types depend on available capacity. After a g4dn cluster is running, you also need to install your version of the NVIDIA device plugin for Kubernetes. See [Amazon EKS optimized accelerated Amazon Linux AMIs](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html#gpu-ami) in the AWS documentation.</p></td> + </tr> + <tr> + <th>Node Groups</th> + <td>Yes</td> + </tr> + <tr> + <th>Node Auto Scaling</th> + <td>Yes. Cost will be based on the max number of nodes.</td> + </tr> + <tr> + <th>Nodes</th> + <td>Supports multiple nodes.</td> + </tr> + <tr> + <th>IP Family</th> + <td>Supports `ipv4`.</td> + </tr> + <tr> + <th>Limitations</th> + <td><p>You can only choose a minor version, not a patch version. The EKS installer chooses the latest patch for that minor version.</p><p>For additional limitations that apply to all distributions, see <a href="testing-about#limitations">Limitations</a>.</p></td> + </tr> + <tr> + <th>Common Use Cases</th> + <td>Customer release tests</td> + </tr> +</table> + +### GKE + +Compatibility Matrix supports creating [Google GKE](https://cloud.google.com/kubernetes-engine) clusters. 
+ +<table> + <tr> + <th width="35%">Type</th> + <th width="65%">Description</th> + </tr> + <tr> + <th>Supported Kubernetes Versions</th> + <td>{/* START_gke_VERSIONS */}1.29, 1.30, 1.31, 1.32{/* END_gke_VERSIONS */}</td> + </tr> + <tr> + <th>Supported Instance Types</th> + <td><p>n2-standard-2, n2-standard-4, n2-standard-8, n2-standard-16, n2-standard-32, t2a-standard-2 (arm), t2a-standard-4 (arm), t2a-standard-8 (arm), t2a-standard-16 (arm), t2a-standard-32 (arm), t2a-standard-48 (arm), e2-standard-2, e2-standard-4, e2-standard-8, e2-standard-16, e2-standard-32, n1-standard-1+nvidia-tesla-t4+1 (gpu), n1-standard-1+nvidia-tesla-t4+2 (gpu), n1-standard-1+nvidia-tesla-t4+4 (gpu), n1-standard-2+nvidia-tesla-t4+1 (gpu), n1-standard-2+nvidia-tesla-t4+2 (gpu), n1-standard-2+nvidia-tesla-t4+4 (gpu), n1-standard-4+nvidia-tesla-t4+1 (gpu), n1-standard-4+nvidia-tesla-t4+2 (gpu), n1-standard-4+nvidia-tesla-t4+4 (gpu), n1-standard-8+nvidia-tesla-t4+1 (gpu), n1-standard-8+nvidia-tesla-t4+2 (gpu), n1-standard-8+nvidia-tesla-t4+4 (gpu), n1-standard-16+nvidia-tesla-t4+1 (gpu), n1-standard-16+nvidia-tesla-t4+2 (gpu), n1-standard-16+nvidia-tesla-t4+4 (gpu), n1-standard-32+nvidia-tesla-t4+1 (gpu), n1-standard-32+nvidia-tesla-t4+2 (gpu), n1-standard-32+nvidia-tesla-t4+4 (gpu), n1-standard-64+nvidia-tesla-t4+1 (gpu), n1-standard-64+nvidia-tesla-t4+2 (gpu), n1-standard-64+nvidia-tesla-t4+4 (gpu), n1-standard-96+nvidia-tesla-t4+1 (gpu), n1-standard-96+nvidia-tesla-t4+2 (gpu), n1-standard-96+nvidia-tesla-t4+4 (gpu)</p><p>You can specify more than one node.</p></td> + </tr> + <tr> + <th>Node Groups</th> + <td>Yes</td> + </tr> + <tr> + <th>Node Auto Scaling</th> + <td>Yes. Cost will be based on the max number of nodes.</td> + </tr> + <tr> + <th>Nodes</th> + <td>Supports multiple nodes.</td> + </tr> + <tr> + <th>IP Family</th> + <td>Supports `ipv4`.</td> + </tr> + <tr> + <th>Limitations</th> + <td><p>You can choose only a minor version, not a patch version. The GKE installer chooses the latest patch for that minor version.</p><p>For additional limitations that apply to all distributions, see <a href="testing-about#limitations">Limitations</a>.</p></td> + </tr> + <tr> + <th>Common Use Cases</th> + <td>Customer release tests</td> + </tr> +</table> + +### AKS + +Compatibility Matrix supports creating [Azure AKS](https://azure.microsoft.com/en-us/products/kubernetes-service) clusters. + +<table> + <tr> + <th width="35%">Type</th> + <th width="65%">Description</th> + </tr> + <tr> + <th>Supported Kubernetes Versions</th> + <td>{/* START_aks_VERSIONS */}1.29, 1.30, 1.31{/* END_aks_VERSIONS */}</td> + </tr> + <tr> + <th>Supported Instance Types</th> + <td><p>Standard_B2ms, Standard_B4ms, Standard_B8ms, Standard_B16ms, Standard_DS2_v2, Standard_DS3_v2, Standard_DS4_v2, Standard_DS5_v2, Standard_DS2_v5, Standard_DS3_v5, Standard_DS4_v5, Standard_DS5_v5, Standard_D2ps_v5 (arm), Standard_D4ps_v5 (arm), Standard_D8ps_v5 (arm), Standard_D16ps_v5 (arm), Standard_D32ps_v5 (arm), Standard_D48ps_v5 (arm), Standard_NC4as_T4_v3 (gpu), Standard_NC8as_T4_v3 (gpu), Standard_NC16as_T4_v3 (gpu), Standard_NC64as_T4_v3 (gpu)</p><p>GPU instance types depend on available capacity. After a GPU cluster is running, you also need to install your version of the NVIDIA device plugin for Kubernetes. 
See [NVIDIA GPU Operator with Azure Kubernetes Service](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/microsoft-aks.html) in the NVIDIA documentation.</p></td>
+  </tr>
+  <tr>
+    <th>Node Groups</th>
+    <td>Yes</td>
+  </tr>
+  <tr>
+    <th>Node Auto Scaling</th>
+    <td>Yes. Cost will be based on the max number of nodes.</td>
+  </tr>
+  <tr>
+    <th>Nodes</th>
+    <td>Supports multiple nodes.</td>
+  </tr>
+  <tr>
+    <th>IP Family</th>
+    <td>Supports `ipv4`.</td>
+  </tr>
+  <tr>
+    <th>Limitations</th>
+    <td><p>You can choose only a minor version, not a patch version. The AKS installer chooses the latest patch for that minor version.</p><p>For additional limitations that apply to all distributions, see <a href="testing-about#limitations">Limitations</a>.</p></td>
+  </tr>
+  <tr>
+    <th>Common Use Cases</th>
+    <td>Customer release tests</td>
+  </tr>
+</table>
+
+### OKE (Beta)
+
+Compatibility Matrix supports creating [Oracle Container Engine for Kubernetes (OKE)](https://docs.oracle.com/en-us/iaas/Content/ContEng/Concepts/contengoverview.htm) clusters.
+
+<table>
+  <tr>
+    <th width="35%">Type</th>
+    <th width="65%">Description</th>
+  </tr>
+  <tr>
+    <th>Supported Kubernetes Versions</th>
+    <td>{/* START_oke_VERSIONS */}1.29.1, 1.30.1, 1.31.1{/* END_oke_VERSIONS */}</td>
+  </tr>
+  <tr>
+    <th>Supported Instance Types</th>
+    <td><p>VM.Standard2.1, VM.Standard2.2, VM.Standard2.4, VM.Standard2.8, VM.Standard2.16, VM.Standard3.Flex.1, VM.Standard3.Flex.2, VM.Standard3.Flex.4, VM.Standard3.Flex.8, VM.Standard3.Flex.16, VM.Standard.A1.Flex.1 (arm), VM.Standard.A1.Flex.2 (arm), VM.Standard.A1.Flex.4 (arm), VM.Standard.A1.Flex.8 (arm), VM.Standard.A1.Flex.16 (arm)</p></td>
+  </tr>
+  <tr>
+    <th>Node Groups</th>
+    <td>Yes</td>
+  </tr>
+  <tr>
+    <th>Node Auto Scaling</th>
+    <td>No</td>
+  </tr>
+  <tr>
+    <th>Nodes</th>
+    <td>Supports multiple nodes.</td>
+  </tr>
+  <tr>
+    <th>IP Family</th>
+    <td>Supports `ipv4`.</td>
+  </tr>
+  <tr>
+    <th>Limitations</th>
+    <td><p>Provisioning an OKE cluster takes between 8 and 10 minutes. If needed, you might have to adjust timeouts in your CI pipelines.</p><p>For additional limitations that apply to all distributions, see <a href="testing-about#limitations">Limitations</a>.</p></td>
+  </tr>
+  <tr>
+    <th>Common Use Cases</th>
+    <td>Customer release tests</td>
+  </tr>
+</table>
+
+## Replicated Instance Types {#types}
+
+When creating a VM-based cluster with Compatibility Matrix, you must specify a Replicated instance type.
+
+<table>
+  <tr>
+    <th width="30%">Type</th>
+    <th width="35%">Memory (GiB)</th>
+    <th width="35%">VCPU Count</th>
+  </tr>
+  <tr>
+    <th>r1.small</th>
+    <td>8</td>
+    <td>2</td>
+  </tr>
+  <tr>
+    <th>r1.medium</th>
+    <td>16</td>
+    <td>4</td>
+  </tr>
+  <tr>
+    <th>r1.large</th>
+    <td>32</td>
+    <td>8</td>
+  </tr>
+  <tr>
+    <th>r1.xlarge</th>
+    <td>64</td>
+    <td>16</td>
+  </tr>
+  <tr>
+    <th>r1.2xlarge</th>
+    <td>128</td>
+    <td>32</td>
+  </tr>
+</table>
+
+## Kubernetes Version Support Policy
+
+We do not maintain forks or patches of the supported distributions. When a Kubernetes version in Compatibility Matrix reaches end of life (EOL) upstream, Replicated will attempt to continue supporting that version for six months for compatibility testing, to support customers who are running out-of-date versions of Kubernetes. If a critical security issue or bug is found and remains unresolved, we might discontinue support for an EOL version of Kubernetes before the six months have elapsed.
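+
+As a quick illustration of how the distributions, versions, and instance types on this page fit together, the following is a minimal sketch of provisioning a test cluster with the Replicated CLI. The `replicated cluster create` flags shown here are assumptions based on the CLI help text, and the values are examples; adjust them for your own test:
+
+```
+replicated cluster create \
+  --distribution k3s \
+  --version 1.31.6 \
+  --instance-type r1.medium \
+  --nodes 3 \
+  --ttl 4h
+```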
+
+
+---
+
+
+# Example: Adding Database Configuration Options
+
+In this tutorial, we'll explore ways to give your end user the option to either embed a database instance with the application, or connect your application to an external database instance that they will manage.
+We'll use a PostgreSQL database as an example, configuring an example app to connect.
+
+This tutorial explores advanced topics like workload coordination, credential management, and refactoring your application's user-facing configuration in the Replicated Admin Console. We'll also review best practices for integrating persistent stores like databases, queues, and caches.
+
+It is split into four sections:
+
+- [The Example Application](#the-example-application)
+- [User-Facing Configuration](#user-facing-configuration)
+- [Embedding a Database](#embedding-a-database)
+- [Connecting to an External Database](#connecting-to-an-external-database)
+
+### Prerequisites
+
+This guide assumes you have:
+
+* A running instance of the Replicated Admin Console (`kotsadm`) to iterate against in either an existing cluster or an embedded cluster created with Replicated kURL. If you do not have a running instance of the Admin Console in an existing or kURL cluster, complete the [Install with KOTS in an Existing Cluster](tutorial-cli-setup) tutorial to package and install a sample application.
+* A local git checkout of your application manifests.
+
+### Accompanying Code Examples
+
+A full example of the code for this guide can be found in the [kotsapps repository](https://github.com/replicatedhq/kotsapps/tree/master/postgres-snapshots).
+
+* * *
+
+## The Example Application
+
+For demonstration purposes, we'll use a simple app that connects to a Postgres database via the `psql` CLI.
+Once you've finished this guide, you should feel confident replacing it with any Kubernetes workload(s) that need to connect to a database.
+The deployment we'll use can be seen below:
+
+```yaml
+# pg-consumer.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: pg-consumer
+spec:
+  selector:
+    matchLabels:
+      app: pg-consumer
+  template:
+    metadata:
+      labels:
+        app: pg-consumer
+    spec:
+      containers:
+      - name: pg-consumer
+        image: postgres:10
+        # connect to the database every 20 seconds
+        command:
+        - /bin/sh
+        - -ec
+        - |
+          while :; do
+            sleep 20
+            PGPASSWORD=${DB_PASSWORD} \
+            psql --host ${DB_HOST} \
+              --port ${DB_PORT} \
+              --user ${DB_USER} \
+              --dbname ${DB_NAME} \
+              --command 'SELECT NOW()'
+          done
+        # hard coded for now, we'll wire these up later
+        env:
+        - name: DB_HOST
+          value: postgres
+        - name: DB_PORT
+          value: "5432"
+        - name: DB_USER
+          value: postgres
+        - name: DB_PASSWORD
+          value: postgres
+        - name: DB_NAME
+          value: postgres
+```
+
+This app simply connects to the database every 20 seconds and writes the server timestamp to stdout.
+Even though `psql` supports [default environment variables](https://www.postgresql.org/docs/current/libpq-envars.html) for the host, username, and so on, which can be read transparently, we're intentionally using these generic `DB_` variables for clarity.
+Later, you can change these environment variable names to whatever format your application consumes.
+
+For now we'll hard code the DB variable values; in the next sections we'll wire these up to the user-provided configuration.
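+
+Before cutting a release, you can optionally sanity-check that the manifest parses cleanly. A minimal sketch, assuming `kubectl` is installed locally and the file is saved as `manifests/pg-consumer.yaml` (adjust the path for your layout):
+
+```
+# validate the manifest client-side without creating anything in the cluster
+kubectl apply --dry-run=client -f manifests/pg-consumer.yaml
+```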
+
+
+### Deploying the Example Application
+
+Once you've added this deployment to your application's `manifests` directory, create a release by running `replicated release create --auto` locally.
+Then head to the Admin Console instance and click **Check for Updates** on the Version History tab to pull the new release:
+
+![View Update](/images/guides/kots/view-update.png)
+
+Click **Deploy**. You should be able to review the logs and see `deployment.apps/pg-consumer created` in `applyStdout`:
+
+![Deployed PG Consumer](/images/guides/kots/pg-consumer-deployed.png)
+
+After it is deployed, you can run `kubectl get pods` to inspect the cluster.
+We should expect the Pod to be crashlooping at this point, since there's no database to connect to just yet:
+
+```text
+$ kubectl get pod
+NAME                               READY   STATUS             RESTARTS   AGE
+kotsadm-5bbf54df86-p7kqg           1/1     Running            0          12m
+kotsadm-api-cbccb97ff-b6qxp        1/1     Running            2          12m
+kotsadm-minio-0                    1/1     Running            0          12m
+kotsadm-operator-84477b5c4-tplcp   1/1     Running            0          12m
+kotsadm-postgres-0                 1/1     Running            0          12m
+pg-consumer-75f49bfb69-mljr6       0/1     CrashLoopBackOff   1          10s
+```
+
+Checking the logs, we should see a connect error:
+
+```text
+$ kubectl logs -l app=pg-consumer
+psql: could not translate host name "postgres" to address: Name or service not known
+```
+
+If the `kubectl logs` command hangs, you can try using the `--previous` flag to fetch the logs of the most recent crash:
+
+```text
+$ kubectl logs -l app=pg-consumer --previous
+psql: could not translate host name "postgres" to address: Name or service not known
+```
+
+Now that our test app is deployed, we'll walk through presenting options to the end user for connecting a Postgres instance to this app.
+
+* * *
+
+## User-Facing Configuration
+
+The core of this guide is how to give your end users the option to do one of the following:
+
+* Bring their own PostgreSQL instance for your app to connect to
+* Use an "embedded" database bundled in with the application
+
+The first step here is to present that option to the user; then we'll walk through implementing each scenario.
+The `kots.io/v1beta1` `Config` resource controls what configuration options are presented to the end user.
+If you followed one of the "Getting Started" guides, you probably have a `config.yaml` in your manifests that looks something like the following YAML file:
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Config
+metadata:
+  name: config-sample
+spec:
+  groups:
+    - name: example_settings
+      title: My Example Config
+      description: Configuration to serve as an example for creating your own. See [https://kots.io/reference/v1beta1/config/](https://kots.io/reference/v1beta1/config/) for configuration docs. In this case, we provide example fields for configuring an Ingress object.
+      items:
+      - name: use_ingress
+        title: Use Ingress?
+        help_text: An example field to toggle inclusion of an Ingress Object
+        type: bool
+        default: "0"
+      - name: ingress_hostname
+        title: Ingress Hostname
+        help_text: If desired, enter the hostname for ingress to this application. You can enter the IP of this instance, or a DNS hostname.
+        type: text
+        when: repl{{ ConfigOptionEquals "use_ingress" "1" }}
+```
+
+To add a database section, we'll modify it to include some database settings.
+In this case we'll remove the Ingress toggle that is included as an example, although you might also choose to leave this in. None of these database settings will have any effect yet, but we'll still be able to preview what the end user will see.
+Modify your YAML to include this database section: + +```yaml +apiVersion: kots.io/v1beta1 +kind: Config +metadata: + name: config-sample +spec: + groups: + - name: database + title: Database + items: + - name: postgres_type + help_text: Would you like to use an embedded postgres instance, or connect to an external instance that you manage? + type: radio + title: Postgres + default: embedded_postgres + items: + - name: embedded_postgres + title: Embedded Postgres + - name: external_postgres + title: External Postgres + - name: embedded_postgres_password + hidden: true + type: password + value: "{{repl RandomString 32}}" +``` + +This creates a toggle to allow the user to choose between an embedded or external Postgres instance, and a `hidden` field to generate a unique password for the embedded instance. + +As mentioned in the introduction, a full example of the code for this guide can be found in the [kotsapps repository](https://github.com/replicatedhq/kotsapps/tree/master/postgres-snapshots). + + +### Validating Config Changes + +Even though the options aren't wired, let's create a new release to validate the configuration screen was modified. +Create a release by running `replicated release create --auto`. +Then head to the Admin Console instance and click **Check for Updates** on the Version History tab to pull the new release: + +![View Update](/images/guides/kots/view-update.png) + +After the update is deployed, click the Config tab and review our new toggle. +You might also notice that we've removed the Ingress settings to simplify things for this guide: + +![Database Config](/images/guides/kots/database-config.png) + +Now that we have the configuration screen started, we can proceed to implement the "Embedded Postgres" option. + +* * * + +## Embedding a Database + +To implement the embedded Database option, we'll add a Kubernetes [Statefulset](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/), and use the [annotations for optional resources](packaging-include-resources/) to control when it will be included in the application. + +### Adding the Secret and StatefulSet + +First, we'll create a secret to store the root password for our embedded postgres instance: + +```yaml +# postgres-secret.yaml +apiVersion: v1 +kind: Secret +metadata: + name: postgres +data: + DB_PASSWORD: '{{repl ConfigOption "embedded_postgres_password" | Base64Encode }}' +``` + +Next, create a new YAML file in your `manifests` directory with the following contents. 
+
+Note the use of `kots.io/when` to conditionally include this resource based on end-user inputs:
+
+```yaml
+# postgres-statefulset.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: postgres
+  labels:
+    app: pg-provider
+  annotations:
+    kots.io/when: '{{repl ConfigOptionEquals "postgres_type" "embedded_postgres" }}'
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: pg-provider
+  serviceName: postgres
+  template:
+    metadata:
+      labels:
+        app: pg-provider
+    spec:
+      containers:
+      - env:
+        - name: PGDATA
+          value: /var/lib/postgresql/data/pgdata
+        # create a db called "postgres"
+        - name: POSTGRES_DB
+          value: postgres
+        # create admin user with name "postgres"
+        - name: POSTGRES_USER
+          value: postgres
+        # use admin password from secret
+        - name: POSTGRES_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              key: DB_PASSWORD
+              name: postgres
+        image: postgres:10
+        name: postgres
+        volumeMounts:
+        - mountPath: /var/lib/postgresql/data
+          name: pgdata
+      volumes:
+      - name: pgdata
+        persistentVolumeClaim:
+          claimName: pgdata
+  volumeClaimTemplates:
+  - metadata:
+      name: pgdata
+    spec:
+      accessModes:
+      - ReadWriteOnce
+      resources:
+        requests:
+          storage: 1Gi
+```
+
+Finally, let's add a Service object so we can route traffic to our postgres instance, again using `kots.io/when` to conditionally include this resource:
+
+```yaml
+# postgres-service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: postgres
+  labels:
+    app: pg-provider
+  annotations:
+    kots.io/when: '{{repl ConfigOptionEquals "postgres_type" "embedded_postgres" }}'
+spec:
+  ports:
+  - port: 5432
+  selector:
+    app: pg-provider
+  type: ClusterIP
+```
+
+### Validating the Embedded Database
+
+After you've added these resources, you can push a new release and update in the Admin Console.
+You should see the following in the deployment logs:
+
+![Embedded PG Deployed](/images/guides/kots/embedded-pg-deployed.png)
+
+We should now see an instance of Postgres running in our namespace as well.
+The consumer may still be crashlooping, but we can see the error is different now:
+
+```text
+$ kubectl logs -l app=pg-consumer
+psql: FATAL: password authentication failed for user "postgres"
+```
+
+This is because we still need to deliver the generated password to our workload pod.
+In `pg-consumer.yaml`, we'll remove this section:
+
+```yaml
+        - name: DB_PASSWORD
+          value: postgres
+```
+
+and replace it with:
+
+```yaml
+        - name: DB_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: postgres
+              key: DB_PASSWORD
+```
+
+The full Deployment should now look like the following YAML file:
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: pg-consumer
+spec:
+  selector:
+    matchLabels:
+      app: pg-consumer
+  template:
+    metadata:
+      labels:
+        app: pg-consumer
+    spec:
+      containers:
+      - name: pg-consumer
+        image: 'postgres:10'
+        # connect to the database every 20 seconds
+        command:
+        - /bin/sh
+        - -ec
+        - |
+          while :; do
+            sleep 20
+            PGPASSWORD=${DB_PASSWORD} \
+            psql --host ${DB_HOST} \
+              --port ${DB_PORT} \
+              --user ${DB_USER} \
+              --dbname ${DB_NAME} \
+              --command 'SELECT NOW()'
+          done
+        # hard coded for now, we'll wire these up later
+        env:
+        - name: DB_HOST
+          value: postgres
+        - name: DB_PORT
+          value: "5432"
+        - name: DB_USER
+          value: postgres
+        - name: DB_NAME
+          value: postgres
+        - name: DB_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: postgres
+              key: DB_PASSWORD
+```
+
+From here, make another release and deploy it.
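+
+As before, that means running the same release command from your application directory and then pulling the update from the Version History tab in the Admin Console:
+
+```
+replicated release create --auto
+```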
+
+You should see the consumer pod is now able to connect to the database:
+
+```text
+$ kubectl get pod
+NAME                               READY   STATUS    RESTARTS   AGE
+kotsadm-5bbf54df86-p7kqg           1/1     Running   0          144m
+kotsadm-api-cbccb97ff-b6qxp        1/1     Running   2          144m
+kotsadm-minio-0                    1/1     Running   0          144m
+kotsadm-operator-84477b5c4-tplcp   1/1     Running   0          144m
+kotsadm-postgres-0                 1/1     Running   0          144m
+pg-consumer-77b868d7d8-xdn9v       1/1     Running   0          20s
+postgres-0                         1/1     Running   0          6m22s
+```
+
+Checking the logs, we can connect now:
+
+```text
+$ kubectl logs -l app=pg-consumer
+ now
+-------------------------------
+ 2020-04-12 17:11:45.019293+00
+(1 row)
+
+ now
+-------------------------------
+ 2020-04-12 17:11:55.072041+00
+(1 row)
+```
+
+Now that we've configured our application to read from an embedded postgres instance, we'll switch to allowing the end user to provide their own database connection parameters.
+
+* * *
+
+## Connecting to an External Database
+
+In this section, we'll expand our configuration section to allow end users to bring their own Postgres instance.
+
+### Modifying the Config Screen
+
+Let's update our config screen to allow an end user to input some details about their database.
+We'll add the following YAML, noting the use of the `when` field to conditionally hide or show fields in the user-facing config screen:
+
+```yaml
+    - name: external_postgres_host
+      title: Postgres Host
+      when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+      type: text
+      default: postgres
+    - name: external_postgres_port
+      title: Postgres Port
+      when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+      type: text
+      default: "5432"
+    - name: external_postgres_user
+      title: Postgres Username
+      when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+      type: text
+      required: true
+    - name: external_postgres_password
+      title: Postgres Password
+      when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+      type: password
+      required: true
+    - name: external_postgres_db
+      title: Postgres Database
+      when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+      type: text
+      default: postgres
+```
+
+Your full configuration screen should now look something like the following YAML file:
+
+```yaml
+apiVersion: kots.io/v1beta1
+kind: Config
+metadata:
+  name: config-sample
+spec:
+  groups:
+  - name: database
+    title: Database
+    items:
+    - name: postgres_type
+      help_text: Would you like to use an embedded postgres instance, or connect to an external instance that you manage?
+      type: radio
+      title: Postgres
+      default: embedded_postgres
+      items:
+      - name: embedded_postgres
+        title: Embedded Postgres
+      - name: external_postgres
+        title: External Postgres
+    - name: embedded_postgres_password
+      hidden: true
+      type: password
+      value: "{{repl RandomString 32}}"
+    - name: external_postgres_host
+      title: Postgres Host
+      when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+      type: text
+      default: postgres
+    - name: external_postgres_port
+      title: Postgres Port
+      when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+      type: text
+      default: "5432"
+    - name: external_postgres_user
+      title: Postgres Username
+      when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+      type: text
+      required: true
+    - name: external_postgres_password
+      title: Postgres Password
+      when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+      type: password
+      required: true
+    - name: external_postgres_db
+      title: Postgres Database
+      when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
+      type: text
+      default: postgres
+```
+
+Let's save this and create a new release. After deploying the release in the Admin Console, click **Config** and set the toggle to "External Postgres" to see the new fields.
+
+To demonstrate that these are working, let's add some values that we know won't work, and confirm that selecting "External Postgres" removes our embedded postgres instance:
+
+![External PG Config Fake](/images/guides/kots/external-pg-config-fake.png)
+
+Save these settings, and then you'll be directed back to the Version History page to apply the change:
+
+![Deploy Config Change](/images/guides/kots/deploy-config-change.png)
+
+After this is deployed, we should see that the postgres StatefulSet has been removed, and that our sample application is back to failing:
+
+```text
+$ kubectl get pod
+NAME                               READY   STATUS    RESTARTS   AGE
+kotsadm-5bbf54df86-8ws98           1/1     Running   0          12m
+kotsadm-api-cbccb97ff-r7mz6        1/1     Running   2          12m
+kotsadm-minio-0                    1/1     Running   0          12m
+kotsadm-operator-84477b5c4-4gmbm   1/1     Running   0          12m
+kotsadm-postgres-0                 1/1     Running   0          12m
+pg-consumer-6bd78594d-n7nmw        0/1     Error     2          29s
+```
+
+You'll note that it is failing, but it is still using our hardcoded environment variables, not the user-entered config:
+
+```text
+$ kubectl logs -l app=pg-consumer
+psql: could not translate host name "postgres" to address: Name or service not known
+```
+
+In the next step, we'll wire the end-user configuration values into our service.
+
+### Mapping User Inputs
+
+To map the user-supplied configuration, we'll start by expanding the secret we created before, adding fields for the additional variables and using `{{repl if ... }}` blocks to switch between the embedded and external contexts.
+
+To start, add a field for the hostname, using Base64Encode. The template for each value must render on a single line, as shown in the following example.
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: postgres
+data:
+  DB_PASSWORD: '{{repl ConfigOption "embedded_postgres_password" | Base64Encode }}'
+  DB_HOST:
+    {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" }}{{repl Base64Encode "postgres" }}{{repl else}}{{repl ConfigOption "external_postgres_host" | Base64Encode }}{{repl end}}
+```
+
+Now that we have the value in our Secret, we can modify our deployment to consume it.
+Replace this text: + +```yaml + - name: DB_HOST + value: postgres +``` + +with this text: + +```yaml + - name: DB_HOST + valueFrom: + secretKeyRef: + name: postgres + key: DB_HOST +``` + +Your full deployment should look something like the following YAML file: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pg-consumer +spec: + selector: + matchLabels: + app: pg-consumer + template: + metadata: + labels: + app: pg-consumer + spec: + containers: + - name: pg-consumer + image: 'postgres:10' + # connect to the database every 20 seconds + command: + - /bin/sh + - -ec + - | + while :; do + sleep 20 + PGPASSWORD=${DB_PASSWORD} \ + psql --host ${DB_HOST} \ + --port ${DB_PORT} \ + --user ${DB_USER} \ + --dbname ${DB_NAME} \ + --command 'SELECT NOW()' + done + env: + - name: DB_HOST + valueFrom: + secretKeyRef: + name: postgres + key: DB_HOST + - name: DB_PORT + value: "5432" + - name: DB_USER + value: postgres + - name: DB_NAME + value: postgres + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: postgres + key: DB_PASSWORD +``` + +From here, let's create and deploy a release, and verify that the secret has the customer-provided value, base64 decoding the secret contents: + +```text +$ kubectl get secret postgres -o yaml | head -n 4 +apiVersion: v1 +data: + DB_HOST: ZmFrZQ== + DB_PASSWORD: ajNVWDd1RnRfc0NkVTJqOFU3Q25xUkxRQk5fUlh3RjA= +``` + +You can verify we pulled in our user-provided config by base64-decoding the `DB_HOST` field: + +```text +$ echo ZmFrZQ== | base64 --decode +fake +``` + +Checking on our service itself, we can verify that it's now trying to connect to the `fake` hostname instead of `postgres`: + +```text +$ kubectl logs -l app=pg-consumer +psql: could not translate host name "fake" to address: Name or service not known +``` + +We'll optionally wire this to a real external Postgres database later, but for now we'll proceed to add the rest of the fields. + +### Extending this to All Fields + +Now that we've wired the DB_HOST field all the way through, we'll do the same for the other fields. 
+In the end, your Secret and Deployment should look like the following YAML files: + +```yaml +# postgres-secret.yaml +apiVersion: v1 +kind: Secret +metadata: + name: postgres +data: + DB_HOST: >- + {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}} + {{repl Base64Encode "postgres" }} + {{repl else -}} + {{repl ConfigOption "external_postgres_host" | Base64Encode }} + {{repl end}} + DB_PORT: >- + {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}} + {{repl Base64Encode "5432" }} + {{repl else -}} + {{repl ConfigOption "external_postgres_port" | Base64Encode }} + {{repl end}} + DB_USER: >- + {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}} + {{repl Base64Encode "postgres" }} + {{repl else -}} + {{repl ConfigOption "external_postgres_user" | Base64Encode }} + {{repl end}} + DB_PASSWORD: >- + {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}} + {{repl ConfigOption "embedded_postgres_password" | Base64Encode }} + {{repl else -}} + {{repl ConfigOption "external_postgres_password" | Base64Encode }} + {{repl end}} + DB_NAME: >- + {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}} + {{repl Base64Encode "postgres" }} + {{repl else -}} + {{repl ConfigOption "external_postgres_db" | Base64Encode }} + {{repl end}} +``` + +```yaml +# pg-consumer.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pg-consumer +spec: + selector: + matchLabels: + app: pg-consumer + template: + metadata: + labels: + app: pg-consumer + spec: + containers: + - name: pg-consumer + image: 'postgres:10' + # connect to the database every 20 seconds + command: + - /bin/sh + - -ec + - | + while :; do + sleep 20 + PGPASSWORD=${DB_PASSWORD} \ + psql --host ${DB_HOST} \ + --port ${DB_PORT} \ + --user ${DB_USER} \ + --dbname ${DB_NAME} \ + --command 'SELECT NOW()' + done + env: + - name: DB_HOST + valueFrom: + secretKeyRef: + name: postgres + key: DB_HOST + - name: DB_PORT + valueFrom: + secretKeyRef: + name: postgres + key: DB_PORT + - name: DB_USER + valueFrom: + secretKeyRef: + name: postgres + key: DB_USER + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: postgres + key: DB_PASSWORD + - name: DB_NAME + valueFrom: + secretKeyRef: + name: postgres + key: DB_NAME +``` + +Optionally, you can be extra concise and collapse each individual `env` `valueFrom` into a single `envFrom` `secretRef` entry: + +```yaml +# pg-consumer.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pg-consumer +spec: + selector: + matchLabels: + app: pg-consumer + template: + metadata: + labels: + app: pg-consumer + spec: + containers: + - name: pg-consumer + image: 'postgres:10' + # connect to the database every 20 seconds + command: + - /bin/sh + - -ec + - | + while :; do + sleep 20 + PGPASSWORD=${DB_PASSWORD} \ + psql --host ${DB_HOST} \ + --port ${DB_PORT} \ + --user ${DB_USER} \ + --dbname ${DB_NAME} \ + --command 'SELECT NOW()' + done + envFrom: + - secretRef: + name: postgres +``` + + +After deploying this, you should see all of the fields in the secret: + +```text +$ kubectl get secret postgres -o yaml +apiVersion: v1 +data: + DB_HOST: ZmFrZQ== + DB_NAME: ZmFrZQ== + DB_PASSWORD: ZXh0cmEgZmFrZQ== + DB_PORT: NTQzMjE= + DB_USER: ZmFrZQ== +kind: Secret +# ...snip... 
+```
+
+We can also print the environment in our sample app to verify that all of the values are piped properly:
+
+```text
+$ kubectl exec $(kubectl get pod -l app=pg-consumer -o jsonpath='{.items[0].metadata.name}' ) -- /bin/sh -c 'printenv | grep DB_'
+DB_PORT=54321
+DB_NAME=fake
+DB_PASSWORD=extra fake
+DB_HOST=fake
+DB_USER=fake
+```
+
+### Testing Config Changes
+
+Now let's make some changes to the database credentials. In this case, we'll use a Postgres database provisioned in Amazon RDS, but you can use any external database.
+To start, head to the "Config" screen and input your values:
+
+![Real Postgres Values](/images/guides/kots/real-postgres-values.png)
+
+Let's save and apply this config and check in our pod again:
+
+```text
+$ kubectl exec $(kubectl get pod -l app=pg-consumer -o jsonpath='{.items[0].metadata.name}' ) -- /bin/sh -c 'printenv | grep DB_'
+DB_PORT=54321
+DB_NAME=fake
+DB_PASSWORD=extra fake
+DB_HOST=fake
+DB_USER=fake
+```
+
+Uh oh! It appears that our values did not get updated. If you've worked with Secrets before, you may know that there's a [long-standing issue in Kubernetes](https://github.com/kubernetes/kubernetes/issues/22368) where pods that load config from Secrets or ConfigMaps won't automatically restart when the underlying config is changed.
+There are some tricks to make this work, and in the next step we'll implement one of them, but for now we can delete the pod to verify that the configuration is being piped through to our sample application:
+
+```text
+$ kubectl delete pod -l app=pg-consumer
+pod "pg-consumer-6df9d5d7fd-bd5z6" deleted
+```
+
+If the pod is crashlooping, you might need to add `--force --grace-period 0` to force-delete it.
+In either case, once a new pod starts, we should now see it loading the correct config:
+
+```text
+$ kubectl exec $(kubectl get pod -l app=pg-consumer -o jsonpath='{.items[0].metadata.name}' ) -- /bin/sh -c 'printenv | grep DB_'
+DB_PORT=5432
+DB_NAME=postgres
+DB_PASSWORD=<redacted>
+DB_HOST=10.128.0.12
+DB_USER=postgres
+```
+
+### Triggering Restarts on Changes
+
+To automate this restart on changes, we're going to use a hash of all database parameters to trigger a rolling update whenever any database parameter is changed.
+We'll use a `hidden`, `readonly` field to store this hash in our config screen:
+
+```yaml
+    - name: external_postgres_confighash
+      hidden: true
+      readonly: true
+      type: text
+      value: '{{repl (sha256sum (print (ConfigOption "external_postgres_host") (ConfigOption "external_postgres_port") (ConfigOption "external_postgres_user") (ConfigOption "external_postgres_password") (ConfigOption "external_postgres_db") ))}}'
+```
+
+The `hidden` flag will hide it from the UI, and the `readonly` flag in this case will cause the value to be re-computed any time an upstream `ConfigOption` value changes.
+
+Next, let's add this as an annotation to our deployment's pod template at `spec.template.metadata.annotations`:
+
+```yaml
+annotations:
+  kots.io/config-hash: '{{repl ConfigOption "external_postgres_confighash"}}'
+```
+
+**Note**: There's nothing special about the `kots.io/config-hash` annotation; we could just as easily have called it `my-app-something-fake` instead.
+What matters here is that when the value of a pod template annotation changes, Kubernetes will roll out a new version of the pod, stopping the old one and thus picking up our config changes.
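+
+To see why this triggers a rollout, note that the rendered annotation is just a SHA-256 hash of the concatenated database parameters, so changing any single value produces a different annotation. A quick illustration of the same computation in a shell, using hypothetical values (not the ones from this guide; `sha256sum` assumes a Linux environment):
+
+```text
+$ printf '%s' '10.128.0.12' '5432' 'postgres' 's3cret' 'postgres' | sha256sum
+```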
+

Your full deployment should now look like the following YAML file:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pg-consumer
spec:
  selector:
    matchLabels:
      app: pg-consumer
  template:
    metadata:
      labels:
        app: pg-consumer
      annotations:
        kots.io/config-hash: '{{repl ConfigOption "external_postgres_confighash"}}'
    spec:
      containers:
      - name: pg-consumer
        image: 'postgres:10'
        # connect to the database every 20 seconds
        command:
          - /bin/sh
          - -ec
          - |
            while :; do
               sleep 20
               PGPASSWORD=${DB_PASSWORD} \
                 psql --host ${DB_HOST} \
                      --port ${DB_PORT} \
                      --user ${DB_USER} \
                      --dbname ${DB_NAME} \
                      --command 'SELECT NOW()'
            done
        envFrom:
          - secretRef:
              name: postgres
```


### Integrating a Real Database

If you'd like, at this point you can integrate a real database in your environment: fill out the configuration fields with real credentials. You'll know you did it right if your pg-consumer pod can connect.


---


# Step 2: Create an Application

# Step 2: Create an Application

After you install the Replicated CLI and create an API token, you can use the CLI to create a new application.

To create an application:

1. Run the following command to create an application named `cli-tutorial`:

   ```
   replicated app create cli-tutorial
   ```

   **Example output**:

   ```
   ID          NAME           SLUG           SCHEDULER
   2GmY...     cli-tutorial   cli-tutorial   kots
   ```

1. Export the application slug in the output of the `app create` command as an environment variable:

   ```
   export REPLICATED_APP=YOUR_SLUG
   ```
   Replace `YOUR_SLUG` with the slug for the application you created in the previous step.

1. Verify that both the `REPLICATED_API_TOKEN` environment variable that you created as part of [Step 1: Install the Replicated CLI](tutorial-cli-install-cli) and the `REPLICATED_APP` environment variable are set correctly:

   ```
   replicated release ls
   ```

   In the output of this command, you now see an empty list of releases for the application:

   ```
   SEQUENCE    CREATED    EDITED    ACTIVE_CHANNELS
   ```

## Next Step

Continue to [Step 3: Get the Sample Manifests](tutorial-cli-manifests) to download the manifest files for a sample Kubernetes application. You will use these manifest files to create the first release for the `cli-tutorial` application.


---


# Step 5: Create a Customer

# Step 5: Create a Customer

After promoting the first release for the `cli-tutorial` application, create a customer so that you can install the application.

A _customer_ is an object in the Vendor Portal that represents a single licensed user of your application. When you create a customer, you define entitlement information for the user, and the Vendor Portal generates a YAML license file for the customer that you can download.

When you install the application later in this tutorial, you will upload the license file that you create in this step to allow KOTS to create the application containers.

To create a customer and download the license file:

1. From the `replicated-cli-tutorial` directory, create a license for a customer named `Some-Big-Bank` that is assigned to the Unstable channel and expires in 10 days:

   ```
   replicated customer create \
     --name "Some-Big-Bank" \
     --expires-in "240h" \
     --channel "Unstable"
   ```
   The Unstable channel is the channel where you promoted the release in [Step 4: Create a Release](tutorial-cli-create-release).
Assigning the customer to a channel allows them to install the releases that are promoted to that channel.

   **Example output:**

   ```
   ID                             NAME            CHANNELS     EXPIRES                          TYPE
   2GuB3VYLjU5t9vNDK6byjgiTKUs    Some-Big-Bank   Unstable     2022-11-10 14:59:49 +0000 UTC    dev
   ```

1. Verify the customer creation details:

   ```
   replicated customer ls
   ```

   **Example output:**

   ```
   ID                             NAME            CHANNELS     EXPIRES                          TYPE
   2GuB3VYLjU5t9vNDK6byjgiTKUs    Some-Big-Bank   Unstable     2022-11-10 14:59:49 +0000 UTC    dev
   ```

1. Download the license file for the customer that you just created:

   ```
   replicated customer download-license \
     --customer "Some-Big-Bank"
   ```

   The license downloads to `stdout`.

   **Example output**:

   ```
   apiVersion: kots.io/v1beta1
   kind: License
   metadata:
     name: some-big-bank
   spec:
     appSlug: cli-tutorial
     channelID: 2GmYFUFzj8JOSLYw0jAKKJKFua8
     channelName: Unstable
     customerName: Some-Big-Bank
     endpoint: https://replicated.app
     entitlements:
       expires_at:
         description: License Expiration
         title: Expiration
         value: "2022-11-10T14:59:49Z"
         valueType: String
     isNewKotsUiEnabled: true
     licenseID: 2GuB3ZLQsU38F5SX3n03x8qBzeL
     licenseSequence: 1
     licenseType: dev
     signature: eyJsaW...
   ```

1. Download the license again, this time writing it to a descriptively named file in your Desktop folder:

   ```
   export LICENSE_FILE=~/Desktop/Some-Big-Bank-${REPLICATED_APP}-license.yaml
   replicated customer download-license --customer "Some-Big-Bank" > "${LICENSE_FILE}"
   ```

1. Verify that the license was written properly using either `cat` or `head`:

   ```
   head ${LICENSE_FILE}
   ```

   **Example output**:

   ```
   apiVersion: kots.io/v1beta1
   kind: License
   metadata:
     name: some-big-bank
   spec:
     appSlug: cli-tutorial
     channelID: 2GmYFUFzj8JOSLYw0jAKKJKFua8
     channelName: Unstable
     customerName: Some-Big-Bank
     endpoint: https://replicated.app
   ```

## Next Step

Continue to [Step 6: Install KOTS and the Application](tutorial-cli-install-app-manager) to get the installation commands from the Unstable channel, then install the KOTS components and the sample application in your cluster.


---


# Step 8: Create a New Version

# Step 8: Create a New Version

In this step, you make an edit to the Config custom resource manifest file in the `replicated-cli-tutorial/manifests` directory for the `cli-tutorial` application to create a new field on the **Config** page in the Admin Console. You will then create and promote a new release to the Unstable channel with your changes.

To create and promote a new version of the application:

1. In your local directory, go to the `replicated-cli-tutorial/manifests` folder and open the `kots-config.yaml` file in a text editor.

1. Copy and paste the following YAML into the file under the `example_default_value` field to create a new text field on the **Config** page:

   ```yaml
       - name: more_text
         title: Another Text Example
         type: text
         value: ""
         when: repl{{ ConfigOptionEquals "show_text_inputs" "1" }}
   ```
   The following shows the full YAML for the `kots-config.yaml` file after you add the new field:

   ```yaml
   ---
   apiVersion: kots.io/v1beta1
   kind: Config
   metadata:
     name: config-sample
   spec:
     groups:
       - name: example_settings
         title: My Example Config
         description: Configuration to serve as an example for creating your own. See [https://kots.io/reference/v1beta1/config/](https://kots.io/reference/v1beta1/config/) for configuration docs.
In this case, we provide example fields for configuring an Nginx welcome page. + items: + - name: show_text_inputs + title: Customize Text Inputs + help_text: "Show custom user text inputs" + type: bool + default: "0" + recommended: true + - name: example_default_value + title: Text Example (with default value) + type: text + value: "" + default: please change this value + when: repl{{ ConfigOptionEquals "show_text_inputs" "1" }} + # Add the new more_text field here + - name: more_text + title: Another Text Example + type: text + value: "" + when: repl{{ ConfigOptionEquals "show_text_inputs" "1" }} + - name: api_token + title: API token + type: password + props: + rows: 5 + when: repl{{ ConfigOptionEquals "show_text_inputs" "1" }} + - name: readonly_text_left + title: Readonly Text + type: text + value: "{{repl RandomString 10}}" + readonly: true + when: repl{{ ConfigOptionEquals "show_text_inputs" "1" }} + - name: hidden_text + title: Secret Key + type: password + hidden: true + value: "{{repl RandomString 40}}" + + ``` + +1. Open the `example-configmap.yaml` file. + +1. In the `example-configmap.yaml` file, copy and paste the following HTML to replace the `<body>` section: + + ``` + <body> + This is an example KOTS application. + <p>This is text from a user config value: '{{repl ConfigOption "example_default_value"}}' </p> + <p>This is more text from a user config value: '{{repl ConfigOption "more_text"}}' </p> + <p>This is a hidden value: '{{repl ConfigOption "hidden_text"}}'</p> + </body> + ``` + This creates a reference to the `more_text` field using a Replicated KOTS template function. The ConfigOption template function renders the user input from the configuration item that you specify. For more information, see [Config Context](/reference/template-functions-config-context) in _Reference_. + +1. Save the changes to both YAML files. + +1. Change to the root `replicated-cli-tutorial` directory, then run the following command to verify that there are no errors in the YAML: + + ``` + replicated release lint --yaml-dir=manifests + ``` + +1. Create a new release and promote it to the Unstable channel: + + ``` + replicated release create --auto + ``` + + **Example output**: + + ``` + • Reading manifests from ./manifests ✓ + • Creating Release ✓ + • SEQUENCE: 2 + • Promoting ✓ + • Channel 2GxpUm7lyB2g0ramqUXqjpLHzK0 successfully set to release 2 + ``` + +1. Type `y` and press **Enter** to continue with the defaults. + + **Example output**: + + ``` + RULE TYPE FILENAME LINE MESSAGE + + • Reading manifests from ./manifests ✓ + • Creating Release ✓ + • SEQUENCE: 2 + • Promoting ✓ + • Channel 2GmYFUFzj8JOSLYw0jAKKJKFua8 successfully set to release 2 + ``` + + The release is created and promoted to the Unstable channel with `SEQUENCE: 2`. + +1. Verify that the release was promoted to the Unstable channel: + + ``` + replicated release ls + ``` + **Example output**: + + ``` + SEQUENCE CREATED EDITED ACTIVE_CHANNELS + 2 2022-11-03T19:16:24Z 0001-01-01T00:00:00Z Unstable + 1 2022-11-03T18:49:13Z 0001-01-01T00:00:00Z + ``` + +## Next Step + +Continue to [Step 9: Update the Application](tutorial-cli-update-app) to return to the Admin Console and update the application to the new version that you promoted. + + +--- + + +# Step 4: Create a Release + +# Step 4: Create a Release + +Now that you have the manifest files for the sample Kubernetes application, you can create a release for the `cli-tutorial` application and promote the release to the Unstable channel. 
+ +By default, the Vendor Portal includes Unstable, Beta, and Stable release channels. The Unstable channel is intended for software vendors to use for internal testing, before promoting a release to the Beta or Stable channels for distribution to customers. For more information about channels, see [About Channels and Releases](releases-about). + +To create and promote a release to the Unstable channel: + +1. From the `replicated-cli-tutorial` directory, lint the application manifest files and ensure that there are no errors in the YAML: + + ``` + replicated release lint --yaml-dir=manifests + ``` + + If there are no errors, an empty list is displayed with a zero exit code: + + ```text + RULE TYPE FILENAME LINE MESSAGE + ``` + + For a complete list of the possible error, warning, and informational messages that can appear in the output of the `release lint` command, see [Linter Rules](/reference/linter). + +1. Initialize the project as a Git repository: + + ``` + git init + git add . + git commit -m "Initial Commit: CLI Tutorial" + ``` + + Initializing the project as a Git repository allows you to track your history. The Replicated CLI also reads Git metadata to help with the generation of release metadata, such as version labels. + +1. From the `replicated-cli-tutorial` directory, create a release with the default settings: + + ``` + replicated release create --auto + ``` + + The `--auto` flag generates release notes and metadata based on the Git status. + + **Example output:** + + ``` + • Reading Environment ✓ + + Prepared to create release with defaults: + + yaml-dir "./manifests" + promote "Unstable" + version "Unstable-ba710e5" + release-notes "CLI release of master triggered by exampleusername [SHA: d4173a4] [31 Oct 22 08:51 MDT]" + ensure-channel true + lint-release true + + Create with these properties? [Y/n] + ``` + +1. Type `y` and press **Enter** to confirm the prompt. + + **Example output:** + + ```text + • Reading manifests from ./manifests ✓ + • Creating Release ✓ + • SEQUENCE: 1 + • Promoting ✓ + • Channel VEr0nhJBBUdaWpPvOIK-SOryKZEwa3Mg successfully set to release 1 + ``` + The release is created and promoted to the Unstable channel. + +1. Verify that the release was promoted to the Unstable channel: + + ``` + replicated release ls + ``` + **Example output:** + + ```text + SEQUENCE CREATED EDITED ACTIVE_CHANNELS + 1 2022-10-31T14:55:35Z 0001-01-01T00:00:00Z Unstable + ``` + +## Next Step + +Continue to [Step 5: Create a Customer](tutorial-cli-create-customer) to create a customer license file that you will upload when installing the application. + + +--- + + +# Step 7: Configure the Application + +# Step 7: Configure the Application + +After you install KOTS, you can log in to the KOTS Admin Console. This procedure shows you how to make a configuration change for the application from the Admin Console, which is a typical task performed by end users. + +To configure the application: + +1. Access the Admin Console using `https://localhost:8800` if the installation script is still running. Otherwise, run the following command to access the Admin Console: + + ```bash + kubectl kots admin-console --namespace NAMESPACE + ``` + + Replace `NAMESPACE` with the namespace where KOTS is installed. + +1. Enter the password that you created in [Step 6: Install KOTS and the Application](tutorial-cli-install-app-manager) to log in to the Admin Console. + + The Admin Console dashboard opens. 
On the Admin Console **Dashboard** tab, users can take various actions, including viewing the application status, opening the application, checking for application updates, syncing their license, and setting up application monitoring on the cluster with Prometheus. + + ![Admin Console app dashboard](/images/tutorials/tutorial-admin-console-dashboard.png) + +1. On the **Config** tab, select the **Customize Text Inputs** checkbox. In the **Text Example** field, enter any text. For example, `Hello`. + + ![Admin Console configuration tab](/images/tutorials/tutorial-install-config-tab.png) + + This page displays configuration settings that are specific to the application. Software vendors define the fields that are displayed on this page in the KOTS Config custom resource. For more information, see [Config](/reference/custom-resource-config) in _Reference_. + +1. Click **Save config**. In the dialog that opens, click **Go to updated version**. + + The **Version history** tab opens. + +1. Click **Deploy** for the new version. Then click **Yes, deploy** in the confirmation dialog. + + ![Admin Console configuration tab](/images/tutorials/tutorial-install-version-history.png) + +1. Click **Open App** to view the application in your browser. + + ![web page that displays text](/images/tutorials/tutorial-open-app.png) + + Notice the text that you entered previously on the configuration page is displayed on the screen. + + :::note + If you do not see the new text, refresh your browser. + ::: + +## Next Step + +Continue to [Step 8: Create a New Version](tutorial-cli-create-new-version) to make a change to one of the manifest files for the `cli-tutorial` application, then use the Replicated CLI to create and promote a new release. + + +--- + + +# Step 6: Install KOTS and the Application + +# Step 6: Install KOTS and the Application + +The next step is to test the installation process for the application release that you promoted. Using the KOTS CLI, you will install KOTS and the sample application in your cluster. + +KOTS is the Replicated component that allows your users to install, manage, and upgrade your application. Users can interact with KOTS through the Admin Console or through the KOTS CLI. + +To install KOTS and the application: + +1. From the `replicated-cli-tutorial` directory, run the following command to get the installation commands for the Unstable channel, where you promoted the release for the `cli-tutorial` application: + + ``` + replicated channel inspect Unstable + ``` + + **Example output:** + + ``` + ID: 2GmYFUFzj8JOSLYw0jAKKJKFua8 + NAME: Unstable + DESCRIPTION: + RELEASE: 1 + VERSION: Unstable-d4173a4 + EXISTING: + + curl -fsSL https://kots.io/install | bash + kubectl kots install cli-tutorial/unstable + + EMBEDDED: + + curl -fsSL https://k8s.kurl.sh/cli-tutorial-unstable | sudo bash + + AIRGAP: + + curl -fSL -o cli-tutorial-unstable.tar.gz https://k8s.kurl.sh/bundle/cli-tutorial-unstable.tar.gz + # ... scp or sneakernet cli-tutorial-unstable.tar.gz to airgapped machine, then + tar xvf cli-tutorial-unstable.tar.gz + sudo bash ./install.sh airgap + ``` + This command prints information about the channel, including the commands for installing in: + * An existing cluster + * An _embedded cluster_ created by Replicated kURL + * An air gap cluster that is not connected to the internet + +1. If you have not already, configure kubectl access to the cluster you provisioned as part of [Set Up the Environment](tutorial-cli-setup#set-up-the-environment). 
For more information about setting the context for kubectl, see [Command line tool (kubectl)](https://kubernetes.io/docs/reference/kubectl/) in the Kubernetes documentation.

1. Run the `EXISTING` installation script with the following flags to automatically upload the license file and run the preflight checks at the same time you run the installation.

   **Example:**

   ```
   curl -fsSL https://kots.io/install | bash
   kubectl kots install cli-tutorial/unstable \
     --license-file ./LICENSE_YAML \
     --shared-password PASSWORD \
     --namespace NAMESPACE
   ```

   Replace:

   - `LICENSE_YAML` with the local path to your license file.
   - `PASSWORD` with a password to access the Admin Console.
   - `NAMESPACE` with the namespace where KOTS and the application will be installed.

   When the Admin Console is ready, the script prints the `http://localhost:8800` URL where you can access the Admin Console and the `http://localhost:8888` URL where you can access the application.

   **Example output**:

   ```
   • Deploying Admin Console
     • Creating namespace ✓
     • Waiting for datastore to be ready ✓
   • Waiting for Admin Console to be ready ✓
   • Waiting for installation to complete ✓
   • Waiting for preflight checks to complete ✓

   • Press Ctrl+C to exit
   • Go to http://localhost:8800 to access the Admin Console

   • Go to http://localhost:8888 to access the application
   ```

1. Verify that the Pods are running for the example NGINX service and for kotsadm:

   ```bash
   kubectl get pods --namespace NAMESPACE
   ```

   Replace `NAMESPACE` with the namespace where KOTS and the application were installed.

   **Example output:**

   ```
   NAME                       READY   STATUS    RESTARTS   AGE
   kotsadm-7ccc8586b8-n7vf6   1/1     Running   0          12m
   kotsadm-minio-0            1/1     Running   0          17m
   kotsadm-rqlite-0           1/1     Running   0          17m
   nginx-688f4b5d44-8s5v7     1/1     Running   0          11m
   ```

## Next Step

Continue to [Step 7: Configure the Application](tutorial-cli-deploy-app) to log in to the Admin Console and make configuration changes.


---


# Step 1: Install the Replicated CLI

# Step 1: Install the Replicated CLI

In this tutorial, you use the Replicated CLI to create and promote releases for a sample application with Replicated. The Replicated CLI is the CLI for the Replicated Vendor Portal.

This procedure describes how to create a Vendor Portal account, install the Replicated CLI on your local machine, and set up a `REPLICATED_API_TOKEN` environment variable for authentication.

To install the Replicated CLI:

1. Do one of the following to create an account in the Replicated Vendor Portal:
   * **Join an existing team**: If you have an existing Vendor Portal team, you can ask your team administrator to send you an invitation to join.
   * **Start a trial**: Alternatively, go to [vendor.replicated.com](https://vendor.replicated.com/) and click **Sign up** to create a 21-day trial account for completing this tutorial.

1. Run the following command to use [Homebrew](https://brew.sh) to install the CLI:

   ```
   brew install replicatedhq/replicated/cli
   ```

   For the latest Linux or macOS versions of the Replicated CLI, see the [replicatedhq/replicated](https://github.com/replicatedhq/replicated/releases) releases in GitHub.

1.
Verify the installation:

   ```
   replicated version
   ```
   **Example output**:

   ```json
   {
     "version": "0.37.2",
     "git": "8664ac3",
     "buildTime": "2021-08-24T17:05:26Z",
     "go": {
         "version": "go1.14.15",
         "compiler": "gc",
         "os": "darwin",
         "arch": "amd64"
     }
   }
   ```
   If you run a Replicated CLI command before setting up an API token, such as `replicated release ls`, you see the following error message:

   ```
   Error: set up APIs: Please provide your API token
   ```

1. Create an API token for the Replicated CLI:

   1. Log in to the Vendor Portal, and go to the [Account settings](https://vendor.replicated.com/account-settings) page.

   1. Under **User API Tokens**, click **Create user API token**. For Nickname, provide a name for the token. For Permissions, select **Read and Write**.

      For more information about User API tokens, see [User API Tokens](replicated-api-tokens#user-api-tokens) in _Generating API Tokens_.

   1. Click **Create Token**.

   1. Copy the string that appears in the dialog.

1. Export the string that you copied in the previous step to an environment variable named `REPLICATED_API_TOKEN`:

   ```bash
   export REPLICATED_API_TOKEN=YOUR_TOKEN
   ```
   Replace `YOUR_TOKEN` with the token string that you copied from the Vendor Portal in the previous step.

1. Verify the User API token:

   ```
   replicated release ls
   ```

   You see the following error message:

   ```
   Error: App not found:
   ```

## Next Step

Continue to [Step 2: Create an Application](tutorial-cli-create-app) to use the Replicated CLI to create an application.


---


# Step 3: Get the Sample Manifests

# Step 3: Get the Sample Manifests

To create a release for the `cli-tutorial` application, first create the Kubernetes manifest files for the application. This tutorial provides a set of sample manifest files for a simple Kubernetes application that deploys an NGINX service.

To get the sample manifest files:

1. Run the following command to create and change to a `replicated-cli-tutorial` directory:

   ```
   mkdir replicated-cli-tutorial
   cd replicated-cli-tutorial
   ```

1. Create a `/manifests` directory and download the sample manifest files from the [kots-default-yaml](https://github.com/replicatedhq/kots-default-yaml) repository in GitHub:

   ```
   mkdir ./manifests
   curl -fSsL https://github.com/replicatedhq/kots-default-yaml/archive/refs/heads/main.tar.gz | \
     tar xzv --strip-components=1 -C ./manifests \
     --exclude README.md --exclude LICENSE --exclude .gitignore
   ```

1. Verify that you can see the YAML files in the `replicated-cli-tutorial/manifests` folder:

   ```
   ls manifests/
   ```
   ```
   example-configmap.yaml   example-service.yaml   kots-app.yaml      kots-lint-config.yaml   kots-support-bundle.yaml
   example-deployment.yaml  k8s-app.yaml           kots-config.yaml   kots-preflight.yaml
   ```

## Next Step

Continue to [Step 4: Create a Release](tutorial-cli-create-release) to create and promote the first release for the `cli-tutorial` application using these manifest files.
+ + +--- + + +# Introduction and Setup + +import KubernetesTraining from "../partials/getting-started/_kubernetes-training.mdx" +import LabsIntro from "../partials/getting-started/_labs-intro.mdx" +import TutorialIntro from "../partials/getting-started/_tutorial-intro.mdx" +import RelatedTopics from "../partials/getting-started/_related-topics.mdx" +import VMRequirements from "../partials/getting-started/_vm-requirements.mdx" + +# Introduction and Setup + +<TutorialIntro/> + +The steps in this KOTS CLI-based tutorial show you how to use the Replicated CLI to perform these tasks. The Replicated CLI is the CLI for the Replicated Vendor Portal. You can use the Replicated CLI as a software vendor to programmatically create, configure, and manage your application artifacts, including application releases, release channels, customer entitlements, private image registries, and more. + +<KubernetesTraining/> + +## Set Up the Environment + +As part of this tutorial, you will install a sample application into a Kubernetes cluster. Before you begin, do the following to set up your environment: + +* Create a Kubernetes cluster that meets the minimum system requirements described in [KOTS Installation Requirements](/enterprise/installing-general-requirements). You can use any cloud provider or tool that you prefer to create a cluster, such as Google Kubernetes Engine (GKE), Amazon Web Services (AWS), or minikube. + + **Example:** + + For example, to create a cluster in GKE, run the following command in the gcloud CLI: + + ``` + gcloud container clusters create NAME --preemptible --no-enable-ip-alias + ``` + Where `NAME` is any name for the cluster. + +* Install kubectl, the Kubernetes command line tool. See [Install Tools](https://kubernetes.io/docs/tasks/tools/) in the Kubernetes documentation. +* Configure kubectl command line access to the cluster that you created. See [Command line tool (kubectl)](https://kubernetes.io/docs/reference/kubectl/) in the Kubernetes documentation. + +## Related Topics + +<RelatedTopics/> + + +--- + + +# Step 9: Update the Application + +# Step 9: Update the Application + +To test the new release that you promoted, return to the Admin Console in a browser to update the application. + +To update the application: + +1. Access the KOTS Admin Console using `https://localhost:8800` if the installation script is still running. Otherwise, run the following command to access the Admin Console: + + ```bash + kubectl kots admin-console --namespace NAMESPACE + ``` + + Replace `NAMESPACE` with the namespace where the Admin Console is installed. + +1. Go to the Version history page, and click **Check for update**. + + ![Admin Console version history page](/images/tutorials/tutorial-check-for-update.png) + + The Admin Console loads the new release that you promoted. + +1. Click **Deploy**. In the dialog, click **Yes, deploy** to deploy the new version. + + ![Admin Console version history page with new version](/images/tutorials/tutorial-deploy-app.png) + +1. After the Admin Console deploys the new version, go to the **Config** page where the **Another Text Example** field that you added is displayed. + + ![Admin Console configuration page with Another Text Example field](/images/tutorials/tutorial-new-config-item.png) + +1. In the new **Another Text Example** field, enter any text. Click **Save config**. + + The Admin Console notifies you that the configuration settings for the application have changed. 
+ + ![dialog over Admin Console configuration screen](/images/tutorials/tutorial-go-to-updated-version.png) + +1. In the dialog, click **Go to updated version**. + + The Admin Console loads the updated version on the Version history page. + +1. On the Version history page, click **Deploy** next to the latest version to deploy the configuration change. + + ![Admin Console version history page with new version](/images/tutorials/tutorial-deploy-config-change.png) + +1. Go to the **Dashboard** page and click **Open App**. The application displays the text that you added to the field. + + ![web page with text from the new configuration field](/images/tutorials/tutorial-updated-app.png) + + :::note + If you do not see the new text, refresh your browser. + ::: + +## Summary + +Congratulations! As part of this tutorial, you: +* Created and promoted a release for a Kubernetes application using the Replicated CLI +* Installed the application in a Kubernetes cluster +* Edited the manifest files for the application, adding a new configuration field and using template functions to reference the field +* Promoted a new release with your changes +* Used the Admin Console to update the application to the latest version + + +--- + + +# Step 2: Create an Application + +# Step 2: Create an Application + +Next, install the Replicated CLI and then create an application. + +To create an application: + +1. Install the Replicated CLI: + + ``` + brew install replicatedhq/replicated/cli + ``` + For more installation options, see [Installing the Replicated CLI](/reference/replicated-cli-installing). + +1. Authorize the Replicated CLI: + + ``` + replicated login + ``` + In the browser window that opens, complete the prompts to log in to your vendor account and authorize the CLI. + +1. Create an application named `Grafana`: + + ``` + replicated app create Grafana + ``` + +1. Set the `REPLICATED_APP` environment variable to the application that you created. This allows you to interact with the application using the Replicated CLI without needing to use the `--app` flag with every command: + + 1. Get the slug for the application that you created: + + ``` + replicated app ls + ``` + **Example output**: + ``` + ID NAME SLUG SCHEDULER + 2WthxUIfGT13RlrsUx9HR7So8bR Grafana grafana-python kots + ``` + In the example above, the application slug is `grafana-python`. + + :::info + The application _slug_ is a unique string that is generated based on the application name. You can use the application slug to interact with the application through the Replicated CLI and the Vendor API v3. The application name and slug are often different from one another because it is possible to create more than one application with the same name. + ::: + + 1. Set the `REPLICATED_APP` environment variable to the application slug. + + **MacOS Example:** + + ``` + export REPLICATED_APP=grafana-python + ``` + +## Next Step + +Add the Replicated SDK to the Helm chart and package the chart to an archive. See [Step 3: Package the Helm Chart](tutorial-config-package-chart). + +## Related Topics + +* [Create an Application](/vendor/vendor-portal-manage-app#create-an-application) +* [Installing the Replicated CLI](/reference/replicated-cli-installing) +* [replicated app create](/reference/replicated-cli-app-create) + +--- + + +# Step 5: Create a KOTS-Enabled Customer + +# Step 5: Create a KOTS-Enabled Customer + +After promoting the release, create a customer with the KOTS entitlement so that you can install the release with KOTS. 
+

To create a customer:

1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers > Create customer**.

   The **Create a new customer** page opens:

   ![Create a new customer page in the Vendor Portal](/images/create-customer.png)

   [View a larger version of this image](/images/create-customer.png)

1. For **Customer name**, enter a name for the customer. For example, `KOTS Customer`.

1. For **Channel**, select **Unstable**. This allows the customer to install releases promoted to the Unstable channel.

1. For **License type**, select **Development**.

1. For **License options**, verify that **KOTS Install Enabled** is enabled. This is the entitlement that allows the customer to install with KOTS.

1. Click **Save Changes**.

1. On the **Manage customer** page for the customer, click **Download license**. You will use the license file to install with KOTS.

   ![Download license button on the customer page](/images/customer-download-license.png)

   [View a larger version of this image](/images/customer-download-license.png)

## Next Step

Get the KOTS installation command and install. See [Step 6: Install the Release with KOTS](tutorial-config-install-kots).

## Related Topics

* [About Customers](/vendor/licenses-about)
* [Creating and Managing Customers](/vendor/releases-creating-customer)

---


# Step 4: Add the Chart Archive to a Release

import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import HelmChart from "../partials/getting-started/_grafana-helmchart.mdx"
import KotsApp from "../partials/getting-started/_grafana-kots-app.mdx"
import K8sApp from "../partials/getting-started/_grafana-k8s-app.mdx"
import Config from "../partials/getting-started/_grafana-config.mdx"

# Step 4: Add the Chart Archive to a Release

Next, add the Helm chart archive to a new release for the application in the Replicated vendor platform.

The purpose of this step is to configure a release that supports installation with KOTS. Additionally, this step defines a user-facing application configuration page that displays in the KOTS Admin Console during installation where users can set their own Grafana login credentials.

To create a release:

1. In the `grafana` directory, create a subdirectory named `manifests`:

   ```
   mkdir manifests
   ```

   You will add the files required to support installation with Replicated KOTS to this subdirectory.

1. Move the Helm chart archive that you created to `manifests`:

   ```
   mv grafana-9.6.5.tgz manifests
   ```

1. In the `manifests` directory, create the following YAML files to configure the release:

   ```
   cd manifests
   ```
   ```
   touch kots-app.yaml k8s-app.yaml kots-config.yaml grafana.yaml
   ```

1.
In each file, paste the corresponding YAML provided in the tabs below: + + <Tabs> + <TabItem value="kots-app" label="kots-app.yaml" default> + <h5>Description</h5> + <p>The KOTS Application custom resource enables features in the Admin Console such as branding, release notes, port forwarding, dashboard buttons, application status indicators, and custom graphs.</p><p>The YAML below provides a name for the application to display in the Admin Console, adds a custom <em>status informer</em> that displays the status of the <code>grafana</code> Deployment resource in the Admin Console dashboard, adds a custom application icon, and creates a port forward so that the user can open the Grafana application in a browser.</p> + <h5>YAML</h5> + <KotsApp/> + </TabItem> + <TabItem value="k8s-app" label="k8s-app.yaml"> + <h5>Description</h5> + <p>The Kubernetes Application custom resource supports functionality such as including buttons and links on the Admin Console dashboard. The YAML below adds an <strong>Open App</strong> button to the Admin Console dashboard that opens the application using the port forward configured in the KOTS Application custom resource.</p> + <h5>YAML</h5> + <K8sApp/> + </TabItem> + <TabItem value="config" label="kots-config.yaml"> + <h5>Description</h5> + <p>The Config custom resource specifies a user-facing configuration page in the Admin Console designed for collecting application configuration from users. The YAML below creates "Admin User" and "Admin Password" fields that will be shown to the user on the configuration page during installation. These fields will be used to set the login credentials for Grafana.</p> + <h5>YAML</h5> + <Config/> + </TabItem> + <TabItem value="helmchart" label="grafana.yaml"> + <h5>Description</h5> + <p>The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart.</p> + <p>The HelmChart custom resource below contains a <code>values</code> key, which creates a mapping to the Grafana <code>values.yaml</code> file. In this case, the <code>values.admin.user</code> and <code>values.admin.password</code> fields map to <code>admin.user</code> and <code>admin.password</code> in the Grafana <code>values.yaml</code> file.</p> + <p>During installation, KOTS renders the ConfigOption template functions in the <code>values.admin.user</code> and <code>values.admin.password</code> fields and then sets the corresponding Grafana values accordingly.</p> + <h5>YAML</h5> + <HelmChart/> + </TabItem> + </Tabs> + +1. From the `manifests` directory, lint the YAML files to confirm that there are no errors: + + ``` + replicated release lint --yaml-dir . + ``` + `--yaml-dir` is the path to the directory that contains the Helm chart archive and the manifest files required by KOTS. + + **Example output**: + + ``` + RULE TYPE FILENAME LINE MESSAGE + preflight-spec warn Missing preflight spec + troubleshoot-spec warn Missing troubleshoot spec + nonexistent-status-informer-object warn kots-app.yaml 8 Status informer points to a nonexistent kubernetes object. If this is a Helm resource, this warning can be ignored. + ``` + :::note + The output includes warning messages that list missing manifest files. These manifests control additional KOTS functionality and can be ignored for the purpose of this tutorial. The `nonexistent-status-informer-object` warning can also be ignored because the `grafana` Deployment resource that was added as a status informer in the KOTS Application custom resource is a Helm resource. + ::: + +1. 
Create a release: + + ``` + replicated release create --yaml-dir . + ``` + **Example output**: + ``` + • Reading manifests from . ✓ + • Creating Release ✓ + • SEQUENCE: 1 + ``` + +1. Log in to the Vendor Portal and go to **Releases**. + + The release that you created is listed under **All releases**. + + ![Release page in the Vendor Portal with one release](/images/grafana-release-seq-1.png) + + [View a larger version of this image](/images/grafana-release-seq-1.png) + +1. Click **Edit release** to view the files in the release. + + In the release editor, you can see the manifest files that you created, the Helm chart `.tgz` archive, and the `Chart.yaml` and `values.yaml` files for the Grafana Helm chart. You can also see the same warning messages that were displayed in the CLI output. + + ![Edit Release page in the Vendor Portal](/images/grafana-edit-release-seq-1.png) + + [View a larger version of this image](/images/grafana-edit-release-seq-1.png) + +1. At the top of the page, click **Promote**. + +1. In the dialog, for **Which channels you would like to promote this release to?**, select **Unstable**. Unstable is a default channel that is intended for use with internal testing. Click **Promote**. + + <img alt="Promote release dialog" src="/images/release-promote.png" width="400px"/> + + [View a larger version of this image](/images/release-promote.png) + +## Next Step + +Create a customer with the KOTS entitlement so that you can install the release in your cluster using Replicated KOTS. See [Step 5: Create a KOTS-Enabled Customer](tutorial-config-create-customer). + +## Related Topics + +* [About Channels and Releases](/vendor/releases-about) +* [Configuring the HelmChart Custom Resource](/vendor/helm-native-v2-using) +* [Config Custom Resource](/reference/custom-resource-config) +* [Manipulating Helm Chart Values with KOTS](/vendor/helm-optional-value-keys) + +--- + + +# Step 1: Get the Sample Chart and Test + +# Step 1: Get the Sample Chart and Test + +To begin, get the sample Grafana Helm chart from Bitnami, install the chart in your cluster using the Helm CLI, and then uninstall. The purpose of this step is to confirm that you can successfully install and access the application before adding the chart to a release in the Replicated vendor platform. + +To get the sample Grafana chart and test installation: + +1. Run the following command to pull and untar version 9.6.5 of the Bitnami Grafana Helm chart: + + ``` + helm pull --untar oci://registry-1.docker.io/bitnamicharts/grafana --version 9.6.5 + ``` + For more information about this chart, see the [bitnami/grafana](https://github.com/bitnami/charts/tree/main/bitnami/grafana) repository in GitHub. + +1. Change to the new `grafana` directory that was created: + ``` + cd grafana + ``` +1. View the files in the directory: + ``` + ls + ``` + The directory contains the following files: + ``` + Chart.lock Chart.yaml README.md charts templates values.yaml + ``` +1. Install the chart in your cluster: + + ``` + helm install grafana . --namespace grafana --create-namespace + ``` + To view the full installation instructions from Bitnami, see [Installing the Chart](https://github.com/bitnami/charts/blob/main/bitnami/grafana/README.md#installing-the-chart) in the `bitnami/grafana` repository. 
+

   After running the installation command, the following output is displayed:

   ```
   NAME: grafana
   LAST DEPLOYED: Thu Dec 14 14:54:50 2023
   NAMESPACE: grafana
   STATUS: deployed
   REVISION: 1
   TEST SUITE: None
   NOTES:
   CHART NAME: grafana
   CHART VERSION: 9.6.5
   APP VERSION: 10.2.2

   ** Please be patient while the chart is being deployed **

   1. Get the application URL by running these commands:
      echo "Browse to http://127.0.0.1:8080"
      kubectl port-forward svc/grafana 8080:3000 &

   2. Get the admin credentials:

      echo "User: admin"
      echo "Password: $(kubectl get secret grafana-admin --namespace grafana -o jsonpath="{.data.GF_SECURITY_ADMIN_PASSWORD}" | base64 -d)"
   ```

1. Watch the `grafana` Deployment until it is ready:

   ```
   kubectl get deploy grafana --namespace grafana --watch
   ```

1. When the Deployment is created, run the commands provided in the output of the installation command to get the Grafana login credentials:

   ```
   echo "User: admin"
   echo "Password: $(kubectl get secret grafana-admin --namespace grafana -o jsonpath="{.data.GF_SECURITY_ADMIN_PASSWORD}" | base64 -d)"
   ```

1. Run the commands provided in the output of the installation command to get the Grafana URL:

   ```
   echo "Browse to http://127.0.0.1:8080"
   kubectl port-forward svc/grafana 8080:3000 --namespace grafana
   ```

   :::note
   Include `--namespace grafana` in the `kubectl port-forward` command.
   :::

1. In a browser, go to the URL to open the Grafana login page:

   <img alt="Grafana login page" src="/images/grafana-login.png" width="300px"/>

   [View a larger version of this image](/images/grafana-login.png)

1. Log in using the credentials provided to open the Grafana dashboard:

   <img alt="Grafana dashboard" src="/images/grafana-dashboard.png" width="500px"/>

   [View a larger version of this image](/images/grafana-dashboard.png)

1. Uninstall the Helm chart:

   ```
   helm uninstall grafana --namespace grafana
   ```
   This command removes all the Kubernetes resources associated with the chart and uninstalls the `grafana` release.

1. Delete the namespace:

   ```
   kubectl delete namespace grafana
   ```

## Next Step

Log in to the Vendor Portal and create an application. See [Step 2: Create an Application](tutorial-config-create-app).

## Related Topics

* [Helm Install](https://helm.sh/docs/helm/helm_install/)
* [Helm Uninstall](https://helm.sh/docs/helm/helm_uninstall/)
* [Helm Create](https://helm.sh/docs/helm/helm_create/)
* [Helm Package](https://helm.sh/docs/helm/helm_package/)
* [bitnami/grafana](https://github.com/bitnami/charts/tree/main/bitnami/grafana)

---


# Step 6: Install the Release with KOTS

# Step 6: Install the Release with KOTS

Next, get the KOTS installation command from the Unstable channel in the Vendor Portal and then install the release using the customer license that you downloaded.

As part of installation, you will set Grafana login credentials on the KOTS Admin Console configuration page.

To install the release with KOTS:

1. In the [Vendor Portal](https://vendor.replicated.com), go to **Channels**. From the **Unstable** channel card, under **Install**, copy the **KOTS Install** command.

   ![KOTS Install tab on the Unstable channel card](/images/grafana-unstable-channel.png)

   [View a larger version of this image](/images/grafana-unstable-channel.png)

1.
On the command line, run the **KOTS Install** command that you copied:

   ```bash
   curl https://kots.io/install | bash
   kubectl kots install $REPLICATED_APP/unstable
   ```

   This installs the latest version of the KOTS CLI and the Admin Console. The Admin Console provides a user interface where you can upload the customer license file and deploy the application.

   For additional KOTS CLI installation options, including how to install without root access, see [Installing the KOTS CLI](/reference/kots-cli-getting-started).

   :::note
   KOTS v1.104.0 or later is required to deploy the Replicated SDK. You can verify the version of KOTS installed with `kubectl kots version`.
   :::

1. Complete the installation command prompts:

   1. For `Enter the namespace to deploy to`, enter `grafana`.

   1. For `Enter a new password to be used for the Admin Console`, provide a password to access the Admin Console.

   When the Admin Console is ready, the command prints the URL where you can access the Admin Console. At this point, the KOTS CLI is installed and the Admin Console is running, but the application is not yet deployed.

   **Example output:**

   ```bash
   Enter the namespace to deploy to: grafana
     • Deploying Admin Console
       • Creating namespace ✓
       • Waiting for datastore to be ready ✓
   Enter a new password for the Admin Console (6+ characters): ••••••••
     • Waiting for Admin Console to be ready ✓

     • Press Ctrl+C to exit
     • Go to http://localhost:8800 to access the Admin Console
   ```

1. With the port forward running, go to `http://localhost:8800` in a browser to access the Admin Console.

1. On the login page, enter the password that you created for the Admin Console.

1. On the license page, select the license file that you downloaded previously and click **Upload license**.

1. On the **Configure Grafana** page, enter a username and password. You will use these credentials to log in to Grafana.

   ![Admin Console config page with username and password fields](/images/grafana-config.png)

   [View a larger version of this image](/images/grafana-config.png)

1. Click **Continue**.

   The Admin Console dashboard opens. The application status changes from Missing to Unavailable while the `grafana` Deployment is being created.

   ![Admin Console dashboard showing unavailable application status](/images/grafana-unavailable.png)

   [View a larger version of this image](/images/grafana-unavailable.png)

1. On the command line, press Ctrl+C to exit the port forward.

1. Watch for the `grafana` Deployment to become ready:

   ```
   kubectl get deploy grafana --namespace grafana --watch
   ```

1. After the Deployment is ready, run the following command to confirm that the `grafana-admin` Secret was updated with the new password that you created on the **Configure Grafana** page:

   ```
   echo "Password: $(kubectl get secret grafana-admin --namespace grafana -o jsonpath="{.data.GF_SECURITY_ADMIN_PASSWORD}" | base64 -d)"
   ```

   The output of this command displays the password that you created.

1. Start the port forward again to access the Admin Console:

   ```
   kubectl kots admin-console --namespace grafana
   ```

1. Go to `http://localhost:8800` to open the Admin Console.

   On the Admin Console dashboard, the application status is now displayed as Ready:

   ![Admin console dashboard showing ready application status](/images/grafana-ready.png)

   [View a larger version of this image](/images/grafana-ready.png)

1.
Click **Open App** to open the Grafana login page in a browser. + + <img alt="Grafana login webpage" src="/images/grafana-login.png" width="300px"/> + + [View a larger version of this image](/images/grafana-login.png) + +1. On the Grafana login page, enter the username and password that you created on the **Configure Grafana** page. Confirm that you can log in to the application to access the Grafana dashboard: + + <img alt="Grafana dashboard" src="/images/grafana-dashboard.png" width="500px"/> + + [View a larger version of this image](/images/grafana-dashboard.png) + +1. On the command line, press Ctrl+C to exit the port forward. + +1. Uninstall the Grafana application from your cluster: + + ```bash + kubectl kots remove $REPLICATED_APP --namespace grafana --undeploy + ``` + **Example output**: + ``` + • Removing application grafana-python reference from Admin Console and deleting associated resources from the cluster ✓ + • Application grafana-python has been removed + ``` + +1. Remove the Admin Console from the cluster: + + 1. Delete the namespace where the Admin Console is installed: + + ``` + kubectl delete namespace grafana + ``` + 1. Delete the Admin Console ClusterRole and ClusterRoleBinding: + + ``` + kubectl delete clusterrole kotsadm-role + ``` + ``` + kubectl delete clusterrolebinding kotsadm-rolebinding + ``` + +## Next Step + +Congratulations! As part of this tutorial, you used the KOTS Config custom resource to define a configuration page in the Admin Console. You also used the KOTS HelmChart custom resource and KOTS ConfigOption template function to override the default Grafana login credentials with a user-supplied username and password. + +To learn more about how to customize the Config custom resource to create configuration fields for your application, see [Config](/reference/custom-resource-config). + +## Related Topics + +* [kots install](/reference/kots-cli-install/) +* [Installing the KOTS CLI](/reference/kots-cli-getting-started/) +* [Installing an Application](/enterprise/installing-overview) +* [Deleting the Admin Console and Removing Applications](/enterprise/delete-admin-console) + + +--- + + +# Step 3: Package the Helm Chart + +import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" +import UnauthorizedError from "../partials/replicated-sdk/_401-unauthorized.mdx" + +# Step 3: Package the Helm Chart + +Next, add the Replicated SDK as a dependency of the Helm chart and then package the chart into a `.tgz` archive. The purpose of this step is to prepare the Helm chart to be added to a release. + +To add the Replicated SDK and package the Helm chart: + +1. In your local file system, go to the `grafana` directory that was created as part of [Step 1: Get the Sample Chart and Test](tutorial-config-get-chart). + +1. In the `Chart.yaml` file, add the Replicated SDK as a dependency: + + <DependencyYaml/> + +1. Update dependencies and package the Helm chart to a `.tgz` chart archive: + + ```bash + helm package . --dependency-update + ``` + <UnauthorizedError/> + +## Next Step + +Create a release using the Helm chart archive. See [Step 4: Add the Chart Archive to a Release](tutorial-config-create-release). 
+

## Related Topics

* [About the Replicated SDK](/vendor/replicated-sdk-overview)
* [Helm Package](https://helm.sh/docs/helm/helm_package/)

---


# Introduction and Setup

# Introduction and Setup

This topic provides a summary of the goals and outcomes for the tutorial and also lists the prerequisites to set up your environment before you begin.

## Summary

This tutorial introduces you to mapping user-supplied values from the Replicated KOTS Admin Console configuration page to a Helm chart `values.yaml` file.

In this tutorial, you use a sample Helm chart to learn how to:

* Define a user-facing application configuration page in the KOTS Admin Console
* Set Helm chart values with the user-supplied values from the Admin Console configuration page

## Set Up the Environment

Before you begin, ensure that you have kubectl access to a Kubernetes cluster. You can use any cloud provider or tool that you prefer to create a cluster, such as [Replicated Compatibility Matrix](/vendor/testing-how-to), Google Kubernetes Engine (GKE), or minikube.

## Next Step

Get the sample Bitnami Helm chart and test installation with the Helm CLI. See [Step 1: Get the Sample Chart and Test](/vendor/tutorial-config-get-chart).

---


# Tutorial: Using ECR for Private Images

# Tutorial: Using ECR for Private Images

## Objective

The purpose of this tutorial is to walk you through how to configure Replicated KOTS to pull images from a private registry in Amazon's Elastic Container Registry (ECR). This tutorial demonstrates the differences between using public and private images with KOTS.

## Prerequisites

* To install the application in this tutorial, you must have a virtual machine (VM) that meets the following minimum requirements:
  * Ubuntu 18.04
  * At least 8 GB of RAM
  * 4 CPU cores
  * At least 40 GB of disk space

* To pull a public NGINX container and push it to a private repository in ECR as part of this tutorial, you must have the following:
  * An ECR repository
  * An AWS account to use with Docker to pull and push the public NGINX image to the ECR repository. The AWS account must be able to create a read-only user.
  * Docker
  * The AWS CLI

## Overview

The guide is divided into the following steps:

 1. [Set Up the Testing Environment](#set-up)

 2. [Configure Private Registries in Replicated](#2-configure-private-registries-in-replicated)

 3. [Update Definition Files](#3-update-definition-files)

 4. [Install the New Version](#4-install-the-new-version)

## 1. Set Up the Testing Environment {#set-up}

We are going to use the default NGINX deployment to create our application, and then update it to pull the same container from a private repository in ECR and note the differences.

### Create the Sample Application and Deploy the First Release

In this section, we cover at a high level the steps to create a new application and install it on a VM.

To create our sample application, follow these steps:

* Create a new application in the Replicated [vendor portal](https://vendor.replicated.com) and call it 'MySampleECRApp'.
* Create the first release using the default definition files and promote it to the *Unstable* channel.
* Create a customer, assign it to the *Unstable* channel, and download the license file.
* Install the application on a VM.

Log in to the Replicated admin console. To inspect what was deployed, look at the files under **View Files** in the admin console.
+
The Upstream files (the files from the release created in the vendor portal) show that we are pulling the public image.

![admin-console-view-files-upstream-release1](/images/guides/kots/priv-reg-ecr-ups-files-rel1.png)

We can further validate this if we switch back to the terminal window on the VM where we installed the application.
If we run `kubectl describe pod <pod-name>` on the NGINX pod, we can confirm that it was in fact pulled from the public repository.

![admin-console-kubectl-describe-release2](/images/guides/kots/priv-reg-ecr-kubctl-describe-rel1.png)

Now that we have the basic application installed, we are going to pull the same image, but from an ECR repository.

### Pull Public Image and Push to ECR

To keep the changes to a minimum and only focus on using a private registry, we are going to pull the public NGINX container (as specified in the `deployment.yaml` file) to our local environment, and then push it to a repository in ECR.

Let's start by pulling the public image:

```shell
$ docker pull nginx
```

You should have an output similar to this:

```shell
Using default tag: latest
latest: Pulling from library/nginx
d121f8d1c412: Pull complete
ebd81fc8c071: Pull complete
655316c160af: Pull complete
d15953c0e0f8: Pull complete
2ee525c5c3cc: Pull complete
Digest: sha256:c628b67d21744fce822d22fdcc0389f6bd763daac23a6b77147d0712ea7102d0
Status: Downloaded newer image for nginx:latest
docker.io/library/nginx:latest
```

Next, log in to ECR and push this container.
To use `docker login` with ECR, [install the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) and [configure it](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html) if not already done.
As part of this, we will need to provide the AWS Access Key ID and AWS Secret Key for a user that has permissions to create and push images to the repository. For more information about working with containers and ECR in the AWS CLI, see [Using Amazon ECR with the AWS CLI](https://docs.aws.amazon.com/AmazonECR/latest/userguide/getting-started-cli.html).

Just like with any other private registry, we need to know the registry endpoint to pass to the `docker login` command.
The syntax is as follows:

```shell
docker login [some.private.registry]:[port]
```

Here, the endpoint is the **[some.private.registry]:[port]** portion.

To determine the endpoint for ECR, log in to the AWS console and search for 'ECR', which should bring up Elastic Container Registry as an option as shown below.

![search-4-ecr](/images/guides/kots/priv-reg-ecr-search-4-ecr.png)

Select 'Elastic Container Registry' from the options in the dropdown to get to the list of repositories.

![ecr-repos](/images/guides/kots/priv-reg-ecr-repos.png)

As the screenshot above shows, the endpoint for each repository is listed under the URI column.
For the purpose of this guide, we will push the NGINX image to the **demo-apps** repository.

To determine the endpoint to use in the login command, use the URI without the repository name.

When logging in to ECR, use the AWS CLI to retrieve a temporary password for `docker login`.
+For example, to log in to ECR, we run the following command:
+
+```shell
+$ aws ecr get-login-password --region us-east-2 | docker login --username AWS --password-stdin 4999999999999.dkr.ecr.us-east-2.amazonaws.com
+```
+
+A successful login displays a `Login Succeeded` message.
+Before we can push the image to our private repository, we need to tag it.
+The new tag consists of:
+
+`<ECR repo endpoint>/<image>`
+
+For example, to tag the public NGINX image, we run the following command:
+
+```shell
+$ docker tag nginx 4999999999999.dkr.ecr.us-east-2.amazonaws.com/demo-apps/nginx
+```
+
+Assuming the tagging is successful, push the container to our ECR repository:
+
+```shell
+$ docker push 4999999999999.dkr.ecr.us-east-2.amazonaws.com/demo-apps/nginx
+The push refers to repository [4999999999999.dkr.ecr.us-east-2.amazonaws.com/demo-apps/nginx]
+908cf8238301: Pushed
+eabfa4cd2d12: Pushed
+60c688e8765e: Pushed
+f431d0917d41: Pushed
+07cab4339852: Pushed
+latest: digest: sha256:794275d96b4ab96eeb954728a7bf11156570e8372ecd5ed0cbc7280313a27d19 size: 1362
+```
+
+Our testing environment is all set.
+We are now ready to update Replicated to use the private registry.
+
+* * *
+
+## 2. Configure Private Registries in Replicated
+
+To configure a private registry in Replicated, we need to provide the same information we used to log in to ECR in the previous step:
+
+- **Endpoint**
+- **Username**
+- **Password**
+
+The difference is that we'll use a different user than the one we used previously. Since Replicated only needs to pull images, it is a best practice to create a 'read-only' user for this specific purpose.
+
+### Determine the Endpoint
+
+The endpoint should be the same as the one we provided in the previous step.
+
+### Setting Up the Service Account User
+
+Replicated only needs access to pull images from the private registry. Let's create a new user in AWS:
+
+![aws-new-user](/images/guides/kots/priv-reg-ecr-new-user.png)
+
+As far as permissions go, there are a couple of options, depending on the scope of access.
+If exposing all images to Replicated is an acceptable solution, the Amazon-provided [AmazonEC2ContainerRegistryReadOnly](https://docs.aws.amazon.com/AmazonECR/latest/userguide/ecr_managed_policies.html#AmazonEC2ContainerRegistryReadOnly) policy will work:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [{
+    "Effect": "Allow",
+    "Action": [
+      "ecr:GetAuthorizationToken",
+      "ecr:BatchCheckLayerAvailability",
+      "ecr:GetDownloadUrlForLayer",
+      "ecr:GetRepositoryPolicy",
+      "ecr:DescribeRepositories",
+      "ecr:ListImages",
+      "ecr:DescribeImages",
+      "ecr:BatchGetImage"
+    ],
+    "Resource": "*"
+  }]
+}
+```
+
+If you wish to limit Replicated to only certain images, use a policy like the following instead. Note that the `ecr:GetAuthorizationToken` action does not support resource-level permissions, so it is granted in a separate statement with `"Resource": "*"`:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "ecr:BatchCheckLayerAvailability",
+        "ecr:GetDownloadUrlForLayer",
+        "ecr:GetRepositoryPolicy",
+        "ecr:DescribeRepositories",
+        "ecr:ListImages",
+        "ecr:DescribeImages",
+        "ecr:BatchGetImage"
+      ],
+      "Resource": [
+        "arn:aws:ecr:us-east-1:<account-id>:repository/<repo1>",
+        "arn:aws:ecr:us-east-1:<account-id>:repository/<repo2>"
+      ]
+    },
+    {
+      "Effect": "Allow",
+      "Action": [
+        "ecr:GetAuthorizationToken"
+      ],
+      "Resource": "*"
+    }
+  ]
+}
+```
+
+We will need the AWS Access Key ID and AWS Secret Key in the next section, as these will map to the *Username* and *Password* fields.
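+
+If you prefer to create this service account user from the command line instead of the AWS console, the following is a minimal sketch using the AWS CLI. The user name is illustrative, and it attaches the Amazon-managed read-only policy for the simple case of exposing all images:
+
+```shell
+# Create a dedicated read-only user for Replicated (name is illustrative)
+$ aws iam create-user --user-name replicated-ecr-readonly
+
+# Attach the Amazon-managed read-only ECR policy
+$ aws iam attach-user-policy \
+    --user-name replicated-ecr-readonly \
+    --policy-arn arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly
+
+# Generate the AWS Access Key ID and AWS Secret Key for this user
+$ aws iam create-access-key --user-name replicated-ecr-readonly
+```
+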
+You can obtain these as you create the user or after the user has been created.
+
+### Enter Registry Information in Replicated
+
+First, we must link Replicated with the registry. To do this, click on **Add External Registry** from the *Images* tab.
+
+<img src="/images/add-external-registry.png" alt="Add external registry dialog" width="400px"></img>
+
+[View a larger version of this image](/images/add-external-registry.png)
+
+The values for the fields are:
+
+**Endpoint:**
+Enter the same URL used to log in to ECR.
+For example, to link to the same registry as the one in the previous section, we would enter *4999999999999.dkr.ecr.us-east-2.amazonaws.com*.
+
+**Username:**
+Enter the AWS Access Key ID for the user created in the [Setting Up the Service Account User](#setting-up-the-service-account-user) section.
+
+**Password:**
+Enter the AWS Secret Key for the user created in the [Setting Up the Service Account User](#setting-up-the-service-account-user) section.
+
+* * *
+
+## 3. Update Definition Files
+
+The last step is to update our definition files to pull the image from the ECR repository.
+To do this, we'll update the `deployment.yaml` file by adding the ECR registry URL to the `image` value.
+Below is an example using the registry URL used in this guide.
+
+```diff
+ spec:
+   containers:
+   - name: nginx
+-    image: nginx
++    image: 4999999999999.dkr.ecr.us-east-2.amazonaws.com/demo-apps/nginx
+     envFrom:
+```
+
+Save your changes, create the new release, and promote it to the *Unstable* channel.
+
+* * *
+
+## 4. Install the New Version
+
+To deploy the new version of the application, go back to the admin console and select the *Version History* tab.
+Click on **Check for Updates** and then **Deploy** when the new version is listed.
+To confirm that the new version was installed, check that the *Version History* tab looks like the screenshot below.
+
+![version-history](/images/guides/kots/priv-reg-ecr-version-history.png)
+
+Now we can inspect the changes in the definition files.
+Looking at the `deployment.yaml` upstream file, we see the image path as we set it in the [Update Definition Files](#3-update-definition-files) section.
+
+![admin-console-view-files-upstream-release2](/images/guides/kots/priv-reg-ecr-upstream-file-rel2.png)
+
+Because KOTS detects that it cannot pull this image anonymously, it proxies the image through the private registry that we configured. Looking at the `kustomization.yaml` downstream file, we can see that the image path has changed to use the Replicated proxy.
+
+![admin-console-view-files-downstream-release2](/images/guides/kots/priv-reg-ecr-downstream-file-rel2.png)
+
+The install of the new version should have created a new pod. If we run `kubectl describe pod` on the new NGINX pod, we can confirm that the image was in fact pulled from the ECR repository.
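+
+For example, a quick way to check this from the terminal (the pod name below is illustrative; use `kubectl get pods` to find yours):
+
+```shell
+# List the pods to find the name of the new NGINX pod
+$ kubectl get pods
+
+# Check which registry the pod's image was pulled from
+$ kubectl describe pod nginx-7d4f8c9b5d-x2x8p | grep "Image:"
+```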
+ +![admin-console-kubectl-describe-release2](/images/guides/kots/priv-reg-ecr-kubectl-describe-rel2.png) + +* * * + +## Related Topics + +- [Connecting to an External Registry](packaging-private-images/) + +- [Replicated Community Thread on AWS Roles and Permissions](https://help.replicated.com/community/t/what-are-the-minimal-aws-iam-permissions-needed-to-proxy-images-from-elastic-container-registry-ecr/267) + +- [AWS ECR Managed Policies Documentation](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecr_managed_policies.html) + + +--- + + +# Step 1: Create an Application + +# Step 1: Create an Application + +To begin, install the Replicated CLI and create an application in the Replicated Vendor Portal. + +An _application_ is an object that has its own customers, channels, releases, license fields, and more. A single team can have more than one application. It is common for teams to have multiple applications for the purpose of onboarding, testing, and iterating. + +To create an application: + +1. Install the Replicated CLI: + + ``` + brew install replicatedhq/replicated/cli + ``` + For more installation options, see [Installing the Replicated CLI](/reference/replicated-cli-installing). + +1. Authorize the Replicated CLI: + + ``` + replicated login + ``` + In the browser window that opens, complete the prompts to log in to your vendor account and authorize the CLI. + +1. Create an application named `Gitea`: + + ``` + replicated app create Gitea + ``` + +1. Set the `REPLICATED_APP` environment variable to the application that you created. This allows you to interact with the application using the Replicated CLI without needing to use the `--app` flag with every command: + + 1. Get the slug for the application that you created: + + ``` + replicated app ls + ``` + **Example output**: + ``` + ID NAME SLUG SCHEDULER + 2WthxUIfGT13RlrsUx9HR7So8bR Gitea gitea-kite kots + ``` + In the example above, the application slug is `gitea-kite`. + + :::note + The application _slug_ is a unique string that is generated based on the application name. You can use the application slug to interact with the application through the Replicated CLI and the Vendor API v3. The application name and slug are often different from one another because it is possible to create more than one application with the same name. + ::: + + 1. Set the `REPLICATED_APP` environment variable to the application slug. + + **Example:** + + ``` + export REPLICATED_APP=gitea-kite + ``` + +## Next Step + +Add the Replicated SDK to the Helm chart and package the chart to an archive. See [Step 2: Package the Helm Chart](tutorial-embedded-cluster-package-chart). + +## Related Topics + +* [Create an Application](/vendor/vendor-portal-manage-app#create-an-application) +* [Installing the Replicated CLI](/reference/replicated-cli-installing) +* [replicated app create](/reference/replicated-cli-app-create) + +--- + + +# Step 4: Create an Embedded Cluster-Enabled Customer + +# Step 4: Create an Embedded Cluster-Enabled Customer + +After promoting the release, create a customer with the Replicated KOTS and Embedded Cluster entitlements so that you can install the release with Embedded Cluster. A _customer_ represents a single licensed user of your application. + +To create a customer: + +1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers > Create customer**. 
+
+   The **Create a new customer** page opens:
+
+   ![Create a new customer page in the Vendor Portal](/images/create-customer.png)
+
+   [View a larger version of this image](/images/create-customer.png)
+
+1. For **Customer name**, enter a name for the customer. For example, `Example Customer`.
+
+1. For **Channel**, select **Unstable**. This allows the customer to install releases promoted to the Unstable channel.
+
+1. For **License type**, select **Development**.
+
+1. For **License options**, enable the following entitlements:
+   * **KOTS Install Enabled**
+   * **Embedded Cluster Enabled**
+
+1. Click **Save Changes**.
+
+## Next Step
+
+Get the Embedded Cluster installation commands and install. See [Step 5: Install the Release on a VM](tutorial-embedded-cluster-install).
+
+## Related Topics
+
+* [About Customers](/vendor/licenses-about)
+* [Creating and Managing Customers](/vendor/releases-creating-customer)
+
+---
+
+
+# Step 3: Add the Chart Archive to a Release
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import HelmChartCr from "../partials/getting-started/_gitea-helmchart-cr-ec.mdx"
+import KotsCr from "../partials/getting-started/_gitea-kots-app-cr-ec.mdx"
+import K8sCr from "../partials/getting-started/_gitea-k8s-app-cr.mdx"
+import EcCr from "../partials/embedded-cluster/_ec-config.mdx"
+
+# Step 3: Add the Chart Archive to a Release
+
+Next, add the Helm chart archive to a new release for the application in the Replicated Vendor Portal. The purpose of this step is to configure a release that supports installation with Replicated Embedded Cluster.
+
+A _release_ represents a single version of your application and contains your application files. Each release is promoted to one or more _channels_. Channels provide a way to progress releases through the software development lifecycle: from internal testing, to sharing with early-adopters, and finally to making the release generally available.
+
+To create a release:
+
+1. In the `gitea` directory, create a subdirectory named `manifests`:
+
+   ```
+   mkdir manifests
+   ```
+
+   You will add the files required to support installation with Replicated KOTS to this subdirectory.
+
+1. Move the Helm chart archive that you created to `manifests`:
+
+   ```
+   mv gitea-1.0.6.tgz manifests
+   ```
+
+1. In `manifests`, create the YAML manifests required by KOTS:
+   ```
+   cd manifests
+   ```
+   ```
+   touch gitea.yaml kots-app.yaml k8s-app.yaml embedded-cluster.yaml
+   ```
+
+1. In each of the files that you created, paste the corresponding YAML provided in the tabs below:
+
+   <Tabs>
+   <TabItem value="helmchart" label="gitea.yaml" default>
+   <h5>Description</h5>
+   <p>The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart. The <code>name</code> and <code>chartVersion</code> listed in the HelmChart custom resource must match the name and version of a Helm chart archive in the release. The <a href="/vendor/helm-optional-value-keys#conditionally-set-values"><code>optionalValues</code></a> field sets the specified Helm values when a given conditional statement evaluates to true. In this case, if the application is installed with Embedded Cluster, then the Gitea service type is set to <code>NodePort</code> and the node port is set to <code>"32000"</code>. 
This will allow Gitea to be accessed from the local machine after deployment.</p>
+   <h5>YAML</h5>
+   <HelmChartCr/>
+   </TabItem>
+   <TabItem value="kots-app" label="kots-app.yaml">
+   <h5>Description</h5>
+   <p>The KOTS Application custom resource enables features in the Replicated Admin Console such as branding, release notes, application status indicators, and custom graphs.</p><p>The YAML below provides a name for the application to display in the Admin Console, adds a custom <em>status informer</em> that displays the status of the <code>gitea</code> Deployment resource in the Admin Console dashboard, adds a custom application icon, and adds the port where the Gitea service can be accessed so that the user can open the application after installation.</p>
+   <h5>YAML</h5>
+   <KotsCr/>
+   </TabItem>
+   <TabItem value="k8s-app" label="k8s-app.yaml">
+   <h5>Description</h5>
+   <p>The Kubernetes Application custom resource supports functionality such as including buttons and links on the Replicated Admin Console dashboard. The YAML below adds an <strong>Open App</strong> button to the Admin Console dashboard that opens the application using the service port defined in the KOTS Application custom resource.</p>
+   <h5>YAML</h5>
+   <K8sCr/>
+   </TabItem>
+   <TabItem value="ec" label="embedded-cluster.yaml">
+   <h5>Description</h5>
+   <p>To install your application with Embedded Cluster, an Embedded Cluster Config must be present in the release. At minimum, the Embedded Cluster Config sets the version of Embedded Cluster that will be installed. You can also define several characteristics about the cluster.</p>
+   <h5>YAML</h5>
+   <EcCr/>
+   </TabItem>
+   </Tabs>
+
+1. From the `manifests` directory, lint the YAML files to confirm that there are no errors:
+
+   ```bash
+   replicated release lint --yaml-dir .
+   ```
+   **Example output**:
+   ```bash
+   RULE                                  TYPE    FILENAME         LINE    MESSAGE
+   config-spec                           warn                             Missing config spec
+   preflight-spec                        warn                             Missing preflight spec
+   troubleshoot-spec                     warn                             Missing troubleshoot spec
+   nonexistent-status-informer-object    warn    kots-app.yaml    8       Status informer points to a nonexistent kubernetes object. If this is a Helm resource, this warning can be ignored.
+   ```
+   :::note
+   You can ignore any warning messages for the purpose of this tutorial.
+   :::
+
+1. Create a release:
+
+   ```
+   replicated release create --yaml-dir .
+   ```
+   **Example output**:
+   ```
+   • Reading manifests from . ✓
+   • Creating Release ✓
+     • SEQUENCE: 1
+   ```
+
+1. Log in to the Vendor Portal and go to **Releases**.
+
+   The release that you created is listed under **All releases**.
+
+   ![Release page in the Vendor Portal with one release](/images/gitea-ec-release-seq-1.png)
+
+   [View a larger version of this image](/images/gitea-ec-release-seq-1.png)
+
+1. Click the dot menu then **Edit release** to view the files in the release.
+
+   ![dot menu](/images/gitea-ec-release-edit-button.png)
+
+   [View a larger version of this image](/images/gitea-ec-release-edit-button.png)
+
+   In the release editor, you can see the manifest files that you created, the Helm chart `.tgz` archive, and the `Chart.yaml` and `values.yaml` files for the Gitea Helm chart. You can also see the same warning messages that were displayed in the CLI output.
+
+   ![Edit Release page in the Vendor Portal](/images/gitea-ec-release-edit-seq-1.png)
+
+   [View a larger version of this image](/images/gitea-ec-release-edit-seq-1.png)
+
+1. At the top of the page, click **Promote**.
+
+1. In the dialog, for **Which channels you would like to promote this release to?**, select **Unstable**. Unstable is a default channel that is intended for use with internal testing. 
Click **Promote**. + + <img alt="Promote release dialog" src="/images/release-promote.png" width="400px"/> + + [View a larger version of this image](/images/release-promote.png) + +## Next Step + +Create a customer with the Embedded Cluster entitlement so that you can install the release using Embedded Cluster. See [Step 4: Create an Embedded Cluster-Enabled Customer](tutorial-embedded-cluster-create-customer). + +## Related Topics + +* [About Channels and Releases](/vendor/releases-about) +* [Configuring the HelmChart Custom Resource](/vendor/helm-native-v2-using) +* [Embedded Cluster Config](/reference/embedded-config) +* [Setting Helm Values with KOTS](/vendor/helm-optional-value-keys) + +--- + + +# Step 5: Install the Release on a VM + +import KotsVerReq from "../partials/replicated-sdk/_kots-version-req.mdx" + +# Step 5: Install the Release on a VM + +Next, get the customer-specific Embedded Cluster installation commands and then install the release on a Linux VM. + +To install the release with Embedded Cluster: + +1. In the [Vendor Portal](https://vendor.replicated.com), go to **Customers**. Click on the name of the customer you created. + +1. Click **Install instructions > Embedded cluster**. + + <img alt="Customer install instructions dropdown" src="/images/customer-install-instructions-dropdown.png" width="600px"/> + + [View a larger version of this image](/images/customer-install-instructions-dropdown.png) + + The **Embedded cluster install instructions** dialog opens. + + <img alt="Embedded Cluster install instructions dialog" src="/images/embedded-cluster-install-dialog-latest.png" width="600px"/> + + [View a larger version of this image](/images/embedded-cluster-install-dialog-latest.png) + +1. On the command line, SSH onto your Linux VM. + +1. Run the first command in the **Embedded cluster install instructions** dialog to download the latest release. + +1. Run the second command to extract the release. + +1. Run the third command to install the release. + +1. When prompted, enter a password for accessing the KOTS Admin Console. + + The installation command takes a few minutes to complete. + +1. When the installation command completes, go to the URL provided in the output to log in to the Admin Console. + + **Example output:** + + ```bash + ✔ Host files materialized + ? Enter an Admin Console password: ******** + ? Confirm password: ******** + ✔ Node installation finished + ✔ Storage is ready! + ✔ Embedded Cluster Operator is ready! + ✔ Admin Console is ready! + ✔ Finished! + Visit the admin console to configure and install gitea-kite: http://104.155.145.60:30000 + ``` + + At this point, the cluster is provisioned and the KOTS Admin Console is deployed, but the application is not yet installed. + +1. Bypass the browser TLS warning by clicking **Continue to Setup**. + +1. Click **Advanced > Proceed**. + +1. On the **HTTPS for the Gitea Admin Console** page, select **Self-signed** and click **Continue**. + +1. On the login page, enter the Admin Console password that you created during installation and click **Log in**. + +1. On the **Nodes** page, you can view details about the VM where you installed, including its node role, status, CPU, and memory. Users can also optionally add additional nodes on this page before deploying the application. Click **Continue**. + + The Admin Console dashboard opens. + +1. In the **Version** section, for version `0.1.0`, click **Deploy** then **Yes, Deploy**. 
+
+   The application status changes from Missing to Unavailable while the `gitea` Deployment is being created.
+
+1. After a few minutes, when the application status is Ready, click **Open App** to view the Gitea application in a browser:
+
+   ![Admin console dashboard showing ready status](/images/gitea-ec-ready.png)
+
+   [View a larger version of this image](/images/gitea-ec-ready.png)
+
+   <img alt="Gitea app landing page" src="/images/gitea-app.png" width="600px"/>
+
+   [View a larger version of this image](/images/gitea-app.png)
+
+1. In another browser window, open the [Vendor Portal](https://vendor.replicated.com/) and go to **Customers**. Select the customer that you created.
+
+   On the **Reporting** page for the customer, you can see details about the customer's license and installed instances:
+
+   ![Customer reporting page](/images/gitea-customer-reporting-ec.png)
+
+   [View a larger version of this image](/images/gitea-customer-reporting-ec.png)
+
+1. On the **Reporting** page, under **Instances**, click on the instance that you just installed to open the instance details page.
+
+   On the instance details page, you can see additional insights such as the version of Embedded Cluster that is running, instance status and uptime, and more:
+
+   ![Customer instance details page](/images/gitea-instance-insights-ec.png)
+
+   [View a larger version of this image](/images/gitea-instance-insights-ec.png)
+
+1. (Optional) Reset the node to remove the cluster and the application from the node. This is useful for iteration and development so that you can reset a machine and reuse it instead of having to procure another machine.
+
+   ```bash
+   sudo ./APP_SLUG reset --reboot
+   ```
+   Where `APP_SLUG` is the unique slug for the application that you created. You can find the application slug by running `replicated app ls` on the command line on your local machine.
+
+## Summary
+
+Congratulations! As part of this tutorial, you created a release in the Replicated Vendor Portal and installed the release with Replicated Embedded Cluster in a VM. To learn more about Embedded Cluster, see [Embedded Cluster Overview](embedded-overview).
+
+## Related Topics
+
+* [Embedded Cluster Overview](embedded-overview)
+* [Customer Reporting](/vendor/customer-reporting)
+* [Instance Details](/vendor/instance-insights-details)
+* [Reset a Node](/vendor/embedded-using#reset-a-node)
+
+---
+
+
+# Step 2: Package the Gitea Helm Chart
+
+import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx"
+import UnauthorizedError from "../partials/replicated-sdk/_401-unauthorized.mdx"
+
+# Step 2: Package the Gitea Helm Chart
+
+Next, get the sample Gitea Helm chart from Bitnami. Add the Replicated SDK as a dependency of the chart, then package the chart into a `.tgz` archive. The purpose of this step is to prepare the Helm chart to be added to a release.
+
+The Replicated SDK is a Helm chart that can be optionally added as a dependency of your application Helm chart. The SDK is installed as a small service running alongside your application, and provides an in-cluster API that you can use to embed Replicated features into your application. Additionally, the Replicated SDK provides access to insights and telemetry for instances of your application installed with the Helm CLI.
+
+To add the Replicated SDK and package the Helm chart:
+
+1. 
Run the following command to pull and untar version 1.0.6 of the Bitnami Gitea Helm chart: + + ``` + helm pull --untar oci://registry-1.docker.io/bitnamicharts/gitea --version 1.0.6 + ``` + For more information about this chart, see the [bitnami/gitea](https://github.com/bitnami/charts/tree/main/bitnami/gitea) repository in GitHub. + +1. Change to the new `gitea` directory that was created: + ``` + cd gitea + ``` +1. View the files in the directory: + ``` + ls + ``` + The directory contains the following files: + ``` + Chart.lock Chart.yaml README.md charts templates values.yaml + ``` + +1. In the `Chart.yaml` file, add the Replicated SDK as a dependency: + + <DependencyYaml/> + +1. Update dependencies and package the Helm chart to a `.tgz` chart archive: + + ```bash + helm package . --dependency-update + ``` + <UnauthorizedError/> + +## Next Step + +Create a release using the Helm chart archive. See [Step 3: Add the Chart Archive to a Release](tutorial-embedded-cluster-create-release). + +## Related Topics + +* [Packaging a Helm Chart for a Release](/vendor/helm-install-release.md) +* [About the Replicated SDK](/vendor/replicated-sdk-overview) +* [Helm Package](https://helm.sh/docs/helm/helm_package/) + + +--- + + +# Introduction and Setup + +import Requirements from "../partials/embedded-cluster/_requirements.mdx" + +# Introduction and Setup + +This topic provides a summary of the goals and outcomes for the tutorial and also lists the prerequisites to set up your environment before you begin. + +## Summary + +This tutorial introduces you to installing an application on a Linux virtual machine (VM) using Replicated Embedded Cluster. Embedded Cluster allows you to distribute a Kubernetes cluster and your application together as a single appliance, making it easy for enterprise users to install, update, and manage the application and the cluster in tandem. + +In this tutorial, you use a sample application to learn how to: + +* Add the Embedded Cluster Config to a release +* Use Embedded Cluster to install the application on a Linux VM + +## Set Up the Environment + +Before you begin, ensure that you have access to a VM that meets the requirements for Embedded Cluster: + +<Requirements/> + +## Next Step + +Install the Replicated CLI and create an application in the Replicated Vendor Portal. See [Step 1: Create an Application](/vendor/tutorial-embedded-cluster-create-app). + +--- + + +# Step 2: Create an Application + +# Step 2: Create an Application + +Next, install the Replicated CLI and then create an application. + +An _application_ is an object that has its own customers, channels, releases, license fields, and more. A single team can have more than one application. It is common for teams to have multiple applications for the purpose of onboarding, testing, and iterating. + +To create an application: + +1. Install the Replicated CLI: + + ``` + brew install replicatedhq/replicated/cli + ``` + For more installation options, see [Installing the Replicated CLI](/reference/replicated-cli-installing). + +1. Authorize the Replicated CLI: + + ``` + replicated login + ``` + In the browser window that opens, complete the prompts to log in to your vendor account and authorize the CLI. + +1. Create an application named `Gitea`: + + ``` + replicated app create Gitea + ``` + +1. Set the `REPLICATED_APP` environment variable to the application that you created. This allows you to interact with the application using the Replicated CLI without needing to use the `--app` flag with every command: + + 1. 
Get the slug for the application that you created:
+
+      ```
+      replicated app ls
+      ```
+      **Example output**:
+      ```
+      ID                             NAME          SLUG          SCHEDULER
+      2WthxUIfGT13RlrsUx9HR7So8bR    Gitea         gitea-boxer   kots
+      ```
+      In the example above, the application slug is `gitea-boxer`.
+
+      :::note
+      The application _slug_ is a unique string that is generated based on the application name. You can use the application slug to interact with the application through the Replicated CLI and the Vendor API v3. The application name and slug are often different from one another because it is possible to create more than one application with the same name.
+      :::
+
+   1. Set the `REPLICATED_APP` environment variable to the application slug.
+
+      **Example:**
+
+      ```
+      export REPLICATED_APP=gitea-boxer
+      ```
+
+## Next Step
+
+Add the Replicated SDK to the Helm chart and package the chart to an archive. See [Step 3: Package the Helm Chart](tutorial-kots-helm-package-chart).
+
+## Related Topics
+
+* [Create an Application](/vendor/vendor-portal-manage-app#create-an-application)
+* [Installing the Replicated CLI](/reference/replicated-cli-installing)
+* [replicated app create](/reference/replicated-cli-app-create)
+
+---
+
+
+# Step 5: Create a KOTS-Enabled Customer
+
+# Step 5: Create a KOTS-Enabled Customer
+
+After promoting the release, create a customer with the KOTS entitlement so that you can install the release with KOTS. A _customer_ represents a single licensed user of your application.
+
+To create a customer:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers > Create customer**.
+
+   The **Create a new customer** page opens:
+
+   ![Create a new customer page in the Vendor Portal](/images/create-customer.png)
+
+   [View a larger version of this image](/images/create-customer.png)
+
+1. For **Customer name**, enter a name for the customer. For example, `KOTS Customer`.
+
+1. For **Channel**, select **Unstable**. This allows the customer to install releases promoted to the Unstable channel.
+
+1. For **License type**, select **Development**.
+
+1. For **License options**, verify that **KOTS Install Enabled** is enabled. This is the entitlement that allows the customer to install with KOTS.
+
+1. Click **Save Changes**.
+
+1. On the **Manage customer** page for the customer, click **Download license**. You will use the license file to install with KOTS.
+
+   ![Download license button on the customer page](/images/customer-download-license.png)
+
+   [View a larger version of this image](/images/customer-download-license.png)
+
+## Next Step
+
+Get the KOTS installation command and install. See [Step 6: Install the Release with KOTS](tutorial-kots-helm-install-kots).
+
+## Related Topics
+
+* [About Customers](/vendor/licenses-about)
+* [Creating and Managing Customers](/vendor/releases-creating-customer)
+
+---
+
+
+# Step 4: Add the Chart Archive to a Release
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import HelmChartCr from "../partials/getting-started/_gitea-helmchart-cr.mdx"
+import KotsCr from "../partials/getting-started/_gitea-kots-app-cr.mdx"
+import K8sCr from "../partials/getting-started/_gitea-k8s-app-cr.mdx"
+
+# Step 4: Add the Chart Archive to a Release
+
+Next, add the Helm chart archive to a new release for the application in the Replicated Vendor Portal. The purpose of this step is to configure a release that supports installation with both Replicated KOTS and with the Helm CLI. 
+ +A _release_ represents a single version of your application and contains your application files. Each release is promoted to one or more _channels_. Channels provide a way to progress releases through the software development lifecycle: from internal testing, to sharing with early-adopters, and finally to making the release generally available. + +To create a release: + +1. In the `gitea` directory, create a subdirectory named `manifests`: + + ``` + mkdir manifests + ``` + + You will add the files required to support installation with Replicated KOTS to this subdirectory. + +1. Move the Helm chart archive that you created to `manifests`: + + ``` + mv gitea-1.0.6.tgz manifests + ``` + +1. In `manifests`, create the YAML manifests required by KOTS: + ``` + cd manifests + ``` + ``` + touch gitea.yaml kots-app.yaml k8s-app.yaml + ``` + +1. In each of the files that you created, paste the corresponding YAML provided in the tabs below: + + <Tabs> + <TabItem value="helmchart" label="gitea.yaml" default> + <h5>Description</h5> + <p>The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart. The <code>name</code> and <code>chartVersion</code> listed in the HelmChart custom resource must match the name and version of a Helm chart archive in the release. Each Helm chart archive in a release requires a unique HelmChart custom resource.</p> + <h5>YAML</h5> + <HelmChartCr/> + </TabItem> + <TabItem value="kots-app" label="kots-app.yaml"> + <h5>Description</h5> + <p>The KOTS Application custom resource enables features in the KOTS Admin Console such as branding, release notes, port forwarding, dashboard buttons, application status indicators, and custom graphs.</p><p>The YAML below provides a name for the application to display in the Admin Console, adds a custom <em>status informer</em> that displays the status of the <code>gitea</code> Deployment resource in the Admin Console dashboard, adds a custom application icon, and creates a port forward so that the user can open the Gitea application in a browser.</p> + <h5>YAML</h5> + <KotsCr/> + </TabItem> + <TabItem value="k8s-app" label="k8s-app.yaml"> + <h5>Description</h5> + <p>The Kubernetes Application custom resource supports functionality such as including buttons and links on the KOTS Admin Console dashboard. The YAML below adds an <strong>Open App</strong> button to the Admin Console dashboard that opens the application using the port forward configured in the KOTS Application custom resource.</p> + <h5>YAML</h5> + <K8sCr/> + </TabItem> + </Tabs> + +1. From the `manifests` directory, lint the YAML files to confirm that there are no errors: + + ``` + replicated release lint --yaml-dir . + ``` + `--yaml-dir` is the path to the directory that contains the Helm chart archive and the manifest files required by KOTS. + + **Example output**: + + ``` + RULE TYPE FILENAME LINE MESSAGE + config-spec warn Missing config spec + preflight-spec warn Missing preflight spec + troubleshoot-spec warn Missing troubleshoot spec + nonexistent-status-informer-object warn kots-app.yaml 8 Status informer points to a nonexistent kubernetes object. If this is a Helm resource, this warning can be ignored. + ``` + :::note + The output includes warning messages that list missing manifest files. These manifests control additional KOTS functionality and can be ignored for the purpose of this tutorial. 
The `nonexistent-status-informer-object` warning can also be ignored because the `gitea` Deployment resource that was added as a status informer in the KOTS Application custom resource is a Helm resource. + ::: + +1. Create a release: + + ``` + replicated release create --yaml-dir . + ``` + **Example output**: + ``` + • Reading manifests from . ✓ + • Creating Release ✓ + • SEQUENCE: 1 + ``` + +1. Log in to the Vendor Portal and go to **Releases**. + + The release that you created is listed under **All releases**. + + ![Release page in the Vendor Portal with one release](/images/tutorial-kots-helm-release-seq-1.png) + + [View a larger version of this image](/images/tutorial-kots-helm-release-seq-1.png) + +1. Click **Edit release** to view the files in the release. + + In the release editor, you can see the manifest files that you created, the Helm chart `.tgz` archive, and the `Chart.yaml` and `values.yaml` files for the Gitea Helm chart. You can also see the same warning messages that were displayed in the CLI output. + + ![Edit Release page in the Vendor Portal](/images/tutorial-kots-helm-release-edit-seq-1.png) + + [View a larger version of this image](/images/tutorial-kots-helm-release-edit-seq-1.png) + +1. At the top of the page, click **Promote**. + +1. In the dialog, for **Which channels you would like to promote this release to?**, select **Unstable**. Unstable is a default channel that is intended for use with internal testing. Click **Promote**. + + <img alt="Promote release dialog" src="/images/release-promote.png" width="400px"/> + + [View a larger version of this image](/images/release-promote.png) + +## Next Step + +Create a customer with the KOTS entitlement so that you can install the release in your cluster using Replicated KOTS. See [Step 5: Create a KOTS-Enabled Customer](tutorial-kots-helm-create-customer). + +## Related Topics + +* [About Channels and Releases](/vendor/releases-about) +* [Configuring the HelmChart Custom Resource](/vendor/helm-native-v2-using) + +--- + + +# Step 1: Get the Sample Chart and Test + +# Step 1: Get the Sample Chart and Test + +To begin, get the sample Gitea Helm chart from Bitnami, install the chart in your cluster using the Helm CLI, and then uninstall. The purpose of this step is to confirm that you can successfully install and access the application before adding the chart to a release in the Replicated Vendor Portal. + +To get the sample Gitea Helm chart and test installation: + +1. Run the following command to pull and untar version 1.0.6 of the Bitnami Gitea Helm chart: + + ``` + helm pull --untar oci://registry-1.docker.io/bitnamicharts/gitea --version 1.0.6 + ``` + For more information about this chart, see the [bitnami/gitea](https://github.com/bitnami/charts/tree/main/bitnami/gitea) repository in GitHub. + +1. Change to the new `gitea` directory that was created: + ``` + cd gitea + ``` +1. View the files in the directory: + ``` + ls + ``` + The directory contains the following files: + ``` + Chart.lock Chart.yaml README.md charts templates values.yaml + ``` +1. Install the Gitea chart in your cluster: + + ``` + helm install gitea . --namespace gitea --create-namespace + ``` + To view the full installation instructions from Bitnami, see [Installing the Chart](https://github.com/bitnami/charts/blob/main/bitnami/gitea/README.md#installing-the-chart) in the `bitnami/gitea` repository. 
+
+   When the chart is installed, the following output is displayed:
+
+   ```
+   NAME: gitea
+   LAST DEPLOYED: Tue Oct 24 12:44:55 2023
+   NAMESPACE: gitea
+   STATUS: deployed
+   REVISION: 1
+   TEST SUITE: None
+   NOTES:
+   CHART NAME: gitea
+   CHART VERSION: 1.0.6
+   APP VERSION: 1.20.5
+
+   ** Please be patient while the chart is being deployed **
+
+   1. Get the Gitea URL:
+
+      NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+      Watch the status with: 'kubectl get svc --namespace gitea -w gitea'
+
+      export SERVICE_IP=$(kubectl get svc --namespace gitea gitea --template "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}")
+      echo "Gitea URL: http://$SERVICE_IP/"
+
+   WARNING: You did not specify a Root URL for Gitea. The rendered URLs in Gitea may not show correctly. In order to set a root URL use the rootURL value.
+
+   2. Get your Gitea login credentials by running:
+
+      echo Username: bn_user
+      echo Password: $(kubectl get secret --namespace gitea gitea -o jsonpath="{.data.admin-password}" | base64 -d)
+   ```
+
+1. Watch the `gitea` LoadBalancer service until an external IP is available:
+
+   ```
+   kubectl get svc gitea --namespace gitea --watch
+   ```
+
+1. When the external IP for the `gitea` LoadBalancer service is available, run the commands provided in the output of the installation command to get the Gitea URL:
+
+   ```
+   export SERVICE_IP=$(kubectl get svc --namespace gitea gitea --template "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}")
+   echo "Gitea URL: http://$SERVICE_IP/"
+   ```
+
+1. In a browser, go to the Gitea URL to confirm that you can see the welcome page for the application:
+
+   <img alt="Gitea application webpage" src="/images/gitea-app.png" width="500px"/>
+
+   [View a larger version of this image](/images/gitea-app.png)
+
+1. Uninstall the Helm chart:
+
+   ```
+   helm uninstall gitea --namespace gitea
+   ```
+   This command removes all the Kubernetes components associated with the chart and uninstalls the `gitea` release.
+
+1. Delete the namespace:
+
+   ```
+   kubectl delete namespace gitea
+   ```
+
+## Next Step
+
+Log in to the Vendor Portal and create an application. See [Step 2: Create an Application](tutorial-kots-helm-create-app).
+
+## Related Topics
+
+* [Helm Install](https://helm.sh/docs/helm/helm_install/)
+* [Helm Uninstall](https://helm.sh/docs/helm/helm_uninstall/)
+* [Helm Create](https://helm.sh/docs/helm/helm_create/)
+* [Helm Package](https://helm.sh/docs/helm/helm_package/)
+* [bitnami/gitea](https://github.com/bitnami/charts/blob/main/bitnami/gitea)
+
+---
+
+
+# Step 7: Install the Release with the Helm CLI
+
+# Step 7: Install the Release with the Helm CLI
+
+Next, install the same release using the Helm CLI. All releases that contain one or more Helm charts can be installed with the Helm CLI.
+
+All Helm charts included in a release are automatically pushed to the Replicated registry when the release is promoted to a channel. Helm CLI installations require that the customer has a valid email address to authenticate with the Replicated registry.
+
+To install the release with the Helm CLI:
+
+1. Create a new customer to test the Helm CLI installation:
+
+   1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers > Create customer**.
+
+      The **Create a new customer** page opens:
+
+      ![Create a new customer page in the Vendor Portal](/images/create-customer.png)
+
+      [View a larger version of this image](/images/create-customer.png)
+
+   1. For **Customer name**, enter a name for the customer. 
For example, `Helm Customer`.
+
+   1. For **Channel**, select **Unstable**. This allows the customer to install releases promoted to the Unstable channel.
+
+   1. For **Customer email**, enter the email address for the customer. The customer email address is required to install the application with the Helm CLI. This email address is never used to send emails to customers.
+
+   1. For **License type**, select **Trial**.
+
+   1. (Optional) For **License options**, _disable_ the **KOTS Install Enabled** entitlement.
+
+   1. Click **Save Changes**.
+
+1. On the **Manage customer** page for the new customer, click **Helm install instructions**.
+
+   ![Helm install instructions button](/images/tutorial-gitea-helm-customer-install-button.png)
+
+   [View a larger version of this image](/images/tutorial-gitea-helm-customer-install-button.png)
+
+   You will use the instructions provided in the **Helm install instructions** dialog to install the chart.
+
+1. Before you run the first command in the **Helm install instructions** dialog, create a `gitea` namespace for the installation:
+
+   ```
+   kubectl create namespace gitea
+   ```
+
+1. Update the current kubectl context to target the new `gitea` namespace. This ensures that the chart is installed in the `gitea` namespace without requiring you to set the `--namespace` flag with the `helm install` command:
+
+   ```
+   kubectl config set-context --namespace=gitea --current
+   ```
+
+1. Run the commands provided in the **Helm install instructions** dialog to log in to the registry and install the Helm chart.
+
+   <img alt="Helm install instructions dialog" src="/images/tutorial-gitea-helm-install-instructions.png" width="500px"/>
+
+   [View a larger version of this image](/images/tutorial-gitea-helm-install-instructions.png)
+
+   :::note
+   You can ignore the **No preflight checks found** warning for the purpose of this tutorial. This warning appears because there are no specifications for preflight checks in the Helm chart archive.
+   :::
+
+1. After the installation command completes, you can see that both the `gitea` Deployment and the Replicated SDK `replicated` Deployment were created:
+
+   ```
+   kubectl get deploy
+   ```
+   **Example output:**
+   ```
+   NAME         READY   UP-TO-DATE   AVAILABLE   AGE
+   gitea        0/1     1            0           35s
+   replicated   1/1     1            1           35s
+   ```
+
+1. Watch the `gitea` LoadBalancer service until an external IP is available:
+
+   ```
+   kubectl get svc gitea --watch
+   ```
+
+1. After an external IP address is available for the `gitea` LoadBalancer service, follow the instructions in the output of the installation command to get the Gitea URL and then confirm that you can open the application in a browser.
+
+1. In another browser window, open the [Vendor Portal](https://vendor.replicated.com/) and go to **Customers**. Select the customer that you created for the Helm CLI installation.
+
+   On the **Reporting** page for the customer, because the Replicated SDK was installed alongside the Gitea Helm chart, you can see details about the customer's license and installed instances:
+
+   ![Customer reporting](/images/tutorial-gitea-helm-reporting.png)
+
+   [View a larger version of this image](/images/tutorial-gitea-helm-reporting.png)
+
+1. On the **Reporting** page, under **Instances**, click on the instance that you just installed to open the instance details page. 
+ + On the instance details page, you can see additional insights such as the cluster where the application is installed, the version of the Replicated SDK running in the cluster, instance status and uptime, and more: + + ![Customer instance details](/images/tutorial-gitea-helm-instance.png) + + [View a larger version of this image](/images/tutorial-gitea-helm-instance.png) + +1. Uninstall the Helm chart and the Replicated SDK: + + ``` + helm uninstall gitea + ``` + +1. Delete the `gitea` namespace: + + ``` + kubectl delete namespace gitea + ``` + +## Next Step + +Congratulations! As part of this tutorial, you created a release in the Replicated Vendor Portal and installed the release with both KOTS and the Helm CLI. + +## Related Topics + +* [Installing with Helm](/vendor/install-with-helm) +* [About the Replicated SDK](/vendor/replicated-sdk-overview) +* [Helm Uninstall](https://helm.sh/docs/helm/helm_uninstall/) +* [Helm Delete](https://helm.sh/docs/helm/helm_delete/) + +--- + + +# Step 6: Install the Release with KOTS + +import KotsVerReq from "../partials/replicated-sdk/_kots-version-req.mdx" + +# Step 6: Install the Release with KOTS + +Next, get the KOTS installation command from the Unstable channel in the Vendor Portal and then install the release using the customer license that you downloaded. + +To install the release with KOTS: + +1. In the [Vendor Portal](https://vendor.replicated.com), go to **Channels**. From the **Unstable** channel card, under **Install**, copy the **KOTS Install** command. + + ![KOTS Install tab on the Unstable channel card](/images/helm-tutorial-unstable-kots-install-command.png) + + [View a larger version of this image](/images/helm-tutorial-unstable-kots-install-command.png) + +1. On the command line, run the **KOTS Install** command that you copied: + + ```bash + curl https://kots.io/install | bash + kubectl kots install $REPLICATED_APP/unstable + ``` + + This installs the latest version of the KOTS CLI and the Replicated KOTS Admin Console. The Admin Console provides a user interface where you can upload the customer license file and deploy the application. + + For additional KOTS CLI installation options, including how to install without root access, see [Installing the KOTS CLI](/reference/kots-cli-getting-started). + + :::note + <KotsVerReq/> + ::: + +1. Complete the installation command prompts: + + 1. For `Enter the namespace to deploy to`, enter `gitea`. + + 1. For `Enter a new password to be used for the Admin Console`, provide a password to access the Admin Console. + + When the Admin Console is ready, the command prints the URL where you can access the Admin Console. At this point, the KOTS CLI is installed and the Admin Console is running, but the application is not yet deployed. + + **Example output:** + + ```bash + Enter the namespace to deploy to: gitea + • Deploying Admin Console + • Creating namespace ✓ + • Waiting for datastore to be ready ✓ + Enter a new password for the admin console (6+ characters): •••••••• + • Waiting for Admin Console to be ready ✓ + + • Press Ctrl+C to exit + • Go to http://localhost:8800 to access the Admin Console + ``` + +1. With the port forward running, in a browser, go to `http://localhost:8800` to access the Admin Console. + +1. On the login page, enter the password that you created. + +1. On the license page, select the license file that you downloaded previously and click **Upload license**. + + The Admin Console dashboard opens. 
The application status changes from Missing to Unavailable while the `gitea` Deployment is being created:
+
+   ![Admin console dashboard](/images/tutorial-gitea-unavailable.png)
+
+   [View a larger version of this image](/images/tutorial-gitea-unavailable.png)
+
+1. While waiting for the `gitea` Deployment to be created, do the following:
+
+   1. On the command line, press Ctrl+C to exit the port forward.
+
+   1. Watch for the `gitea` Deployment to become ready:
+
+      ```
+      kubectl get deploy gitea --namespace gitea --watch
+      ```
+
+   1. After the `gitea` Deployment is ready, confirm that an external IP for the `gitea` LoadBalancer service is available:
+
+      ```
+      kubectl get svc gitea --namespace gitea
+      ```
+
+   1. Start the port forward again to access the Admin Console:
+
+      ```
+      kubectl kots admin-console --namespace gitea
+      ```
+
+   1. Go to `http://localhost:8800` to open the Admin Console.
+
+1. On the Admin Console dashboard, the application status is now displayed as Ready and you can click **Open App** to view the Gitea application in a browser:
+
+   ![Admin console dashboard showing ready status](/images/tutorial-gitea-ready.png)
+
+   [View a larger version of this image](/images/tutorial-gitea-ready.png)
+
+1. In another browser window, open the [Vendor Portal](https://vendor.replicated.com/) and go to **Customers**. Select the customer that you created.
+
+   On the **Reporting** page for the customer, you can see details about the customer's license and installed instances:
+
+   ![Customer reporting page](/images/tutorial-gitea-customer-reporting.png)
+
+   [View a larger version of this image](/images/tutorial-gitea-customer-reporting.png)
+
+1. On the **Reporting** page, under **Instances**, click on the instance that you just installed to open the instance details page.
+
+   On the instance details page, you can see additional insights such as the cluster where the application is installed, the version of KOTS running in the cluster, instance status and uptime, and more:
+
+   ![Customer instance details page](/images/tutorial-gitea-instance-insights.png)
+
+   [View a larger version of this image](/images/tutorial-gitea-instance-insights.png)
+
+1. Uninstall the Gitea application from your cluster so that you can install the same release again using the Helm CLI:
+
+   ```bash
+   kubectl kots remove $REPLICATED_APP --namespace gitea --undeploy
+   ```
+   **Example output**:
+   ```
+   • Removing application gitea-boxer reference from Admin Console and deleting associated resources from the cluster ✓
+   • Application gitea-boxer has been removed
+   ```
+
+1. Remove the Admin Console from the cluster:
+
+   1. Delete the namespace where the Admin Console is installed:
+
+      ```
+      kubectl delete namespace gitea
+      ```
+   1. Delete the Admin Console ClusterRole and ClusterRoleBinding:
+
+      ```
+      kubectl delete clusterrole kotsadm-role
+      ```
+      ```
+      kubectl delete clusterrolebinding kotsadm-rolebinding
+      ```
+
+## Next Step
+
+Install the same release with the Helm CLI. See [Step 7: Install the Release with the Helm CLI](tutorial-kots-helm-install-helm). 
+ +## Related Topics + +* [kots install](/reference/kots-cli-install/) +* [Installing the KOTS CLI](/reference/kots-cli-getting-started/) +* [Deleting the Admin Console and Removing Applications](/enterprise/delete-admin-console) +* [Customer Reporting](customer-reporting) +* [Instance Details](instance-insights-details) + + +--- + + +# Step 3: Package the Helm Chart + +import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" +import UnauthorizedError from "../partials/replicated-sdk/_401-unauthorized.mdx" + +# Step 3: Package the Helm Chart + +Next, add the Replicated SDK as a dependency of the Helm chart and then package the chart into a `.tgz` archive. The purpose of this step is to prepare the Helm chart to be added to a release. + +The Replicated SDK is a Helm chart that can be optionally added as a dependency of your application Helm chart. The SDK is installed as a small service running alongside your application, and provides an in-cluster API that you can use to embed Replicated features into your application. Additionally, the Replicated SDK provides access to insights and telemetry for instances of your application installed with the Helm CLI. + +To add the Replicated SDK and package the Helm chart: + +1. In your local file system, go to the `gitea` directory that was created as part of [Step 1: Get the Sample Chart and Test](tutorial-kots-helm-get-chart). + +1. In the `Chart.yaml` file, add the Replicated SDK as a dependency: + + <DependencyYaml/> + +1. Update dependencies and package the Helm chart to a `.tgz` chart archive: + + ```bash + helm package . --dependency-update + ``` + <UnauthorizedError/> + +## Next Step + +Create a release using the Helm chart archive. See [Step 4: Add the Chart Archive to a Release](tutorial-kots-helm-create-release). + +## Related Topics + +* [Packaging a Helm Chart for a Release](/vendor/helm-install-release.md) +* [About the Replicated SDK](/vendor/replicated-sdk-overview) +* [Helm Package](https://helm.sh/docs/helm/helm_package/) + + +--- + + +# Introduction and Setup + +# Introduction and Setup + +This topic provides a summary of the goals and outcomes for the tutorial and also lists the prerequisites to set up your environment before you begin. + +## Summary + +This tutorial introduces you to the Replicated Vendor Portal, the Replicated CLI, the Replicated SDK, and the Replicated KOTS installer. + +In this tutorial, you use a sample Helm chart to learn how to: + +* Add the Replicated SDK to a Helm chart as a dependency +* Create a release with the Helm chart using the Replicated CLI +* Add custom resources to the release so that it supports installation with both the Helm CLI and Replicated KOTS +* Install the release in a cluster using KOTS and the KOTS Admin Console +* Install the same release using the Helm CLI + +## Set Up the Environment + +Before you begin, do the following to set up your environment: + +* Ensure that you have kubectl access to a Kubernetes cluster. You can use any cloud provider or tool that you prefer to create a cluster, such as Google Kubernetes Engine (GKE), Amazon Web Services (AWS), or minikube. + + For information about installing kubectl and configuring kubectl access to a cluster, see the following in the Kubernetes documentation: + * [Install Tools](https://kubernetes.io/docs/tasks/tools/) + * [Command line tool (kubectl)](https://kubernetes.io/docs/reference/kubectl/) + +* Install the Helm CLI. 
To install the Helm CLI using Homebrew, run:
+
+  ```
+  brew install helm
+  ```
+
+  For more information, including alternative installation options, see [Install Helm](https://helm.sh/docs/intro/install/) in the Helm documentation.
+
+* Create a vendor account to access the Vendor Portal. See [Creating a Vendor Portal Account](/vendor/vendor-portal-creating-account).
+
+  :::note
+  If you do not yet have a Vendor Portal team to join, you can sign up for a trial account. By default, trial accounts do not include access to Replicated KOTS. To get access to KOTS with your trial account so that you can complete this and other tutorials, contact Replicated at contact@replicated.com.
+  :::
+
+## Next Step
+
+Get the sample Bitnami Helm chart and test installation with the Helm CLI. See [Step 1: Get the Sample Chart and Test](/vendor/tutorial-kots-helm-get-chart)
+
+---
+
+
+# Step 2: Add a Preflight Spec to the Chart
+
+import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx"
+
+# Step 2: Add a Preflight Spec to the Chart
+
+Create a preflight specification that fails if the cluster is running a version of Kubernetes earlier than 1.23.0, and add the specification to the Gitea chart as a Kubernetes Secret.
+
+To add a preflight specification to the Gitea chart:
+
+1. In the `gitea/templates` directory, create a `gitea-preflights.yaml` file:
+
+   ```
+   touch templates/gitea-preflights.yaml
+   ```
+
+1. In the `gitea-preflights.yaml` file, add the following YAML to create a Kubernetes Secret with a preflight check specification:
+
+   ```yaml
+   apiVersion: v1
+   kind: Secret
+   metadata:
+     labels:
+       troubleshoot.sh/kind: preflight
+     name: gitea-preflight-checks
+   stringData:
+     preflight.yaml: |
+       apiVersion: troubleshoot.sh/v1beta2
+       kind: Preflight
+       metadata:
+         name: gitea-preflight-checks
+       spec:
+         analyzers:
+           - clusterVersion:
+               outcomes:
+                 - fail:
+                     when: "< 1.23.0"
+                     message: |-
+                       Your cluster is running a version of Kubernetes that is not supported and your installation will not succeed. To continue, upgrade your cluster to Kubernetes 1.23.0 or later.
+                     uri: https://www.kubernetes.io
+                 - pass:
+                     message: Your cluster is running the required version of Kubernetes.
+   ```
+
+   The YAML above defines a preflight check that fails if the target cluster is running a version of Kubernetes earlier than 1.23.0. The preflight check also includes a message to the user that describes the failure and lists the required Kubernetes version. The `troubleshoot.sh/kind: preflight` label is required to run preflight checks defined in Secrets.
+
+1. In the Gitea `Chart.yaml` file, add the Replicated SDK as a dependency:
+
+   <DependencyYaml/>
+
+   The SDK is installed as a small service running alongside your application, and provides an in-cluster API that you can use to embed Replicated features into your application.
+
+1. Update dependencies and package the chart to a `.tgz` chart archive:
+
+   ```bash
+   helm package . --dependency-update
+   ```
+
+   :::note
+   If you see a `401 Unauthorized` error message, log out of the Replicated registry by running `helm registry logout registry.replicated.com` and then run `helm package . --dependency-update` again.
+   :::
+
+## Next Step
+
+Add the chart archive to a release. See [Add the Chart Archive to a Release](tutorial-preflight-helm-create-release). 
+
+## Related Topics
+
+* [Defining Preflight Checks](/vendor/preflight-defining)
+* [Packaging a Helm Chart for a Release](/vendor/helm-install-release)
+
+---
+
+
+# Step 4: Create a Customer
+
+# Step 4: Create a Customer
+
+After promoting the release, create a customer so that you can run the preflight checks and install.
+
+To create a customer:
+
+1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers > Create customer**.
+
+   The **Create a new customer** page opens:
+
+   ![Create a new customer page in the Vendor Portal](/images/create-customer.png)
+
+   [View a larger version of this image](/images/create-customer.png)
+
+1. For **Customer name**, enter a name for the customer. For example, `Preflight Customer`.
+
+1. For **Channel**, select **Unstable**. This allows the customer to install releases promoted to the Unstable channel.
+
+1. For **Customer email**, enter the email address for the customer. The customer email address is required to install the application with the Helm CLI. This email address is never used to send emails to customers.
+
+1. For **License type**, select **Development**.
+
+1. Click **Save Changes**.
+
+## Next Step
+
+Use the Helm CLI to run the preflight checks you defined and install Gitea. See [Run Preflights with the Helm CLI](tutorial-preflight-helm-install).
+
+## Related Topics
+
+* [About Customers](/vendor/licenses-about)
+* [Creating and Managing Customers](/vendor/releases-creating-customer)
+
+---
+
+
+# Step 3: Add the Chart Archive to a Release
+
+# Step 3: Add the Chart Archive to a Release
+
+Use the Replicated CLI to add the Gitea Helm chart archive to a release in the Replicated vendor platform.
+
+To create a release:
+
+1. Install the Replicated CLI:
+
+   ```
+   brew install replicatedhq/replicated/cli
+   ```
+   For more installation options, see [Installing the Replicated CLI](/reference/replicated-cli-installing).
+
+1. Authorize the Replicated CLI:
+
+   ```
+   replicated login
+   ```
+   In the browser window that opens, complete the prompts to log in to your vendor account and authorize the CLI.
+
+1. Create an application named `Gitea`:
+
+   ```
+   replicated app create Gitea
+   ```
+
+1. Get the slug for the application that you created:
+
+   ```
+   replicated app ls
+   ```
+   **Example output**:
+   ```
+   ID                             NAME     SLUG          SCHEDULER
+   2WthxUIfGT13RlrsUx9HR7So8bR    Gitea    gitea-boxer   kots
+   ```
+   In the example above, the application slug is `gitea-boxer`.
+
+1. Set the `REPLICATED_APP` environment variable to the slug of the application that you created. This allows you to interact with the application using the Replicated CLI without needing to use the `--app` flag with every command:
+
+   **Example:**
+
+   ```
+   export REPLICATED_APP=gitea-boxer
+   ```
+
+1. Go to the `gitea` directory.
+
+1. Create a release with the Gitea chart archive:
+
+   ```
+   replicated release create --chart=gitea-1.0.6.tgz
+   ```
+   ```bash
+   You are creating a release that will only be installable with the helm CLI.
+   For more information, see
+   https://docs.replicated.com/vendor/helm-install#about-helm-installations-with-replicated
+
+   • Reading chart from gitea-1.0.6.tgz ✓
+   • Creating Release ✓
+     • SEQUENCE: 1
+   ```
+
+1. Log in to the Vendor Portal and go to **Releases**.
+
+   The release that you created is listed under **All releases**.
+
+1. Click **View YAML** to view the files in the release.
+
+1. At the top of the page, click **Promote**.
+ + <img alt="Promote release dialog" src="/images/release-promote.png" width="400px"/> + + [View a larger version of this image](/images/release-promote.png) + +1. In the dialog, for **Which channels you would like to promote this release to?**, select **Unstable**. Unstable is a default channel that is intended for use with internal testing. + +1. For **Version label**, open the dropdown and select **1.0.6**. + +1. Click **Promote**. + + +## Next Step + +Create a customer so that you can install the release in a development environment. See [Create a Customer](tutorial-preflight-helm-create-customer). + +## Related Topics + +* [About Channels and Releases](/vendor/releases-about) +* [Managing Releases with the CLI](/vendor/releases-creating-cli) + +--- + + +# Step 1: Get the Sample Chart and Test + +# Step 1: Get the Sample Chart and Test + +To begin, get the sample Gitea Helm chart from Bitnami, install the chart in your cluster using the Helm CLI, and then uninstall. The purpose of this step is to confirm that you can successfully install the application before adding preflight checks to the chart. + +To get the sample Gitea Helm chart and test installation: + +1. Run the following command to pull and untar version 1.0.6 of the Bitnami Gitea Helm chart: + + ``` + helm pull --untar oci://registry-1.docker.io/bitnamicharts/gitea --version 1.0.6 + ``` + For more information about this chart, see the [bitnami/gitea](https://github.com/bitnami/charts/tree/main/bitnami/gitea) repository in GitHub. + +1. Change to the new `gitea` directory that was created: + ``` + cd gitea + ``` +1. View the files in the directory: + ``` + ls + ``` + The directory contains the following files: + ``` + Chart.lock Chart.yaml README.md charts templates values.yaml + ``` +1. Install the Gitea chart in your cluster: + + ``` + helm install gitea . --namespace gitea --create-namespace + ``` + To view the full installation instructions from Bitnami, see [Installing the Chart](https://github.com/bitnami/charts/blob/main/bitnami/gitea/README.md#installing-the-chart) in the `bitnami/gitea` repository. + + When the chart is installed, the following output is displayed: + + ``` + NAME: gitea + LAST DEPLOYED: Tue Oct 24 12:44:55 2023 + NAMESPACE: gitea + STATUS: deployed + REVISION: 1 + TEST SUITE: None + NOTES: + CHART NAME: gitea + CHART VERSION: 1.0.6 + APP VERSION: 1.20.5 + + ** Please be patient while the chart is being deployed ** + + 1. Get the Gitea URL: + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace gitea -w gitea' + + export SERVICE_IP=$(kubectl get svc --namespace gitea gitea --template "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}") + echo "Gitea URL: http://$SERVICE_IP/" + + WARNING: You did not specify a Root URL for Gitea. The rendered URLs in Gitea may not show correctly. In order to set a root URL use the rootURL value. + + 2. Get your Gitea login credentials by running: + + echo Username: bn_user + echo Password: $(kubectl get secret --namespace gitea gitea -o jsonpath="{.data.admin-password}" | base64 -d) + ``` + +1. Watch the `gitea` LoadBalancer service until an external IP is available: + + ``` + kubectl get svc gitea --namespace gitea --watch + ``` + +1. 
When the external IP for the `gitea` LoadBalancer service is available, run the commands provided in the output of the installation command to get the Gitea URL:
+
+   ```
+   export SERVICE_IP=$(kubectl get svc --namespace gitea gitea --template "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}")
+   echo "Gitea URL: http://$SERVICE_IP/"
+   ```
+
+   :::note
+   Alternatively, you can run the following command to forward a local port to a port on the Gitea Pod:
+
+   ```
+   POD_NAME=$(kubectl get pods -l app.kubernetes.io/name=gitea -o jsonpath='{.items[0].metadata.name}')
+   kubectl port-forward pod/$POD_NAME 8080:3000
+   ```
+   :::
+
+1. In a browser, go to the Gitea URL to confirm that you can see the welcome page for the application:
+
+   <img alt="Gitea application webpage" src="/images/gitea-app.png" width="500px"/>
+
+   [View a larger version of this image](/images/gitea-app.png)
+
+1. Uninstall the Helm chart:
+
+   ```
+   helm uninstall gitea --namespace gitea
+   ```
+   This command removes all the Kubernetes components associated with the chart and uninstalls the `gitea` release.
+
+1. Delete the namespace:
+
+   ```
+   kubectl delete namespace gitea
+   ```
+
+## Next Step
+
+Define preflight checks and add them to the Gitea Helm chart. See [Add a Preflight Spec to the Chart](tutorial-preflight-helm-add-spec).
+
+## Related Topics
+
+* [Helm Install](https://helm.sh/docs/helm/helm_install/)
+* [Helm Uninstall](https://helm.sh/docs/helm/helm_uninstall/)
+* [Helm Package](https://helm.sh/docs/helm/helm_package/)
+* [bitnami/gitea](https://github.com/bitnami/charts/blob/main/bitnami/gitea)
+
+---
+
+
+# Step 6: Run Preflights with KOTS
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import HelmChartCr from "../partials/getting-started/_gitea-helmchart-cr.mdx"
+import KotsCr from "../partials/getting-started/_gitea-kots-app-cr.mdx"
+import K8sCr from "../partials/getting-started/_gitea-k8s-app-cr.mdx"
+import KotsVerReq from "../partials/replicated-sdk/_kots-version-req.mdx"
+
+# Step 6: Run Preflights with KOTS
+
+Create a KOTS-enabled release and then install Gitea with KOTS. The purpose of this step is to see how preflight checks automatically run in the KOTS Admin Console during installation.
+
+To run preflight checks during installation with KOTS:
+
+1. In the `gitea` directory, create a subdirectory named `manifests`:
+
+   ```
+   mkdir manifests
+   ```
+
+   You will add the files required to support installation with KOTS to this subdirectory.
+
+1. Move the Helm chart archive to `manifests`:
+
+   ```
+   mv gitea-1.0.6.tgz manifests
+   ```
+
+1. In `manifests`, create the YAML manifests required by KOTS:
+   ```
+   cd manifests
+   ```
+   ```
+   touch gitea.yaml kots-app.yaml k8s-app.yaml
+   ```
+
+1. In each of the files that you created, paste the corresponding YAML provided in the tabs below:
+
+   <Tabs>
+   <TabItem value="helmchart" label="gitea.yaml" default>
+    <h5>Description</h5>
+    <p>The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart. The <code>name</code> and <code>chartVersion</code> listed in the HelmChart custom resource must match the name and version of a Helm chart archive in the release.
Each Helm chart archive in a release requires a unique HelmChart custom resource.</p> + <h5>YAML</h5> + <HelmChartCr/> + </TabItem> + <TabItem value="kots-app" label="kots-app.yaml"> + <h5>Description</h5> + <p>The KOTS Application custom resource enables features in the Replicated Admin Console such as branding, release notes, port forwarding, dashboard buttons, application status indicators, and custom graphs.</p><p>The YAML below provides a name for the application to display in the Admin Console, adds a custom <em>status informer</em> that displays the status of the <code>gitea</code> Deployment resource in the Admin Console dashboard, adds a custom application icon, and creates a port forward so that the user can open the Gitea application in a browser.</p> + <h5>YAML</h5> + <KotsCr/> + </TabItem> + <TabItem value="k8s-app" label="k8s-app.yaml"> + <h5>Description</h5> + <p>The Kubernetes Application custom resource supports functionality such as including buttons and links on the Replicated Admin Console dashboard. The YAML below adds an <strong>Open App</strong> button to the Admin Console dashboard that opens the application using the port forward configured in the KOTS Application custom resource.</p> + <h5>YAML</h5> + <K8sCr/> + </TabItem> + </Tabs> + +1. From the `manifests` directory, lint the YAML files to confirm that there are no errors: + + ``` + replicated release lint --yaml-dir . + ``` + `--yaml-dir` is the path to the directory that contains the Helm chart archive and the manifest files required by KOTS. + + **Example output**: + + ``` + RULE TYPE FILENAME LINE MESSAGE + config-spec warn Missing config spec + preflight-spec warn Missing preflight spec + troubleshoot-spec warn Missing troubleshoot spec + nonexistent-status-informer-object warn kots-app.yaml 8 Status informer points to a nonexistent kubernetes object. If this is a Helm resource, this warning can be ignored. + ``` + + The output includes warning messages, including a warning about a missing preflight spec. This warning appears because the preflight spec is defined in the Helm chart. The warnings can be ignored for the purpose of this tutorial. + +1. Create a release: + + ```bash + replicated release create --yaml-dir . + ``` + **Example output**: + ```bash + • Reading manifests from . ✓ + • Creating Release ✓ + • SEQUENCE: 2 + ``` + +1. Log in to the [vendor portal](https://vendor.replicated.com) and go to **Releases**. The new release is labeled **Sequence 2**. + +1. Promote the release to the Unstable channel. + +1. Go to the **Customers** page. + +1. Create a new customer named `KOTS Preflight Customer`. For **License options**, enable the **KOTS Install Enabled** checkbox. This is the entitlement that allows the customer to install with KOTS. + +1. On the **Manage customer** page for the customer, click **Download license**. You will use the license file to install with KOTS. + +1. Go to **Channels**. From the **Unstable** channel card, under **Install**, copy the **KOTS Install** command. + + ![KOTS Install tab on the Unstable channel card](/images/helm-tutorial-unstable-kots-install-command.png) + + [View a larger version of this image](/images/helm-tutorial-unstable-kots-install-command.png) + +1. On the command line, run the **KOTS Install** command that you copied: + + ```bash + curl https://kots.io/install | bash + kubectl kots install $REPLICATED_APP/unstable + ``` + + This installs the latest version of the KOTS CLI and the Replicated Admin Console. 
The Admin Console provides a user interface where you can upload the customer license file and deploy the application. + + For additional KOTS CLI installation options, including how to install without root access, see [Installing the KOTS CLI](/reference/kots-cli-getting-started). + + :::note + <KotsVerReq/> + ::: + +1. Complete the installation command prompts: + + 1. For `Enter the namespace to deploy to`, enter `gitea`. + + 1. For `Enter a new password to be used for the Admin Console`, provide a password to access the Admin Console. + + When the Admin Console is ready, the command prints the URL where you can access the Admin Console. At this point, the KOTS CLI is installed and the Admin Console is running, but the application is not yet deployed. + + **Example output:** + + ```bash + Enter the namespace to deploy to: gitea + • Deploying Admin Console + • Creating namespace ✓ + • Waiting for datastore to be ready ✓ + Enter a new password for the Admin Console (6+ characters): •••••••• + • Waiting for Admin Console to be ready ✓ + + • Press Ctrl+C to exit + • Go to http://localhost:8800 to access the Admin Console + ``` + +1. With the port forward running, in a browser, go to `http://localhost:8800` to access the Admin Console. + +1. On the login page, enter the password that you created. + +1. On the license page, select the license file that you downloaded previously and click **Upload license**. + + Preflight checks run automatically: + + ![Gitea preflight checks page](/images/gitea-preflights-admin-console.png) + + [View a larger version of this image](/images/gitea-preflights-admin-console.png) + +1. When the preflight checks finish, click **Deploy** to deploy the application. + + The Admin Console dashboard opens. The application status changes from Missing to Unavailable while the `gitea` Deployment is being created: + + ![Admin console dashboard](/images/tutorial-gitea-unavailable.png) + + [View a larger version of this image](/images/tutorial-gitea-unavailable.png) + +1. (Optional) After the application is in a Ready status, click **Open App** to view the Gitea application in a browser. + +1. Uninstall the Gitea application from your cluster: + + ```bash + kubectl kots remove $REPLICATED_APP --namespace gitea --undeploy + ``` + **Example output**: + ``` + • Removing application gitea-boxer reference from Admin Console and deleting associated resources from the cluster ✓ + • Application gitea-boxer has been removed + ``` + +1. Remove the Admin Console from the cluster: + + 1. Delete the namespace where the Admin Console is installed: + + ``` + kubectl delete namespace gitea + ``` + 1. Delete the Admin Console ClusterRole and ClusterRoleBinding: + + ``` + kubectl delete clusterrole kotsadm-role + ``` + ``` + kubectl delete clusterrolebinding kotsadm-rolebinding + ``` + +## Summary + +Congratulations! In this tutorial, you defined a preflight check for Gitea that checks the version of Kubernetes running in the cluster. You also ran preflight checks before installing with both the Helm CLI and with KOTS. + +To learn more about defining and running preflight checks, see: +* [Defining Preflight Checks](/vendor/preflight-defining) +* [Running Preflight Checks](/vendor/preflight-running) +* [Getting Started](https://troubleshoot.sh/docs/) in the open source Troubleshoot documentation. 
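+
+If you automate installations in continuous integration, the same checks can also run non-interactively. The following is a minimal sketch, not one of the tutorial steps: it assumes the `preflight` plugin from Step 5 is installed, that you are still logged in to the Replicated registry, and that your version of the plugin accepts an `--interactive=false` flag to print results to stdout instead of opening the terminal UI (check `kubectl preflight --help` for your version):
+
+```bash
+# Template the chart from the Replicated registry and pipe the rendered
+# manifests to the preflight plugin; non-interactive mode (assumed flag)
+# prints outcomes and returns a non-zero exit code on failed checks
+helm template oci://registry.replicated.com/$REPLICATED_APP/unstable/gitea \
+  | kubectl preflight --interactive=false -
+```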
+
+---
+
+
+# Step 5: Run Preflights with the Helm CLI
+
+# Step 5: Run Preflights with the Helm CLI
+
+Use the Helm CLI installation instructions provided for the customer that you created to run the preflight checks for Gitea and install. The purpose of this step is to demonstrate how enterprise users can run preflight checks defined in a Helm chart before installing.
+
+To run preflight checks and install with the Helm CLI:
+
+1. Create a `gitea` namespace for the installation:
+
+   ```
+   kubectl create namespace gitea
+   ```
+
+1. Update the current kubectl context to target the new `gitea` namespace. This ensures that the chart is installed in the `gitea` namespace without requiring you to set the `--namespace` flag with the `helm install` command:
+
+   ```
+   kubectl config set-context --namespace=gitea --current
+   ```
+
+1. In the [vendor portal](https://vendor.replicated.com), go to the **Customers** page.
+
+1. On the **Customer details** page for the customer that you created, click **Helm install instructions**.
+
+   ![Helm install instructions button](/images/tutorial-gitea-helm-customer-install-button.png)
+
+   [View a larger version of this image](/images/tutorial-gitea-helm-customer-install-button.png)
+
+1. Run the first command in the **Helm install instructions** dialog to log in to the Replicated registry.
+
+1. Run the second command to install the preflight kubectl plugin:
+
+   ```bash
+   curl https://krew.sh/preflight | bash
+   ```
+   The preflight plugin is a client-side utility used to run preflight checks.
+
+1. Run the third command to run preflight checks:
+
+   ```bash
+   helm template oci://registry.replicated.com/$REPLICATED_APP/unstable/gitea | kubectl preflight -
+   ```
+   This command templates the Gitea chart and then pipes the result to the preflight plugin. The following shows an example of the output for this command:
+
+   <img alt="Preflight CLI output" src="/images/gitea-preflights-cli.png" width="600px"/>
+
+   [View a larger version of this image](/images/gitea-preflights-cli.png)
+
+1. Run the fourth command listed under **Option 1: Install Gitea** to install the application:
+
+   ```bash
+   helm install gitea oci://registry.replicated.com/$REPLICATED_APP/unstable/gitea
+   ```
+
+1. Uninstall and delete the namespace:
+
+   ```bash
+   helm uninstall gitea --namespace gitea
+   ```
+   ```bash
+   kubectl delete namespace gitea
+   ```
+
+## Next Step
+
+Install the application with KOTS to see how preflight checks are run from the KOTS Admin Console. See [Run Preflights with KOTS](tutorial-preflight-helm-install-kots).
+
+## Related Topics
+
+* [Running Preflight Checks](/vendor/preflight-running)
+* [Installing with Helm](/vendor/install-with-helm)
+
+---
+
+
+# Introduction and Setup
+
+# Introduction and Setup
+
+This topic provides a summary of the goals and outcomes for the tutorial and also lists the prerequisites to set up your environment before you begin.
+
+## Summary
+
+This tutorial introduces you to preflight checks. The purpose of preflight checks is to provide clear feedback about any missing requirements or incompatibilities in the customer's cluster _before_ they install or upgrade an application. Thorough preflight checks provide increased confidence that an installation or upgrade will succeed and help prevent support escalations.
+
+Preflight checks are part of the [Troubleshoot](https://troubleshoot.sh/) open source project, which is maintained by Replicated.
+
+In this tutorial, you use a sample Helm chart to learn how to:
+
+* Define custom preflight checks in a Kubernetes Secret in a Helm chart
+* Package a Helm chart and add it to a release in the Replicated Vendor Portal
+* Run preflight checks using the Helm CLI
+* Run preflight checks in the Replicated KOTS Admin Console
+
+## Set Up the Environment
+
+Before you begin, do the following to set up your environment:
+
+* Ensure that you have kubectl access to a Kubernetes cluster. You can use any cloud provider or tool that you prefer to create a cluster, such as Google Kubernetes Engine (GKE), Amazon Web Services (AWS), or minikube.
+
+  For information about installing kubectl and configuring kubectl access to a cluster, see the following in the Kubernetes documentation:
+    * [Install Tools](https://kubernetes.io/docs/tasks/tools/)
+    * [Command line tool (kubectl)](https://kubernetes.io/docs/reference/kubectl/)
+
+* Install the Helm CLI. To install the Helm CLI using Homebrew, run:
+
+  ```
+  brew install helm
+  ```
+
+  For more information, including alternative installation options, see [Install Helm](https://helm.sh/docs/intro/install/) in the Helm documentation.
+
+* Create a vendor account to access the Vendor Portal. See [Creating a Vendor Account](/vendor/vendor-portal-creating-account).
+
+  :::note
+  If you do not yet have a Vendor Portal team to join, you can sign up for a trial account. By default, trial accounts do not include access to Replicated KOTS. To get access to KOTS with your trial account so that you can complete this and other tutorials, contact Replicated at contact@replicated.com.
+  :::
+
+## Next Step
+
+Get the sample Bitnami Helm chart and test installation with the Helm CLI. See [Step 1: Get the Sample Chart and Test](/vendor/tutorial-preflight-helm-get-chart).
+
+---
+
+
+# Using a Registry Proxy for Helm Air Gap Installations
+
+# Using a Registry Proxy for Helm Air Gap Installations
+
+This topic describes how to connect the Replicated proxy registry to a Harbor or jFrog Artifactory instance to support pull-through image caching. It also includes information about how to set up replication rules in Harbor for image mirroring.
+
+## Overview
+
+For applications distributed with Replicated, the [Replicated proxy registry](/vendor/private-images-about) grants proxy, or _pull-through_, access to application images without exposing registry credentials to customers.
+
+Users can optionally connect the Replicated proxy registry with their own [Harbor](https://goharbor.io) or [jFrog Artifactory](https://jfrog.com/help/r/jfrog-artifactory-documentation) instance to proxy and cache the images that are required for installation on demand. This can be particularly helpful in Helm installations in air-gapped environments because it allows users to pull and cache images from an internet-connected machine, then access the cached images during installation from a machine with limited or no outbound internet access.
+
+In addition to the support for on-demand pull-through caching, connecting the Replicated proxy registry to a Harbor or Artifactory instance also has the following benefits:
+* Registries like Harbor or Artifactory typically support access controls as well as scanning images for security vulnerabilities
+* With Harbor, users can optionally set up replication rules for image mirroring, which can be used to improve data availability and reliability
+
+## Limitation
+
+Artifactory does not support mirroring or replication for Docker registries.
If you need to set up image mirroring, use Harbor. See [Set Up Mirroring in Harbor](#harbor-mirror) below.
+
+## Connect the Replicated Proxy Registry to Harbor
+
+[Harbor](https://goharbor.io) is a popular open-source container registry. Users can connect the Replicated proxy registry to Harbor in order to cache images on demand and set up pull-based replication rules to proactively mirror images. Connecting the Replicated proxy registry to Harbor also allows customers to use Harbor's security features.
+
+### Use Harbor for Pull-Through Proxy Caching {#harbor-proxy-cache}
+
+To connect the Replicated proxy registry to Harbor for pull-through proxy caching:
+
+1. Log in to Harbor and create a new replication endpoint. This endpoint connects the Replicated proxy registry to the Harbor instance. For more information, see [Creating Replication Endpoints](https://goharbor.io/docs/2.11.0/administration/configuring-replication/create-replication-endpoints/) in the Harbor documentation.
+
+1. Enter the following details for the endpoint:
+
+    * For the provider field, choose Docker Registry.
+    * For the URL field, enter `https://proxy.replicated.com` or the custom domain that is configured for the Replicated proxy registry. For more information about configuring custom domains in the Vendor Portal, see [Using Custom Domains](/vendor/custom-domains-using).
+    * For the access ID, enter the email address associated with the customer in the Vendor Portal.
+    * For the access secret, enter the customer's unique license ID. You can find the license ID in the Vendor Portal by going to **Customers > [Customer Name]**.
+
+1. Verify your configuration by testing the connection and then save the endpoint.
+
+1. After adding the Replicated proxy registry as a replication endpoint in Harbor, set up a proxy cache. This allows for pull-through image caching with Harbor. For more information, see [Configure Proxy Cache](https://goharbor.io/docs/2.11.0/administration/configure-proxy-cache/) in the Harbor documentation.
+
+1. (Optional) Add a pull-based replication rule to support image mirroring. See [Configure Image Mirroring in Harbor](#harbor-mirror) below.
+
+### Configure Image Mirroring in Harbor {#harbor-mirror}
+
+To enable image mirroring with Harbor, users create a pull-based replication rule. This periodically (or when manually triggered) pulls images from the Replicated proxy registry to store them in Harbor.
+
+The Replicated proxy registry exposes standard catalog and tag listing endpoints that are used by Harbor to support image mirroring:
+* The catalog endpoint returns a list of repositories built from images of the last 10 releases.
+* The tags listing endpoint lists the tags available in a given repository for those same releases.
+
+When image mirroring is enabled, Harbor uses these endpoints to build a list of images to cache and then serve.
+
+#### Limitations
+
+Image mirroring with Harbor has the following limitations:
+
+* Neither the catalog endpoint nor the tags listing endpoint exposed by the Replicated proxy service respects pagination requests. However, Harbor requests 1000 items at a time.
+
+* Only authenticated users can perform catalog calls or list tags. Authenticated users are those with an email address and license ID associated with a customer in the Vendor Portal.
+
+#### Create a Pull-Based Replication Rule in Harbor for Image Mirroring
+
+To configure image mirroring in Harbor:
+
+1. 
Follow the steps in [Use Harbor for Pull-Through Proxy Caching](#harbor-proxy-cache) above to add the Replicated proxy registry to Harbor as a replication endpoint.
+
+1. Create a **pull-based** replication rule in Harbor to mirror images proactively. For more information, see [Creating a replication rule](https://goharbor.io/docs/2.11.0/administration/configuring-replication/create-replication-rules/) in the Harbor documentation.
+
+## Use Artifactory for Pull-Through Proxy Caching
+
+[jFrog Artifactory](https://jfrog.com/help/r/jfrog-artifactory-documentation) supports pull-through caching for Docker registries.
+
+For information about how to configure a pull-through cache with Artifactory, see [Remote Repository](https://jfrog.com/help/r/jfrog-artifactory-documentation/configure-a-remote-repository) in the Artifactory documentation.
+
+
+---
+
+
+# Application Settings Page
+
+# Application Settings Page
+
+Each application has its own settings, which include the application name and application slug.
+
+The following shows the **Application Settings** page, which you access by selecting **_Application Name_ > Settings**:
+
+<img alt="Settings page" src="/images/application-settings.png" width="600px"/>
+
+[View a larger version of this image](/images/application-settings.png)
+
+The following describes each of the application settings:
+
+- **Application name:** The application name is initially set when you first create the application in the Vendor Portal. You can change the name at any time so that it displays as a user-friendly name that your team can easily identify.
+- **Application slug:** The application slug is used with the Replicated CLI and with some of the KOTS CLI commands. You can click on the link below the slug to toggle between the application ID number and the slug name. The application ID and application slug are unique identifiers that cannot be edited.
+- **Service Account Tokens:** Provides a link to the **Service Accounts** page, where you can create or remove a service account. Service accounts are paired with API tokens and are used with the Vendor API to automate tasks. For more information, see [Using Vendor API Tokens](/reference/vendor-api-using).
+- **Scheduler:** Displayed if the application has a KOTS entitlement.
+- **Danger Zone:** Lets you delete the application, and all of the licenses and data associated with the application. The delete action cannot be undone.
+
+---
+
+
+# Creating a Vendor Account
+
+# Creating a Vendor Account
+
+To get started with Replicated, you must create a Replicated vendor account. When you create your account, you are also prompted to create an application. To create additional applications in the future, log in to the Replicated Vendor Portal and select **Create new app** from the Applications drop-down list.
+
+To create a vendor account:
+
+1. Go to the [Vendor Portal](https://vendor.replicated.com), and select **Sign up**.
+
+   The sign up page opens.
+2. Enter your email address or continue with Google authentication.
+
+   - If registering with an email, the Activate account page opens and you will receive an activation code in your email.
+
+     :::note
+     To resend the code, click **Resend it**.
+     :::
+
+   - Copy and paste the activation code into the text box and click **Activate**. Your account is now activated.
+
+   :::note
+   After your account is activated, you might have the option to accept a pending invitation, or to automatically join an existing team if the auto-join feature is enabled by your administrator. For more information about enabling the auto-join feature, see [Enable Users to Auto-join Your Team](https://docs.replicated.com/vendor/team-management#enable-users-to-auto-join-your-team).
+   :::
+
+4. On the Create your team page, enter your first name, last name, and company name. Click **Continue** to complete the setup.
+
+   :::note
+   The company name you provide is used as your team name in Vendor Portal.
+   :::
+
+   The Create application page opens.
+
+5. Enter a name for the application, such as `My-Application-Demo`. Click **Create application**.
+
+   The application is created and the Channels page opens.
+
+   :::important
+   Replicated recommends that you use a temporary name for the application at this time such as `My-Application-Demo` or `My-Application-Test`.
+
+   Only use an official name for your application when you have completed testing and are ready to distribute the application to your customers.
+
+   Replicated recommends that you use a temporary application name for testing because you are not able to restore or modify previously-used application names or application slugs in the Vendor Portal.
+   :::
+
+## Next Step
+
+Invite team members to collaborate with you in Vendor Portal. See [Invite Members](team-management#invite-members).
+
+
+---
+
+
+# Managing Applications
+
+# Managing Applications
+
+This topic provides information about managing applications, including how to create, delete, and retrieve the slug for applications in the Replicated Vendor Portal and with the Replicated CLI.
+
+For information about creating and managing applications with the Vendor API v3, see the [apps](https://replicated-vendor-api.readme.io/reference/createapp) section in the Vendor API v3 documentation.
+
+## Create an Application
+
+Teams can create one or more applications. It is common to create multiple applications for testing purposes.
+
+### Vendor Portal
+
+To create a new application:
+
+1. Log in to the [Vendor Portal](https://vendor.replicated.com/). If you do not have an account, see [Creating a Vendor Account](/vendor/vendor-portal-creating-account).
+
+1. In the top left of the page, open the application drop down and click **Create new app...**.
+
+   <img alt="create new app drop down" src="/images/create-new-app.png" width="300px"/>
+
+   [View a larger version of this image](/images/create-new-app.png)
+
+1. On the **Create application** page, enter a name for the application.
+
+   <img alt="create new app page" src="/images/create-application-page.png" width="500px"/>
+
+   [View a larger version of this image](/images/create-application-page.png)
+
+   :::important
+   If you intend to use the application for testing purposes, Replicated recommends that you use a temporary name such as `My Application Demo` or `My Application Test`.
+
+   You are not able to restore or modify previously-used application names or application slugs.
+   :::
+
+1. Click **Create application**.
+
+### Replicated CLI
+
+To create an application with the Replicated CLI:
+
+1. Install the Replicated CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing).
+
+1. Run the following command:
+
+   ```bash
+   replicated app create APP-NAME
+   ```
+   Replace `APP-NAME` with the name that you want to use for the new application.
+ + **Example**: + + ```bash + replicated app create cli-app + ID NAME SLUG SCHEDULER + 1xy9t8G9CO0PRGzTwSwWFkMUjZO cli-app cli-app kots + ``` + +## Get the Application Slug {#slug} + +Each application has a slug, which is used for interacting with the application using the Replicated CLI. The slug is automatically generated based on the application name and cannot be changed. + +### Vendor Portal + +To get an application slug in the Vendor Portal: + +1. Log in to the [Vendor Portal](https://vendor.replicated.com/) and go to **_Application Name_ > Settings**. + +1. Under **Application Slug**, copy the slug. + + <img alt="Application slug" src="/images/application-settings.png" width="600px"/> + + [View a larger version of this image](/images/application-settings.png) + +### Replicated CLI + +To get an application slug with the Replicated CLI: + +1. Install the Replicated CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing). + +1. Run the following command: + + ```bash + replicated app ls APP-NAME + ``` + Replace `APP-NAME` with the name of the target application. Or, exclude `APP-NAME` to list all applications in the team. + + **Example:** + + ```bash + replicated app ls cli-app + ID NAME SLUG SCHEDULER + 1xy9t8G9CO0PRGzTwSwWFkMUjZO cli-app cli-app kots + ``` + +1. Copy the value in the `SLUG` field. + +## Delete an Application + +When you delete an application, you also delete all licenses and data associated with the application. You can also optionally delete all images associated with the application from the Replicated registry. Deleting an application cannot be undone. + +### Vendor Portal + +To delete an application in the Vendor Portal: + +1. Log in to the [Vendor Portal](https://vendor.replicated.com/) and go to **_Application Name_ > Settings**. + +1. Under **Danger Zone**, click **Delete App**. + + <img alt="Setting page" src="/images/application-settings.png" width="600px"/> + + [View a larger version of this image](/images/application-settings.png) + +1. In the **Are you sure you want to delete this app?** dialog, enter the application name. Optionally, enter your password if you want to delete all images associated with the application from the Replicated registry. + + <img alt="delete app dialog" src="/images/delete-app-dialog.png" width="400px"/> + + [View a larger version of this image](/images/delete-app-dialog.png) + +1. Click **Delete app**. + +### Replicated CLI + +To delete an application with the Replicated CLI: + +1. Install the Replicated CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing). + +1. Run the following command: + + ```bash + replicated app delete APP-NAME + ``` + Replace `APP-NAME` with the name of the target application. + +1. When prompted, type `yes` to confirm that you want to delete the application. + + **Example:** + + ```bash + replicated app delete deletion-example + • Fetching App ✓ + ID NAME SLUG SCHEDULER + 1xyAIzrmbvq... deletion-example deletion-example kots + Delete the above listed application? There is no undo: yes█ + • Deleting App ✓ + ``` + +--- + diff --git a/static/llms.txt b/static/llms.txt index 83451af945..2f42e64dbe 100644 --- a/static/llms.txt +++ b/static/llms.txt @@ -1,5 +1,11 @@ +# Replicated Documentation for LLMs + +This file contains markdown-formatted links to Replicated documentation pages. 
+ ## Docs +For a complete archive of all documentation pages, see [llms-full.txt](https://docs.replicated.com/llms-full.txt) + - [Changing an Admin Console Password](https://docs.replicated.com/enterprise/auth-changing-passwords.md): When you install for the first time with Replicated kURL, the Replicated KOTS Admin Console is secured with a single shared password that is set automatically for all users. - [Configuring Role-based Access Control (Beta)](https://docs.replicated.com/enterprise/auth-configuring-rbac.md): You can regulate access to the Replicated KOTS Admin Console resources based on the roles of individual users within your organization. - [Using an Identity Provider for User Access (Beta)](https://docs.replicated.com/enterprise/auth-identity-provider.md): When you install an application for the first time, the Replicated KOTS Admin Console is secured with a single shared password for all users. @@ -750,4 +756,10 @@ - [Using a Registry Proxy for Helm Air Gap Installations](https://docs.replicated.com/vendor/using-third-party-registry-proxy.md): This topic describes how to connect the Replicated proxy registry to a Harbor or jFrog Artifactory instance to support pull-through image caching. - [Application Settings Page](https://docs.replicated.com/vendor/vendor-portal-application-settings.md): Each application has its own settings, which include the application name and application slug. - [Creating a Vendor Account](https://docs.replicated.com/vendor/vendor-portal-creating-account.md): To get started with Replicated, you must create a Replicated vendor account. -- [Managing Applications](https://docs.replicated.com/vendor/vendor-portal-manage-app.md): This topic provides information about managing applications, including how to create, delete, and retrieve the slug for applications in the Replicated Vendor Portal and with the Replicated CLI. \ No newline at end of file +- [Managing Applications](https://docs.replicated.com/vendor/vendor-portal-manage-app.md): This topic provides information about managing applications, including how to create, delete, and retrieve the slug for applications in the Replicated Vendor Portal and with the Replicated CLI. 
+ +## Additional Resources + +For more information, visit: +- [Replicated Documentation Home](https://docs.replicated.com) +- [Replicated Help Center](https://help.replicated.com) From 3d5dc834489ac20cf0425086e2cc7a73e028a3eb Mon Sep 17 00:00:00 2001 From: Paige Calvert <paige@replicated.com> Date: Thu, 27 Mar 2025 17:44:17 -0600 Subject: [PATCH 6/9] run generate-llms script on each build --- .gitignore | 2 + package.json | 4 +- static/js/generate-llms.js | 13 +- static/llms-full.txt | 1502 ------------------------------------ static/llms.txt | 10 +- 5 files changed, 17 insertions(+), 1514 deletions(-) diff --git a/.gitignore b/.gitignore index 62a4d8bc0a..78cd2e6074 100644 --- a/.gitignore +++ b/.gitignore @@ -8,6 +8,8 @@ .docusaurus .cache-loader .history +.llms-full.txt +.llms.txt # Misc .DS_Store diff --git a/package.json b/package.json index c2c77504d6..94e8c3cfb0 100644 --- a/package.json +++ b/package.json @@ -5,7 +5,9 @@ "scripts": { "docusaurus": "docusaurus", "start": "docusaurus start", - "build": "repomix && mv llms-docs.txt static/llms/ && docusaurus build", + "build": "docusaurus build", + "generate-llms": "node static/js/generate-llms.js", + "prebuild": "npm run generate-llms", "swizzle": "docusaurus swizzle", "deploy": "docusaurus deploy", "clear": "docusaurus clear", diff --git a/static/js/generate-llms.js b/static/js/generate-llms.js index 5f13a17a8a..1abd4c55ce 100644 --- a/static/js/generate-llms.js +++ b/static/js/generate-llms.js @@ -8,19 +8,19 @@ const OUTPUT_FULL_FILE = path.join(__dirname, "../../static", "llms-full.txt"); const BASE_URL = "https://docs.replicated.com"; // Define static content -const STATIC_HEADER = `# Replicated Documentation for LLMs +const STATIC_HEADER = `# Replicated Documentation -This file contains markdown-formatted links to Replicated documentation pages. +> Replicated is a commercial software distribution platform. Independent software vendors (ISVs) can use features of the Replicated Platform to distribute modern commercial software into complex, customer-controlled environments, including on-prem and air gap. `; const STATIC_FOOTER = ` -## Additional Resources +## Optional For more information, visit: -- [Replicated Documentation Home](https://docs.replicated.com) -- [Replicated Help Center](https://help.replicated.com) +- [Replicated Community](https://community.replicated.com/) +- [Replicated Vendor API v3 Docs](https://replicated-vendor-api.readme.io/reference/) `; function extractFirstSentence(text) { @@ -87,7 +87,8 @@ function getMarkdownFiles(dir, fileList = []) { function generateFullLLMsTxt(files) { const fullContent = files.map(file => { - return `# ${file.title}\n\n${file.content}\n\n---\n\n`; + // Don't add the title separately since it's already in the content + return `${file.content}\n\n---\n\n`; }).join('\n'); fs.writeFileSync(OUTPUT_FULL_FILE, fullContent); diff --git a/static/llms-full.txt b/static/llms-full.txt index 38c7eaf6fa..99fced17bb 100644 --- a/static/llms-full.txt +++ b/static/llms-full.txt @@ -1,7 +1,5 @@ # Changing an Admin Console Password -# Changing an Admin Console Password - When you install for the first time with Replicated kURL, the Replicated KOTS Admin Console is secured with a single shared password that is set automatically for all users. Replicated recommends that you change this to a new, unique password for security purposes as this automated password is displayed to the user in plain text. The Admin Console password is salted and one-way hashed using bcrypt. 
The irreversible hash is stored in a Secret named `kotsadm-password`. The password is not retrievable if lost. If you lose your Admin Console password, reset your password to access the Admin Console. @@ -33,8 +31,6 @@ To change your Admin Console password: --- -# Configuring Role-based Access Control (Beta) - # Configuring Role-based Access Control (Beta) You can regulate access to the Replicated KOTS Admin Console resources based on the roles of individual users within your organization. @@ -68,8 +64,6 @@ The Admin Console comes with pre-defined identity service roles that can be assi --- -# Using an Identity Provider for User Access (Beta) - # Using an Identity Provider for User Access (Beta) When you install an application for the first time, the Replicated KOTS Admin Console is secured with a single shared password for all users. It is possible to further configure the Admin Console to authenticate users with your organization's user management system. This feature is only available for licenses that have the Replicated identity service feature enabled. @@ -104,8 +98,6 @@ If you want to re-enable the shared password authentication, run the `kubectl ko --- -# Adding Nodes to kURL Clusters - import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" # Adding Nodes to kURL Clusters @@ -137,8 +129,6 @@ To add primary and secondary nodes: --- -# Deleting the Admin Console and Removing Applications - # Deleting the Admin Console and Removing Applications This topic describes how to remove installed applications and delete the Replicated KOTS Admin Console. The information in this topic applies to existing cluster installations with KOTS. @@ -227,8 +217,6 @@ To completely delete the Admin Console from an existing cluster: --- -# Managing Multi-Node Clusters with Embedded Cluster - import HaArchitecture from "../partials/embedded-cluster/_multi-node-ha-arch.mdx" # Managing Multi-Node Clusters with Embedded Cluster @@ -378,8 +366,6 @@ To create a multi-node HA cluster: --- -# Updating Custom TLS Certificates in Embedded Cluster Installations - # Updating Custom TLS Certificates in Embedded Cluster Installations This topic describes how to update custom TLS certificates in Replicated Embedded Cluster installations. @@ -432,8 +418,6 @@ To upload a new custom TLS certificate in Embedded Cluster installations: --- -# Managing Secrets with KOTS Auto-GitOps (Alpha) - import GitOpsNotRecommended from "../partials/gitops/_gitops-not-recommended.mdx" # Managing Secrets with KOTS Auto-GitOps (Alpha) @@ -515,8 +499,6 @@ Replace: --- -# KOTS Auto-GitOps Workflow - import GitOpsLimitation from "../partials/helm/_gitops-limitation.mdx" import GitOpsNotRecommended from "../partials/gitops/_gitops-not-recommended.mdx" @@ -597,8 +579,6 @@ To enable pushing updates to the Auto-GitOps workflow: --- -# Working with the kURL Image Registry - import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" # Working with the kURL Image Registry @@ -675,8 +655,6 @@ The kURL registry image garbage collection feature has following limitations: --- -# Avoiding Docker Hub Rate Limits - # Avoiding Docker Hub Rate Limits This topic describes how to avoid rate limiting for anonymous and free authenticated use of Docker Hub by providing a Docker Hub username and password to the `kots docker ensure-secret` command. 
@@ -711,8 +689,6 @@ For more information, see [docker ensure-secret](/reference/kots-cli-docker-ensu --- -# Configuring Local Image Registries - import ImageRegistrySettings from "../partials/image-registry/_image-registry-settings.mdx" import DockerCompatibility from "../partials/image-registry/_docker-compatibility.mdx" @@ -806,8 +782,6 @@ To stop using a registry and remove registry settings from the Admin Console: --- -# Air Gap Installation with Embedded Cluster - import UpdateAirGapAdm from "../partials/embedded-cluster/_update-air-gap-admin-console.mdx" import UpdateAirGapCli from "../partials/embedded-cluster/_update-air-gap-cli.mdx" import UpdateAirGapOverview from "../partials/embedded-cluster/_update-air-gap-overview.mdx" @@ -954,8 +928,6 @@ On the Admin Console dashboard, the application status changes from Missing to U --- -# Automating Installation with Embedded Cluster - import ConfigValuesExample from "../partials/configValues/_configValuesExample.mdx" import ConfigValuesProcedure from "../partials/configValues/_config-values-procedure.mdx" @@ -1024,8 +996,6 @@ To install with Embedded Cluster in an air-gapped environment: --- -# Embedded Cluster Installation Requirements - import EmbeddedClusterRequirements from "../partials/embedded-cluster/_requirements.mdx" import EmbeddedClusterPortRequirements from "../partials/embedded-cluster/_port-reqs.mdx" import FirewallOpeningsIntro from "../partials/install/_firewall-openings-intro.mdx" @@ -1119,8 +1089,6 @@ The following ports are opened in the default zone: --- -# Online Installation with Embedded Cluster - import Prerequisites from "../partials/install/_ec-prereqs.mdx" # Online Installation with Embedded Cluster @@ -1229,8 +1197,6 @@ On the Admin Console dashboard, the application status changes from Missing to U --- -# Air Gap Installation in Existing Clusters with KOTS - import IntroExisting from "../partials/install/_intro-existing.mdx" import IntroAirGap from "../partials/install/_intro-air-gap.mdx" import PrereqsExistingCluster from "../partials/install/_prereqs-existing-cluster.mdx" @@ -1349,8 +1315,6 @@ On the Admin Console dashboard, the application status changes from Missing to U --- -# Installing with the KOTS CLI - import ConfigValuesExample from "../partials/configValues/_configValuesExample.mdx" import ConfigValuesProcedure from "../partials/configValues/_config-values-procedure.mdx" import PlaceholdersGlobal from "../partials/install/_placeholders-global.mdx" @@ -1466,8 +1430,6 @@ To install with KOTS in an air-gapped existing cluster: --- -# Online Installation in Existing Clusters with KOTS - import IntroExisting from "../partials/install/_intro-existing.mdx" import PrereqsExistingCluster from "../partials/install/_prereqs-existing-cluster.mdx" import LicenseFile from "../partials/install/_license-file-prereq.mdx" @@ -1562,8 +1524,6 @@ On the Admin Console dashboard, the application status changes from Missing to U --- -# KOTS Installation Requirements - import DockerCompatibility from "../partials/image-registry/_docker-compatibility.mdx" import KubernetesCompatibility from "../partials/install/_kubernetes-compatibility.mdx" import FirewallOpeningsIntro from "../partials/install/_firewall-openings-intro.mdx" @@ -1875,8 +1835,6 @@ KOTS has been tested for compatibility with the following registries: --- -# Air Gap Installation with kURL - import KurlAbout from "../partials/install/_kurl-about.mdx" import IntroEmbedded from "../partials/install/_intro-embedded.mdx" import IntroAirGap from 
"../partials/install/_intro-air-gap.mdx" @@ -2008,8 +1966,6 @@ To install an application with kURL: --- -# Installing with kURL from the Command Line - import ConfigValuesExample from "../partials/configValues/_configValuesExample.mdx" import ConfigValuesProcedure from "../partials/configValues/_config-values-procedure.mdx" import PlaceholdersGlobal from "../partials/install/_placeholders-global.mdx" @@ -2110,8 +2066,6 @@ To install in an air-gapped kURL cluster: --- -# kURL Installation Requirements - import FirewallOpeningsIntro from "../partials/install/_firewall-openings-intro.mdx" import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" @@ -2192,8 +2146,6 @@ You must meet the additional kURL system requirements when applicable: --- -# Online Installation with kURL - import KurlAbout from "../partials/install/_kurl-about.mdx" import IntroEmbedded from "../partials/install/_intro-embedded.mdx" import PrereqsEmbeddedCluster from "../partials/install/_prereqs-embedded-cluster.mdx" @@ -2293,8 +2245,6 @@ To install an application with kURL: --- -# Considerations Before Installing - # Considerations Before Installing Before you install an application with KOTS in an existing cluster, consider the following installation options. @@ -2340,8 +2290,6 @@ For more information about how to install KOTS without object storage, see [Inst --- -# Installing KOTS in Existing Clusters Without Object Storage - # Installing KOTS in Existing Clusters Without Object Storage This topic describes how to install Replicated KOTS in existing clusters without the default object storage, including limitations of installing without object storage. @@ -2375,8 +2323,6 @@ When `--with-minio=false` is used with the `kots admin-console upgrade` command, --- -# Accessing Dashboards Using Port Forwarding - # Accessing Dashboards Using Port Forwarding This topic includes information about how to access Prometheus, Grafana, and Alertmanager in Replicated KOTS existing cluster and Replicated kURL installations. @@ -2451,8 +2397,6 @@ To access the Alertmanager dashboard: --- -# Configuring Prometheus Monitoring in Existing Cluster KOTS Installations - import OverviewProm from "../partials/monitoring/_overview-prom.mdx" # Configuring Prometheus Monitoring in Existing Cluster KOTS Installations @@ -2510,8 +2454,6 @@ To connect the Admin Console to a Prometheus endpoint: --- -# Consuming Prometheus Metrics Externally - import OverviewProm from "../partials/monitoring/_overview-prom.mdx" import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" @@ -2580,8 +2522,6 @@ To consume Prometheus metrics from an external service: --- -# Validating SBOM Signatures - # Validating SBOM Signatures This topic describes the process to perform the validation of software bill of material (SBOM) signatures for Replicated KOTS, Replicated kURL, and Troubleshoot releases. @@ -2661,8 +2601,6 @@ To validate an Troubleshoot SBOM signature: --- -# How to Set Up Backup Storage - # How to Set Up Backup Storage This topic describes the process of setting up backup storage for the Replicated snapshots feature. @@ -2718,8 +2656,6 @@ After you configure a storage destination, you can create a backup. 
See [Creatin --- -# Configuring a Host Path Storage Destination - import InstallVelero from "../partials/snapshots/_installVelero.mdx" import RegistryCredNote from "../partials/snapshots/_registryCredentialsNote.mdx" import ResticDaemonSet from "../partials/snapshots/_resticDaemonSet.mdx" @@ -2846,8 +2782,6 @@ To install Velero and configure host path storage for existing clusters: --- -# Configuring an NFS Storage Destination - import InstallVelero from "../partials/snapshots/_installVelero.mdx" import RegistryCredNote from "../partials/snapshots/_registryCredentialsNote.mdx" import ResticDaemonSet from "../partials/snapshots/_resticDaemonSet.mdx" @@ -2977,8 +2911,6 @@ To install Velero and configure NFS storage for existing clusters: --- -# Creating and Scheduling Backups - # Creating and Scheduling Backups This topic describes how to use the Replicated snapshots feature to create backups. It also includes information about how to use the Replicated KOTS Admin Console create a schedule for automatic backups. For information about restoring, see [Restoring from Backups](snapshots-restoring-full). @@ -3084,8 +3016,6 @@ To schedule automatic backups in the Admin Console: --- -# Restoring from Backups - import RestoreTable from "../partials/snapshots/_restoreTable.mdx" import RestoreTypes from "../partials/snapshots/_restore-types.mdx" import GetBackups from "../partials/snapshots/_step-get-backups.mdx" @@ -3251,8 +3181,6 @@ To restore an application from a partial backup: --- -# Configuring Other Storage Destinations - import UpdateDefaultStorage from "../partials/snapshots/_updateDefaultStorage.mdx" import RegistryCredNote from "../partials/snapshots/_registryCredentialsNote.mdx" import CheckVersion from "../partials/snapshots/_checkVersion.mdx" @@ -3449,8 +3377,6 @@ If no Velero installation is detected, instructions are displayed to install Vel --- -# Troubleshooting Snapshots - import NodeAgentMemLimit from "../partials/snapshots/_node-agent-mem-limit.mdx" # Troubleshooting Snapshots @@ -3683,8 +3609,6 @@ These warnings do not necessarily mean that the restore itself failed. The endpo --- -# Updating Storage Settings - # Updating Storage Settings This topic describes how to update existing storage destination settings using the Replicated Admin Console. @@ -3806,8 +3730,6 @@ When configuring the Admin Console to store backups on host path storage, the fo --- -# Installing the Velero CLI - # Installing the Velero CLI You install the Velero CLI before installing Velero and configuring a storage destination for backups. 
@@ -3916,8 +3838,6 @@ Install Velero and configure a storage destination using one of the following pr --- -# Configuring Namespace Access and Memory Limit - import NodeAgentMemLimit from "../partials/snapshots/_node-agent-mem-limit.mdx" import KotsadmNamespace from "../partials/kots-cli/_kotsadm-namespace.mdx" import KotsadmRegistry from "../partials/kots-cli/_kotsadm-registry.mdx" @@ -3963,8 +3883,6 @@ Velero sets default limits for the velero Pod and the node-agent (restic) Pod du --- -# Understanding Application Status Details in the Admin Console - import StatusesTable from "../partials/status-informers/_statusesTable.mdx" import AggregateStatus from "../partials/status-informers/_aggregateStatus.mdx" import AggregateStatusIntro from "../partials/status-informers/_aggregate-status-intro.mdx" @@ -4014,8 +3932,6 @@ The following table lists the supported Kubernetes resources and the conditions --- -# Generating Support Bundles from the Admin Console - import GenerateBundleAdminConsole from "../partials/support-bundles/_generate-bundle-admin-console.mdx" # Generating Support Bundles from the Admin Console @@ -4029,8 +3945,6 @@ This topic describes how to generate support bundles from the KOTS Admin Console --- -# Performing Updates in Existing Clusters - import AdminConsole from "../partials/updating/_admin-console.mdx" import AdminConsoleAirGap from "../partials/updating/_admin-console-air-gap.mdx" import PushKotsImages from "../partials/install/_push-kotsadm-images.mdx" @@ -4179,8 +4093,6 @@ To update KOTS in an existing air gap cluster: --- -# Configuring Automatic Updates - # Configuring Automatic Updates This topic describes how to configure automatic updates for applications installed in online (internet-connected) environments. @@ -4232,8 +4144,6 @@ To configure automatic updates: --- -# Performing Updates in Embedded Clusters - import UpdateAirGapAdm from "../partials/embedded-cluster/_update-air-gap-admin-console.mdx" import UpdateAirGapCli from "../partials/embedded-cluster/_update-air-gap-cli.mdx" import UpdateAirGapOverview from "../partials/embedded-cluster/_update-air-gap-overview.mdx" @@ -4325,8 +4235,6 @@ To update by uploading the air gap bundle for the new version from the Admin Con --- -# About kURL Cluster Updates - import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" # About kURL Cluster Updates @@ -4397,8 +4305,6 @@ For example, if the version of KOTS running in your cluster is 1.109.0, and the --- -# Performing Updates in kURL Clusters - import InstallerRequirements from "../partials/updating/_installerRequirements.mdx" import UpgradePrompt from "../partials/updating/_upgradePrompt.mdx" import AdminConsole from "../partials/updating/_admin-console.mdx" @@ -4520,8 +4426,6 @@ To update the kURL cluster in an air gap environment: --- -# Updating Licenses in the Admin Console - # Updating Licenses in the Admin Console This topic describes how to update a license from the KOTS Admin Console. @@ -4613,8 +4517,6 @@ To change a community license to another license: --- -# Patching with Kustomize - # Patching with Kustomize This topic describes how to use Kustomize to patch an application before deploying. 
@@ -4831,8 +4733,6 @@ To patch an application: --- -# Updating TLS Certificates in kURL Clusters - import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" # Updating TLS Certificates in kURL Clusters @@ -4886,8 +4786,6 @@ To upload a new custom TLS certificate: --- -# Introduction to KOTS - import Kots from "../docs/partials/kots/_kots-definition.mdx" # Introduction to KOTS @@ -4971,8 +4869,6 @@ The KOTS CLI can also be used to install an application without needing to acces --- -# Introduction to Replicated - --- pagination_prev: null --- @@ -5144,8 +5040,6 @@ Support teams can use Replicated features to more quickly diagnose and resolve a --- -# Home - --- slug: / pagination_next: null @@ -5436,15 +5330,11 @@ pagination_next: null --- -# _airgap-bundle - Air gap bundles (`.airgap`) contain the images needed to install and run a single release of your application in _air gap_ environments with no outbound internet access. --- -# _nginx-deployment - ```yaml apiVersion: apps/v1 kind: Deployment @@ -5478,8 +5368,6 @@ spec: --- -# _nginx-k8s-app - ```yaml apiVersion: app.k8s.io/v1beta1 kind: Application @@ -5496,8 +5384,6 @@ spec: --- -# _nginx-kots-app - ```yaml apiVersion: kots.io/v1beta1 kind: Application @@ -5518,8 +5404,6 @@ spec: --- -# _nginx-service - ```yaml apiVersion: v1 kind: Service @@ -5556,15 +5440,11 @@ spec: --- -# _build-source-code - Add one or more jobs to compile your application source code and build images. The build jobs that you create vary depending upon your application and your CI/CD platform. For additional guidance, see the documentation for your CI/CD platform. --- -# _test-recs - * **Application Testing:** Traditional application testing includes unit, integration, and end-to-end tests. These tests are critical for application reliability, and Compatibility Matrix is designed to incorporate and use your application testing. * **Performance Testing:** Performance testing is used to benchmark your application to ensure it can handle the expected load and scale gracefully. Test your application under a range of workloads and scenarios to identify any bottlenecks or performance issues. Make sure to optimize your application for different Kubernetes distributions and configurations by creating all of the environments you need to test in. @@ -5578,8 +5458,6 @@ Add one or more jobs to compile your application source code and build images. T --- -# _openshift-pool - :::note Due to the time it takes to start an OpenShift cluster, a warm pool of OpenShift clusters is maintained. When available, an OpenShift cluster from the pool starts in approximately two minutes with default disks. @@ -5590,8 +5468,6 @@ When starting a cluster with a disk size different than the default, an addition --- -# _overview - Replicated Compatibility Matrix quickly provisions ephemeral clusters of different Kubernetes distributions and versions, such as OpenShift, EKS, and Replicated kURL. You can use Compatibility Matrix to get kubectl access to running clusters within minutes or less. This allows you to more easily test your code in a range of different environments before releasing to customers. @@ -5605,8 +5481,6 @@ Example use cases for Compatibility Matrix include: --- -# _prerequisites - * Create an account in the Replicated Vendor Portal. See [Creating a Vendor Account](/vendor/vendor-portal-creating-account). * Install the Replicated CLI and then authorize the CLI using your vendor account. See [Installing the Replicated CLI](/reference/replicated-cli-installing).
@@ -5617,8 +5491,6 @@ Example use cases for Compatibility Matrix include: --- -# _supported-clusters-overview - Compatibility Matrix can create clusters on virtual machines (VMs), such as kind, k3s, RKE2, and Red Hat OpenShift OKD, and also create cloud-managed clusters, such as EKS, GKE, and AKS: * Cloud-based Kubernetes distributions are run in a Replicated managed and controlled cloud account to optimize and deliver clusters quickly and reliably. The Replicated account has control planes ready and adds a node group when you request it, making the cluster available much faster than if you try to create your own cluster with your own cloud account. @@ -5633,8 +5505,6 @@ For detailed information about the available cluster distributions, see [Support --- -# _collab-existing-user - If a team member adds a GitHub username to their Vendor Portal account that already exists in the collab repository, then the Vendor Portal does _not_ change the role that the existing user is assigned in the collab repository. However, if the RBAC policy assigned to this member in the Vendor Portal later changes, or if the member is removed from the Vendor Portal team, then the Vendor Portal updates or removes the user in the collab repository accordingly. @@ -5643,8 +5513,6 @@ However, if the RBAC policy assigned to this member in the Vendor Portal later c --- -# _collab-rbac-important - :::important The RBAC policy that you specify also determines the level of access that the user has to the Replicated collab repository in GitHub. By default, the Read Only policy grants the user read access to the collab repository. @@ -5655,8 +5523,6 @@ For more information about managing user access to the collab repository from th --- -# _collab-rbac-resources-important - :::important When you update an existing RBAC policy to add one or more `team/support-issues` resources, the GitHub role in the Replicated collab repository of every team member that is assigned to that policy and has a GitHub username saved in their account is updated accordingly. ::: @@ -5665,8 +5531,6 @@ When you update an existing RBAC policy to add one or more `team/support-issues` --- -# _collab-repo-about - The replicated-collab organization in GitHub is used for tracking and collaborating on escalations, bug reports, and feature requests that are sent by members of a Vendor Portal team to the Replicated team. Replicated creates a unique repository in the replicated-collab organization for each Vendor Portal team. Members of a Vendor Portal team submit issues to their unique collab repository on the Support page in the [Vendor Portal](https://vendor.replicated.com/support). For more information about the collab repositories and how they are used, see [Replicated Support Paths and Processes](https://community.replicated.com/t/replicated-vendor-support-paths-and-processes/850) in _Replicated Community_.
@@ -5675,8 +5539,6 @@ For more information about the collab repositories and how they are used, see [R --- -# _affixExample - ```yaml groups: - name: example_settings @@ -5698,8 +5560,6 @@ groups: --- -# _defaultExample - ```yaml - name: custom_key title: Set your secret key for your app @@ -5718,8 +5578,6 @@ groups: --- -# _helpTextExample - ```yaml - name: http_settings title: HTTP Settings @@ -5736,8 +5594,6 @@ groups: --- -# _hiddenExample - ```yaml - name: secret_key title: Secret Key @@ -5749,8 +5605,6 @@ groups: --- -# _item-types - - `bool` - `dropdown` - `file` @@ -5766,8 +5620,6 @@ groups: --- -# _nameExample - ```yaml - name: http_settings title: HTTP Settings @@ -5780,8 +5632,6 @@ groups: --- -# _property-when - It can be useful to conditionally show or hide fields so that your users are only provided the configuration options that are relevant to them. This helps to reduce user error when configuring the application. Conditional statements in the `when` property can be used to evaluate things like the user's environment, license entitlements, and configuration choices. For example: * The Kubernetes distribution of the cluster * If the license includes a specific feature entitlement @@ -5793,8 +5643,6 @@ You can construct conditional statements in the `when` property using KOTS templ --- -# _randomStringNote - :::note When you assign a template function that generates a value to a `value` property, you can use the `readonly` and `hidden` properties to define whether or not the generated value is ephemeral or persistent between changes to the configuration settings for the application. For more information, see [RandomString](template-functions-static-context#randomstring) in _Static Context_. ::: @@ -5802,8 +5650,6 @@ When you assign a template function that generates a value to a `value` property --- -# _readonlyExample - ```yaml - name: key title: Key @@ -5823,8 +5669,6 @@ When you assign a template function that generates a value to a `value` property --- -# _recommendedExample - ```yaml - name: recommended_field title: My recommended field @@ -5839,8 +5683,6 @@ When you assign a template function that generates a value to a `value` property --- -# _regexValidationExample - ``` - name: smtp-settings title: SMTP Settings @@ -5865,8 +5707,6 @@ When you assign a template function that generates a value to a `value` property --- -# _requiredExample - ```yaml - name: custom_key title: Set your secret key for your app @@ -5886,8 +5726,6 @@ When you assign a template function that generates a value to a `value` property --- -# _typeExample - ```yaml - name: group_title title: Group Title @@ -5904,8 +5742,6 @@ When you assign a template function that generates a value to a `value` property --- -# _valueExample - ```yaml - name: custom_key title: Set your secret key for your app @@ -5923,8 +5759,6 @@ When you assign a template function that generates a value to a `value` property --- -# _when-note - :::note `when` is a property of both groups and items. See [Group Properties > `when`](/reference/custom-resource-config#when) above. 
::: @@ -5932,8 +5766,6 @@ When you assign a template function that generates a value to a `value` property --- -# _when-requirements - * The `when` property accepts the following types of values: * Booleans * Strings that match "true", "True", "false", or "False" @@ -5945,8 +5777,6 @@ When you assign a template function that generates a value to a `value` property --- -# _whenExample - ```yaml - name: database_settings_group title: Database Settings @@ -5981,8 +5811,6 @@ When you assign a template function that generates a value to a `value` property --- -# _boolExample - ```yaml bool_config_field: value: "1" @@ -5996,8 +5824,6 @@ bool_config_field: --- -# _config-values-procedure - During installation, KOTS automatically generates a ConfigValues file and saves the file in a directory called `upstream`. After installation, you can view the generated ConfigValues file in the Admin Console **View files** tab or from the command line by running the `kubectl kots get config` command. To get the ConfigValues file from an installed application instance: @@ -6026,8 +5852,6 @@ To get the ConfigValues file from an installed application instance: --- -# _configValuesExample - ```yaml apiVersion: kots.io/v1beta1 kind: ConfigValues @@ -6046,8 +5870,6 @@ spec: --- -# _fileExample - ```yaml file_config_field: filename: my-file.txt @@ -6058,8 +5880,6 @@ file_config_field: --- -# _passwordExample - ```yaml password_config_field: valuePlaintext: myPlainTextPassword @@ -6069,8 +5889,6 @@ password_config_field: --- -# _selectOneExample - ```yaml radio_config_field: value: option_name @@ -6080,8 +5898,6 @@ radio_config_field: --- -# _textExample - ```yaml text_config_field: value: This is a text field value. @@ -6091,8 +5907,6 @@ text_config_field: --- -# _textareaExample - ```yaml textarea_config_field: value: This is a text area field value. @@ -6102,8 +5916,6 @@ textarea_config_field: --- -# _wizard - 1. In the [Vendor Portal](https://vendor.replicated.com), go to **Custom Domains**. 1. In the section for the target Replicated endpoint, click **Add your first custom domain** for your first domain, or click **Add new domain** for additional domains. @@ -6135,8 +5947,6 @@ The Vendor Portal marks the domain as **Configured** after the verification chec --- -# _additionalImages - ```yaml additionalImages: - jenkins/jenkins:lts @@ -6146,8 +5956,6 @@ additionalImages: --- -# _additionalNamespaces - ```yaml additionalNamespaces: - "*" @@ -6157,8 +5965,6 @@ additionalNamespaces: --- -# _allowRollback - ```yaml allowRollback: false ``` @@ -6167,8 +5973,6 @@ allowRollback: false --- -# _graphs - ```yaml graphs: - title: User Signups @@ -6179,8 +5983,6 @@ graphs: --- -# _icon - ```yaml icon: https://support.io/img/logo.png ``` @@ -6189,8 +5991,6 @@ icon: https://support.io/img/logo.png --- -# _minKotsVersion - ```yaml minKotsVersion: "1.71.0" ``` @@ -6199,15 +5999,11 @@ minKotsVersion: "1.71.0" --- -# _ports-applicationURL - <li><p>(Optional) <code>ports.applicationUrl</code>: When set to the same URL that is specified in the `descriptor.links.url` field of the Kubernetes SIG Application custom resource, KOTS adds a link on the Admin Console dashboard where the given service can be accessed. 
This process automatically links to the hostname in the browser (where the Admin Console is being accessed) and appends the specified `localPort`.</p><p>If not set, then the URL defined in the `descriptor.links.url` field of the Kubernetes SIG Application is linked on the Admin Console dashboard.</p></li> --- -# _ports-kurl-note - :::note KOTS does not automatically create port forwards for installations on VMs or bare metal servers with Replicated Embedded Cluster or Replicated kURL. This is because it cannot be verified that the ports are secure and authenticated. Instead, Embedded Cluster or kURL creates a NodePort service to make the Admin Console accessible on a port on the node (port `8800` for kURL or port `30000` for Embedded Cluster). @@ -6217,29 +6013,21 @@ You can expose additional ports on the node for Embedded Cluster or kURL install --- -# _ports-localPort - <li><code>ports.localPort</code>: The port to map on the local workstation.</li> --- -# _ports-serviceName - <li><code>ports.serviceName</code>: The name of the service that receives the traffic.</li> --- -# _ports-servicePort - <li><p><code>ports.servicePort</code>: The <code>containerPort</code> of the Pod where the service is running.</p></li> --- -# _ports - ```yaml ports: - serviceName: web @@ -6252,8 +6040,6 @@ ports: --- -# _proxyRegistryDomain - ```yaml proxyRegistryDomain: "proxy.mycompany.com" ``` @@ -6261,8 +6047,6 @@ proxyRegistryDomain: "proxy.mycompany.com" --- -# _releaseNotes - ```yaml releaseNotes: Fixes a bug and adds a new feature. ``` @@ -6271,8 +6055,6 @@ releaseNotes: Fixes a bug and adds a new feature. --- -# _replicatedRegistryDomain - ```yaml replicatedRegistryDomain: "registry.mycompany.com" ``` @@ -6281,8 +6063,6 @@ replicatedRegistryDomain: "registry.mycompany.com" --- -# _requireMinimalRBACPrivileges - ```yaml requireMinimalRBACPrivileges: false ``` @@ -6291,8 +6071,6 @@ requireMinimalRBACPrivileges: false --- -# _servicePort-note - :::note Ensure that you use the `containerPort` and not the `servicePort`. The `containerPort` and `servicePort` are often the same port, though it is possible that they are different. ::: @@ -6300,8 +6078,6 @@ Ensure that you use the `containerPort` and not the `servicePort`. The `containe --- -# _statusInformers - ```yaml statusInformers: - deployment/my-web-svc @@ -6318,8 +6094,6 @@ statusInformers: --- -# _supportMinimalRBACPrivileges - ```yaml supportMinimalRBACPrivileges: true ``` @@ -6328,8 +6102,6 @@ supportMinimalRBACPrivileges: true --- -# _targetKotsVersion - ```yaml targetKotsVersion: "1.85.0" ``` @@ -6338,8 +6110,6 @@ targetKotsVersion: "1.85.0" --- -# _title - ```yaml title: My Application ``` @@ -6348,15 +6118,11 @@ title: My Application --- -# _change-channel - You can change the channel a customer is assigned at any time. For installations with Replicated KOTS, when you change the customer's channel, the customer can synchronize their license in the Replicated Admin Console to fetch the latest release on the new channel and then upgrade. The Admin Console always fetches the latest release on the new channel, regardless of the presence of any releases on the channel that are marked as required. 
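As a sketch of the NodePort pattern described in the `_ports-kurl-note` partial above, a Service of roughly this shape exposes an in-cluster port on a fixed port of the node itself (the name, namespace, selector, and port numbers here are illustrative, not the exact resource the installers create):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: admin-console-nodeport   # illustrative name
  namespace: example-namespace
spec:
  type: NodePort
  selector:
    app: kotsadm                 # illustrative selector
  ports:
    - port: 3000                 # service port
      targetPort: 3000           # containerPort of the Pod
      nodePort: 30000            # fixed port opened on the node
```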
--- -# _download - You can download customer and instance data from the **Download CSV** dropdown on the **Customers** page: ![Download CSV button in the Customers page](/images/customers-download-csv.png) @@ -6374,8 +6140,6 @@ You can also export customer instance data as JSON using the Vendor API v3 `cust --- -# _definition - Replicated Embedded Cluster allows you to distribute a Kubernetes cluster and your application together as a single appliance, making it easy for enterprise users to install, update, and manage the application and the cluster in tandem. Embedded Cluster is based on the open source Kubernetes distribution k0s. For more information, see the [k0s documentation](https://docs.k0sproject.io/stable/). For software vendors, Embedded Cluster provides a Config for defining characteristics of the cluster that will be created in the customer environment. Additionally, each version of Embedded Cluster includes a specific version of Replicated KOTS, ensuring compatibility between KOTS and the cluster. For enterprise users, cluster updates are done automatically at the same time as application updates, allowing users to more easily keep the cluster up-to-date without needing to use kubectl. @@ -6383,8 +6147,6 @@ For software vendors, Embedded Cluster provides a Config for defining characteri --- -# _ec-config - ```yaml apiVersion: embeddedcluster.replicated.com/v1beta1 kind: Config @@ -6395,8 +6157,6 @@ spec: --- -# _multi-node-ha-arch - The following diagram shows the architecture of an HA multi-node Embedded Cluster installation: ![Embedded Cluster multi-node architecture with high availability](/images/embedded-architecture-multi-node-ha.png) @@ -6415,8 +6175,6 @@ Any Helm [`extensions`](/reference/embedded-config#extensions) that you include --- -# _port-reqs - This section lists the ports used by Embedded Cluster. These ports must be open and available for both single- and multi-node installations. #### Ports Used by Local Processes @@ -6465,8 +6223,6 @@ If port 50000 is occupied, you can select a different port for the LAM during in --- -# _proxy-install-limitations - **Limitations:** * If any of your [Helm extensions](/reference/embedded-config#extensions) make requests to the internet, the given charts need to be manually configured so that those requests are made to the user-supplied proxy server instead. Typically, this requires updating the Helm values to set HTTP proxy, HTTPS proxy, and no proxy. Note that this limitation applies only to network requests made by your Helm extensions. The proxy settings supplied to the install command are used to pull the containers required to run your Helm extensions. @@ -6476,15 +6232,11 @@ If port 50000 is occupied, you can select a different port for the LAM during in --- -# _proxy-install-reqs - **Requirement:** Proxy installations require Embedded Cluster 1.5.1 or later with Kubernetes 1.29 or later. --- -# _requirements - * Linux operating system * x86-64 architecture @@ -6529,8 +6281,6 @@ If port 50000 is occupied, you can select a different port for the LAM during in --- -# _update-air-gap-admin-console - 1. On a machine with browser access (for example, where you accessed the Admin Console to configure the application), download the air gap bundle for the new version using the same curl command that you used to install. For example: ```bash @@ -6576,8 +6326,6 @@ If port 50000 is occupied, you can select a different port for the LAM during in --- -# _update-air-gap-cli - 1. 
SSH onto a controller node in the cluster and download the air gap bundle for the new version using the same curl command that you used to install. For example: ```bash @@ -6632,8 +6380,6 @@ If port 50000 is occupied, you can select a different port for the LAM during in --- -# _update-air-gap-overview - To upgrade an installation, new air gap bundles can be uploaded to the Admin Console from the browser or with the Embedded Cluster binary from the command line. Using the binary is faster and allows the user to download the air gap bundle directly to the machine where the Embedded Cluster is running. Using the browser is slower because the user must download the air gap bundle to a machine with a browser, then upload that bundle to the Admin Console, and then the Admin Console can process it. @@ -6641,8 +6387,6 @@ Using the binary is faster and allows the user to download the air gap bundle di --- -# _update-overview - When you update an application installed with Embedded Cluster, you update both the application and the cluster infrastructure together, including Kubernetes, KOTS, and other components running in the cluster. There is no need or mechanism to update the infrastructure on its own. When you deploy a new version, any changes to the cluster are deployed first. The Admin Console waits until the cluster is ready before updating the application. @@ -6654,8 +6398,6 @@ When performing an upgrade with Embedded Cluster, the user is able to change the --- -# _warning-do-not-downgrade - :::important Do not downgrade the Embedded Cluster version. This is not supported but is not prohibited, and it can lead to unexpected behavior. ::: @@ -6663,15 +6405,11 @@ Do not downgrade the Embedded Cluster version. This is not supported but is not --- -# _create-promote-release - Create a new release and promote it to the Unstable channel. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Managing Releases with the CLI](releases-creating-cli). --- -# _csdl-overview - Commercial software distribution is the business process that independent software vendors (ISVs) use to enable enterprise customers to self-host a fully private instance of the vendor's application in an environment controlled by the customer. Replicated has developed the Commercial Software Distribution Lifecycle to represent the stages that are essential for every company that wants to deliver their software securely and reliably to customer-controlled environments.
@@ -6681,8 +6419,6 @@ This lifecycle was inspired by the DevOps lifecycle and the Software Development --- -# _gitea-ec-config - ```yaml apiVersion: embeddedcluster.replicated.com/v1beta1 kind: Config @@ -6693,8 +6429,6 @@ spec: --- -# _gitea-helmchart-cr-ec - ```yaml apiVersion: kots.io/v1beta2 kind: HelmChart @@ -6718,8 +6452,6 @@ spec: --- -# _gitea-helmchart-cr - ```yaml apiVersion: kots.io/v1beta2 kind: HelmChart @@ -6736,8 +6468,6 @@ spec: --- -# _gitea-k8s-app-cr - ```yaml apiVersion: app.k8s.io/v1beta1 kind: Application @@ -6755,8 +6485,6 @@ spec: --- -# _gitea-kots-app-cr-ec - ```yaml apiVersion: kots.io/v1beta1 kind: Application @@ -6778,8 +6506,6 @@ spec: --- -# _gitea-kots-app-cr - ```yaml apiVersion: kots.io/v1beta1 kind: Application @@ -6800,8 +6526,6 @@ spec: --- -# _grafana-config - ```yaml apiVersion: kots.io/v1beta1 kind: Config @@ -6826,8 +6550,6 @@ spec: --- -# _grafana-helmchart - ```yaml apiVersion: kots.io/v1beta2 kind: HelmChart @@ -6847,8 +6569,6 @@ spec: --- -# _grafana-k8s-app - ```yaml apiVersion: app.k8s.io/v1beta1 kind: Application @@ -6865,8 +6585,6 @@ spec: --- -# _grafana-kots-app - ```yaml apiVersion: kots.io/v1beta1 kind: Application @@ -6887,8 +6605,6 @@ spec: --- -# _kubernetes-training - :::note This tutorial assumes that you have a working knowledge of Kubernetes. For an introduction to Kubernetes and free training resources, see [Training](https://kubernetes.io/training/) in the Kubernetes documentation. ::: @@ -6897,8 +6613,6 @@ This tutorial assumes that you have a working knowledge of Kubernetes. For an in --- -# _labs-intro - Replicated also offers a sandbox environment where you can complete several beginner, intermediate, and advanced labs. The sandbox environment automatically provisions the required Kubernetes cluster or VM where you will install a sample application as part of the labs. To get started with an introductory lab, see [Deploy a Hello World Application with Replicated](https://play.instruqt.com/replicated/tracks/hello-world). @@ -6907,8 +6621,6 @@ To get started with an introductory lab, see [Deploy a Hello World Application w --- -# _related-topics - For more information about the subjects in the getting started tutorials, see the following topics: * [Installing the Replicated CLI](/reference/replicated-cli-installing) @@ -6920,22 +6632,16 @@ For more information about the subjects in the getting started tutorials, see th --- -# _replicated-definition - Replicated is a commercial software distribution platform. Independent software vendors (ISVs) can use features of the Replicated Platform to distribute modern commercial software into complex, customer-controlled environments, including on-prem and air gap. --- -# _test-your-changes - Install the release to test your changes. For Embedded Cluster installations, see [Performing Updates in Embedded Clusters](/enterprise/updating-embedded). For existing cluster installations with KOTS, see [Performing Updates in Existing Clusters](/enterprise/updating-app-manager). --- -# _tutorial-intro - This tutorial introduces you to the Replicated features for software vendors and their enterprise users. It is designed to familiarize you with the key concepts and processes that you use as a software vendor when you package and distribute your application with Replicated.
In this tutorial, you use a set of sample manifest files for a basic NGINX application to learn how to: @@ -6946,8 +6652,6 @@ In this tutorial, you use a set of sample manifest files for a basic NGINX appli --- -# _vm-requirements - For this tutorial, the VM must meet the following requirements: * Ubuntu 18.04 @@ -6965,8 +6669,6 @@ For the complete list of system requirements for the kURL, see [kURL Requirement --- -# _gitops-not-recommended - :::important KOTS Auto-GitOps is a legacy feature and is **not recommended** for use. For modern enterprise customers that prefer software deployment processes that use CI/CD pipelines, Replicated recommends the [Helm CLI installation method](/vendor/install-with-helm), which is more commonly used in these types of enterprise environments. ::: @@ -6974,16 +6676,12 @@ KOTS Auto-GitOps is a legacy feature and is **not recommended** for use. For mod --- -# _gitops-limitation - The KOTS Auto-GitOps workflow is not supported for installations with the HelmChart custom resource `apiVersion: kots.io/v1beta2` or the HelmChart custom resource `apiVersion: kots.io/v1beta1` with `useHelmInstall: true`. --- -# _helm-builder-requirements - The `builder` key has the following requirements and recommendations: * Replicated recommends that you include only the minimum Helm values in the `builder` key that are required to template the Helm chart with the correct image tags. * Use only static, or _hardcoded_, values in the `builder` key. You cannot use template functions in the `builder` key because values in the `builder` key are not rendered in a customer environment. @@ -6992,15 +6690,11 @@ The `builder` key has the following requirements and recommendations: --- -# _helm-cr-builder-airgap-intro - In the `builder` key, you provide the minimum Helm values required to render the chart templates so that the output includes any images that must be included in the air gap bundle. The Vendor Portal uses these values to render the Helm chart templates when building the `.airgap` bundle for the release. --- -# ... - For example, a Helm chart might include a conditional PostgreSQL Deployment, as shown in the Helm template below: ```yaml @@ -7052,16 +6746,12 @@ spec: --- -# _helm-cr-chart-name - The name of the chart. This value must exactly match the `name` field from a `Chart.yaml` in a `.tgz` chart archive that is also included in the release. If the names do not match, then the installation can error or fail. --- -# _helm-cr-chart-release-name - Specifies the release name to use when installing this instance of the Helm chart. Defaults to the chart name. The release name must be unique across all charts deployed in the namespace. To deploy multiple instances of the same Helm chart in a release, you must add an additional HelmChart custom resource with a unique release name for each instance of the Helm chart. @@ -7072,24 +6762,18 @@ Must be a valid Helm release name that matches regex `^[a-z0-9]([-a-z0-9]*[a-z0- --- -# _helm-cr-chart-version - The version of the chart. This value must match the `version` field from a `Chart.yaml` in a `.tgz` chart archive that is also included in the release. --- -# _helm-cr-chart - The `chart` key allows for a mapping between the data in this definition and the chart archive itself. More than one `kind: HelmChart` can reference a single chart archive, if different settings are needed. --- -# _helm-cr-exclude - The `exclude` attribute is used to make charts optional. The `exclude` attribute can be parsed by template functions.
When Replicated KOTS processes Helm charts, it excludes the entire chart if the output of the `exclude` field can be parsed as a boolean evaluating to `true`. @@ -7103,8 +6787,6 @@ For more information about optional charts, template functions, and how KOTS pro --- -# _helm-cr-namespace - The `namespace` key specifies an alternative namespace where Replicated KOTS installs the Helm chart. **Default:** The Helm chart is installed in the same namespace as the Admin Console. The `namespace` attribute can be parsed by template functions. For more information about template functions, see [About template function contexts](template-functions-about). @@ -7113,8 +6795,6 @@ If you specify a namespace in the HelmChart `namespace` field, you must also inc --- -# _helm-cr-optional-values-recursive-merge - The `optionalValues.recursiveMerge` boolean defines how KOTS merges `values` and `optionalValues`: * When `optionalValues.recursiveMerge` is false, the top level keys in `optionalValues` override the top level keys in `values`. By default, `optionalValues.recursiveMerge` is set to false. @@ -7124,8 +6804,6 @@ The `optionalValues.recursiveMerge` boolean defines how KOTS merges `values` and --- -# _helm-cr-optional-values-when - The `optionalValues.when` field defines a conditional statement that must evaluate to true for the given values to be set. Evaluation of the conditional in the `optionalValues.when` field is deferred until render time in the customer environment. Use KOTS template functions to write the `optionalValues.when` conditional statement. The following example shows a conditional statement for selecting a database option on the Admin Console configuration screen: @@ -7140,8 +6818,6 @@ For more information about using KOTS template functions, see [About Template Fu --- -# _helm-cr-optional-values - The `optionalValues` key can be used to set values in the Helm chart `values.yaml` file when a given conditional statement evaluates to true. For example, if a customer chooses to include an optional application component in their deployment, it might be necessary to include Helm chart values related to the optional component. `optionalValues` includes the following properties: @@ -7155,8 +6831,6 @@ The `optionalValues` key can be used to set values in the Helm chart `values.yam --- -# _helm-cr-upgrade-flags - Specifies additional flags to pass to the `helm upgrade` command for charts. These flags are passed in addition to any flags Replicated KOTS passes by default. The values specified here take precedence if KOTS already passes the same flag. The `helmUpgradeFlags` attribute can be parsed by template functions. For more information about template functions, see [About template function contexts](template-functions-about). KOTS uses `helm upgrade` for _all_ deployments of an application, not just upgrades, by specifying the `--install` flag. For non-boolean flags that require an additional argument, such as `--timeout 1200s`, you must use an equal sign (`=`) or specify the additional argument separately in the array. @@ -7173,8 +6847,6 @@ helmUpgradeFlags: --- -# _helm-cr-values - The `values` key can be used to set or delete existing values in the Helm chart `values.yaml` file. Any values that you include in the `values` key must match values in the Helm chart `values.yaml`. For example, `spec.values.images.pullSecret` in the HelmChart custom resource matches `images.pullSecret` in the Helm chart `values.yaml`.
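A minimal sketch of that mapping, with illustrative chart and secret names (the `chart` fields must match the `Chart.yaml` in the archive, as described in the `_helm-cr-chart-name` and `_helm-cr-chart-version` partials above):

```yaml
apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: samplechart
spec:
  chart:
    name: samplechart           # matches the name field in Chart.yaml
    chartVersion: 1.0.0         # matches the version field in Chart.yaml
  values:
    images:
      pullSecret: example-pull-secret   # matches images.pullSecret in the chart's values.yaml
```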
During installation or upgrade with KOTS, `values` is merged with the Helm chart `values.yaml` in the chart archive. Only include values in the `values` key that you want to set or delete. @@ -7182,15 +6854,11 @@ During installation or upgrade with KOTS, `values` is merged with the Helm chart --- -# _helm-cr-weight-limitation - The `weight` field is _not_ supported for HelmChart custom resources with `useHelmInstall: false`. --- -# _helm-cr-weight - Determines the order in which KOTS applies the Helm chart. Charts are applied by weight in ascending order, with lower weights applied first. **Supported values:** Positive or negative integers. **Default:** `0` In KOTS v1.99.0 and later, `weight` also determines the order in which charts are uninstalled. Charts are uninstalled by weight in descending order, with higher weights uninstalled first. For more information about uninstalling applications, see [remove](kots-cli-remove) in _KOTS CLI_. @@ -7200,22 +6868,16 @@ For more information, see [Orchestrating Resource Deployment](/vendor/orchestrat --- -# _helm-definition - Helm is a popular open source package manager for Kubernetes applications. Many ISVs use Helm to configure and deploy Kubernetes applications because it provides a consistent, reusable, and sharable packaging format. For more information, see the [Helm documentation](https://helm.sh/docs). --- -# _helm-install-beta - The Helm installation method is Beta and is not recommended for production releases. The features and availability of the Helm installation method are subject to change. --- -# _helm-install-prereqs - * The customer used to install must have a valid email address. This email address is only used as a username for the Replicated registry and is never contacted. For more information about creating and editing customers in the Vendor Portal, see [Creating a Customer](/vendor/releases-creating-customer). * The customer used to install must have the **Existing Cluster (Helm CLI)** install type enabled. For more information about enabling install types for customers in the Vendor Portal, see [Managing Install Types for a License](licenses-install-types). @@ -7227,8 +6889,6 @@ The Helm installation method is Beta and is not recommended for production relea --- -# _helm-package - ```bash helm package -u PATH_TO_CHART ``` @@ -7241,8 +6901,6 @@ The Helm chart, including any dependencies, is packaged and copied to your curre --- -# _helm-template-limitation - Helm's `lookup` function and some values in the built-in `Capabilities` object are not supported with the `kots.io/v1beta1` HelmChart custom resource. This is because KOTS uses the `helm template` command to render chart templates locally. During rendering, Helm does not have access to the cluster where the chart will be installed. For more information, see [Kubernetes and Chart Functions](https://helm.sh/docs/chart_template_guide/function_list/#kubernetes-and-chart-functions) in the Helm documentation. @@ -7250,22 +6908,16 @@ Helm's `lookup` function and some values in the built-in `Capabilities` object a --- -# _helm-version-limitation - Support for Helm v2, including security patches, ended on November 13, 2020. If you specified `helmVersion: v2` in any HelmChart custom resources, update your references to v3. By default, KOTS uses Helm v3 to process all Helm charts. --- -# _hook-weights-limitation - Hook weights below -9999 are not supported. 
All hook weights must be set to a value above -9999 to ensure the Replicated image pull secret is deployed before any resources are pulled. --- -# _hooks-limitation - The following hooks are not supported and are ignored if they are present: * `test` * `pre-rollback` @@ -7274,8 +6926,6 @@ The following hooks are not supported and are ignored if they are present: --- -# _installer-only-annotation - Any other Kubernetes resources in the release (such as Kubernetes Deployments or Services) must include the `kots.io/installer-only` annotation. The `kots.io/installer-only` annotation indicates that the Kubernetes resource is used only by the Replicated installers (Embedded Cluster, KOTS, and kURL). @@ -7293,22 +6943,16 @@ metadata: --- -# _kots-helm-cr-description - To deploy Helm charts, KOTS requires a unique HelmChart custom resource for each Helm chart `.tgz` archive in the release. You configure the HelmChart custom resource to provide the necessary instructions to KOTS for processing and preparing the chart for deployment. Additionally, the HelmChart custom resource creates a mapping between KOTS and your Helm chart to allow Helm values to be dynamically set during installation or upgrade. --- -# _replicated-deprecated - The HelmChart custom resource `apiVersion: kots.io/v1beta1` is deprecated. For installations with Replicated KOTS v1.99.0 and later, use the HelmChart custom resource with `apiVersion: kots.io/v1beta2` instead. See [HelmChart v2](/reference/custom-resource-helmchart-v2) and [Configuring the HelmChart Custom Resource v2](/vendor/helm-native-v2-using). --- -# _replicated-helm-migration - You cannot migrate existing Helm charts in existing installations from the `useHelmInstall: false` installation method to a different method. If KOTS already installed the Helm chart previously in the environment using a HelmChart custom resource with `apiVersion: kots.io/v1beta1` and `useHelmInstall: false`, then KOTS does not attempt to install the chart using a different method and displays the following error message: `Deployment method for chart <chart_name> has changed`. To change the installation method from `useHelmInstall: false` to a different method, the user must reinstall your application in a new environment. @@ -7316,8 +6960,6 @@ To change the installation method from `useHelmInstall: false` to a different me --- -# Helm chart values.yaml - Using KOTS template functions in the [Config](/reference/template-functions-config-context) context allows you to set Helm values based on user-supplied values from the KOTS Admin Console configuration page. For example, the following Helm chart `values.yaml` file contains `postgresql.enabled`, which is set to `false`: @@ -7355,8 +6997,6 @@ During installation or upgrade, the template function is rendered to true or fal --- -# KOTS HelmChart custom resource - Using KOTS template functions in the [License](/reference/template-functions-license-context) context allows you to set Helm values based on the unique license file used for installation or upgrade.
For example, the following HelmChart custom resource uses the Replicated [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue) template function to evaluate if the license has the boolean `newFeatureEntitlement` field set to `true`: @@ -7385,8 +7025,6 @@ During installation or upgrade, the LicenseFieldValue template function is rende --- -# _v2-native-helm-cr-example - ```yaml apiVersion: kots.io/v1beta2 kind: HelmChart @@ -7456,8 +7094,6 @@ spec: --- -# _docker-compatibility - - Docker Hub :::note @@ -7474,8 +7110,6 @@ spec: --- -# _image-registry-settings - <table> <tr> <th width="30%">Field</th> @@ -7506,8 +7140,6 @@ spec: --- -# _access-admin-console - By default, during installation, KOTS automatically opens localhost port 8800 to provide access to the Admin Console. Using the `--no-port-forward` flag with the `kots install` command prevents KOTS from creating a port forward to the Admin Console. After you install with the `--no-port-forward` flag, you can optionally create a port forward so that you can log in to the Admin Console in a browser window. @@ -7548,8 +7180,6 @@ To access the Admin Console: --- -# _airgap-bundle-build - * If the **Automatically create airgap builds for newly promoted releases in this channel** setting is enabled on the channel, watch for the build status to complete. * If automatic air gap builds are not enabled, go to the **Release history** page for the channel and build the air gap bundle manually. @@ -7564,15 +7194,11 @@ To access the Admin Console: --- -# _airgap-bundle-download - After the build completes, download the bundle. Ensure that you can access the downloaded bundle from the environment where you will install the application. --- -# _airgap-bundle-view-contents - (Optional) View the contents of the downloaded bundle: ```bash @@ -7584,8 +7210,6 @@ After the build completes, download the bundle. Ensure that you can access the d --- -# _airgap-license-download - 1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Customers** page. 1. Click on the name of the target customer and go to the **Manage customer** tab. @@ -7605,22 +7229,16 @@ After the build completes, download the bundle. Ensure that you can access the d --- -# _automation-intro-embedded - When you use the KOTS CLI to install an application in a kURL cluster, you first run the kURL installation script to provision the cluster and automatically install KOTS in the cluster. Then, you can run the `kots install` command to install the application. --- -# _automation-intro-existing - When you use the KOTS CLI to install an application in an existing cluster, you install both the application and Replicated KOTS with a single command. --- -# _config-values-procedure - To get the ConfigValues file from an installed application instance: 1. Install the target release in a development environment. You can either install the release with Replicated Embedded Cluster or install in an existing cluster with KOTS. For more information, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded) or [Online Installation in Existing Clusters](/enterprise/installing-existing-cluster). @@ -7647,8 +7265,6 @@ To get the ConfigValues file from an installed application instance: --- -# _download-kotsadm-bundle - Download the `kotsadm.tar.gz` air gap bundle from the [Releases](https://github.com/replicatedhq/kots/releases) page in the kots repository in GitHub.
Ensure that you can access the downloaded bundle from the environment where you will install the application. :::note The version of the `kotsadm.tar.gz` air gap bundle used must be compatible with --- -# _download-kurl-bundle - ```bash export REPLICATED_APP=APP_SLUG curl -LS https://k8s.kurl.sh/bundle/$REPLICATED_APP.tar.gz -o $REPLICATED_APP.tar.gz ``` Where `APP_SLUG` is the unique slug for the application. --- -# _ec-prereqs - * Ensure that your installation environment meets the Embedded Cluster requirements. See [Embedded Cluster Requirements](/enterprise/installing-embedded-requirements). * The application release that you want to install must include an [Embedded Cluster Config](/reference/embedded-config). @@ -7680,8 +7292,6 @@ Where `APP_SLUG` is the unique slug for the application. --- -# _embedded-ha-step - (HA Installation Only) If you are installing in HA mode and did not already preconfigure a load balancer, you are prompted during the installation. Do one of the following: - If you are using the internal load balancer, leave the prompt blank and proceed with the installation. @@ -7691,8 +7301,6 @@ Where `APP_SLUG` is the unique slug for the application. --- -# _embedded-login-password - After the installation command finishes, note the `Kotsadm` and `Login with password (will not be shown again)` fields in the output of the command. You use these to log in to the Admin Console. The following shows an example of the `Kotsadm` and `Login with password (will not be shown again)` fields in the output of the installation command: @@ -7712,8 +7320,6 @@ After the installation command finishes, note the `Kotsadm` and `Login with pass --- -# _extract-kurl-bundle - In your installation environment, extract the contents of the kURL `.tar.gz` bundle that you downloaded: ```bash @@ -7723,8 +7329,6 @@ In your installation environment, extract the contents of the kURL `.tar.gz` bun --- -# _firewall-openings-intro - The domains for the services listed in the table below need to be accessible from servers performing online installations. No outbound internet access is required for air gap installations. For services hosted at domains owned by Replicated, the table below includes a link to the list of IP addresses for the domain at [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json) in GitHub. Note that the IP addresses listed in the `replicatedhq/ips` repository also include IP addresses for some domains that are _not_ required for installation. @@ -7734,8 +7338,6 @@ For any third-party services hosted at domains not owned by Replicated, consult --- -# _firewall-openings - The domains for the services listed in the table below need to be accessible from servers performing online installations. No outbound internet access is required for air gap installations. For services hosted at domains owned by Replicated, the table below includes a link to the list of IP addresses for the domain at [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json) in GitHub. Note that the IP addresses listed in the `replicatedhq/ips` repository also include IP addresses for some domains that are _not_ required for installation. @@ -7826,8 +7428,6 @@ For third-party services hosted at domains not owned by Replicated, the table be --- -# _ha-load-balancer-about - A load balancer is required for high availability mode.
If your vendor has chosen to use the internal load balancer with the kURL EKCO add-on, you do not need to provide your own external load balancer. An external load balancer can be preferred when clients outside the cluster need access to the cluster's Kubernetes API. If you decide to use an external load balancer, the external load balancer must be a TCP forwarding load balancer. For more information, see [Prerequisites](#prerequisites). @@ -7837,8 +7437,6 @@ The health check for an apiserver is a TCP check on the port that the kube-apise --- -# _ha-load-balancer-prereq - - If you are installing in high availability (HA) mode, a load balancer is required. You can use the kURL internal load balancer if the [Embedded kURL Cluster Operator (EKCO) Add-On](https://kurl.sh/docs/add-ons/ekco) is included in the kURL Installer spec. Or, you can bring your own external load balancer. An external load balancer might be preferred when clients outside the cluster need access to the cluster's Kubernetes API. To install in HA mode, complete the following prerequisites: @@ -7855,15 +7453,11 @@ The health check for an apiserver is a TCP check on the port that the kube-apise --- -# _install-kots-cli-airgap - Install the KOTS CLI. See [Manually Download and Install](/reference/kots-cli-getting-started#manually-download-and-install) in _Installing the KOTS CLI_. --- -# _install-kots-cli - Install the KOTS CLI: ``` @@ -7875,29 +7469,21 @@ Install the KOTS CLI: --- -# _intro-air-gap - The procedures in this topic apply to installation environments that do not have access to the internet, known as _air gap_ environments. --- -# _intro-embedded - This topic describes how to use Replicated kURL to provision an embedded cluster in a virtual machine (VM) or bare metal server and install an application in the cluster. --- -# _intro-existing - This topic describes how to use Replicated KOTS to install an application in an existing Kubernetes cluster. --- -# _kots-airgap-version-match - :::note The versions of the KOTS CLI and the `kotsadm.tar.gz` bundle must match. You can check the version of the KOTS CLI with `kubectl kots version`. ::: @@ -7905,8 +7491,6 @@ The versions of the KOTS CLI and the `kotsadm.tar.gz` bundle must match. You can --- -# _kots-install-prompts - When prompted by the `kots install` command: 1. Provide the namespace where you want to install both KOTS and the application. 1. Create a new password for logging in to the Admin Console. @@ -7932,8 +7516,6 @@ When prompted by the `kots install` command: --- -# _kubernetes-compatibility - | KOTS Versions | Kubernetes Compatibility | |------------------------|-----------------------------| | 1.117.0 and later | 1.31, 1.30, 1.29 | @@ -7944,22 +7526,16 @@ When prompted by the `kots install` command: --- -# _kurl-about - Replicated kURL is an open source project. For more information, see the [kURL documentation](https://kurl.sh/docs/introduction/). --- -# _license-file-prereq - * Download your license file. Ensure that you can access the downloaded license file from the environment where you will install the application. See [Downloading Customer Licenses](/vendor/licenses-download). --- -# _placeholder-airgap-bundle - * `PATH_TO_AIRGAP_BUNDLE` with the path to the `.airgap` bundle for the application release. You can build and download the air gap bundle for a release in the [Vendor Portal](https://vendor.replicated.com) on the **Release history** page for the channel where the release is promoted. 
Alternatively, for information about building and downloading air gap bundles with the Vendor API v3, see [Trigger airgap build for a channel's release](https://replicated-vendor-api.readme.io/reference/channelreleaseairgapbuild) and [Get airgap bundle download URL for the active release on the channel](https://replicated-vendor-api.readme.io/reference/channelreleaseairgapbundleurl) in the Vendor API v3 documentation. @@ -7967,29 +7543,21 @@ Replicated kURL is an open source project. For more information, see the [kURL d --- -# _placeholder-app-name-UI - * `APP_NAME` with the name of the application. The `APP_NAME` is included in the installation command that your vendor gave you. This is a unique identifier that KOTS will use to refer to the application that you install. --- -# _placeholder-namespace-embedded - * `NAMESPACE` with the namespace where Replicated kURL installed Replicated KOTS when creating the cluster. By default, kURL installs KOTS in the `default` namespace. --- -# _placeholder-namespace-existing - * `NAMESPACE` with the namespace where you want to install both the application and KOTS. --- -# _placeholder-ro-creds - * `REGISTRY_HOST` with the same hostname for the private registry where you pushed the Admin Console images. * `RO_USERNAME` and `RO_PASSWORD` with the username and password for an account that has read-only access to the private registry. @@ -8003,8 +7571,6 @@ Replicated kURL is an open source project. For more information, see the [kURL d --- -# _placeholders-global - * `APP_NAME` with a name for the application. This is the unique name that KOTS will use to refer to the application that you install. * `PASSWORD` with a shared password for accessing the Admin Console. @@ -8016,8 +7582,6 @@ Replicated kURL is an open source project. For more information, see the [kURL d --- -# _prereqs-embedded-cluster - * Ensure that your environment meets the minimum system requirements. See [kURL Installation Requirements](/enterprise/installing-kurl-requirements). * Review the advanced installation options available for the kURL installer. See [Advanced Options](https://kurl.sh/docs/install-with-kurl/advanced-options) in the kURL documentation. @@ -8025,8 +7589,6 @@ Replicated kURL is an open source project. For more information, see the [kURL d --- -# _prereqs-existing-cluster - * Ensure that your cluster meets the minimum system requirements. See [Minimum System Requirements](/enterprise/installing-general-requirements#minimum-system-requirements) in _Installation Requirements_. * Ensure that you have at least the minimum RBAC permissions in the cluster required to install KOTS. See [RBAC Requirements](/enterprise/installing-general-requirements#rbac-requirements) in _Installation Requirements_. @@ -8042,15 +7604,11 @@ Replicated kURL is an open source project. For more information, see the [kURL d --- -# _provision-cluster-intro - This procedure describes how to use kURL to provision an embedded cluster on a VM or bare metal server. When you create a cluster with kURL, kURL also automatically installs Replicated KOTS in the `default` namespace in the cluster.
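The cluster that this procedure provisions is defined by the vendor's kURL installer spec, described in the `_installers` partial below. As a rough sketch only (the add-ons shown and their version strings are placeholders, not a recommended configuration):

```yaml
apiVersion: cluster.kurl.sh/v1beta1
kind: Installer
metadata:
  name: example-installer   # placeholder name
spec:
  kubernetes:
    version: "1.27.x"       # placeholder add-on versions throughout
  containerd:
    version: "1.6.x"
  flannel:
    version: "0.22.x"
  openebs:
    version: "3.7.x"
    isLocalPVEnabled: true
  kotsadm:
    version: "latest"       # the KOTS add-on installs the Admin Console
```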
--- -# _push-kotsadm-images - Extract the KOTS Admin Console container images from the `kotsadm.tar.gz` bundle and push the images to your private registry: ``` @@ -8072,8 +7630,6 @@ Extract the KOTS Admin Console container images from the `kotsadm.tar.gz` bundle --- -# _airgap-telemetry - For air gap instances, Replicated KOTS and the Replicated SDK collect and store instance telemetry in a Kubernetes Secret in the customer environment. The Replicated SDK also stores any custom metrics within its Secret. The telemetry and custom metrics stored in the Secret are collected when a support bundle is generated in the environment. When the support bundle is uploaded to the Vendor Portal, the telemetry and custom metrics are associated with the correct customer and instance ID, and the Vendor Portal updates the instance insights and event data accordingly. @@ -8082,8 +7638,6 @@ The telemetry and custom metrics stored in the Secret are collected when a suppo --- -# _notifications-about - :::note Configuring notifications for customer instance changes is in public Beta. Features and functionality are subject to change as we continue to iterate this functionality towards General Availability. ::: @@ -8096,8 +7650,6 @@ For more information about how application status is determined, see [Resource S --- -# _supported-resources-status - The following resource types are supported: * Deployment @@ -8110,15 +7662,11 @@ The following resource types are supported: --- -# _admin-console-about - KOTS provides an Admin Console that lets your customers manage your application. You can customize the Admin Console. For example, you can customize the Config screen to allow customers to specify inputs related to unique options that your application provides. You can also include your own branding on the Admin Console, configure status informers, and add custom graphs. --- -# _download-portal-about - The Replicated Download Portal can be used to share license files, air gap bundles, and other assets with customers. A unique Download Portal link is available for each customer. The Download Portal uses information from the customer's license to make the relevant assets available for download, such as: * The license file * `.airgap` bundles for the application releases that the customer has access to based on their channel assignment @@ -8129,22 +7677,16 @@ The Replicated Download Portal can be used to share license files, air gap bundl --- -# _embedded-kubernetes-definition - _Embedded Kubernetes_ refers to delivering a Kubernetes distribution alongside an application, so that both Kubernetes and the application are deployed in the customer environment. Embedding Kubernetes allows software vendors to install their Kubernetes application in non-Kubernetes customer-controlled environments, such as virtual machines (VMs) or bare metal servers. Additionally, software vendors that embed Kubernetes with their application have greater control over the characteristics of the cluster where their application is installed. This allows vendors to deliver a cluster that meets their application's requirements, which can help reduce errors during installation. --- -# _kots-definition - Replicated KOTS is a kubectl plugin and an in-cluster Admin Console that provides highly successful installations of Helm charts and Kubernetes applications into customer-controlled environments, including on-prem and air gap environments.
--- -# _kots-entitlement-note - :::note The Replicated KOTS entitlement is required to install applications with KOTS. For more information, see [Pricing](https://www.replicated.com/pricing) on the Replicated website. ::: @@ -8152,8 +7694,6 @@ The Replicated KOTS entitlement is required to install applications with KOTS. F --- -# _ensure-rbac - <tr> <td><code>--ensure-rbac</code></td> <td>bool</td> @@ -8164,8 +7704,6 @@ The Replicated KOTS entitlement is required to install applications with KOTS. F --- -# _help - <tr> <td><code>-h, --help</code></td> <td></td> @@ -8176,8 +7714,6 @@ The Replicated KOTS entitlement is required to install applications with KOTS. F --- -# _kotsadm-namespace - <tr> <td><code>--kotsadm-namespace</code></td> <td>string</td> @@ -8187,8 +7723,6 @@ The Replicated KOTS entitlement is required to install applications with KOTS. F --- -# _kotsadm-registry - <tr> <td><code>--kotsadm-registry</code></td> <td>string</td> @@ -8198,8 +7732,6 @@ The Replicated KOTS entitlement is required to install applications with KOTS. F --- -# _registry-password - <tr> <td><code>--registry-password</code></td> <td>string</td> @@ -8209,8 +7741,6 @@ The Replicated KOTS entitlement is required to install applications with KOTS. F --- -# _registry-username - <tr> <td><code>--registry-username</code></td> <td>string</td> @@ -8220,8 +7750,6 @@ The Replicated KOTS entitlement is required to install applications with KOTS. F --- -# _skip-rbac-check - <tr> <td><code>--skip-rbac-check</code></td> <td>bool</td> @@ -8232,8 +7760,6 @@ The Replicated KOTS entitlement is required to install applications with KOTS. F --- -# _strict-sec-context-yaml - ```yaml securityContext: fsGroup: 1001 @@ -8250,8 +7776,6 @@ securityContext: --- -# _strict-security-context - import StrictSecContextYaml from "./_strict-sec-context-yaml.mdx" <tr> @@ -8283,8 +7807,6 @@ import StrictSecContextYaml from "./_strict-sec-context-yaml.mdx" --- -# _use-minimal-rbac - <tr> <td><code>--use-minimal-rbac</code></td> <td>bool</td> @@ -8295,8 +7817,6 @@ import StrictSecContextYaml from "./_strict-sec-context-yaml.mdx" --- -# _wait-duration - <tr> <td><code>--wait-duration</code></td> <td>string</td> @@ -8307,8 +7827,6 @@ import StrictSecContextYaml from "./_strict-sec-context-yaml.mdx" --- -# _with-minio - <tr> <td><code>--with-minio</code></td> <td>bool</td> @@ -8319,15 +7837,11 @@ import StrictSecContextYaml from "./_strict-sec-context-yaml.mdx" --- -# _installers - To provision a cluster on a VM or bare metal server, kURL uses a spec that is defined in a manifest file with `apiVersion: cluster.kurl.sh/v1beta1` and `kind: Installer`. This spec (called a _kURL installer_) lists the kURL add-ons that will be included in the cluster. kURL provides add-ons for networking, storage, ingress, and more. kURL also provides a KOTS add-on, which installs KOTS in the cluster and deploys the KOTS Admin Console. You can customize the kURL installer according to your application requirements. --- -# _kurl-availability - :::note Replicated kURL is available only for existing customers. If you are not an existing kURL user, use Replicated Embedded Cluster instead. For more information, see [Using Embedded Cluster](/vendor/embedded-overview). @@ -8337,8 +7851,6 @@ kURL is a Generally Available (GA) product for existing customers. For more info --- -# _kurl-definition - kURL is an open source project maintained by Replicated that software vendors can use to create custom Kubernetes distributions that are embedded with their application. 
Enterprise customers can then run a kURL installation script on their virtual machine (VM) or bare metal server to provision a cluster and install the application. This allows software vendors to distribute Kubernetes applications to customers that do not have access to a cluster in their environment. For more information about the kURL open source project, see the [kURL website](https://kurl.sh). @@ -8346,8 +7858,6 @@ For more information about the kURL open source project, see the [kURL website]( --- -# _allow-privilege-escalation - ```yaml spec: allowPrivilegeEscalation: true @@ -8357,8 +7867,6 @@ spec: --- -# _application-icon - ```yaml apiVersion: kots.io/v1beta1 kind: Application @@ -8370,8 +7878,6 @@ spec: --- -# _application-spec - ```yaml apiVersion: kots.io/v1beta1 kind: Application @@ -8381,8 +7887,6 @@ kind: Application --- -# _application-statusInformers - ```yaml apiVersion: kots.io/v1beta1 kind: Application @@ -8395,8 +7899,6 @@ spec: --- -# _config-option-invalid-regex-validator - **Correct**: ```yaml @@ -8435,8 +7937,6 @@ spec: --- -# _config-option-invalid-type - **Correct**: ```yaml @@ -8473,8 +7973,6 @@ spec: --- -# _config-option-is-circular - **Incorrect**: ```yaml @@ -8491,8 +7989,6 @@ spec: --- -# _config-option-password-type - ```yaml spec: groups: @@ -8506,8 +8002,6 @@ spec: --- -# _config-option-regex-validator-invalid-type - **Correct**: ```yaml @@ -8546,8 +8040,6 @@ spec: --- -# _config-spec - ```yaml apiVersion: kots.io/v1beta1 kind: Config @@ -8557,8 +8049,6 @@ kind: Config --- -# _container-image-latest-tag - ```yaml spec: containers: @@ -8569,8 +8059,6 @@ spec: --- -# _container-image-local-image-name - ```yaml spec: containers: @@ -8581,8 +8069,6 @@ spec: --- -# _container-resource-limits - ```yaml spec: containers: @@ -8598,8 +8084,6 @@ spec: --- -# _container-resource-requests - ```yaml spec: containers: @@ -8615,8 +8099,6 @@ spec: --- -# _container-resources - ```yaml spec: @@ -8629,8 +8111,6 @@ spec: --- -# _deprecated-kubernetes-installer-version - **Correct**: ```yaml @@ -8649,8 +8129,6 @@ kind: Installer --- -# _hardcoded-namespace - ```yaml metadata: name: spline-reticulator @@ -8661,8 +8139,6 @@ metadata: --- -# _invalid-helm-release-name - ```yaml apiVersion: kots.io/v1beta1 kind: HelmChart @@ -8675,8 +8151,6 @@ spec: --- -# _invalid-kubernetes-installer - **Correct**: ```yaml @@ -8703,8 +8177,6 @@ spec: --- -# _invalid-min-kots-version - ```yaml apiVersion: kots.io/v1beta1 kind: Application @@ -8716,8 +8188,6 @@ spec: --- -# _invalid-rendered-yaml - **Example Helm Chart**: ```yaml apiVersion: kots.io/v1beta1 @@ -8820,8 +8290,6 @@ spec: --- -# _invalid-target-kots-version - ```yaml apiVersion: kots.io/v1beta1 kind: Application @@ -8833,8 +8301,6 @@ spec: --- -# _invalid-yaml - **Correct**: ```yaml @@ -8854,8 +8320,6 @@ spec: --- -# _invalid_type - **Correct**: ```yaml @@ -8876,16 +8340,12 @@ ports: --- -# _linter-definition - The linter checks the manifest files in Replicated KOTS releases to ensure that there are no YAML syntax errors, that all required manifest files are present in the release to support installation with KOTS, and more. 
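As a quick sketch of running the linter locally with the Replicated CLI (the manifests directory is a placeholder; the `replicated release lint` command and the `--yaml-dir` flag are documented later in this reference):

```bash
# Lint a directory of KOTS manifests before creating a release
replicated release lint --yaml-dir ./manifests
```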
--- -# _may-contain-secrets - ```yaml data: ENV_VAR_1: "y2X4hPiAKn0Pbo24/i5nlInNpvrL/HJhlSCueq9csamAN8g5y1QUjQnNL7btQ==" @@ -8895,8 +8355,6 @@ data: --- -# _missing-api-version-field - ```yaml apiVersion: kots.io/v1beta1 ``` @@ -8905,8 +8363,6 @@ apiVersion: kots.io/v1beta1 --- -# _missing-kind-field - ```yaml kind: Config ``` @@ -8915,8 +8371,6 @@ kind: Config --- -# _preflight-spec - ```yaml apiVersion: troubleshoot.sh/v1beta2 kind: Preflight @@ -8926,8 +8380,6 @@ kind: Preflight --- -# _privileged - ```yaml spec: privileged: true @@ -8937,8 +8389,6 @@ spec: --- -# _repeat-option-malformed-yamlpath - ```yaml spec: groups: @@ -8952,8 +8402,6 @@ spec: --- -# _repeat-option-missing-template - ```yaml spec: groups: @@ -8979,8 +8427,6 @@ spec: --- -# _repeat-option-missing-valuesByGroup - ```yaml spec: groups: @@ -8999,8 +8445,6 @@ spec: --- -# _replicas-1 - ```yaml spec: replicas: 1 @@ -9010,8 +8454,6 @@ spec: --- -# _resource-limits-cpu - ```yaml spec: containers: @@ -9026,8 +8468,6 @@ spec: --- -# _resource-limits-memory - ```yaml spec: containers: @@ -9042,8 +8482,6 @@ spec: --- -# _resource-requests-cpu - ```yaml spec: containers: @@ -9058,8 +8496,6 @@ spec: --- -# _resource-requests-memory - ```yaml spec: containers: @@ -9074,8 +8510,6 @@ spec: --- -# _troubleshoot-spec - ```yaml apiVersion: troubleshoot.sh/v1beta2 kind: SupportBundle @@ -9085,8 +8519,6 @@ kind: SupportBundle --- -# _volume-docker-sock - ```yaml spec: volumes: @@ -9098,8 +8530,6 @@ spec: --- -# _volumes-host-paths - ```yaml spec: volumes: @@ -9111,15 +8541,11 @@ spec: --- -# _limitation-ec - Monitoring applications with Prometheus is not supported for installations with [Replicated Embedded Cluster](/vendor/embedded-overview). --- -# _overview-prom - The KOTS Admin Console can use the open source systems monitoring tool Prometheus to collect metrics on an application and the cluster where the application is installed. Prometheus components include the main Prometheus server, which scrapes and stores time series data, an Alertmanager for alerting on metrics, and Grafana for visualizing metrics. For more information about Prometheus, see [What is Prometheus?](https://prometheus.io/docs/introduction/overview/) in the Prometheus documentation. The Admin Console exposes graphs with key metrics collected by Prometheus in the **Monitoring** section of the dashboard. By default, the Admin Console displays the following graphs: @@ -9139,16 +8565,12 @@ The following screenshot shows an example of the **Monitoring** section on the A --- -# _analyzers-note - For basic examples of checking CPU, memory, and disk capacity, see [Node Resources Analyzer](https://troubleshoot.sh/reference/analyzers/node-resources/) in the Troubleshoot documentation. 
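For instance, a minimal `nodeResources` analyzer sketch based on the Troubleshoot syntax referenced above (the threshold and messages are illustrative):

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: Preflight
metadata:
  name: my-app
spec:
  analyzers:
    - nodeResources:
        checkName: Total CPU capacity
        outcomes:
          - fail:
              when: "sum(cpuCapacity) < 4"
              message: The cluster must have at least 4 CPU cores.
          - pass:
              message: The cluster has sufficient CPU capacity.
```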
--- -# _http-requests-cr - ```yaml apiVersion: troubleshoot.sh/v1beta2 kind: Preflight @@ -9177,8 +8599,6 @@ spec: --- -# _http-requests-secret - ```yaml apiVersion: v1 kind: Secret @@ -9215,8 +8635,6 @@ stringData: --- -# _k8s-distro-cr - ```yaml apiVersion: troubleshoot.sh/v1beta2 kind: Preflight @@ -9258,8 +8676,6 @@ spec: --- -# _k8s-distro-secret - ```yaml apiVersion: v1 kind: Secret @@ -9309,8 +8725,6 @@ stringData: --- -# _k8s-version-cr - ```yaml apiVersion: troubleshoot.sh/v1beta2 kind: Preflight @@ -9335,8 +8749,6 @@ spec: --- -# _k8s-version-secret - ```yaml apiVersion: v1 kind: Secret @@ -9369,8 +8781,6 @@ stringData: --- -# _mysql-cr - ```yaml apiVersion: troubleshoot.sh/v1beta2 kind: Preflight @@ -9401,8 +8811,6 @@ spec: --- -# _mysql-secret - ```yaml apiVersion: v1 kind: Secret @@ -9443,15 +8851,11 @@ stringData: --- -# _node-count-cr - --- -# _node-count-secret - ```yaml apiVersion: v1 kind: Secret @@ -9480,8 +8884,6 @@ stringData: --- -# _node-cpu-cr - ```yaml apiVersion: troubleshoot.sh/v1beta2 kind: Preflight @@ -9503,8 +8905,6 @@ spec: --- -# _node-cpu-secret - ```yaml apiVersion: v1 kind: Secret @@ -9534,8 +8934,6 @@ stringData: --- -# _node-ephem-storage-cr - ```yaml apiVersion: troubleshoot.sh/v1beta2 kind: Preflight @@ -9561,8 +8959,6 @@ spec: --- -# _node-ephem-storage-secret - ```yaml apiVersion: v1 kind: Secret @@ -9596,8 +8992,6 @@ stringData: --- -# _node-mem-cr - ```yaml apiVersion: troubleshoot.sh/v1beta2 kind: Preflight @@ -9623,8 +9017,6 @@ spec: --- -# _node-mem-secret - ```yaml apiVersion: v1 kind: Secret @@ -9658,8 +9050,6 @@ stringData: --- -# _node-req-cr - ```yaml apiVersion: troubleshoot.sh/v1beta2 kind: Preflight @@ -9685,8 +9075,6 @@ spec: --- -# _node-req-secret - ```yaml apiVersion: v1 kind: Secret @@ -9720,8 +9108,6 @@ stringData: --- -# _node-storage-cr - ```yaml apiVersion: troubleshoot.sh/v1beta2 kind: Preflight @@ -9742,8 +9128,6 @@ spec: --- -# _node-storage-secret - ```yaml apiVersion: v1 kind: Secret @@ -9772,8 +9156,6 @@ stringData: --- -# _preflights-add-analyzers - You must add analyzers to analyze the data from the collectors that you specified. Define the criteria for the pass, fail, and warn outcomes, and specify custom messages for each. For example, you can set a `fail` outcome if the MySQL version is less than the minimum required. Then, specify a message to display that informs your customer of the reasons for the failure and steps they can take to fix the issue. @@ -9781,8 +9163,6 @@ For example, you can set a `fail` outcome if the MySQL version is less than the --- -# _preflights-define-xref - For more information about defining collectors and analyzers, see [Collecting Data](https://troubleshoot.sh/docs/collect/) and [Analyzing Data](https://troubleshoot.sh/docs/analyze/) in the Troubleshoot documentation. @@ -9790,15 +9170,11 @@ and [Analyzing Data](https://troubleshoot.sh/docs/analyze/) in the Troubleshoot --- -# _preflights-define - Any preflight checks you run are dependent on your application needs. This section gives some guidance about how to think about using collectors and analyzers to design preflight checks. --- -# _preflights-sb-about - Preflight checks and support bundles are provided by the Troubleshoot open source project, which is maintained by Replicated. Troubleshoot is a kubectl plugin that provides diagnostic tools for Kubernetes applications. For more information, see the open source [Troubleshoot](https://troubleshoot.sh/docs/collect/) documentation. 
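Building on the analyzer guidance above, a sketch of the MySQL version check using the Troubleshoot `mysql` collector and analyzer (the connection URI and version threshold are placeholders):

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: Preflight
metadata:
  name: my-app
spec:
  collectors:
    - mysql:
        collectorName: mysql
        # Placeholder connection URI
        uri: 'repl-user:repl-password@tcp(10.0.0.1:3306)/defaultdb'
  analyzers:
    - mysql:
        checkName: Must be MySQL 8.x or later
        collectorName: mysql
        outcomes:
          - fail:
              when: connected == false
              message: Cannot connect to the MySQL server.
          - fail:
              when: version < 8.x
              message: The MySQL server must be version 8 or later.
          - pass:
              message: The MySQL server is ready.
```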
Preflight checks and support bundles analyze data from customer environments to provide insights that help users to avoid or troubleshoot common issues with an application: @@ -9808,23 +9184,17 @@ Preflight checks and support bundles analyze data from customer environments to --- -# _preflights-sb-note - For a comprehensive overview, see [About Preflight Checks and Support Bundles](preflight-support-bundle-about). --- -# _preflights-spec-locations - For more information about specifications, see [About Specifications](preflight-support-bundle-about#about-specifications) in _About Preflight Checks and Support Bundles_. --- -# _preflights-strict - If any strict preflight checks are configured, the `--skip-preflights` flag is not honored because the preflight checks must run and contain no failures before the application is deployed. When the `--deploy` option is provided and there are strict preflight checks, the preflight checks always run. The deployment waits for up to 15 minutes for the preflight checks to complete. If the checks complete without strict preflight failures, the release deploys. If the checks do not complete within 15 minutes, the release does not deploy. If there are one or more strict preflight failures, the release does not deploy. @@ -9834,37 +9204,27 @@ For more information about strict preflight checks, see [Defining Preflight Chec --- -# _step-creds - Provide read-only credentials for the external private registry in your Replicated account. This allows Replicated to access the images through the proxy registry. See [Add Credentials for an External Registry](packaging-private-images#add-credentials-for-an-external-registry) in _Connecting to an External Registry_. --- -# _step-custom-domain - (Optional) Add a custom domain for the proxy registry instead of `proxy.replicated.com`. See [Using Custom Domains](custom-domains-using). --- -# _redactors-about - Troubleshoot has built-in redactors to prevent sensitive data from being collected when support bundles are generated. You can add more redactors if needed. For more information, see [Redacting Data](https://troubleshoot.sh/docs/redact/) in the Troubleshoot documentation. --- -# _required-releases-description - When a release is required, KOTS requires users to upgrade to that version before they can upgrade to a later version. For example, if you select **Prevent this release from being skipped during upgrades** for release v2.0.0, users with v1.0.0 deployed must upgrade to v2.0.0 before they can upgrade to a version later than v2.0.0, such as v2.1.0. --- -# _required-releases-limitations - Required releases have the following limitations: * Required releases are supported in KOTS v1.68.0 and later. @@ -9875,8 +9235,6 @@ Required releases have the following limitations: --- -# _version-label-reqs-helm - * The version label for the release must match the version label from one of the `Chart.yaml` files in the release. * If there is one Helm chart in the release, Replicated automatically uses the version from the `Chart.yaml` file. * If there is more than one Helm chart in the release, Replicated uses the version label from one of the `Chart.yaml` files. You can edit the version label for the release to use the version label from a different `Chart.yaml` file.
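As a sketch of adding a custom redactor as described in the redactor overview above (the field name and regular expression are illustrative):

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: Redactor
metadata:
  name: my-redactor
spec:
  redactors:
    - name: redact-api-token
      removals:
        regex:
          # The named capture group "mask" is the portion that gets redacted
          - redactor: '("apiToken": *")(?P<mask>.*)(")'
```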
@@ -9884,8 +9242,6 @@ Required releases have the following limitations: --- -# _app - <tr> <td><code>--app</code></td> <td>string</td> @@ -9896,8 +9252,6 @@ Required releases have the following limitations: --- -# _authorize-with-token-note - :::note The `replicated login` command creates a token after you log in to your vendor account in a browser and saves it to a config file. Alternatively, if you do not have access to a browser, you can set the `REPLICATED_API_TOKEN` environment variable to authenticate. For more information, see [(Optional) Set Environment Variables](#env-var) below. ::: @@ -9905,8 +9259,6 @@ The `replicated login` command creates a token after you log in to your vendor a --- -# _authtype - <tr> <td><code>--authtype</code></td> <td>string</td> @@ -9917,8 +9269,6 @@ The `replicated login` command creates a token after you log in to your vendor a --- -# _chart-yaml-dir-reqs - :::note If your release supports installations with Replicated KOTS, `--yaml-dir` is required. If your release supports installations with the Helm CLI only, either `--yaml-dir` or `--chart` can be used. ::: @@ -9926,8 +9276,6 @@ If your release supports installations with Replicated KOTS, `--yaml-dir` is req --- -# _help - <tr> <td><code>-h, --help</code></td> <td></td> @@ -9938,8 +9286,6 @@ If your release supports installations with Replicated KOTS, `--yaml-dir` is req --- -# _login - Authorize the Replicated CLI: ``` @@ -9955,8 +9301,6 @@ Authorize the Replicated CLI: --- -# _logout - (Optional) When you are done using the Replicated CLI, remove any stored credentials created by the `replicated login` command: ``` @@ -9966,8 +9310,6 @@ Authorize the Replicated CLI: --- -# _output - <tr> <td><code>--output</code></td> <td>string</td> @@ -9979,8 +9321,6 @@ Authorize the Replicated CLI: --- -# _password-stdin - <tr> <td><code>--password-stdin</code></td> <td></td> @@ -9991,8 +9331,6 @@ Authorize the Replicated CLI: --- -# _password - <tr> <td><code>--password</code></td> <td>string</td> @@ -10003,8 +9341,6 @@ Authorize the Replicated CLI: --- -# _skip-validation - <tr> <td><code>--skip-validation</code></td> <td></td> @@ -10015,8 +9351,6 @@ Authorize the Replicated CLI: --- -# _sudo-install - :::note If you do not have root access to the `/usr/local/bin` directory, you can install with sudo by running `sudo mv replicated /usr/local/bin/replicated` instead of `mv replicated /usr/local/bin/replicated`. ::: @@ -10024,8 +9358,6 @@ If you do not have root access to the `/usr/local/bin` directory, you can instal --- -# _token-stdin - <tr> <td><code>--token-stdin</code></td> <td></td> @@ -10036,8 +9368,6 @@ If you do not have root access to the `/usr/local/bin` directory, you can instal --- -# _token - <tr> <td><code>--token</code></td> <td>string</td> @@ -10048,8 +9378,6 @@ If you do not have root access to the `/usr/local/bin` directory, you can instal --- -# _username - <tr> <td><code>--username</code></td> <td>string</td> @@ -10060,8 +9388,6 @@ If you do not have root access to the `/usr/local/bin` directory, you can instal --- -# _verify-install - Verify that the installation was successful: ``` @@ -10071,8 +9397,6 @@ Verify that the installation was successful: --- -# _yaml-dir - <tr> <td><code>--yaml-dir</code></td> <td>path</td> @@ -10083,8 +9407,6 @@ Verify that the installation was successful: --- -# _401-unauthorized - :::note If you see a `401 Unauthorized` error message, log out of the Replicated registry by running `helm registry logout registry.replicated.com` and then run `helm package .
--dependency-update` again. ::: @@ -10092,8 +9414,6 @@ If you see a `401 Unauthorized` error message, log out of the Replicated registr --- -# Chart.yaml - ```yaml # Chart.yaml dependencies: @@ -10108,8 +9428,6 @@ For the latest version information for the Replicated SDK, see the [replicated-s --- -# _integration-mode-install - You can install the Replicated SDK in integration mode to develop locally against the SDK API without needing to add the SDK to your application, create a release in the Replicated Vendor Portal, or make changes in your environment. You can also use integration mode to test sending instance data to the Vendor Portal, including any custom metrics that you configure. To use integration mode, install the Replicated SDK as a standalone component using a valid Development license created in the Vendor Portal. After you install in integration mode, the SDK provides default mock data for requests to the SDK API `app` endpoints. Requests to the `license` endpoints use the real data from your Development license. @@ -10163,15 +9481,11 @@ To install the SDK in integration mode: --- -# _kots-version-req - To install the SDK with a Replicated installer, KOTS v1.104.0 or later and the SDK version 1.0.0-beta.12 or later are required. You can verify the version of KOTS installed with `kubectl kots version`. For Replicated Embedded Cluster installations, you can see the version of KOTS that is installed by your version of Embedded Cluster in the [Embedded Cluster Release Notes](/release-notes/rn-embedded-cluster). --- -# _overview - The Replicated SDK is a Helm chart that can be installed as a small service alongside your application. The SDK can be installed alongside applications packaged as Helm charts or Kubernetes manifests. The SDK can be installed using the Helm CLI or KOTS. For information about how to distribute and install the SDK with your application, see [Installing the Replicated SDK](/vendor/replicated-sdk-installing). @@ -10188,8 +9502,6 @@ Replicated recommends that the SDK is distributed with all applications because --- -# _registry-logout - If you see a 401 Unauthorized error after running `helm dependency update`, run the following command to remove credentials from the Replicated registry, then re-run `helm dependency update`: ```bash @@ -10201,8 +9513,6 @@ For more information, see [401 Unauthorized Error When Updating Helm Dependencie --- -# values.yaml - When a user installs a Helm chart that includes the Replicated SDK as a dependency, a set of default SDK values are included in the `replicated` key of the parent chart's values file. For example: @@ -10236,15 +9546,11 @@ The SDK Helm values also include a `replicated.license` field, which is a string --- -# _checkVersion - Run `velero version --client-only` to check the version of the velero CLI that you installed as part of [Installing the Velero CLI](snapshots-velero-cli-installing). --- -# _installVelero - Run one of the following commands to install Velero, depending on the version of the velero CLI you are using: * **Velero v1.10 and later**: @@ -10272,43 +9578,31 @@ Run one of the following commands to install Velero, depending on the version of --- -# _limitation-cli-restores - Only full backups can be restored using the KOTS CLI. To restore an application from a partial backup, use the Admin Console. See [Restore the Application Only Using the Admin Console](/enterprise/snapshots-restoring-full#admin-console). 
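Returning to the SDK dependency declaration shown earlier in this section, a minimal sketch of a parent chart's `Chart.yaml` (the chart name and versions are placeholders; the repository shown is the Replicated registry library path, and the SDK version should be the latest available release):

```yaml
# Chart.yaml
apiVersion: v2
name: my-app        # placeholder chart name
version: 1.0.0      # placeholder chart version
dependencies:
  - name: replicated
    repository: oci://registry.replicated.com/library
    version: 1.0.0-beta.12   # example SDK version; use the latest release
```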
--- -# _limitation-dr - Only full backups that include both the application and the Admin Console can be restored to a new cluster in disaster recovery scenarios. Partial backups that include the application only _cannot_ be restored to a new cluster, and are therefore not useable for disaster recovery. --- -# _limitation-install-method - Snapshots can be restored only to clusters that use the same installation method as the cluster the snapshot was taken from. For example, snapshots taken in an online (internet-connected) cluster must be restored to an online cluster. --- -# _limitation-no-ec-support - The KOTS Snapshots feature is supported for existing cluster installations with KOTS and Replicated kURL installations only. Snapshots is not supported for Replicated Embedded Cluster installations. For more information about configuring backup and restore for Embedded Cluster, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery). --- -# _limitation-os - Snapshots must be restored on the same operating system that the snapshot was taken on. For example, snapshots taken on a CentOS cluster must be restored on a CentOS cluster. --- -# _node-agent-mem-limit - Increase the default memory limit for the node-agent (restic) Pod if your application is particularly large. For more information about configuring Velero resource requests and limits, see [Customize resource requests and limits](https://velero.io/docs/v1.10/customize-installation/#customize-resource-requests-and-limits) in the Velero documentation. For example, the following kubectl commands will increase the memory limit for the node-agent (restic) daemon set from the default of 1Gi to 2Gi. @@ -10342,8 +9636,6 @@ kubectl -n velero set env daemonset/restic GOGC=1 --- -# _registryCredentialsNote - :::note It is typical for the velero and node-agent (restic) Pods to be in the `ErrImagePull` or `ImagePullBackOff` state after you run the `velero install` command because Velero does not support passing registry credentials during installation. In Replicated KOTS v1.94.0 and later, this situation resolves itself after you complete the instructions to configure the storage destination. @@ -10353,8 +9645,6 @@ If you are on an earlier version of KOTS, Replicated recommends that you upgrade --- -# _resticDaemonSet - Configure the Restic DaemonSet specification if your cluster uses one of the following Kubernetes distributions: * RancherOS * OpenShift @@ -10366,8 +9656,6 @@ For information about how to configure the Restic DaemonSet for these distributi --- -# _restore-types - Snapshots supports the following types of restores: * Restore both the application and the KOTS Admin Console (also referred to as a _full_ restore) * Restore the KOTS Admin Console only @@ -10376,8 +9664,6 @@ Snapshots supports the following types of restores: --- -# _restoreTable - <table> <tr> <th width="25%">Restore Type</th> @@ -10404,15 +9690,11 @@ Snapshots supports the following types of restores: --- -# _step-get-backups - Run the [`kubectl kots get backups`](/reference/kots-cli-get-backups) command to get the list of full backups for the instance. --- -# _step-restore - Run the following command to restore a full backup: ```bash @@ -10425,8 +9707,6 @@ Run the following command to restore a full backup: --- -# _updateDefaultStorage - If Velero is already installed, you can update your storage destination in the Replicated Admin Console. 
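Putting the backup and restore steps above together, a minimal sketch (the namespace and backup name mirror the examples used elsewhere in this reference):

```bash
# List available full backups, then restore from one of them
kubectl kots get backups --namespace default
kubectl kots restore --from-backup instance-942kf
```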
For embedded clusters with the Velero add-on, you must update the default internal storage settings in the Admin Console because internal storage is insufficient for full backups. @@ -10436,8 +9716,6 @@ For more information about updating storage, see [Updating Settings in the Admin --- -# _aggregate-status-intro - When you provide more than one Kubernetes resource, Replicated aggregates all resource statuses to display a single application status. Replicated uses the least available resource status to represent the aggregate application status. For example, if at least one resource has an Unavailable status, then the aggregate application status is Unavailable. @@ -10445,8 +9723,6 @@ Replicated uses the least available resource status to represent the aggregate a --- -# _aggregateStatus - The following table describes the resource statuses that define each aggregate application status: <table> @@ -10481,8 +9757,6 @@ The following table describes the resource statuses that define each aggregate a --- -# _statusesTable - <table> <tbody> <tr> @@ -10540,8 +9814,6 @@ The following table describes the resource statuses that define each aggregate a --- -# _configmap-note - :::note Alternatively, you can use a ConfigMap (`kind: ConfigMap`) if the specification will not collect private information from the cluster. ::: @@ -10549,8 +9821,6 @@ Alternatively, you can use a ConfigMap (`kind: ConfigMap`) if the specification --- -# _customize-support-bundle-spec - When customizing your support bundle specifications, consider the following guidelines: - The `clusterInfo` and `clusterResources` collectors are useful because they collect a large amount of data to help with installation and debugging. @@ -10636,8 +9906,6 @@ When customizing your support bundle specifications, consider the following guid --- -# _deploy-status-cr - ```yaml apiVersion: troubleshoot.sh/v1beta2 kind: SupportBundle @@ -10663,8 +9931,6 @@ spec: --- -# _deploy-status-secret - ```yaml apiVersion: v1 kind: Secret @@ -10698,8 +9964,6 @@ stringData: --- -# _ec-support-bundle-intro - Embedded Cluster includes a default support bundle spec that collects both host- and cluster-level information. The host-level information is useful for troubleshooting failures related to host configuration like DNS, networking, or storage problems. Cluster-level information includes details about the components provided by Replicated, such as the Admin Console and Embedded Cluster operator that manage install and upgrade operations. If the cluster has not installed successfully and cluster-level information is not available, then it is excluded from the bundle. @@ -10710,8 +9974,6 @@ In addition to the host- and cluster-level details provided by the default Embed --- -# _generate-bundle-admin-console - The Replicated KOTS Admin Console includes a **Troubleshoot** page where you can generate a support bundle and review remediation suggestions for troubleshooting. You can also download the support bundle from the Admin Console. To generate a support bundle in the KOTS Admin Console: @@ -10736,8 +9998,6 @@ To generate a support bundle in the KOTS Admin Console: --- -# _generate-bundle-default-kots - For KOTS installations, you can generate a support bundle using the default KOTS spec. This is useful if the application does not have a support bundle spec included. 
#### Online Environments @@ -10769,8 +10029,6 @@ For air gap environments, perform the following steps to generate a support bund --- -# _generate-bundle-ec - There are different steps to generate a support bundle depending on the version of Embedded Cluster installed. ### For Versions 1.17.0 and Later @@ -10843,8 +10101,6 @@ To generate a bundle: --- -# _generate-bundle-host - To generate a kURL host support bundle: 1. Do one of the following: @@ -10882,8 +10138,6 @@ To generate a kURL host support bundle: --- -# _generate-bundle - Run the following command to generate a bundle: ```bash @@ -10897,8 +10151,6 @@ For a complete list of options with the `kubectl support-bundle` command, run `k --- -# _http-requests-cr - ```yaml apiVersion: troubleshoot.sh/v1beta2 kind: SupportBundle @@ -10927,8 +10179,6 @@ spec: --- -# _http-requests-secret - ```yaml apiVersion: v1 kind: Secret @@ -10965,8 +10215,6 @@ stringData: --- -# _install-plugin - The support-bundle plugin (a kubectl plugin) is required to generate support bundles from the command line. You can install the support-bundle plugin using krew or install it manually from the release archives. @@ -11023,8 +10271,6 @@ To install the support-bundle plugin manually: --- -# _k8s-version-cr - ```yaml apiVersion: troubleshoot.sh/v1beta2 kind: SupportBundle @@ -11051,8 +10297,6 @@ spec: --- -# _k8s-version-secret - ```yaml apiVersion: v1 kind: Secret @@ -11087,8 +10331,6 @@ stringData: --- -# _logs-limits-cr - ```yaml apiVersion: troubleshoot.replicated.com/v1beta1 kind: SupportBundle @@ -11108,8 +10350,6 @@ spec: --- -# _logs-limits-secret - ```yaml apiVersion: v1 kind: Secret @@ -11137,8 +10377,6 @@ stringData: --- -# _logs-selectors-cr - ```yaml apiVersion: troubleshoot.sh/v1beta2 kind: SupportBundle @@ -11178,8 +10416,6 @@ spec: --- -# _logs-selectors-secret - ```yaml apiVersion: v1 kind: Secret @@ -11227,8 +10463,6 @@ stringData: --- -# _node-resources-cr - ```yaml apiVersion: troubleshoot.sh/v1beta2 kind: SupportBundle @@ -11277,8 +10511,6 @@ spec: --- -# _node-resources-secret - ```yaml apiVersion: v1 kind: Secret @@ -11335,8 +10567,6 @@ stringData: --- -# _node-status-cr - ```yaml apiVersion: troubleshoot.sh/v1beta2 kind: SupportBundle @@ -11361,8 +10591,6 @@ spec: --- -# _node-status-secret - ```yaml apiVersion: v1 kind: Secret @@ -11395,8 +10623,6 @@ stringData: --- -# _redis-mysql-cr - ```yaml apiVersion: troubleshoot.sh/v1beta2 kind: SupportBundle @@ -11423,8 +10649,6 @@ spec: --- -# _redis-mysql-secret - ```yaml apiVersion: v1 kind: Secret @@ -11459,8 +10683,6 @@ stringData: --- -# _run-pods-cr - ```yaml apiVersion: troubleshoot.sh/v1beta2 kind: SupportBundle @@ -11490,8 +10712,6 @@ spec: --- -# _run-pods-secret - ```yaml apiVersion: v1 kind: Secret @@ -11529,8 +10749,6 @@ stringData: --- -# _support-bundle-add-analyzers - Add analyzers based on conditions that you expect for your application. For example, you might require that a cluster have at least 2 CPUs and 4GB memory available. Good analyzers clearly identify failure modes. For example, if you can identify a log message from your database component that indicates a problem, you should write an analyzer that checks for that log. @@ -11546,8 +10764,6 @@ Add analyzers based on conditions that you expect for your application. For exam --- -# _support-bundle-add-logs - Replicated recommends adding application Pod logs and set the collection limits for the number of lines logged. Typically the selector attribute is matched to the labels. 
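For example, a sketch of a logs collector that follows this guidance (the label selector and limit are placeholders):

```yaml
apiVersion: troubleshoot.sh/v1beta2
kind: SupportBundle
metadata:
  name: my-app
spec:
  collectors:
    - logs:
        collectorName: app-logs
        selector:
          - app=my-app     # match your application's Pod labels
        limits:
          maxLines: 10000
```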
To get the labels for an application, either inspect the YAML or run `kubectl get pods --show-labels`. @@ -11559,8 +10775,6 @@ The `limits` field can support `maxAge` or `maxLines`. This limits the output to --- -# _support-bundle-custom-collectors - Add any custom collectors to the file. Collectors that Replicated recommends considering are: - **Kubernetes resources:** Use for custom resource definitions (CRDs), Secrets, and ConfigMaps, if they are required for your application to work. @@ -11573,8 +10787,6 @@ Add any custom collectors to the file. Collectors that Replicated recommends con --- -# _go-sprig - KOTS template functions are based on the Go text/template library. All functionality of the Go templating language, including if statements, loops, and variables, is supported with KOTS template functions. For more information, see [text/template](https://golang.org/pkg/text/template/) in the Go documentation. Additionally, KOTS template functions can be used with all functions in the Sprig library. Sprig provides several template functions for the Go templating language, such as type conversion, string, and integer math functions. For more information, see [Sprig Function Documentation](https://masterminds.github.io/sprig/). @@ -11582,8 +10794,6 @@ Additionally, KOTS template functions can be used with all functions in the Spri --- -# KOTS Config custom resource - The following example uses: * KOTS [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue) template function to evaluate the number of seats permitted by the license * Sprig [atoi](https://masterminds.github.io/sprig/conversion.html) function to convert the string values returned by LicenseFieldValue to integers @@ -11635,8 +10845,6 @@ As shown in the image below, if the user's license contains `numSeats: 150`, the --- -# _ne-comparison - In the example below, the `ingress_type` field is displayed on the **Config** page only when the distribution of the cluster is _not_ [Replicated Embedded Cluster](/vendor/embedded-overview). This ensures that only users deploying to their own existing cluster are able to select the method for ingress. The following example uses: @@ -11699,8 +10907,6 @@ Conversely, when the distribution of the cluster is not `embedded-cluster`, both --- -# KOTS Config custom resource - The following example uses: * KOTS [Distribution](/reference/template-functions-static-context#distribution) template function to return the Kubernetes distribution of the cluster where KOTS is running * [eq](https://pkg.go.dev/text/template#hdr-Functions) (_equal_) Go binary operator to compare the rendered value of the Distribution template function to a string, then return the boolean truth of the comparison @@ -11741,8 +10947,6 @@ The following image shows how only the `gke_distribution` item is displayed on t --- -# _use-cases - Common use cases for KOTS template functions include rendering values during installation or upgrade, such as: * Customer-specific license field values * User-provided configuration values @@ -11754,8 +10958,6 @@ KOTS template functions can also be used to work with integer, boolean, float, a --- -# _admin-console-air-gap - import BuildAirGapBundle from "../install/_airgap-bundle-build.mdx" import DownloadAirGapBundle from "../install/_airgap-bundle-download.mdx" import ViewAirGapBundle from "../install/_airgap-bundle-view-contents.mdx" @@ -11794,8 +10996,6 @@ the changes between them by clicking **Diff releases** in the right corner. 
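Updates can also be checked for and downloaded from the command line; a minimal sketch using the app slug and namespace from the `upstream upgrade` examples later in this reference:

```bash
# Fetch the latest upstream version of the application;
# add --deploy to also deploy the new version after it is downloaded
kubectl kots upstream upgrade kots-sentry --namespace kots-sentry
```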
--- -# _admin-console - To perform an update from the Admin Console: 1. In the Admin Console, go to the **Version History** tab. @@ -11833,8 +11033,6 @@ the changes between them by clicking **Diff releases** in the right corner. --- -# _installerRequirements - * **installer-spec-file**: If you used the `installer-spec-file` flag to pass a `patch.yaml` file when you installed, you must pass the same `patch.yaml` file when you upgrade. This prevents the installer from overwriting any configuration from your `patch.yaml` file and making changes to the add-ons in your cluster. For example: `installer-spec-file="./patch.yaml"`. * **app-version-label**: By default, the script also upgrades your application to the latest version when you run the installation script. @@ -11844,8 +11042,6 @@ the changes between them by clicking **Diff releases** in the right corner. --- -# _upgradePrompt - (Kubernetes Upgrades Only) If a Kubernetes upgrade is required, the script automatically prints a `Drain local node and apply upgrade?` prompt. Confirm the prompt to drain the local primary node and apply the Kubernetes upgrade to the control plane. The script continues to drain and upgrade nodes sequentially. For each node, the script prints a command that you must run on the node to upgrade Kubernetes. For more information, see [About Kubernetes Updates](/enterprise/updating-kurl-about#kubernetes) in _About kURL Cluster Updates_. @@ -11853,15 +11049,11 @@ the changes between them by clicking **Diff releases** in the right corner. --- -# _api-about - The Vendor API is the API for the Vendor Portal. This API can be used to complete tasks programmatically, including all tasks for packaging and managing applications, and managing artifacts such as teams and license files. --- -# _team-token-note - :::note Team API tokens are deprecated and cannot be generated. If you are already using team API tokens, Replicated recommends that you migrate to Service Accounts or User API tokens instead because these options provide better granular control over token access. ::: @@ -11869,8 +11061,6 @@ Team API tokens are deprecated and cannot be generated. If you are already using --- -# Cron Expressions - # Cron Expressions This topic describes the supported cron expressions that you can use to schedule automatic application update checks and automatic backups in the KOTS Admin Console. @@ -12028,8 +11218,6 @@ The following examples show valid cron expressions to schedule checking for upda --- -# About Custom Resources - # About Custom Resources You can include custom resources in releases to control the experience for applications installed with Replicated KOTS. @@ -12066,8 +11254,6 @@ The following are custom resources in API groups other than `kots.io` that can b --- -# Application - import Title from "../partials/custom-resource-application/_title.mdx" import Icon from "../partials/custom-resource-application/_icon.mdx" import ReleaseNotes from "../partials/custom-resource-application/_releaseNotes.mdx" @@ -12492,8 +11678,6 @@ spec: --- -# Velero Backup Resource for Snapshots - # Velero Backup Resource for Snapshots This topic provides information about the supported fields in the Velero Backup resource for the Replicated KOTS snapshots feature. @@ -12660,8 +11844,6 @@ However, not all fields are supported for full backups. 
The table below lists th --- -# Config - import ItemTypes from "../partials/config/_item-types.mdx" import PropertyWhen from "../partials/config/_property-when.mdx" import RandomStringNote from "../partials/config/_randomStringNote.mdx" @@ -13731,8 +12913,6 @@ spec: --- -# HelmChart v2 - import VersionLimitation from "../partials/helm/_helm-version-limitation.mdx" import HelmBuilderRequirements from "../partials/helm/_helm-builder-requirements.mdx" import Chart from "../partials/helm/_helm-cr-chart.mdx" @@ -13850,8 +13030,6 @@ The `builder` key is required for the following use cases: --- -# HelmChart v1 (Deprecated) - import VersionLimitation from "../partials/helm/_helm-version-limitation.mdx" import HelmBuilderRequirements from "../partials/helm/_helm-builder-requirements.mdx" import Chart from "../partials/helm/_helm-cr-chart.mdx" @@ -14042,8 +13220,6 @@ The `helmUpgradeFlags` field is _not_ supported for HelmChart custom resources w --- -# Identity (Beta) - :::important This topic is deleted from the product documentation because this Beta feature is deprecated. ::: @@ -14121,8 +13297,6 @@ Can be used for branding the application identity login screen. --- -# LintConfig - import LinterDefinition from "../partials/linter-rules/_linter-definition.mdx" # LintConfig @@ -14179,8 +13353,6 @@ spec: --- -# Preflight and Support Bundle - # Preflight and Support Bundle You can define preflight checks and support bundle specifications for Replicated KOTS and Helm installations. @@ -14332,8 +13504,6 @@ spec: --- -# Redactor (KOTS Only) - # Redactor (KOTS Only) This topic describes how to define redactors with the Redactor custom resource. @@ -14434,8 +13604,6 @@ spec: --- -# Embedded Cluster Install Command Options - import ProxyLimitations from "../partials/embedded-cluster/_proxy-install-limitations.mdx" import ProxyRequirements from "../partials/embedded-cluster/_proxy-install-reqs.mdx" @@ -14627,8 +13795,6 @@ sudo ./my-app install --license license.yaml --network-interface eno167777 --- -# Embedded Cluster Config - # Embedded Cluster Config This topic is a reference for the Replicated Embedded Cluster Config custom resource. For more information about Embedded Cluster, see [Using Embedded Cluster](/vendor/embedded-overview). @@ -14898,8 +14064,6 @@ spec: --- -# admin-console garbage-collect-images - # admin-console garbage-collect-images Starts image garbage collection. @@ -14927,8 +14091,6 @@ kubectl kots admin-console garbage-collect-images -n default --- -# admin-console generate-manifests - import KotsadmNamespace from "../partials/kots-cli/_kotsadm-namespace.mdx" import KotsadmRegistry from "../partials/kots-cli/_kotsadm-registry.mdx" import RegistryPassword from "../partials/kots-cli/_registry-password.mdx" @@ -15034,8 +14196,6 @@ kubectl kots admin-console generate-manifests --namespace kotsadm --minimal-rbac --- -# admin-console - # admin-console Enables access to the KOTS Admin Console from a local machine. @@ -15067,8 +14227,6 @@ kubectl kots admin-console --namespace kots-sentry --- -# admin-console push-images - # admin-console push-images Pushes images from an air gap bundle to a private registry. @@ -15099,8 +14257,6 @@ kubectl kots admin-console push-images ./kotsadm.tar.gz private.registry.host/ap --- -# admin-console upgrade - # admin-console upgrade import EnsureRBAC from "../partials/kots-cli/_ensure-rbac.mdx" @@ -15151,8 +14307,6 @@ kubectl kots admin-console upgrade --ensure-rbac=false --- -# backup - # backup Create a full instance snapshot for disaster recovery. 
@@ -15182,8 +14336,6 @@ kubectl kots backup --namespace kots-sentry --- -# backup ls - # backup ls :::note @@ -15215,8 +14367,6 @@ kubectl kots backup ls --namespace kots-sentry --- -# docker ensure-secret - # docker ensure-secret Creates an image pull secret for Docker Hub that the Admin Console can utilize to avoid [rate limiting](/enterprise/image-registry-rate-limits). @@ -15249,8 +14399,6 @@ kubectl kots docker ensure-secret --dockerhub-username sentrypro --dockerhub-pas --- -# docker - # docker KOTS Docker interface @@ -15267,8 +14415,6 @@ This command supports all [global flags](kots-cli-global-flags). --- -# download - # download Retrieves a copy of the application manifests from the cluster, and store them in a specific directory structure on your workstation. @@ -15305,8 +14451,6 @@ kubectl kots download kots-sentry --namespace kots-sentry --dest ./manifests --o --- -# enable-ha - # enable-ha (Deprecated) Runs the rqlite StatefulSet as three replicas for data replication and high availability. @@ -15338,8 +14482,6 @@ kubectl kots enable-ha --namespace kots-sentry --- -# get apps - # get apps The `kots get apps` command lists installed applications. @@ -15367,8 +14509,6 @@ kubectl kots get apps -n default --- -# get backups - # get backups The `kots get backups` command lists available full snapshots (instance). @@ -15404,8 +14544,6 @@ kubectl kots get backups -n default --- -# get config - # get config The `kots get config` command returns the `configValues` file for an application. @@ -15437,8 +14575,6 @@ kubectl kots get config -n default --sequence 5 --appslug myapp --- -# get - # get The `kots get` command shows information about one or more resources. @@ -15466,8 +14602,6 @@ This command supports all [global flags](kots-cli-global-flags) and also: --- -# get restores - # get restores The `kots get restores` command lists created full snapshot restores. @@ -15503,8 +14637,6 @@ kubectl kots get restores -n default --- -# get versions - # get versions The `kots get versions` command lists all versions of an application. @@ -15540,8 +14672,6 @@ kubectl kots get versions kots-sentry -n default --- -# Installing the KOTS CLI - # Installing the KOTS CLI Users can interact with the Replicated KOTS CLI to install and manage applications with Replicated KOTS. The KOTS CLI is a kubectl plugin that runs locally on any computer. @@ -15732,8 +14862,6 @@ To uninstall the KOTS CLI: --- -# Global flags - # Global flags All KOTS CLI commands support a set of global flags to be used to connect to the cluster. @@ -15760,8 +14888,6 @@ All KOTS CLI commands support a set of global flags to be used to connect to the --- -# identity-service enable-shared-password - # identity-service enable-shared-password Enable the shared password login option in the KOTS Admin Console. @@ -15790,8 +14916,6 @@ kubectl kots identity-service enable-shared-password --namespace kots-sentry --- -# identity-service - # identity-service KOTS Identity Service @@ -15808,8 +14932,6 @@ This command supports all [global flags](kots-cli-global-flags). --- -# install - import StrictSecurityContext from "../partials/kots-cli/_strict-security-context.mdx" import KotsadmNamespace from "../partials/kots-cli/_kotsadm-namespace.mdx" import KotsadmRegistry from "../partials/kots-cli/_kotsadm-registry.mdx" @@ -16007,8 +15129,6 @@ kubectl kots install --ensure-rbac=false --- -# pull - # pull Running this command will create a directory on the workstation containing the application and Kubernetes manifests. 
These assets can be used to deploy KOTS to a cluster through other workflows, such as kubectl. This command is necessary when managing a application without the use of the Admin Console. @@ -16055,8 +15175,6 @@ kubectl kots pull sentry/unstable --license-file ~/license.yaml --- -# remove - # remove Remove application reference from the KOTS Admin Console. @@ -16115,8 +15233,6 @@ kubectl kots remove sentry -n default --- -# reset-password - # reset-password If you deployed an application with the KOTS Admin Console, the `kots reset-password` command will change the bcrypted password hash in the cluster, allowing you to log in again. @@ -16145,8 +15261,6 @@ kubectl kots reset-password sentry-namespace --- -# reset-tls - # reset-tls If a bad TLS certificate is uploaded to the KOTS Admin Console or the kotsadm-tls secret is missing, the `kots reset-tls` command reapplies a default self-signed TLS certificate. @@ -16176,8 +15290,6 @@ kubectl kots reset-tls sentry-namespace --- -# restore - # restore Restore full snapshots for disaster recovery, or do a partial restore of the application only or the Replicated Admin Console only. @@ -16210,8 +15322,6 @@ kubectl kots restore --from-backup instance-942kf --- -# restore ls - # restore ls :::note @@ -16243,8 +15353,6 @@ kubectl kots restore ls --namespace kots-sentry --- -# set config - import PreflightsStrict from "../partials/preflights/_preflights-strict.mdx" @@ -16303,8 +15411,6 @@ kubectl kots set config myapp -n default --key config-item-name --value "config --- -# set - # set Configure KOTS resources. @@ -16325,8 +15431,6 @@ This command supports all [global flags](kots-cli-global-flags). --- -# upload - import PreflightsStrict from "../partials/preflights/_preflights-strict.mdx" # upload @@ -16374,8 +15478,6 @@ kubectl kots upload ./manifests --name kots-sentry --namespace kots-sentry --slu --- -# upstream download - # upstream download The `kots upstream download` command retries downloading a failed update of the upstream application. @@ -16407,8 +15509,6 @@ kubectl kots upstream download kots-sentry --namespace kots-sentry --sequence 8 --- -# upstream upgrade - import PreflightsStrict from "../partials/preflights/_preflights-strict.mdx" # upstream upgrade @@ -16456,8 +15556,6 @@ kubectl kots upstream upgrade kots-sentry --namespace kots-sentry --- -# upstream - # upstream KOTS Upstream interface. @@ -16473,8 +15571,6 @@ This command supports all [global flags](kots-cli-global-flags). --- -# velero configure-aws-s3 - # velero configure-aws-s3 Configures snapshots to use an AWS S3 Bucket as a storage destination. @@ -16546,8 +15642,6 @@ kubectl kots velero configure-aws-s3 instance-role --namespace default --region --- -# velero configure-azure - # velero configure-azure Configures snapshots to use an Azure Blob Storage Container as a storage destination. @@ -16599,8 +15693,6 @@ kubectl kots velero configure-azure service-principle --namespace default --con --- -# velero configure-gcp - # velero configure-gcp Configures snapshots to use a Google Cloud Platform Object Storage Bucket as a storage destination. 
@@ -16670,8 +15762,6 @@ kubectl kots velero configure-gcp workload-identity --namespace default --bucket --- -# velero configure-hostpath - import KotsadmNamespace from "../partials/kots-cli/_kotsadm-namespace.mdx" import KotsadmRegistry from "../partials/kots-cli/_kotsadm-registry.mdx" import RegistryPassword from "../partials/kots-cli/_registry-password.mdx" @@ -16746,8 +15836,6 @@ kubectl kots velero configure-hostpath \ --- -# velero configure-internal - # velero configure-internal :::important @@ -16780,8 +15868,6 @@ kubectl kots velero configure-internal --- -# velero configure-nfs - import KotsadmNamespace from "../partials/kots-cli/_kotsadm-namespace.mdx" import KotsadmRegistry from "../partials/kots-cli/_kotsadm-registry.mdx" import RegistryPassword from "../partials/kots-cli/_registry-password.mdx" @@ -16862,8 +15948,6 @@ kubectl kots velero configure-nfs \ --- -# velero configure-other-s3 - import KotsadmNamespace from "../partials/kots-cli/_kotsadm-namespace.mdx" import KotsadmRegistry from "../partials/kots-cli/_kotsadm-registry.mdx" import RegistryPassword from "../partials/kots-cli/_registry-password.mdx" @@ -16950,8 +16034,6 @@ kubectl kots velero configure-other-s3 --namespace default --endpoint http://min --- -# velero ensure-permissions - # velero ensure-permissions Ensures the necessary permissions that enables Replicated KOTS to access Velero. @@ -16980,8 +16062,6 @@ kubectl kots velero ensure-permissions --namespace kots-sentry --velero-namespac --- -# velero - # velero The KOTS Velero interface, which configures storage destinations for backups (snapshots), permissions, and print instructions fo set up. @@ -17009,8 +16089,6 @@ The following `kots velero` commands are supported: --- -# velero print-fs-instructions - # velero print-fs-instructions :::note @@ -17044,8 +16122,6 @@ kubectl kots velero print-fs-instructions --namespace kots-sentry --- -# Linter Rules - import MissingKindField from "../partials/linter-rules/_missing-kind-field.mdx" import MissingAPIVersionField from "../partials/linter-rules/_missing-api-version-field.mdx" import PreflightSpec from "../partials/linter-rules/_preflight-spec.mdx" @@ -18218,8 +17294,6 @@ For more information, see [LintConfig](custom-resource-lintconfig). 
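As an illustrative sketch (assuming the documented LintConfig schema with `name` and `level` fields per rule), a LintConfig that turns off one of the linter rules listed above:

```yaml
apiVersion: kots.io/v1beta1
kind: LintConfig
metadata:
  name: default-lint-config
spec:
  rules:
    - name: config-option-invalid-type   # a linter rule name from this reference
      level: "off"                       # assumed levels: error, warn, info, off
```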
--- -# replicated api get - # replicated api get Make ad-hoc GET API calls to the Replicated API @@ -18265,8 +17339,6 @@ replicated api get /v3/apps --- -# replicated api patch - # replicated api patch Make ad-hoc PATCH API calls to the Replicated API @@ -18313,8 +17385,6 @@ replicated api patch /v3/customer/2VffY549paATVfHSGpJhjh6Ehpy -b '{"name":"Valua --- -# replicated api post - # replicated api post Make ad-hoc POST API calls to the Replicated API @@ -18361,8 +17431,6 @@ replicated api post /v3/app/2EuFxKLDxKjPNk2jxMTmF6Vxvxu/channel -b '{"name":"mar --- -# replicated api put - # replicated api put Make ad-hoc PUT API calls to the Replicated API @@ -18409,8 +17477,6 @@ replicated api put /v3/app/2EuFxKLDxKjPNk2jxMTmF6Vxvxu/channel/2QLPm10JPkta7jO3Z --- -# replicated api - # replicated api Make ad-hoc API calls to the Replicated API @@ -18441,8 +17507,6 @@ Make ad-hoc API calls to the Replicated API --- -# replicated app create - # replicated app create Create a new application @@ -18497,8 +17561,6 @@ replicated app create "Custom App" --output table --- -# replicated app ls - # replicated app ls List applications @@ -18564,8 +17626,6 @@ replicated app ls "App Name" --output table --- -# replicated app rm - # replicated app rm Delete an application @@ -18626,8 +17686,6 @@ replicated app delete "Custom App" --output json --- -# replicated app - # replicated app Manage applications @@ -18694,8 +17752,6 @@ replicated app ls --output json --- -# replicated channel create - # replicated channel create Create a new channel in your app @@ -18739,8 +17795,6 @@ replicated channel create --name Beta --description 'New features subject to cha --- -# replicated channel demote - # replicated channel demote Demote a release from a channel @@ -18787,8 +17841,6 @@ replicated channel demote CHANNEL_ID_OR_NAME [flags] --- -# replicated channel disable-semantic-versioning - # replicated channel disable-semantic-versioning Disable semantic versioning for CHANNEL_ID @@ -18829,8 +17881,6 @@ replicated channel disable-semantic-versioning CHANNEL_ID --- -# replicated channel enable-semantic-versioning - # replicated channel enable-semantic-versioning Enable semantic versioning for CHANNEL_ID @@ -18871,8 +17921,6 @@ replicated channel enable-semantic-versioning CHANNEL_ID --- -# replicated channel inspect - # replicated channel inspect Show full details for a channel @@ -18908,8 +17956,6 @@ replicated channel inspect CHANNEL_ID [flags] --- -# replicated channel ls - # replicated channel ls List all channels in your app @@ -18951,8 +17997,6 @@ ls, list --- -# replicated channel rm - # replicated channel rm Remove (archive) a channel @@ -18993,8 +18037,6 @@ rm, delete --- -# replicated channel un-demote - # replicated channel un-demote Un-demote a release from a channel @@ -19041,8 +18083,6 @@ replicated channel un-demote CHANNEL_ID_OR_NAME [flags] --- -# replicated channel - # replicated channel List channels @@ -19081,8 +18121,6 @@ List channels --- -# replicated cluster addon create object-store - # replicated cluster addon create object-store Create an object store bucket for a cluster. @@ -19140,8 +18178,6 @@ replicated cluster addon create object-store 05929b24 --bucket-prefix custom-pre --- -# replicated cluster addon create - # replicated cluster addon create Create cluster add-ons. @@ -19183,8 +18219,6 @@ replicated cluster addon create object-store CLUSTER_ID --bucket-prefix mybucket --- -# replicated cluster addon ls - # replicated cluster addon ls List cluster add-ons for a cluster. 
@@ -19241,8 +18275,6 @@ replicated cluster addon ls CLUSTER_ID --output wide --- -# replicated cluster addon rm - # replicated cluster addon rm Remove cluster add-on by ID. @@ -19293,8 +18325,6 @@ replicated cluster addon rm cluster456 --id abc123 --- -# replicated cluster addon - # replicated cluster addon Manage cluster add-ons. @@ -19346,8 +18376,6 @@ replicated cluster addon ls CLUSTER_ID --output json --- -# replicated cluster create - # replicated cluster create Create test clusters. @@ -19432,8 +18460,6 @@ replicated cluster create --distribution eks --version 1.21 --nodes 3 --addon ob --- -# replicated cluster kubeconfig - # replicated cluster kubeconfig Download credentials for a test cluster. @@ -19495,8 +18521,6 @@ replicated cluster kubeconfig --id CLUSTER_ID --- -# replicated cluster ls - # replicated cluster ls List test clusters. @@ -19568,8 +18592,6 @@ replicated cluster ls --output wide --- -# replicated cluster nodegroup ls - # replicated cluster nodegroup ls List node groups for a cluster. @@ -19628,8 +18650,6 @@ replicated cluster nodegroup ls CLUSTER_ID --output wide --- -# replicated cluster nodegroup - # replicated cluster nodegroup Manage node groups for clusters. @@ -19670,8 +18690,6 @@ replicated cluster nodegroup ls CLUSTER_ID --- -# replicated cluster port expose - # replicated cluster port expose Expose a port on a cluster to the public internet. @@ -19732,8 +18750,6 @@ replicated cluster port expose CLUSTER_ID --port 8080 --protocol https --output --- -# replicated cluster port ls - # replicated cluster port ls List cluster ports for a cluster. @@ -19790,8 +18806,6 @@ replicated cluster port ls CLUSTER_ID --output wide --- -# replicated cluster port rm - # replicated cluster port rm Remove cluster port by ID. @@ -19851,8 +18865,6 @@ replicated cluster port rm CLUSTER_ID --id PORT_ID --output json --- -# replicated cluster port - # replicated cluster port Manage cluster ports. @@ -19901,8 +18913,6 @@ replicated cluster port expose [CLUSTER_ID] [PORT] --- -# replicated cluster prepare - # replicated cluster prepare Prepare cluster for testing. @@ -19977,8 +18987,6 @@ replicated cluster prepare --distribution eks --version 1.27 --instance-type c6. --- -# replicated cluster rm - # replicated cluster rm Remove test clusters. @@ -20039,8 +19047,6 @@ replicated cluster rm --all --- -# replicated cluster shell - # replicated cluster shell Open a new shell with kubeconfig configured. @@ -20091,8 +19097,6 @@ replicated cluster shell --name "My Cluster" --- -# replicated cluster update nodegroup - # replicated cluster update nodegroup Update a nodegroup for a test cluster. @@ -20147,8 +19151,6 @@ replicated cluster update nodegroup CLUSTER_ID --nodegroup-id NODEGROUP_ID --min --- -# replicated cluster update ttl - # replicated cluster update ttl Update TTL for a test cluster. @@ -20194,8 +19196,6 @@ replicated cluster update ttl CLUSTER_ID --ttl 24h --- -# replicated cluster update - # replicated cluster update Update cluster settings. @@ -20242,8 +19242,6 @@ replicated cluster update --name <cluster-name> [subcommand] --- -# replicated cluster upgrade - # replicated cluster upgrade Upgrade a test cluster. @@ -20295,8 +19293,6 @@ replicated cluster upgrade [CLUSTER_ID] --version 1.31 --wait 30m --- -# replicated cluster versions - # replicated cluster versions List cluster versions. @@ -20346,8 +19342,6 @@ replicated cluster versions --output json --- -# replicated cluster - # replicated cluster Manage test Kubernetes clusters. 
@@ -20406,8 +19400,6 @@ replicated cluster nodegroup ls <cluster-id> --- -# replicated completion - # replicated completion Generate completion script @@ -20485,8 +19477,6 @@ PowerShell: --- -# replicated customer archive - # replicated customer archive Archive a customer @@ -20543,8 +19533,6 @@ replicated customer archive --app myapp "Acme Inc" --- -# replicated customer create - # replicated customer create Create a new customer for the current application @@ -20630,8 +19618,6 @@ replicated customer create --app myapp --name "Full Options Inc" --custom-id "FU --- -# replicated customer download-license - # replicated customer download-license Download a customer's license @@ -20687,8 +19673,6 @@ replicated customer download-license --app myapp --customer "Acme Inc" --output --- -# replicated customer inspect - # replicated customer inspect Show detailed information about a specific customer @@ -20747,8 +19731,6 @@ replicated customer inspect --app myapp --customer "Acme Inc" --- -# replicated customer ls - # replicated customer ls List customers for the current application @@ -20812,8 +19794,6 @@ replicated customer ls --app myapp --output json --- -# replicated customer update - # replicated customer update Update an existing customer @@ -20898,8 +19878,6 @@ replicated customer update --customer cus_abcdef123456 --name "JSON Corp" --outp --- -# replicated customer - # replicated customer Manage customers @@ -20936,8 +19914,6 @@ The customers command allows vendors to create, display, modify end customer rec --- -# replicated default clear-all - # replicated default clear-all Clear all default values @@ -20981,8 +19957,6 @@ replicated default clear-all --- -# replicated default clear - # replicated default clear Clear default value for a key @@ -21029,8 +20003,6 @@ replicated default clear app --- -# replicated default set - # replicated default set Set default value for a key @@ -21081,8 +20053,6 @@ replicated default set app my-app-slug --- -# replicated default show - # replicated default show Show default value for a key @@ -21134,8 +20104,6 @@ replicated default show app --- -# replicated default - # replicated default Manage default values used by other commands @@ -21166,8 +20134,6 @@ Manage default values used by other commands --- -# replicated installer create - # replicated installer create Create a new installer spec @@ -21208,8 +20174,6 @@ replicated installer create [flags] --- -# replicated installer ls - # replicated installer ls List an app's Kubernetes Installers @@ -21251,8 +20215,6 @@ ls, list --- -# replicated installer - # replicated installer Manage Kubernetes installers @@ -21285,8 +20247,6 @@ The installers command allows vendors to create, display, modify and promote kur --- -# Installing the Replicated CLI - import Verify from "../partials/replicated-cli/_verify-install.mdx" import Sudo from "../partials/replicated-cli/_sudo-install.mdx" import Login from "../partials/replicated-cli/_login.mdx" @@ -21488,8 +20448,6 @@ To set the `REPLICATED_APP` environment variable: --- -# replicated instance inspect - # replicated instance inspect Show full details for a customer instance @@ -21527,8 +20485,6 @@ replicated instance inspect [flags] --- -# replicated instance ls - # replicated instance ls list customer instances @@ -21572,8 +20528,6 @@ ls, list --- -# replicated instance tag - # replicated instance tag tag an instance @@ -21612,8 +20566,6 @@ replicated instance tag [flags] --- -# replicated instance - # replicated instance Manage instances @@ -21647,8 +20599,6 
@@ The instance command allows vendors to display and tag customer instances. --- -# replicated login - # replicated login Log in to Replicated @@ -21683,8 +20633,6 @@ replicated login [flags] --- -# replicated logout - # replicated logout Logout from Replicated @@ -21719,8 +20667,6 @@ replicated logout [flags] --- -# replicated registry add dockerhub - # replicated registry add dockerhub Add a DockerHub registry @@ -21762,8 +20708,6 @@ replicated registry add dockerhub [flags] --- -# replicated registry add ecr - # replicated registry add ecr Add an ECR registry @@ -21804,8 +20748,6 @@ replicated registry add ecr [flags] --- -# replicated registry add gar - # replicated registry add gar Add a Google Artifact Registry @@ -21847,8 +20789,6 @@ replicated registry add gar [flags] --- -# replicated registry add gcr - # replicated registry add gcr Add a Google Container Registry @@ -21888,8 +20828,6 @@ replicated registry add gcr [flags] --- -# replicated registry add ghcr - # replicated registry add ghcr Add a GitHub Container Registry @@ -21927,8 +20865,6 @@ replicated registry add ghcr [flags] --- -# replicated registry add other - # replicated registry add other Add a generic registry @@ -21969,8 +20905,6 @@ replicated registry add other [flags] --- -# replicated registry add quay - # replicated registry add quay Add a quay.io registry @@ -22010,8 +20944,6 @@ replicated registry add quay [flags] --- -# replicated registry add - # replicated registry add add @@ -22050,8 +20982,6 @@ add --- -# replicated registry ls - # replicated registry ls list registries @@ -22093,8 +21023,6 @@ ls, list --- -# replicated registry rm - # replicated registry rm remove registry @@ -22135,8 +21063,6 @@ rm, delete --- -# replicated registry test - # replicated registry test test registry @@ -22172,8 +21098,6 @@ replicated registry test HOSTNAME [flags] --- -# replicated registry - # replicated registry Manage registries @@ -22208,8 +21132,6 @@ registry can be used to manage existing registries and add new registries to a t --- -# replicated release compatibility - # replicated release compatibility Report release compatibility @@ -22249,8 +21171,6 @@ replicated release compatibility SEQUENCE [flags] --- -# replicated release create - # replicated release create Create a new release @@ -22295,8 +21215,6 @@ replicated release create [flags] --- -# replicated release download - # replicated release download Download application manifests for a release. 
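For example, a typical download-edit-update loop might look like the following sketch. The sequence number is illustrative, and the `--yaml-dir` flag on `release update` is an assumption based on the matching flag on `release create`:

```bash
# Download the manifests for sequence 15 into a local directory
replicated release download 15 --dest ./manifests

# ...edit the manifests locally...

# Upload the edited manifests back to the same sequence
# (--yaml-dir on update is assumed to mirror release create)
replicated release update 15 --yaml-dir ./manifests
```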
@@ -22340,8 +21258,6 @@ replicated release download 1 --dest ./manifests --- -# replicated release inspect - # replicated release inspect Long: information about a release @@ -22393,8 +21309,6 @@ replicated release inspect 123 --output json --- -# replicated release lint - # replicated release lint Lint a directory of KOTS manifests @@ -22432,8 +21346,6 @@ replicated release lint [flags] --- -# replicated release ls - # replicated release ls List all of an app's releases @@ -22475,8 +21387,6 @@ ls, list --- -# replicated release promote - # replicated release promote Set the release for a channel @@ -22521,8 +21431,6 @@ replicated release promote 15 fe4901690971757689f022f7a460f9b2 --- -# replicated release test - # replicated release test Test the application release @@ -22557,8 +21465,6 @@ replicated release test SEQUENCE [flags] --- -# replicated release update - # replicated release update Updated a release's yaml config @@ -22596,8 +21502,6 @@ replicated release update SEQUENCE [flags] --- -# replicated release - # replicated release Manage app releases @@ -22637,8 +21541,6 @@ The release command allows vendors to create, display, and promote their release --- -# replicated version upgrade - # replicated version upgrade Upgrade the replicated CLI to the latest version @@ -22673,8 +21575,6 @@ replicated version upgrade [flags] --- -# replicated version - # replicated version Print the current version and exit @@ -22711,8 +21611,6 @@ replicated version [flags] --- -# replicated vm create - # replicated vm create Create one or more test VMs with specified distribution, version, and configuration options. @@ -22778,8 +21676,6 @@ replicated vm create --distribution ubuntu --version 20.04 --count 5 --instance- --- -# replicated vm ls - # replicated vm ls List test VMs and their status, with optional filters for start/end time and terminated VMs. @@ -22847,8 +21743,6 @@ replicated vm ls --watch --- -# replicated vm port expose - # replicated vm port expose Expose a port on a vm to the public internet. @@ -22907,8 +21801,6 @@ replicated vm port expose VM_ID --port 8080 --protocol https --output json --- -# replicated vm port ls - # replicated vm port ls List vm ports for a vm. @@ -22959,8 +21851,6 @@ replicated vm port ls VM_ID --output wide --- -# replicated vm port rm - # replicated vm port rm Remove vm port by ID. @@ -23009,8 +21899,6 @@ replicated vm port rm VM_ID --id PORT_ID --output json --- -# replicated vm port - # replicated vm port Manage VM ports. @@ -23059,8 +21947,6 @@ replicated vm port expose [VM_ID] [PORT] --- -# replicated vm rm - # replicated vm rm Remove test VM(s) immediately, with options to filter by name, tag, or remove all VMs. @@ -23131,8 +22017,6 @@ replicated vm rm --all --dry-run --- -# replicated vm update ttl - # replicated vm update ttl Update TTL for a test VM. @@ -23187,8 +22071,6 @@ replicated vm update ttl aaaaa11 --ttl 30m --- -# replicated vm update - # replicated vm update Update VM settings. @@ -23237,8 +22119,6 @@ replicated vm update --name --ttl 12h --- -# replicated vm versions - # replicated vm versions List available VM versions. @@ -23291,8 +22171,6 @@ replicated vm versions --output json --- -# replicated vm - # replicated vm Manage test virtual machines. @@ -23345,8 +22223,6 @@ replicated vm update ttl <vm-id> --ttl 24h --- -# Replicated SDK API - # Replicated SDK API The Replicated SDK provides an API that you can use to embed Replicated functionality in your Helm chart application. 
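For example, after the SDK is installed alongside the application, other workloads in the same namespace can query its in-cluster service. The following minimal sketch assumes the default service name and port (`replicated:3000`); the `seat_count` field name is hypothetical:

```bash
# Get details about the installed application
curl replicated:3000/api/v1/app/info

# Read a single license entitlement (the field name is hypothetical)
curl replicated:3000/api/v1/license/fields/seat_count
```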
@@ -23870,8 +22746,6 @@ To revoke access to your application when a license expires: --- -# replicated - # replicated Manage your Commercial Software Distribution Lifecycle using Replicated @@ -23911,8 +22785,6 @@ The 'replicated' CLI allows Replicated customers (vendors) to manage their Comme --- -# About Template Functions - import UseCases from "../partials/template-functions/_use-cases.mdx" # About Template Functions @@ -24076,8 +22948,6 @@ For a list of all KOTS template functions available in the identity context, see --- -# Config Context - # Config Context ## ConfigOption @@ -24343,8 +23213,6 @@ HasLocalRegistry is always true for air gap installations. HasLocalRegistry is t --- -# Template Function Examples - import IntegerComparison from "../partials/template-functions/_integer-comparison.mdx" import StringComparison from "../partials/template-functions/_string-comparison.mdx" import NeComparison from "../partials/template-functions/_ne-comparison.mdx" @@ -24794,8 +23662,6 @@ The following topics include additional examples of using KOTS template function --- -# Identity Context - # Identity Context ## IdentityServiceEnabled @@ -24916,8 +23782,6 @@ kind: Ingress --- -# kURL Context - # kURL Context ## kURL Context Functions @@ -25006,8 +23870,6 @@ Returns all values in the Installer custom resource as key:value pairs, sorted b --- -# License Context - # License Context ## LicenseFieldValue @@ -25133,8 +23995,6 @@ IsAirgap is `true` when the app is installed via uploading an airgap package, fa --- -# Static Context - # Static Context ## About Mastermind Sprig @@ -25771,8 +24631,6 @@ repl{{ ConfigOptionData "my_file_upload" | YamlEscape }} --- -# Using the Vendor API v3 - import ApiAbout from "../partials/vendor-api/_api-about.mdx" # Using the Vendor API v3 @@ -25809,8 +24667,6 @@ For the Vendor API swagger specification, see [vendor-api-v3.json](https://api.r --- -# Adding Links to the Dashboard - # Adding Links to the Dashboard This topic describes how to use the Kubernetes SIG Application custom resource to add links to the Replicated KOTS Admin Console dashboard. @@ -25901,8 +24757,6 @@ For more information about working with KOTS template functions, see [About Temp --- -# Customizing the Application Icon - # Customizing the Application Icon You can add a custom application icon that displays in the Replicated Admin Console and the download portal. Adding a custom icon helps ensure that your brand is reflected for your customers. @@ -25966,8 +24820,6 @@ For your custom application icon to look best in the Admin Console, consider the --- -# Creating and Editing Configuration Fields - # Creating and Editing Configuration Fields This topic describes how to use the KOTS Config custom resource manifest file to add and edit fields in the KOTS Admin Console configuration screen. 
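As a point of reference, a minimal Config custom resource looks like the following sketch. The group and item names are illustrative only:

```yaml
apiVersion: kots.io/v1beta1
kind: Config
metadata:
  name: config-sample
spec:
  groups:
    - name: database            # illustrative group name
      title: Database
      items:
        - name: external_db_uri # illustrative item name
          title: External Database URI
          type: text
          required: true
```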
@@ -26107,8 +24959,6 @@ For more information, see [Mapping User-Supplied Values](config-screen-map-input --- -# Adding Resource Status Informers - import StatusesTable from "../partials/status-informers/_statusesTable.mdx" import AggregateStatus from "../partials/status-informers/_aggregateStatus.mdx" import AggregateStatusIntro from "../partials/status-informers/_aggregate-status-intro.mdx" @@ -26201,8 +25051,6 @@ The following table lists the supported Kubernetes resources and the conditions --- -# Port Forwarding Services with KOTS - import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; import ServicePortNote from "../partials/custom-resource-application/_servicePort-note.mdx" @@ -26447,8 +25295,6 @@ To test this example: --- -# Adding Custom Graphs - import OverviewProm from "../partials/monitoring/_overview-prom.mdx" import LimitationEc from "../partials/monitoring/_limitation-ec.mdx" @@ -26538,8 +25384,6 @@ To customize graphs on the Admin Console dashboard: --- -# About Integrating with CI/CD - import TestRecs from "../partials/ci-cd/_test-recs.mdx" # About Integrating with CI/CD @@ -26572,8 +25416,6 @@ The following are Replicated's best practices and recommendations for CI/CD: --- -# Integrating Replicated GitHub Actions - # Integrating Replicated GitHub Actions This topic describes how to integrate Replicated's custom GitHub actions into continuous integration and continuous delivery (CI/CD) workflows that use the GitHub Actions platform. @@ -26709,8 +25551,6 @@ For an up-to-date list of the avilable custom GitHub actions, see the [replicate --- -# Recommended CI/CD Workflows - import Build from "../partials/ci-cd/_build-source-code.mdx" # Recommended CI/CD Workflows @@ -27018,8 +25858,6 @@ See the [channel rm](/reference/replicated-cli-channel-rm) Replicated CLI comman --- -# Viewing Compatibility Matrix Usage History - # Viewing Compatibility Matrix Usage History This topic describes using the Replicated Vendor Portal to understand Compatibility Matrix usage across your team. @@ -27255,8 +26093,6 @@ with the other endpoints as well. --- -# About the Configuration Screen - # About the Configuration Screen This topic describes the configuration screen on the Config tab in the Replicated Admin Console. @@ -27301,8 +26137,6 @@ The following shows an example of how the configuration screen displays in the A --- -# Using Conditional Statements in Configuration Fields - import IntegerComparison from "../partials/template-functions/_integer-comparison.mdx" import PropertyWhen from "../partials/config/_property-when.mdx" import DistroCheck from "../partials/template-functions/_string-comparison.mdx" @@ -27554,8 +26388,6 @@ Additionally, the options relevant to the load balancer display when the user se --- -# Mapping User-Supplied Values - # Mapping User-Supplied Values This topic describes how to map the values that your users provide in the Replicated Admin Console configuration screen to your application. @@ -27731,8 +26563,6 @@ To map user inputs from the configuration screen to the `values.yaml` file: --- -# Using Custom Domains - # Using Custom Domains This topic describes how to use the Replicated Vendor Portal to add and manage custom domains to alias the Replicated registry, the Replicated proxy registry, the Replicated app service, and the download portal. 
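Conceptually, aliasing a Replicated endpoint comes down to a DNS CNAME record for your domain. The following zone-file sketch is purely illustrative; the actual CNAME target is provided in the Vendor Portal when you add the domain:

```text
; Hypothetical record aliasing the proxy registry under your own domain.
; Replace the target with the value shown in the Vendor Portal.
proxy.example.com.   3600   IN   CNAME   <target-from-vendor-portal>.
```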
@@ -27883,8 +26713,6 @@ To remove a custom domain: --- -# About Custom Domains - # About Custom Domains This topic provides an overview and the limitations of using custom domains to alias the Replicated private registry, Replicated proxy registry, Replicated app service, and the Download Portal. @@ -27930,8 +26758,6 @@ Using custom domains has the following limitations: --- -# Configuring Custom Metrics (Beta) - # Configuring Custom Metrics (Beta) This topic describes how to configure an application to send custom metrics to the Replicated Vendor Portal. @@ -28138,8 +26964,6 @@ You can use the Vendor API v3 `/app/{app_id}/events` endpoint to programatically --- -# Adoption Report - # Adoption Report This topic describes the insights in the **Adoption** section on the Replicated Vendor Portal **Dashboard** page. @@ -28252,8 +27076,6 @@ The following table describes each metric in the **Adoption** section, including --- -# Customer Reporting - # Customer Reporting This topic describes the customer and instance data displayed in the **Customers > Reporting** page of the Replicated Vendor Portal. @@ -28346,8 +27168,6 @@ The **Instances** section displays the following details about each active insta --- -# Data Availability and Continuity - # Data Availability and Continuity Replicated uses redundancy and a cloud-native architecture in support of availability and continuity of vendor data. @@ -28388,8 +27208,6 @@ For additional data redundancy, an offsite data backup add-on is available to co --- -# About Managing Stateful Services - # About Managing Stateful Services This topic provides recommendations for managing stateful services that you install into existing clusters. @@ -28410,8 +27228,6 @@ For an example of integrating persistent datastores, see [Example: Adding Databa --- -# Disaster Recovery for Embedded Cluster (Alpha) - # Disaster Recovery for Embedded Cluster (Alpha) This topic describes the disaster recovery feature for Replicated Embedded Cluster, including how to enable disaster recovery for your application. It also describes how end users can configure disaster recovery in the Replicated KOTS Admin Console and restore from a backup. 
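At a high level, a restore is run on a fresh host from the same installation assets used for the original install. A minimal sketch, assuming the application binary exposes the restore subcommand described in this topic, where `APP_SLUG` stands in for your application's Embedded Cluster binary:

```bash
# On a new machine, restore the cluster and application from a backup
sudo ./APP_SLUG restore
```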
@@ -28673,8 +27489,6 @@ To restore from a backup: --- -# Embedded Cluster Overview - import EmbeddedCluster from "../partials/embedded-cluster/_definition.mdx" import Requirements from "../partials/embedded-cluster/_requirements.mdx" import EmbeddedClusterPortRequirements from "../partials/embedded-cluster/_port-reqs.mdx" @@ -28812,8 +27626,6 @@ Embedded Cluster has the following limitations: --- -# Using Embedded Cluster - import UpdateOverview from "../partials/embedded-cluster/_update-overview.mdx" import SupportBundleIntro from "../partials/support-bundles/_ec-support-bundle-intro.mdx" import EmbeddedClusterSupportBundle from "../partials/support-bundles/_generate-bundle-ec.mdx" @@ -29095,8 +27907,6 @@ If you include the NVIDIA GPU Operator as a Helm extension, remove any existing --- -# Using the Proxy Registry with Helm Installations - import StepCreds from "../partials/proxy-service/_step-creds.mdx" import StepCustomDomain from "../partials/proxy-service/_step-custom-domain.mdx" @@ -29235,8 +28045,6 @@ To enable the proxy registry: --- -# Installing and Updating with Helm in Air Gap Environments - import Prerequisites from "../partials/helm/_helm-install-prereqs.mdx" # Installing and Updating with Helm in Air Gap Environments @@ -29347,8 +28155,6 @@ You can integrate the Replicated proxy registry with an existing Harbor or jFrog --- -# About Helm Installations with Replicated - import Helm from "../partials/helm/_helm-definition.mdx" # About Helm Installations with Replicated @@ -29396,8 +28202,6 @@ Helm installations have the following limitations: --- -# Packaging a Helm Chart for a Release - import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" import RegistryLogout from "../partials/replicated-sdk/_registry-logout.mdx" import HelmPackage from "../partials/helm/_helm-package.mdx" @@ -29458,8 +28262,6 @@ To package a Helm chart so that it can be added to a release: --- -# Troubleshooting Helm Installations with Replicated - # Troubleshooting Helm Installations with Replicated This topic provides troubleshooting information for common issues related to performing installations and upgrades with the Helm CLI. @@ -29489,8 +28291,6 @@ Alternatively, if a release contains charts that must use the same name, convert --- -# Helm global.replicated Values Schema - import SdkValues from "../partials/replicated-sdk/_sdk-values.mdx" # Helm global.replicated Values Schema @@ -29556,8 +28356,6 @@ The `global.replicated` values schema contains the following fields: --- -# About Distributing Helm Charts with KOTS - import GitOpsLimitation from "../partials/helm/_gitops-limitation.mdx" import GitOpsNotRecommended from "../partials/gitops/_gitops-not-recommended.mdx" import TemplateLimitation from "../partials/helm/_helm-template-limitation.mdx" @@ -29819,8 +28617,6 @@ The following limitations apply when using version `kots.io/v1beta1` of the Helm --- -# Configuring the HelmChart Custom Resource v2 - import KotsHelmCrDescription from "../partials/helm/_kots-helm-cr-description.mdx" # Configuring the HelmChart Custom Resource v2 @@ -30197,8 +28993,6 @@ There are different steps for migrating to HelmChart v2 depending on the applica --- -# Example: Including Optional Helm Charts - # Example: Including Optional Helm Charts This topic describes using optional Helm charts in your application. It also provides an example of how to configure the Replicated HelmChart custom resource to exclude optional Helm charts from your application when a given condition is met. 
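The core of this pattern is the HelmChart custom resource `exclude` field, which can be templated with KOTS template functions. A minimal sketch, where the chart name, config option name, and option value are hypothetical:

```yaml
apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: postgresql
spec:
  chart:
    name: postgresql
    chartVersion: 12.1.7
  # Skip deploying this chart when the user selects an external database.
  # "postgres_type" and "external_postgres" are hypothetical names.
  exclude: 'repl{{ ConfigOptionEquals "postgres_type" "external_postgres" }}'
```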
@@ -30349,8 +29143,6 @@ Finally, edit the HelmChart custom resource: --- -# Setting Helm Values with KOTS - import Values from "../partials/helm/_helm-cr-values.mdx" import OptionalValues from "../partials/helm/_helm-cr-optional-values.mdx" import OptionalValuesWhen from "../partials/helm/_helm-cr-optional-values-when.mdx" @@ -30572,8 +29364,6 @@ For more information about using a `null` value to delete a key, see [Deleting a --- -# Packaging Air Gap Bundles for Helm Charts - import HelmBuilderRequirements from "../partials/helm/_helm-builder-requirements.mdx" import BuilderAirgapIntro from "../partials/helm/_helm-cr-builder-airgap-intro.mdx" import BuilderExample from "../partials/helm/_helm-cr-builder-example.mdx" @@ -30654,8 +29444,6 @@ Many applications have images that are included or excluded based on a given con --- -# Migrating Existing Installations to HelmChart v2 - # Migrating Existing Installations to HelmChart v2 This topic describes how to migrate existing Replicated KOTS installations to the KOTS HelmChart `kots.io/v1beta2` (HelmChart v2) installation method, without having to reinstall the application. It also includes information about how to support both HelmChart v1 and HelmChart v2 installations from a single release, and lists frequently-asked questions (FAQs) related to migrating to HelmChart v2. @@ -30864,8 +29652,6 @@ For more information about how KOTS deploys Helm charts, including information a --- -# Enabling and Configuring Identity Service (Beta) - :::important This topic is deleted from the product documentation because this Beta feature is deprecated. ::: @@ -31093,8 +29879,6 @@ spec: --- -# Enabling and Understanding Application Status - import StatusesTable from "../partials/status-informers/_statusesTable.mdx" import AggregateStatus from "../partials/status-informers/_aggregateStatus.mdx" import AggregateStatusIntro from "../partials/status-informers/_aggregate-status-intro.mdx" @@ -31184,8 +29968,6 @@ The following table lists the supported Kubernetes resources and the conditions --- -# Installing with Helm - import Prerequisites from "../partials/helm/_helm-install-prereqs.mdx" import FirewallOpeningsIntro from "../partials/install/_firewall-openings-intro.mdx" @@ -31278,8 +30060,6 @@ To install a Helm chart: --- -# Installer History - import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" # Installer History @@ -31320,8 +30100,6 @@ The **kURL Installer History** page includes the following information for each --- -# Export Customer and Instance Data - import Download from "../partials/customers/_download.mdx" # Export Customer and Instance Data @@ -31677,8 +30455,6 @@ The following table lists the data fields that can be included in the customers --- -# Instance Details - # Instance Details This topic describes using the Replicated Vendor Portal to quickly understand the recent events and performance of application instances installed in your customers' environments. 
@@ -32057,8 +30833,6 @@ For more information about configuring custom metrics, see [Configuring Custom M --- -# About Instance and Event Data - import AirGapTelemetry from "../partials/instance-insights/_airgap-telemetry.mdx" # About Instance and Event Data @@ -32165,8 +30939,6 @@ The Vendor Portal has the following limitations for reporting instance data and --- -# Configuring Instance Notifications (Beta) - import NotificationsAbout from "../partials/instance-insights/_notifications-about.mdx" @@ -32230,8 +31002,6 @@ There is a 30-second buffer between event detection and notifications being sent --- -# Replicated FAQs - import SDKOverview from "../partials/replicated-sdk/_overview.mdx" import EmbeddedKubernetes from "../partials/kots/_embedded-kubernetes-definition.mdx" import Helm from "../partials/helm/_helm-definition.mdx" @@ -32448,8 +31218,6 @@ For more information, see [About Instance and Event Data](/vendor/instance-insig --- -# Introduction to kURL - import KurlDefinition from "../partials/kurl/_kurl-definition.mdx" import Installers from "../partials/kurl/_installers.mdx" import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" @@ -32507,8 +31275,6 @@ The open source kURL documentation contains additional information including kUR --- -# Exposing Services Using NodePorts - import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" # Exposing Services Using NodePorts @@ -32633,8 +31399,6 @@ For example: --- -# Resetting a kURL Cluster - import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" # Resetting a kURL Cluster @@ -32668,8 +31432,6 @@ If the `reset` command is unsuccessful, discard your current VM, and recreate th --- -# About Community Licenses - # About Community Licenses This topic describes community licenses. For more information about other types of licenses, see [Customer Types](licenses-about#customer-types) in _About Customers_. @@ -32713,8 +31475,6 @@ For applications installed with KOTS, the branding in the admin console for comm --- -# About Customers and Licensing - import ChangeChannel from "../partials/customers/_change-channel.mdx" # About Customers and Licensing @@ -32905,8 +31665,6 @@ Unless the existing customer is using a community license, it is not possible to --- -# Managing Customer License Fields - # Managing Customer License Fields This topic describes how to manage customer license fields in the Replicated Vendor Portal, including how to add custom fields and set initial values for the built-in fields. @@ -33022,8 +31780,6 @@ To delete a custom license field: --- -# Downloading Customer Licenses - import AirGapLicenseDownload from "../partials/install/_airgap-license-download.mdx" # Downloading Customer Licenses @@ -33056,8 +31812,6 @@ To enable the air gap entitlement and download the license: --- -# Managing Install Types for a License - import InstallerOnlyAnnotation from "../partials/helm/_installer-only-annotation.mdx" # Managing Install Types for a License @@ -33187,8 +31941,6 @@ Otherwise, if the **KOTS Install Enabled** field is disabled for the existing li --- -# Checking Entitlements in Helm Charts Before Deployment - # Checking Entitlements in Helm Charts Before Deployment This topic describes how to check license entitlements before a Helm chart is installed or upgraded. The information in this topic applies to Helm charts installed with Replicated KOTS or Helm. 
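One way to enforce an entitlement at render time is to call `fail` from a chart template when a required license field is missing. This is a minimal sketch, assuming the license fields are exposed to the chart under `global.replicated.licenseFields` as described in the `global.replicated` values schema; the `seat_count` field and template name are hypothetical:

```yaml
{{- /* templates/_entitlements.tpl: fail the render when a required
      entitlement is missing. "seat_count" is a hypothetical field. */ -}}
{{- define "mychart.requireSeats" -}}
{{- $seats := dig "replicated" "licenseFields" "seat_count" "value" "" (.Values.global | default dict) -}}
{{- if not $seats -}}
{{- fail "this license does not include the seat_count entitlement" -}}
{{- end -}}
{{- end -}}
```

Rendering the named template from any manifest in the chart (for example, with `include`) runs the check on both install and upgrade.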
@@ -33249,8 +32001,6 @@ To check entitlements before installation: --- -# Querying Entitlements with the KOTS License API - # Querying Entitlements with the KOTS License API This topic describes how to use the Replicated KOTS License API to query license fields during runtme. The information in this topic applies to applications installed with KOTS. @@ -33316,8 +32066,6 @@ rp({ --- -# Querying Entitlements with the Replicated SDK API - # Querying Entitlements with the Replicated SDK API This topic describes how to query license entitlements at runtime using the Replicated SDK in-cluster API. The information in this topic applies to applications installed with Replicated KOTS or Helm. @@ -33413,8 +32161,6 @@ To use the SDK API to query entitlements at runtime: --- -# Checking Entitlements in Preflights with KOTS Template Functions - # Checking Entitlements in Preflights with KOTS Template Functions This topic describes how to check custom entitlements before installation or upgrade using preflight checks and KOTS template functions in the License context. The information in this topic applies to applications installed with KOTS. @@ -33466,8 +32212,6 @@ For more information about defining preflight checks, see [Defining Preflight Ch --- -# Built-In License Fields - import GitOpsNotRecommended from "../partials/gitops/_gitops-not-recommended.mdx" # Built-In License Fields @@ -33674,8 +32418,6 @@ The table below describes the built-in license fields related to the Admin Conso --- -# Verifying License Field Signatures with the Replicated SDK API - # Verifying License Field Signatures with the Replicated SDK API This topic describes how to verify the signatures of license fields when checking customer license entitlements with the Replicated SDK. @@ -33732,8 +32474,6 @@ To use your public key to verify license field signatures: --- -# Application Namespaces - # Application Namespaces Replicated strongly recommends that applications are architected to deploy a single application into a single namespace when possible. @@ -33783,8 +32523,6 @@ spec: --- -# Offsite Data Backup - # Offsite Data Backup Replicated stores customer data in multiple databases across Amazon Web @@ -33835,8 +32573,6 @@ vendor registry and the customer portal when offsite data backup is enabled. --- -# Defining Additional Images - import AirGapBundle from "../partials/airgap/_airgap-bundle.mdx" # Defining Additional Images @@ -33879,8 +32615,6 @@ spec: --- -# Defining Additional Namespaces - # Defining Additional Namespaces Operators often need to be able to manage resources in multiple namespaces in the cluster. @@ -33937,8 +32671,6 @@ This will ensure that the Admin Console will continue to have permissions to all --- -# About Packaging a Kubernetes Operator Application - # About Packaging a Kubernetes Operator Application Kubernetes Operators can be packaged and delivered as an application using the same methods as other Kubernetes applications. @@ -33959,8 +32691,6 @@ An application includes a definition for the developer to list the additional im --- -# Referencing Images - # Referencing Images This topic explains how to support the use of private image registries for applications that are packaged with Kubernetes Operators. 
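A common approach is to template the registry details into the Operator's environment with KOTS template functions, so that image references resolve correctly whether or not the installation uses a local registry. A sketch, with hypothetical variable names and proxy image path:

```yaml
# Environment variables on the Operator Deployment (sketch).
env:
  - name: IMAGE_REGISTRY_HOST
    value: 'repl{{ HasLocalRegistry | ternary LocalRegistryHost "proxy.replicated.com" }}'
  - name: IMAGE_NAMESPACE
    value: 'repl{{ HasLocalRegistry | ternary LocalRegistryNamespace "proxy/my-app/quay.io/my-org" }}'
```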
@@ -34087,8 +32817,6 @@ The developer of the Operator should use these environment variables to change t --- -# Orchestrating Resource Deployment - import WeightLimitation from "../partials/helm/_helm-cr-weight-limitation.mdx" import HooksLimitation from "../partials/helm/_hooks-limitation.mdx" import HookWeightsLimitation from "../partials/helm/_hook-weights-limitation.mdx" @@ -34239,8 +32967,6 @@ status: --- -# Excluding MinIO from Air Gap Bundles (Beta) - # Excluding MinIO from Air Gap Bundles (Beta) The Replicated KOTS Admin Console requires an S3-compatible object store to store application archives and support bundles. By default, KOTS deploys MinIO to satisfy the object storage requirement. For more information about the options for installing without MinIO in existing clusters, see [Installing KOTS in Existing Clusters Without Object Storage](/enterprise/installing-stateful-component-requirements). @@ -34265,8 +32991,6 @@ To exclude MinIO from the `kotsadm.tar.gz` Admin Console air gap bundle: --- -# Cleaning Up Kubernetes Jobs - # Cleaning Up Kubernetes Jobs This topic describes how to use the Replicated KOTS `kots.io/hook-delete-policy` annotation to remove Kubernetes job objects from the cluster after they complete. @@ -34316,8 +33040,6 @@ spec: --- -# Creating a kURL Installer - import Installers from "../partials/kurl/_installers.mdx" import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" @@ -34486,8 +33208,6 @@ When creating a kURL installer, consider the following requirements and guidelin --- -# Conditionally Including or Excluding Resources - # Conditionally Including or Excluding Resources This topic describes how to include or exclude optional application resources based on one or more conditional statements. The information in this topic applies to Helm chart- and standard manifest-based applications. @@ -34618,8 +33338,6 @@ spec: --- -# Adding Cluster Ingress Options - # Adding Cluster Ingress Options When delivering a configurable application, ingress can be challenging as it is very cluster specific. @@ -34740,8 +33458,6 @@ spec: --- -# About Selecting Storage Add-ons - import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" # About Selecting Storage Add-ons @@ -34905,8 +33621,6 @@ For more information about properties for the Rook add-on, see [Rook](https://ku --- -# Setting Minimum and Target Versions for KOTS - # Setting Minimum and Target Versions for KOTS This topic describes how to set minimum and target version for Replicated KOTS in the KOTS [Application](/reference/custom-resource-application) custom resource. @@ -34960,8 +33674,6 @@ For more information about the KOTS add-on, see [KOTS add-on](https://kurl.sh/do --- -# Connecting to an External Registry - # Connecting to an External Registry This topic describes how to add credentials for an external private registry using the Replicated Vendor Portal or Replicated CLI. Adding an external registry allows you to grant proxy access to private images using the Replicated proxy registry. For more information, see [About the Replicated Proxy Registry](private-images-about). @@ -35221,8 +33933,6 @@ replicated registry test index.docker.io --image my-company/my-image:v1.2.3 --- -# Replicated Registry Security - # Replicated Registry Security This document lists the security measures and processes in place to ensure that images pushed to the Replicated registry remain private. 
For more information about pushing images to the Replicated registry, see [Using the Replicated Registry for KOTS Installations](private-images-replicated). @@ -35276,8 +33986,6 @@ Replicated completed a formal pen test that included the private registry in the --- -# Connecting to a Public Registry through the Proxy Registry - # Connecting to a Public Registry through the Proxy Registry This topic describes how to pull images from public registries using the Replicated proxy registry. @@ -35345,8 +34053,6 @@ For information about how to set a custom domain for the proxy registry, see [Us --- -# Configuring KOTS RBAC - # Configuring KOTS RBAC This topic describes role-based access control (RBAC) for Replicated KOTS in existing cluster installations. It includes information about how to change the default cluster-scoped RBAC permissions granted to KOTS. @@ -35467,8 +34173,6 @@ The following limitations apply when using the `requireMinimalRBACPrivileges` or --- -# Using TLS Certificates - import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" # Using TLS Certificates @@ -35600,8 +34304,6 @@ spec: --- -# Customer Application Deployment Questionnaire - # Customer Application Deployment Questionnaire Before you package and distribute an application, Replicated recommends that you @@ -35679,8 +34381,6 @@ If it’s more convenient, limit answers to the scope of the target infrastructu --- -# Data Transmission Policy - # Data Transmission Policy A Replicated installation connects to a Replicated-hosted endpoint periodically to perform various tasks including checking for updates and synchronizing the installed license properties. During this time, some data is transmitted from an installed instance to the Replicated API. This data is limited to: @@ -35706,8 +34406,6 @@ Last modified December 31, 2023 --- -# Infrastructure and Subprocessor Providers - # Infrastructure and Subprocessor Providers @@ -35755,8 +34453,6 @@ Last modified January 4, 2024 --- -# Support Lifecycle Policy - # Support Lifecycle Policy Replicated will provide support for products per our terms and services until that product is noted as End of Life (EOL). @@ -35894,8 +34590,6 @@ Last modified January 2, 2025. --- -# Vulnerability Patch Policy - # Vulnerability Patch Policy @@ -35970,8 +34664,6 @@ Last modified January 29, 2025. --- -# Defining Preflight Checks - # Defining Preflight Checks This topic describes how to define preflight checks in Helm and Kubernetes manifest-based applications. For more information about preflight checks, see [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about). @@ -36094,8 +34786,6 @@ For common examples of collectors and analyzers used in preflight checks, see [E --- -# Example Preflight Specs - import HttpSecret from "../partials/preflights/_http-requests-secret.mdx" import HttpCr from "../partials/preflights/_http-requests-cr.mdx" import MySqlSecret from "../partials/preflights/_mysql-secret.mdx" @@ -36292,8 +34982,6 @@ For more information, see [Cluster Resources](https://troubleshoot.sh/docs/colle --- -# Customizing Host Preflight Checks for kURL - # Customizing Host Preflight Checks for kURL This topic provides information about how to customize host preflight checks for installations with Replicated kURL. For information about the default host preflight checks that run for installations with Replicated Embedded Cluster, see [About Host Preflight Checks](/vendor/embedded-using#about-host-preflight-checks) in _Using Embedded Cluster_. 
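Custom host preflights for kURL are embedded in the Installer custom resource. A minimal sketch, assuming the `hostPreflights` field accepts a Troubleshoot `HostPreflight` spec; the memory threshold and messages are illustrative:

```yaml
apiVersion: cluster.kurl.sh/v1beta1
kind: Installer
metadata:
  name: my-installer
spec:
  hostPreflights:
    apiVersion: troubleshoot.sh/v1beta2
    kind: HostPreflight
    spec:
      collectors:
        - memory: {}
      analyzers:
        - memory:
            checkName: Amount of memory
            outcomes:
              - fail:
                  when: "< 8G"
                  message: At least 8 GB of memory is required.
              - pass:
                  message: The host has at least 8 GB of memory.
```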
@@ -36482,8 +35170,6 @@ spec: --- -# Running Preflight Checks for Helm Installations - # Running Preflight Checks for Helm Installations This topic describes how to use the preflight kubectl plugin to run preflight checks for applications installed with the Helm CLI. @@ -36606,8 +35292,6 @@ To save the results of preflight checks to a `.txt` file, users can can press `s --- -# About Preflight Checks and Support Bundles - import Overview from "../partials/preflights/_preflights-sb-about.mdx" # About Preflight Checks and Support Bundles @@ -36762,8 +35446,6 @@ For more information, see [Generating Support Bundles](support-bundle-generating --- -# About the Replicated Proxy Registry - # About the Replicated Proxy Registry This topic describes how the Replicated proxy registry can be used to grant proxy access to your application's private images or allow pull through access of public images. @@ -36797,8 +35479,6 @@ For more information about how to pull public images through the proxy registry, --- -# Using the Proxy Registry with KOTS Installations - import Deprecated from "../partials/helm/_replicated-deprecated.mdx" import StepCreds from "../partials/proxy-service/_step-creds.mdx" import StepCustomDomain from "../partials/proxy-service/_step-custom-domain.mdx" @@ -36876,8 +35556,6 @@ To enable the proxy registry: --- -# Using the Replicated Registry for KOTS Installations - import TeamTokenNote from "../partials/vendor-api/_team-token-note.mdx" # Using the Replicated Registry for KOTS Installations @@ -36962,8 +35640,6 @@ Docker format: --- -# Using Image Tags and Digests - # Using Image Tags and Digests This topic describes using image tags and digests with your application images. It includes information about when image tags and digests are supported, and how to enable support for image digests in air gap bundles. @@ -37040,8 +35716,6 @@ To enable the new air gap bundle format on a channel: --- -# Replicated Quick Start - import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" import HelmPackage from "../partials/helm/_helm-package.mdx" import Tabs from '@theme/Tabs'; @@ -37486,8 +36160,6 @@ For additional tutorials related to this quick start, see: --- -# About Channels and Releases - import ChangeChannel from "../partials/customers/_change-channel.mdx" import RequiredReleasesLimitations from "../partials/releases/_required-releases-limitations.mdx" import RequiredReleasesDescription from "../partials/releases/_required-releases-description.mdx" @@ -37747,8 +36419,6 @@ You can do the following tasks on the **Draft** page: --- -# Creating and Editing Channels - # Creating and Editing Channels This topic describes how to create and edit channels using the Replicated Vendor Portal. For more information about channels, see [About Channels and Releases](releases-about). @@ -37824,8 +36494,6 @@ To archive a channel with the Vendor Portal or the Replicated CLI: --- -# Managing Releases with the CLI - # Managing Releases with the CLI This topic describes how to use the Replicated CLI to create and promote releases. 
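For example, creating a release from a local directory of manifests and promoting it in one step might look like the following; the channel, version, and sequence values are illustrative, and `CHANNEL_ID` is a placeholder for a real channel ID:

```bash
# Create a release from ./manifests and promote it to Unstable
replicated release create --yaml-dir ./manifests --promote Unstable --version 0.1.0

# Promote an existing sequence to another channel later
replicated release promote 15 CHANNEL_ID --version 0.1.0
```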
@@ -37943,8 +36611,6 @@ To create and promote a release: --- -# Creating and Managing Customers - import ChangeChannel from "../partials/customers/_change-channel.mdx" import Download from "../partials/customers/_download.mdx" import GitOpsNotRecommended from "../partials/gitops/_gitops-not-recommended.mdx" @@ -38064,8 +36730,6 @@ If you want to filter information using multiple license types or channels, you --- -# Managing Releases with the Vendor Portal - import RequiredReleasesLimitations from "../partials/releases/_required-releases-limitations.mdx" import RequiredReleasesDescription from "../partials/releases/_required-releases-description.mdx" @@ -38197,8 +36861,6 @@ To demote a release in the Vendor Portal: --- -# Downloading Assets from the Download Portal - import DownloadPortal from "../partials/kots/_download-portal-about.mdx" # Downloading Assets from the Download Portal @@ -38289,8 +36951,6 @@ and preview your customer's experience. --- -# Finding Installation Commands for a Release - import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; # Finding Installation Commands for a Release @@ -38431,8 +37091,6 @@ To get customer-specific Helm or Embedded Cluster installation instructions: --- -# Generating API Tokens - import TeamTokenNote from "../partials/vendor-api/_team-token-note.mdx" # Generating API Tokens @@ -38536,8 +37194,6 @@ To generate a user API token: --- -# Replicated Onboarding - import CreateRelease from "../partials/getting-started/_create-promote-release.mdx" import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" import EcCr from "../partials/embedded-cluster/_ec-config.mdx" @@ -39125,8 +37781,6 @@ For guidance on how to get started with documentation for applications distribut --- -# Installing the SDK in Air Gap Environments - # Installing the SDK in Air Gap Environments This topic explains how to install the Replicated SDK in air gap environments by enabling air gap mode. @@ -39173,8 +37827,6 @@ When the SDK is installed by KOTS in an air gap environment, KOTS automatically --- -# Customizing the Replicated SDK - # Customizing the Replicated SDK This topic describes various ways to customize the Replicated SDK, including customizing RBAC, setting environment variables, adding tolerations, and more. @@ -39426,8 +38078,6 @@ replicated: --- -# Developing Against the SDK API - import IntegrationMode from "../partials/replicated-sdk/_integration-mode-install.mdx" # Developing Against the SDK API @@ -39470,8 +38120,6 @@ To port forward the SDK API service to your local machine: --- -# Installing the Replicated SDK - import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" import KotsVerReq from "../partials/replicated-sdk/_kots-version-req.mdx" import RegistryLogout from "../partials/replicated-sdk/_registry-logout.mdx" @@ -39658,8 +38306,6 @@ To solve this issue: --- -# About the Replicated SDK - import SDKOverview from "../partials/replicated-sdk/_overview.mdx" import SdkValues from "../partials/replicated-sdk/_sdk-values.mdx" @@ -39698,8 +38344,6 @@ When serving requests, if the upstream APIs become unavailable, the SDK serves f --- -# SLSA Provenance Validation Process for the Replicated SDK - # SLSA Provenance Validation Process for the Replicated SDK This topic describes the process to perform provenance validation on the Replicated SDK. 
@@ -39755,8 +38399,6 @@ slsa-verifier verify-image "${IMAGE_WITH_DIGEST}" \ --- -# Templating Annotations - # Templating Annotations This topic describes how to use Replicated KOTS template functions to template annotations for resources and objects based on user-supplied values. @@ -40021,8 +38663,6 @@ In your Helm chart templates, you can access these values from the `values.yaml` --- -# Configuring Snapshots - # Configuring Snapshots This topic provides information about how to configure the Velero Backup resource to enable Replicated KOTS snapshots for an application. @@ -40121,8 +38761,6 @@ To configure snapshots for your application: --- -# Configuring Backup and Restore Hooks for Snapshots - # Configuring Backup and Restore Hooks for Snapshots This topic describes the use of custom backup and restore hooks and demonstrates a common example. @@ -40227,8 +38865,6 @@ The `extraVolumeMounts` field mounts the volume into the `/scratch` directory of --- -# About Backup and Restore with Snapshots - import RestoreTable from "../partials/snapshots/_restoreTable.mdx" import NoEcSupport from "../partials/snapshots/_limitation-no-ec-support.mdx" import RestoreTypes from "../partials/snapshots/_restore-types.mdx" @@ -40390,8 +39026,6 @@ To support end users with backup and restore, use the following resources: --- -# Adding and Customizing Support Bundles - # Adding and Customizing Support Bundles This topic describes how to add a default support bundle spec to a release for your application. It also describes how to customize the default support bundle spec based on your application's needs. For more information about support bundles, see [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about). @@ -40583,8 +39217,6 @@ For common examples of collectors and analyzers used in support bundle specs, se --- -# Generating Support Bundles for Embedded Cluster - import EmbeddedClusterSupportBundle from "../partials/support-bundles/_generate-bundle-ec.mdx" import SupportBundleIntro from "../partials/support-bundles/_ec-support-bundle-intro.mdx" @@ -40605,8 +39237,6 @@ For information about generating host support bundles for Replicated kURL instal --- -# Example Support Bundle Specs - import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; import HttpSecret from "../partials/support-bundles/_http-requests-secret.mdx" @@ -40775,8 +39405,6 @@ For more information, see [Run Pods](https://troubleshoot.sh/docs/collect/run-po --- -# Generating Support Bundles - import InstallPlugin from "../partials/support-bundles/_install-plugin.mdx" import GenerateBundle from "../partials/support-bundles/_generate-bundle.mdx" @@ -40807,8 +39435,6 @@ kubectl support-bundle https://raw.githubusercontent.com/replicatedhq/troublesho --- -# Enabling Support Bundle Uploads (Beta) - # Enabling Support Bundle Uploads (Beta) :::note @@ -40846,8 +39472,6 @@ Direct bundle uploads are disabled by default. To enable this feature for your c --- -# Generating Host Bundles for kURL - import GenerateBundleHost from "../partials/support-bundles/_generate-bundle-host.mdx" # Generating Host Bundles for kURL @@ -40927,8 +39551,6 @@ Do not store support bundles on public shares, as they may still contain informa --- -# Inspecting Support Bundles - # Inspecting Support Bundles You can use the Vendor Portal to get a visual analysis of customer support bundles and use the file inspector to drill down into the details and logs files. 
Use this information to get insights and help troubleshoot your customer issues. @@ -40977,8 +39599,6 @@ To inspect a support bundle: --- -# About Creating Modular Support Bundle Specs - # About Creating Modular Support Bundle Specs This topic describes how to use a modular approach to creating support bundle specs. @@ -41058,8 +39678,6 @@ For more information and additional options, see [Generating Support Bundles](su --- -# Making Support Bundle Specs Available Online - # Making Support Bundle Specs Available Online This topic describes how to make your application's support bundle specs available online as well as how to link to online specs. @@ -41132,8 +39750,6 @@ For more information about the URI, see [Troubleshoot schema supports a `uri://` --- -# Submitting a Support Request - # Submitting a Support Request You can submit a support request and a support bundle using the Replicated Vendor Portal. Uploading a support bundle is secure and helps the Replicated support team troubleshoot your application faster. Severity 1 issues are resolved three times faster when you submit a support bundle with your support request. @@ -41169,8 +39785,6 @@ To submit a support request: --- -# Managing Collab Repository Access - import CollabRepoAbout from "../partials/collab-repo/_collab-repo-about.mdx" import CollabRbacResourcesImportant from "../partials/collab-repo/_collab-rbac-resources-important.mdx" import CollabExistingUser from "../partials/collab-repo/_collab-existing-user.mdx" @@ -41315,8 +39929,6 @@ For more information about how to edit the `allowed:` or `denied:` lists for cus --- -# Managing Google Authentication - # Managing Google Authentication This topic describes the Google authentication options that you can configure to control access to the Replicated Vendor Portal. @@ -41361,8 +39973,6 @@ Google authentication is not entirely compatible with Replicated two-factor auth --- -# Configuring RBAC Policies - import CollabRbacResourcesImportant from "../partials/collab-repo/_collab-rbac-resources-important.mdx" # Configuring RBAC Policies @@ -41629,8 +40239,6 @@ In the following example, a policy grants access to viewing all customers, but n --- -# RBAC Resource Names - import CollabRbacResourcesImportant from "../partials/collab-repo/_collab-rbac-resources-important.mdx" # RBAC Resource Names @@ -42059,8 +40667,6 @@ Grants the holder permission to delete user tokens. --- -# Managing SAML Authentication - # Managing SAML Authentication This topic describes how to enable or disable SAML authentication for the Replicated Vendor Portal. @@ -42204,8 +40810,6 @@ To disable SAML enforcement: --- -# Configuring a Slack Webhook (Beta) - import NotificationsAbout from "../partials/instance-insights/_notifications-about.mdx" @@ -42249,8 +40853,6 @@ To configure the Slack webhook: --- -# Managing Two-Factor Authentication - # Managing Two-Factor Authentication This topic describes how to enable and disable Replicated two-factor authentication for individual and team accounts in the Replicated Vendor Portal. 
@@ -42341,8 +40943,6 @@ To enable or disable 2FA for a team: --- -# Managing Team Members - import CollabRepoAbout from "../partials/collab-repo/_collab-repo-about.mdx" import CollabRbacImportant from "../partials/collab-repo/_collab-rbac-important.mdx" @@ -42473,8 +41073,6 @@ To update the email address for a team member: --- -# Collecting Telemetry for Air Gap Instances - import AirGapTelemetry from "../partials/instance-insights/_airgap-telemetry.mdx" # Collecting Telemetry for Air Gap Instances @@ -42537,8 +41135,6 @@ To collect telemetry from air gap instances: --- -# About Compatibility Matrix - import Overview from "../partials/cmx/_overview.mdx" import SupportedClusters from "../partials/cmx/_supported-clusters-overview.mdx" @@ -42633,8 +41229,6 @@ For additional distribution-specific limitations, see [Supported Compatibility M --- -# Compatibility Matrix Cluster Add-ons (Alpha) - # Compatibility Matrix Cluster Add-ons (Alpha) This topic describes the supported cluster add-ons for Replicated Compatibility Matrix. @@ -42718,8 +41312,6 @@ Additional service accounts can be created in any namespace with access to the o --- -# Using Compatibility Matrix - import TestRecs from "../partials/ci-cd/_test-recs.mdx" import Prerequisites from "../partials/cmx/_prerequisites.mdx" @@ -43038,8 +41630,6 @@ Incorporating code tests into your CI/CD workflows is important for ensuring tha --- -# Accessing Your Application - # Accessing Your Application This topic describes the networking options for accessing applications deployed on clusters created with Replicated Compatibility Matrix. It also describes how to use and manage Compatibility Matrix tunnels. @@ -43159,8 +41749,6 @@ Removing all protocols also removes the DNS record and TLS cert. --- -# Compatibility Matrix Pricing - # Compatibility Matrix Pricing This topic describes the pricing for Replicated Compatibility Matrix. @@ -43988,8 +42576,6 @@ Last modified January 06, 2025 --- -# Supported Compatibility Matrix Cluster Types - import Pool from "../partials/cmx/\_openshift-pool.mdx" # Supported Compatibility Matrix Cluster Types @@ -44514,8 +43100,6 @@ We do not maintain forks or patches of the supported distributions. When a Kuber --- -# Example: Adding Database Configuration Options - # Example: Adding Database Configuration Options In this tutorial, we'll explore ways to give your end user the option to either embed a database instance with the application, or connect your application to an external database instance that they will manage. @@ -45472,8 +44056,6 @@ If you'd like at this point, you can integrate a real database in your environme --- -# Step 2: Create an Application - # Step 2: Create an Application After you install the Replicated CLI and create an API token, you can use the CLI to create a new application. @@ -45520,8 +44102,6 @@ Continue to [Step 3: Get the Sample Manifests](tutorial-cli-manifests) to downlo --- -# Step 5: Create a Customer - # Step 5: Create a Customer After promoting the first release for the `cli-tutorial` application, create a customer so that you can install the application. @@ -45633,8 +44213,6 @@ Continue to [Step 6: Install KOTS and the Application](tutorial-cli-install-app- --- -# Step 8: Create a New Version - # Step 8: Create a New Version In this step, you make an edit to the Config custom resource manifest file in the `replicated-cli-tutorial/manifests` directory for the `cli-tutorial` application to create a new field on the **Config** page in the Admin Console. 
You will then create and promote a new release to the Unstable channel with your changes. @@ -45779,8 +44357,6 @@ Continue to [Step 9: Update the Application](tutorial-cli-update-app) to return --- -# Step 4: Create a Release - # Step 4: Create a Release Now that you have the manifest files for the sample Kubernetes application, you can create a release for the `cli-tutorial` application and promote the release to the Unstable channel. @@ -45871,8 +44447,6 @@ Continue to [Step 5: Create a Customer](tutorial-cli-create-customer) to create --- -# Step 7: Configure the Application - # Step 7: Configure the Application After you install KOTS, you can log in to the KOTS Admin Console. This procedure shows you how to make a configuration change for the application from the Admin Console, which is a typical task performed by end users. @@ -45925,8 +44499,6 @@ Continue to [Step 8: Create a New Version](tutorial-cli-create-new-version) to m --- -# Step 6: Install KOTS and the Application - # Step 6: Install KOTS and the Application The next step is to test the installation process for the application release that you promoted. Using the KOTS CLI, you will install KOTS and the sample application in your cluster. @@ -46033,8 +44605,6 @@ Continue to [Step 7: Configure the Application](tutorial-cli-deploy-app) to log --- -# Step 1: Install the Replicated CLI - # Step 1: Install the Replicated CLI In this tutorial, you use the Replicated CLI to create and promote releases for a sample application with Replicated. The Replicated CLI is the CLI for the Replicated Vendor Portal. @@ -46120,8 +44690,6 @@ Continue to [Step 2: Create an Application](tutorial-cli-create-app) to use the --- -# Step 3: Get the Sample Manifests - # Step 3: Get the Sample Manifests To create a release for the `cli-tutorial` application, first create the Kubernetes manifest files for the application. This tutorial provides a set of sample manifest files for a simple Kubernetes application that deploys an NGINX service. @@ -46162,8 +44730,6 @@ Continue to [Step 4: Create a Release](tutorial-cli-create-release) to create an --- -# Introduction and Setup - import KubernetesTraining from "../partials/getting-started/_kubernetes-training.mdx" import LabsIntro from "../partials/getting-started/_labs-intro.mdx" import TutorialIntro from "../partials/getting-started/_tutorial-intro.mdx" @@ -46204,8 +44770,6 @@ As part of this tutorial, you will install a sample application into a Kubernete --- -# Step 9: Update the Application - # Step 9: Update the Application To test the new release that you promoted, return to the Admin Console in a browser to update the application. @@ -46269,8 +44833,6 @@ Congratulations! As part of this tutorial, you: --- -# Step 2: Create an Application - # Step 2: Create an Application Next, install the Replicated CLI and then create an application. @@ -46336,8 +44898,6 @@ Add the Replicated SDK to the Helm chart and package the chart to an archive. Se --- -# Step 5: Create a KOTS-Enabled Customer - # Step 5: Create a KOTS-Enabled Customer After promoting the release, create a customer with the KOTS entitlement so that you can install the release with KOTS. @@ -46380,8 +44940,6 @@ Get the KOTS installation command and install. 
See [Step 6: Install the Release --- -# Step 4: Add the Chart Archive to a Release - import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; import HelmChart from "../partials/getting-started/_grafana-helmchart.mdx" @@ -46520,8 +45078,6 @@ Create a customer with the KOTS entitlement so that you can install the release --- -# Step 1: Get the Sample Chart and Test - # Step 1: Get the Sample Chart and Test To begin, get the sample Grafana Helm chart from Bitnami, install the chart in your cluster using the Helm CLI, and then uninstall. The purpose of this step is to confirm that you can successfully install and access the application before adding the chart to a release in the Replicated vendor platform. @@ -46645,8 +45201,6 @@ Log in to the Vendor Portal and create an application. See [Step 2: Create an Ap --- -# Step 6: Install the Release with KOTS - # Step 6: Install the Release with KOTS Next, get the KOTS installation command from the Unstable channel in the Vendor Portal and then install the release using the customer license that you downloaded. @@ -46806,8 +45360,6 @@ To learn more about how to customize the Config custom resource to create config --- -# Step 3: Package the Helm Chart - import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" import UnauthorizedError from "../partials/replicated-sdk/_401-unauthorized.mdx" @@ -46842,8 +45394,6 @@ Create a release using the Helm chart archive. See [Step 4: Add the Chart Archiv --- -# Introduction and Setup - # Introduction and Setup This topic provides a summary of the goals and outcomes for the tutorial and also lists the prerequisites to set up your environment before you begin. @@ -46868,8 +45418,6 @@ Get the sample Bitnami Helm chart and test installation with the Helm CLI. See [ --- -# Tutorial: Using ECR for Private Images - # Tutorial: Using ECR for Private Images ## Objective @@ -47175,8 +45723,6 @@ The install of the new version should have created a new pod. If we run `kubectl --- -# Step 1: Create an Application - # Step 1: Create an Application To begin, install the Replicated CLI and create an application in the Replicated Vendor Portal. @@ -47244,8 +45790,6 @@ Add the Replicated SDK to the Helm chart and package the chart to an archive. Se --- -# Step 4: Create an Embedded Cluster-Enabled Customer - # Step 4: Create an Embedded Cluster-Enabled Customer After promoting the release, create a customer with the Replicated KOTS and Embedded Cluster entitlements so that you can install the release with Embedded Cluster. A _customer_ represents a single licensed user of your application. @@ -47284,8 +45828,6 @@ Get the Embedded Cluster installation commands and install. See [Step 5: Install --- -# Step 3: Add the Chart Archive to a Release - import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; import HelmChartCr from "../partials/getting-started/_gitea-helmchart-cr-ec.mdx" @@ -47422,8 +45964,6 @@ Create a customer with the Embedded Cluster entitlement so that you can install --- -# Step 5: Install the Release on a VM - import KotsVerReq from "../partials/replicated-sdk/_kots-version-req.mdx" # Step 5: Install the Release on a VM @@ -47539,8 +46079,6 @@ Congratulations! 
As part of this tutorial, you created a release in the Replicat --- -# Step 2: Package the Gitea Helm Chart - import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" import UnauthorizedError from "../partials/replicated-sdk/_401-unauthorized.mdx" @@ -47597,8 +46135,6 @@ Create a release using the Helm chart archive. See [Step 3: Add the Chart Archiv --- -# Introduction and Setup - import Requirements from "../partials/embedded-cluster/_requirements.mdx" # Introduction and Setup @@ -47627,8 +46163,6 @@ Install the Replicated CLI and create an application in the Replicated Vendor Po --- -# Step 2: Create an Application - # Step 2: Create an Application Next, install the Replicated CLI and then create an application. @@ -47696,8 +46230,6 @@ Add the Replicated SDK to the Helm chart and package the chart to an archive. Se --- -# Step 5: Create a KOTS-Enabled Customer - # Step 5: Create a KOTS-Enabled Customer After promoting the release, create a customer with the KOTS entitlement so that you can install the release with KOTS. A _customer_ represents a single licensed user of your application. @@ -47740,8 +46272,6 @@ Get the KOTS installation command and install. See [Step 6: Install the Release --- -# Step 4: Add the Chart Archive to a Release - import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; import HelmChartCr from "../partials/getting-started/_gitea-helmchart-cr.mdx" @@ -47869,8 +46399,6 @@ Create a customer with the KOTS entitlement so that you can install the release --- -# Step 1: Get the Sample Chart and Test - # Step 1: Get the Sample Chart and Test To begin, get the sample Gitea Helm chart from Bitnami, install the chart in your cluster using the Helm CLI, and then uninstall. The purpose of this step is to confirm that you can successfully install and access the application before adding the chart to a release in the Replicated Vendor Portal. @@ -47982,8 +46510,6 @@ Log in to the Vendor Portal and create an application. See [Step 2: Create an Ap --- -# Step 7: Install the Release with the Helm CLI - # Step 7: Install the Release with the Helm CLI Next, install the same release using the Helm CLI. All releases that contain one or more Helm charts can be installed with the Helm CLI. @@ -48106,8 +46632,6 @@ Congratulations! As part of this tutorial, you created a release in the Replicat --- -# Step 6: Install the Release with KOTS - import KotsVerReq from "../partials/replicated-sdk/_kots-version-req.mdx" # Step 6: Install the Release with KOTS @@ -48260,8 +46784,6 @@ Install the same release with the Helm CLI. See [Step 7: Install the Release wit --- -# Step 3: Package the Helm Chart - import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" import UnauthorizedError from "../partials/replicated-sdk/_401-unauthorized.mdx" @@ -48300,8 +46822,6 @@ Create a release using the Helm chart archive. See [Step 4: Add the Chart Archiv --- -# Introduction and Setup - # Introduction and Setup This topic provides a summary of the goals and outcomes for the tutorial and also lists the prerequisites to set up your environment before you begin. @@ -48349,8 +46869,6 @@ Get the sample Bitnami Helm chart and test installation with the Helm CLI. See [ --- -# Step 2: Add a Preflight Spec to the Chart - import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" # Step 2: Add a Preflight Spec to the Chart @@ -48423,8 +46941,6 @@ Add the chart archive to a release. 
See [Add the Chart Archive to a Release](tut --- -# Step 4: Create a Customer - # Step 4: Create a Customer After promoting the release, create a customer so that you can run the preflight checks and install. @@ -48461,8 +46977,6 @@ Use the Helm CLI to run the preflight checks you defined and install Gitea. See --- -# Step 3: Add the Chart Archive to a Release - # Step 3: Add the Chart Archive to a Release Use the Replicated CLI to add the Gitea Helm chart archive to a release in the Replicated vendor platform. @@ -48557,8 +47071,6 @@ Create a customer so that you can install the release in a development environme --- -# Step 1: Get the Sample Chart and Test - # Step 1: Get the Sample Chart and Test To begin, get the sample Gitea Helm chart from Bitnami, install the chart in your cluster using the Helm CLI, and then uninstall. The purpose of this step is to confirm that you can successfully install the application before adding preflight checks to the chart. @@ -48678,8 +47190,6 @@ Define preflight checks and add them to the Gitea Helm chart. See [Add a Preflig --- -# Step 6: Run Preflights with KOTS - import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; import HelmChartCr from "../partials/getting-started/_gitea-helmchart-cr.mdx" @@ -48883,8 +47393,6 @@ To learn more about defining and running preflight checks, see: --- -# Step 5: Run Preflights with the Helm CLI - # Step 5: Run Preflights with the Helm CLI Use the Helm CLI installation instructions provided for the customer that you created to run the preflight checks for Gitea and install. The purpose of this step is to demonstrate how enterprise users can run preflight checks defined in a Helm chart before installing. @@ -48958,8 +47466,6 @@ Install the application with KOTS to see how preflight checks are run from the K --- -# Introduction and Setup - # Introduction and Setup This topic provides a summary of the goals and outcomes for the tutorial and also lists the prerequisites to set up your environment before you begin. @@ -49008,8 +47514,6 @@ Get the sample Bitnami Helm chart and test installation with the Helm CLI. See [ --- -# Using a Registry Proxy for Helm Air Gap Installations - # Using a Registry Proxy for Helm Air Gap Installations This topic describes how to connect the Replicated proxy registry to a Harbor or jFrog Artifactory instance to support pull-through image caching. It also includes information about how to set up replication rules in Harbor for image mirroring. @@ -49087,8 +47591,6 @@ For information about how to configure a pull-through cache with Artifactory, se --- -# Application Settings Page - # Application Settings Page Each application has its own settings, which include the application name and application slug. @@ -49110,8 +47612,6 @@ The following describes each of the application settings: --- -# Creating a Vendor Account - # Creating a Vendor Account To get started with Replicated, you must create a Replicated vendor account. When you create your account, you are also prompted to create an application. To create additional applications in the future, log in to the Replicated Vendor Portal and select **Create new app** from the Applications drop-down list. @@ -49163,8 +47663,6 @@ Invite team members to collaborate with you in Vendor Portal. 
See [Invite Member --- -# Managing Applications - # Managing Applications This topic provides information about managing applications, including how to create, delete, and retrieve the slug for applications in the Replicated Vendor Portal and with the Replicated CLI. diff --git a/static/llms.txt b/static/llms.txt index 2f42e64dbe..01b5a91c9b 100644 --- a/static/llms.txt +++ b/static/llms.txt @@ -1,6 +1,6 @@ -# Replicated Documentation for LLMs +# Replicated Documentation -This file contains markdown-formatted links to Replicated documentation pages. +> Replicated is a commercial software distribution platform. Independent software vendors (ISVs) can use features of the Replicated Platform to distribute modern commercial software into complex, customer-controlled environments, including on-prem and air gap. ## Docs @@ -758,8 +758,8 @@ For a complete archive of all documentation pages, see [llms-full.txt](https://d - [Creating a Vendor Account](https://docs.replicated.com/vendor/vendor-portal-creating-account.md): To get started with Replicated, you must create a Replicated vendor account. - [Managing Applications](https://docs.replicated.com/vendor/vendor-portal-manage-app.md): This topic provides information about managing applications, including how to create, delete, and retrieve the slug for applications in the Replicated Vendor Portal and with the Replicated CLI. -## Additional Resources +## Optional For more information, visit: -- [Replicated Documentation Home](https://docs.replicated.com) -- [Replicated Help Center](https://help.replicated.com) +- [Replicated Community](https://community.replicated.com/) +- [Replicated Vendor API v3 Docs](https://replicated-vendor-api.readme.io/reference/) From b71e87c3af32b24f9d3a9e8ea52886c1cab7af1b Mon Sep 17 00:00:00 2001 From: Paige Calvert <paige@replicated.com> Date: Thu, 27 Mar 2025 17:45:47 -0600 Subject: [PATCH 7/9] remove repomix --- .repomixignore | 25 - package.json | 3 +- repomix.config.json | 28 - static/llms/llms-docs.txt | 48314 ------------------------------------ static/llms/llms.txt | 16 - 5 files changed, 1 insertion(+), 48385 deletions(-) delete mode 100644 .repomixignore delete mode 100644 repomix.config.json delete mode 100644 static/llms/llms-docs.txt delete mode 100644 static/llms/llms.txt diff --git a/.repomixignore b/.repomixignore deleted file mode 100644 index f5912d0a7a..0000000000 --- a/.repomixignore +++ /dev/null @@ -1,25 +0,0 @@ -# Add patterns to ignore here, one per line -# Example: -# *.log -# tmp/ - -docs/release-notes/ -docs/templates/ -docs/pdfs/ -docs/.history/ -.github/ -src/ -.gitignore -.repomixignore -babel.config.js -CODEOWNERS -config.json -docusaurus.config.js -js/ -LICENSE -netlify.toml -package.json -README.md -repomix.config.json -sidebars.js -variables.js \ No newline at end of file diff --git a/package.json b/package.json index 94e8c3cfb0..75e870f334 100644 --- a/package.json +++ b/package.json @@ -36,8 +36,7 @@ "devDependencies": { "@docusaurus/module-type-aliases": "3.5.2", "@docusaurus/types": "3.5.2", - "typescript": "~5.8.2", - "repomix": "0.2.36" + "typescript": "~5.8.2" }, "resolutions": { "immer": "^10.1.1", diff --git a/repomix.config.json b/repomix.config.json deleted file mode 100644 index 1fb48f3b52..0000000000 --- a/repomix.config.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "output": { - "filePath": "llms-docs.txt", - "style": "plain", - "parsableStyle": false, - "fileSummary": true, - "directoryStructure": true, - "removeComments": false, - "removeEmptyLines": false, - "compress": 
false, - "topFilesLength": 5, - "showLineNumbers": false, - "copyToClipboard": false - }, - "include": [ - "docs/" - ], - "ignore": { - "useGitignore": true, - "useDefaultPatterns": true - }, - "security": { - "enableSecurityCheck": true - }, - "tokenCount": { - "encoding": "o200k_base" - } -} \ No newline at end of file diff --git a/static/llms/llms-docs.txt b/static/llms/llms-docs.txt deleted file mode 100644 index d747d1083a..0000000000 --- a/static/llms/llms-docs.txt +++ /dev/null @@ -1,48314 +0,0 @@ -This file is a merged representation of a subset of the codebase, containing specifically included files, combined into a single document by Repomix. - -================================================================ -File Summary -================================================================ - -Purpose: --------- -This file contains a packed representation of the entire repository's contents. -It is designed to be easily consumable by AI systems for analysis, code review, -or other automated processes. - -File Format: ------------- -The content is organized as follows: -1. This summary section -2. Repository information -3. Directory structure -4. Multiple file entries, each consisting of: - a. A separator line (================) - b. The file path (File: path/to/file) - c. Another separator line - d. The full contents of the file - e. A blank line - -Usage Guidelines: ------------------ -- This file should be treated as read-only. Any changes should be made to the - original repository files, not this packed version. -- When processing this file, use the file path to distinguish - between different files in the repository. -- Be aware that this file may contain sensitive information. Handle it with - the same level of security as you would the original repository. - -Notes: ------- -- Some files may have been excluded based on .gitignore rules and Repomix's configuration -- Binary files are not included in this packed representation. 
Please refer to the Repository Structure section for a complete list of file paths, including binary files -- Only files matching these patterns are included: docs/ -- Files matching patterns in .gitignore are excluded -- Files matching default ignore patterns are excluded - -Additional Info: ----------------- - -================================================================ -Directory Structure -================================================================ -docs/ - enterprise/ - auth-changing-passwords.md - auth-configuring-rbac.md - auth-identity-provider.md - cluster-management-add-nodes.md - delete-admin-console.md - embedded-manage-nodes.mdx - embedded-tls-certs.mdx - gitops-managing-secrets.mdx - gitops-workflow.mdx - image-registry-kurl.md - image-registry-rate-limits.md - image-registry-settings.mdx - installing-embedded-air-gap.mdx - installing-embedded-automation.mdx - installing-embedded-requirements.mdx - installing-embedded.mdx - installing-existing-cluster-airgapped.mdx - installing-existing-cluster-automation.mdx - installing-existing-cluster.mdx - installing-general-requirements.mdx - installing-kurl-airgap.mdx - installing-kurl-automation.mdx - installing-kurl-requirements.mdx - installing-kurl.mdx - installing-overview.md - installing-stateful-component-requirements.md - monitoring-access-dashboards.mdx - monitoring-applications.mdx - monitoring-external-prometheus.md - sbom-validating.md - snapshots-config-workflow.md - snapshots-configuring-hostpath.md - snapshots-configuring-nfs.md - snapshots-creating.md - snapshots-restoring-full.mdx - snapshots-storage-destinations.md - snapshots-troubleshooting-backup-restore.md - snapshots-updating-with-admin-console.md - snapshots-velero-cli-installing.md - snapshots-velero-installing-config.mdx - status-viewing-details.md - troubleshooting-an-app.mdx - updating-app-manager.mdx - updating-apps.mdx - updating-embedded.mdx - updating-kurl-about.mdx - updating-kurl.mdx - updating-licenses.md - updating-patching-with-kustomize.md - updating-tls-cert.md - partials/ - airgap/ - _airgap-bundle.mdx - application-links/ - _nginx-deployment.mdx - _nginx-k8s-app.mdx - _nginx-kots-app.mdx - _nginx-service.mdx - ci-cd/ - _build-source-code.mdx - _test-recs.mdx - cmx/ - _openshift-pool.mdx - _overview.mdx - _prerequisites.mdx - _supported-clusters-overview.mdx - collab-repo/ - _collab-existing-user.mdx - _collab-rbac-important.mdx - _collab-rbac-resources-important.mdx - _collab-repo-about.mdx - config/ - _affixExample.mdx - _defaultExample.mdx - _helpTextExample.mdx - _hiddenExample.mdx - _item-types.mdx - _nameExample.mdx - _property-when.mdx - _randomStringNote.mdx - _readonlyExample.mdx - _recommendedExample.mdx - _regexValidationExample.mdx - _requiredExample.mdx - _typeExample.mdx - _valueExample.mdx - _when-note.mdx - _when-requirements.mdx - _whenExample.mdx - configValues/ - _boolExample.mdx - _config-values-procedure.mdx - _configValuesExample.mdx - _fileExample.mdx - _passwordExample.mdx - _selectOneExample.mdx - _textareaExample.mdx - _textExample.mdx - custom-domains/ - _wizard.mdx - custom-resource-application/ - _additionalImages.mdx - _additionalNamespaces.mdx - _allowRollback.mdx - _graphs-templates.mdx - _graphs.mdx - _icon.mdx - _minKotsVersion.mdx - _ports-applicationURL.mdx - _ports-kurl-note.mdx - _ports-localPort.mdx - _ports-serviceName.mdx - _ports-servicePort.mdx - _ports.mdx - _proxyRegistryDomain.mdx - _releaseNotes.mdx - _replicatedRegistryDomain.mdx - _requireMinimalRBACPrivileges.mdx - _servicePort-note.mdx 
- _statusInformers.mdx - _supportMinimalRBACPrivileges.mdx - _targetKotsVersion.mdx - _title.mdx - customers/ - _change-channel.mdx - _download.mdx - embedded-cluster/ - _definition.mdx - _ec-config.mdx - _multi-node-ha-arch.mdx - _port-reqs.mdx - _proxy-install-limitations.mdx - _proxy-install-reqs.mdx - _requirements.mdx - _update-air-gap-admin-console.mdx - _update-air-gap-cli.mdx - _update-air-gap-overview.mdx - _update-overview.mdx - _warning-do-not-downgrade.mdx - getting-started/ - _create-promote-release.mdx - _csdl-overview.mdx - _gitea-ec-config.mdx - _gitea-helmchart-cr-ec.mdx - _gitea-helmchart-cr.mdx - _gitea-k8s-app-cr.mdx - _gitea-kots-app-cr-ec.mdx - _gitea-kots-app-cr.mdx - _grafana-config.mdx - _grafana-helmchart.mdx - _grafana-k8s-app.mdx - _grafana-kots-app.mdx - _kubernetes-training.mdx - _labs-intro.mdx - _related-topics.mdx - _replicated-definition.mdx - _test-your-changes.mdx - _tutorial-intro.mdx - _vm-requirements.mdx - gitops/ - _gitops-not-recommended.mdx - helm/ - _gitops-limitation.mdx - _helm-builder-requirements.mdx - _helm-cr-builder-airgap-intro.mdx - _helm-cr-builder-example.mdx - _helm-cr-chart-name.mdx - _helm-cr-chart-release-name.mdx - _helm-cr-chart-version.mdx - _helm-cr-chart.mdx - _helm-cr-exclude.mdx - _helm-cr-namespace.mdx - _helm-cr-optional-values-recursive-merge.mdx - _helm-cr-optional-values-when.mdx - _helm-cr-optional-values.mdx - _helm-cr-upgrade-flags.mdx - _helm-cr-values.mdx - _helm-cr-weight-limitation.mdx - _helm-cr-weight.mdx - _helm-definition.mdx - _helm-install-beta.mdx - _helm-install-prereqs.mdx - _helm-package.mdx - _helm-template-limitation.mdx - _helm-version-limitation.mdx - _hook-weights-limitation.mdx - _hooks-limitation.mdx - _installer-only-annotation.mdx - _kots-helm-cr-description.mdx - _replicated-deprecated.mdx - _replicated-helm-migration.mdx - _set-values-config-example.mdx - _set-values-license-example.mdx - _v2-native-helm-cr-example.mdx - image-registry/ - _docker-compatibility.mdx - _image-registry-settings.mdx - install/ - _access-admin-console.mdx - _airgap-bundle-build.mdx - _airgap-bundle-download.mdx - _airgap-bundle-view-contents.mdx - _airgap-license-download.mdx - _automation-intro-embedded.mdx - _automation-intro-existing.mdx - _config-values-procedure.mdx - _download-kotsadm-bundle.mdx - _download-kurl-bundle.mdx - _ec-prereqs.mdx - _embedded-ha-step.mdx - _embedded-login-password.mdx - _extract-kurl-bundle.mdx - _firewall-openings-intro.mdx - _firewall-openings.mdx - _ha-load-balancer-about.mdx - _ha-load-balancer-prereq.mdx - _install-kots-cli-airgap.mdx - _install-kots-cli.mdx - _intro-air-gap.mdx - _intro-embedded.mdx - _intro-existing.mdx - _kots-airgap-version-match.mdx - _kots-install-prompts.mdx - _kubernetes-compatibility.mdx - _kurl-about.mdx - _license-file-prereq.mdx - _placeholder-airgap-bundle.mdx - _placeholder-app-name-UI.mdx - _placeholder-namespace-embedded.mdx - _placeholder-namespace-existing.mdx - _placeholder-ro-creds.mdx - _placeholders-global.mdx - _prereqs-embedded-cluster.mdx - _prereqs-existing-cluster.mdx - _provision-cluster-intro.mdx - _push-kotsadm-images.mdx - instance-insights/ - _airgap-telemetry.mdx - _notifications-about.mdx - _supported-resources-status.mdx - kots/ - _admin-console-about.mdx - _download-portal-about.mdx - _embedded-kubernetes-definition.mdx - _kots-definition.mdx - _kots-entitlement-note.mdx - kots-cli/ - _ensure-rbac.mdx - _help.mdx - _kotsadm-namespace.mdx - _kotsadm-registry.mdx - _registry-password.mdx - _registry-username.mdx - 
_skip-rbac-check.mdx - _strict-sec-context-yaml.mdx - _strict-security-context.mdx - _use-minimal-rbac.mdx - _wait-duration.mdx - _with-minio.mdx - kurl/ - _installers.mdx - _kurl-availability.mdx - _kurl-definition.mdx - linter-rules/ - _allow-privilege-escalation.mdx - _application-icon.mdx - _application-spec.mdx - _application-statusInformers.mdx - _config-option-invalid-regex-validator.mdx - _config-option-invalid-type.mdx - _config-option-is-circular.mdx - _config-option-password-type.mdx - _config-option-regex-validator-invalid-type.mdx - _config-spec.mdx - _container-image-latest-tag.mdx - _container-image-local-image-name.mdx - _container-resource-limits.mdx - _container-resource-requests.mdx - _container-resources.mdx - _deprecated-kubernetes-installer-version.mdx - _hardcoded-namespace.mdx - _invalid_type.mdx - _invalid-helm-release-name.mdx - _invalid-kubernetes-installer.mdx - _invalid-min-kots-version.mdx - _invalid-rendered-yaml.mdx - _invalid-target-kots-version.mdx - _invalid-yaml.mdx - _linter-definition.mdx - _may-contain-secrets.mdx - _missing-api-version-field.mdx - _missing-kind-field.mdx - _preflight-spec.mdx - _privileged.mdx - _repeat-option-malformed-yamlpath.mdx - _repeat-option-missing-template.mdx - _repeat-option-missing-valuesByGroup.mdx - _replicas-1.mdx - _resource-limits-cpu.mdx - _resource-limits-memory.mdx - _resource-requests-cpu.mdx - _resource-requests-memory.mdx - _troubleshoot-spec.mdx - _volume-docker-sock.mdx - _volumes-host-paths.mdx - monitoring/ - _limitation-ec.mdx - _overview-prom.mdx - preflights/ - _analyzers-note.mdx - _http-requests-cr.mdx - _http-requests-secret.mdx - _k8s-distro-cr.mdx - _k8s-distro-secret.mdx - _k8s-version-cr.mdx - _k8s-version-secret.mdx - _mysql-cr.mdx - _mysql-secret.mdx - _node-count-secret.mdx - _node-cpu-cr.mdx - _node-cpu-secret.mdx - _node-ephem-storage-cr.mdx - _node-ephem-storage-secret.mdx - _node-mem-cr.mdx - _node-mem-secret.mdx - _node-req-cr.mdx - _node-req-secret.mdx - _node-storage-cr.mdx - _node-storage-secret.mdx - _preflight-sb-helm-templates.mdx - _preflights-add-analyzers.mdx - _preflights-define-xref.mdx - _preflights-define.mdx - _preflights-sb-about.mdx - _preflights-sb-note.mdx - _preflights-spec-locations.mdx - _preflights-strict.mdx - proxy-service/ - _step-creds.mdx - _step-custom-domain.mdx - redactors/ - _redactors-about.mdx - releases/ - _required-releases-description.mdx - _required-releases-limitations.mdx - _version-label-reqs-helm.mdx - replicated-cli/ - _app.mdx - _authorize-with-token-note.mdx - _authtype.mdx - _chart-yaml-dir-reqs.mdx - _help.mdx - _login.mdx - _logout.mdx - _output.mdx - _password-stdin.mdx - _password.mdx - _skip-validation.mdx - _sudo-install.mdx - _token-stdin.mdx - _token.mdx - _username.mdx - _verify-install.mdx - _yaml-dir.mdx - replicated-sdk/ - _401-unauthorized.mdx - _dependency-yaml.mdx - _integration-mode-install.mdx - _kots-version-req.mdx - _overview.mdx - _registry-logout.mdx - _sdk-values.mdx - snapshots/ - _checkVersion.mdx - _installVelero.mdx - _limitation-cli-restores.mdx - _limitation-dr.mdx - _limitation-install-method.mdx - _limitation-no-ec-support.mdx - _limitation-os.mdx - _node-agent-mem-limit.mdx - _registryCredentialsNote.mdx - _resticDaemonSet.mdx - _restore-types.mdx - _restoreTable.mdx - _step-get-backups.mdx - _step-restore.mdx - _updateDefaultStorage.mdx - status-informers/ - _aggregate-status-intro.mdx - _aggregateStatus.mdx - _statusesTable.mdx - support-bundles/ - _configmap-note.mdx - _customize-support-bundle-spec.mdx - 
_deploy-status-cr.mdx - _deploy-status-secret.mdx - _ec-support-bundle-intro.mdx - _generate-bundle-admin-console.mdx - _generate-bundle-default-kots.mdx - _generate-bundle-ec.mdx - _generate-bundle-host.mdx - _generate-bundle.mdx - _http-requests-cr.mdx - _http-requests-secret.mdx - _install-plugin.mdx - _k8s-version-cr.mdx - _k8s-version-secret.mdx - _logs-limits-cr.mdx - _logs-limits-secret.mdx - _logs-selectors-cr.mdx - _logs-selectors-secret.mdx - _node-resources-cr.mdx - _node-resources-secret.mdx - _node-status-cr.mdx - _node-status-secret.mdx - _redis-mysql-cr.mdx - _redis-mysql-secret.mdx - _run-pods-cr.mdx - _run-pods-secret.mdx - _support-bundle-add-analyzers.mdx - _support-bundle-add-logs.mdx - _support-bundle-custom-collectors.mdx - template-functions/ - _go-sprig.mdx - _integer-comparison.mdx - _ne-comparison.mdx - _string-comparison.mdx - _use-cases.mdx - updating/ - _admin-console-air-gap.mdx - _admin-console.mdx - _installerRequirements.mdx - _upgradePrompt.mdx - vendor-api/ - _api-about.mdx - _team-token-note.mdx - reference/ - cron-expressions.md - custom-resource-about.md - custom-resource-application.mdx - custom-resource-backup.md - custom-resource-config.mdx - custom-resource-helmchart-v2.mdx - custom-resource-helmchart.mdx - custom-resource-identity.md - custom-resource-lintconfig.mdx - custom-resource-preflight.md - custom-resource-redactor.md - embedded-cluster-install.mdx - embedded-config.mdx - kots-cli-admin-console-garbage-collect-images.md - kots-cli-admin-console-generate-manifests.mdx - kots-cli-admin-console-index.md - kots-cli-admin-console-push-images.md - kots-cli-admin-console-upgrade.mdx - kots-cli-backup-index.md - kots-cli-backup-ls.md - kots-cli-docker-ensure-secret.md - kots-cli-docker-index.md - kots-cli-download.md - kots-cli-enable-ha.md - kots-cli-get-apps.md - kots-cli-get-backups.md - kots-cli-get-config.md - kots-cli-get-index.md - kots-cli-get-restores.md - kots-cli-get-versions.md - kots-cli-getting-started.md - kots-cli-global-flags.md - kots-cli-identity-service-enable-shared-password.md - kots-cli-identity-service-index.md - kots-cli-install.mdx - kots-cli-pull.md - kots-cli-remove.md - kots-cli-reset-password.md - kots-cli-reset-tls.md - kots-cli-restore-index.md - kots-cli-restore-ls.md - kots-cli-set-config.mdx - kots-cli-set-index.md - kots-cli-upload.mdx - kots-cli-upstream-download.md - kots-cli-upstream-upgrade.mdx - kots-cli-upstream.md - kots-cli-velero-configure-aws-s3.md - kots-cli-velero-configure-azure.md - kots-cli-velero-configure-gcp.md - kots-cli-velero-configure-hostpath.mdx - kots-cli-velero-configure-internal.md - kots-cli-velero-configure-nfs.mdx - kots-cli-velero-configure-other-s3.mdx - kots-cli-velero-ensure-permissions.md - kots-cli-velero-index.md - kots-cli-velero-print-fs-instructions.md - linter.mdx - replicated-cli-api-get.mdx - replicated-cli-api-patch.mdx - replicated-cli-api-post.mdx - replicated-cli-api-put.mdx - replicated-cli-api.mdx - replicated-cli-app-create.mdx - replicated-cli-app-ls.mdx - replicated-cli-app-rm.mdx - replicated-cli-app.mdx - replicated-cli-channel-create.mdx - replicated-cli-channel-demote.mdx - replicated-cli-channel-disable-semantic-versioning.mdx - replicated-cli-channel-enable-semantic-versioning.mdx - replicated-cli-channel-inspect.mdx - replicated-cli-channel-ls.mdx - replicated-cli-channel-rm.mdx - replicated-cli-channel-un-demote.mdx - replicated-cli-channel.mdx - replicated-cli-cluster-addon-create-object-store.mdx - replicated-cli-cluster-addon-create.mdx - 
replicated-cli-cluster-addon-ls.mdx - replicated-cli-cluster-addon-rm.mdx - replicated-cli-cluster-addon.mdx - replicated-cli-cluster-create.mdx - replicated-cli-cluster-kubeconfig.mdx - replicated-cli-cluster-ls.mdx - replicated-cli-cluster-nodegroup-ls.mdx - replicated-cli-cluster-nodegroup.mdx - replicated-cli-cluster-port-expose.mdx - replicated-cli-cluster-port-ls.mdx - replicated-cli-cluster-port-rm.mdx - replicated-cli-cluster-port.mdx - replicated-cli-cluster-prepare.mdx - replicated-cli-cluster-rm.mdx - replicated-cli-cluster-shell.mdx - replicated-cli-cluster-update-nodegroup.mdx - replicated-cli-cluster-update-ttl.mdx - replicated-cli-cluster-update.mdx - replicated-cli-cluster-upgrade.mdx - replicated-cli-cluster-versions.mdx - replicated-cli-cluster.mdx - replicated-cli-completion.mdx - replicated-cli-customer-archive.mdx - replicated-cli-customer-create.mdx - replicated-cli-customer-download-license.mdx - replicated-cli-customer-inspect.mdx - replicated-cli-customer-ls.mdx - replicated-cli-customer-update.mdx - replicated-cli-customer.mdx - replicated-cli-default-clear-all.mdx - replicated-cli-default-clear.mdx - replicated-cli-default-set.mdx - replicated-cli-default-show.mdx - replicated-cli-default.mdx - replicated-cli-installer-create.mdx - replicated-cli-installer-ls.mdx - replicated-cli-installer.mdx - replicated-cli-installing.mdx - replicated-cli-instance-inspect.mdx - replicated-cli-instance-ls.mdx - replicated-cli-instance-tag.mdx - replicated-cli-instance.mdx - replicated-cli-login.mdx - replicated-cli-logout.mdx - replicated-cli-registry-add-dockerhub.mdx - replicated-cli-registry-add-ecr.mdx - replicated-cli-registry-add-gar.mdx - replicated-cli-registry-add-gcr.mdx - replicated-cli-registry-add-ghcr.mdx - replicated-cli-registry-add-other.mdx - replicated-cli-registry-add-quay.mdx - replicated-cli-registry-add.mdx - replicated-cli-registry-ls.mdx - replicated-cli-registry-rm.mdx - replicated-cli-registry-test.mdx - replicated-cli-registry.mdx - replicated-cli-release-compatibility.mdx - replicated-cli-release-create.mdx - replicated-cli-release-download.mdx - replicated-cli-release-inspect.mdx - replicated-cli-release-lint.mdx - replicated-cli-release-ls.mdx - replicated-cli-release-promote.mdx - replicated-cli-release-test.mdx - replicated-cli-release-update.mdx - replicated-cli-release.mdx - replicated-cli-version-upgrade.mdx - replicated-cli-version.mdx - replicated-cli-vm-create.mdx - replicated-cli-vm-ls.mdx - replicated-cli-vm-port-expose.mdx - replicated-cli-vm-port-ls.mdx - replicated-cli-vm-port-rm.mdx - replicated-cli-vm-port.mdx - replicated-cli-vm-rm.mdx - replicated-cli-vm-update-ttl.mdx - replicated-cli-vm-update.mdx - replicated-cli-vm-versions.mdx - replicated-cli-vm.mdx - replicated-sdk-apis.md - replicated.mdx - template-functions-about.mdx - template-functions-config-context.md - template-functions-examples.mdx - template-functions-identity-context.md - template-functions-kurl-context.md - template-functions-license-context.md - template-functions-static-context.md - vendor-api-using.md - vendor/ - admin-console-adding-buttons-links.mdx - admin-console-customize-app-icon.md - admin-console-customize-config-screen.md - admin-console-display-app-status.md - admin-console-port-forward.mdx - admin-console-prometheus-monitoring.mdx - ci-overview.md - ci-workflows-github-actions.md - ci-workflows.mdx - compatibility-matrix-usage.md - config-screen-about.md - config-screen-conditional.mdx - config-screen-map-inputs.md - custom-domains-using.md - 
custom-domains.md - custom-metrics.md - customer-adoption.md - customer-reporting.md - data-availability.md - database-config-adding-options.md - embedded-disaster-recovery.mdx - embedded-overview.mdx - embedded-using.mdx - helm-image-registry.mdx - helm-install-airgap.mdx - helm-install-overview.mdx - helm-install-release.md - helm-install-troubleshooting.mdx - helm-install-values-schema.mdx - helm-native-about.mdx - helm-native-v2-using.md - helm-optional-charts.md - helm-optional-value-keys.md - helm-packaging-airgap-bundles.mdx - helm-v2-migrate.md - identity-service-configuring.md - insights-app-status.md - install-with-helm.mdx - installer-history.mdx - instance-data-export.md - instance-insights-details.md - instance-insights-event-data.mdx - instance-notifications-config.mdx - kots-faq.mdx - kurl-about.mdx - kurl-nodeport-services.mdx - kurl-reset.mdx - licenses-about-types.md - licenses-about.mdx - licenses-adding-custom-fields.md - licenses-download.md - licenses-install-types.mdx - licenses-reference-helm.md - licenses-reference-kots-runtime.mdx - licenses-reference-sdk.mdx - licenses-referencing-fields.md - licenses-using-builtin-fields.mdx - licenses-verify-fields-sdk-api.md - namespaces.md - offsite-backup.md - operator-defining-additional-images.mdx - operator-defining-additional-namespaces.md - operator-packaging-about.md - operator-referencing-images.md - orchestrating-resource-deployment.md - packaging-air-gap-excluding-minio.md - packaging-cleaning-up-jobs.md - packaging-embedded-kubernetes.mdx - packaging-include-resources.md - packaging-ingress.md - packaging-installer-storage.mdx - packaging-kots-versions.md - packaging-private-images.md - packaging-private-registry-security.md - packaging-public-images.mdx - packaging-rbac.md - packaging-using-tls-certs.mdx - planning-questionnaire.md - policies-data-transmission.md - policies-infrastructure-and-subprocessors.md - policies-support-lifecycle.md - policies-vulnerability-patch.md - preflight-defining.mdx - preflight-examples.mdx - preflight-host-preflights.md - preflight-running.md - preflight-sb-helm-templates-about.md - preflight-support-bundle-about.mdx - private-images-about.md - private-images-kots.mdx - private-images-replicated.mdx - private-images-tags-digests.md - quick-start.mdx - releases-about.mdx - releases-creating-channels.md - releases-creating-cli.mdx - releases-creating-customer.mdx - releases-creating-releases.mdx - releases-share-download-portal.md - releases-sharing-license-install-script.mdx - replicated-api-tokens.md - replicated-onboarding.mdx - replicated-sdk-airgap.mdx - replicated-sdk-customizing.md - replicated-sdk-development.mdx - replicated-sdk-installing.mdx - replicated-sdk-overview.mdx - replicated-sdk-slsa-validating.md - resources-annotations-templating.md - snapshots-configuring-backups.md - snapshots-hooks.md - snapshots-overview.mdx - support-bundle-customizing.mdx - support-bundle-embedded.mdx - support-bundle-examples.mdx - support-bundle-generating.mdx - support-enabling-direct-bundle-uploads.md - support-host-support-bundles.md - support-inspecting-support-bundles.md - support-modular-support-bundle-specs.md - support-online-support-bundle-specs.md - support-submit-request.md - team-management-github-username.mdx - team-management-google-auth.md - team-management-rbac-configuring.md - team-management-rbac-resource-names.md - team-management-saml-auth.md - team-management-slack-config.mdx - team-management-two-factor-auth.md - team-management.md - telemetry-air-gap.mdx - 
testing-about.md - testing-cluster-addons.md - testing-how-to.md - testing-ingress.md - testing-pricing.mdx - testing-supported-clusters.md - tutorial-adding-db-config.md - tutorial-cli-create-app.mdx - tutorial-cli-create-customer.mdx - tutorial-cli-create-new-version.mdx - tutorial-cli-create-release.mdx - tutorial-cli-deploy-app.mdx - tutorial-cli-install-app-manager.mdx - tutorial-cli-install-cli.mdx - tutorial-cli-manifests.mdx - tutorial-cli-setup.mdx - tutorial-cli-update-app.mdx - tutorial-config-create-app.md - tutorial-config-create-customer.md - tutorial-config-create-release.md - tutorial-config-get-chart.md - tutorial-config-install-kots.md - tutorial-config-package-chart.md - tutorial-config-setup.md - tutorial-ecr-private-images.md - tutorial-embedded-cluster-create-app.mdx - tutorial-embedded-cluster-create-customer.mdx - tutorial-embedded-cluster-create-release.mdx - tutorial-embedded-cluster-install.mdx - tutorial-embedded-cluster-package-chart.mdx - tutorial-embedded-cluster-setup.mdx - tutorial-kots-helm-create-app.md - tutorial-kots-helm-create-customer.md - tutorial-kots-helm-create-release.md - tutorial-kots-helm-get-chart.md - tutorial-kots-helm-install-helm.md - tutorial-kots-helm-install-kots.md - tutorial-kots-helm-package-chart.md - tutorial-kots-helm-setup.md - tutorial-preflight-helm-add-spec.mdx - tutorial-preflight-helm-create-customer.mdx - tutorial-preflight-helm-create-release.mdx - tutorial-preflight-helm-get-chart.mdx - tutorial-preflight-helm-install-kots.mdx - tutorial-preflight-helm-install.mdx - tutorial-preflight-helm-setup.mdx - using-third-party-registry-proxy.mdx - vendor-portal-application-settings.md - vendor-portal-creating-account.md - vendor-portal-manage-app.md - intro-kots.mdx - intro-replicated.mdx - intro.md - -================================================================ -Files -================================================================ - -================ -File: docs/enterprise/auth-changing-passwords.md -================ -# Changing an Admin Console Password - -When you install for the first time with Replicated kURL, the Replicated KOTS Admin Console is secured with a single shared password that is set automatically for all users. Replicated recommends that you change this to a new, unique password for security purposes as this automated password is displayed to the user in plain text. - -The Admin Console password is salted and one-way hashed using bcrypt. The irreversible hash is stored in a Secret named `kotsadm-password`. The password is not retrievable if lost. If you lose your Admin Console password, reset your password to access the Admin Console. - -For more information about bcrypt, see [bcrypt](https://en.wikipedia.org/wiki/Bcrypt) on Wikipedia. - -:::note -Users with Identity Provider (IDP) access cannot change their password using this procedure. If an attempt is made, IDP users receive a message in the user interface to contact the identity service provider to change their password. For more information about resetting an IDP user password, see [Resetting Authentication](auth-identity-provider#resetting-authentication) in _Using an Identity Provider for User Access (Beta)_. -::: - -To change your Admin Console password: - -1. Log in to the Admin Console using your current password. -1. In the drop-down in the top right of any page, click **Change password**. -1. In the Change Admin Console Password dialog, edit the fields. 
- - - The new password must be at least 6 characters and must not be the same as your current password. - - The **New Password** and **Confirm New Password** fields must match each other. - -1. Click **Change Password**. - - If there are any issues with changing the password, an error message displays the specific problem. - - When the password change succeeds, the current session closes and you are redirected to the Log In page. - -1. Log in with the new password. - -================ -File: docs/enterprise/auth-configuring-rbac.md -================ -# Configuring Role-based Access Control (Beta) - -You can regulate access to the Replicated KOTS Admin Console resources based on the roles of individual users within your organization. - -To configure role based access control (RBAC) for the Admin Console: -1. Go to the **Access** page. Under **Role Based Access Control Group Policy**, click **Add a group**. -1. Enter a group name that matches one of the group names already established with your identity provider. -1. Choose one of the pre-defined Admin Console roles to be assigned to that group. For a list of Admin Console roles, see [Admin Console roles](#admin-console-roles) below. -1. Click **Add group**. - -![Role Based Access Control](/images/identity-service-kotsadm-rbac.png) - -## Admin Console Roles - -The Admin Console comes with pre-defined identity service roles that can be assigned to groups when you configure RBAC for the Admin Console. - -- **Read Access:** This role has read permissions to all resources. - -- **Write Access:** This role has write permissions to all resources. - -## Support Roles - -- **Read Access:** This role has read permissions to all resources except the application's file tree. - -- **Write Access:** This role has write permissions to the following resources: - - * Support bundles - * Preflight checks - -================ -File: docs/enterprise/auth-identity-provider.md -================ -# Using an Identity Provider for User Access (Beta) - -When you install an application for the first time, the Replicated KOTS Admin Console is secured with a single shared password for all users. It is possible to further configure the Admin Console to authenticate users with your organization's user management system. This feature is only available for licenses that have the Replicated identity service feature enabled. - -Replicated KOTS leverages the open source project Dex as an intermediary to control access to the Admin Console. Dex implements an array of protocols for querying other user-management systems, known as connectors. For more information, see the [Dex documentation](https://dexidp.io/docs/). - -The identity service has the following limitations: -* Only available for installations in a cluster created by Replicated kURL. -* Only available through the Admin Console. - -## Prerequisite - -When you are installing the Admin Console and setting up TLS certificates on the HTTPS page, you must configure the hostname to use to access the Admin Console. The hostname is required whether you are using the identity service with either a self-signed certificate or a custom certificate. For more information about configuring the hostname field, see [Install and Deploy the Application](installing-kurl#install-app) in _Online Installation with kURL_. - -## Configuration - -To begin, click the **Access** tab at the top of the Admin Console. -Here you can configure access to the Admin Console, integrating with one of the supported identity providers. 
- -![Configure Identity Provider](/images/access-identity.png) - -## Supported Providers - -**OpenID Connect:** For more information, see the [OpenID Connect documentation](https://openid.net/connect/). - -## Resetting Authentication - -When you enable identity provider access to the Admin Console, shared password authentication is disabled. -If you want to re-enable the shared password authentication, run the `kubectl kots identity-service enable-shared-password --namespace [namespace]` command. For more information, see [identity-service enable-shared-password](/reference/kots-cli-identity-service-enable-shared-password/) in the KOTS CLI documentation. - -================ -File: docs/enterprise/cluster-management-add-nodes.md -================ -import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" - -# Adding Nodes to kURL Clusters - -<KurlAvailability/> - -This topic describes how to add primary and secondary nodes to a Replicated kURL cluster. - -## Overview - -You can generate commands in the Replicated KOTS Admin Console to join additional primary and secondary nodes to kURL clusters. Primary nodes run services that control the cluster. Secondary nodes run services that control the pods that host the application containers. Adding nodes can help manage resources to ensure that the application runs smoothly. - -For high availability clusters, Kubernetes recommends using at least three primary nodes, and that you use an odd number of nodes to help with leader selection if machine or zone failure occurs. For more information, see [Creating Highly Available Clusters with kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/) in the Kubernetes documentation. - -## Join Primary and Secondary Nodes - -You can join primary and secondary nodes on the Admin Console **Cluster management** page. - -To add primary and secondary nodes: - -1. (Air Gap Only) For air gapped environments, download and extract the `.tar.gz` bundle on the remote node before running the join command. -1. In the Admin Console, click **Cluster Management > Add a node**. -1. Copy the command that displays in the text box and run it on the node that you are joining to the cluster. - - ![Join node in Admin Console](/images/join-node.png) - - [View a larger image](/images/join-node.png) - -================ -File: docs/enterprise/delete-admin-console.md -================ -# Deleting the Admin Console and Removing Applications - -This topic describes how to remove installed applications and delete the Replicated KOTS Admin Console. The information in this topic applies to existing cluster installations with KOTS. - -## Remove an Application - -The Replicated KOTS CLI `kots remove` command removes the reference to an installed application from the Admin Console. When you use `kots remove`, the Admin Console no longer manages the application because the record of that application’s installation is removed. This means that you can no longer manage the application through the Admin Console or through the KOTS CLI. - -By default, `kots remove` does not delete any of the installed Kubernetes resources for the application from the cluster. To remove both the reference to an application from the Admin Console and remove any resources for the application from the cluster, you can run `kots remove` with the `--undeploy` flag. 
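-
-For illustration, a minimal sketch of both forms, assuming a hypothetical app slug `myapp` with the Admin Console installed in the `default` namespace:
-
-```
-# Remove only the Admin Console's reference to the app; deployed resources stay in the cluster
-kubectl kots remove myapp -n default
-
-# Remove the reference and also delete the app's resources from the cluster
-kubectl kots remove myapp -n default --undeploy
-```
-
-The procedure below shows the same commands with generic placeholders.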
- -It can be useful to remove only the reference to an application from the Admin Console if you want to reinstall the application, but you do not want to recreate the namespace or other Kubernetes resources. For example, if you installed an application using an incorrect license file and need to reinstall with the correct license. - -To remove an application: - -1. Run the following command to list the installed applications for a namespace: - ``` - kubectl kots get apps -n NAMESPACE - ``` - Replace `NAMESPACE` with the name of the namespace where the Admin Console is installed. - - In the output of this command, note the slug for the application that you want to remove. - -1. Run _one_ of the following commands: - - * Remove only the reference to the application from the Admin Console: - - ``` - kubectl kots remove APP_SLUG -n NAMESPACE - ``` - Replace: - * `APP_SLUG` with the slug for the application that you want to remove. - * `NAMESPACE` with the name of the namespace where the Admin Console is installed. - - * Remove the reference to the application from the Admin Console and remove its resources from the cluster: - - ``` - kubectl kots remove APP_SLUG -n NAMESPACE --undeploy - ``` - - :::note - Optionally, use the `--force` flag to remove the application reference from the Admin Console when the application has already been deployed. The `--force` flag is implied when `--undeploy` is used. For more information, see [remove](/reference/kots-cli-remove) in _KOTS CLI_. - ::: - - -## Delete the Admin Console - -When you install an application, KOTS creates the Kubernetes resources for the Admin Console itself on the cluster. The Admin Console includes Deployments and Services, Secrets, and other resources such as StatefulSets and PersistentVolumeClaims. - -By default, KOTS also creates Kubernetes ClusterRole and ClusterRoleBinding resources that grant permissions to the Admin Console on the cluster level. These `kotsadm-role` and `kotsadm-rolebinding` resources are managed outside of the namespace where the Admin Console is installed. Alternatively, when the Admin Console is installed with namespace-scoped access, KOTS creates Role and RoleBinding resources inside the namespace where the Admin Console is installed. - -In existing cluster installations, if the Admin Console is not installed in the `default` namespace, then you delete the Admin Console by deleting the namespace where it is installed. - -If you installed the Admin Console with namespace-scoped access, then the Admin Console Role and RoleBinding RBAC resources are also deleted when you delete the namespace. Alternatively, if you installed with the default cluster-scoped access, then you manually delete the Admin Console ClusterRole and ClusterRoleBindings resources from the cluster. For more information, see [supportMinimalRBACPrivileges](/reference/custom-resource-application#supportminimalrbacprivileges) and [requireMinimalRBACPrivileges](/reference/custom-resource-application#requireminimalrbacprivileges) in _Application_. - -For more information about installing with cluster- or namespace-scoped access, see [RBAC Requirements](/enterprise/installing-general-requirements#rbac-requirements) in _Installation Requirements_. - -To completely delete the Admin Console from an existing cluster: - -1. 
Run the following command to delete the namespace where the Admin Console is installed:
-
-   :::important
-   This command deletes everything inside the specified namespace, including the Admin Console Role and RoleBinding resources if you installed with namespace-scoped access.
-   :::
-
-   ```
-   kubectl delete ns NAMESPACE
-   ```
-   Replace `NAMESPACE` with the name of the namespace where the Admin Console is installed.
-
-   :::note
-   You cannot delete the `default` namespace.
-   :::
-
-1. (Cluster-scoped Access Only) If you installed the Admin Console with the default cluster-scoped access, run the following commands to delete the Admin Console ClusterRole and ClusterRoleBinding from the cluster:
-
-   ```
-   kubectl delete clusterrole kotsadm-role
-   ```
-
-   ```
-   kubectl delete clusterrolebinding kotsadm-rolebinding
-   ```
-
-1. (Optional) To uninstall the KOTS CLI, see [Uninstall](https://docs.replicated.com/reference/kots-cli-getting-started#uninstall) in _Installing the KOTS CLI_.
-
-================
-File: docs/enterprise/embedded-manage-nodes.mdx
-================
-import HaArchitecture from "../partials/embedded-cluster/_multi-node-ha-arch.mdx"
-
-# Managing Multi-Node Clusters with Embedded Cluster
-
-This topic describes managing nodes in clusters created with Replicated Embedded Cluster, including how to add nodes and enable high availability for multi-node clusters.
-
-## Limitations
-
-Multi-node clusters with Embedded Cluster have the following limitations:
-
-* Support for multi-node clusters with Embedded Cluster is Beta. Only single-node embedded clusters are Generally Available (GA).
-
-* High availability for Embedded Cluster is an Alpha feature. This feature is subject to change, including breaking changes. To get access to this feature, reach out to Alex Parker at [alexp@replicated.com](mailto:alexp@replicated.com).
-
-* The same Embedded Cluster data directory used at installation is used for all nodes joined to the cluster. This is either the default `/var/lib/embedded-cluster` directory or the directory set with the [`--data-dir`](/reference/embedded-cluster-install#flags) flag. You cannot choose a different data directory for Embedded Cluster when joining nodes.
-
-## Add Nodes to a Cluster (Beta) {#add-nodes}
-
-You can add nodes to create a multi-node cluster in online (internet-connected) and air-gapped (limited or no outbound internet access) environments. The Admin Console provides the join command that you use to join nodes to the cluster.
-
-:::note
-Multi-node clusters are not highly available by default. For information about enabling high availability, see [Enable High Availability for Multi-Node Clusters (Alpha)](#ha) below.
-:::
-
-To add nodes to a cluster:
-
-1. (Optional) In the Embedded Cluster Config, configure the `roles` key to customize node roles. For more information, see [roles](/reference/embedded-config#roles) in _Embedded Cluster Config_. When you are done, create and promote a new release with the updated Config.
-
-1. Do one of the following to get the join command from the Admin Console:
-
-   1. To add nodes during the application installation process, follow the steps in [Online Installation with Embedded Cluster](/enterprise/installing-embedded) or [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap) to install. A **Nodes** screen is displayed as part of the installation flow in the Admin Console that allows you to choose a node role and copy the relevant join command.
-
-   1. 
Otherwise, if you have already installed the application: - - 1. Log in to the Admin Console. - - 1. If you promoted a new release that configures the `roles` key in the Embedded Cluster Config, update the instance to the new version. See [Performing Updates in Embedded Clusters](/enterprise/updating-embedded). - - 1. Go to **Cluster Management > Add node** at the top of the page. - - <img alt="Add node page in the Admin Console" src="/images/admin-console-add-node.png" width="600px"/> - - [View a larger version of this image](/images/admin-console-add-node.png) - -1. Either on the Admin Console **Nodes** screen that is displayed during installation or in the **Add a Node** dialog, select one or more roles for the new node that you will join. Copy the join command. - - Note the following: - - * If the Embedded Cluster Config [roles](/reference/embedded-config#roles) key is not configured, all new nodes joined to the cluster are assigned the `controller` role by default. The `controller` role designates nodes that run the Kubernetes control plane. Controller nodes can also run other workloads, such as application or Replicated KOTS workloads. - - * Roles are not updated or changed after a node is added. If you need to change a node’s role, reset the node and add it again with the new role. - - * For multi-node clusters with high availability (HA), at least three `controller` nodes are required. You can assign both the `controller` role and one or more `custom` roles to the same node. For more information about creating HA clusters with Embedded Cluster, see [Enable High Availability for Multi-Node Clusters (Alpha)](#ha) below. - - * To add non-controller or _worker_ nodes that do not run the Kubernetes control plane, select one or more `custom` roles for the node and deselect the `controller` role. - -1. Do one of the following to make the Embedded Cluster installation assets available on the machine that you will join to the cluster: - - * **For online (internet-connected) installations**: SSH onto the machine that you will join. Then, use the same commands that you ran during installation to download and untar the Embedded Cluster installation assets on the machine. See [Online Installation with Embedded Cluster](/enterprise/installing-embedded). - - * **For air gap installations with limited or no outbound internet access**: On a machine that has internet access, download the Embedded Cluster installation assets (including the air gap bundle) using the same command that you ran during installation. See [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap). Then, move the downloaded assets to the air-gapped machine that you will join, and untar. - - :::important - The Embedded Cluster installation assets on each node must all be the same version. If you use a different version than what is installed elsewhere in the cluster, the cluster will not be stable. To download a specific version of the Embedded Cluster assets, select a version in the **Embedded cluster install instructions** dialog. - ::: - -1. On the machine that you will join to the cluster, run the join command that you copied from the Admin Console. - - **Example:** - - ```bash - sudo ./APP_SLUG join 10.128.0.32:30000 TxXboDstBAamXaPdleSK7Lid - ``` - **Air Gap Example:** - - ```bash - sudo ./APP_SLUG join --airgap-bundle APP_SLUG.airgap 10.128.0.32:30000 TxXboDstBAamXaPdleSK7Lid - ``` - -1. 
In the Admin Console, either on the installation **Nodes** screen or on the **Cluster Management** page, verify that the node appears. Wait for the node's status to change to Ready.
-
-1. Repeat these steps for each node you want to add.
-
-## Enable High Availability for Multi-Node Clusters (Alpha) {#ha}
-
-Multi-node clusters are not highly available by default. The first node of the cluster is special and holds important data for Kubernetes and KOTS, such that the loss of this node would be catastrophic for the cluster. Enabling high availability (HA) requires that at least three controller nodes are present in the cluster. Users can enable HA when joining the third node.
-
-:::important
-High availability for Embedded Cluster is an Alpha feature. This feature is subject to change, including breaking changes. To get access to this feature, reach out to Alex Parker at [alexp@replicated.com](mailto:alexp@replicated.com).
-:::
-
-### HA Architecture
-
-<HaArchitecture/>
-
-For more information about the Embedded Cluster built-in extensions, see [Built-In Extensions](/vendor/embedded-overview#built-in-extensions) in _Embedded Cluster Overview_.
-
-### Requirements
-
-Enabling high availability has the following requirements:
-
-* High availability is supported with Embedded Cluster 1.4.1 or later.
-
-* High availability is supported only for clusters where at least three nodes with the `controller` role are present.
-
-### Limitations
-
-Enabling high availability has the following limitations:
-
-* High availability for Embedded Cluster is an Alpha feature. This feature is subject to change, including breaking changes. To get access to this feature, reach out to Alex Parker at [alexp@replicated.com](mailto:alexp@replicated.com).
-
-* The `--enable-ha` flag serves as a feature flag during the Alpha phase. In the future, the prompt about migrating to high availability will display automatically if the cluster is not yet HA and you are adding the third or subsequent controller node.
-
-* HA multi-node clusters use rqlite to store support bundles up to 100 MB in size. Bundles over 100 MB can cause rqlite to crash and restart.
-
-### Best Practices for High Availability
-
-Consider the following best practices and recommendations for creating HA clusters:
-
-* At least three _controller_ nodes that run the Kubernetes control plane are required for HA. This is because clusters use a quorum system, in which more than half the nodes must be up and reachable. In clusters with three controller nodes, the Kubernetes control plane can continue to operate if one node fails because a quorum can still be reached by the remaining two nodes. By default, with Embedded Cluster, all new nodes added to a cluster are controller nodes. For information about customizing the `controller` node role, see [roles](/reference/embedded-config#roles) in _Embedded Cluster Config_.
-
-* Always use an odd number of controller nodes in HA clusters. Using an odd number of controller nodes ensures that the cluster can make decisions efficiently with quorum calculations. Clusters with an odd number of controller nodes also avoid split-brain scenarios where the cluster runs as two independent groups of nodes, resulting in inconsistencies and conflicts.
-
-* You can have any number of _worker_ nodes in HA clusters. Worker nodes do not run the Kubernetes control plane, but can run workloads such as application or Replicated KOTS workloads.
-
-### Create a Multi-Node HA Cluster
-
-To create a multi-node HA cluster:
-
-1. 
Set up a cluster with at least two controller nodes. You can do an online (internet-connected) or air gap installation. For more information, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded) or [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap). - -1. SSH onto a third node that you want to join to the cluster as a controller. - -1. Run the join command provided in the Admin Console **Cluster Management** tab and pass the `--enable-ha` flag. For example: - - ```bash - sudo ./APP_SLUG join --enable-ha 10.128.0.80:30000 tI13KUWITdIerfdMcWTA4Hpf - ``` - -1. After the third node joins the cluster, type `y` in response to the prompt asking if you want to enable high availability. - - ![high availability command line prompt](/images/embedded-cluster-ha-prompt.png) - [View a larger version of this image](/images/embedded-cluster-ha-prompt.png) - -1. Wait for the migration to complete. - -================ -File: docs/enterprise/embedded-tls-certs.mdx -================ -# Updating Custom TLS Certificates in Embedded Cluster Installations - -This topic describes how to update custom TLS certificates in Replicated Embedded Cluster installations. - -## Update Custom TLS Certificates - -Users can provide custom TLS certificates with Embedded Cluster installations and can update TLS certificates through the Admin Console. - -:::important -Adding the `acceptAnonymousUploads` annotation temporarily creates a vulnerability for an attacker to maliciously upload TLS certificates. After TLS certificates have been uploaded, the vulnerability is closed again. - -Replicated recommends that you complete this upload process quickly to minimize the vulnerability risk. -::: - -To upload a new custom TLS certificate in Embedded Cluster installations: - -1. SSH onto a controller node where Embedded Cluster is installed. Then, run the following command to start a shell so that you can access the cluster with kubectl: - - ```bash - sudo ./APP_SLUG shell - ``` - Where `APP_SLUG` is the unique slug of the installed application. - -1. In the shell, run the following command to restore the ability to upload new TLS certificates by adding the `acceptAnonymousUploads` annotation: - - ```bash - kubectl -n kotsadm annotate secret kotsadm-tls acceptAnonymousUploads=1 --overwrite - ``` - -1. Run the following command to get the name of the kurl-proxy server: - - ```bash - kubectl get pods -A | grep kurl-proxy | awk '{print $2}' - ``` - :::note - This server is named `kurl-proxy`, but is used in both Embedded Cluster and kURL installations. - ::: - -1. Run the following command to delete the kurl-proxy pod. The pod automatically restarts after the command runs. - - ```bash - kubectl delete pods PROXY_SERVER - ``` - - Replace `PROXY_SERVER` with the name of the kurl-proxy server that you got in the previous step. - -1. After the pod has restarted, go to `http://<ip>:30000/tls` in your browser and complete the process in the Admin Console to upload a new certificate. - -================ -File: docs/enterprise/gitops-managing-secrets.mdx -================ -import GitOpsNotRecommended from "../partials/gitops/_gitops-not-recommended.mdx" - -# Managing Secrets with KOTS Auto-GitOps (Alpha) - -<GitOpsNotRecommended/> - -When you enable Auto-GitOps, the Replicated KOTS Admin Console pushes the rendered application manifests to the configured git repository. Application manifests often contain secrets and sensitive information that should not be committed to git. 
- -Replicated KOTS v1.18 introduces an integration with SealedSecrets to encrypt secrets before committing. -This integration is currently alpha and subject to change in future releases of KOTS. For more information, see the [sealed-secrets](https://github.com/bitnami-labs/sealed-secrets) Github repository. - -To enable this integration, a Secret with specific labels must be deployed to the same namespace as the Admin Console. -This secret must contain the SealedSecrets public key and is used by KOTS to replace all Secret objects created by the application and by the Admin Console. - -This Secret must be manually deployed to the same namespace as the Admin Console. There is currently no way to automate or use the Admin Console to configure this functionality. The Secret can be named anything unique that does not conflict with application Secrets. The labels in this example YAML file are important and must be used. - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: kots-sealed-secret - namespace: NAMESPACE - labels: - kots.io/buildphase: secret - kots.io/secrettype: sealedsecrets -data: - cert.pem: SEALED_SECRET_KEY -``` - -Replace: - -- `NAMESPACE` with the namespace where the Admin Console is installed. - -- `SEALED_SECRET_KEY` with the base64 encoded, sealed Secret public key. The sealed Secret public key is included in the sealed Secret controller logs during startup. - - **Example:** - - ```bash - kubectl logs -n kube-system sealed-secrets-controller-7684c7b86c-6bhhw - 2022/04/20 15:49:49 Starting sealed-secrets controller version: 0.17.5 - controller version: 0.17.5 - 2022/04/20 15:49:49 Searching for existing private keys - 2022/04/20 15:49:58 New key written to kube-system/sealed-secrets-keyxmwv2 - 2022/04/20 15:49:58 Certificate is - -----BEGIN CERTIFICATE----- - MIIEzDCCArSgAwIBAgIQIkCjUuODpQV7zK44IB3O9TANBgkqhkiG9w0BAQsFADAA - MB4XDTIyMDQyMDE1NDk1OFoXDTMyMDQxNzE1NDk1OFowADCCAiIwDQYJKoZIhvcN - AQEBBQADggIPADCCAgoCggIBAN0cle8eERYUglhGapLQZWYS078cP9yjOZpoUtXe - mpNE4eLBMo2bDAOopL9YV6TIh2EQMGOr7Njertnf7sKl/1/ZEnIpDw+b/U40LD6o - XMymCrv9GznlsEkaqfGynsY22oamQnHNLIPTYfxUueDqqQFSJN3h1vKZaFi850I4 - y29r+kxX8gGTRmuratGw0Rd4VvHtqi4lDlD9pBToQzbYsbhiySKhClAWC8Hbwzw8 - 4rPamYO8am92jpWIw0liSJUq5urnHR+S0S2P8FlOh7nbCI4ZkmY/Edjxz6ew7yB3 - OFONxlkweD2/KMzquMgOxhxUUdrbBZxXtb6s3MUeF4ENnJ2iL73dgx7O81HTUyu4 - Ok0YK1zqlnj4B683ySV3/RAtHbJJJWJMrLqbjhUNiYf+Ey6wXHJIwqXnjkG4UjP/ - OzrAmZiMa+z/uniUS0M+6siDJuj1FZsN9o1HhwwAWKcEJov2Jlo65gRsaLvalQfr - /VGrHQ1nQ2323hNVIZNKZ6zS6HlJOyOEQ7dcW3XsP1F5gEGkKkgLklOs3jt5OF4i - 2eiimHVnXveXgYZhDudY20ungRnslO2NBpTXgKIDu4YKUXhouQe1LAOkSIdtYSJL - eBFT1cO+rYqNUnffvsv2f9cE0SLp9XQ3VD5Eb+oJCpHc0qZ37/SB3VuDsXW2U/ih - TepxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIAATAPBgNVHRMBAf8EBTADAQH/MB0G - A1UdDgQWBBSvvAr9OTTWZBiCu7+b023YlCL6KzANBgkqhkiG9w0BAQsFAAOCAgEA - oXqAxZUCtZQCv23NMpABnJm2dM3qj5uZRbwqUBxutvlQ6WXKj17dbQ0SoNc2BOKT - 7hpR7wkN9Ic6UrTnx8NUf/CZwHrU+ZXzG8PigOccoP4XBJ6v7k4vOjwpuyr14Jtw - BXxcqbwK/bZPHbjn/N1eZhVyeOZlVE4oE+xbI0s6vJnn2N4tz/YrHB3VBRx9rbtN - WbbparStldRzfGyOXLZsu0eQFfHdGXtYAJP0Hougc26Wz2UEozjczUqFYc7s66Z4 - 1SCXpIpumm+aIKifjzIDPVZ3gDqpZaQYB877mCLVQ0rvfZgw/lVMtnnda+XjWh82 - YUORubKqKIM4OBM9RvaTih6k5En70Xh9ouyYgwE0fbUEvFThADVR5fUE0e7/34sE - oeAONWIZ4sbqewhvKjbYpKOZD7a9GrxCiB5C92WvA1xrI4x6F0EOK0jp16FSNuxN - us9lhAxX4V7HN3KR+O0msygeb/LAE+Vgcr3ZxlNvkIoLY318vKFsGCPgYTXLk5cs - uP2mg/JbTuntXaZTP+gM7hd8enugaUcvyX/AtduTeIXgs7KLLRZW+2M+gq/dlRwl - jCwIzOs3BKuiotGAWACaURFiKhyY+WiEpsIN1H6hswAwY0lcV1rrOeQgg9rfYvoN - 0tXH/eHuyzyHdWt0BX6LLY4cqP2rP5QyP117Vt2i1jY= - -----END 
CERTIFICATE----- - - 2022/04/20 15:49:58 HTTP server serving on :8080 - ... - ``` - -================ -File: docs/enterprise/gitops-workflow.mdx -================ -import GitOpsLimitation from "../partials/helm/_gitops-limitation.mdx" -import GitOpsNotRecommended from "../partials/gitops/_gitops-not-recommended.mdx" - -# KOTS Auto-GitOps Workflow - -<GitOpsNotRecommended/> - -## Overview of the Auto-GitOps Workflow - -The Replicated KOTS Admin Console default workflow is configured to receive updates, show the changes, and deploy the updates to the cluster. You can enable the KOTS Auto-GitOps workflow instead. When using the Auto-GitOps workflow, changes from the Admin Console are pushed to a private Git repository, where an existing CI/CD process can execute the delivery of manifests to the cluster. Changes can include local configuration changes and upstream updates from your vendor (such as application and license updates). - -If you have more than one application installed, you can selectively enable Auto-GitOps for each application. - -After enabling the Auto-GitOps workflow for an application, the Admin Console makes your first commit with the latest available version in the Admin Console. The latest available version is often the current version that is deployed. Subsequently, the Admin Console makes separate commits with any available updates. - -If you configure automatic updates for the application, any updates from your vendor are automatically committed to your Git repository. For more information about configuring automatic updates, see [Configuring Automatic Updates](/enterprise/updating-apps). - -You can change your GitOps settings or disable Auto-GitOps at any time from the **GitOps** tab in the Admin Console. - -## Limitations - -- <GitOpsLimitation/> - -- To enable pushing updates through the Auto-GitOps workflow, you must first follow the installation workflow for the application using the Admin Console or the Replicated KOTS CLI. If the preflight checks pass during installation, then the application is deployed. - -- After you have completed the installation workflow, you can enable Auto-GitOps for all subsequent application updates. It is not required that the application deploy successfully to enable Auto-GitOps. For example, if the preflight checks fail during the installation workflow and the application is not deployed, you can still enable Auto-GitOps for subsequent application updates. - -- When you enable Auto-GitOps, the Admin Console sends all application updates, including the version that you initially installed before Auto-GitOps was enabled, to the repository that you specify. - -- If your organization has security requirements that prevent you from completing the installation workflow for the application first with the Admin Console or KOTS CLI, you cannot enable Auto-GitOps. - -## Prerequisites - -- A Git repository that you have read/write access to. -- If the repository does not have files or folders committed yet, you must make at least one commit with any content so that the connection attempt succeeds with the SSH key when you perform the following task. - -## Enable Auto-GitOps - -To enable pushing updates to the Auto-GitOps workflow: - -1. Click the **GitOps** tab at the top of the Admin Console. - -1. On the GitOps Configuration page: - - 1. If you have more than one application, select the application where you want to enable Auto-GitOps. - 1. Select the Git provider. - 1. 
Enter the repository details: - - <table> - <tr> - <th width="30%">Field Name</th> - <th width="70%">Description</th> - </tr> - <tr> - <td>Owner & Repository</td> - <td>Enter the owner and repository name where the commit will be made.</td> - </tr> - <tr> - <td>Branch</td> - <td>Enter the branch name or leave the field blank to use the default branch.</td> - </tr> - <tr> - <td>Path</td> - <td>Enter the folder name in the repository where the application deployment file will be committed. If you leave this field blank, Replicated KOTS creates a folder for you. However, the best practice is to manually create a folder in the repository labeled with the application name and dedicated to the deployment file only.</td> - </tr> - </table> - - 1. Click **Generate SSH Key**, and then **Copy key**. - 1. Go to your Git repository and open the settings page. On the settings page: - 1. Add the SSH public key that you copied in the previous step. - 1. Enable write access for the key. This allows the Admin Console to push commits to the repository. - -1. On the **GitOps Configuration** page, click **Test connection to repository** to verify that the Admin Console can connect. - - When the Admin Console establishes a connection to the repository, a dialog displays that says GitOps is enabled. - -================ -File: docs/enterprise/image-registry-kurl.md -================ -import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" - -# Working with the kURL Image Registry - -<KurlAvailability/> - -This topic describes the Replicated kURL registry for kURL clusters. - -## Overview - -The kURL Registry add-on can be used to host application images. For air gap installations, this kURL registry is automatically used to host all application images. - -With every application update, new images are pushed to the kURL registry. -To keep the registry from running out of storage, images that are no longer used are automatically deleted from the registry. - -For more information about the kURL Registry add-on, see [Registry Add-On](https://kurl.sh/docs/add-ons/registry) in the kURL documentation. - -:::note -Users can also configure their own private registry for kURL installations instead of using the kURL registry. For more information, see [Configuring Local Image Registries](/enterprise/image-registry-settings). -::: - -## Trigger Garbage Collection - -Every time the application instance is upgraded, image garbage collection automatically deletes images that are no longer used. - -You can also manually trigger image garbage collection. To manually run garbage collection: - -```bash -kubectl kots admin-console garbage-collect-images -n NAMESPACE -``` -Where `NAMESPACE` is the namespace where the application is installed. - -For more information, see [admin-console garbage-collect-images](/reference/kots-cli-admin-console-garbage-collect-images/). - -## Disable Image Garbage Collection - -Image garbage collection is enabled by default for kURL clusters that use the kURL registry. - -To disable image garbage collection: - -```bash -kubectl patch configmaps kotsadm-confg --type merge -p "{\"data\":{\"enable-image-deletion\":\"false\"}}" -``` - -To enable garbage collection again: -```bash -kubectl patch configmaps kotsadm-confg --type merge -p "{\"data\":{\"enable-image-deletion\":\"true\"}}" -``` - -## Restore Deleted Images - -Deleted images can be reloaded from air gap bundles using the `admin-console push-images` command. 
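For example, the following is a minimal sketch of reloading images from an air gap bundle (the bundle path, registry address, and registry namespace are hypothetical placeholders): - - ```bash - kubectl kots admin-console push-images ./application.airgap \ - registry.example.com:5000/app-namespace \ - --registry-username USERNAME \ - --registry-password PASSWORD - ```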
For more information, see [admin-console push-images](/reference/kots-cli-admin-console-push-images/) in the KOTS CLI documentation. - -The registry address and namespace can be found on the **Registry Settings** page in the Replicated KOTS Admin Console. -The registry username and password can be found in the `registry-creds` secret in the default namespace. - -## Limitations - -The kURL registry image garbage collection feature has the following limitations: - -* **Optional components**: Some applications define Kubernetes resources that can be enabled or disabled dynamically. For example, template functions can be used to conditionally deploy a StatefulSet based on configuration from the user. - - If a resource is disabled and no longer deployed, its images can be included in the garbage collection. - - To prevent this from happening, include the optional images in the `additionalImages` list of the Application custom resource. For more information, see [`additionalImages`](/reference/custom-resource-application#additionalimages) in _Application_. - -* **Shared Image Registries**: The image garbage collection process assumes that the registry is not shared with any other instances of Replicated KOTS, nor shared with any external applications. If the built-in kURL registry is used by another external application, disable garbage collection to prevent image loss. - -* **Customer-Supplied Registries**: Image garbage collection is supported only when used with the built-in kURL registry. If the KOTS instance is configured to use a different registry, disable garbage collection to prevent image loss. For more information about configuring an image registry in the Admin Console, see [Configuring Local Image Registries](/enterprise/image-registry-settings). - -* **Application Rollbacks**: Image garbage collection has no effect when the `allowRollback` field in the KOTS Application custom resource is set to `true`. For more information, see [Application](/reference/custom-resource-application) in _KOTS Custom Resources_. - -================ -File: docs/enterprise/image-registry-rate-limits.md -================ -# Avoiding Docker Hub Rate Limits - -This topic describes how to avoid rate limiting for anonymous and free authenticated use of Docker Hub by providing a Docker Hub username and password to the `kots docker ensure-secret` command. - -## Overview - -On November 20, 2020, rate limits for anonymous and free authenticated use of Docker Hub went into effect. -Anonymous and Free Docker Hub users are limited to 100 and 200 container image pull requests per six hours, respectively. -Docker Pro and Docker Team accounts continue to have unlimited access to pull container images from Docker Hub. - -For more information on rate limits, see [Understanding Docker Hub rate limiting](https://www.docker.com/increase-rate-limits) on the Docker website. - -If the application that you are installing or upgrading has public Docker Hub images that are rate limited, then an error occurs when the rate limit is reached. - -## Provide Docker Hub Credentials - -To avoid errors caused by reaching the Docker Hub rate limit, a Docker Hub username and password can be passed to the `kots docker ensure-secret` command. The Docker Hub username and password are used only to increase rate limits and do not need access to any private repositories on Docker Hub. 
- -Example: - -```bash -kubectl kots docker ensure-secret --dockerhub-username sentrypro --dockerhub-password password --namespace sentry-pro -``` - -The `kots docker ensure-secret` command creates an image pull secret that KOTS can use when pulling images. - -KOTS then creates a new release sequence for the application to apply the image pull secret to all Kubernetes manifests that have images. After running the `kots docker ensure-secret` command, deploy this new release sequence either from the Admin Console or the KOTS CLI. - -For more information, see [docker ensure-secret](/reference/kots-cli-docker-ensure-secret) in the KOTS CLI documentation. - -================ -File: docs/enterprise/image-registry-settings.mdx -================ -import ImageRegistrySettings from "../partials/image-registry/_image-registry-settings.mdx" -import DockerCompatibility from "../partials/image-registry/_docker-compatibility.mdx" - -# Configuring Local Image Registries - -This topic describes how to configure private registry settings in the Replicated KOTS Admin Console. - -The information in this topic applies to existing cluster installations with KOTS and installations with Replicated kURL. This topic does _not_ apply to Replicated Embedded Cluster installations. - -## Overview - -Using a private registry lets you create a custom image pipeline. Any proprietary configurations that you make to the application are shared only with the groups that you allow access, such as your team or organization. You also have control over the storage location, logging messages, load balancing requests, and other configuration options. Private registries can be used with online or air gap clusters. - -## Requirement - -The domain of the image registry must support the Docker V2 protocol. KOTS has been tested for compatibility with the following registries: - -<DockerCompatibility/> - -## Configure Local Private Registries in Online Clusters - -In online (internet-connected) installations, you can optionally use a local private image registry. You can also disable the connection or remove the registry settings if needed. - -To configure private registry settings in an online cluster: - -1. In the Admin Console, on the **Registry settings** tab, edit the fields: - - <img src="/images/registry-settings.png" alt="Registry Settings" width="400"></img> - - [View a larger version of this image](/images/registry-settings.png) - - The following table describes the fields: - - <ImageRegistrySettings/> - -1. Click **Test Connection** to test the connection between KOTS and the registry host. - -1. Click **Save changes**. - -## Change Private Registries in Air Gap Clusters {#air-gap} - -You can change the private registry settings at any time in the Admin Console. - -To change private registry settings in an air gap cluster: - -1. In the Admin Console, on the **Registry settings** tab, select the **Disable Pushing Images to Private Registry** checkbox. Click **Save changes**. - - :::note - This is a temporary action that allows you to edit the registry namespace and hostname. If you only want to change the username or password for the registry, you do not have to disable pushing the images. - ::: - -1. Edit the fields as needed, and click **Save changes**. 
- - <table> - <tr> - <th width="30%">Field</th> - <th width="70%">Description</th> - </tr> - <tr> - <td>Hostname</td> - <td>Specify a registry domain that uses the Docker V2 protocol.</td> - </tr> - <tr> - <td>Username</td> - <td>Specify the username for the domain.</td> - </tr> - <tr> - <td>Password</td> - <td>Specify the password for the domain.</td> - </tr> - <tr> - <td>Registry Namespace</td> - <td>Specify the registry namespace. For air gap environments, this setting overwrites the registry namespace that you pushed images to when you installed KOTS.</td> - </tr> - </table> - -1. Deselect the **Disable Pushing Images to Private Registry** checkbox. This action re-enables KOTS to push images to the registry. - -1. Click **Test Connection** to test the connection between KOTS and the private registry host. - -1. Click **Save changes**. - -## Stop Using a Registry and Remove Registry Settings - -To stop using a registry and remove registry settings from the Admin Console: - -1. Log in to the Admin Console and go to **Registry Settings**. - -1. Click **Stop using registry** to remove the registry settings from the Admin Console. - -================ -File: docs/enterprise/installing-embedded-air-gap.mdx -================ -import UpdateAirGapAdm from "../partials/embedded-cluster/_update-air-gap-admin-console.mdx" -import UpdateAirGapCli from "../partials/embedded-cluster/_update-air-gap-cli.mdx" -import UpdateAirGapOverview from "../partials/embedded-cluster/_update-air-gap-overview.mdx" -import DoNotDowngrade from "../partials/embedded-cluster/_warning-do-not-downgrade.mdx" -import Prerequisites from "../partials/install/_ec-prereqs.mdx" - -# Air Gap Installation with Embedded Cluster - -This topic describes how to install applications with Embedded Cluster on a virtual machine (VM) or bare metal server with no outbound internet access. - -## Overview - -When an air gap bundle is built for a release containing an Embedded Cluster Config, both an application air gap bundle and an Embedded Cluster air gap bundle are built. The application air gap bundle can be used for air gap installations with Replicated kURL or with Replicated KOTS in an existing cluster. The Embedded Cluster air gap bundle is used for air gap installations with Embedded Cluster. - -The Embedded Cluster air gap bundle not only contains the assets normally contained in an application air gap bundle (`airgap.yaml`, `app.tar.gz`, and an images directory), but it also contains an `embedded-cluster` directory with the assets needed to install the infrastructure (Embedded Cluster/k0s and [extensions](/reference/embedded-config#extensions)). - -During installation with Embedded Cluster in air gap environments, a Docker registry is deployed to the cluster to store application images. Infrastructure images (for Embedded Cluster and Helm extensions) and the Helm charts are preloaded on each node at installation time. - -### Requirement - -Air gap installations are supported with Embedded Cluster version 1.3.0 or later. - -### Limitations and Known Issues - -Embedded Cluster installations in air gap environments have the following limitations and known issues: - -* If you pass `?airgap=true` to the `replicated.app` endpoint but an air gap bundle is not built for the latest release, the API does not return a 404. Instead, it returns the tarball without the air gap bundle (that is, with only the installer and the license in it, as for online installations). 
- -* Images used by Helm extensions must not refer to a multi-architecture image by digest. Only x64 images are included in air gap bundles, and the digest for the x64 image will be different from the digest for the multi-architecture image, preventing the image from being discovered in the bundle. One example of a chart that does this is the ingress-nginx/ingress-nginx chart. For an example of how the digests should be set to empty string to pull by tag only, see [extensions](/reference/embedded-config#extensions) in _Embedded Cluster Config_. - -* Images for Helm extensions are loaded directly into containerd so that they are available without internet access. But if an image used by a Helm extension has **Always** set as the image pull policy, Kubernetes will try to pull the image from the internet. If necessary, use the Helm values to set `IfNotPresent` as the image pull policy to ensure the extension works in air gap environments. - -* On the channel release history page, the links for **Download air gap bundle**, **Copy download URL**, and **View bundle contents** pertain to the application air gap bundle only, not the Embedded Cluster bundle. - -## Prerequisites - -Before you install, complete the following prerequisites: - -<Prerequisites/> - -## Install - -To install with Embedded Cluster in an air gap environment: - -1. In the [Vendor Portal](https://vendor.replicated.com), go to the channel where the target release was promoted to build the air gap bundle. Do one of the following: - * If the **Automatically create airgap builds for newly promoted releases in this channel** setting is enabled on the channel, watch for the build status to complete. - * If automatic air gap builds are not enabled, go to the **Release history** page for the channel and build the air gap bundle manually. - - :::note - Errors in building either the application air gap bundle or the Embedded Cluster infrastructure are shown if present. - ::: - -1. Go to **Customers** and click on the target customer. - -1. On the **Manage customer** tab, under **License options**, enable the **Airgap Download Enabled** license field. - -1. At the top of the page, click **Install instructions > Embedded Cluster**. - - ![Customer install instructions drop down button](/images/customer-install-instructions-dropdown.png) - - [View a larger version of this image](/images/customer-install-instructions-dropdown.png) - -1. In the **Embedded Cluster install instructions** dialog, verify that the **Install in an air gap environment** checkbox is enabled. - - <img alt="Embedded cluster install instruction dialog" src="/images/embedded-cluster-install-dialog-airgap.png" width="500px"/> - - [View a larger version of this image](/images/embedded-cluster-install-dialog-airgap.png) - -1. (Optional) For **Select a version**, select a specific application version to install. By default, the latest version is selected. - -1. SSH onto the machine where you will install. - -1. On a machine with internet access, run the curl command to download the air gap installation assets as a `.tgz`. - -1. Move the downloaded `.tgz` to the air-gapped machine where you will install. - -1. On your air-gapped machine, untar the `.tgz` following the instructions provided in the **Embedded Cluster installation instructions** dialog. This will produce three files: - * The installer - * The license - * The air gap bundle (`APP_SLUG.airgap`)
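   The dialog provides the exact extraction command. As a minimal sketch, assuming the downloaded archive is named `APP_SLUG.tgz`: - - ```bash - # Extract the installer, the license, and the air gap bundle - tar -xvzf APP_SLUG.tgz - ``` - -1. 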
Install the application with the installation command copied from the **Embedded Cluster installation instructions** dialog: - - ```bash - sudo ./APP_SLUG install --license license.yaml --airgap-bundle APP_SLUG.airgap - ``` - Where `APP_SLUG` is the unique application slug. - - :::note - Embedded Cluster supports installation options such as installing behind a proxy and changing the data directory used by Embedded Cluster. For the list of flags supported with the Embedded Cluster `install` command, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install). - ::: - -1. When prompted, enter a password for accessing the KOTS Admin Console. - - The installation command takes a few minutes to complete. During installation, Embedded Cluster completes tasks to prepare the cluster and install KOTS in the cluster. Embedded Cluster also automatically runs a default set of [_host preflight checks_](/vendor/embedded-using#about-host-preflight-checks) which verify that the environment meets the requirements for the installer. - - **Example output:** - - ```bash - ? Enter an Admin Console password: ******** - ? Confirm password: ******** - ✔ Host files materialized! - ✔ Running host preflights - ✔ Node installation finished! - ✔ Storage is ready! - ✔ Embedded Cluster Operator is ready! - ✔ Admin Console is ready! - ✔ Additional components are ready! - Visit the Admin Console to configure and install gitea-kite: http://104.155.145.60:30000 - ``` - - At this point, the cluster is provisioned and the Admin Console is deployed, but the application is not yet installed. - -1. Go to the URL provided in the output to access the Admin Console. - -1. On the Admin Console landing page, click **Start**. - -1. On the **Secure the Admin Console** screen, review the instructions and click **Continue**. In your browser, follow the instructions that were provided on the **Secure the Admin Console** screen to bypass the warning. - -1. On the **Certificate type** screen, either select **Self-signed** to continue using the self-signed Admin Console certificate or click **Upload your own** to upload your own private key and certificate. - - By default, a self-signed TLS certificate is used to secure communication between your browser and the Admin Console. You will see a warning in your browser every time you access the Admin Console unless you upload your own certificate. - -1. On the login page, enter the Admin Console password that you created during installation and click **Log in**. - -1. On the **Nodes** page, you can view details about the machine where you installed, including its node role, status, CPU, and memory. - - Optionally, add nodes to the cluster before deploying the application. For more information about joining nodes, see [Managing Multi-Node Clusters with Embedded Cluster](/enterprise/embedded-manage-nodes). Click **Continue**. - -1. On the **Configure [App Name]** screen, complete the fields for the application configuration options. Click **Continue**. - -1. On the **Validate the environment & deploy [App Name]** screen, address any warnings or failures identified by the preflight checks and then click **Deploy**. - - Preflight checks are conformance tests that run against the target namespace and cluster to ensure that the environment meets the minimum requirements to support the application. - -The Admin Console dashboard opens. - -On the Admin Console dashboard, the application status changes from Missing to Unavailable while the application is being installed. 
When the installation is complete, the status changes to Ready. For example: - -![Admin console dashboard showing ready status](/images/gitea-ec-ready.png) - -[View a larger version of this image](/images/gitea-ec-ready.png) - -================ -File: docs/enterprise/installing-embedded-automation.mdx -================ -import ConfigValuesExample from "../partials/configValues/_configValuesExample.mdx" -import ConfigValuesProcedure from "../partials/configValues/_config-values-procedure.mdx" - -# Automating Installation with Embedded Cluster - -This topic describes how to install an application with Replicated Embedded Cluster from the command line, without needing to access the Replicated KOTS Admin Console. - -## Overview - -A common use case for installing with Embedded Cluster from the command line is to automate installation, such as performing headless installations as part of CI/CD pipelines. - -With headless installation, you provide all the necessary installation assets, such as the license file and the application config values, with the installation command rather than through the Admin Console UI. Any preflight checks defined for the application run automatically during headless installations from the command line rather than being displayed in the Admin Console. - -## Prerequisite - -Create a ConfigValues YAML file to define the configuration values for the application release. The ConfigValues file allows you to pass the configuration values for an application from the command line with the install command, rather than through the Admin Console UI. For air-gapped environments, ensure that the ConfigValues file can be accessed from the installation environment. - -The KOTS ConfigValues file includes the fields that are defined in the KOTS Config custom resource for an application release, along with the user-supplied and default values for each field, as shown in the example below: - -<ConfigValuesExample/> - -<ConfigValuesProcedure/> - -## Online (Internet-Connected) Installation - -To install with Embedded Cluster in an online environment: - -1. Follow the steps provided in the Vendor Portal to download and untar the Embedded Cluster installation assets. For more information, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded). - -1. Run the following command to install: - - ```bash - sudo ./APP_SLUG install --license-file PATH_TO_LICENSE \ - --config-values PATH_TO_CONFIGVALUES \ - --admin-console-password ADMIN_CONSOLE_PASSWORD - ``` - - Replace: - * `APP_SLUG` with the unique slug for the application. - * `PATH_TO_LICENSE` with the path to the customer license file. - * `ADMIN_CONSOLE_PASSWORD` with a password for accessing the Admin Console. - * `PATH_TO_CONFIGVALUES` with the path to the ConfigValues file. - -## Air Gap Installation - -To install with Embedded Cluster in an air-gapped environment: - -1. Follow the steps provided in the Vendor Portal to download and untar the Embedded Cluster air gap installation assets. For more information, see [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap). - -1. Ensure that the Embedded Cluster installation assets are available on the air-gapped machine, then run the following command to install: - - ```bash - sudo ./APP_SLUG install --license-file PATH_TO_LICENSE \ - --config-values PATH_TO_CONFIGVALUES \ - --admin-console-password ADMIN_CONSOLE_PASSWORD \ - --airgap-bundle PATH_TO_AIRGAP_BUNDLE - ``` - - Replace: - * `APP_SLUG` with the unique slug for the application. 
- * `PATH_TO_LICENSE` with the path to the customer license file. - * `PATH_TO_CONFIGVALUES` with the path to the ConfigValues file. - * `ADMIN_CONSOLE_PASSWORD` with a password for accessing the Admin Console. - * `PATH_TO_AIRGAP_BUNDLE` with the path to the Embedded Cluster `.airgap` bundle for the release. - -================ -File: docs/enterprise/installing-embedded-requirements.mdx -================ -import EmbeddedClusterRequirements from "../partials/embedded-cluster/_requirements.mdx" -import EmbeddedClusterPortRequirements from "../partials/embedded-cluster/_port-reqs.mdx" -import FirewallOpeningsIntro from "../partials/install/_firewall-openings-intro.mdx" - -# Embedded Cluster Installation Requirements - -This topic lists the installation requirements for Replicated Embedded Cluster. Ensure that the installation environment meets these requirements before attempting to install. - -## System Requirements - -<EmbeddedClusterRequirements/> - -## Port Requirements - -<EmbeddedClusterPortRequirements/> - -## Firewall Openings for Online Installations with Embedded Cluster {#firewall} - -<FirewallOpeningsIntro/> - -<table> - <tr> - <th width="50%">Domain</th> - <th>Description</th> - </tr> - <tr> - <td>`proxy.replicated.com`</td> - <td><p>Private Docker images are proxied through `proxy.replicated.com`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p>For the range of IP addresses for `proxy.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L52-L57) in GitHub.</p></td> - </tr> - <tr> - <td>`replicated.app`</td> - <td><p>Upstream application YAML and metadata is pulled from `replicated.app`. The current running version of the application (if any), as well as a license ID and application ID to authenticate, are all sent to `replicated.app`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p>For the range of IP addresses for `replicated.app`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L60-L65) in GitHub.</p></td> - </tr> - <tr> - <td>`registry.replicated.com` *</td> - <td><p>Some applications host private images in the Replicated registry at this domain. The on-prem docker client uses a license ID to authenticate to `registry.replicated.com`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p> For the range of IP addresses for `registry.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L20-L25) in GitHub.</p></td> - </tr> -</table> - -* Required only if the application uses the [Replicated private registry](/vendor/private-images-replicated). - -## About Firewalld Configuration - -When Firewalld is enabled in the installation environment, Embedded Cluster modifies the Firewalld config to allow traffic over the pod and service networks and to open the required ports on the host. No additional configuration is required. 
- -The following rule is added to Firewalld: - -```xml -<?xml version="1.0" encoding="utf-8"?> -<zone target="ACCEPT"> - <interface name="cali+"/> - <interface name="tunl+"/> - <interface name="vxlan-v6.calico"/> - <interface name="vxlan.calico"/> - <interface name="wg-v6.cali"/> - <interface name="wireguard.cali"/> - <source address="[pod-network-cidr]"/> - <source address="[service-network-cidr]"/> -</zone> -``` - -The following ports are opened in the default zone: - -<table> -<tr> - <th>Port</th> - <th>Protocol</th> -</tr> -<tr> - <td>6443</td> - <td>TCP</td> -</tr> -<tr> - <td>10250</td> - <td>TCP</td> -</tr> -<tr> - <td>9443</td> - <td>TCP</td> -</tr> -<tr> - <td>2380</td> - <td>TCP</td> -</tr> -<tr> - <td>4789</td> - <td>UDP</td> -</tr> -</table> - -================ -File: docs/enterprise/installing-embedded.mdx -================ -import Prerequisites from "../partials/install/_ec-prereqs.mdx" - -# Online Installation with Embedded Cluster - -This topic describes how to install an application in an online (internet-connected) environment with the Replicated Embedded Cluster installer. For information about air gap installations with Embedded Cluster, see [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap). - -## Prerequisites - -Before you install, complete the following prerequisites: - -<Prerequisites/> - -* Ensure that the required domains are accessible from servers performing the installation. See [Firewall Openings for Online Installations](/enterprise/installing-embedded-requirements#firewall). - -## Install - -To install an application with Embedded Cluster: - -1. In the [Vendor Portal](https://vendor.replicated.com), go to **Customers** and click on the target customer. Click **Install instructions > Embedded Cluster**. - - ![Customer install instructions drop down button](/images/customer-install-instructions-dropdown.png) - - [View a larger version of this image](/images/customer-install-instructions-dropdown.png) - - The **Embedded Cluster install instructions** dialog is displayed. - - <img alt="Embedded cluster install instruction dialog" src="/images/embedded-cluster-install-dialog.png" width="500px"/> - - [View a larger version of this image](/images/embedded-cluster-install-dialog.png) - -1. (Optional) In the **Embedded Cluster install instructions** dialog, under **Select a version**, select a specific application version to install. By default, the latest version is selected. - -1. SSH onto the machine where you will install. - -1. Run the first command in the **Embedded Cluster install instructions** dialog to download the installation assets as a `.tgz`. - -1. Run the second command to extract the `.tgz`. This will produce the following files: - - * The installer - * The license - -1. Run the third command to install the release: - - ```bash - sudo ./APP_SLUG install --license LICENSE_FILE - ``` - Where: - * `APP_SLUG` is the unique slug for the application. - * `LICENSE_FILE` is the customer license. - <br/> - :::note - Embedded Cluster supports installation options such as installing behind a proxy and changing the data directory used by Embedded Cluster. For the list of flags supported with the Embedded Cluster `install` command, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install). - ::: - -1. When prompted, enter a password for accessing the KOTS Admin Console. 
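   If you are scripting this step, you can avoid the interactive prompt by passing the password as a flag instead. The following is a minimal sketch using the `--admin-console-password` flag shown in [Automating Installation with Embedded Cluster](/enterprise/installing-embedded-automation): - - ```bash - # Non-interactive variant of the install command above - sudo ./APP_SLUG install --license LICENSE_FILE --admin-console-password ADMIN_CONSOLE_PASSWORD - ```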
The installation command takes a few minutes to complete. During installation, Embedded Cluster completes tasks to prepare the cluster and install KOTS in the cluster. Embedded Cluster also automatically runs a default set of [_host preflight checks_](/vendor/embedded-using#about-host-preflight-checks) which verify that the environment meets the requirements for the installer. - - **Example output:** - - ```bash - ? Enter an Admin Console password: ******** - ? Confirm password: ******** - ✔ Host files materialized! - ✔ Running host preflights - ✔ Node installation finished! - ✔ Storage is ready! - ✔ Embedded Cluster Operator is ready! - ✔ Admin Console is ready! - ✔ Additional components are ready! - Visit the Admin Console to configure and install gitea-kite: http://104.155.145.60:30000 - ``` - - At this point, the cluster is provisioned and the Admin Console is deployed, but the application is not yet installed. - -1. Go to the URL provided in the output to access the Admin Console. - -1. On the Admin Console landing page, click **Start**. - -1. On the **Secure the Admin Console** screen, review the instructions and click **Continue**. In your browser, follow the instructions that were provided on the **Secure the Admin Console** screen to bypass the warning. - -1. On the **Certificate type** screen, either select **Self-signed** to continue using the self-signed Admin Console certificate or click **Upload your own** to upload your own private key and certificate. - - By default, a self-signed TLS certificate is used to secure communication between your browser and the Admin Console. You will see a warning in your browser every time you access the Admin Console unless you upload your own certificate. - -1. On the login page, enter the Admin Console password that you created during installation and click **Log in**. - -1. On the **Nodes** page, you can view details about the machine where you installed, including its node role, status, CPU, and memory. - - Optionally, add nodes to the cluster before deploying the application. For more information about joining nodes, see [Managing Multi-Node Clusters with Embedded Cluster](/enterprise/embedded-manage-nodes). Click **Continue**. - -1. On the **Configure [App Name]** screen, complete the fields for the application configuration options. Click **Continue**. - -1. On the **Validate the environment & deploy [App Name]** screen, address any warnings or failures identified by the preflight checks and then click **Deploy**. - - Preflight checks are conformance tests that run against the target namespace and cluster to ensure that the environment meets the minimum requirements to support the application. - -The Admin Console dashboard opens. - -On the Admin Console dashboard, the application status changes from Missing to Unavailable while the application is being installed. When the installation is complete, the status changes to Ready. 
For example: - -![Admin console dashboard showing ready status](/images/gitea-ec-ready.png) - -[View a larger version of this image](/images/gitea-ec-ready.png) - -================ -File: docs/enterprise/installing-existing-cluster-airgapped.mdx -================ -import IntroExisting from "../partials/install/_intro-existing.mdx" -import IntroAirGap from "../partials/install/_intro-air-gap.mdx" -import PrereqsExistingCluster from "../partials/install/_prereqs-existing-cluster.mdx" -import BuildAirGapBundle from "../partials/install/_airgap-bundle-build.mdx" -import DownloadAirGapBundle from "../partials/install/_airgap-bundle-download.mdx" -import ViewAirGapBundle from "../partials/install/_airgap-bundle-view-contents.mdx" -import LicenseFile from "../partials/install/_license-file-prereq.mdx" -import AirGapLicense from "../partials/install/_airgap-license-download.mdx" -import DownloadKotsBundle from "../partials/install/_download-kotsadm-bundle.mdx" -import InstallCommandPrompts from "../partials/install/_kots-install-prompts.mdx" -import AppNameUI from "../partials/install/_placeholder-app-name-UI.mdx" -import InstallKotsCliAirGap from "../partials/install/_install-kots-cli-airgap.mdx" -import PushKotsImages from "../partials/install/_push-kotsadm-images.mdx" -import PlaceholderRoCreds from "../partials/install/_placeholder-ro-creds.mdx" -import KotsVersionMatch from "../partials/install/_kots-airgap-version-match.mdx" - -# Air Gap Installation in Existing Clusters with KOTS - -<IntroExisting/> - -<IntroAirGap/> - -## Prerequisites - -Complete the following prerequisites: - -<PrereqsExistingCluster/> - -* Ensure that there is a compatible Docker image registry available inside the network. For more information about Docker registry compatibility, see [Compatible Image Registries](/enterprise/installing-general-requirements#registries). - - KOTS rewrites the application image names in all application manifests to read from the on-premises registry, and it re-tags and pushes the images to the on-premises registry. When authenticating to the registry, credentials with `push` permissions are required. - - A single application expects to use a single namespace in the Docker image registry. The namespace name can be any valid URL-safe string, supplied at installation time. A registry typically expects the namespace to exist before any images can be pushed into it. - - :::note - Amazon Elastic Container Registry (ECR) does not use namespaces. - ::: - -## Install {#air-gap} - -To install in an air gap cluster with KOTS: - -1. Download the customer license: - - <AirGapLicense/> - -1. Go to the channel where the target release was promoted to build and download the air gap bundle for the release: - - <BuildAirGapBundle/> - -1. <DownloadAirGapBundle/> - -1. <ViewAirGapBundle/> - -1. <DownloadKotsBundle/> - -1. <InstallKotsCliAirGap/> - - <KotsVersionMatch/> - -1. <PushKotsImages/> - -1. Install the KOTS Admin Console using the images that you pushed in the previous step: - - ```shell - kubectl kots install APP_NAME \ - --kotsadm-registry REGISTRY_HOST \ - --registry-username RO-USERNAME \ - --registry-password RO-PASSWORD - ``` - - Replace: - - * `APP_NAME` with a name for the application. This is the unique name that KOTS will use to refer to the application that you install. - <PlaceholderRoCreds/> - -1. <InstallCommandPrompts/> - -1. Access the Admin Console on port 8800. If the port forward is active, go to [http://localhost:8800](http://localhost:8800) to access the Admin Console. 
- - If you need to reopen the port forward to the Admin Console, run the following command: - - ```shell - kubectl kots admin-console -n NAMESPACE - ``` - Replace `NAMESPACE` with the namespace where KOTS is installed. - -1. Log in with the password that you created during installation. - -1. Upload your license file. - -1. Upload the `.airgap` application air gap bundle. - -1. On the config screen, complete the fields for the application configuration options and then click **Continue**. - -1. On the **Preflight checks** page, the application-specific preflight checks run automatically. Preflight checks are conformance tests that run against the target namespace and cluster to ensure that the environment meets the minimum requirements to support the application. Click **Deploy**. - - :::note - Replicated recommends that you address any warnings or failures, rather than dismissing them. Preflight checks help ensure that your environment meets the requirements for application deployment. - ::: - -1. (Minimal RBAC Only) If you are installing with minimal role-based access control (RBAC), KOTS recognizes if the preflight checks failed due to insufficient privileges. When this occurs, a kubectl CLI preflight command displays that lets you manually run the preflight checks. The Admin Console then automatically displays the results of the preflight checks. Click **Deploy**. - - ![kubectl CLI preflight command](/images/kubectl-preflight-command.png) - - [View a larger version of this image](/images/kubectl-preflight-command.png) - -The Admin Console dashboard opens. - -On the Admin Console dashboard, the application status changes from Missing to Unavailable while the Deployment is being created. When the installation is complete, the status changes to Ready. For example: - -![Admin Console dashboard](/images/kotsadm-dashboard-graph.png) - -[View a larger version of this image](/images/kotsadm-dashboard-graph.png) - -================ -File: docs/enterprise/installing-existing-cluster-automation.mdx -================ -import ConfigValuesExample from "../partials/configValues/_configValuesExample.mdx" -import ConfigValuesProcedure from "../partials/configValues/_config-values-procedure.mdx" -import PlaceholdersGlobal from "../partials/install/_placeholders-global.mdx" -import PlaceholderAirgapBundle from "../partials/install/_placeholder-airgap-bundle.mdx" -import PlaceholderNamespaceExisting from "../partials/install/_placeholder-namespace-existing.mdx" -import DownloadKotsBundle from "../partials/install/_download-kotsadm-bundle.mdx" -import InstallKotsCliAirGap from "../partials/install/_install-kots-cli-airgap.mdx" -import InstallKotsCli from "../partials/install/_install-kots-cli.mdx" -import PushKotsImages from "../partials/install/_push-kotsadm-images.mdx" -import KotsVersionMatch from "../partials/install/_kots-airgap-version-match.mdx" -import PlaceholderRoCreds from "../partials/install/_placeholder-ro-creds.mdx" -import AccessAdminConsole from "../partials/install/_access-admin-console.mdx" - -# Installing with the KOTS CLI - -This topic describes how to install an application with Replicated KOTS in an existing cluster using the KOTS CLI. - -## Overview - -You can use the KOTS CLI to install an application with Replicated KOTS. A common use case for installing from the command line is to automate installation, such as performing headless installations as part of CI/CD pipelines. 
- -To install with the KOTS CLI, you provide all the necessary installation assets, such as the license file and the application config values, with the installation command rather than through the Admin Console UI. Any preflight checks defined for the application run automatically from the CLI rather than being displayed in the Admin Console. - -The following shows an example of the output from the kots install command: - - ``` - • Deploying Admin Console - • Creating namespace ✓ - • Waiting for datastore to be ready ✓ - • Waiting for Admin Console to be ready ✓ - • Waiting for installation to complete ✓ - • Waiting for preflight checks to complete ✓ - - • Press Ctrl+C to exit - • Go to http://localhost:8800 to access the Admin Console - - • Go to http://localhost:8888 to access the application - ``` - -## Prerequisite - -Create a ConfigValues YAML file to define the configuration values for the application release. The ConfigValues file allows you to pass the configuration values for an application from the command line with the install command, rather than through the Admin Console UI. For air-gapped environments, ensure that the ConfigValues file can be accessed from the installation environment. - -The KOTS ConfigValues file includes the fields that are defined in the KOTS Config custom resource for an application release, along with the user-supplied and default values for each field, as shown in the example below: - -<ConfigValuesExample/> - -<ConfigValuesProcedure/> - -## Online (Internet-Connected) Installation - -To install with KOTS in an online existing cluster: - -1. <InstallKotsCli/> - -1. Install the application: - - ```bash - kubectl kots install APP_NAME \ - --shared-password PASSWORD \ - --license-file PATH_TO_LICENSE \ - --config-values PATH_TO_CONFIGVALUES \ - --namespace NAMESPACE \ - --no-port-forward - ``` - Replace: - - <PlaceholdersGlobal/> - - <PlaceholderNamespaceExisting/> - -## Air Gap Installation {#air-gap} - -To install with KOTS in an air-gapped existing cluster: - -1. <InstallKotsCliAirGap/> - -1. <DownloadKotsBundle/> - - <KotsVersionMatch/> - -1. <PushKotsImages/> - -1. Install the application: - - ```bash - kubectl kots install APP_NAME \ - --shared-password PASSWORD \ - --license-file PATH_TO_LICENSE \ - --config-values PATH_TO_CONFIGVALUES \ - --airgap-bundle PATH_TO_AIRGAP_BUNDLE \ - --namespace NAMESPACE \ - --kotsadm-registry REGISTRY_HOST \ - --registry-username RO_USERNAME \ - --registry-password RO_PASSWORD \ - --no-port-forward - ``` - - Replace: - - <PlaceholdersGlobal/> - - <PlaceholderAirgapBundle/> - - <PlaceholderNamespaceExisting/> - - <PlaceholderRoCreds/> - -## (Optional) Access the Admin Console - -<AccessAdminConsole/> - -================ -File: docs/enterprise/installing-existing-cluster.mdx -================ -import IntroExisting from "../partials/install/_intro-existing.mdx" -import PrereqsExistingCluster from "../partials/install/_prereqs-existing-cluster.mdx" -import LicenseFile from "../partials/install/_license-file-prereq.mdx" -import InstallCommandPrompts from "../partials/install/_kots-install-prompts.mdx" -import AppNameUI from "../partials/install/_placeholder-app-name-UI.mdx" - -# Online Installation in Existing Clusters with KOTS - -<IntroExisting/> - -## Prerequisites - -Complete the following prerequisites: - -<PrereqsExistingCluster/> -<LicenseFile/> - -## Install {#online} - -To install KOTS and the application in an existing cluster: - -1. Run one of these commands to install the Replicated KOTS CLI and KOTS. 
As part of the command, you also specify a name and version for the application that you will install. - - * **For the latest application version**: - - ```shell - curl https://kots.io/install | bash - kubectl kots install APP_NAME - ``` - * **For a specific application version**: - - ```shell - curl https://kots.io/install | bash - kubectl kots install APP_NAME --app-version-label=VERSION_LABEL - ``` - - Replace, where applicable: - - <AppNameUI/> - - * `VERSION_LABEL` with the label for the version of the application to install. For example, `--app-version-label=3.0.1`. - - **Examples:** - - ```shell - curl https://kots.io/install | bash - kubectl kots install application-name - ``` - - ```shell - curl https://kots.io/install | bash - kubectl kots install application-name --app-version-label=3.0.1 - ``` - -1. <InstallCommandPrompts/> - -1. Access the Admin Console on port 8800. If the port forward is active, go to [http://localhost:8800](http://localhost:8800) to access the Admin Console. - - If you need to reopen the port forward to the Admin Console, run the following command: - - ```shell - kubectl kots admin-console -n NAMESPACE - ``` - Replace `NAMESPACE` with the namespace where KOTS is installed. - -1. Log in with the password that you created during installation. - -1. Upload your license file. - -1. On the config screen, complete the fields for the application configuration options and then click **Continue**. - -1. On the **Preflight checks** page, the application-specific preflight checks run automatically. Preflight checks are conformance tests that run against the target namespace and cluster to ensure that the environment meets the minimum requirements to support the application. Click **Deploy**. - - :::note - Replicated recommends that you address any warnings or failures, rather than dismissing them. Preflight checks help ensure that your environment meets the requirements for application deployment. - ::: - -1. (Minimal RBAC Only) If you are installing with minimal role-based access control (RBAC), KOTS recognizes if the preflight checks failed due to insufficient privileges. When this occurs, a kubectl CLI preflight command displays that lets you manually run the preflight checks. The Admin Console then automatically displays the results of the preflight checks. Click **Deploy**. - - ![kubectl CLI preflight command](/images/kubectl-preflight-command.png) - - [View a larger version of this image](/images/kubectl-preflight-command.png) - -The Admin Console dashboard opens. - -On the Admin Console dashboard, the application status changes from Missing to Unavailable while the Deployment is being created. When the installation is complete, the status changes to Ready. For example: - -![Admin Console dashboard](/images/kotsadm-dashboard-graph.png) - -[View a larger version of this image](/images/kotsadm-dashboard-graph.png) - -================ -File: docs/enterprise/installing-general-requirements.mdx -================ -import DockerCompatibility from "../partials/image-registry/_docker-compatibility.mdx" -import KubernetesCompatibility from "../partials/install/_kubernetes-compatibility.mdx" -import FirewallOpeningsIntro from "../partials/install/_firewall-openings-intro.mdx" - -# KOTS Installation Requirements - -This topic describes the requirements for installing in a Kubernetes cluster with Replicated KOTS. - -:::note -This topic does not include any requirements specific to the application. 
Ensure that you meet any additional requirements for the application before installing. -::: - -## Supported Browsers - -The following table lists the browser requirements for the Replicated KOTS Admin Console with the latest version of KOTS. - -| Browser | Support | -|----------------------|-------------| -| Chrome | 66+ | -| Firefox | 58+ | -| Opera | 53+ | -| Edge | 80+ | -| Safari (Mac OS only) | 13+ | -| Internet Explorer | Unsupported | - -## Kubernetes Version Compatibility - -Each release of KOTS maintains compatibility with the current Kubernetes version and the two most recent versions at the time of its release. This includes support for all patch releases of the corresponding Kubernetes version. - -Kubernetes versions 1.25 and earlier are end-of-life (EOL). For more information about Kubernetes versions, see [Release History](https://kubernetes.io/releases/) in the Kubernetes documentation. - -Replicated recommends using a version of KOTS that is compatible with Kubernetes 1.26 and higher. - -<KubernetesCompatibility/> - -## Minimum System Requirements - -To install KOTS in an existing cluster, your environment must meet the following minimum requirements: - -* **KOTS Admin Console minimum requirements**: Clusters that have LimitRanges specified must support the following minimum requirements for the Admin Console: - - * **CPU resources and memory**: The Admin Console pod requests 100m CPU resources and 100Mi memory. - - * **Disk space**: The Admin Console requires a minimum of 5GB of disk space on the cluster for persistent storage, including: - - * **4GB for S3-compatible object store**: The Admin Console requires 4GB for an S3-compatible object store to store application archives, support bundles, and snapshots that are configured to use a host path or NFS storage destination. By default, KOTS deploys MinIO to satisfy this object storage requirement. During deployment, MinIO is configured with a randomly generated `AccessKeyID` and `SecretAccessKey`, and only exposed as a ClusterIP on the overlay network. - - :::note - You can optionally install KOTS without MinIO by passing `--with-minio=false` with the `kots install` command. This installs KOTS as a StatefulSet using a persistent volume (PV) for storage. For more information, see [Installing KOTS in Existing Clusters Without Object Storage](/enterprise/installing-stateful-component-requirements). - ::: - - * **1GB for rqlite PersistentVolume**: The Admin Console requires 1GB for an rqlite StatefulSet to store version history, application metadata, and other small amounts of data needed to manage the application(s). During deployment, the rqlite component is secured with a randomly generated password, and only exposed as a ClusterIP on the overlay network. - -* **Supported operating systems**: The following are the supported operating systems for nodes: - * Linux AMD64 - * Linux ARM64 - -* **Available StorageClass**: The cluster must have an existing StorageClass available. KOTS creates the required stateful components using the default StorageClass in the cluster. For more information, see [Storage Classes](https://kubernetes.io/docs/concepts/storage/storage-classes/) in the Kubernetes documentation. - -* **Kubernetes version compatibility**: The version of Kubernetes running on the cluster must be compatible with the version of KOTS that you use to install the application. This compatibility requirement does not include any specific and additional requirements defined by the software vendor for the application.
- - For more information about the versions of Kubernetes that are compatible with each version of KOTS, see [Kubernetes Version Compatibility](#kubernetes-version-compatibility) above. - -* **OpenShift version compatibility**: For Red Hat OpenShift clusters, the version of OpenShift must use a supported Kubernetes version. For more information about supported Kubernetes versions, see [Kubernetes Version Compatibility](#kubernetes-version-compatibility) above. - -* **Port forwarding**: To support port forwarding, Kubernetes clusters require that the SOcket CAT (socat) package is installed on each node. - - If the package is not installed on each node in the cluster, you see the following error message when the installation script attempts to connect to the Admin Console: `unable to do port forwarding: socat not found`. - - To check if the package that provides socat is installed, you can run `which socat`. If the package is installed, the `which socat` command prints the full path to the socat executable file. For example, `/usr/bin/socat`. - - If the output of the `which socat` command is `socat not found`, then you must install the package that provides the socat command. The name of this package can vary depending on the node's operating system. - -## RBAC Requirements - -The user that runs the installation command must have at least the minimum role-based access control (RBAC) permissions that are required by KOTS. If the user does not have the required RBAC permissions, then an error message displays: `Current user has insufficient privileges to install Admin Console`. - -The required RBAC permissions vary depending on whether the user attempts to install KOTS with cluster-scoped access or namespace-scoped access: -* [Cluster-scoped RBAC Requirements (Default)](#cluster-scoped) -* [Namespace-scoped RBAC Requirements](#namespace-scoped) - -### Cluster-scoped RBAC Requirements (Default) {#cluster-scoped} - -By default, KOTS requires cluster-scoped access. With cluster-scoped access, a Kubernetes ClusterRole and ClusterRoleBinding are created that grant KOTS access to all resources across all namespaces in the cluster. - -To install KOTS with cluster-scoped access, the user must meet the following RBAC requirements: -* The user must be able to create workloads, ClusterRoles, and ClusterRoleBindings. -* The user must have cluster-admin permissions to create namespaces and assign RBAC roles across the cluster. - -### Namespace-scoped RBAC Requirements {#namespace-scoped} - -KOTS can be installed with namespace-scoped access rather than the default cluster-scoped access. With namespace-scoped access, a Kubernetes Role and RoleBinding are automatically created that grant KOTS permissions only in the namespace where it is installed. - -:::note -Depending on the application, namespace-scoped access for KOTS is required, optional, or not supported. Contact your software vendor for application-specific requirements. -::: - -To install or upgrade KOTS with namespace-scoped access, the user must have _one_ of the following permission levels in the target namespace: -* Wildcard Permissions (Default) -* Minimum KOTS RBAC Permissions - -See the sections below for more information.
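- -As a quick spot check before choosing one of these paths, you can ask Kubernetes whether your user already holds wildcard permissions in the target namespace with `kubectl auth can-i`. This is an illustrative check rather than an exhaustive RBAC audit, and `kots-app` is a hypothetical namespace name: - - ```bash - # Prints "yes" if the current user can perform any verb on any resource in the namespace - kubectl auth can-i '*' '*' --namespace kots-app - ```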
- -#### Wildcard Permissions (Default) - -By default, when namespace-scoped access is enabled, KOTS attempts to automatically create the following Role to acquire wildcard (`* * *`) permissions in the target namespace: - - ```yaml - apiVersion: "rbac.authorization.k8s.io/v1" - kind: "Role" - metadata: - name: "kotsadm-role" - rules: - - apiGroups: ["*"] - resources: ["*"] - verbs: ["*"] - ``` - - To support this default behavior, the user must also have `* * *` permissions in the target namespace. - -#### Minimum KOTS RBAC Permissions - -In some cases, it is not possible to grant the user `* * *` permissions in the target namespace. For example, an organization might have security policies that prevent this level of permissions. - - If the user installing or upgrading KOTS cannot be granted `* * *` permissions in the namespace, then they can instead request the minimum RBAC permissions required by KOTS. Using the minimum KOTS RBAC permissions also requires manually creating a ServiceAccount, Role, and RoleBinding for KOTS, rather than allowing KOTS to automatically create a Role with `* * *` permissions. - - To use the minimum KOTS RBAC permissions to install or upgrade: - - 1. Ensure that the user has the minimum RBAC permissions required by KOTS. The following lists the minimum RBAC permissions: - - ```yaml - - apiGroups: [""] - resources: ["configmaps", "persistentvolumeclaims", "pods", "secrets", "services", "limitranges"] - verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] - - apiGroups: ["apps"] - resources: ["daemonsets", "deployments", "statefulsets"] - verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] - - apiGroups: ["batch"] - resources: ["jobs", "cronjobs"] - verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] - - apiGroups: ["networking.k8s.io", "extensions"] - resources: ["ingresses"] - verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] - - apiGroups: [""] - resources: ["namespaces", "endpoints", "serviceaccounts"] - verbs: ["get"] - - apiGroups: ["authorization.k8s.io"] - resources: ["selfsubjectaccessreviews", "selfsubjectrulesreviews"] - verbs: ["create"] - - apiGroups: ["rbac.authorization.k8s.io"] - resources: ["roles", "rolebindings"] - verbs: ["get"] - - apiGroups: [""] - resources: ["pods/log", "pods/exec"] - verbs: ["get", "list", "watch", "create"] - - apiGroups: ["batch"] - resources: ["jobs/status"] - verbs: ["get", "list", "watch"] - ``` - - :::note - The minimum RBAC requirements can vary slightly depending on the cluster's Kubernetes distribution and the version of KOTS. Contact your software vendor if you have the required RBAC permissions listed above and you see an error related to RBAC during installation or upgrade. - ::: - - 1.
Save the following ServiceAccount, Role, and RoleBinding to a single YAML file, such as `rbac.yaml`: - - ```yaml - apiVersion: v1 - kind: ServiceAccount - metadata: - labels: - kots.io/backup: velero - kots.io/kotsadm: "true" - name: kotsadm - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - labels: - kots.io/backup: velero - kots.io/kotsadm: "true" - name: kotsadm-role - rules: - - apiGroups: [""] - resources: ["configmaps", "persistentvolumeclaims", "pods", "secrets", "services", "limitranges"] - verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] - - apiGroups: ["apps"] - resources: ["daemonsets", "deployments", "statefulsets"] - verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] - - apiGroups: ["batch"] - resources: ["jobs", "cronjobs"] - verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] - - apiGroups: ["networking.k8s.io", "extensions"] - resources: ["ingresses"] - verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] - - apiGroups: [""] - resources: ["namespaces", "endpoints", "serviceaccounts"] - verbs: ["get"] - - apiGroups: ["authorization.k8s.io"] - resources: ["selfsubjectaccessreviews", "selfsubjectrulesreviews"] - verbs: ["create"] - - apiGroups: ["rbac.authorization.k8s.io"] - resources: ["roles", "rolebindings"] - verbs: ["get"] - - apiGroups: [""] - resources: ["pods/log", "pods/exec"] - verbs: ["get", "list", "watch", "create"] - - apiGroups: ["batch"] - resources: ["jobs/status"] - verbs: ["get", "list", "watch"] - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - labels: - kots.io/backup: velero - kots.io/kotsadm: "true" - name: kotsadm-rolebinding - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: kotsadm-role - subjects: - - kind: ServiceAccount - name: kotsadm - ``` - - 1. If the application contains any Custom Resource Definitions (CRDs), add the CRDs to the Role in the YAML file that you created in the previous step, granting as many permissions as possible: `["get", "list", "watch", "create", "update", "patch", "delete"]`. - - :::note - Contact your software vendor for information about any CRDs that are included in the application. - ::: - - **Example** - - ```yaml - rules: - - apiGroups: ["stable.example.com"] - resources: ["crontabs"] - verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] - ``` - - 1. Run the following command to create the RBAC resources for KOTS in the namespace: - - ``` - kubectl apply -f RBAC_YAML_FILE -n TARGET_NAMESPACE - ``` - - Replace: - * `RBAC_YAML_FILE` with the name of the YAML file that contains the ServiceAccount, Role, and RoleBinding that you created. - * `TARGET_NAMESPACE` with the namespace where the user will install KOTS. - -:::note -After manually creating these RBAC resources, the user must include both the `--ensure-rbac=false` and `--skip-rbac-check` flags when installing or upgrading. These flags prevent KOTS from checking for or attempting to create a Role with `* * *` permissions in the namespace. For more information, see [Prerequisites](installing-existing-cluster#prerequisites) in _Online Installation in Existing Clusters with KOTS_. -::: - -## Compatible Image Registries {#registries} - -A private image registry is required for air gap installations with KOTS in existing clusters. You provide the credentials for a compatible private registry during installation.
You can also optionally configure a local private image registry for use with installations in online (internet-connected) environments. - -Private registry settings can be changed at any time. For more information, see [Configuring Local Image Registries](image-registry-settings). - -KOTS has been tested for compatibility with the following registries: - -<DockerCompatibility/> - -## Firewall Openings for Online Installations with KOTS in an Existing Cluster {#firewall} - -<FirewallOpeningsIntro/> - -<table> - <tr> - <th width="50%">Domain</th> - <th>Description</th> - </tr> - <tr> - <td>Docker Hub</td> - <td><p>Some dependencies of KOTS are hosted as public images in Docker Hub. The required domains for this service are `index.docker.io`, `cdn.auth0.com`, `*.docker.io`, and `*.docker.com`.</p></td> - </tr> - <tr> - <td>`proxy.replicated.com` *</td> - <td><p>Private Docker images are proxied through `proxy.replicated.com`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p>For the range of IP addresses for `proxy.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L52-L57) in GitHub.</p></td> - </tr> - <tr> - <td>`replicated.app`</td> - <td><p>Upstream application YAML and metadata is pulled from `replicated.app`. The current running version of the application (if any), as well as a license ID and application ID to authenticate, are all sent to `replicated.app`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p>For the range of IP addresses for `replicated.app`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L60-L65) in GitHub.</p></td> - </tr> - <tr> - <td>`registry.replicated.com` **</td> - <td><p>Some applications host private images in the Replicated registry at this domain. The on-prem docker client uses a license ID to authenticate to `registry.replicated.com`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p> For the range of IP addresses for `registry.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L20-L25) in GitHub.</p></td> - </tr> - <tr> - <td>`kots.io`</td> - <td><p>Requests are made to this domain when installing the Replicated KOTS CLI. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p></td> - </tr> - <tr> - <td>`github.com`</td> - <td>Requests are made to this domain when installing the Replicated KOTS CLI. For information about retrieving GitHub IP addresses, see [About GitHub's IP addresses](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/about-githubs-ip-addresses) in the GitHub documentation.</td> - </tr> -</table> - -* Required only if the application uses the [Replicated proxy registry](/vendor/private-images-about). - -** Required only if the application uses the [Replicated registry](/vendor/private-images-replicated).
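- -As a quick sanity check before an online installation, you can confirm that the required domains are reachable from the installation environment. The following is a minimal sketch that assumes outbound HTTPS (port 443) is allowed and `curl` is available; it checks basic reachability only and does not validate the IP ranges listed above: - - ```bash - # Each command should print an HTTP status line; a connection error or - # timeout suggests a missing firewall opening - for domain in replicated.app proxy.replicated.com registry.replicated.com kots.io index.docker.io; do - curl -sSI --connect-timeout 5 "https://$domain" | head -n 1 - done - ```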
- -================ -File: docs/enterprise/installing-kurl-airgap.mdx -================ -import KurlAbout from "../partials/install/_kurl-about.mdx" -import IntroEmbedded from "../partials/install/_intro-embedded.mdx" -import IntroAirGap from "../partials/install/_intro-air-gap.mdx" -import PrereqsEmbeddedCluster from "../partials/install/_prereqs-embedded-cluster.mdx" -import HaLoadBalancerPrereq from "../partials/install/_ha-load-balancer-prereq.mdx" -import AirGapLicense from "../partials/install/_airgap-license-download.mdx" -import BuildAirGapBundle from "../partials/install/_airgap-bundle-build.mdx" -import DownloadAirGapBundle from "../partials/install/_airgap-bundle-download.mdx" -import ViewAirGapBundle from "../partials/install/_airgap-bundle-view-contents.mdx" -import LicenseFile from "../partials/install/_license-file-prereq.mdx" -import HAStep from "../partials/install/_embedded-ha-step.mdx" -import LoginPassword from "../partials/install/_embedded-login-password.mdx" -import DownloadKurlBundle from "../partials/install/_download-kurl-bundle.mdx" -import ExtractKurlBundle from "../partials/install/_extract-kurl-bundle.mdx" -import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" - -# Air Gap Installation with kURL - -<KurlAvailability/> - -<IntroEmbedded/> - -<IntroAirGap/> - -<KurlAbout/> - -## Prerequisites - -Complete the following prerequisites: - -<PrereqsEmbeddedCluster/> - -<HaLoadBalancerPrereq/> - -## Install {#air-gap} - -To install an application with kURL: - -1. Download the customer license: - - <AirGapLicense/> - -1. Go to the channel where the target release was promoted, then build and download the air gap bundle for the release: - - <BuildAirGapBundle/> - -1. <DownloadAirGapBundle/> - -1. <ViewAirGapBundle/> - -1. Download the `.tar.gz` air gap bundle for the kURL installer, which includes the components needed to run the kURL cluster and install the application with KOTS. kURL air gap bundles can be downloaded from the channel where the given release is promoted: - - * To download the kURL air gap bundle for the Stable channel: - - <DownloadKurlBundle/> - - * To download the kURL bundle for channels other than Stable: - - ```bash - replicated channel inspect CHANNEL - ``` - Replace `CHANNEL` with the exact name of the target channel, which can include uppercase letters or special characters, such as `Unstable` or `my-custom-channel`. - - In the output of this command, copy the curl command with the air gap URL. - -1. <ExtractKurlBundle/> - -1. Run one of the following commands to install in air gap mode: - - - For a regular installation, run: - - ```bash - cat install.sh | sudo bash -s airgap - ``` - - - For high availability, run: - - ```bash - cat install.sh | sudo bash -s airgap ha - ``` - -1. <HAStep/> - -1. <LoginPassword/> - -1. Go to the address provided in the `Kotsadm` field in the output of the installation command. For example, `Kotsadm: http://34.171.140.123:8800`. - -1. On the Bypass Browser TLS warning page, review the information about how to bypass the browser TLS warning, and then click **Continue to Setup**. - -1. On the HTTPS page, do one of the following: - - - To use the self-signed TLS certificate only, enter the hostname (required) if you are using the identity service. If you are not using the identity service, the hostname is optional. Click **Skip & continue**. - - To use a custom certificate only, enter the hostname (required) if you are using the identity service.
If you are not using the identity service, the hostname is optional. Then upload a private key and SSL certificate to secure communication between your browser and the Admin Console. Click **Upload & continue**. - -1. Log in to the Admin Console with the password that was provided in the `Login with password (will not be shown again):` field in the output of the installation command. - -1. Upload your license file. - -1. Upload the `.airgap` bundle for the release that you downloaded in an earlier step. - -1. On the **Preflight checks** page, the application-specific preflight checks run automatically. Preflight checks are conformance tests that run against the target namespace and cluster to ensure that the environment meets the minimum requirements to support the application. Click **Deploy**. - - :::note - Replicated recommends that you address any warnings or failures, rather than dismissing them. Preflight checks help ensure that your environment meets the requirements for application deployment. - ::: - -1. (Minimal RBAC Only) If you are installing with minimal role-based access control (RBAC), KOTS recognizes if the preflight checks failed due to insufficient privileges. When this occurs, a kubectl CLI preflight command displays that lets you manually run the preflight checks. The Admin Console then automatically displays the results of the preflight checks. Click **Deploy**. - - ![kubectl CLI preflight command](/images/kubectl-preflight-command.png) - - [View a larger version of this image](/images/kubectl-preflight-command.png) - - The Admin Console dashboard opens. - - On the Admin Console dashboard, the application status changes from Missing to Unavailable while the Deployment is being created. When the installation is complete, the status changes to Ready. - - ![Admin console dashboard showing ready status](/images/gitea-ec-ready.png) - - [View a larger version of this image](/images/gitea-ec-ready.png) - -1. (Recommended) Change the Admin Console login password: - 1. Click the menu in the top right corner of the Admin Console, then click **Change password**. - 1. Enter a new password in the dialog, and click **Change Password** to save. - - Replicated strongly recommends that you change the password from the default provided during installation in a kURL cluster. For more information, see [Changing an Admin Console Password](auth-changing-passwords). - -1. Add primary and secondary nodes to the cluster. You might add nodes to either meet application requirements or to support your usage of the application. See [Adding Nodes to Embedded Clusters](cluster-management-add-nodes). 
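- -After joining nodes, one way to confirm that the cluster recognizes them is to list the nodes from a shell on the primary node. This is a simple sanity check; node names, roles, and counts will vary by environment: - - ```bash - # All joined nodes should eventually report STATUS "Ready" - kubectl get nodes -o wide - ```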
- -================ -File: docs/enterprise/installing-kurl-automation.mdx -================ -import ConfigValuesExample from "../partials/configValues/_configValuesExample.mdx" -import ConfigValuesProcedure from "../partials/configValues/_config-values-procedure.mdx" -import PlaceholdersGlobal from "../partials/install/_placeholders-global.mdx" -import PlaceholderAirgapBundle from "../partials/install/_placeholder-airgap-bundle.mdx" -import PlaceholderNamespaceKurl from "../partials/install/_placeholder-namespace-embedded.mdx" -import IntroKurl from "../partials/install/_automation-intro-embedded.mdx" -import DownloadkURLBundle from "../partials/install/_download-kurl-bundle.mdx" -import ExtractKurlBundle from "../partials/install/_extract-kurl-bundle.mdx" -import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" - -# Installing with kURL from the Command Line - -<KurlAvailability/> - -This topic describes how to install an application with Replicated kURL from the command line. - -## Overview - -You can use the command line to install an application with Replicated kURL. A common use case for installing from the command line is to automate installation, such as performing headless installations as part of CI/CD pipelines. - -To install from the command line, you provide all the necessary installation assets, such as the license file and the application config values, with the installation command rather than through the Admin Console UI. Any preflight checks defined for the application run automatically during headless installations from the command line rather than being displayed in the Admin Console. - -## Prerequisite - -Create a ConfigValues YAML file to define the configuration values for the application release. The ConfigValues file allows you to pass the configuration values for an application from the command line with the install command, rather than through the Admin Console UI. For air-gapped environments, ensure that the ConfigValues file can be accessed from the installation environment. - -The KOTS ConfigValues file includes the fields that are defined in the KOTS Config custom resource for an application release, along with the user-supplied and default values for each field, as shown in the example below: - -<ConfigValuesExample/> - -<ConfigValuesProcedure/> - -## Online (Internet-Connected) Installation - -<IntroKurl/> - -To install with kURL on a VM or bare metal server: - -1. Create the kURL cluster: - - ```bash - curl -sSL https://k8s.kurl.sh/APP_NAME | sudo bash - ``` - -1. Install the application in the cluster: - - ```bash - kubectl kots install APP_NAME \ - --shared-password PASSWORD \ - --license-file PATH_TO_LICENSE \ - --config-values PATH_TO_CONFIGVALUES \ - --namespace default \ - --no-port-forward - ``` - - Replace: - - <PlaceholdersGlobal/> - - <PlaceholderNamespaceKurl/> - -## Air Gap Installation - -To install in an air-gapped kURL cluster: - -1. Download the kURL `.tar.gz` air gap bundle: - - <DownloadkURLBundle/> - -1. <ExtractKurlBundle/> - -1. Create the kURL cluster: - - ``` - cat install.sh | sudo bash -s airgap - ``` - -1. 
Install the application: - - ```bash - kubectl kots install APP_NAME \ - --shared-password PASSWORD \ - --license-file PATH_TO_LICENSE \ - --config-values PATH_TO_CONFIGVALUES \ - --airgap-bundle PATH_TO_AIRGAP_BUNDLE \ - --namespace default \ - --no-port-forward - ``` - - Replace: - - <PlaceholdersGlobal/> - - <PlaceholderAirgapBundle/> - - <PlaceholderNamespaceKurl/> - -================ -File: docs/enterprise/installing-kurl-requirements.mdx -================ -import FirewallOpeningsIntro from "../partials/install/_firewall-openings-intro.mdx" -import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" - -# kURL Installation Requirements - -<KurlAvailability/> - -This topic lists the installation requirements for Replicated kURL. Ensure that the installation environment meets these requirements before attempting to install. - -## Minimum System Requirements - -* 4 CPUs or equivalent per machine -* 8GB of RAM per machine -* 40GB of disk space per machine -* TCP ports 2379, 2380, 6443, 6783, and 10250 open between cluster nodes -* UDP port 8472 open between cluster nodes - - :::note - If the Kubernetes installer specification uses the deprecated kURL [Weave add-on](https://kurl.sh/docs/add-ons/weave), UDP ports 6783 and 6784 must be open between cluster nodes. Reach out to your software vendor for more information. - ::: - -* Root access is required -* (Rook Only) The Rook add-on version 1.4.3 and later requires block storage on each node in the cluster. For more information about how to enable block storage for Rook, see [Block Storage](https://kurl.sh/docs/add-ons/rook/#block-storage) in _Rook Add-On_ in the kURL documentation. - -## Additional System Requirements - -You must meet the additional kURL system requirements when applicable: - -- **Supported Operating Systems**: For supported operating systems, see [Supported Operating Systems](https://kurl.sh/docs/install-with-kurl/system-requirements#supported-operating-systems) in the kURL documentation. - -- **kURL Dependencies Directory**: kURL installs additional dependencies in the `/var/lib/kurl` directory, and the requirements for that directory must be met. See [kURL Dependencies Directory](https://kurl.sh/docs/install-with-kurl/system-requirements#kurl-dependencies-directory) in the kURL documentation. - -- **Networking Requirements**: Networking requirements include firewall openings, host firewall rules, and port availability. See [Networking Requirements](https://kurl.sh/docs/install-with-kurl/system-requirements#networking-requirements) in the kURL documentation. - -- **High Availability Requirements**: If you are operating a cluster with high availability, see [High Availability Requirements](https://kurl.sh/docs/install-with-kurl/system-requirements#high-availability-requirements) in the kURL documentation. - -- **Cloud Disk Performance**: For a list of cloud VM instance and disk combinations that are known to provide sufficient performance for etcd and pass the write latency preflight check, see [Cloud Disk Performance](https://kurl.sh/docs/install-with-kurl/system-requirements#cloud-disk-performance) in the kURL documentation. - -## Firewall Openings for Online Installations with kURL {#firewall} - -<FirewallOpeningsIntro/> - -<table> - <tr> - <th width="50%">Domain</th> - <th>Description</th> - </tr> - <tr> - <td>Docker Hub</td> - <td><p>Some dependencies of KOTS are hosted as public images in Docker Hub.
The required domains for this service are `index.docker.io`, `cdn.auth0.com`, `*.docker.io`, and `*.docker.com`.</p></td> - </tr> - <tr> - <td>`proxy.replicated.com` *</td> - <td><p>Private Docker images are proxied through `proxy.replicated.com`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p>For the range of IP addresses for `proxy.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L52-L57) in GitHub.</p></td> - </tr> - <tr> - <td>`replicated.app`</td> - <td><p>Upstream application YAML and metadata is pulled from `replicated.app`. The current running version of the application (if any), as well as a license ID and application ID to authenticate, are all sent to `replicated.app`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p>For the range of IP addresses for `replicated.app`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L60-L65) in GitHub.</p></td> - </tr> - <tr> - <td>`registry.replicated.com` **</td> - <td><p>Some applications host private images in the Replicated registry at this domain. The on-prem docker client uses a license ID to authenticate to `registry.replicated.com`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p> For the range of IP addresses for `registry.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L20-L25) in GitHub.</p></td> - </tr> - <tr> - <td><p>`k8s.kurl.sh`</p><p>`s3.kurl.sh`</p></td> - <td><p>kURL installation scripts and artifacts are served from [kurl.sh](https://kurl.sh). An application identifier is sent in a URL path, and bash scripts and binary executables are served from kurl.sh. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p> For the range of IP addresses for `k8s.kurl.sh`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L34-L39) in GitHub.</p><p> The range of IP addresses for `s3.kurl.sh` is the same as the range for the `kurl.sh` domain. For the range of IP addresses for `kurl.sh`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L28-L31) in GitHub.</p></td> - </tr> - <tr> - <td>`amazonaws.com`</td> - <td>`tar.gz` packages are downloaded from Amazon S3 during installations with kURL. For information about dynamically scraping the IP ranges to allowlist for accessing these packages, see [AWS IP address ranges](https://docs.aws.amazon.com/general/latest/gr/aws-ip-ranges.html#aws-ip-download) in the AWS documentation.</td> - </tr> -</table> - -* Required only if the application uses the [Replicated proxy registry](/vendor/private-images-about). - -** Required only if the application uses the [Replicated registry](/vendor/private-images-replicated).
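- -Before installing on multiple nodes, it can also be useful to spot-check that the required ports from the minimum system requirements are open between cluster nodes. The following is a minimal sketch that assumes netcat (`nc`) is installed and that `NODE_IP` is replaced with the address of another node; it covers only the TCP ports, not UDP port 8472: - - ```bash - # Each port should report success; a timeout suggests a firewall block between nodes - for port in 2379 2380 6443 6783 10250; do - nc -zvw2 NODE_IP "$port" - done - ```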
- -================ -File: docs/enterprise/installing-kurl.mdx -================ -import KurlAbout from "../partials/install/_kurl-about.mdx" -import IntroEmbedded from "../partials/install/_intro-embedded.mdx" -import PrereqsEmbeddedCluster from "../partials/install/_prereqs-embedded-cluster.mdx" -import HaLoadBalancerPrereq from "../partials/install/_ha-load-balancer-prereq.mdx" -import LicenseFile from "../partials/install/_license-file-prereq.mdx" -import HAStep from "../partials/install/_embedded-ha-step.mdx" -import LoginPassword from "../partials/install/_embedded-login-password.mdx" -import AppNameUI from "../partials/install/_placeholder-app-name-UI.mdx" -import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" - -# Online Installation with kURL - -<KurlAvailability/> - -<IntroEmbedded/> - -<KurlAbout/> - -## Prerequisites - -Complete the following prerequisites: - -<PrereqsEmbeddedCluster/> - -<LicenseFile/> - -<HaLoadBalancerPrereq/> - -## Install {#install-app} - -To install an application with kURL: - -1. Run one of the following commands to create the cluster with the kURL installer: - - * For a regular installation, run: - - ```bash - curl -sSL https://k8s.kurl.sh/APP_NAME | sudo bash - ``` - - * For high availability mode: - - ```bash - curl -sSL https://k8s.kurl.sh/APP_NAME | sudo bash -s ha - ``` - - Replace: - - <AppNameUI/> - -1. <HAStep/> - -1. <LoginPassword/> - -1. Go to the address provided in the `Kotsadm` field in the output of the installation command. For example, `Kotsadm: http://34.171.140.123:8800`. - -1. On the Bypass Browser TLS warning page, review the information about how to bypass the browser TLS warning, and then click **Continue to Setup**. - -1. On the HTTPS page, do one of the following: - - - To use the self-signed TLS certificate only, enter the hostname (required) if you are using the identity service. If you are not using the identity service, the hostname is optional. Click **Skip & continue**. - - To use a custom certificate only, enter the hostname (required) if you are using the identity service. If you are not using the identity service, the hostname is optional. Then upload a private key and SSL certificate to secure communication between your browser and the Admin Console. Click **Upload & continue**. - -1. Log in to the Admin Console with the password that was provided in the `Login with password (will not be shown again):` field in the output of the installation command. - -1. Upload your license file. - -1. On the **Preflight checks** page, the application-specific preflight checks run automatically. Preflight checks are conformance tests that run against the target namespace and cluster to ensure that the environment meets the minimum requirements to support the application. Click **Deploy**. - - :::note - Replicated recommends that you address any warnings or failures, rather than dismissing them. Preflight checks help ensure that your environment meets the requirements for application deployment. - ::: - -1. (Minimal RBAC Only) If you are installing with minimal role-based access control (RBAC), KOTS recognizes if the preflight checks failed due to insufficient privileges. When this occurs, a kubectl CLI preflight command displays that lets you manually run the preflight checks. The Admin Console then automatically displays the results of the preflight checks. Click **Deploy**. 
- - ![kubectl CLI preflight command](/images/kubectl-preflight-command.png) - - [View a larger version of this image](/images/kubectl-preflight-command.png) - - The Admin Console dashboard opens. - - On the Admin Console dashboard, the application status changes from Missing to Unavailable while the Deployment is being created. When the installation is complete, the status changes to Ready. - - ![Admin console dashboard showing ready status](/images/gitea-ec-ready.png) - - [View a larger version of this image](/images/gitea-ec-ready.png) - -1. (Recommended) Change the Admin Console login password: - 1. Click the menu in the top right corner of the Admin Console, then click **Change password**. - 1. Enter a new password in the dialog, and click **Change Password** to save. - - Replicated strongly recommends that you change the password from the default provided during installation in a kURL cluster. For more information, see [Changing an Admin Console Password](auth-changing-passwords). - -1. Add primary and secondary nodes to the cluster. You might add nodes to either meet application requirements or to support your usage of the application. See [Adding Nodes to Embedded Clusters](cluster-management-add-nodes). - -================ -File: docs/enterprise/installing-overview.md -================ -# Considerations Before Installing - -Before you install an application with KOTS in an existing cluster, consider the following installation options. - -## Online (Internet-Connected) or Air Gap Installations - -Most Kubernetes clusters are able to make outbound internet requests. Inbound access is never recommended or required. -As such, most cluster operators are able to perform an online installation. - -If the target cluster does not have outbound internet access, the application can also be delivered through an air gap installation. - -To install an application in an air-gapped environment, the cluster must have access to an image registry. In this case, KOTS re-tags and pushes all images to the target registry. - -For information about installing with KOTS in air-gapped environments, see [Air Gap Installation in Existing Clusters with KOTS](installing-existing-cluster-airgapped). - -## Hardened Environments - -By default, KOTS Pods and containers are not deployed with a specific security context. For installations into a hardened environment, you can use the `--strict-security-context` flag with the installation command so that KOTS runs with a strict security context for Pods and containers. - -For more information about the security context enabled by the `--strict-security-context` flag, see [kots install](/reference/kots-cli-install). - -## Configuring Local Image Registries - -During install, KOTS can re-tag and push images to a local image registry. -This is useful to enable CVE scans, image policy validation, and other pre-deployment rules. A private image registry is required for air gapped environments, and is optional for online environments. - -For information about image registry requirements, see [Compatible Image Registries](installing-general-requirements#registries). - -## Automated (Headless) Installation - -You can automate application installation in online and air-gapped environments using the KOTS CLI. In an automated installation, you provide all the information required to install and deploy the application with the `kots install` command, rather than providing this information in the Replicated Admin Console. 
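- -For example, a headless online installation might look like the following, where the application name, password, namespace, and file paths are illustrative placeholders: - - ```bash - kubectl kots install application-name \ - --shared-password my-admin-password \ - --license-file ./license.yaml \ - --config-values ./configvalues.yaml \ - --namespace app-namespace \ - --no-port-forward - ```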
- -For more information, see [Installing with the KOTS CLI](/enterprise/installing-existing-cluster-automation). - -## KOTS Installations Without Object Storage - -The KOTS Admin Console requires persistent storage for state. KOTS deploys MinIO for object storage by default. - -You can optionally install KOTS without object storage. When installed without object storage, KOTS deploys the Admin Console as a StatefulSet with an attached PersistentVolume (PV) instead of as a Deployment. - -For more information about how to install KOTS without object storage, see [Installing KOTS in Existing Clusters Without Object Storage](/enterprise/installing-stateful-component-requirements). - -================ -File: docs/enterprise/installing-stateful-component-requirements.md -================ -# Installing KOTS in Existing Clusters Without Object Storage - -This topic describes how to install Replicated KOTS in existing clusters without the default object storage, including limitations of installing without object storage. - -## Overview - -The Replicated KOTS Admin Console requires persistent storage for state. By default, KOTS deploys an S3-compatible object store to satisfy the Admin Console's persistent storage requirement. The Admin Console stores the following in object storage: -* Support bundles -* Application archives -* Backups taken with Replicated snapshots that are configured to use NFS or host path storage destinations - -For more information about the Admin Console's persistent storage requirements, see [Minimum System Requirements](/enterprise/installing-general-requirements#minimum-system-requirements) in _Installation Requirements_. - -For existing cluster installations, KOTS deploys MinIO for object storage by default. - -You can optionally install KOTS without object storage. When installed without object storage, KOTS deploys the Admin Console as a StatefulSet with an attached PersistentVolume (PV) instead of as a Deployment. In this case, support bundles and application archives are stored in the attached PV instead of in object storage. Additionally, for local snapshot storage, KOTS uses the `local-volume-provider` Velero plugin to store backups on local PVs instead of using object storage. The `local-volume-provider` plugin uses the existing Velero service account credentials to mount volumes directly to the Velero node-agent pods. For more information, see [`local-volume-provider`](https://github.com/replicatedhq/local-volume-provider) in GitHub. - -## How to Install and Upgrade Without Object Storage - -To install KOTS without object storage in an existing cluster, you can use the `--with-minio=false` flag. - -#### `kots install --with-minio=false` - -When `--with-minio=false` is used with the `kots install` command, KOTS does _not_ deploy MinIO. KOTS deploys the Admin Console as a StatefulSet with an attached PV instead of as a Deployment. For command usage, see [install](/reference/kots-cli-install/). - -#### `kots admin-console upgrade --with-minio=false` - -When `--with-minio=false` is used with the `kots admin-console upgrade` command, KOTS upgrades the existing Admin Console instance to the latest version, replaces the running Deployment with a StatefulSet, and removes MinIO after a data migration. This results in temporary downtime for the Admin Console, but deployed applications are unaffected. For command usage, see [admin-console upgrade](/reference/kots-cli-admin-console-upgrade/).
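- -For example, the following shows the flag used at install time and at upgrade time, where the application name and namespace are illustrative placeholders: - - ```bash - # Install without MinIO; the Admin Console runs as a StatefulSet with an attached PV - kubectl kots install application-name --namespace app-namespace --with-minio=false - - # Upgrade an existing Admin Console and migrate away from MinIO - kubectl kots admin-console upgrade --namespace app-namespace --with-minio=false - ```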
- -================ -File: docs/enterprise/monitoring-access-dashboards.mdx -================ -# Accessing Dashboards Using Port Forwarding - -This topic includes information about how to access Prometheus, Grafana, and Alertmanager in Replicated KOTS existing cluster and Replicated kURL installations. - -For information about how to configure Prometheus monitoring in existing cluster installations, see [Configuring Prometheus Monitoring in Existing Cluster KOTS Installations](monitoring-applications). - -## Overview - -The Prometheus [expression browser](https://prometheus.io/docs/visualization/browser/), Grafana, and some preconfigured dashboards are included with Kube-Prometheus for advanced visualization. Prometheus Alertmanager is also included for alerting. You can access Prometheus, Grafana, and Alertmanager dashboards using `kubectl port-forward`. - -:::note -You can also expose these pods on NodePorts or behind an ingress controller. This is an advanced use case. For information about exposing the pods on NodePorts, see [NodePorts](https://github.com/prometheus-operator/kube-prometheus/blob/main/docs/customizations/node-ports.md) in the kube-prometheus GitHub repository. For information about exposing the pods behind an ingress controller, see [Expose via Ingress](https://github.com/prometheus-operator/kube-prometheus/blob/main/docs/customizations/exposing-prometheus-alertmanager-grafana-ingress.md) in the kube-prometheus GitHub repository. -::: - -## Prerequisite - -For existing cluster KOTS installations, first install Prometheus in the cluster and configure monitoring. See [Configuring Prometheus Monitoring in Existing Cluster KOTS Installations](monitoring-applications). - -## Access Prometheus - -To access the Prometheus dashboard: - -1. Run the following command to port forward the Prometheus service: - - ```bash - kubectl --namespace monitoring port-forward svc/prometheus-k8s 9090 - ``` - -1. Access the dashboard at http://localhost:9090. - -## Access Grafana - -Users can access the Grafana dashboard by logging in using a default username and password. For information about configuring Grafana, see the [Grafana documentation](https://grafana.com/docs/). For information about constructing Prometheus queries, see [Querying Prometheus](https://prometheus.io/docs/prometheus/latest/querying/basics/) in the Prometheus documentation. - -To access the Grafana dashboard: - -1. Run the following command to port forward the Grafana service: - - ```bash - kubectl --namespace monitoring port-forward deployment/grafana 3000 - ``` -1. Access the dashboard at http://localhost:3000. -1. Log in to Grafana: - * **Existing cluster**: Use the default Grafana username and password: `admin:admin`. - * **kURL cluster**: The Grafana password is randomly generated by kURL and is displayed on the command line after kURL provisions the cluster. To log in, use this password generated by kURL and the username `admin`. - - To retrieve the password, run the following kubectl command: - - ``` - kubectl get secret -n monitoring grafana-admin -o jsonpath="{.data.admin-password}" | base64 -d - ``` - -## Access Alertmanager - -Alerting with Prometheus has two phases: - -* Phase 1: Alerting rules in Prometheus servers send alerts to an Alertmanager. - -* Phase 2: The Alertmanager then manages those alerts, including silencing, inhibition, aggregation, and sending out notifications through methods such as email, on-call notification systems, and chat platforms.
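- -As an illustration, a minimal Alertmanager configuration that routes all alerts to a single email receiver might look like the following. The SMTP host and addresses are placeholder values, not defaults: - - ```yaml - global: - smtp_smarthost: 'smtp.example.com:587' - smtp_from: 'alertmanager@example.com' - route: - receiver: email-team - receivers: - - name: email-team - email_configs: - - to: 'oncall@example.com' - ```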
- -For more information about configuring Alertmanager, see [Configuration](https://prometheus.io/docs/alerting/configuration/) in the Prometheus documentation. - -To access the Alertmanager dashboard: - -1. Run the following command to port forward the Alertmanager service: - - ``` - kubectl --namespace monitoring port-forward svc/prometheus-alertmanager 9093 - ``` - -1. Access the dashboard at http://localhost:9093. - -================ -File: docs/enterprise/monitoring-applications.mdx -================ -import OverviewProm from "../partials/monitoring/_overview-prom.mdx" - -# Configuring Prometheus Monitoring in Existing Cluster KOTS Installations - -This topic describes how to monitor applications and clusters with Prometheus in existing cluster installations with Replicated KOTS. - -For information about how to access Prometheus, Grafana, and Alertmanager, see [Accessing Dashboards Using Port Forwarding](/enterprise/monitoring-access-dashboards). - -For information about consuming Prometheus metrics externally in kURL installations, see [Consuming Prometheus Metrics Externally](monitoring-external-prometheus). - -## Overview - -<OverviewProm/> - -## Configure Prometheus Monitoring - -For existing cluster installations with KOTS, users can install Prometheus in the cluster and then connect the Admin Console to the Prometheus endpoint to enable monitoring. - -### Step 1: Install Prometheus in the Cluster {#configure-existing} - -Replicated recommends that you use CoreOS's Kube-Prometheus distribution for installing and configuring highly available Prometheus on an existing cluster. For more information, see the [kube-prometheus](https://github.com/coreos/kube-prometheus) GitHub repository. - -This repository collects Kubernetes manifests, Grafana dashboards, and Prometheus rules combined with documentation and scripts to provide easy to operate end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus Operator. - -To install Prometheus using the recommended Kube-Prometheus distribution: - -1. Clone the [kube-prometheus](https://github.com/coreos/kube-prometheus) repository to the device where there is access to the cluster. - -1. Use `kubectl` to create the resources on the cluster: - - ```bash - # Create the namespace and CRDs, and then wait for them to be available before creating the remaining resources - kubectl create -f manifests/setup - until kubectl get servicemonitors --all-namespaces ; do date; sleep 1; echo ""; done - kubectl create -f manifests/ - ``` - - For advanced and cluster-specific configuration, you can customize Kube-Prometheus by compiling the manifests using jsonnet. For more information, see the [jsonnet website](https://jsonnet.org/). - - For more information about advanced Kube-Prometheus configuration options, see [Customizing Kube-Prometheus](https://github.com/coreos/kube-prometheus#customizing-kube-prometheus) in the kube-prometheus GitHub repository. - -### Step 2: Connect to a Prometheus Endpoint - -To view graphs on the Admin Console dashboard, provide the address of a Prometheus instance installed in the cluster. - -To connect the Admin Console to a Prometheus endpoint: - -1. On the Admin Console dashboard, under Monitoring, click **Configure Prometheus Address**. -1. Enter the address for the Prometheus endpoint in the text box and click **Save**. - - ![Configuring Prometheus](/images/kotsadm-dashboard-configureprometheus.png) - - Graphs appear on the dashboard shortly after saving the address. 
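- -If you installed Prometheus with Kube-Prometheus as described above, the endpoint is typically the `prometheus-k8s` service in the `monitoring` namespace. As a quick check, you can look up the service and port before entering the address; the cluster-local DNS name shown in the comment is the usual default, but verify it in your cluster: - - ```bash - # Confirm the Prometheus service name and port - kubectl get svc prometheus-k8s -n monitoring - # The in-cluster address is then typically: - # http://prometheus-k8s.monitoring.svc.cluster.local:9090 - ```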
- -================ -File: docs/enterprise/monitoring-external-prometheus.md -================ -import OverviewProm from "../partials/monitoring/_overview-prom.mdx" -import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" - -# Consuming Prometheus Metrics Externally - -<KurlAvailability/> - -This topic describes how to consume Prometheus metrics in Replicated kURL clusters from a monitoring service that is outside the cluster. - -For information about how to access Prometheus, Grafana, and Alertmanager, see [Accessing Dashboards Using Port Forwarding](/enterprise/monitoring-access-dashboards). - -## Overview - -<OverviewProm/> - -For kURL installations, if the [kURL Prometheus add-on](https://kurl.sh/docs/add-ons/prometheus) is included in the kURL installer spec, then the Prometheus monitoring system is installed alongside the application. No additional configuration is required to collect metrics and view any default and custom graphs on the Admin Console dashboard. - -Prometheus is deployed in kURL clusters as a NodePort service named `prometheus-k8s` in the `monitoring` namespace. The `prometheus-k8s` service is exposed on the IP address for each node in the cluster at port 30900. - -You can run the following command to view the `prometheus-k8s` service in your cluster: - -``` -kubectl get services -l app=kube-prometheus-stack-prometheus -n monitoring -``` -The output of the command includes details about the Prometheus service, including the type of service and the ports where the service is exposed. For example: - -``` -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -prometheus-k8s NodePort 10.96.2.229 <none> 9090:30900/TCP 5h -``` -As shown in the example above, port 9090 on the `prometheus-k8s` service maps to port 30900 on each of the nodes. - -For more information about NodePort services, see [Type NodePort](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport) in _Services_ in the Kubernetes documentation. - -## Prerequisite - -Before you can consume Prometheus metrics in kURL clusters externally, ensure that firewall rules on all nodes in the cluster allow inbound TCP traffic on port 30900. - -## Consume Metrics from External Services - -You can connect to the `prometheus-k8s` service on port 30900 from any node in the cluster to access Prometheus metrics emitted by kURL clusters. - -To consume Prometheus metrics from an external service: - -1. Get the external IP address for one of the nodes in the cluster. You will use this IP address in the next step to access the `prometheus-k8s` service. - - You can find the IP address for a node in the output of the following command: - - ``` - kubectl describe node NODE_NAME - ``` - Where `NODE_NAME` is the name of a node in the cluster. - - :::note - Depending on the node's network configuration, there might be different IP addresses for accessing the node from an external or internal network. For example, the IP address 10.128.0.35 might be assigned to the node in the internal network, whereas the IP address used to access the node from external or public networks is 34.28.178.93. - - Consult your infrastructure team to assist you in determining which IP address to use. - ::: - -1. In a browser, go to `http://NODE_IP_ADDRESS:30900` to verify that you can connect to the `prometheus-k8s` NodePort service. Replace `NODE_IP_ADDRESS` with the external IP address that you copied in the first step. For example, `http://34.28.178.93:30900`.
- - If the connection is successful, the Prometheus UI displays in the browser. - -1. From your external monitoring solution, add Prometheus as an HTTP data source using the same URL from the previous step: `http://NODE_IP_ADDRESS:30900`. - -================ -File: docs/enterprise/sbom-validating.md -================ -# Validating SBOM Signatures - -This topic describes how to validate software bill of materials (SBOM) signatures for Replicated KOTS, Replicated kURL, and Troubleshoot releases. - -## About Software Bills of Materials - -A _software bill of materials_ (SBOM) is an inventory of all components used to create a software package. SBOMs have emerged as critical building blocks in software security and software supply chain risk management. - -When you install software, validating an SBOM signature can help you understand exactly what the software package is installing. This information can help you ensure that the files are compatible with your licensing policies and help determine whether there is exposure to CVEs. - -## Prerequisite - -Before you perform these tasks, you must install cosign. For more information, see the [sigstore repository](https://github.com/sigstore/cosign) in GitHub. - - -## Validate a KOTS SBOM Signature - -Each KOTS release includes a signed SBOM for KOTS Go dependencies. - -To validate a KOTS SBOM signature: - -1. Go to the [KOTS GitHub repository](https://github.com/replicatedhq/kots/releases) and download the specific KOTS release that you want to validate. -1. Extract the `.tar.gz` file. - - **Example:** - - ``` - tar -zxvf kots_darwin_all.tar.gz - ``` - A KOTS binary and SBOM folder are created. -1. Run the following cosign command to validate the signatures: - ``` - cosign verify-blob --key sbom/key.pub --signature sbom/kots-sbom.tgz.sig sbom/kots-sbom.tgz - ``` - -## Validate a kURL SBOM Signature - -If a kURL installer is used, then signed SBOMs for kURL Go and JavaScript dependencies are combined into a TAR file and are included with the release. - -To validate a kURL SBOM signature: - -1. Go to the [kURL GitHub repository](https://github.com/replicatedhq/kURL/releases) and download the specific kURL release files that you want to validate. - - There are three assets related to the SBOM: - - - `kurl-sbom.tgz` contains SBOMs for Go and JavaScript dependencies - - `kurl-sbom.tgz.sig` is the digital signature for `kurl-sbom.tgz` - - `key.pub` is the public key from the key pair used to sign `kurl-sbom.tgz` - -2. Run the following cosign command to validate the signatures: - ``` - cosign verify-blob --key key.pub --signature kurl-sbom.tgz.sig kurl-sbom.tgz - ``` - -## Validate a Troubleshoot SBOM Signature - -A signed SBOM for Troubleshoot dependencies is included in each release. - -To validate a Troubleshoot SBOM signature: - -1. Go to the [Troubleshoot GitHub repository](https://github.com/replicatedhq/troubleshoot/releases) and download the specific Troubleshoot release files that you want to validate. - - There are three assets related to the SBOM: - - - `troubleshoot-sbom.tgz` contains a software bill of materials for Troubleshoot. - - `troubleshoot-sbom.tgz.sig` is the digital signature for `troubleshoot-sbom.tgz` - - `key.pub` is the public key from the key pair used to sign `troubleshoot-sbom.tgz` - -2.
Run the following cosign command to validate the signatures: - ``` - cosign verify-blob --key key.pub --signature troubleshoot-sbom.tgz.sig troubleshoot-sbom.tgz - ``` - -================ -File: docs/enterprise/snapshots-config-workflow.md -================ -# How to Set Up Backup Storage - -This topic describes the process of setting up backup storage for the Replicated snapshots feature. - -## Configuring Backup Storage for Embedded Clusters - -You must configure a backup storage destination before you can create backups. This procedure describes how to configure backup storage for snapshots for _embedded clusters_ created by Replicated kURL. - -To configure snapshots for embedded clusters: - -1. On the Snapshots tab in the Admin Console, click **Check for Velero** to see whether kURL already installed Velero in the embedded cluster. - -1. If Velero was installed, update the default internal storage settings in the Admin Console because internal storage is insufficient for full backups. See [Updating Settings in the Admin Console](snapshots-updating-with-admin-console). - -1. If Velero was not installed: - - 1. Install the Velero CLI. See [Installing the Velero CLI](snapshots-velero-cli-installing). - - 1. Install Velero and configure a storage destination using one of the following procedures. - - - [Configuring a Host Path Storage Destination](snapshots-configuring-hostpath) - - [Configuring an NFS Storage Destination](snapshots-configuring-nfs) - - [Configuring Other Storage Destinations](snapshots-storage-destinations) - -1. Optionally increase the default memory for the node-agent (restic) Pod. See [Configuring Namespace Access and Memory Limit](snapshots-velero-installing-config). - -## Configuring Backup Storage for Existing Clusters - -You must configure a backup storage destination before you can create backups. - -Follow this process to install Velero and configure the snapshots feature: - -1. Install the Velero CLI. See [Installing the Velero CLI](snapshots-velero-cli-installing). - -1. Install Velero and configure a storage destination using one of the following procedures. - - - [Configuring a Host Path Storage Destination](snapshots-configuring-hostpath) - - [Configuring an NFS Storage Destination](snapshots-configuring-nfs) - - [Configuring Other Storage Destinations](snapshots-storage-destinations) - -1. Enable access to the Velero namespace if you are using RBAC and optionally increase the default memory for the node-agent (restic) Pod. See [Configuring Namespace Access and Memory Limit](snapshots-velero-installing-config). - -## Next Step - -After you configure a storage destination, you can create a backup. See [Creating and Scheduling Backups](snapshots-creating). - -## Additional Resources - -* [Restoring Full Backups](snapshots-restoring-full) -* [Troubleshooting Snapshots](snapshots-troubleshooting-backup-restore) - -================ -File: docs/enterprise/snapshots-configuring-hostpath.md -================ -import InstallVelero from "../partials/snapshots/_installVelero.mdx" -import RegistryCredNote from "../partials/snapshots/_registryCredentialsNote.mdx" -import ResticDaemonSet from "../partials/snapshots/_resticDaemonSet.mdx" -import UpdateDefaultStorage from "../partials/snapshots/_updateDefaultStorage.mdx" -import CheckVersion from "../partials/snapshots/_checkVersion.mdx" - -# Configuring a Host Path Storage Destination - -This topic describes how to install Velero and configure a host path as your storage destination for backups.

:::note
<UpdateDefaultStorage/>
:::

## Requirements

* The host path must be a dedicated directory. Do not use a partition used by a service like Docker or Kubernetes for ephemeral storage.
* The host path must exist and be writable by the user:group 1001:1001 on all nodes in the cluster. For example, in a Linux environment you might run `sudo chown -R 1001:1001 /backups` to change the user:group permissions. A quick per-node check is shown after the installation procedures below.

  If you use a mounted directory for the storage destination, such as one that is created with the Common Internet File System (CIFS) or Server Message Block (SMB) protocols, ensure that you configure the user:group 1001:1001 permissions on all nodes in the cluster and from the server side as well.

  You cannot change the permissions of a mounted network shared filesystem from the client side. To reassign the user:group to 1001:1001 for a directory that is already mounted, you must remount the directory. For example, for a CIFS mounted directory, specify the `uid=1001,gid=1001` mount options in the CIFS mount command.

## Prerequisites

Complete the following items before you perform this task:

* Review the limitations and considerations. See [Limitations and Considerations](/vendor/snapshots-overview#limitations-and-considerations) in _About Backup and Restore_.
* Install the Velero CLI. See [Installing the Velero CLI](snapshots-velero-cli-installing).

## Install Velero and Configure Host Path Storage in Online Environments

To install Velero and configure host path storage in online environments:

1. <InstallVelero/>

1. <ResticDaemonSet/>

1. Run the following command to configure the host path storage destination:

   ```
   kubectl kots velero configure-hostpath --namespace NAME --hostpath /PATH
   ```

   Replace:
   - `NAME` with the namespace where the Replicated KOTS Admin Console is installed and running
   - `PATH` with the path to the directory where the backups will be stored

   For more information about required storage destination flags, see [`velero`](/reference/kots-cli-velero-index) in _Reference_.

## Install Velero and Configure Host Path Storage in Air Gapped Environments

To install Velero and configure host path storage in air gapped environments:

1. <CheckVersion/>

1. <InstallVelero/>

   <RegistryCredNote/>

1. <ResticDaemonSet/>

1. Run the following command to configure the host path storage destination:

   ```
   kubectl kots velero configure-hostpath \
     --namespace NAME \
     --hostpath /PATH \
     --kotsadm-registry REGISTRY_HOSTNAME[/REGISTRY_NAMESPACE] \
     --registry-username REGISTRY_USERNAME \
     --registry-password REGISTRY_PASSWORD
   ```

   Replace:
   - `NAME` with the namespace where the Admin Console is installed and running
   - `PATH` with the path to the directory where the backups will be stored
   - `REGISTRY_HOSTNAME` with the registry endpoint where the images are hosted
   - `REGISTRY_NAMESPACE` with the registry namespace where the images are hosted (Optional)
   - `REGISTRY_USERNAME` with the username to use to authenticate with the registry
   - `REGISTRY_PASSWORD` with the password to use to authenticate with the registry

   For more information about required storage destination flags, see [`velero`](/reference/kots-cli-velero-index) in _Reference_.
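
With either installation method, backups fail at run time if any node cannot write to the host path as user:group 1001:1001, per the requirements above. The following is a minimal per-node check, assuming the hypothetical path `/backups`:

```bash
# Confirm the directory exists and is owned by 1001:1001
ls -ld /backups

# Test a write as uid 1001, then clean up the test file
sudo -u '#1001' touch /backups/velero-write-test && echo "writable by 1001"
sudo rm /backups/velero-write-test
```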

## Configure Host Path Storage in the Admin Console

Alternatively, when the Admin Console and application are already installed, you can start in the Admin Console to install Velero and configure a host path storage destination.

To install Velero and configure host path storage for existing clusters:

1. From the Admin Console, click **Snapshots > Settings and Schedule**.

1. Click **Add a new storage destination**.

   The Add a new destination dialog opens and shows instructions for setting up Velero with different providers.

1. Click **Host Path**.

   ![Snapshot Provider Host Path](/images/snapshot-provider-hostpath.png)

1. In the Configure Host Path dialog, enter the path to the directory where the backups will be stored. Click **Get instructions**.

   ![Snapshot Provider Host Path Fields](/images/snapshot-provider-hostpath-field.png)

   A dialog opens with instructions on how to set up Velero with the specified host path configuration.

1. Follow the steps in the dialog to install Velero and configure the storage destination.

   ![Snapshot Provider File System Instructions](/images/snapshot-provider-hostpath-instructions.png)

1. Return to the Admin Console and either click **Check for Velero** or refresh the page to verify that the Velero installation is detected.

## Next Steps

* (Existing Clusters Only) Configure Velero namespace access if you are using minimal RBAC. See [Configuring Namespace Access and Memory Limit](snapshots-velero-installing-config).
* (Optional) Increase the default memory limits. See [Configuring Namespace Access and Memory Limit](snapshots-velero-installing-config).
* Create or schedule backups. See [Creating and Scheduling Backups](snapshots-creating).

## Additional Resources

* [Troubleshooting Snapshots](/enterprise/snapshots-troubleshooting-backup-restore)

================
File: docs/enterprise/snapshots-configuring-nfs.md
================
import InstallVelero from "../partials/snapshots/_installVelero.mdx"
import RegistryCredNote from "../partials/snapshots/_registryCredentialsNote.mdx"
import ResticDaemonSet from "../partials/snapshots/_resticDaemonSet.mdx"
import UpdateDefaultStorage from "../partials/snapshots/_updateDefaultStorage.mdx"
import CheckVersion from "../partials/snapshots/_checkVersion.mdx"

# Configuring an NFS Storage Destination

This topic describes how to install Velero and configure a Network File System (NFS) as your storage destination for backups.

:::note
<UpdateDefaultStorage/>
:::

## Requirements

Configuring an NFS server as a snapshots storage destination has the following requirements:

* The NFS server must be configured to allow access from all of the nodes in the cluster.
* The NFS directory must be writable by the user:group 1001:1001. Ensure that you configure these permissions for the directory on the NFS server. A way to verify this from a cluster node is shown after the prerequisites below.
* All of the nodes in the cluster must have the necessary NFS client packages installed to be able to communicate with the NFS server. For example, the `nfs-common` package is commonly used on Ubuntu.
* Any firewalls must be properly configured to allow traffic between the NFS server and clients (cluster nodes).

## Prerequisites

Complete the following items before you perform this task:

* Review the limitations and considerations. See [Limitations and Considerations](/vendor/snapshots-overview#limitations-and-considerations) in _About Backup and Restore_.
* Install the Velero CLI. See [Installing the Velero CLI](snapshots-velero-cli-installing).
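
Because permission or connectivity problems usually surface only later as failed backups, it can be worth checking the requirements above from a cluster node first. A minimal sketch, assuming an Ubuntu node with `nfs-common` installed and a hypothetical server `nfs.example.com` exporting `/backups`:

```bash
# Confirm that the server advertises the export to this node
showmount -e nfs.example.com

# Test-mount the export and verify that uid 1001 can write to it
sudo mount -t nfs nfs.example.com:/backups /mnt
sudo -u '#1001' touch /mnt/velero-write-test && echo "writable by 1001"
sudo rm /mnt/velero-write-test
sudo umount /mnt
```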

## Install Velero and Configure NFS Storage in Online Environments

To install Velero and configure NFS storage in an online environment:

1. <InstallVelero/>

1. <ResticDaemonSet/>

1. Run the following command to configure the NFS storage destination:

   ```
   kubectl kots velero configure-nfs --namespace NAME --nfs-path PATH --nfs-server HOST
   ```

   Replace:
   - `NAME` with the namespace where the Replicated KOTS Admin Console is installed and running
   - `PATH` with the path that is exported by the NFS server
   - `HOST` with the hostname or IP address of the NFS server

   For more information about required storage destination flags, see [`velero`](/reference/kots-cli-velero-index) in _Reference_.

## Install Velero and Configure NFS Storage in Air Gapped Environments

To install Velero and configure NFS storage in air gapped environments:

1. <CheckVersion/>

1. <InstallVelero/>

   <RegistryCredNote/>

1. <ResticDaemonSet/>

1. Run the following command to configure the NFS storage destination:

   ```
   kubectl kots velero configure-nfs \
     --namespace NAME \
     --nfs-server HOST \
     --nfs-path PATH \
     --kotsadm-registry REGISTRY_HOSTNAME[/REGISTRY_NAMESPACE] \
     --registry-username REGISTRY_USERNAME \
     --registry-password REGISTRY_PASSWORD
   ```

   Replace:
   - `NAME` with the namespace where the Admin Console is installed and running
   - `HOST` with the hostname or IP address of the NFS server
   - `PATH` with the path that is exported by the NFS server
   - `REGISTRY_HOSTNAME` with the registry endpoint where the images are hosted
   - `REGISTRY_NAMESPACE` with the registry namespace where the images are hosted (Optional)
   - `REGISTRY_USERNAME` with the username to use to authenticate with the registry
   - `REGISTRY_PASSWORD` with the password to use to authenticate with the registry

   For more information about required storage destination flags, see [`velero`](/reference/kots-cli-velero-index) in _Reference_.

## Configure NFS Storage in the Admin Console

Alternatively, when the Admin Console and application are already installed, you can start in the Admin Console to install Velero and configure an NFS storage destination.

To install Velero and configure NFS storage for existing clusters:

1. From the Admin Console, click **Snapshots > Settings and Schedule**.

1. Click **Add a new storage destination**.

   The Add a new destination dialog opens and shows instructions for setting up Velero with different providers.

1. Click **NFS**.

   ![Snapshot Provider NFS](/images/snapshot-provider-nfs.png)

1. In the Configure NFS dialog, enter the NFS server hostname or IP address, and the path that is exported by the NFS server. Click **Get instructions**.

   ![Snapshot Provider NFS Fields](/images/snapshot-provider-nfs-fields.png)

   A dialog opens with instructions on how to set up Velero with the specified NFS configuration.

1. Follow the steps in the dialog to install Velero and configure the storage destination.

   ![Snapshot Provider File System Instructions](/images/snapshot-provider-nfs-instructions.png)

1. Return to the Admin Console and either click **Check for Velero** or refresh the page to verify that the Velero installation is detected.

## Next Steps

* (Existing Clusters Only) Configure Velero namespace access if you are using minimal RBAC. See [Configuring Namespace Access and Memory Limit](snapshots-velero-installing-config).
* (Optional) Increase the default memory limits. See [Configuring Namespace Access and Memory Limit](snapshots-velero-installing-config).
* Create or schedule backups. See [Creating and Scheduling Backups](snapshots-creating).

## Additional Resources

* [Troubleshooting Snapshots](snapshots-troubleshooting-backup-restore)

================
File: docs/enterprise/snapshots-creating.md
================
# Creating and Scheduling Backups

This topic describes how to use the Replicated snapshots feature to create backups. It also describes how to use the Replicated KOTS Admin Console to create a schedule for automatic backups. For information about restoring, see [Restoring from Backups](snapshots-restoring-full).

## Prerequisites

- Before you can create backups, you must configure a storage destination:

  - [Configuring a Host Path Storage Destination](snapshots-configuring-hostpath)
  - [Configuring an NFS Storage Destination](snapshots-configuring-nfs)
  - [Configuring Other Storage Destinations](snapshots-storage-destinations)

- If you have multiple applications in the Admin Console, make sure that each application has its own Backup custom resource file so that they can be included in the full backup. Use the **View files** tab to check for the Backup custom resources (`kind: Backup`, `apiVersion: velero.io/v1`).

  If any Backup custom resource files are missing, contact your vendor.

## Create a Full Backup (Recommended) {#full}

Full backups, or _instance snapshots_, back up the Admin Console and all application data, including application volumes and manifest files. If you manage multiple applications with the Admin Console, data from all applications that support backups is included in a full backup.

From a full backup, you can:
* Restore application and Admin Console data
* Restore only application data
* Restore only Admin Console data

You can create a full backup with the following methods:
* [Create a Backup with the CLI](#cli-backup)
* [Create a Backup in the Admin Console](#admin-console-backup)

### Create a Backup with the CLI {#cli-backup}

To create a full backup with the Replicated KOTS CLI, run the following command:

```
kubectl kots backup --namespace NAMESPACE
```

Replace `NAMESPACE` with the namespace where the Admin Console is installed.

For more information, see [backup](/reference/kots-cli-backup-index) in _KOTS CLI_.

### Create a Backup in the Admin Console {#admin-console-backup}

To create a full backup in the Admin Console:

1. To check if backups are supported for an application, go to the **View files** page, open the `upstream` folder, and confirm that the application includes a manifest file with `kind: Backup` and `apiVersion: velero.io/v1`. This manifest also shows which pod volumes are being backed up.

1. Go to **Snapshots > Full Snapshots (Instance)**.

1. Click **Start a snapshot**.

   When the backup is complete, it appears in the list of backups on the page, as shown in the following image:

   ![Full snapshot page with one completed snapshot](/images/snapshot-instance-list.png)

## Create a Partial Backup {#partial}

Partial backups, or _application snapshots_, back up application volumes and application manifests only. Partial backups do not back up Admin Console data.

:::note
Replicated recommends that you create full backups instead of partial backups because partial backups are not suitable for disaster recovery. See [Create a Full Backup](#full) above.
:::

To create a partial backup in the Admin Console:

1. Go to **Snapshots > Partial Snapshots (Application)**.

1. If you manage multiple applications in the Admin Console, use the dropdown to select the application that you want to back up.

1. Click **Start a snapshot**.

   When the snapshot is complete, it appears in the list of snapshots on the page, as shown in the following image:

   ![Partial snapshot page with one completed snapshot](/images/snapshot-application-list.png)

## Schedule Automatic Backups

You can use the Admin Console to schedule full or partial backups. This is useful for automatically creating regular backups of Admin Console and application data.

To schedule automatic backups in the Admin Console:

1. Go to **Snapshots > Settings & Schedule**.

1. Under **Automatic snapshots**, select **Full snapshots (Instance)** or **Partial snapshots (Application)**, depending on the type of backup that you want to schedule.

   ![Snapshot Settings and Schedule page](/images/snapshot-schedule.png)

1. (Partial Backups Only) If you manage multiple applications in the Admin Console, use the dropdown to select the application that you want to back up.

1. Select **Enable automatic scheduled snapshots**.

1. Configure the automatic backup schedule for the type of snapshots that you selected:

   * For **Schedule**, select Hourly, Daily, Weekly, or Custom.
   * For **Cron Expression**, enter a cron expression to create a custom automatic backup schedule. For example, `0 2 * * *` creates a backup daily at 2:00 AM. For information about supported cron expressions, see [Cron Expressions](/reference/cron-expressions).

1. (Optional) For **Retention Policy**, edit the amount of time that backup data is saved. By default, backup data is saved for 30 days.

   The retention policy applies to all backups, including both automatically and manually created backups. Changing the retention policy affects only backups created after the time of the change.
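
Both scheduled and manual backups are created as Velero `Backup` resources behind the scenes, so you can also watch their progress and expiration (set by the retention policy) outside the Admin Console. A quick check, assuming the Velero CLI is installed and Velero runs in the default `velero` namespace:

```bash
# Lists backups with their status, errors, and expiration dates
velero backup get
```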

## Additional Resources

[Troubleshooting Snapshots](snapshots-troubleshooting-backup-restore)

================
File: docs/enterprise/snapshots-restoring-full.mdx
================
import RestoreTable from "../partials/snapshots/_restoreTable.mdx"
import RestoreTypes from "../partials/snapshots/_restore-types.mdx"
import GetBackups from "../partials/snapshots/_step-get-backups.mdx"
import Restore from "../partials/snapshots/_step-restore.mdx"
import Dr from "../partials/snapshots/_limitation-dr.mdx"
import Os from "../partials/snapshots/_limitation-os.mdx"
import InstallMethod from "../partials/snapshots/_limitation-install-method.mdx"
import CliRestores from "../partials/snapshots/_limitation-cli-restores.mdx"

# Restoring from Backups

This topic describes how to restore from full or partial backups using Replicated snapshots.

## Overview

<RestoreTypes/>

You can do any type of restore from a full backup using the KOTS CLI. You can also restore an application from a full or partial backup using the Admin Console. If you need to restore the Admin Console, you must use the KOTS CLI, because the Admin Console is recreated and disconnected during the restore process.

## Limitations

The following limitations apply to restoring from backups using snapshots:

* <Dr/>
* <Os/>
* <InstallMethod/>
* <CliRestores/>

For a full list of limitations and considerations related to the snapshots feature, see [Limitations and Considerations](/vendor/snapshots-overview#limitations-and-considerations) in _About Backup and Restore_.

## Restore From a Full Backup Using the CLI {#full-cli}

You can use the KOTS CLI to restore both the Admin Console and the application, the Admin Console only, or the application only.

:::note
<CliRestores/>
:::

To restore using the CLI, see the corresponding procedure for your environment:

- [Existing Clusters](#existing)
- [Online Embedded kURL Clusters](#online)
- [Air Gap kURL Clusters](#air-gapped)

### Existing Clusters {#existing}

:::note
If you are restoring to a healthy cluster, you can skip reinstalling Velero and continue to running the `get backups` and `restore` commands in the last two steps.
:::

To restore a full backup in an existing cluster:

1. (New or Unhealthy Clusters Only) In the cluster where you will do the restore, install a version of Velero that is compatible with the version that was used to make the snapshot backup.

   The Velero installation command varies depending on the storage destination for the backup. For the Velero installation command, see one of the following:

   * **Host Path:** See [Configuring a Host Path Storage Destination](snapshots-configuring-hostpath).
   * **NFS:** See [Configuring an NFS Storage Destination](snapshots-configuring-nfs) for the configuration steps and how to set up Velero.
   * **AWS, GCP, Azure, or other S3:** See [Configuring Other Storage Destinations](snapshots-storage-destinations).

1. <GetBackups/>

1. <Restore/>

### Online Embedded kURL Clusters {#online}

:::note
If you are restoring to a healthy cluster, you can skip the installation and configuration steps and continue to running the `get backups` and `restore` commands in the last two steps.
:::

To restore a full backup in a kURL cluster:

1. (New or Unhealthy Clusters Only) Provision a cluster with kURL and install the target application in the cluster. See [Online Installation with kURL](installing-kurl).

1. (New or Unhealthy Clusters Only) In the new kURL cluster, configure a storage destination that holds the backup you want to use:

   * **Host Path:** See [Configuring a Host Path Storage Destination](snapshots-configuring-hostpath).
   * **NFS:** See [Configuring an NFS Storage Destination](snapshots-configuring-nfs) for the configuration steps and how to set up Velero.
   * **AWS, GCP, Azure, or other S3:** See [Configuring Other Storage Destinations](snapshots-storage-destinations).

1. <GetBackups/>

1. <Restore/>

### Air Gap kURL Clusters {#air-gapped}

To restore a full backup in an air gap kURL cluster:

1. Run the following command to install a new cluster and provide kURL with the correct registry IP address. kURL must be able to assign the same IP address to the embedded private image registry in the new cluster.

   ```bash
   cat install.sh | sudo bash -s airgap kurl-registry-ip=IP
   ```

   Replace `IP` with the registry IP address.

1. Use the KOTS CLI to configure Velero to use a storage destination. The storage backend used for backups must be accessible from the new cluster.

   * **Host Path:** See [Configuring a Host Path Storage Destination](snapshots-configuring-hostpath).
   * **NFS:** See [Configuring an NFS Storage Destination](snapshots-configuring-nfs) for the configuration steps and how to set up Velero.
   * **S3-Compatible:** See [Configure S3-Compatible Storage for Air Gapped Environments](snapshots-storage-destinations#configure-s3-compatible-storage-for-air-gapped-environments) in _Configuring Other Storage Destinations_.

1. <GetBackups/>

1. <Restore/>

## Restore the Application Only Using the Admin Console {#admin-console}

You can restore an application from a full or partial backup using the Admin Console.

### Restore an Application From a Full Backup

To restore an application from a full backup:

1. Select **Full Snapshots (Instance)** from the Snapshots tab.

   ![Full Snapshot tab](/images/full-snapshot-tab.png)

   [View a larger version of this image](/images/full-snapshot-tab.png)

1. Click the **Restore from this backup** icon (the circular blue arrows) for the backup that you want to restore.

1. In the **Restore from backup** dialog, select **Partial restore**.

   ![Restore Full Snapshot dialog](/images/restore-backup-dialog.png)

   [View a larger version of this image](/images/restore-backup-dialog.png)

   :::note
   You can also get the CLI commands for full restores or Admin Console only restores from this dialog.
   :::

1. At the bottom of the dialog, enter the application slug provided by your software vendor. For more information, see [Get the Application Slug](/vendor/vendor-portal-manage-app#slug) in _Managing Applications_.

1. Click **Confirm and restore**.

### Restore an Application From a Partial Backup

To restore an application from a partial backup:

1. Select **Partial Snapshots (Application)** from the Snapshots tab.

   ![Partial Snapshot tab](/images/partial-snapshot-tab.png)

   [View a larger version of this image](/images/partial-snapshot-tab.png)

1. Click the **Restore from this backup** icon (the circular blue arrows) for the backup that you want to restore.

   The **Restore from Partial backup (Application)** dialog opens.

1. Under **Type your application slug to continue**, enter the application slug provided by your software vendor. For more information, see [Get the Application Slug](/vendor/vendor-portal-manage-app#slug) in _Managing Applications_.

   ![Restore Partial Snapshot dialog](/images/restore-partial-dialog.png)

   [View a larger version of this image](/images/restore-partial-dialog.png)

1. Click **Confirm and restore**.

## Additional Resources

[Troubleshooting Snapshots](snapshots-troubleshooting-backup-restore)

================
File: docs/enterprise/snapshots-storage-destinations.md
================
import UpdateDefaultStorage from "../partials/snapshots/_updateDefaultStorage.mdx"
import RegistryCredNote from "../partials/snapshots/_registryCredentialsNote.mdx"
import CheckVersion from "../partials/snapshots/_checkVersion.mdx"

# Configuring Other Storage Destinations

This topic describes installing Velero and configuring storage for Amazon Web Services (AWS), Google Cloud Provider (GCP), Microsoft Azure, and S3-compatible providers.

To configure host path or NFS as a storage destination, see [Configuring a Host Path Storage Destination](snapshots-configuring-hostpath) and [Configuring an NFS Storage Destination](snapshots-configuring-nfs).

:::note
<UpdateDefaultStorage/>
:::

## Prerequisites

Complete the following items before you install Velero and configure a storage destination:

* Review the limitations and considerations. See [Limitations and Considerations](/vendor/snapshots-overview#limitations-and-considerations) in _About Backup and Restore_.
* Install the Velero CLI. See [Installing the Velero CLI](snapshots-velero-cli-installing).

## Configure AWS Storage for Online Environments

In this procedure, you install Velero and configure an AWS storage destination in online environments.

Snapshots does not support Amazon Simple Storage Service (Amazon S3) buckets that have a bucket policy requiring the server-side encryption header. If you want to require server-side encryption for objects, you can enable default encryption on the bucket instead. For more information about Amazon S3, see the [Amazon S3](https://docs.aws.amazon.com/s3/?icmpid=docs_homepage_featuredsvcs) documentation.

To install Velero and configure an AWS storage destination:

1. Follow the instructions for [installing Velero on AWS](https://github.com/vmware-tanzu/velero-plugin-for-aws#setup) in the Velero documentation.

1. Run the `velero install` command with these additional flags:

   * **Velero 1.10 and later**: Use the `--use-node-agent`, `--uploader-type=restic`, and `--use-volume-snapshots=false` flags.
   * **Velero versions earlier than 1.10**: Use the `--use-restic` and `--use-volume-snapshots=false` flags.

   **Example:**

   ```
   velero install \
     --provider aws \
     --plugins velero/velero-plugin-for-aws:v1.2.0 \
     --bucket $BUCKET \
     --backup-location-config region=$REGION \
     --secret-file CREDS_FILE \
     --use-node-agent --uploader-type=restic \
     --use-volume-snapshots=false
   ```

## Configure GCP Storage for Online Environments

In this procedure, you install Velero and configure a GCP storage destination in online environments.

To install Velero and configure a GCP storage destination:

1. Follow the instructions for [installing Velero on GCP](https://github.com/vmware-tanzu/velero-plugin-for-gcp#setup) in the Velero documentation.

1. Run the `velero install` command with these additional flags:

   * **Velero 1.10 and later**: Use the `--use-node-agent`, `--uploader-type=restic`, and `--use-volume-snapshots=false` flags.
   * **Velero versions earlier than 1.10**: Use the `--use-restic` and `--use-volume-snapshots=false` flags.

   **Example:**

   ```
   velero install \
     --provider gcp \
     --plugins velero/velero-plugin-for-gcp:v1.5.0 \
     --bucket $BUCKET \
     --secret-file ./CREDS_FILE \
     --use-node-agent --uploader-type=restic \
     --use-volume-snapshots=false
   ```
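
The GCP example above passes a service account key with `--secret-file`. One way to produce that file, sketched here under the assumption that a `velero` service account with the required storage permissions already exists in a hypothetical project `my-project`:

```bash
# Create a JSON key for the existing service account and save it as the
# credentials file referenced by --secret-file
gcloud iam service-accounts keys create ./CREDS_FILE \
  --iam-account velero@my-project.iam.gserviceaccount.com
```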

## Configure Azure Storage for Online Environments

In this procedure, you install Velero and configure an Azure storage destination in online environments.

To install Velero and configure an Azure storage destination:

1. Follow the instructions for [installing Velero on Azure](https://github.com/vmware-tanzu/velero-plugin-for-microsoft-azure#setup) in the Velero documentation.

1. Run the `velero install` command with these additional flags:

   * **Velero 1.10 and later**: Use the `--use-node-agent`, `--uploader-type=restic`, and `--use-volume-snapshots=false` flags.
   * **Velero versions earlier than 1.10**: Use the `--use-restic` and `--use-volume-snapshots=false` flags.

   **Example:**

   ```
   velero install \
     --provider azure \
     --plugins velero/velero-plugin-for-microsoft-azure:v1.5.0 \
     --bucket $BLOB_CONTAINER \
     --secret-file ./CREDS_FILE \
     --backup-location-config resourceGroup=$AZURE_BACKUP_RESOURCE_GROUP,storageAccount=$AZURE_STORAGE_ACCOUNT_ID[,subscriptionId=$AZURE_BACKUP_SUBSCRIPTION_ID] \
     --snapshot-location-config apiTimeout=<YOUR_TIMEOUT>[,resourceGroup=$AZURE_BACKUP_RESOURCE_GROUP,subscriptionId=$AZURE_BACKUP_SUBSCRIPTION_ID] \
     --use-node-agent --uploader-type=restic \
     --use-volume-snapshots=false
   ```

## Configure S3-Compatible Storage for Online Environments

Replicated supports the following S3-compatible object stores for storing backups with Velero:

- Ceph RADOS v12.2.7
- MinIO

Run the following command to install Velero and configure an S3-compatible storage destination in an online environment. For more information about required storage destination flags, see [`velero`](/reference/kots-cli-velero-index) in _Reference_.

```
kubectl kots velero configure-other-s3 \
  --namespace NAME \
  --endpoint ENDPOINT \
  --region REGION \
  --bucket BUCKET \
  --access-key-id ACCESS_KEY_ID \
  --secret-access-key SECRET_ACCESS_KEY
```

Replace:

- `NAME` with the name of the namespace where the Replicated KOTS Admin Console is installed and running
- `ENDPOINT` with the S3 endpoint
- `REGION` with the region where the bucket exists
- `BUCKET` with the name of the object storage bucket where backups should be stored
- `ACCESS_KEY_ID` with the access key ID to use for accessing the bucket
- `SECRET_ACCESS_KEY` with the secret access key to use for accessing the bucket

**Example:**

```
kubectl kots velero configure-other-s3 \
  --namespace default \
  --endpoint http://minio \
  --region minio \
  --bucket kots-snaps \
  --access-key-id XXXXXXXJTJB7M2XZUV7D \
  --secret-access-key mysecretkey
```

If no Velero installation is detected, instructions are displayed to install Velero and configure the storage destination.
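
Whichever provider you configured above, it can help to confirm that Velero can reach the storage destination before creating a first backup. A quick check, assuming Velero is installed in the `velero` namespace:

```bash
# The backup storage location should eventually report "Available" once
# Velero can authenticate to the bucket
velero backup-location get
kubectl get backupstoragelocations -n velero
```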

## Configure S3-Compatible Storage for Air Gapped Environments

> Introduced in Replicated KOTS v1.94.0

The following S3-compatible object stores are supported for storing backups with Velero:

- Ceph RADOS v12.2.7
- MinIO

Run the following command to install Velero and configure an S3-compatible storage destination in an air gapped environment. For more information about required storage destination flags, see [`velero`](/reference/kots-cli-velero-index) in _Reference_.

```bash
kubectl kots velero configure-other-s3 \
  --namespace NAME \
  --endpoint ENDPOINT \
  --region REGION \
  --bucket BUCKET \
  --access-key-id ACCESS_KEY_ID \
  --secret-access-key SECRET_ACCESS_KEY \
  --kotsadm-registry REGISTRY_HOSTNAME[/REGISTRY_NAMESPACE] \
  --registry-username REGISTRY_USERNAME \
  --registry-password REGISTRY_PASSWORD
```

Replace:

- `NAME` with the name of the namespace where the Admin Console is installed and running
- `ENDPOINT` with the S3 endpoint
- `REGION` with the region where the bucket exists
- `BUCKET` with the name of the object storage bucket where backups should be stored
- `ACCESS_KEY_ID` with the access key ID to use for accessing the bucket
- `SECRET_ACCESS_KEY` with the secret access key to use for accessing the bucket
- `REGISTRY_HOSTNAME` with the registry endpoint where the images are hosted
- `REGISTRY_NAMESPACE` with the registry namespace where the images are hosted (Optional)
- `REGISTRY_USERNAME` with the username to use to authenticate with the registry
- `REGISTRY_PASSWORD` with the password to use to authenticate with the registry

If no Velero installation is detected, instructions are displayed to install Velero and configure the storage destination.

<RegistryCredNote/>

## Next Steps

* (Existing Clusters Only) Configure Velero namespace access if you are using minimal RBAC. See [Configuring Namespace Access and Memory Limit](snapshots-velero-installing-config).
* (Optional) Increase the default memory limits. See [Configuring Namespace Access and Memory Limit](snapshots-velero-installing-config).
* Create or schedule backups. See [Creating and Scheduling Backups](snapshots-creating).

## Additional Resources

* [Troubleshooting Snapshots](snapshots-troubleshooting-backup-restore)

================
File: docs/enterprise/snapshots-troubleshooting-backup-restore.md
================
import NodeAgentMemLimit from "../partials/snapshots/_node-agent-mem-limit.mdx"

# Troubleshooting Snapshots

When a snapshot fails, a support bundle is collected and stored automatically. Because this is a point-in-time collection of all logs and system state at the time of the failed snapshot, it is a good place to view the logs.

## Velero is Crashing

If Velero is crashing and not starting, some common causes are:

### Invalid Cloud Credentials

#### Symptom

You see the following error message from Velero when trying to configure a snapshot:

```shell
time="2020-04-10T14:22:24Z" level=info msg="Checking existence of namespace" logSource="pkg/cmd/server/server.go:337" namespace=velero
time="2020-04-10T14:22:24Z" level=info msg="Namespace exists" logSource="pkg/cmd/server/server.go:343" namespace=velero
time="2020-04-10T14:22:27Z" level=info msg="Checking existence of Velero custom resource definitions" logSource="pkg/cmd/server/server.go:372"
time="2020-04-10T14:22:31Z" level=info msg="All Velero custom resource definitions exist" logSource="pkg/cmd/server/server.go:406"
time="2020-04-10T14:22:31Z" level=info msg="Checking that all backup storage locations are valid" logSource="pkg/cmd/server/server.go:413"
An error occurred: some backup storage locations are invalid: backup store for location "default" is invalid: rpc error: code = Unknown desc = NoSuchBucket: The specified bucket does not exist
	status code: 404, request id: BEFAE2B9B05A2DCF, host id: YdlejsorQrn667ziO6Xr6gzwKJJ3jpZzZBMwwMIMpWj18Phfii6Za+dQ4AgfzRcxavQXYcgxRJI=
```

#### Cause

If the cloud access credentials are invalid or do not have access to the location in the configuration, Velero crashloops. The Velero logs are included in a support bundle, and the message looks like the example above.

#### Solution

Replicated recommends that you validate the access key and secret, or the service account JSON.

### Invalid Top-level Directories

#### Symptom

You see the following error message when Velero is starting:

```shell
time="2020-04-10T14:12:42Z" level=info msg="Checking existence of namespace" logSource="pkg/cmd/server/server.go:337" namespace=velero
time="2020-04-10T14:12:42Z" level=info msg="Namespace exists" logSource="pkg/cmd/server/server.go:343" namespace=velero
time="2020-04-10T14:12:44Z" level=info msg="Checking existence of Velero custom resource definitions" logSource="pkg/cmd/server/server.go:372"
time="2020-04-10T14:12:44Z" level=info msg="All Velero custom resource definitions exist" logSource="pkg/cmd/server/server.go:406"
time="2020-04-10T14:12:44Z" level=info msg="Checking that all backup storage locations are valid" logSource="pkg/cmd/server/server.go:413"
An error occurred: some backup storage locations are invalid: backup store for location "default" is invalid: Backup store contains invalid top-level directories: [other-directory]
```

#### Cause

This error occurs when Velero starts while configured to use a bucket that was reconfigured or reused.

When configuring Velero to use a bucket, the bucket cannot contain other data, or Velero will crash.

#### Solution

Configure Velero to use a bucket that does not contain other data.

## Node Agent is Crashing

If the node-agent Pod is crashing and not starting, some common causes are:

### Metrics Server is Failing to Start

#### Symptom

You see the following error in the node-agent logs:

```shell
time="2023-11-16T21:29:44Z" level=info msg="Starting metric server for node agent at address []" logSource="pkg/cmd/cli/nodeagent/server.go:229"
time="2023-11-16T21:29:44Z" level=fatal msg="Failed to start metric server for node agent at []: listen tcp :80: bind: permission denied" logSource="pkg/cmd/cli/nodeagent/server.go:236"
```

#### Cause

This is a result of a known issue in Velero 1.12.0 and 1.12.1 where the port is not set correctly when starting the metrics server.
This causes the metrics server to fail to start with a `permission denied` error in environments that do not run MinIO and have Host Path, NFS, or internal storage destinations configured. When the metrics server fails to start, the node-agent Pod crashes. For more information about this issue, see [the GitHub issue details](https://github.com/vmware-tanzu/velero/issues/6792).

#### Solution

Replicated recommends that you either upgrade to Velero 1.12.2 or later, or downgrade to a version earlier than 1.12.0.

## Snapshot Creation is Failing

### Timeout Error when Creating a Snapshot

#### Symptom

You see a backup error that includes a timeout message when attempting to create a snapshot. For example:

```bash
Error backing up item
timed out after 12h0m0s
```

#### Cause

This error message appears when the node-agent (restic) Pod operation timeout limit is reached. In Velero v1.4.2 and later, the default timeout is 240 minutes.

Restic is an open-source backup tool. Velero integrates with Restic to provide a solution for backing up and restoring Kubernetes volumes. For more information about the Velero Restic integration, see [File System Backup](https://velero.io/docs/v1.10/file-system-backup/) in the Velero documentation.

#### Solution

Use the kubectl Kubernetes command-line tool to patch the Velero deployment to increase the timeout:

**Velero version 1.10 and later**:

```bash
kubectl patch deployment velero -n velero --type json -p '[{"op":"add","path":"/spec/template/spec/containers/0/args/-","value":"--fs-backup-timeout=TIMEOUT_LIMIT"}]'
```

**Velero versions earlier than 1.10**:

```bash
kubectl patch deployment velero -n velero --type json -p '[{"op":"add","path":"/spec/template/spec/containers/0/args/-","value":"--restic-timeout=TIMEOUT_LIMIT"}]'
```

Replace `TIMEOUT_LIMIT` with a length of time for the node-agent (restic) Pod operation timeout in hours, minutes, and seconds. Use the format `0h0m0s`. For example, `48h30m0s`.

:::note
The timeout value reverts back to the default value if you rerun the `velero install` command.
:::

### Memory Limit Reached on the node-agent Pod

#### Symptom

The node-agent (restic) Pod is killed by the Linux kernel Out Of Memory (OOM) killer, or snapshots are failing with errors similar to:

```
pod volume backup failed: ... signal: killed
```

#### Cause

Velero sets default limits for the velero Pod and the node-agent (restic) Pod during installation. There is a known issue with Restic that causes high memory usage, which can result in failures during snapshot creation when the Pod reaches the memory limit.

For more information, see the [Restic backup — OOM-killed on raspberry pi after backing up another computer to same repo](https://github.com/restic/restic/issues/1988) issue in the restic GitHub repository.

#### Solution

<NodeAgentMemLimit/>

### At Least One Source File Could Not Be Read

#### Symptom

You see the following error in the Velero logs:

```
Error backing up item...Warning: at least one source file could not be read
```

#### Cause

There are file changes between Restic's initial scan of the volume and the backup to the Restic store.

#### Solution

To resolve this issue, do one of the following:

* Use [hooks](/vendor/snapshots-hooks) to export data to an [EmptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) volume and include that in the backup instead of the primary PVC volume.
  See [Configuring Backup and Restore Hooks for Snapshots](/vendor/snapshots-hooks).

* Freeze the file system to ensure all pending disk I/O operations have completed prior to taking a snapshot. For more information, see [Hook Example with fsfreeze](https://velero.io/docs/main/backup-hooks/#hook-example-with-fsfreeze) in the Velero documentation.

## Snapshot Restore is Failing

### Service NodePort is Already Allocated

#### Symptom

In the Replicated KOTS Admin Console, you see an **Application failed to restore** error message that indicates the port number for a static NodePort is already in use. For example:

![Snapshot Troubleshoot Service NodePort](/images/snapshot-troubleshoot-service-nodeport.png)

[View a larger version of this image](/images/snapshot-troubleshoot-service-nodeport.png)

#### Cause

There is a known issue in Kubernetes versions earlier than 1.19 where using a static NodePort for services can collide in multi-primary high availability setups when recreating the services. For more information about this known issue, see https://github.com/kubernetes/kubernetes/issues/85894.

#### Solution

This issue is fixed in Kubernetes version 1.19. To resolve this issue, upgrade to Kubernetes version 1.19 or later.

For more information about the fix, see https://github.com/kubernetes/kubernetes/pull/89937.

### Partial Snapshot Restore is Stuck in Progress

#### Symptom

In the Admin Console, you see at least one volume restore progress bar frozen at 0%. Example Admin Console display:

![Snapshot Troubleshoot Frozen Restore](/images/snapshot-troubleshoot-frozen-restore.png)

You can confirm this is the same issue by running `kubectl get pods -n <application namespace>`. You should see at least one pod stuck in initialization:

```shell
NAME                            READY   STATUS     RESTARTS   AGE
example-mysql-0                 0/1     Init:0/2   0          4m15s   #<- the offending pod
example-nginx-77b878b4f-zwv2h   3/3     Running    0          4m15s
```

#### Cause

This issue has been observed with Velero version 1.5.4, and is tracked in the Velero project at https://github.com/vmware-tanzu/velero/issues/3686. It has not been observed with Velero 1.6.0 or later.

#### Solution

Upgrade Velero to 1.9.0. You can upgrade using Replicated kURL. Or, to follow the Velero upgrade instructions, see [Upgrading to Velero 1.9](https://velero.io/docs/v1.9/upgrade-to-1.9/) in the Velero documentation.

### Partial Snapshot Restore Finishes with Warnings

#### Symptom

In the Admin Console, when the partial snapshot restore completes, you see warnings indicating that Endpoint resources were not restored:

![Snapshot Troubleshoot Restore Warnings](/images/snapshot-troubleshoot-restore-warnings.png)

#### Cause

The resource restore priority was changed in Velero 1.10.3 and 1.11.0, which leads to this warning when restoring Endpoint resources. For more information about this issue, see [the issue details](https://github.com/vmware-tanzu/velero/issues/6280) in GitHub.

#### Solution

These warnings do not necessarily mean that the restore itself failed. The endpoints likely exist, because Kubernetes creates them when the related Service resources are restored. However, to prevent encountering these warnings, use Velero version 1.11.1 or later.
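
To confirm which Velero version is actually running before and after an upgrade, you can read the image tag from the Velero deployment. A quick check, assuming Velero is installed in the `velero` namespace:

```bash
# Prints the image (including the version tag) used by the Velero server
kubectl -n velero get deployment velero \
  -o jsonpath='{.spec.template.spec.containers[0].image}'
```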

================
File: docs/enterprise/snapshots-updating-with-admin-console.md
================
# Updating Storage Settings

This topic describes how to update existing storage destination settings using the Replicated Admin Console.

## Prerequisite

If you are changing from one provider to another, make sure that you meet the prerequisites for the new storage destination. For information about prerequisites, see:

- [Configuring a Host Path Storage Destination](snapshots-configuring-hostpath)
- [Configuring an NFS Storage Destination](snapshots-configuring-nfs)
- [Configuring Other Storage Destinations](snapshots-storage-destinations)

## Update Storage Settings

You can update storage destination settings for online and air gapped environments at any time using the Admin Console.

Additionally, if Velero was automatically installed by Replicated kURL, then Replicated recommends that you change the default internal storage because it is not sufficient for disaster recovery.

To update storage destination settings:

1. In the Admin Console, select **Snapshots** > **Settings and Schedule**.

1. Under storage, you can edit the existing settings or click **Add a new storage destination** and select a storage destination type.

   ![Snapshot Destination Dropdown Host Path](/images/snapshot-destination-dropdown-hostpath.png)

   The configuration fields that display depend on the type of storage destination. See the following storage destination sections for field descriptions:

   - [AWS](#aws-fields)
   - [GCP](#gcp-fields)
   - [Azure](#azure-fields)
   - [S3-compatible](#s3-compatible-fields)
   - [NFS](#nfs-fields)
   - [Host Path](#host-path-fields)

1. Click **Update storage settings**. The update can take several minutes.

### AWS Fields

When configuring the Admin Console to store backups on Amazon Web Services (AWS), the following fields are available:

| Name | Description |
|------|-------------|
| Region | The AWS region that the S3 bucket is available in |
| Bucket | The name of the S3 bucket to use |
| Path (Optional) | The path in the bucket to store all backups in |
| Access Key ID (Optional) | The AWS IAM Access Key ID that can read from and write to the bucket |
| Secret Access Key (Optional) | The AWS IAM Secret Access Key that is associated with the Access Key ID |
| Use Instance Role | When enabled, instead of providing an Access Key ID and Secret Access Key, Velero uses an instance IAM role |
| Add a CA Certificate | (Optional) Upload a third-party issued (proxy) CA certificate used for trusting the authenticity of the snapshot storage endpoint. Only one file can be uploaded. However, it is possible to concatenate multiple certificates into one file. **Formats:** PEM, CER, CRT, CA, and KEY |
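
The **Add a CA Certificate** fields in the tables in this topic accept only one file. If the storage endpoint presents a chain signed by an intermediate certificate authority, you can concatenate the certificates into a single bundle before uploading. A minimal sketch, with hypothetical file names:

```bash
# Combine multiple PEM certificates into one bundle for upload
cat root-ca.pem intermediate-ca.pem > ca-bundle.pem
```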

### GCP Fields

When configuring the Admin Console to store backups on Google Cloud Provider (GCP), the following fields are available:

| Name | Description |
|------|-------------|
| Bucket | The name of the GCP storage bucket to use |
| Path (Optional) | The path in the bucket to store all backups in |
| Service Account | The GCP IAM Service Account JSON file that has permissions to read from and write to the storage location |
| Add a CA Certificate | (Optional) Upload a third-party issued (proxy) CA certificate used for trusting the authenticity of the snapshot storage endpoint. Only one file can be uploaded. However, it is possible to concatenate multiple certificates into one file. **Formats:** PEM, CER, CRT, CA, and KEY |

### Azure Fields

When configuring the Admin Console to store backups on Microsoft Azure, the following fields are available:

| Name | Description |
|------|-------------|
| Bucket | The name of the Azure Blob Storage Container to use |
| Path (Optional) | The path in the Blob Storage Container to store all backups in |
| Resource Group | The Resource Group name of the target Blob Storage Container |
| Storage Account | The Storage Account Name of the target Blob Storage Container |
| Subscription ID | The Subscription ID associated with the target Blob Storage Container (required only for access via Service Principal or AAD Pod Identity) |
| Tenant ID | The Tenant ID associated with the Azure account of the target Blob Storage Container (required only for access via Service Principal) |
| Client ID | The Client ID of a Service Principal with access to the target Container (required only for access via Service Principal) |
| Client Secret | The Client Secret of a Service Principal with access to the target Container (required only for access via Service Principal) |
| Cloud Name | The Azure cloud for the target storage (options: AzurePublicCloud, AzureUSGovernmentCloud, AzureChinaCloud, AzureGermanCloud) |
| Add a CA Certificate | (Optional) Upload a third-party issued (proxy) CA certificate used for trusting the authenticity of the snapshot storage endpoint. Only one file can be uploaded. However, it is possible to concatenate multiple certificates into one file. **Formats:** PEM, CER, CRT, CA, and KEY |

Only connections with Service Principals are supported at this time.

For more information about authentication methods and setting up Azure, see [Velero plugins for Microsoft Azure](https://github.com/vmware-tanzu/velero-plugin-for-microsoft-azure) in the velero-plugin-for-microsoft-azure GitHub repository.

### S3-Compatible Fields

Replicated supports the following S3-compatible object stores for storing backups with Velero:

* Ceph RADOS v12.2.7. For more information, see the [Ceph](https://docs.ceph.com/en/quincy/) documentation.
* MinIO. For more information, see the [MinIO](https://docs.min.io/docs/minio-quickstart-guide.html) documentation.

When configuring the Admin Console to store backups on S3-compatible storage, the following fields are available:

| Name | Description |
|------|-------------|
| Region | The AWS region that the S3 bucket is available in |
| Endpoint | The endpoint to use to connect to the bucket |
| Bucket | The name of the S3 bucket to use |
| Path (Optional) | The path in the bucket to store all backups in |
| Access Key ID (Optional) | The AWS IAM Access Key ID that can read from and write to the bucket |
| Secret Access Key (Optional) | The AWS IAM Secret Access Key that is associated with the Access Key ID |
| Use Instance Role | When enabled, instead of providing an Access Key ID and Secret Access Key, Velero uses an instance IAM role |
| Add a CA Certificate | (Optional) Upload a third-party issued (proxy) CA certificate used for trusting the authenticity of the snapshot storage endpoint. Only one file can be uploaded. However, it is possible to concatenate multiple certificates into one file. **Formats:** PEM, CER, CRT, CA, and KEY |

### NFS Fields

When configuring the Admin Console to store backups on network file system (NFS) storage, the following fields are available:

| Name | Description |
|------|-------------|
| Server | The hostname or IP address of the NFS server |
| Path | The path that is exported by the NFS server |

### Host Path Fields

When configuring the Admin Console to store backups on host path storage, the following field is available:

**Host path**: Enter the path to the directory on the node. Although the path can be local, Replicated recommends that you use an external host path.

================
File: docs/enterprise/snapshots-velero-cli-installing.md
================
# Installing the Velero CLI

You install the Velero CLI before installing Velero and configuring a storage destination for backups.

:::note
For embedded clusters created with Replicated kURL, if the kURL installer spec included the Velero add-on, then Velero was automatically installed with default internal storage. Replicated recommends that you change the default internal storage because it is insufficient for disaster recovery. See [Updating Storage Settings in the Admin Console](snapshots-updating-with-admin-console).
:::

## Install the Velero CLI in an Online Cluster

To install the Velero CLI in an online cluster:

1. Do one of the following:

   - (Embedded kURL cluster) Use SSH to access and authenticate to your cluster node.
   - (Existing cluster) Open a terminal in the environment that you manage the cluster from, which can be a local machine that has kubectl installed.

1. Check for the latest supported release of the Velero CLI for **Linux AMD64** in the Velero GitHub repo at https://github.com/vmware-tanzu/velero/releases. Although earlier versions of Velero are supported, Replicated recommends using the latest supported version. For more information about supported versions, see [Velero Version Compatibility](/vendor/snapshots-overview#velero-version-compatibility).

   Note the version number for the next step.

1. Run the following command to download the latest supported Velero CLI version for the **Linux AMD64** operating system to the cluster:

   ```
   curl -LO https://github.com/vmware-tanzu/velero/releases/download/VERSION/velero-VERSION-linux-amd64.tar.gz
   ```

   Replace `VERSION` with the version number using the format `vx.x.x`.

   **Example:**

   ```
   curl -LO https://github.com/vmware-tanzu/velero/releases/download/v1.10.1/velero-v1.10.1-linux-amd64.tar.gz
   ```

1. Run the following command to uncompress the TAR file:

   ```
   tar zxvf velero-VERSION-linux-amd64.tar.gz
   ```

   Replace `VERSION` with the version number using the format `vx.x.x`.

1. Run the following command to install the Velero CLI:

   ```
   sudo mv velero-VERSION-linux-amd64/velero /usr/local/bin/velero
   ```

   Replace `VERSION` with the version number using the format `vx.x.x`.

1. Run `velero version` to test that the Velero CLI installation worked correctly.

   You might get an error message stating that there are no matches for the server version. This is acceptable, as long as you get a confirmation for the client version. After Velero is installed, you also see the server version.

## Install the Velero CLI in an Air Gapped Cluster

To install the Velero CLI in an air gapped cluster:

1. From a computer with internet access, check for the latest supported release of the Velero CLI for **Linux AMD64** in the Velero GitHub repo at https://github.com/vmware-tanzu/velero/releases. Although earlier versions of Velero are supported, Replicated recommends using the latest supported version. See [Velero Version Compatibility](/vendor/snapshots-overview#velero-version-compatibility).

   Note the version number for the next step.

1. Run the following command to download the latest supported Velero CLI version for the **Linux AMD64** operating system:

   ```
   curl -LO https://github.com/vmware-tanzu/velero/releases/download/VERSION/velero-VERSION-linux-amd64.tar.gz
   ```

   Replace `VERSION` with the version number using the format `vx.x.x`.

   **Example:**

   ```
   curl -LO https://github.com/vmware-tanzu/velero/releases/download/v1.10.1/velero-v1.10.1-linux-amd64.tar.gz
   ```

1. Copy the TAR file to the air gapped node.

1. Run the following command to uncompress the TAR file:

   ```
   tar zxvf velero-VERSION-linux-amd64.tar.gz
   ```

   Replace `VERSION` with the version number using the format `vx.x.x`.

1. Run the following command to install the Velero CLI:

   ```
   sudo mv velero-VERSION-linux-amd64/velero /usr/local/bin/velero
   ```

   Replace `VERSION` with the version number using the format `vx.x.x`.

1. Run `velero version` to test that the Velero CLI installation worked correctly.

   You might get an error message stating that there are no matches for the server version. This is acceptable, as long as you get a confirmation for the client version. After Velero is installed, you also see the server version. A client-only check is shown after this procedure.
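
In either procedure, if you only want to confirm the CLI binary itself before Velero is installed in the cluster, the Velero CLI can skip the server lookup entirely:

```bash
# Prints only the client version, avoiding the server version error
velero version --client-only
```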

## Next Step

Install Velero and configure a storage destination using one of the following procedures:

- [Configuring a Host Path Storage Destination](snapshots-configuring-hostpath)
- [Configuring an NFS Storage Destination](snapshots-configuring-nfs)
- [Configuring Other Storage Destinations](snapshots-storage-destinations)

================
File: docs/enterprise/snapshots-velero-installing-config.mdx
================
import NodeAgentMemLimit from "../partials/snapshots/_node-agent-mem-limit.mdx"
import KotsadmNamespace from "../partials/kots-cli/_kotsadm-namespace.mdx"
import KotsadmRegistry from "../partials/kots-cli/_kotsadm-registry.mdx"

# Configuring Namespace Access and Memory Limit

This topic describes how to configure namespace access and the memory limit for Velero.

## Overview

The Replicated KOTS Admin Console requires access to the namespace where Velero is installed. If your Admin Console is running with minimal role-based access control (RBAC) privileges, you must enable the Admin Console to access Velero.

Additionally, if the application uses a large amount of memory, you can configure the default memory limit to help ensure that Velero runs successfully with snapshots.

## Configure Namespace Access

This section applies only to _existing cluster_ installations (online and air gap) where the Admin Console is running with minimal role-based access control (RBAC) privileges.

Run the following command to enable the Admin Console to access the Velero namespace:

```
kubectl kots velero ensure-permissions --namespace ADMIN_CONSOLE_NAMESPACE --velero-namespace VELERO_NAMESPACE
```

Replace:
* `ADMIN_CONSOLE_NAMESPACE` with the namespace on the cluster where the Admin Console is running.
* `VELERO_NAMESPACE` with the namespace on the cluster where Velero is installed.

For more information, see [`velero ensure-permissions`](/reference/kots-cli-velero-ensure-permissions/) in the KOTS CLI documentation. For more information about RBAC privileges for the Admin Console, see [Kubernetes RBAC](/vendor/packaging-rbac).

## Configure Memory Limit

This section applies to all online and air gap installations.

Velero sets default limits for the velero Pod and the node-agent (restic) Pod during installation. There is a known issue with Restic that causes high memory usage, which can result in failures during backup creation when the Pod reaches the memory limit.

<NodeAgentMemLimit/>

## Additional Resources

* [Troubleshooting Snapshots](snapshots-troubleshooting-backup-restore)

================
File: docs/enterprise/status-viewing-details.md
================
import StatusesTable from "../partials/status-informers/_statusesTable.mdx"
import AggregateStatus from "../partials/status-informers/_aggregateStatus.mdx"
import AggregateStatusIntro from "../partials/status-informers/_aggregate-status-intro.mdx"

# Understanding Application Status Details in the Admin Console

This topic describes how to view the status of an application on the Replicated KOTS Admin Console dashboard. It also describes how Replicated KOTS collects and aggregates the application status.

## View Status Details

The application status displays on the dashboard of the Admin Console. Viewing the status details can be helpful for troubleshooting.

To view the status details, click **Details** next to the status on the dashboard.
- -![Status Details](/images/kotsadm-dashboard-appstatus.png) - -## About Application Status - -To display application status on the Admin Console dashboard, KOTS aggregates the status of specific Kubernetes resources for the application. - -The following resource types are supported for displaying application status: - -* Deployment -* StatefulSet -* Service -* Ingress -* PersistentVolumeClaims (PVC) -* DaemonSet - -Applications can specify one or more of the supported Kubernetes workloads listed above. KOTS watches all specified workloads for state changes. - -For more information about how to interpret the application status displayed on the Admin Console dashboard, see [Resource Statuses](#resource-statuses) and [Aggregate Application Status](#aggregate-application-status) below. - -### Resource Statuses - -Possible application statuses are Ready, Updating, Degraded, Unavailable, and Missing. - -The following table lists the supported Kubernetes resources and the conditions that contribute to each status: - -<StatusesTable/> - -### Aggregate Application Status - -<AggregateStatusIntro/> - -<AggregateStatus/> - -================ -File: docs/enterprise/troubleshooting-an-app.mdx -================ -import GenerateBundleAdminConsole from "../partials/support-bundles/_generate-bundle-admin-console.mdx" - -# Generating Support Bundles from the Admin Console - -This topic describes how to generate support bundles from the KOTS Admin Console. - -## Generate a Bundle from the Admin Console - -<GenerateBundleAdminConsole/> - -================ -File: docs/enterprise/updating-app-manager.mdx -================ -import AdminConsole from "../partials/updating/_admin-console.mdx" -import AdminConsoleAirGap from "../partials/updating/_admin-console-air-gap.mdx" -import PushKotsImages from "../partials/install/_push-kotsadm-images.mdx" -import BuildAirGapBundle from "../partials/install/_airgap-bundle-build.mdx" -import DownloadAirGapBundle from "../partials/install/_airgap-bundle-download.mdx" -import ViewAirGapBundle from "../partials/install/_airgap-bundle-view-contents.mdx" - -# Performing Updates in Existing Clusters - -This topic describes how to perform updates in existing cluster installations with Replicated KOTS. It includes information about how to update applications and the version of KOTS running in the cluster. - -## Update an Application - -You can perform an application update using the KOTS Admin Console or the KOTS CLI. You can also set up automatic updates. See [Configuring Automatic Updates](/enterprise/updating-apps). - -### Using the Admin Console - -#### Online Environments - -<AdminConsole/> - -#### Air Gap Environments - -<AdminConsoleAirGap/> - -### Using the KOTS CLI - -You can use the KOTS CLI [upstream upgrade](/reference/kots-cli-upstream-upgrade) command to update an application in existing cluster installations. - -#### Online Environments - -To update an application in online environments: - -```bash -kubectl kots upstream upgrade APP_SLUG -n ADMIN_CONSOLE_NAMESPACE -``` -Where: -* `APP_SLUG` is the unique slug for the application. See [Get the Application Slug](/vendor/vendor-portal-manage-app#slug) in _Managing Applications_. -* `ADMIN_CONSOLE_NAMESPACE` is the namespace where the Admin Console is running. - -:::note -Add the `--deploy` flag to automatically deploy this version. -::: - -#### Air Gap Environments - -To update an application in air gap environments: - -1. 
In the [Vendor Portal](https://vendor.replicated.com), go to the channel where the target release is promoted, then build and download the new `.airgap` bundle: - - <BuildAirGapBundle/> - -1. <DownloadAirGapBundle/> - -1. <ViewAirGapBundle/> - -1. Run the following command to update the application: - - ```bash - kubectl kots upstream upgrade APP_SLUG \ - --airgap-bundle NEW_AIRGAP_BUNDLE \ - --kotsadm-registry REGISTRY_HOST[/REGISTRY_NAMESPACE] \ - --registry-username RO_USERNAME \ - --registry-password RO_PASSWORD \ - -n ADMIN_CONSOLE_NAMESPACE - ``` - Replace: - * `APP_SLUG` with the unique slug for the application. See [Get the Application Slug](/vendor/vendor-portal-manage-app#slug) in _Managing Applications_. - * `NEW_AIRGAP_BUNDLE` with the `.airgap` bundle for the target application version. - * `REGISTRY_HOST` with the private registry that contains the Admin Console images. - * `REGISTRY_NAMESPACE` with the registry namespace where the images are hosted (Optional). - * `RO_USERNAME` and `RO_PASSWORD` with the username and password for an account that has read-only access to the private registry. - * `ADMIN_CONSOLE_NAMESPACE` with the namespace where the Admin Console is running. - -:::note -Add the `--deploy` flag to automatically deploy this version. -::: - -## Update KOTS - -This section describes how to update the version of Replicated KOTS running in your cluster. For information about the latest versions of KOTS, see [KOTS Release Notes](/release-notes/rn-app-manager). - -:::note -Downgrading KOTS to a version earlier than what is currently deployed is not supported. -::: - -### Online Environments - -To update KOTS in an online existing cluster: - -1. Run _one_ of the following commands to update the KOTS CLI to the target version of KOTS: - - - **Install or update to the latest version**: - - ``` - curl https://kots.io/install | bash - ``` - - - **Install or update to a specific version**: - - ``` - curl https://kots.io/install/VERSION | bash - ``` - Where `VERSION` is the target KOTS version. - - For more KOTS CLI installation options, including information about how to install or update without root access, see [Installing the KOTS CLI](/reference/kots-cli-getting-started). - -1. Run the following command to update the KOTS Admin Console to the same version as the KOTS CLI: - - ```bash - kubectl kots admin-console upgrade -n NAMESPACE - ``` - Replace `NAMESPACE` with the namespace in your cluster where KOTS is installed. - -### Air Gap Environments - -To update KOTS in an existing air gap cluster: - -1. Download the target version of the following assets from the [Releases](https://github.com/replicatedhq/kots/releases/latest) page in the KOTS GitHub repository: - * KOTS Admin Console `kotsadm.tar.gz` bundle - * KOTS CLI plugin - - Ensure that you can access the downloaded bundles from the environment where the Admin Console is running. - -1. Install or update the KOTS CLI to the version that you downloaded. See [Manually Download and Install](/reference/kots-cli-getting-started#manually-download-and-install) in _Installing the KOTS CLI_. - -1. <PushKotsImages/> - -1. Run the following command using registry read-only credentials to update the KOTS Admin Console: - - ``` - kubectl kots admin-console upgrade \ - --kotsadm-registry REGISTRY_HOST \ - --registry-username RO_USERNAME \ - --registry-password RO_PASSWORD \ - -n NAMESPACE - ``` - Replace: - * `REGISTRY_HOST` with the same private registry from the previous step.
- * `RO_USERNAME` with the username for credentials with read-only permissions to the registry. - * `RO_PASSWORD` with the password associated with the username. - * `NAMESPACE` with the namespace on your cluster where KOTS is installed. - - For help information, run `kubectl kots admin-console upgrade -h`. - -================ -File: docs/enterprise/updating-apps.mdx -================ -# Configuring Automatic Updates - -This topic describes how to configure automatic updates for applications installed in online (internet-connected) environments. - -## Overview - -For applications installed in an online environment, the Replicated KOTS Admin Console automatically checks for new versions once every four hours by default. After the Admin Console checks for updates, it downloads any new versions of the application and displays them on the **Version History** tab. - -You can edit this default cadence to customize how often the Admin Console checks for and downloads new versions. - -You can also configure the Admin Console to automatically deploy new versions of the application after it downloads them. - -The Admin Console only deploys new versions automatically if preflight checks pass. By default, the Admin Console does not automatically deploy any version of an application. - -## Limitations - -Automatic updates have the following limitations: - -* Automatic updates are not supported for [Replicated Embedded Cluster](/vendor/embedded-overview) installations. - -* Automatic updates are not supported for applications installed in air gap environments with no outbound internet access. - -* Automatically deploying new versions is not supported when KOTS is installed with minimal RBAC. This is because all preflight checks must pass for the new version to be automatically deployed, and preflight checks that require cluster-scoped access will fail in minimal RBAC environments. - -## Set Up Automatic Updates - -To configure automatic updates: - -1. In the Admin Console, go to the **Version History** tab and click **Configure automatic updates**. - - The **Configure automatic updates** dialog opens. - -1. Under **Automatically check for updates**, use the default or select a cadence (Hourly, Daily, Weekly, Never, Custom) from the dropdown list. - - To turn off automatic updates, select **Never**. - - To define a custom cadence, select **Custom**, then enter a cron expression in the text field. For more information about cron expressions, see [Cron Expressions](/reference/cron-expressions). Configured automatic update checks use the local server time. - - ![Configure automatic updates](/images/automatic-updates-config.png) - -1. Under **Automatically deploy new versions**, select an option. The available options depend on whether semantic versioning is enabled for the channel. - * **For channels that use semantic versioning**: (v1.58.0 and later) Select an option in the dropdown - to specify the versions that the Admin Console automatically deploys. For example, - to automatically deploy only new patch and minor versions, select - **Automatically deploy new patch and minor versions**. - * **For channels that do not use semantic versioning**: (v1.67.0 and later) Optionally select **Enable automatic deployment**. - When this checkbox is enabled, the Admin Console automatically deploys each new version of the application that it downloads. 
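For example, if you select the **Custom** cadence described above, you provide a standard five-field cron expression. The following schedule is only an illustration; choose a window that fits your environment:

```
0 2 * * *
```

This expression configures the Admin Console to check for updates once per day at 2:00 AM local server time.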
- -================ -File: docs/enterprise/updating-embedded.mdx -================ -import UpdateAirGapAdm from "../partials/embedded-cluster/_update-air-gap-admin-console.mdx" -import UpdateAirGapCli from "../partials/embedded-cluster/_update-air-gap-cli.mdx" -import UpdateAirGapOverview from "../partials/embedded-cluster/_update-air-gap-overview.mdx" -import DoNotDowngrade from "../partials/embedded-cluster/_warning-do-not-downgrade.mdx" -import Overview from "../partials/embedded-cluster/_update-overview.mdx" - -# Performing Updates in Embedded Clusters - -This topic describes how to perform updates for [Replicated Embedded Cluster](/vendor/embedded-overview) installations. - -:::note -If you are instead looking for information about Replicated kURL, see [Performing Updates in kURL Clusters](updating-kurl). -::: - -## Overview - -<Overview/> - -The following diagram demonstrates how updates are performed with Embedded Cluster in online (internet-connected) environments: - -![Embedded Cluster updates Kubernetes and an app in a customer environment](/images/embedded-cluster-update.png) - -[View a larger version of this image](/images/embedded-cluster-update.png) - -As shown in the diagram above, users check for available updates from the KOTS Admin Console. When deploying the new version, both the application and the cluster infrastructure are updated as needed. - -## Update in Online Clusters - -<DoNotDowngrade/> - -To perform an update with Embedded Cluster: - -1. In the Admin Console, go to the **Version history** tab. - - All versions available for upgrade are listed in the **Available Updates** section: - - ![Version history page](/images/ec-upgrade-version-history.png) - - [View a larger version of this image](/images/ec-upgrade-version-history.png) - -1. Click **Deploy** next to the target version. - -1. On the **Config** screen of the upgrade wizard, make any necessary changes to the configuration for the application. Click **Next**. - - ![Config screen in the upgrade wizard](/images/ec-upgrade-wizard-config.png) - - [View a larger version of this image](/images/ec-upgrade-wizard-config.png) - - :::note - Any changes made on the **Config** screen of the upgrade wizard are not set until the new version is deployed. - ::: - -1. On the **Preflight** screen, view the results of the preflight checks. - - ![Preflight screen in the upgrade wizard](/images/ec-upgrade-wizard-preflights.png) - - [View a larger version of this image](/images/ec-upgrade-wizard-preflights.png) - -1. On the **Confirm** screen, click **Deploy**. - - ![Confirmation screen in the upgrade wizard](/images/ec-upgrade-wizard-confirm.png) - - [View a larger version of this image](/images/ec-upgrade-wizard-confirm.png) - - During updates, the Admin Console is unavailable. A modal is displayed with a message that the update is in progress. - - :::note - KOTS can experience downtime during an update, such as in single-node installations. If downtime occurs, refreshing the page results in a connection error. Users can refresh the page again after the update is complete to access the Admin Console. 
- ::: - -## Update in Air Gap Clusters - -<DoNotDowngrade/> - -<UpdateAirGapOverview/> - -### Upload the New Version From the Command Line - -To update by uploading the air gap bundle for the new version from the command line: - -<UpdateAirGapCli/> - -### Upload the New Version From the Admin Console - -To update by uploading the air gap bundle for the new version from the Admin Console: - -<UpdateAirGapAdm/> - -================ -File: docs/enterprise/updating-kurl-about.mdx -================ -import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" - -# About kURL Cluster Updates - -<KurlAvailability/> - -This topic provides an overview of Replicated kURL cluster updates. For information about how to perform updates in kURL clusters, see [Performing Updates in kURL Clusters](updating-kurl). - -## Overview - -The Replicated kURL installer spec specifies the kURL add-ons and the Kubernetes version that are deployed in kURL clusters. You can run the kURL installation script to apply the latest installer spec and update the cluster. - -## About Kubernetes Updates {#kubernetes} - -The version of Kubernetes running in a kURL cluster can be upgraded by one or more minor versions. - -The Kubernetes upgrade process in kURL clusters steps through one minor version at a time. For example, upgrades from Kubernetes 1.19.x to 1.26.x install versions 1.20.x, 1.21.x, 1.22.x, 1.23.x, 1.24.x, and 1.25.x before installing 1.26.x. - -The installation script automatically detects when the Kubernetes version in your cluster must be updated. When a Kubernetes upgrade is required, the script first prints a prompt: `Drain local node and apply upgrade?`. When you confirm the prompt, it drains and upgrades the local primary node where the script is running. - -Then, if there are any remote primary nodes to upgrade, the script drains each sequentially and prints a command that you must run on the node to upgrade. For example, the command that the script prints might look like the following: `curl -sSL https://kurl.sh/myapp/upgrade.sh | sudo bash -s hostname-check=master-node-2 kubernetes-version=v1.24.3`. - -The script polls the status of each remote node until it detects that the Kubernetes upgrade is complete. Then, it uncordons the node and proceeds to cordon and drain the next node. This process ensures that only one node is cordoned at a time. After upgrading all primary nodes, the script performs the same operation sequentially on all remote secondary nodes. - -### Air Gap Multi-Version Kubernetes Updates {#kubernetes-multi} - -To upgrade Kubernetes by more than one minor version in air gapped kURL clusters, you must provide a package that includes the assets required for the upgrade. - -When you run the installation script to upgrade, the script searches for the package in the `/var/lib/kurl/assets/` directory. The script then lists any required assets that are missing, prints a command to download the missing assets as a `.tar.gz` package, and prompts you to provide an absolute path to the package in your local directory. For example: - -``` -⚙ Upgrading Kubernetes from 1.23.17 to 1.26.3 -This involves upgrading from 1.23 to 1.24, 1.24 to 1.25, and 1.25 to 1.26. -This may take some time.
-⚙ Downloading assets required for Kubernetes 1.23.17 to 1.26.3 upgrade -The following packages are not available locally, and are required: - kubernetes-1.24.12.tar.gz - kubernetes-1.25.8.tar.gz - -You can download them with the following command: - - curl -LO https://kurl.sh/bundle/version/v2023.04.24-0/19d41b7/packages/kubernetes-1.24.12,kubernetes-1.25.8.tar.gz - -Please provide the path to the file on the server. -Absolute path to file: -``` - -## About Add-ons and KOTS Updates {#add-ons} - -If the application vendor updated any add-ons in the kURL installer spec since the last time that you ran the installation script in your cluster, the script automatically updates the add-ons after updating Kubernetes (if required). - -For a complete list of add-ons that can be included in the kURL installer spec, including the KOTS add-on, see [Add-ons](https://kurl.sh/docs/add-ons/antrea) in the kURL documentation. - -### Containerd and Docker Add-on Updates - -The installation script upgrades the version of the Containerd or Docker container runtime if required by the installer spec. For example, if your cluster uses Containerd version 1.6.4 and the spec is updated to use 1.6.18, then Containerd is updated to 1.6.18 in your cluster when you run the installation script. - -The installation script also supports migrating from Docker to Containerd as Docker is not supported in Kubernetes versions 1.24 and later. If the install script detects a change from Docker to Containerd, it installs Containerd, loads the images found in Docker, and removes Docker. - -For information about the container runtime add-ons, see [Containerd Add-On](https://kurl.sh/docs/add-ons/containerd) and [Docker Add-On](https://kurl.sh/docs/add-ons/docker) in the kURL documentation. - -### KOTS Updates (KOTS Add-on) - -The version of KOTS that is installed in a kURL cluster is set by the [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm), which is defined in the kURL installer spec. - -For example, if the version of KOTS running in your cluster is 1.109.0, and the KOTS add-on in the kURL installer spec is updated to 1.109.12, then the KOTS version in your cluster is updated to 1.109.12 when you update the cluster. - -================ -File: docs/enterprise/updating-kurl.mdx -================ -import InstallerRequirements from "../partials/updating/_installerRequirements.mdx" -import UpgradePrompt from "../partials/updating/_upgradePrompt.mdx" -import AdminConsole from "../partials/updating/_admin-console.mdx" -import AdminConsoleAirGap from "../partials/updating/_admin-console-air-gap.mdx" -import DownloadKurlBundle from "../partials/install/_download-kurl-bundle.mdx" -import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" - -# Performing Updates in kURL Clusters - -<KurlAvailability/> - -This topic describes how to perform updates in Replicated kURL installations. It includes procedures for updating an application, as well as for updating the versions of Kubernetes, Replicated KOTS, and add-ons in a kURL cluster. - -For more information about managing nodes in kURL clusters, including how to safely reset, reboot, and remove nodes when performing maintenance tasks, see [Managing Nodes](https://kurl.sh/docs/install-with-kurl/managing-nodes) in the open source kURL documentation. - -## Update an Application - -For kURL installations, you can update an application from the Admin Console. You can also set up automatic updates. See [Configuring Automatic Updates](/enterprise/updating-apps). 
- -### Online Environments - -<AdminConsole/> - -### Air Gap Environments - -<AdminConsoleAirGap/> - -## Update the kURL Cluster - -After updating the kURL installer spec, you can rerun the kURL installation script to update a kURL cluster. For more information about kURL cluster updates, see [About kURL Cluster Updates](/enterprise/updating-kurl-about). - -For more information about managing nodes in kURL clusters, including how to safely reset, reboot, and remove nodes when performing maintenance tasks, see [Managing Nodes](https://kurl.sh/docs/install-with-kurl/managing-nodes) in the open source kURL documentation. - -:::important -The Kubernetes scheduler automatically reschedules Pods to other nodes during maintenance. Any deployments or StatefulSets with a single replica experience downtime while being rescheduled. -::: - -### Online Environments - -To update the kURL cluster in an online environment: - -1. Edit the kURL installer spec as desired. For example, update the version of Kubernetes or add, remove, or update add-ons. For more information, see [Creating a kURL Installer](/vendor/packaging-embedded-kubernetes). - -1. Run the kURL installation script on any primary node in the cluster: - - ```bash - curl -sSL https://k8s.kurl.sh/APP_SLUG | sudo bash -s ADVANCED_OPTIONS - ``` - Replace: - * `APP_SLUG` with the unique slug for the application. - * `ADVANCED_OPTIONS` optionally with any flags listed in [Advanced Options](https://kurl.sh/docs/install-with-kurl/advanced-options) in the kURL documentation. - - To use no advanced installation options, remove `-s ADVANCED_OPTIONS` from the command. - - See the following recommendations for advanced options: - - <InstallerRequirements/> - -1. <UpgradePrompt/> - -### Air Gap Environments - -For air gap installations, you must load images on each node in the cluster before you can run the installation script to update Kubernetes and any add-ons. This is because upgraded components might have Pods scheduled on any node in the cluster. - -To update the kURL cluster in an air gap environment: - -1. Edit the kURL installer spec as desired. For example, update the version of Kubernetes or add, remove, or update add-ons. For more information, see [Creating a kURL Installer](/vendor/packaging-embedded-kubernetes). - -1. Repeat the following steps on each node in the cluster to download and extract the kURL `.tar.gz` air gap bundle for the updated spec: - - 1. Download the kURL `.tar.gz` air gap bundle from the channel where the new kURL installer spec is promoted: - - * To download the kURL air gap bundle for the Stable channel: - - <DownloadKurlBundle/> - - * To download the kURL bundle for channels other than Stable: - - ```bash - replicated channel inspect CHANNEL - ``` - Replace `CHANNEL` with the exact name of the target channel, which can include uppercase letters or special characters, such as `Unstable` or `my-custom-channel`. - - In the output of this command, copy the curl command with the air gap URL. - - 1. Extract the contents of the bundle: - - ```bash - tar -xvzf FILENAME.tar.gz - ``` - Replace `FILENAME` with the name of the downloaded kURL `.tar.gz` air gap bundle. - -1. Run the following kURL script to ensure all required images are available: - - ```bash - cat tasks.sh | sudo bash -s load-images - ``` - - :::note - The kURL installation script that you will run in the next step also performs a check for required images and prompts you to run the `load-images` command if any images are missing. - ::: - -1.
Run the kURL installation script on any primary node in the cluster with the `airgap` option: - - ```bash - cat install.sh | sudo bash -s airgap OTHER_ADVANCED_OPTIONS - ``` - Replace `OTHER_ADVANCED_OPTIONS` optionally with any flags listed in [Advanced Options](https://kurl.sh/docs/install-with-kurl/advanced-options) in the kURL documentation. - - See the following recommendations for advanced options: - <InstallerRequirements/> - -1. <UpgradePrompt/> - - :::note - If Kubernetes must be upgraded by more than one minor version, the script automatically searches for the required Kubernetes assets in the `/var/lib/kurl/assets/` directory. If the assets are not available, the script prints a command to download the assets as a `tar.gz` package. Download and provide the absolute path to the package when prompted to continue with the upgrade. - ::: - -================ -File: docs/enterprise/updating-licenses.md -================ -# Updating Licenses in the Admin Console - -This topic describes how to update a license from the KOTS Admin Console. - -## Update Online Licenses - -To update licenses in online environments: - -1. In the Admin Console, go to the **License** tab. - -1. Click **Sync license** to get the latest updates. - - ![Online License](/images/online-license-tab.png) - - [View a larger version of this image](/images/online-license-tab.png) - - :::note - If no changes are detected, a **License is already up to date** message appears. - ::: - - When the license is updated, KOTS makes a new version available that includes the license changes: - - ![License updated successfully](/images/kots-license-update-message.png) - - [View a larger version of this image](/images/kots-license-update-message.png) - -1. In the dialog, click **Go to new version** to navigate to the **Version history** page. - -1. On the **Version history** page, next to the new version labeled **License Change**, click **Deploy** then **Yes, deploy**. - - ![Deploy license change](/images/kots-deploy-license-change.png) - - [View a larger version of this image](/images/kots-deploy-license-change.png) - - The version with the license change is then displayed as the currently deployed version, as shown below: - - ![Currently deployed version](/images/kots-license-change-currently-deployed.png) - - [View a larger version of this image](/images/kots-license-change-currently-deployed.png) - -## Update Air Gap Licenses - -To update licenses in air gap environments: - -1. Download the new license. Ensure that it is available on the machine where you can access a browser. - -1. In the Admin Console, go to the **License** tab. - -1. Click **Upload license** and select the new license. - - ![Airgap License](/images/airgap-license-tab.png) - - [View a larger version of this image](/images/airgap-license-tab.png) - - :::note - If no changes are detected, a **License is already up to date** message appears. - ::: - - When the license is updated, KOTS makes a new version available that includes the license changes: - - ![License updated successfully](/images/kots-airgap-license-update-message.png) - - [View a larger version of this image](/images/kots-airgap-license-update-message.png) - -1. In the dialog, click **Go to new version** to navigate to the **Version history** page. - -1. On the **Version history** page, next to the new version labeled **License Change**, click **Deploy** then **Yes, deploy**. 
- - ![Deploy license change](/images/kots-deploy-license-change.png) - - [View a larger version of this image](/images/kots-deploy-license-change.png) - - The version with the license change is then displayed as the currently deployed version, as shown below: - - ![Currently deployed version](/images/kots-license-change-currently-deployed.png) - - [View a larger version of this image](/images/kots-license-change-currently-deployed.png) - -## Upgrade from a Community License - -If you have a community license, you can change your license by uploading a new one. This allows you to upgrade from a community version of the software without having to reinstall the Admin Console and the application. - -To change a community license to another license: - -1. Download the new license. -1. In the **License** tab of the Admin Console, click **Change license**. -1. In the dialog, upload the new license file. - -================ -File: docs/enterprise/updating-patching-with-kustomize.md -================ -# Patching with Kustomize - -This topic describes how to use Kustomize to patch an application before deploying. - -## Overview - -Replicated KOTS uses Kustomize to let you make patches to an application outside of the options available in the KOTS Admin Console **Config** page. _Kustomizations_ are the Kustomize configuration objects, defined in `kustomization.yaml` files, that describe how to transform or generate other Kubernetes objects. - -These kustomizations overlay the application resource files and can persist after release updates. For example, you can kustomize the number of replicas that you want to continually use in your environment or specify what `nodeSelectors` to use for a deployment. - -For more information, see the [Kustomize website](https://kustomize.io). - -## Limitation - -For Helm charts deployed with version `kots.io/v1beta2` of the KOTS HelmChart custom resource, editing the downstream Kustomization files to make changes to the application before deploying is not supported. This is because KOTS does not use Kustomize when installing Helm charts with the `kots.io/v1beta2` HelmChart custom resource. For more information, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about). - -## About the Directory Structure - -You can patch an application with Kustomize from the **View files** page in the Admin Console. The **View files** page shows the Kubernetes manifest files for the application. 
- -The following image shows an example of the file directory on the **View files** page: - -![Kustomize Directory Structure](/images/kustomize-dir-structure.png) - -[View a larger version of this image](/images/kustomize-dir-structure.png) - -For more information about each of the sections in the file directory, see the following sections: - -- [Upstream](#upstream) -- [Base](#base) -- [Overlays](#overlays) -- [Rendered](#rendered) -- [skippedFiles](#skippedfiles) - -### Upstream - -The following table describes the `upstream` directory and whether custom changes persist after an update: - -<table> - <thead> - <tr> - <th width="16%">Directory</th> - <th width="15%">Changes Persist?</th> - <th width="67%">Description</th> - </tr> - </thead> - <tr> - <td><code>upstream</code></td> - <td>No, except for the <code>userdata</code> subdirectory</td> - <td><p>The <code>upstream</code> directory exactly mirrors the content pushed to a release.</p><p>Contains the template functions, preflight checks, support bundle, config options, license, and so on.</p><p>Contains a <code>userdata</code> subdirectory that includes user data files such as the license file and the config file.</p></td> - </tr> -</table> - -### Base - -The following table describes the `base` directory and whether custom changes persist after an update: - -<table> - <thead> - <tr> - <th width="16%">Directory</th> - <th width="15%">Changes Persist?</th> - <th width="67%">Description</th> - </tr> - </thead> - <tr> - <td><code>base</code></td> - <td>No</td> - <td><p>After KOTS processes and renders the <code>upstream</code>, it puts those files in the <code>base</code> directory.</p><p>Only the deployable application files, such as files deployable with <code>kubectl apply</code>, are placed here.</p><p>Any non-deployable manifests, such as template functions, preflight checks, and configuration options, are removed.</p></td> - </tr> -</table> - - -### Overlays - -The `overlays` directory contains the following subdirectories that apply specific kustomizations to the `base` directory when deploying a version to the cluster. - The following table describes the subdirectories and specifies whether the custom changes made in each subdirectory persist after an update. - <table> - <thead> - <tr> - <th width="16%">Subdirectory</th> - <th width="15%">Changes Persist?</th> - <th width="67%">Description</th> - </tr> - </thead> - <tr> - <td><code>midstream</code></td> - <td>No</td> - <td>Contains KOTS-specific kustomizations, such as:<ul><li>Backup labels, such as those used to configure Velero.</li><li>Image pull secret definitions and patches to inject the <code>imagePullSecret</code> field into relevant manifests (such as deployments, stateful sets, and jobs).</li></ul></td> - </tr> - <tr> - <td><code>downstream</code></td> - <td>Yes</td> - <td><p>Contains user-defined kustomizations that are applied to the <code>midstream</code> directory and deployed to the cluster.</p><p>Only one <code>downstream</code> subdirectory is supported. It is automatically created and named <code>this-cluster</code> when the Admin Console is installed.</p><p>To add kustomizations, see <a href="#patch-an-application">Patch an Application</a>.</p></td> - </tr> - <tr> - <td><code>midstream/charts</code></td> - <td>No</td> - <td><p>Appears only when the <code>useHelmInstall</code> property in the HelmChart custom resource is set to <code>true</code>.</p><p>Contains a subdirectory for each Helm chart.
Each Helm chart has its own kustomizations because each chart is rendered and deployed separately from other charts and manifests.</p><p>The subcharts of each Helm chart also have their own kustomizations and are rendered separately. However, these subcharts are included and deployed as part of the parent chart.</p></td> - </tr> - <tr> - <td><code>downstream/charts</code></td> - <td>Yes</td> - <td><p>Appears only when the <code>useHelmInstall</code> property in the HelmChart custom resource is set to <code>true</code>.</p><p>Contains a subdirectory for each Helm chart. Each Helm chart has its own kustomizations because each chart is rendered and deployed separately from other charts and manifests.</p><p>The subcharts of each Helm chart also have their own kustomizations and are rendered separately. However, these subcharts are included and deployed as part of the parent chart.</p></td> - </tr> - </table> - -### Rendered - -The following table describes the `rendered` directory and whether custom changes persist after an update: - -<table> - <thead> - <tr> - <th width="16%">Directory</th> - <th width="15%">Changes Persist?</th> - <th width="67%">Description</th> - </tr> - </thead> - <tr> - <td><code>rendered</code></td> - <td>No</td> - <td><p>Contains the final rendered application manifests that are deployed to the cluster.</p><p>The rendered files are created when KOTS processes the <code>base</code> by applying the corresponding overlays and the user-defined kustomizations. KOTS puts the rendered files in the <code>rendered</code> directory.</p></td> - </tr> - <tr> - <td><code>rendered/charts</code></td> - <td>No</td> - <td><p>Appears only when the <code>useHelmInstall</code> property in the HelmChart custom resource is set to <code>true</code>.</p><p>Contains a subdirectory for each rendered Helm chart. Each Helm chart is deployed separately from other charts and manifests.</p><p>The rendered subcharts of each Helm chart are included and deployed as part of the parent chart.</p></td> - </tr> -</table> - -### skippedFiles - -The `skippedFiles` directory lists files that KOTS is not able to process or render, such as invalid YAML files. - -The `_index.yaml` file contains metadata and details about the errors, such as which files they were found in and sometimes the line number of the error. - -## Patch an Application - -To patch the application with Kustomize so that your changes persist between updates, edit the files in the `overlays/downstream/this-cluster` directory. - -The Admin Console overwrites the `upstream` and `base` directories each time you upgrade the application to a later version. - -To patch an application: - -1. On the View Files tab in the Admin Console, click **Need to edit these files? Click here to learn how**. - - ![edit-patches-kots-app](/images/edit-patches-kots-app.png) - -1. To download the application bundle locally: - - ```shell - kubectl kots download --namespace APP_NAMESPACE --slug APP_SLUG - ``` - Replace: - * `APP_NAMESPACE` with the namespace on the cluster where the application is deployed. - * `APP_SLUG` with the unique slug for the application. - - You can copy these values from the dialog that appears when you click **Need to edit these files? Click here to learn how**. - -1. Create a Kubernetes manifest YAML file and make any desired edits. You only need to add the fields and values that you want to change because this patch file overwrites the corresponding values in the `base` directory. 
For example, the following `Deployment` patch manifest file shows an edit only to the number of replicas. None of the other values in the `base/deployment.yaml` file will be overwritten. - - ```yaml - apiVersion: apps/v1 - kind: Deployment - metadata: - name: example-nginx - spec: - replicas: 2 - ``` - -1. Add the filename that you created in the previous step to the `patches` field in the `kustomization.yaml` file, located in `/overlays/downstream/this-cluster`. The `downstream/this-cluster` subdirectory is where custom changes (patches) persist when releases are updated. These changes are in turn applied to the `midstream` directory. For more information, see [overlays](#overlays). - - **Example:** - - ```yaml - apiVersion: kustomize.config.k8s.io/v1beta1 - bases: - - ../../midstream - kind: Kustomization - patches: - - path: ./FILENAME.yaml - ``` - -1. Upload your changes to the cluster: - - ```shell - kubectl kots upload --namespace APP_NAMESPACE --slug APP_SLUG ~/APP-SLUG - ``` - -1. On the Version History tab in the Admin Console, click **Diff** to see the new version of the application with the diff of the changes that you uploaded. - - ![kustomize-view-history-diff](/images/kustomize-view-history-diff.png) - - [View a larger version of this image](/images/kustomize-view-history-diff.png) - -1. Click **Deploy** to apply the changes. - - ![kustomize-view-history-deploy](/images/kustomize-view-history-deploy.png) - -1. Verify your changes. For example, running the following command shows that there are two NGINX pods running after deploying two replicas in the example YAML above: - - ```shell - kubectl get po | grep example-nginx - ``` - **Example output:** - - ```shell - example-nginx-f5c49fdf6-bf584 1/1 Running 0 1h - example-nginx-t6ght74jr-58fhr 1/1 Running 0 1m - ``` - -================ -File: docs/enterprise/updating-tls-cert.md -================ -import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" - -# Updating TLS Certificates in kURL Clusters - -<KurlAvailability/> - -This topic describes how to upload custom TLS certificates for Replicated kURL clusters. - -## Overview - -For kURL clusters, the default Replicated KOTS self-signed certificate automatically renews 30 days before the expiration date. - -If you have uploaded a custom TLS certificate instead, then no renewal is attempted, even if the certificate is expired. In this case, you must manually upload a new custom certificate. - -For information about TLS renewal for registry and Kubernetes control plane with Replicated kURL, see [TLS Certificates](https://kurl.sh/docs/install-with-kurl/setup-tls-certs) in the kURL documentation. - -## Update Custom TLS Certificates - -If you are using a custom TLS certificate in a kURL cluster, you manually upload a new certificate when the previous one expires. - -:::important -Adding the `acceptAnonymousUploads` annotation temporarily creates a vulnerability for an attacker to maliciously upload TLS certificates. After TLS certificates have been uploaded, the vulnerability is closed again. - -Replicated recommends that you complete this upload process quickly to minimize the vulnerability risk. -::: - -To upload a new custom TLS certificate: - -1. Run the following annotation command to restore the ability to upload new TLS certificates: - - ```bash - kubectl -n default annotate secret kotsadm-tls acceptAnonymousUploads=1 --overwrite - ``` -1. 
Run the following command to get the name of the kurl-proxy server: - - ```bash - kubectl get pods -A | grep kurl-proxy | awk '{print $2}' - ``` - -1. Run the following command to delete the kurl-proxy pod. The pod automatically restarts after the command runs. - - ```bash - kubectl delete pods PROXY_SERVER - ``` - - Replace PROXY_SERVER with the name of the kurl-proxy server that you got in the previous step. - -1. After the pod has restarted, direct your browser to `http://<ip>:8800/tls` and go through the upload process in the user interface. - -================ -File: docs/partials/airgap/_airgap-bundle.mdx -================ -Air gap bundles (`.airgap`) contain the images needed to install and run a single release of your application in _air gap_ environments with no outbound internet access. - -================ -File: docs/partials/application-links/_nginx-deployment.mdx -================ -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: nginx - labels: - app: nginx -spec: - selector: - matchLabels: - app: nginx - template: - metadata: - labels: - app: nginx - annotations: - backup.velero.io/backup-volumes: nginx-content - spec: - containers: - - name: nginx - image: nginx - resources: - limits: - memory: '256Mi' - cpu: '500m' - requests: - memory: '32Mi' - cpu: '100m' -``` - -================ -File: docs/partials/application-links/_nginx-k8s-app.mdx -================ -```yaml -apiVersion: app.k8s.io/v1beta1 -kind: Application -metadata: - name: "nginx" -spec: - descriptor: - links: - - description: Open App - # needs to match applicationUrl in kots-app.yaml - url: "http://nginx" -``` - -================ -File: docs/partials/application-links/_nginx-kots-app.mdx -================ -```yaml -apiVersion: kots.io/v1beta1 -kind: Application -metadata: - name: nginx -spec: - title: App Name - icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/kubernetes/icon/color/kubernetes-icon-color.png - statusInformers: - - deployment/nginx - ports: - - serviceName: "nginx" - servicePort: 80 - localPort: 8888 - applicationUrl: "http://nginx" -``` - -================ -File: docs/partials/application-links/_nginx-service.mdx -================ -```yaml -apiVersion: v1 -kind: Service -metadata: - name: nginx - labels: - app: nginx - annotations: - kots.io/when: '{{repl not IsKurl }}' -spec: - type: ClusterIP - ports: - - port: 80 - selector: - app: nginx ---- -apiVersion: v1 -kind: Service -metadata: - name: nginx - labels: - app: nginx - annotations: - kots.io/when: '{{repl IsKurl }}' -spec: - type: NodePort - ports: - - port: 80 - nodePort: 8888 - selector: - app: nginx -``` - -================ -File: docs/partials/ci-cd/_build-source-code.mdx -================ -Add one or more jobs to compile your application source code and build images. The build jobs that you create vary depending on your application and your CI/CD platform. For additional guidance, see the documentation for your CI/CD platform. - -================ -File: docs/partials/ci-cd/_test-recs.mdx -================ -* **Application Testing:** Traditional application testing includes unit, integration, and end-to-end tests. These tests are critical for application reliability, and Compatibility Matrix is designed to incorporate and use your application testing. - -* **Performance Testing:** Performance testing is used to benchmark your application to ensure it can handle the expected load and scale gracefully.
Test your application under a range of workloads and scenarios to identify any bottlenecks or performance issues. Make sure to optimize your application for different Kubernetes distributions and configurations by creating all of the environments you need to test in. - -* **Smoke Testing:** Using a single, conformant Kubernetes distribution to test basic functionality of your application with default (or standard) configuration values is a quick way to get feedback if something is likely to be broken for all or most customers. Replicated also recommends that you include each Kubernetes version that you intend to support in your smoke tests. - -* **Compatibility Testing:** Because applications run on various Kubernetes distributions and configurations, it is important to test compatibility across different environments. Compatibility Matrix provides this infrastructure. - -* **Canary Testing:** Before releasing to all customers, consider deploying your application to a small subset of your customer base as a _canary_ release. This lets you monitor the application's performance and stability in real-world environments, while minimizing the impact of potential issues. Compatibility Matrix enables canary testing by simulating exact (or near) customer environments and configurations to test your application with. - -================ -File: docs/partials/cmx/_openshift-pool.mdx -================ -:::note -Due to the time it takes to start an OpenShift cluster, a warm pool of OpenShift clusters is maintained. -When available, an OpenShift cluster from the pool starts in approximately two minutes with default disks. -When starting a cluster with a disk size different than the default, an additional four minutes is added to the warm cluster start time. -::: - -================ -File: docs/partials/cmx/_overview.mdx -================ -Replicated Compatibility Matrix quickly provisions ephemeral clusters of different Kubernetes distributions and versions, such as OpenShift, EKS, and Replicated kURL. - -You can use Compatibility Matrix to get kubectl access to running clusters within minutes or less. This allows you to more easily test your code in a range of different environments before releasing to customers. - -Example use cases for Compatibility Matrix include: -* Run tests before releasing a new version of your application to validate compatibility with supported Kubernetes distributions -* Get access to a cluster to develop on and quickly test changes -* Reproduce a reported issue on a customer-representative environment for troubleshooting - -================ -File: docs/partials/cmx/_prerequisites.mdx -================ -* Create an account in the Replicated Vendor Portal. See [Creating a Vendor Account](/vendor/vendor-portal-creating-account). - -* Install the Replicated CLI and then authorize the CLI using your vendor account. See [Installing the Replicated CLI](/reference/replicated-cli-installing). - -* If you have a contract, you can purchase more credits by going to [**Compatibility Matrix > Buy additional credits**](https://vendor.replicated.com/compatibility-matrix). Otherwise, you can request credits by going to [**Compatibility Matrix > Request more credits**](https://vendor.replicated.com/compatibility-matrix) in the Vendor Portal. For more information, see [Billing and Credits](/vendor/testing-about#billing-and-credits). 
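As a minimal sketch of the CLI setup described in these prerequisites (assuming macOS or Linux with Homebrew, and using a placeholder for the API token that you generate in the Vendor Portal):

```bash
# Install the Replicated CLI from the Replicated Homebrew tap
brew install replicatedhq/replicated/cli

# Authorize the CLI with a Vendor Portal API token
export REPLICATED_API_TOKEN=<your-api-token>

# Verify access by listing the available cluster distributions
replicated cluster versions
```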
- -================ -File: docs/partials/cmx/_supported-clusters-overview.mdx -================ -Compatibility Matrix can create clusters on virtual machines (VMs), such as kind, k3s, RKE2, and Red Hat OpenShift OKD, and also create cloud-managed clusters, such as EKS, GKE, and AKS: - -* Cloud-based Kubernetes distributions are run in a Replicated managed and controlled cloud account to optimize and deliver clusters quickly and reliably. The Replicated account has control planes ready and adds a node group when you request it, making the cluster available much faster than if you try to create your own cluster with your own cloud account. - -* VMs run on Replicated bare metal servers located in several data centers, including data centers physically in the European Union. - -To view an up-to-date list of the available cluster distributions, including the supported Kubernetes versions, instance types, and maximum nodes for each distribution, run [`replicated cluster versions`](/reference/replicated-cli-cluster-versions). - -For detailed information about the available cluster distributions, see [Supported Compatibility Matrix Cluster Types](testing-supported-clusters). - -================ -File: docs/partials/collab-repo/_collab-existing-user.mdx -================ -If a team member adds a GitHub username to their Vendor Portal account that already exists in the collab repository, then the Vendor Portal does _not_ change the role that the existing user is assigned in the collab repository. - -However, if the RBAC policy assigned to this member in the Vendor Portal later changes, or if the member is removed from the Vendor Portal team, then the Vendor Portal updates or removes the user in the collab repository accordingly. - -================ -File: docs/partials/collab-repo/_collab-rbac-important.mdx -================ -:::important -The RBAC policy that you specify also determines the level of access that the user has to the Replicated collab repository in GitHub. By default, the Read Only policy grants the user read access to the collab repository. - -For more information about managing user access to the collab repository from the Vendor Portal, see [Managing Access to the Collab Repository](team-management-github-username). -::: - -================ -File: docs/partials/collab-repo/_collab-rbac-resources-important.mdx -================ -:::important -When you update an existing RBAC policy to add one or more `team/support-issues` resources, the GitHub role in the Replicated collab repository of every team member that is assigned to that policy and has a GitHub username saved in their account is updated accordingly. -::: - -================ -File: docs/partials/collab-repo/_collab-repo-about.mdx -================ -The replicated-collab organization in GitHub is used for tracking and collaborating on escalations, bug reports, and feature requests that are sent by members of a Vendor Portal team to the Replicated team. Replicated creates a unique repository in the replicated-collab organization for each Vendor Portal team. Members of a Vendor Portal team submit issues to their unique collab repository on the Support page in the [Vendor Portal](https://vendor.replicated.com/support). - -For more information about the collab repositories and how they are used, see [Replicated Support Paths and Processes](https://community.replicated.com/t/replicated-vendor-support-paths-and-processes/850) in _Replicated Community_.
- -================ -File: docs/partials/config/_affixExample.mdx -================ -```yaml -groups: -- name: example_settings - title: My Example Config - description: Configuration to serve as an example for creating your own. - items: - - name: username - title: Username - type: text - required: true - affix: left - - name: password - title: Password - type: password - required: true - affix: right -``` - -================ -File: docs/partials/config/_defaultExample.mdx -================ -```yaml -- name: custom_key - title: Set your secret key for your app - description: Paste in your Custom Key - items: - - name: key - title: Key - type: text - value: "" - default: change me -``` -![Default change me value displayed under the config field](/images/config-default.png) - -[View a larger version of this image](/images/config-default.png) - -================ -File: docs/partials/config/_helpTextExample.mdx -================ -```yaml -- name: http_settings - title: HTTP Settings - items: - - name: http_enabled - title: HTTP Enabled - help_text: Check to enable the HTTP listener - type: bool -``` -![Config field with help text underneath](/images/config-help-text.png) - -[View a larger version of this image](/images/config-help-text.png) - -================ -File: docs/partials/config/_hiddenExample.mdx -================ -```yaml -- name: secret_key - title: Secret Key - type: password - hidden: true - value: "{{repl RandomString 40}}" -``` - -================ -File: docs/partials/config/_item-types.mdx -================ -- `bool` -- `dropdown` -- `file` -- `heading` -- `label` -- `password` -- `radio` -- `select_one` (Deprecated) -- `text` -- `textarea` - -================ -File: docs/partials/config/_nameExample.mdx -================ -```yaml -- name: http_settings - title: HTTP Settings - items: - - name: http_enabled - title: HTTP Enabled - type: bool -``` - -================ -File: docs/partials/config/_property-when.mdx -================ -It can be useful to conditionally show or hide fields so that your users are only provided the configuration options that are relevant to them. This helps to reduce user error when configuring the application. Conditional statements in the `when` property can be used to evaluate things like the user's environment, license entitlements, and configuration choices. For example: -* The Kubernetes distribution of the cluster -* If the license includes a specific feature entitlement -* The number of users that the license permits -* If the user chooses to bring their own external database, rather than using an embedded database offered with the application - -You can construct conditional statements in the `when` property using KOTS template functions. KOTS template functions are a set of custom template functions based on the Go text/template library. For more information, see [About Template Functions](/reference/template-functions-about). - -================ -File: docs/partials/config/_randomStringNote.mdx -================ -:::note -When you assign a template function that generates a value to a `value` property, you can use the `readonly` and `hidden` properties to define whether or not the generated value is ephemeral or persistent between changes to the configuration settings for the application. For more information, see [RandomString](template-functions-static-context#randomstring) in _Static Context_. 
-::: - -================ -File: docs/partials/config/_readonlyExample.mdx -================ -```yaml -- name: key - title: Key - type: text - value: "" - default: change me -- name: unique_key - title: Unique Key - type: text - value: "{{repl RandomString 20}}" - readonly: true -``` -![Default change me value displayed under the config field](/images/config-readonly.png) - -[View a larger version of this image](/images/config-readonly.png) - -================ -File: docs/partials/config/_recommendedExample.mdx -================ -```yaml -- name: recommended_field - title: My recommended field - type: bool - default: "0" - recommended: true -``` -![config field with green recommended tag](/images/config-recommended-item.png) - -[View a larger version of this image](/images/config-recommended-item.png) - -================ -File: docs/partials/config/_regexValidationExample.mdx -================ -``` -- name: smtp-settings - title: SMTP Settings - - name: smtp_password - title: SMTP Password - type: password - required: true - validation: - regex: - pattern: ^(?:[\w@#$%^&+=!*()_\-{}[\]:;"'<>,.?\/|]){8,16}$ - message: The password must be between 8 and 16 characters long and can contain a combination of uppercase letters, lowercase letters, digits, and special characters. - - name: jwt_token - title: JWT token - type: file - validation: - regex: - pattern: ^[A-Za-z0-9-_]+\\.[A-Za-z0-9-_]+\\.[A-Za-z0-9-_]*$ - message: Upload a file with valid JWT token. -``` - -================ -File: docs/partials/config/_requiredExample.mdx -================ -```yaml - - name: custom_key - title: Set your secret key for your app - description: Paste in your Custom Key - items: - - name: key - title: Key - type: text - value: "" - default: change me - required: true -``` -![config field with yellow required tag](/images/config-required-item.png) - -[View a larger version of this image](/images/config-required-item.png) - -================ -File: docs/partials/config/_typeExample.mdx -================ -```yaml -- name: group_title - title: Group Title - items: - - name: http_enabled - title: HTTP Enabled - type: bool - default: "0" -``` -![field named HTTP Enabled with disabled checkbox](/images/config-screen-bool.png) - -[View a larger version of this image](/images/config-screen-bool.png) - -================ -File: docs/partials/config/_valueExample.mdx -================ -```yaml -- name: custom_key - title: Set your secret key for your app - description: Paste in your Custom Key - items: - - name: key - title: Key - type: text - value: "{{repl RandomString 20}}" -``` -![config field with random string as HTML input](/images/config-value-randomstring.png) - -[View a larger version of this image](/images/config-value-randomstring.png) - -================ -File: docs/partials/config/_when-note.mdx -================ -:::note -`when` is a property of both groups and items. See [Group Properties > `when`](/reference/custom-resource-config#when) above. -::: - -================ -File: docs/partials/config/_when-requirements.mdx -================ -* The `when` property accepts the following types of values: - * Booleans - * Strings that match "true", "True", "false", or "False" - - [KOTS template functions](/reference/template-functions-about) can be used to render these supported value types. 
-* For the `when` property to evaluate to true, the values compared in the conditional statement must match exactly without quotes - -================ -File: docs/partials/config/_whenExample.mdx -================ -```yaml -- name: database_settings_group - title: Database Settings - items: - - name: db_type - title: Database Type - type: radio - default: external - items: - - name: external - title: External - - name: embedded - title: Embedded DB - - name: database_host - title: Database Hostname - type: text - when: repl{{ (ConfigOptionEquals "db_type" "external")}} - - name: database_password - title: Database Password - type: password - when: repl{{ (ConfigOptionEquals "db_type" "external")}} -``` - -<img alt="External option selected and conditional fields displayed" src="/images/config-when-enabled.png" width="500px"/> - -[View a larger version of this image](/images/config-when-enabled.png) - -<img alt="Embedded DB option selected and no additional fields displayed" src="/images/config-when-disabled.png" width="500px"/> - -[View a larger version of this image](/images/config-when-disabled.png) - -================ -File: docs/partials/configValues/_boolExample.mdx -================ -```yaml -bool_config_field: - value: "1" -``` -```yaml -bool_config_field: - value: "0" -``` - -================ -File: docs/partials/configValues/_config-values-procedure.mdx -================ -During installation, KOTS automatically generates a ConfigValues file and saves the file in a directory called `upstream`. After installation, you can view the generated ConfigValues file in the Admin Console **View files** tab or from the command line by running the `kubectl kots get config` command. - -To get the ConfigValues file from an installed application instance: - -1. Install the target release in a development environment. You can either install the release with Replicated Embedded Cluster or install in an existing cluster with KOTS. For more information, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded) or [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster). - -1. Depending on the installer that you used, do one of the following to get the ConfigValues for the installed instance: - - * **For Embedded Cluster installations**: In the Admin Console, go to the **View files** tab. In the filetree, go to **upstream > userdata** and open **config.yaml**, as shown in the image below: - - ![ConfigValues file in the Admin Console View Files tab](/images/admin-console-view-files-configvalues.png) - - [View a larger version of this image](/images/admin-console-view-files-configvalues.png) - - * **For KOTS installations in an existing cluster**: Run the `kubectl kots get config` command to view the generated ConfigValues file: - - ```bash - kubectl kots get config --namespace APP_NAMESPACE --decrypt - ``` - Where: - * `APP_NAMESPACE` is the cluster namespace where KOTS is running. - * The `--decrypt` flag decrypts all configuration fields with `type: password`. In the downloaded ConfigValues file, the decrypted value is stored in a `valuePlaintext` field. - - The output of the `kots get config` command shows the contents of the ConfigValues file. For more information about the `kots get config` command, including additional flags, see [kots get config](/reference/kots-cli-get-config). 
- -================ -File: docs/partials/configValues/_configValuesExample.mdx -================ -```yaml -apiVersion: kots.io/v1beta1 -kind: ConfigValues -spec: - values: - text_config_field_name: - default: Example default value - value: Example user-provided value - boolean_config_field_name: - value: "1" - password_config_field_name: - valuePlaintext: examplePassword -``` - -================ -File: docs/partials/configValues/_fileExample.mdx -================ -```yaml -file_config_field: - filename: my-file.txt - value: JVBERi0xLjQKMSAw... -``` - -================ -File: docs/partials/configValues/_passwordExample.mdx -================ -```yaml -password_config_field: - valuePlaintext: myPlainTextPassword -``` - -================ -File: docs/partials/configValues/_selectOneExample.mdx -================ -```yaml -radio_config_field: - value: option_name -``` - -================ -File: docs/partials/configValues/_textareaExample.mdx -================ -```yaml -textarea_config_field: - value: This is a text area field value. -``` - -================ -File: docs/partials/configValues/_textExample.mdx -================ -```yaml -text_config_field: - value: This is a text field value. -``` - -================ -File: docs/partials/custom-domains/_wizard.mdx -================ -1. In the [Vendor Portal](https://vendor.replicated.com), go to **Custom Domains**. - -1. In the section for the target Replicated endpoint, click **Add your first custom domain** for your first domain, or click **Add new domain** for additional domains. - - The **Configure a custom domain** wizard opens. - - <img src="/images/custom-domains-download-configure.png" alt="custom domain wizard" width="600"/> - -1. For **Domain**, enter the custom domain. Click **Save & continue**. - -1. For **Create CNAME**, copy the text string and use it to create a CNAME record in your DNS account. Click **Continue**. - -1. For **Verify ownership**, copy the text string and use it to create a TXT record in your DNS account. Click **Validate & continue**. - - Your changes can take up to 24 hours to propagate. - -1. For **TLS cert creation verification**, copy the text string and use it to create a TXT record in your DNS account. Click **Validate & continue**. - - Your changes can take up to 24 hours to propagate. - -1. For **Use Domain**, to set the new domain as the default, click **Yes, set as default**. Otherwise, click **Not now**. - - :::note - Replicated recommends that you do _not_ set a domain as the default until you are ready for it to be used by customers. - ::: - -The Vendor Portal marks the domain as **Configured** after the verification checks for ownership and TLS certificate creation are complete. - -================ -File: docs/partials/custom-resource-application/_additionalImages.mdx -================ -```yaml -additionalImages: - - jenkins/jenkins:lts -``` - -================ -File: docs/partials/custom-resource-application/_additionalNamespaces.mdx -================ -```yaml -additionalNamespaces: - - "*" -``` - -================ -File: docs/partials/custom-resource-application/_allowRollback.mdx -================ -```yaml -allowRollback: false -``` - -================ -File: docs/partials/custom-resource-application/_graphs-templates.mdx -================ -The template escape sequence is `{{}}`. Use `{{ value }}`. For more information, see [Template Reference](https://prometheus.io/docs/prometheus/latest/configuration/template_reference/) in the Prometheus documentation. 
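-
-For example, the following is a minimal sketch of a graph that uses a legend template (the `mountpoint` label and the metric name are illustrative assumptions, not taken from the partials above; substitute the labels that your own Prometheus metrics expose):
-
-```yaml
-graphs:
-  - title: Disk Usage
-    query: 'node_filesystem_avail_bytes'
-    # Prometheus substitutes the mountpoint label of each series into the legend
-    legend: 'Available: {{ mountpoint }}'
-```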
- -================ -File: docs/partials/custom-resource-application/_graphs.mdx -================ -```yaml -graphs: - - title: User Signups - query: 'sum(user_signup_events_total)' -``` - -================ -File: docs/partials/custom-resource-application/_icon.mdx -================ -```yaml -icon: https://support.io/img/logo.png -``` - -================ -File: docs/partials/custom-resource-application/_minKotsVersion.mdx -================ -```yaml -minKotsVersion: "1.71.0" -``` - -================ -File: docs/partials/custom-resource-application/_ports-applicationURL.mdx -================ -<li><p>(Optional) <code>ports.applicationUrl</code>: When set to the same URL that is specified in the `descriptor.links.url` field of the Kubernetes SIG Application custom resource, KOTS adds a link on the Admin Console dashboard where the given service can be accessed. This process automatically links to the hostname in the browser (where the Admin Console is being accessed) and appends the specified `localPort`.</p><p>If not set, then the URL defined in the `descriptor.links.url` field of the Kubernetes SIG Application is linked on the Admin Console dashboard.</p></li> - -================ -File: docs/partials/custom-resource-application/_ports-kurl-note.mdx -================ -:::note -KOTS does not automatically create port forwards for installations on VMs or bare metal servers with Replicated Embedded Cluster or Replicated kURL. This is because it cannot be verified that the ports are secure and authenticated. Instead, Embedded Cluster or kURL creates a NodePort service to make the Admin Console accessible on a port on the node (port `8800` for kURL or port `30000` for Embedded Cluster). - -You can expose additional ports on the node for Embedded Cluster or kURL installations by creating NodePort services. For more information, see [Exposing Services Using NodePorts](/vendor/kurl-nodeport-services). -::: - -================ -File: docs/partials/custom-resource-application/_ports-localPort.mdx -================ -<li><code>ports.localPort</code>: The port to map on the local workstation.</li> - -================ -File: docs/partials/custom-resource-application/_ports-serviceName.mdx -================ -<li><code>ports.serviceName</code>: The name of the service that receives the traffic.</li> - -================ -File: docs/partials/custom-resource-application/_ports-servicePort.mdx -================ -<li><p><code>ports.servicePort</code>: The <code>containerPort</code> of the Pod where the service is running.</p></li> - -================ -File: docs/partials/custom-resource-application/_ports.mdx -================ -```yaml -ports: - - serviceName: web - servicePort: 9000 - localPort: 9000 - applicationUrl: "http://web" -``` - -================ -File: docs/partials/custom-resource-application/_proxyRegistryDomain.mdx -================ -```yaml -proxyRegistryDomain: "proxy.mycompany.com" -``` - -================ -File: docs/partials/custom-resource-application/_releaseNotes.mdx -================ -```yaml -releaseNotes: Fixes a bug and adds a new feature. 
-```
-
-================
-File: docs/partials/custom-resource-application/_replicatedRegistryDomain.mdx
-================
-```yaml
-replicatedRegistryDomain: "registry.mycompany.com"
-```
-
-================
-File: docs/partials/custom-resource-application/_requireMinimalRBACPrivileges.mdx
-================
-```yaml
-requireMinimalRBACPrivileges: false
-```
-
-================
-File: docs/partials/custom-resource-application/_servicePort-note.mdx
-================
-:::note
-Ensure that you use the `containerPort` and not the `servicePort`. The `containerPort` and `servicePort` are often the same port, though it is possible that they are different.
-:::
-
-================
-File: docs/partials/custom-resource-application/_statusInformers.mdx
-================
-```yaml
-statusInformers:
-  - deployment/my-web-svc
-  - deployment/my-worker
-```
-The following example shows excluding a specific status informer based on a user-supplied value from the Admin Console Configuration screen:
-```yaml
-statusInformers:
-  - deployment/my-web-svc
-  - '{{repl if ConfigOptionEquals "option" "value"}}deployment/my-worker{{repl else}}{{repl end}}'
-```
-
-================
-File: docs/partials/custom-resource-application/_supportMinimalRBACPrivileges.mdx
-================
-```yaml
-supportMinimalRBACPrivileges: true
-```
-
-================
-File: docs/partials/custom-resource-application/_targetKotsVersion.mdx
-================
-```yaml
-targetKotsVersion: "1.85.0"
-```
-
-================
-File: docs/partials/custom-resource-application/_title.mdx
-================
-```yaml
-title: My Application
-```
-
-================
-File: docs/partials/customers/_change-channel.mdx
-================
-You can change the channel that a customer is assigned to at any time. For installations with Replicated KOTS, when you change the customer's channel, the customer can synchronize their license in the Replicated Admin Console to fetch the latest release on the new channel and then upgrade. The Admin Console always fetches the latest release on the new channel, regardless of the presence of any releases on the channel that are marked as required.
-
-================
-File: docs/partials/customers/_download.mdx
-================
-You can download customer and instance data from the **Download CSV** dropdown on the **Customers** page:
-
-![Download CSV button in the Customers page](/images/customers-download-csv.png)
-
-[View a larger version of this image](/images/customers-download-csv.png)
-
-The **Download CSV** dropdown has the following options:
-
-* **Customers**: Includes details about your customers, such as the customer's channel assignment, license entitlements, expiration date, last active timestamp, and more.
-
-* (Recommended) **Customers + Instances**: Includes details about the instances associated with each customer, such as the Kubernetes distribution and cloud provider of the cluster where the instance is running, the most recent application instance status, whether the instance is active or inactive, and more. The **Customers + Instances** data is a superset of the customer data, and is the recommended download for most use cases.
-
-You can also export customer instance data as JSON using the Vendor API v3 `customer_instances` endpoint. For more information, see [Get customer instance report in CSV or JSON format](https://replicated-vendor-api.readme.io/reference/listappcustomerinstances) in the Vendor API v3 documentation.
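-
-As a hypothetical sketch, a call to that endpoint with curl might look like the following (the path, app identifier, and token handling here are assumptions; confirm the exact request shape in the linked Vendor API v3 reference):
-
-```bash
-# APP_ID and REPLICATED_API_TOKEN are placeholders for your app ID and a Vendor Portal API token
-curl --header "Authorization: $REPLICATED_API_TOKEN" \
-  "https://api.replicated.com/vendor/v3/app/$APP_ID/customer_instances"
-```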
- -================ -File: docs/partials/embedded-cluster/_definition.mdx -================ -Replicated Embedded Cluster allows you to distribute a Kubernetes cluster and your application together as a single appliance, making it easy for enterprise users to install, update, and manage the application and the cluster in tandem. Embedded Cluster is based on the open source Kubernetes distribution k0s. For more information, see the [k0s documentation](https://docs.k0sproject.io/stable/). - -For software vendors, Embedded Cluster provides a Config for defining characteristics of the cluster that will be created in the customer environment. Additionally, each version of Embedded Cluster includes a specific version of Replicated KOTS, ensuring compatibility between KOTS and the cluster. For enterprise users, cluster updates are done automatically at the same time as application updates, allowing users to more easily keep the cluster up-to-date without needing to use kubectl. - -================ -File: docs/partials/embedded-cluster/_ec-config.mdx -================ -```yaml -apiVersion: embeddedcluster.replicated.com/v1beta1 -kind: Config -spec: - version: 2.1.3+k8s-1.30 -``` - -================ -File: docs/partials/embedded-cluster/_multi-node-ha-arch.mdx -================ -The following diagram shows the architecture of an HA multi-node Embedded Cluster installation: - -![Embedded Cluster multi-node architecture with high availability](/images/embedded-architecture-multi-node-ha.png) - -[View a larger version of this image](/images/embedded-architecture-multi-node-ha.png) - -As shown in the diagram above, in HA installations with Embedded Cluster: -* A single replica of the Embedded Cluster Operator is deployed and runs on a controller node. -* A single replica of the KOTS Admin Console is deployed and runs on a controller node. -* Three replicas of rqlite are deployed in the kotsadm namespace. Rqlite is used by KOTS to store information such as support bundles, version history, application metadata, and other small amounts of data needed to manage the application. -* For installations that include disaster recovery, the Velero pod is deployed on one node. The Velero Node Agent runs on each node in the cluster. The Node Agent is a Kubernetes DaemonSet that performs backup and restore tasks such as creating snapshots and transferring data during restores. -* For air gap installations, two replicas of the air gap image registry are deployed. - -Any Helm [`extensions`](/reference/embedded-config#extensions) that you include in the Embedded Cluster Config are installed in the cluster depending on the given chart and whether or not it is configured to be deployed with high availability. - -================ -File: docs/partials/embedded-cluster/_port-reqs.mdx -================ -This section lists the ports used by Embedded Cluster. These ports must be open and available for both single- and multi-node installations. - -#### Ports Used by Local Processes - -The following ports must be open and available for use by local processes running on the same node. It is not necessary to create firewall openings for these ports. - -* 2379/TCP -* 9099/TCP -* 10248/TCP -* 10257/TCP -* 10259/TCP - -#### Ports Required for Bidirectional Communication Between Nodes - -The following ports are used for bidirectional communication between nodes. - -For multi-node installations, create firewall openings between nodes for these ports. 
- -For single-node installations, ensure that there are no other processes using these ports. Although there is no communication between nodes in single-node installations, these ports are still required. - -* 2380/TCP -* 4789/UDP -* 6443/TCP -* 7443/TCP -* 9091/TCP -* 9443/TCP -* 10249/TCP -* 10250/TCP -* 10256/TCP - -#### Admin Console Port - -The KOTS Admin Console requires that port 30000/TCP is open and available. Create a firewall opening for port 30000/TCP so that the Admin Console can be accessed by the end user. - -Additionally, port 30000 must be accessible by nodes joining the cluster. - -If port 30000 is occupied, you can select a different port for the Admin Console during installation. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install). - -#### LAM Port - -The Local Artifact Mirror (LAM) requires that port 50000/TCP is open and available. - -If port 50000 is occupied, you can select a different port for the LAM during installation. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install). - -================ -File: docs/partials/embedded-cluster/_proxy-install-limitations.mdx -================ -**Limitations:** - -* If any of your [Helm extensions](/reference/embedded-config#extensions) make requests to the internet, the given charts need to be manually configured so that those requests are made to the user-supplied proxy server instead. Typically, this requires updating the Helm values to set HTTP proxy, HTTPS proxy, and no proxy. Note that this limitation applies only to network requests made by your Helm extensions. The proxy settings supplied to the install command are used to pull the containers required to run your Helm extensions. - -* Proxy settings cannot be changed after installation or during upgrade. - -================ -File: docs/partials/embedded-cluster/_proxy-install-reqs.mdx -================ -**Requirement:** Proxy installations require Embedded Cluster 1.5.1 or later with Kubernetes 1.29 or later. - -================ -File: docs/partials/embedded-cluster/_requirements.mdx -================ -* Linux operating system - -* x86-64 architecture - -* systemd - -* At least 2GB of memory and 2 CPU cores - -* The disk on the host must have a maximum P99 write latency of 10 ms. This supports etcd performance and stability. For more information about the disk write latency requirements for etcd, see [Disks](https://etcd.io/docs/latest/op-guide/hardware/#disks) in _Hardware recommendations_ and [What does the etcd warning “failed to send out heartbeat on time” mean?](https://etcd.io/docs/latest/faq/) in the etcd documentation. - -* The filesystem at `/var/lib/embedded-cluster` has 40Gi or more of total space and must be less than 80% full - - The directory used for data storage can be changed by passing the `--data-dir` flag with the Embedded Cluster `install` command. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install). 
- - Note that in addition to the primary `/var/lib/embedded-cluster` directory, Embedded Cluster creates directories and files in the following locations: - - - `/etc/cni` - - `/etc/k0s` - - `/opt/cni` - - `/opt/containerd` - - `/run/calico` - - `/run/containerd` - - `/run/k0s` - - `/sys/fs/cgroup/kubepods` - - `/sys/fs/cgroup/system.slice/containerd.service` - - `/sys/fs/cgroup/system.slice/k0scontroller.service` - - `/usr/libexec/k0s` - - `/var/lib/calico` - - `/var/lib/cni` - - `/var/lib/containers` - - `/var/lib/kubelet` - - `/var/log/calico` - - `/var/log/containers` - - `/var/log/pods` - - `/usr/local/bin/k0s` - -* (Online installations only) Access to replicated.app and proxy.replicated.com or your custom domain for each - -* Embedded Cluster is based on k0s, so all k0s system requirements and external runtime dependencies apply. See [System requirements](https://docs.k0sproject.io/stable/system-requirements/) and [External runtime dependencies](https://docs.k0sproject.io/stable/external-runtime-deps/) in the k0s documentation. - -================ -File: docs/partials/embedded-cluster/_update-air-gap-admin-console.mdx -================ -1. On a machine with browser access (for example, where you accessed the Admin Console to configure the application), download the air gap bundle for the new version using the same curl command that you used to install. For example: - - ```bash - curl -f https://replicated.app/embedded/APP_SLUG/CHANNEL_SLUG?airgap=true -H "Authorization: LICENSE_ID" -o APP_SLUG-CHANNEL_SLUG.tgz - ``` - For more information, see [Install](/enterprise/installing-embedded-air-gap#install). - -1. Untar the tarball. For example: - - ```bash - tar -xvzf APP_SLUG-CHANNEL_SLUG.tgz - ``` - Ensure that the `.airgap` air gap bundle is present. - -1. On the same machine, use a browser to access the Admin Console. - -1. On the **Version history** page, click **Upload new version** and choose the `.airgap` air gap bundle you downloaded. - -1. When the air gap bundle has been uploaded, click **Deploy** next to the new version. - -1. On the **Config** screen of the upgrade wizard, make any necessary changes to the configuration for the application. Click **Next**. - - ![Config screen in the upgrade wizard](/images/ec-upgrade-wizard-config.png) - - [View a larger version of this image](/images/ec-upgrade-wizard-config.png) - - :::note - Any changes made on the **Config** screen of the upgrade wizard are not set until the new version is deployed. - ::: - -1. On the **Preflight** screen, view the results of the preflight checks. - - ![Preflight screen in the upgrade wizard](/images/ec-upgrade-wizard-preflights.png) - - [View a larger version of this image](/images/ec-upgrade-wizard-preflights.png) - -1. On the **Confirm** screen, click **Deploy**. - - ![Confirmation screen in the upgrade wizard](/images/ec-upgrade-wizard-confirm.png) - - [View a larger version of this image](/images/ec-upgrade-wizard-confirm.png) - -================ -File: docs/partials/embedded-cluster/_update-air-gap-cli.mdx -================ -1. SSH onto a controller node in the cluster and download the air gap bundle for the new version using the same curl command that you used to install. For example: - - ```bash - curl -f https://replicated.app/embedded/APP_SLUG/CHANNEL_SLUG?airgap=true -H "Authorization: LICENSE_ID" -o APP_SLUG-CHANNEL_SLUG.tgz - ``` - - For more information, see [Install](/enterprise/installing-embedded-air-gap#install). - -1. Untar the tarball. 
For example:
-
-   ```bash
-   tar -xvzf APP_SLUG-CHANNEL_SLUG.tgz
-   ```
-   Ensure that the `.airgap` air gap bundle is present.
-
-1. Use the `update` command to upload the air gap bundle and make this new version available in the Admin Console. For example:
-
-   ```bash
-   ./APP_SLUG update --airgap-bundle APP_SLUG.airgap
-   ```
-
-1. When the air gap bundle has been uploaded, open a browser on the same machine and go to the Admin Console.
-
-1. On the **Version history** page, click **Deploy** next to the new version.
-
-   ![Version history page](/images/ec-upgrade-version-history.png)
-
-   [View a larger version of this image](/images/ec-upgrade-version-history.png)
-
-1. On the **Config** screen of the upgrade wizard, make any necessary changes to the configuration for the application. Click **Next**.
-
-   ![Config screen in the upgrade wizard](/images/ec-upgrade-wizard-config.png)
-
-   [View a larger version of this image](/images/ec-upgrade-wizard-config.png)
-
-   :::note
-   Any changes made on the **Config** screen of the upgrade wizard are not set until the new version is deployed.
-   :::
-
-1. On the **Preflight** screen, view the results of the preflight checks.
-
-   ![Preflight screen in the upgrade wizard](/images/ec-upgrade-wizard-preflights.png)
-
-   [View a larger version of this image](/images/ec-upgrade-wizard-preflights.png)
-
-1. On the **Confirm** screen, click **Deploy**.
-
-   ![Confirmation screen in the upgrade wizard](/images/ec-upgrade-wizard-confirm.png)
-
-   [View a larger version of this image](/images/ec-upgrade-wizard-confirm.png)
-
-================
-File: docs/partials/embedded-cluster/_update-air-gap-overview.mdx
-================
-To upgrade an installation, new air gap bundles can be uploaded to the Admin Console from the browser or with the Embedded Cluster binary from the command line.
-
-Using the binary is faster because it allows the user to download the air gap bundle directly to the machine where Embedded Cluster is running. Using the browser is slower because the user must first download the air gap bundle to a machine with a browser and then upload that bundle to the Admin Console for processing.
-
-================
-File: docs/partials/embedded-cluster/_update-overview.mdx
-================
-When you update an application installed with Embedded Cluster, you update both the application and the cluster infrastructure together, including Kubernetes, KOTS, and other components running in the cluster. There is no need or mechanism to update the infrastructure on its own.
-
-When you deploy a new version, any changes to the cluster are deployed first. The Admin Console waits until the cluster is ready before updating the application.
-
-Any changes made to the Embedded Cluster Config, including changes to the Embedded Cluster version, Helm extensions, and unsupported overrides, trigger a cluster update.
-
-When performing an upgrade with Embedded Cluster, the user can change the application config before deploying the new version. Additionally, the user's license is synced automatically. Users can also make config changes and sync their license outside of an update, although a new version must still be deployed to apply the config change or license sync.
-
-================
-File: docs/partials/embedded-cluster/_warning-do-not-downgrade.mdx
-================
-:::important
-Do not downgrade the Embedded Cluster version. Although downgrading is not prohibited, it is not supported and can lead to unexpected behavior.
-:::
-
-================
-File: docs/partials/getting-started/_create-promote-release.mdx
-================
-Create a new release and promote it to the Unstable channel. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Managing Releases with the CLI](releases-creating-cli).
-
-================
-File: docs/partials/getting-started/_csdl-overview.mdx
-================
-Commercial software distribution is the business process that independent software vendors (ISVs) use to enable enterprise customers to self-host a fully private instance of the vendor's application in an environment controlled by the customer.
-
-Replicated has developed the Commercial Software Distribution Lifecycle to represent the stages that are essential for every company that wants to deliver its software securely and reliably to customer-controlled environments.
-
-This lifecycle was inspired by the DevOps lifecycle and the Software Development Lifecycle (SDLC), but it focuses on the unique steps required to successfully distribute third-party, commercial software to tens, hundreds, or thousands of enterprise customers.
-
-================
-File: docs/partials/getting-started/_gitea-ec-config.mdx
-================
-```yaml
-apiVersion: embeddedcluster.replicated.com/v1beta1
-kind: Config
-spec:
-  version: 2.1.3+k8s-1.30
-```
-
-================
-File: docs/partials/getting-started/_gitea-helmchart-cr-ec.mdx
-================
-```yaml
-apiVersion: kots.io/v1beta2
-kind: HelmChart
-metadata:
-  name: gitea
-spec:
-  # chart identifies a matching chart from a .tgz
-  chart:
-    name: gitea
-    chartVersion: 1.0.6
-  optionalValues:
-    - when: 'repl{{ eq Distribution "embedded-cluster" }}'
-      recursiveMerge: false
-      values:
-        service:
-          type: NodePort
-          nodePorts:
-            http: "32000"
-```
-
-================
-File: docs/partials/getting-started/_gitea-helmchart-cr.mdx
-================
-```yaml
-apiVersion: kots.io/v1beta2
-kind: HelmChart
-metadata:
-  name: gitea
-spec:
-  # chart identifies a matching chart from a .tgz
-  chart:
-    name: gitea
-    chartVersion: 1.0.6
-```
-
-================
-File: docs/partials/getting-started/_gitea-k8s-app-cr.mdx
-================
-```yaml
-apiVersion: app.k8s.io/v1beta1
-kind: Application
-metadata:
-  name: "gitea"
-spec:
-  descriptor:
-    links:
-      - description: Open App
-        # needs to match applicationUrl in kots-app.yaml
-        url: "http://gitea"
-```
-
-================
-File: docs/partials/getting-started/_gitea-kots-app-cr-ec.mdx
-================
-```yaml
-apiVersion: kots.io/v1beta1
-kind: Application
-metadata:
-  name: gitea
-spec:
-  title: Gitea
-  statusInformers:
-    - deployment/gitea
-  ports:
-    - serviceName: "gitea"
-      servicePort: 3000
-      localPort: 32000
-      applicationUrl: "http://gitea"
-  icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/kubernetes/icon/color/kubernetes-icon-color.png
-```
-
-================
-File: docs/partials/getting-started/_gitea-kots-app-cr.mdx
-================
-```yaml
-apiVersion: kots.io/v1beta1
-kind: Application
-metadata:
-  name: gitea
-spec:
-  title: Gitea
-  statusInformers:
-    - deployment/gitea
-  ports:
-    - serviceName: "gitea"
-      servicePort: 3000
-      localPort: 8888
-      applicationUrl: "http://gitea"
-  icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/kubernetes/icon/color/kubernetes-icon-color.png
-```
-
-================
-File: docs/partials/getting-started/_grafana-config.mdx
-================
-```yaml
-apiVersion: kots.io/v1beta1
-kind: Config
-metadata:
-  name: grafana-config
-spec:
-  groups:
-    - name: grafana
-      title: Grafana
-      description: Grafana Configuration
-      items:
-        - name: admin_user
-          title: Admin User
-          type: text
-          default: 'admin'
-        - name: admin_password
-          title: Admin Password
-          type: password
-          default: 'admin'
-```
-
-================
-File: docs/partials/getting-started/_grafana-helmchart.mdx
-================
-```yaml
-apiVersion: kots.io/v1beta2
-kind: HelmChart
-metadata:
-  name: grafana
-spec:
-  # chart identifies a matching chart from a .tgz
-  chart:
-    name: grafana
-    chartVersion: 9.6.5
-  values:
-    admin:
-      user: "repl{{ ConfigOption `admin_user`}}"
-      password: "repl{{ ConfigOption `admin_password`}}"
-```
-
-================
-File: docs/partials/getting-started/_grafana-k8s-app.mdx
-================
-```yaml
-apiVersion: app.k8s.io/v1beta1
-kind: Application
-metadata:
-  name: "grafana"
-spec:
-  descriptor:
-    links:
-      - description: Open App
-        # needs to match applicationUrl in kots-app.yaml
-        url: "http://grafana"
-```
-
-================
-File: docs/partials/getting-started/_grafana-kots-app.mdx
-================
-```yaml
-apiVersion: kots.io/v1beta1
-kind: Application
-metadata:
-  name: grafana
-spec:
-  title: Grafana
-  statusInformers:
-    - deployment/grafana
-  ports:
-    - serviceName: "grafana"
-      servicePort: 3000
-      localPort: 8888
-      applicationUrl: "http://grafana"
-  icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/kubernetes/icon/color/kubernetes-icon-color.png
-```
-
-================
-File: docs/partials/getting-started/_kubernetes-training.mdx
-================
-:::note
-This tutorial assumes that you have a working knowledge of Kubernetes. For an introduction to Kubernetes and free training resources, see [Training](https://kubernetes.io/training/) in the Kubernetes documentation.
-:::
-
-================
-File: docs/partials/getting-started/_labs-intro.mdx
-================
-Replicated also offers a sandbox environment where you can complete several beginner, intermediate, and advanced labs. The sandbox environment automatically provisions the required Kubernetes cluster or VM where you will install a sample application as part of the labs.
-
-To get started with an introductory lab, see [Deploy a Hello World Application with Replicated](https://play.instruqt.com/replicated/tracks/hello-world).
-
-================
-File: docs/partials/getting-started/_related-topics.mdx
-================
-For more information about the subjects in the getting started tutorials, see the following topics:
-
-* [Installing the Replicated CLI](/reference/replicated-cli-installing)
-* [Linter Rules](/reference/linter)
-* [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster)
-* [Performing Updates in Existing Clusters](/enterprise/updating-app-manager)
-
-================
-File: docs/partials/getting-started/_replicated-definition.mdx
-================
-Replicated is a commercial software distribution platform. Independent software vendors (ISVs) can use features of the Replicated Platform to distribute modern commercial software into complex, customer-controlled environments, including on-prem and air gap.
-
-================
-File: docs/partials/getting-started/_test-your-changes.mdx
-================
-Install the release to test your changes. For Embedded Cluster installations, see [Performing Updates in Embedded Clusters](/enterprise/updating-embedded). For existing cluster installations with KOTS, see [Performing Updates in Existing Clusters](/enterprise/updating-app-manager).
-
-================
-File: docs/partials/getting-started/_tutorial-intro.mdx
-================
-This tutorial introduces you to the Replicated features for software vendors and their enterprise users. It is designed to familiarize you with the key concepts and processes that you use as a software vendor when you package and distribute your application with Replicated.
-
-In this tutorial, you use a set of sample manifest files for a basic NGINX application to learn how to:
-* Create and promote releases for an application as a software vendor
-* Install and update an application on a Kubernetes cluster as an enterprise user
-
-================
-File: docs/partials/getting-started/_vm-requirements.mdx
-================
-For this tutorial, the VM must meet the following requirements:
-
-  * Ubuntu 18.04
-  * At least 8 GB of RAM
-  * 4 CPU cores
-  * At least 50 GB of disk space
-
-  :::note
-  If you use a virtual machine that is behind a firewall, make sure that port 8800 (and any other ports that you need to access over the internet) is allowed to accept traffic. GCP and AWS typically require firewall rule creation to expose ports.
-  :::
-
-For the complete list of system requirements for kURL, see [kURL Requirements](/enterprise/installing-general-requirements#kurl-requirements) in _Installation Requirements_.
-
-================
-File: docs/partials/gitops/_gitops-not-recommended.mdx
-================
-:::important
-KOTS Auto-GitOps is a legacy feature and is **not recommended** for use. For modern enterprise customers that prefer software deployment processes that use CI/CD pipelines, Replicated recommends the [Helm CLI installation method](/vendor/install-with-helm), which is more commonly used in these types of enterprise environments.
-:::
-
-================
-File: docs/partials/helm/_gitops-limitation.mdx
-================
-The KOTS Auto-GitOps workflow is not supported for installations with the HelmChart custom resource `apiVersion: kots.io/v1beta2` or the HelmChart custom resource `apiVersion: kots.io/v1beta1` with `useHelmInstall: true`.
-
-================
-File: docs/partials/helm/_helm-builder-requirements.mdx
-================
-The `builder` key has the following requirements and recommendations:
-* Replicated recommends that you include only the minimum Helm values in the `builder` key that are required to template the Helm chart with the correct image tags.
-* Use only static, or _hardcoded_, values in the `builder` key. You cannot use template functions in the `builder` key because values in the `builder` key are not rendered in a customer environment.
-* Any `required` Helm values that need to be set to render the chart templates must have a value supplied in the `builder` key. For more information about the Helm `required` function, see [Using the 'required' function](https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-required-function) in the Helm documentation.
-
-================
-File: docs/partials/helm/_helm-cr-builder-airgap-intro.mdx
-================
-In the `builder` key, you provide the minimum Helm values required to render the chart templates so that the output includes any images that must be included in the air gap bundle. The Vendor Portal uses these values to render the Helm chart templates when building the `.airgap` bundle for the release.
- -================ -File: docs/partials/helm/_helm-cr-builder-example.mdx -================ -For example, a Helm chart might include a conditional PostgreSQL Deployment, as shown in the Helm template below: - -```yaml -{{- if .Values.postgresql.enabled }} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: postgresql - labels: - app: postgresql -spec: - selector: - matchLabels: - app: postgresql - template: - metadata: - labels: - app: postgresql - spec: - containers: - - name: postgresql - image: "postgres:10.17" - ports: - - name: postgresql - containerPort: 80 -# ... -{{- end }} -``` - -To ensure that the `postgresql` image is included in the air gap bundle for the release, the `postgresql.enabled` value is added to the `builder` key of the HelmChart custom resource and is hardcoded to `true`: - -```yaml -apiVersion: kots.io/v1beta2 -kind: HelmChart -metadata: - name: samplechart -spec: - chart: - name: samplechart - chartVersion: 3.1.7 - values: - postgresql: - enabled: repl{{ ConfigOptionEquals "postgres_type" "embedded_postgres"}} - builder: - postgresql: - enabled: true -``` - -================ -File: docs/partials/helm/_helm-cr-chart-name.mdx -================ -The name of the chart. This value must exactly match the `name` field from a `Chart.yaml` in a `.tgz` chart archive that is also included in the release. If the names do not match, then the installation can error or fail. - -================ -File: docs/partials/helm/_helm-cr-chart-release-name.mdx -================ -Specifies the release name to use when installing this instance of the Helm chart. Defaults to the chart name. - -The release name must be unique across all charts deployed in the namespace. To deploy multiple instances of the same Helm chart in a release, you must add an additional HelmChart custom resource with a unique release name for each instance of the Helm chart. - -Must be a valid Helm release name that matches regex `^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$` and is no longer than 53 characters. - -================ -File: docs/partials/helm/_helm-cr-chart-version.mdx -================ -The version of the chart. This value must match the `version` field from a `Chart.yaml` in a `.tgz` chart archive that is also included in the release. - -================ -File: docs/partials/helm/_helm-cr-chart.mdx -================ -The `chart` key allows for a mapping between the data in this definition and the chart archive itself. -More than one `kind: HelmChart` can reference a single chart archive, if different settings are needed. - -================ -File: docs/partials/helm/_helm-cr-exclude.mdx -================ -The attribute is a value for making optional charts. The `exclude` attribute can be parsed by template functions. - -When Replicated KOTS processes Helm charts, it excludes the entire chart if the output of the `exclude` field can be parsed as a boolean evaluating to `true`. - -For more information about optional charts, template functions, and how KOTS processes Helm charts, see: - -* [Optional Charts](/vendor/helm-optional-charts) -* [About Template Function Contexts](template-functions-about) -* [About Distributing Helm Charts with KOTS](/vendor/helm-native-about) - -================ -File: docs/partials/helm/_helm-cr-namespace.mdx -================ -The `namespace` key specifies an alternative namespace where Replicated KOTS installs the Helm chart. **Default:** The Helm chart is installed in the same namespace as the Admin Console. 
The `namespace` attribute can be parsed by template functions. For more information about template functions, see [About template function contexts](template-functions-about). - - -If you specify a namespace in the HelmChart `namespace` field, you must also include the same namespace in the `additionalNamespaces` field of the Application custom resource manifest file. KOTS creates the namespaces listed in the `additionalNamespaces` field during installation. For more information, see [additionalNamespaces](custom-resource-application#additionalnamespaces) in the _Application_ reference. - -================ -File: docs/partials/helm/_helm-cr-optional-values-recursive-merge.mdx -================ -The `optionalValues.recursiveMerge` boolean defines how KOTS merges `values` and `optionalValues`: - -* When `optionalValues.recursiveMerge` is false, the top level keys in `optionalValues` override the top level keys in `values`. By default, `optionalValues.recursiveMerge` is set to false. - -* When `optionalValues.recursiveMerge` is true, all keys from `values` and `optionalValues` are included. In the case of a conflict where there is a matching key in `optionalValues` and `values`, KOTS uses the value of the key from `optionalValues`. - -================ -File: docs/partials/helm/_helm-cr-optional-values-when.mdx -================ -The `optionalValues.when` field defines a conditional statement that must evaluate to true for the given values to be set. Evaluation of the conditional in the `optionalValues.when` field is deferred until render time in the customer environment. - -Use KOTS template functions to write the `optionalValues.when` conditional statement. The following example shows a conditional statement for selecting a database option on the Admin Console configuration screen: - -```yaml -optionalValues: - - when: repl{{ ConfigOptionEquals "postgres_type" "external_postgres"}} -``` - -For more information about using KOTS template functions, see [About Template Functions](/reference/template-functions-about). - -================ -File: docs/partials/helm/_helm-cr-optional-values.mdx -================ -The `optionalValues` key can be used to set values in the Helm chart `values.yaml` file when a given conditional statement evaluates to true. For example, if a customer chooses to include an optional application component in their deployment, it might be necessary to include Helm chart values related to the optional component. - -`optionalValues` includes the following properties: - -* `optionalValues.when`: Defines a conditional statement using KOTS template functions. If `optionalValues.when` evaluates to true, then the values specified in `optionalValues` are set. - -* `optionalValues.recursiveMerge`: Defines how `optionalValues` is merged with `values`. - -* `optionalValues.values`: An array of key-value pairs. - -================ -File: docs/partials/helm/_helm-cr-upgrade-flags.mdx -================ -Specifies additional flags to pass to the `helm upgrade` command for charts. These flags are passed in addition to any flags Replicated KOTS passes by default. The values specified here take precedence if KOTS already passes the same flag. The `helmUpgradeFlags` attribute can be parsed by template functions. For more information about template functions, see [About template function contexts](template-functions-about). - -KOTS uses `helm upgrade` for _all_ deployments of an application, not just upgrades, by specifying the `--install` flag. 
For non-boolean flags that require an additional argument, such as `--timeout 1200s`, you must use an equal sign (`=`) or specify the additional argument separately in the array. - -**Example:** - -```yaml -helmUpgradeFlags: - - --timeout - - 1200s - - --history-max=15 -``` - -================ -File: docs/partials/helm/_helm-cr-values.mdx -================ -The `values` key can be used to set or delete existing values in the Helm chart `values.yaml` file. Any values that you include in the `values` key must match values in the Helm chart `values.yaml`. For example, `spec.values.images.pullSecret` in the HelmChart custom resource matches `images.pullSecret` in the Helm chart `values.yaml`. - -During installation or upgrade with KOTS, `values` is merged with the Helm chart `values.yaml` in the chart archive. Only include values in the `values` key that you want to set or delete. - -================ -File: docs/partials/helm/_helm-cr-weight-limitation.mdx -================ -The `weight` field is _not_ supported for HelmChart custom resources with `useHelmInstall: false`. - -================ -File: docs/partials/helm/_helm-cr-weight.mdx -================ -Determines the order in which KOTS applies the Helm chart. Charts are applied by weight in ascending order, with lower weights applied first. **Supported values:** Positive or negative integers. **Default:** `0` - -In KOTS v1.99.0 and later, `weight` also determines the order in which charts are uninstalled. Charts are uninstalled by weight in descending order, with higher weights uninstalled first. For more information about uninstalling applications, see [remove](kots-cli-remove) in _KOTS CLI_. - -For more information, see [Orchestrating Resource Deployment](/vendor/orchestrating-resource-deployment). - -================ -File: docs/partials/helm/_helm-definition.mdx -================ -Helm is a popular open source package manager for Kubernetes applications. Many ISVs use Helm to configure and deploy Kubernetes applications because it provides a consistent, reusable, and sharable packaging format. For more information, see the [Helm documentation](https://helm.sh/docs). - -================ -File: docs/partials/helm/_helm-install-beta.mdx -================ -The Helm installation method is Beta and is not recommended for production releases. The features and availability of the Helm installation method are subject to change. - -================ -File: docs/partials/helm/_helm-install-prereqs.mdx -================ -* The customer used to install must have a valid email address. This email address is only used as a username for the Replicated registry and is never contacted. For more information about creating and editing customers in the Vendor Portal, see [Creating a Customer](/vendor/releases-creating-customer). - -* The customer used to install must have the **Existing Cluster (Helm CLI)** install type enabled. For more information about enabling install types for customers in the Vendor Portal, see [Managing Install Types for a License](licenses-install-types). - -* To ensure that the Replicated proxy registry can be used to grant proxy access to your application images during Helm installations, you must create an image pull secret for the proxy registry and add it to your Helm chart. To do so, follow the steps in [Using the Proxy Registry with Helm Installations](/vendor/helm-image-registry). - -* Declare the SDK as a dependency in your Helm chart. 
For more information, see [Install the SDK as a Subchart](replicated-sdk-installing#install-the-sdk-as-a-subchart) in _Installing the Replicated SDK_. - -================ -File: docs/partials/helm/_helm-package.mdx -================ -```bash -helm package -u PATH_TO_CHART -``` -Where: -* `-u` or `--dependency-update` is an option for the `helm package` command that updates chart dependencies before packaging. For more information, see [Helm Package](https://helm.sh/docs/helm/helm_package/) in the Helm documentation. -* `PATH_TO_CHART` is the path to the Helm chart in your local directory. For example, `helm package -u .`. - -The Helm chart, including any dependencies, is packaged and copied to your current directory in a `.tgz` file. The file uses the naming convention: `CHART_NAME-VERSION.tgz`. For example, `postgresql-8.1.2.tgz`. - -================ -File: docs/partials/helm/_helm-template-limitation.mdx -================ -Helm's `lookup` function and some values in the built-in `Capabilities` object are not supported with the `kots.io/v1beta1` HelmChart custom resource. - - This is because KOTS uses the `helm template` command to render chart templates locally. During rendering, Helm does not have access to the cluster where the chart will be installed. For more information, see [Kubernetes and Chart Functions](https://helm.sh/docs/chart_template_guide/function_list/#kubernetes-and-chart-functions) in the Helm documentation. - -================ -File: docs/partials/helm/_helm-version-limitation.mdx -================ -Support for Helm v2, including security patches, ended on November 13, 2020. If you specified `helmVersion: v2` in any HelmChart custom resources, update your references to v3. By default, KOTS uses Helm v3 to process all Helm charts. - -================ -File: docs/partials/helm/_hook-weights-limitation.mdx -================ -Hook weights below -9999 are not supported. All hook weights must be set to a value above -9999 to ensure the Replicated image pull secret is deployed before any resources are pulled. - -================ -File: docs/partials/helm/_hooks-limitation.mdx -================ -The following hooks are not supported and are ignored if they are present: - * `test` - * `pre-rollback` - * `post-rollback` - -================ -File: docs/partials/helm/_installer-only-annotation.mdx -================ -Any other Kubernetes resources in the release (such as Kubernetes Deployments or Services) must include the `kots.io/installer-only` annotation. - -The `kots.io/installer-only` annotation indicates that the Kubernetes resource is used only by the Replicated installers (Embedded Cluster, KOTS, and kURL). - -Example: -```yaml -apiVersion: v1 -kind: Service -metadata: - name: my-service - annotations: - kots.io/installer-only: "true" -``` - -================ -File: docs/partials/helm/_kots-helm-cr-description.mdx -================ -To deploy Helm charts, KOTS requires a unique HelmChart custom resource for each Helm chart `.tgz` archive in the release. You configure the HelmChart custom resource to provide the necessary instructions to KOTS for processing and preparing the chart for deployment. Additionally, the HelmChart custom resource creates a mapping between KOTS and your Helm chart to allow Helm values to be dynamically set during installation or upgrade. - -================ -File: docs/partials/helm/_replicated-deprecated.mdx -================ -The HelmChart custom resource `apiVersion: kots.io/v1beta1` is deprecated. 
For installations with Replicated KOTS v1.99.0 and later, use the HelmChart custom resource with `apiVersion: kots.io/v1beta2` instead. See [HelmChart v2](/reference/custom-resource-helmchart-v2) and [Configuring the HelmChart Custom Resource v2](/vendor/helm-native-v2-using).
-
-================
-File: docs/partials/helm/_replicated-helm-migration.mdx
-================
-You cannot migrate existing Helm charts in existing installations from the `useHelmInstall: false` installation method to a different method. If KOTS already installed the Helm chart previously in the environment using a HelmChart custom resource with `apiVersion: kots.io/v1beta1` and `useHelmInstall: false`, then KOTS does not attempt to install the chart using a different method and displays the following error message: `Deployment method for chart <chart_name> has changed`.
-
-To change the installation method from `useHelmInstall: false` to a different method, the user must reinstall your application in a new environment.
-
-================
-File: docs/partials/helm/_set-values-config-example.mdx
-================
-Using KOTS template functions in the [Config](/reference/template-functions-config-context) context allows you to set Helm values based on user-supplied values from the KOTS Admin Console configuration page.
-
-For example, the following Helm chart `values.yaml` file contains `postgresql.enabled`, which is set to `false`:
-
-```yaml
-# Helm chart values.yaml
-postgresql:
-  enabled: false
-```
-The following HelmChart custom resource contains a mapping to `postgresql.enabled` in its `values` key:
-
-```yaml
-# KOTS HelmChart custom resource
-
-apiVersion: kots.io/v1beta2
-kind: HelmChart
-metadata:
-  name: samplechart
-spec:
-  chart:
-    name: samplechart
-    chartVersion: 3.1.7
-
-  releaseName: samplechart-release-1
-
-  values:
-    postgresql:
-      enabled: repl{{ ConfigOptionEquals `postgres_type` `embedded_postgres`}}
-```
-
-The `values.postgresql.enabled` field in the HelmChart custom resource above uses the Replicated [ConfigOptionEquals](/reference/template-functions-config-context#configoptionequals) template function to evaluate the user's selection for a `postgres_type` configuration option.
-
-During installation or upgrade, the template function is rendered to true or false based on the user's selection. Then, KOTS sets the matching `postgresql.enabled` value in the Helm chart `values.yaml` file accordingly.
-
-================
-File: docs/partials/helm/_set-values-license-example.mdx
-================
-Using KOTS template functions in the [License](/reference/template-functions-license-context) context allows you to set Helm values based on the unique license file used for installation or upgrade.
-
-For example, the following HelmChart custom resource uses the Replicated [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue) template function to evaluate whether the license has the boolean `newFeatureEntitlement` field set to `true`:
-
-```yaml
-# KOTS HelmChart custom resource
-
-apiVersion: kots.io/v1beta2
-kind: HelmChart
-metadata:
-  name: samplechart
-spec:
-  chart:
-    name: samplechart
-    chartVersion: 3.1.7
-
-  releaseName: samplechart-release-1
-
-  values:
-    newFeature:
-      enabled: repl{{ LicenseFieldValue "newFeatureEntitlement" }}
-```
-
-During installation or upgrade, the LicenseFieldValue template function is rendered based on the user's license. Then, KOTS sets the matching `newFeature.enabled` value in the Helm chart `values.yaml` file accordingly.
- -================ -File: docs/partials/helm/_v2-native-helm-cr-example.mdx -================ -```yaml -apiVersion: kots.io/v1beta2 -kind: HelmChart -metadata: - name: samplechart -spec: - # chart identifies a matching chart from a .tgz - chart: - name: samplechart - chartVersion: 3.1.7 - - releaseName: samplechart-release-1 - - exclude: "repl{{ ConfigOptionEquals `include_chart` `include_chart_no`}}" - - # weight determines the order that charts are applied, with lower weights first. - weight: 42 - - # helmUpgradeFlags specifies additional flags to pass to the `helm upgrade` command. - helmUpgradeFlags: - - --skip-crds - - --no-hooks - - --timeout - - 1200s - - --history-max=15 - - # values are used in the customer environment as a pre-render step - # these values are supplied to helm template - values: - postgresql: - enabled: repl{{ ConfigOptionEquals `postgres_type` `embedded_postgres`}} - - optionalValues: - - when: "repl{{ ConfigOptionEquals `postgres_type` `external_postgres`}}" - recursiveMerge: false - values: - postgresql: - postgresqlDatabase: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_database`}}repl{{ end}}" - postgresqlUsername: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_username`}}repl{{ end}}" - postgresqlHost: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_host`}}repl{{ end}}" - postgresqlPassword: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_password`}}repl{{ end}}" - postgresqlPort: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_port`}}repl{{ end}}" - # adds backup labels to postgresql if the license supports snapshots - - when: "repl{{ LicenseFieldValue `isSnapshotSupported` }}" - recursiveMerge: true - values: - postgresql: - commonLabels: - kots.io/backup: velero - kots.io/app-slug: my-app - podLabels: - kots.io/backup: velero - kots.io/app-slug: my-app - - # namespace allows for a chart to be installed in an alternate namespace to - # the default - namespace: samplechart-namespace - - # builder values render the chart with all images and manifests. - # builder is used to create `.airgap` packages and to support end users - # who use private registries - builder: - postgresql: - enabled: true -``` - -================ -File: docs/partials/image-registry/_docker-compatibility.mdx -================ -- Docker Hub - - :::note - To avoid the November 20, 2020 Docker Hub rate limits, use the `kots docker ensure-secret` CLI command. For more information, see [Avoiding Docker Hub Rate Limits](image-registry-rate-limits). - ::: - -- Quay -- Amazon Elastic Container Registry (ECR) -- Google Container Registry (GCR) -- Azure Container Registry (ACR) -- Harbor -- Sonatype Nexus - -================ -File: docs/partials/image-registry/_image-registry-settings.mdx -================ -<table> - <tr> - <th width="30%">Field</th> - <th width="70%">Description</th> - </tr> - <tr> - <td>Hostname</td> - <td>Specify a registry domain that uses the Docker V2 protocol.</td> - </tr> - <tr> - <td>Username</td> - <td>Specify the username for the domain.</td> - </tr> - <tr> - <td>Password</td> - <td>Specify the password for the domain.</td> - </tr> - <tr> - <td>Registry Namespace</td> - <td>Specify the registry namespace. The registry namespace is the path between the registry and the image name. 
For example, `my.registry.com/namespace/image:tag`. For air gap environments, this setting overwrites the registry namespace where images were pushed when KOTS was installed.</td>
-  </tr>
-  <tr>
-    <td>Disable Pushing Images to Registry</td>
-    <td>(Optional) Select this option to disable KOTS from pushing images. Make sure that an external process is configured to push images to your registry instead. Your images are still read from your registry when the application is deployed.</td>
-  </tr>
-</table>
-
-================
-File: docs/partials/install/_access-admin-console.mdx
-================
-By default, during installation, KOTS automatically opens localhost port 8800 to provide access to the Admin Console. Using the `--no-port-forward` flag with the `kots install` command prevents KOTS from creating a port forward to the Admin Console.
-
-After you install with the `--no-port-forward` flag, you can optionally create a port forward so that you can log in to the Admin Console in a browser window.
-
-To access the Admin Console:
-
-1. If you installed in a VM where you cannot open a browser window, forward a port on your local machine to `localhost:8800` on the remote VM using the SSH client:
-
-   ```bash
-   ssh -L LOCAL_PORT:localhost:8800 USERNAME@IP_ADDRESS
-   ```
-   Replace:
-   * `LOCAL_PORT` with the port on your local machine to forward. For example, `9900` or `8800`.
-   * `USERNAME` with your username for the VM.
-   * `IP_ADDRESS` with the IP address for the VM.
-
-   **Example**:
-
-   The following example shows using the SSH client to forward port 8800 on your local machine to `localhost:8800` on the remote VM.
-
-   ```bash
-   ssh -L 8800:localhost:8800 user@ip-addr
-   ```
-
-1. Run the following KOTS CLI command to open localhost port 8800, which forwards to the Admin Console service:
-
-   ```bash
-   kubectl kots admin-console --namespace NAMESPACE
-   ```
-   Replace `NAMESPACE` with the namespace where the Admin Console was installed.
-
-   For more information about the `kots admin-console` command, see [admin-console](/reference/kots-cli-admin-console-index) in the _KOTS CLI_ documentation.
-
-1. Open a browser window and go to `https://localhost:8800`.
-
-1. Log in to the Admin Console using the password that you created as part of the `kots install` command.
-
-================
-File: docs/partials/install/_airgap-bundle-build.mdx
-================
-* If the **Automatically create airgap builds for newly promoted releases in this channel** setting is enabled on the channel, watch for the build status to complete.
-* If automatic air gap builds are not enabled, go to the **Release history** page for the channel and build the air gap bundle manually.
-
-  <img alt="Release history link on a channel card" src="/images/release-history-link.png" width="400px"/>
-
-  [View a larger version of this image](/images/release-history-link.png)
-
-  ![Build button on the Release history page](/images/release-history-build-airgap-bundle.png)
-
-  [View a larger version of this image](/images/release-history-build-airgap-bundle.png)
-
-================
-File: docs/partials/install/_airgap-bundle-download.mdx
-================
-After the build completes, download the bundle. Ensure that you can access the downloaded bundle from the environment where you will install the application.
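-
-For example, a bundle downloaded to a workstation with internet access can be copied to the installation environment with a tool such as `scp`. The bundle filename and host below are placeholders:
-
-```bash
-# Copy the downloaded .airgap bundle to the air gap host
-scp ./application.airgap user@airgap-host:/tmp/application.airgap
-```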
- -================ -File: docs/partials/install/_airgap-bundle-view-contents.mdx -================ -(Optional) View the contents of the downloaded bundle: - - ```bash - tar -zxvf AIRGAP_BUNDLE - ``` - - Where `AIRGAP_BUNDLE` is the filename for the `.airgap` bundle that you downloaded. - -================ -File: docs/partials/install/_airgap-license-download.mdx -================ -1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Customers** page. - -1. Click on the name of the target customer and go to the **Manage customer** tab. - -1. Under **License options**, enable the **Airgap Download Enabled** option. Click **Save Changes**. - - ![Airgap Download Enabled option](/images/airgap-download-enabled.png) - - [View a larger version of this image](/images/airgap-download-enabled.png) - -1. At the top of the screen, click **Download license** to download the air gap enabled license. - - ![Download air gap license](/images/download-airgap-license.png) - - [View a larger version of this image](/images/download-airgap-license.png) - -================ -File: docs/partials/install/_automation-intro-embedded.mdx -================ -When you use the KOTS CLI to install an application in a kURL cluster, you first run the kURL installation script to provision the cluster and automatically install KOTS in the cluster. Then, you can run the `kots install` command to install the application. - -================ -File: docs/partials/install/_automation-intro-existing.mdx -================ -When you use the KOTS CLI to install an application in an existing cluster, you install both the application and Replicated KOTS with a single command. - -================ -File: docs/partials/install/_config-values-procedure.mdx -================ -To get the ConfigValues file from an installed application instance: - -1. Install the target release in a development environment. You can either install the release with Replicated Embedded Cluster or install in an existing cluster with KOTS. For more information, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded) or [Online Installation in Existing Clusters](/enterprise/installing-existing-cluster). - -1. Depending on the installer that you used, do one of the following to get the ConfigValues for the installed instance: - - * **For Embedded Cluster installations**: In the Admin Console, go to the **View files** tab. In the filetree, go to **upstream > userdata** and open **config.yaml**, as shown in the image below: - - ![ConfigValues file in the Admin Console View Files tab](/images/admin-console-view-files-configvalues.png) - - [View a larger version of this image](/images/admin-console-view-files-configvalues.png) - - * **For KOTS installations in an existing cluster**: Run the `kubectl kots get config` command to view the generated ConfigValues file: - - ```bash - kubectl kots get config --namespace APP_NAMESPACE --decrypt - ``` - Where: - * `APP_NAMESPACE` is the cluster namespace where KOTS is running. - * The `--decrypt` flag decrypts all configuration fields with `type: password`. In the downloaded ConfigValues file, the decrypted value is stored in a `valuePlaintext` field. - - The output of the `kots get config` command shows the contents of the ConfigValues file. For more information about the `kots get config` command, including additional flags, see [kots get config](/reference/kots-cli-get-config). 
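-
-For reference, the file retrieved by either method is a KOTS ConfigValues manifest. The following is a minimal sketch with illustrative item names and values:
-
-```yaml
-apiVersion: kots.io/v1beta1
-kind: ConfigValues
-metadata:
-  name: example-app
-spec:
-  values:
-    hostname:
-      value: example.com
-    db_password:
-      # For items with type: password, the --decrypt flag stores the
-      # decrypted value in a valuePlaintext field
-      valuePlaintext: examplePassword
-```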
-
-================
-File: docs/partials/install/_download-kotsadm-bundle.mdx
-================
-Download the `kotsadm.tar.gz` air gap bundle from the [Releases](https://github.com/replicatedhq/kots/releases) page in the kots repository in GitHub. Ensure that you can access the downloaded bundle from the environment where you will install the application.
-
-:::note
-The version of the `kotsadm.tar.gz` air gap bundle used must be compatible with the version of the `.airgap` bundle for the given application release.
-:::
-
-================
-File: docs/partials/install/_download-kurl-bundle.mdx
-================
-```bash
-export REPLICATED_APP=APP_SLUG
-curl -LS https://k8s.kurl.sh/bundle/$REPLICATED_APP.tar.gz -o $REPLICATED_APP.tar.gz
-```
-Where `APP_SLUG` is the unique slug for the application.
-
-================
-File: docs/partials/install/_ec-prereqs.mdx
-================
-* Ensure that your installation environment meets the Embedded Cluster requirements. See [Embedded Cluster Requirements](/enterprise/installing-embedded-requirements).
-
-* The application release that you want to install must include an [Embedded Cluster Config](/reference/embedded-config).
-
-* The license used to install must have the **Embedded Cluster Enabled** license field enabled. See [Creating and Managing Customers](/vendor/releases-creating-customer).
-
-================
-File: docs/partials/install/_embedded-ha-step.mdx
-================
-(HA Installation Only) If you are installing in HA mode and did not already preconfigure a load balancer, you are prompted during the installation. Do one of the following:
-
-  - If you are using the internal load balancer, leave the prompt blank and proceed with the installation.
-
-  - If you are using an external load balancer, pass the load balancer address.
-
-================
-File: docs/partials/install/_embedded-login-password.mdx
-================
-After the installation command finishes, note the `Kotsadm` and `Login with password (will not be shown again)` fields in the output of the command. You use these to log in to the Admin Console.
-
-  The following shows an example of the `Kotsadm` and `Login with password (will not be shown again)` fields in the output of the installation command:
-
-  ```
-  Installation
-    Complete ✔
-
-  Kotsadm: http://10.128.0.35:8800
-  Login with password (will not be shown again): 3Hy8WYYid
-
-  This password has been set for you by default. It is recommended that you change
-  this password; this can be done with the following command:
-  kubectl kots reset-password default
-  ```
-
-================
-File: docs/partials/install/_extract-kurl-bundle.mdx
-================
-In your installation environment, extract the contents of the kURL `.tar.gz` bundle that you downloaded:
-
-  ```bash
-  tar -xvzf $REPLICATED_APP.tar.gz
-  ```
-
-================
-File: docs/partials/install/_firewall-openings-intro.mdx
-================
-The domains for the services listed in the table below need to be accessible from servers performing online installations. No outbound internet access is required for air gap installations.
-
-For services hosted at domains owned by Replicated, the table below includes a link to the list of IP addresses for the domain at [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json) in GitHub. Note that the IP addresses listed in the `replicatedhq/ips` repository also include IP addresses for some domains that are _not_ required for installation.
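-
-For example, the published IP addresses can be retrieved programmatically from the raw JSON file in that repository. This assumes that `curl` and `jq` are available:
-
-```bash
-# Fetch the published IP address list for Replicated-owned domains
-curl -s https://raw.githubusercontent.com/replicatedhq/ips/main/ip_addresses.json | jq .
-```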
For any third-party services hosted at domains not owned by Replicated, consult the third-party's documentation for the IP address range for each domain, as needed.
-
-================
-File: docs/partials/install/_firewall-openings.mdx
-================
-The domains for the services listed in the table below need to be accessible from servers performing online installations. No outbound internet access is required for air gap installations.
-
-For services hosted at domains owned by Replicated, the table below includes a link to the list of IP addresses for the domain at [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json) in GitHub. Note that the IP addresses listed in the `replicatedhq/ips` repository also include IP addresses for some domains that are _not_ required for installation.
-
-For third-party services hosted at domains not owned by Replicated, the table below lists the required domains. Consult the third-party's documentation for the IP address range for each domain, as needed.
-
-<table>
-  <tr>
-    <th width="10%">Host</th>
-    <th width="20%">Embedded Cluster</th>
-    <th width="20%">Helm</th>
-    <th width="20%">KOTS Existing Cluster</th>
-    <th width="20%">kURL</th>
-    <th width="10%">Description</th>
-  </tr>
-  <tr>
-    <td>Docker Hub</td>
-    <td>Not Required</td>
-    <td>Not Required</td>
-    <td>Required</td>
-    <td>Required</td>
-    <td>Some dependencies of KOTS are hosted as public images in Docker Hub. The required domains for this service are `index.docker.io`, `cdn.auth0.com`, `*.docker.io`, and `*.docker.com`.</td>
-  </tr>
-  <tr>
-    <td>`replicated.app`</td>
-    <td>Required</td>
-    <td>Required***</td>
-    <td>Required</td>
-    <td>Required</td>
-    <td><p>Upstream application YAML and metadata are pulled from `replicated.app`. The current running version of the application (if any), as well as a license ID and application ID to authenticate, are all sent to `replicated.app`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p>For the range of IP addresses for `replicated.app`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L60-L65) in GitHub.</p></td>
-  </tr>
-  <tr>
-    <td>`proxy.replicated.com`</td>
-    <td>Required</td>
-    <td>Required</td>
-    <td>Required*</td>
-    <td>Required*</td>
-    <td><p>Private Docker images are proxied through `proxy.replicated.com`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p>For the range of IP addresses for `proxy.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L52-L57) in GitHub.</p></td>
-  </tr>
-  <tr>
-    <td>`registry.replicated.com`</td>
-    <td>Required**</td>
-    <td>Required</td>
-    <td>Required**</td>
-    <td>Required**</td>
-    <td><p>Some applications host private images in the Replicated registry at this domain. The on-prem docker client uses a license ID to authenticate to `registry.replicated.com`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p>For the range of IP addresses for `registry.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L20-L25) in GitHub.</p></td>
-  </tr>
-  <tr>
-    <td>`kots.io`</td>
-    <td>Not Required</td>
-    <td>Not Required</td>
-    <td>Required</td>
-    <td>Not Required</td>
-    <td>Requests are made to this domain when installing the Replicated KOTS CLI.
This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</td>
-  </tr>
-  <tr>
-    <td>`github.com`</td>
-    <td>Not Required</td>
-    <td>Not Required</td>
-    <td>Required</td>
-    <td>Not Required</td>
-    <td>Requests are made to this domain when installing the Replicated KOTS CLI. For information about retrieving GitHub IP addresses, see [About GitHub's IP addresses](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/about-githubs-ip-addresses) in the GitHub documentation.</td>
-  </tr>
-  <tr>
-    <td><p>`k8s.kurl.sh`</p><p>`s3.kurl.sh`</p></td>
-    <td>Not Required</td>
-    <td>Not Required</td>
-    <td>Not Required</td>
-    <td>Required</td>
-    <td><p>kURL installation scripts and artifacts are served from [kurl.sh](https://kurl.sh). An application identifier is sent in a URL path, and bash scripts and binary executables are served from kurl.sh. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p>For the range of IP addresses for `k8s.kurl.sh`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L34-L39) in GitHub.</p><p>The range of IP addresses for `s3.kurl.sh` is the same as the range for the `kurl.sh` domain. For the range of IP addresses for `kurl.sh`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L28-L31) in GitHub.</p></td>
-  </tr>
-  <tr>
-    <td>`amazonaws.com`</td>
-    <td>Not Required</td>
-    <td>Not Required</td>
-    <td>Not Required</td>
-    <td>Required</td>
-    <td>`tar.gz` packages are downloaded from Amazon S3 during installations with kURL. For information about dynamically scraping the IP ranges to allowlist for accessing these packages, see [AWS IP address ranges](https://docs.aws.amazon.com/general/latest/gr/aws-ip-ranges.html#aws-ip-download) in the AWS documentation.</td>
-  </tr>
-</table>
-
-* Required only if the application uses the [Replicated proxy registry](/vendor/private-images-about).
-
-** Required only if the application uses the [Replicated registry](/vendor/private-images-replicated).
-
-*** Required only if the [Replicated SDK](/vendor/replicated-sdk-overview) is included as a dependency of the application Helm chart.
-
-================
-File: docs/partials/install/_ha-load-balancer-about.mdx
-================
-A load balancer is required for high availability mode. If your vendor has chosen to use the internal load balancer with the kURL EKCO add-on, you do not need to provide your own external load balancer. An external load balancer can be preferred when clients outside the cluster need access to the cluster's Kubernetes API.
-
-If you decide to use an external load balancer, the external load balancer must be a TCP forwarding load balancer. For more information, see [Prerequisites](#prerequisites).
-
-The health check for an apiserver is a TCP check on the port that the kube-apiserver listens on. The default value is `:6443`. For more information about the kube-apiserver external load balancer, see [Create load balancer for kube-apiserver](https://kubernetes.io/docs/setup/independent/high-availability/#create-load-balancer-for-kube-apiserver) in the Kubernetes documentation.
-
-================
-File: docs/partials/install/_ha-load-balancer-prereq.mdx
-================
-- If you are installing in high availability (HA) mode, a load balancer is required.
You can use the kURL internal load balancer if the [Embedded kURL Cluster Operator (EKCO) Add-On](https://kurl.sh/docs/add-ons/ekco) is included in the kURL Installer spec. Or, you can bring your own external load balancer. An external load balancer might be preferred when clients outside the cluster need access to the cluster's Kubernetes API.
-
-  To install in HA mode, complete the following prerequisites:
-
-  - (Optional) If you are going to use the internal EKCO load balancer, you can preconfigure it by passing `| sudo bash -s ha ekco-enable-internal-load-balancer` with the kURL install command. Otherwise, you are prompted for load balancer details during installation. For more information about the EKCO Add-on, see [EKCO Add-On](https://kurl.sh/docs/add-ons/ekco) in the open source kURL documentation.
-
-  - To use an external load balancer, ensure that the load balancer meets the following requirements:
-    - Must be a TCP forwarding load balancer
-    - Must be configured to distribute traffic to all healthy control plane nodes in its target list
-    - The health check must be a TCP check on port 6443
-
-    For more information about how to create a load balancer for kube-apiserver, see [Create load balancer for kube-apiserver](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/#create-load-balancer-for-kube-apiserver) in the Kubernetes documentation.
-
-    You can optionally preconfigure the external load balancer by passing the `load-balancer-address=HOST:PORT` flag with the kURL install command. Otherwise, you are prompted to provide the load balancer address during installation.
-
-================
-File: docs/partials/install/_install-kots-cli-airgap.mdx
-================
-Install the KOTS CLI. See [Manually Download and Install](/reference/kots-cli-getting-started#manually-download-and-install) in _Installing the KOTS CLI_.
-
-================
-File: docs/partials/install/_install-kots-cli.mdx
-================
-Install the KOTS CLI:
-
-  ```
-  curl https://kots.io/install | bash
-  ```
-
-  For more installation options, see [Installing the KOTS CLI](/reference/kots-cli-getting-started).
-
-================
-File: docs/partials/install/_intro-air-gap.mdx
-================
-The procedures in this topic apply to installation environments that do not have access to the internet, known as _air gap_ environments.
-
-================
-File: docs/partials/install/_intro-embedded.mdx
-================
-This topic describes how to use Replicated kURL to provision an embedded cluster in a virtual machine (VM) or bare metal server and install an application in the cluster.
-
-================
-File: docs/partials/install/_intro-existing.mdx
-================
-This topic describes how to use Replicated KOTS to install an application in an existing Kubernetes cluster.
-
-================
-File: docs/partials/install/_kots-airgap-version-match.mdx
-================
-:::note
-The versions of the KOTS CLI and the `kotsadm.tar.gz` bundle must match. You can check the version of the KOTS CLI with `kubectl kots version`.
-:::
-
-================
-File: docs/partials/install/_kots-install-prompts.mdx
-================
-When prompted by the `kots install` command:
-  1. Provide the namespace where you want to install both KOTS and the application.
-  1. Create a new password for logging in to the Admin Console.
- - **Example**: - - ```shell - $ kubectl kots install application-name - Enter the namespace to deploy to: application-name - • Deploying Admin Console - • Creating namespace ✓ - • Waiting for datastore to be ready ✓ - Enter a new password to be used for the Admin Console: •••••••• - • Waiting for Admin Console to be ready ✓ - - • Press Ctrl+C to exit - • Go to http://localhost:8800 to access the Admin Console - - ``` - - After the `kots install` command completes, it creates a port forward to the Admin Console. The Admin Console is exposed internally in the cluster and can only be accessed using a port forward. - -================ -File: docs/partials/install/_kubernetes-compatibility.mdx -================ -| KOTS Versions | Kubernetes Compatibility | -|------------------------|-----------------------------| -| 1.117.0 and later | 1.31, 1.30, 1.29 | -| 1.109.1 to 1.116.1 | 1.30, 1.29, 1.28 | -| 1.105.2 to 1.109.0 | 1.29, 1.28 | - -================ -File: docs/partials/install/_kurl-about.mdx -================ -Replicated kURL is an open source project. For more information, see the [kURL documentation](https://kurl.sh/docs/introduction/). - -================ -File: docs/partials/install/_license-file-prereq.mdx -================ -* Download your license file. Ensure that you can access the downloaded license file from the environment where you will install the application. See [Downloading Customer Licenses](/vendor/licenses-download). - -================ -File: docs/partials/install/_placeholder-airgap-bundle.mdx -================ -* `PATH_TO_AIRGAP_BUNDLE` with the path to the `.airgap` bundle for the application release. You can build and download the air gap bundle for a release in the [Vendor Portal](https://vendor.replicated.com) on the **Release history** page for the channel where the release is promoted. - - Alternatively, for information about building and downloading air gap bundles with the Vendor API v3, see [Trigger airgap build for a channel's release](https://replicated-vendor-api.readme.io/reference/channelreleaseairgapbuild) and [Get airgap bundle download URL for the active release on the channel](https://replicated-vendor-api.readme.io/reference/channelreleaseairgapbundleurl) in the Vendor API v3 documentation. - -================ -File: docs/partials/install/_placeholder-app-name-UI.mdx -================ -* `APP_NAME` with the name of the application. The `APP_NAME` is included in the installation command that your vendor gave you. This is a unique identifier that KOTS will use to refer to the application that you install. - -================ -File: docs/partials/install/_placeholder-namespace-embedded.mdx -================ -* `NAMESPACE` with the namespace where Replicated kURL installed Replicated KOTS when creating the cluster. By default, kURL installs KOTS in the `default` namespace. - -================ -File: docs/partials/install/_placeholder-namespace-existing.mdx -================ -* `NAMESPACE` with the namespace where you want to install both the application and KOTS. - -================ -File: docs/partials/install/_placeholder-ro-creds.mdx -================ -* `REGISTRY_HOST` with the same hostname for the private registry where you pushed the Admin Console images. - -* `RO_USERNAME` and `RO_PASSWORD` with the username and password for an account that has read-only access to the private registry. - - :::note - KOTS stores these read-only credentials in a Kubernetes secret in the same namespace where the Admin Console is installed. 
-
-  KOTS uses these credentials to pull the images. To allow KOTS to pull images, the credentials are automatically created as an imagePullSecret on all of the Admin Console Pods.
-  :::
-
-================
-File: docs/partials/install/_placeholders-global.mdx
-================
-* `APP_NAME` with a name for the application. This is the unique name that KOTS will use to refer to the application that you install.
-
-* `PASSWORD` with a shared password for accessing the Admin Console.
-
-* `PATH_TO_LICENSE` with the path to your license file. See [Downloading Customer Licenses](/vendor/licenses-download). For information about how to download licenses with the Vendor API v3, see [Download a customer license file as YAML](https://replicated-vendor-api.readme.io/reference/downloadlicense) in the Vendor API v3 documentation.
-
-* `PATH_TO_CONFIGVALUES` with the path to the ConfigValues file.
-
-================
-File: docs/partials/install/_prereqs-embedded-cluster.mdx
-================
-* Ensure that your environment meets the minimum system requirements. See [kURL Installation Requirements](/enterprise/installing-kurl-requirements).
-
-* Review the advanced installation options available for the kURL installer. See [Advanced Options](https://kurl.sh/docs/install-with-kurl/advanced-options) in the kURL documentation.
-
-================
-File: docs/partials/install/_prereqs-existing-cluster.mdx
-================
-* Ensure that your cluster meets the minimum system requirements. See [Minimum System Requirements](/enterprise/installing-general-requirements#minimum-system-requirements) in _Installation Requirements_.
-
-* Ensure that you have at least the minimum RBAC permissions in the cluster required to install KOTS. See [RBAC Requirements](/enterprise/installing-general-requirements#rbac-requirements) in _Installation Requirements_.
-
-  :::note
-  If you manually created RBAC resources for KOTS as described in [Namespace-scoped RBAC Requirements](/enterprise/installing-general-requirements#namespace-scoped), include both the `--ensure-rbac=false` and `--skip-rbac-check` flags when you run the `kots install` command.
-
-  These flags prevent KOTS from checking for or attempting to create a Role with `* * *` permissions in the namespace. For more information about these flags, see [install](/reference/kots-cli-install) or [admin-console upgrade](/reference/kots-cli-admin-console-upgrade).
-  :::
-
-* Review the options available with the `kots install` command before installing. The `kots install` command includes several optional flags to support different installation use cases. For a list of options, see [install](/reference/kots-cli-install) in the _KOTS CLI_ documentation.
-
-================
-File: docs/partials/install/_provision-cluster-intro.mdx
-================
-This procedure describes how to use kURL to provision an embedded cluster on a VM or bare metal server. When you create a cluster with kURL, kURL also automatically installs Replicated KOTS in the `default` namespace in the cluster.
-
-================
-File: docs/partials/install/_push-kotsadm-images.mdx
-================
-Extract the KOTS Admin Console container images from the `kotsadm.tar.gz` bundle and push the images to your private registry:
-
-  ```
-  kubectl kots admin-console push-images ./kotsadm.tar.gz REGISTRY_HOST \
-    --registry-username RW_USERNAME \
-    --registry-password RW_PASSWORD
-  ```
-
-  Replace:
-
-  * `REGISTRY_HOST` with the hostname for the private registry.
For example, `private.registry.host` or `my-registry.example.com/my-namespace`.
-
-  * `RW_USERNAME` and `RW_PASSWORD` with the username and password for an account that has read and write access to the private registry.
-
-    :::note
-    KOTS does not store or reuse these read-write credentials.
-    :::
-
-================
-File: docs/partials/instance-insights/_airgap-telemetry.mdx
-================
-For air gap instances, Replicated KOTS and the Replicated SDK collect and store instance telemetry in a Kubernetes Secret in the customer environment. The Replicated SDK also stores any custom metrics within its Secret.
-
-The telemetry and custom metrics stored in the Secret are collected when a support bundle is generated in the environment. When the support bundle is uploaded to the Vendor Portal, the telemetry and custom metrics are associated with the correct customer and instance ID, and the Vendor Portal updates the instance insights and event data accordingly.
-
-================
-File: docs/partials/instance-insights/_notifications-about.mdx
-================
-:::note
-Configuring notifications for customer instance changes is in public Beta. Features and functionality are subject to change as we continue to iterate on this functionality ahead of General Availability.
-:::
-
-Notifications can help catch problems before they happen and let you proactively contact customers to prevent support cases. For example, you can be notified of a degraded status and contact your customer about fixing it before the instance goes down. This approach can make issues quicker and easier to solve, and improve the customer experience with less downtime.
-
-For more information about how application status is determined, see [Resource Statuses](insights-app-status#resource-statuses) in _Enabling and Understanding Application Status_. For more information about events that might trigger notifications, see [How the Vendor Portal Generates Events and Insights](instance-insights-event-data#about-events) in _About Instance and Event Data_.
-
-================
-File: docs/partials/instance-insights/_supported-resources-status.mdx
-================
-The following resource types are supported:
-
-* Deployment
-* StatefulSet
-* Service
-* Ingress
-* PersistentVolumeClaims (PVC)
-* DaemonSet
-
-================
-File: docs/partials/kots/_admin-console-about.mdx
-================
-KOTS provides an Admin Console that lets your customers manage your application. You can customize the Admin Console. For example, you can customize the Config screen to allow customers to specify inputs related to unique options that your application provides. You can also include your own branding on the Admin Console, configure status informers, and add custom graphs.
-
-================
-File: docs/partials/kots/_download-portal-about.mdx
-================
-The Replicated Download Portal can be used to share license files, air gap bundles, and other assets with customers. A unique Download Portal link is available for each customer.
The Download Portal uses information from the customer's license to make the relevant assets available for download, such as:
-* The license file
-* `.airgap` bundles for the application releases that the customer has access to based on their channel assignment
-* The Replicated KOTS Admin Console `kotsadm.tar.gz` air gap bundle
-* The Replicated kURL `.tgz` air gap bundle
-* Preflight, support bundle, and KOTS CLI kubectl plugins
-
-================
-File: docs/partials/kots/_embedded-kubernetes-definition.mdx
-================
-_Embedded Kubernetes_ refers to delivering a Kubernetes distribution alongside an application, so that both Kubernetes and the application are deployed in the customer environment. Embedding Kubernetes allows software vendors to install their Kubernetes application in non-Kubernetes customer-controlled environments, such as virtual machines (VMs) or bare metal servers. Additionally, software vendors that embed Kubernetes with their application have greater control over the characteristics of the cluster where their application is installed. This allows vendors to deliver a cluster that meets their application's requirements, which can help reduce errors during installation.
-
-================
-File: docs/partials/kots/_kots-definition.mdx
-================
-Replicated KOTS is a kubectl plugin and an in-cluster Admin Console that provides highly successful installations of Helm charts and Kubernetes applications into customer-controlled environments, including on-prem and air gap environments.
-
-================
-File: docs/partials/kots/_kots-entitlement-note.mdx
-================
-:::note
-The Replicated KOTS entitlement is required to install applications with KOTS. For more information, see [Pricing](https://www.replicated.com/pricing) on the Replicated website.
-:::
-
-================
-File: docs/partials/kots-cli/_ensure-rbac.mdx
-================
-<tr>
-  <td><code>--ensure-rbac</code></td>
-  <td>bool</td>
-  <td>When <code>false</code>, KOTS does not attempt to create the RBAC resources necessary to manage applications. <strong>Default:</strong> <code>true</code>. If a role specification is needed, use the <a href="kots-cli-admin-console-generate-manifests">generate-manifests</a> command.</td>
-</tr>
-
-================
-File: docs/partials/kots-cli/_help.mdx
-================
-<tr>
-  <td><code>-h, --help</code></td>
-  <td></td>
-  <td>Help for the command.</td>
-</tr>
-
-================
-File: docs/partials/kots-cli/_kotsadm-namespace.mdx
-================
-<tr>
-  <td><code>--kotsadm-namespace</code></td>
-  <td>string</td>
-  <td><p>Set to override the registry namespace of KOTS Admin Console images. Used for air gap installations. For more information, see [Air Gap Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster-airgapped).</p><p><strong>Note:</strong> Replicated recommends that you use <code>--kotsadm-registry</code> instead of <code>--kotsadm-namespace</code> to override both the registry hostname and, optionally, the registry namespace with a single flag.</p></td>
-</tr>
-
-================
-File: docs/partials/kots-cli/_kotsadm-registry.mdx
-================
-<tr>
-  <td><code>--kotsadm-registry</code></td>
-  <td>string</td>
-  <td>Set to override the registry hostname and namespace of KOTS Admin Console images. Used for air gap installations.
For more information, see [Air Gap Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster-airgapped).</td> -</tr> - -================ -File: docs/partials/kots-cli/_registry-password.mdx -================ -<tr> - <td><code>--registry-password</code></td> - <td>string</td> - <td>Password to use to authenticate with the application registry. Used for air gap installations.</td> -</tr> - -================ -File: docs/partials/kots-cli/_registry-username.mdx -================ -<tr> - <td><code>--registry-username</code></td> - <td>string</td> - <td>Username to use to authenticate with the application registry. Used for air gap installations.</td> -</tr> - -================ -File: docs/partials/kots-cli/_skip-rbac-check.mdx -================ -<tr> - <td><code>--skip-rbac-check</code></td> - <td>bool</td> - <td>When <code>true</code>, KOTS does not validate RBAC permissions. <strong>Default:</strong> <code>false</code></td> -</tr> - -================ -File: docs/partials/kots-cli/_strict-sec-context-yaml.mdx -================ -```yaml -securityContext: - fsGroup: 1001 - runAsGroup: 1001 - runAsNonRoot: true - runAsUser: 1001 - seccompProfile: - type: RuntimeDefault - supplementalGroups: - - 1001 -``` - -================ -File: docs/partials/kots-cli/_strict-security-context.mdx -================ -import StrictSecContextYaml from "./_strict-sec-context-yaml.mdx" - -<tr> - <td><code>--strict-security-context</code></td> - <td>bool</td> - <td> - <p>Set to <code>true</code> to explicitly enable strict security contexts for all KOTS Pods and containers.</p> - <p>By default, KOTS Pods and containers are not deployed with a specific security context. When <code>true</code>, <code>--strict-security-context</code> does the following:</p> - <ul> - <li>Ensures containers run as a non-root user</li> - <li>Sets the specific UID for the containers (1001)</li> - <li>Sets the GID for volume ownership and permissions (1001)</li> - <li>Applies the default container runtime seccomp profile for security</li> - <li>Ensures the container is not run with privileged system access</li> - <li>Prevents the container from gaining more privileges than its parent process</li> - <li>Ensures the container's root filesystem is mounted as read-only</li> - <li>Removes all Linux capabilities from the container</li> - </ul> - <p>The following shows the <code>securityContext</code> for KOTS Pods when <code>--strict-security-context</code> is set:</p> - <StrictSecContextYaml/> - <p><strong>Default:</strong> <code>false</code></p> - :::note - Might not work for some storage providers. - ::: - </td> -</tr> - -================ -File: docs/partials/kots-cli/_use-minimal-rbac.mdx -================ -<tr> - <td><code>--use-minimal-rbac</code></td> - <td>bool</td> - <td><p>When <code>true</code>, KOTS RBAC permissions are limited to the namespace where it is installed.</p><p> To use <code>--use-minimal-rbac</code>, the application must support namespace-scoped installations and the user must have the minimum RBAC permissions required by KOTS in the target namespace. For a complete list of requirements, see <a href="/enterprise/installing-general-requirements#namespace-scoped">Namespace-scoped RBAC Requirements​</a> in <em>Installation Requirements</em>. 
<strong>Default:</strong> <code>false</code></p></td>
-</tr>
-
-================
-File: docs/partials/kots-cli/_wait-duration.mdx
-================
-<tr>
-  <td><code>--wait-duration</code></td>
-  <td>string</td>
-  <td>Timeout to use while waiting for individual components to be ready. Must be in <a href="https://pkg.go.dev/time#ParseDuration">Go duration</a> format. <strong>Example:</strong> 10s, 2m</td>
-</tr>
-
-================
-File: docs/partials/kots-cli/_with-minio.mdx
-================
-<tr>
-  <td><code>--with-minio</code></td>
-  <td>bool</td>
-  <td>When <code>true</code>, KOTS deploys a local MinIO instance for storage and attempts to change any MinIO-based snapshots (hostpath and NFS) to the local-volume-provider plugin. See <a href="https://github.com/replicatedhq/local-volume-provider">local-volume-provider</a> in GitHub. <strong>Default:</strong> <code>true</code></td>
-</tr>
-
-================
-File: docs/partials/kurl/_installers.mdx
-================
-To provision a cluster on a VM or bare metal server, kURL uses a spec that is defined in a manifest file with `apiVersion: cluster.kurl.sh/v1beta1` and `kind: Installer`. This spec (called a _kURL installer_) lists the kURL add-ons that will be included in the cluster. kURL provides add-ons for networking, storage, ingress, and more. kURL also provides a KOTS add-on, which installs KOTS in the cluster and deploys the KOTS Admin Console. You can customize the kURL installer according to your application requirements.
-
-================
-File: docs/partials/kurl/_kurl-availability.mdx
-================
-:::note
-Replicated kURL is available only for existing customers. If you are not an existing kURL user, use Replicated Embedded Cluster instead. For more information, see [Using Embedded Cluster](/vendor/embedded-overview).
-
-kURL is a Generally Available (GA) product for existing customers. For more information about the Replicated product lifecycle phases, see [Support Lifecycle Policy](/vendor/policies-support-lifecycle).
-:::
-
-================
-File: docs/partials/kurl/_kurl-definition.mdx
-================
-kURL is an open source project maintained by Replicated that software vendors can use to create custom Kubernetes distributions that are embedded with their application. Enterprise customers can then run a kURL installation script on their virtual machine (VM) or bare metal server to provision a cluster and install the application. This allows software vendors to distribute Kubernetes applications to customers that do not have access to a cluster in their environment.
-
-For more information about the kURL open source project, see the [kURL website](https://kurl.sh).
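-
-For example, a customer typically provisions a cluster by piping the installation script for their application to bash, where `APP_SLUG` is a placeholder for the unique application slug:
-
-```bash
-# Provision the cluster and install KOTS (APP_SLUG is a placeholder)
-curl -sSL https://k8s.kurl.sh/APP_SLUG | sudo bash
-```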
-
-================
-File: docs/partials/linter-rules/_allow-privilege-escalation.mdx
-================
-```yaml
-spec:
-  allowPrivilegeEscalation: true
-```
-
-================
-File: docs/partials/linter-rules/_application-icon.mdx
-================
-```yaml
-apiVersion: kots.io/v1beta1
-kind: Application
-spec:
-  icon: https://example.com/app-icon.png
-```
-
-================
-File: docs/partials/linter-rules/_application-spec.mdx
-================
-```yaml
-apiVersion: kots.io/v1beta1
-kind: Application
-```
-
-================
-File: docs/partials/linter-rules/_application-statusInformers.mdx
-================
-```yaml
-apiVersion: kots.io/v1beta1
-kind: Application
-spec:
-  statusInformers:
-    - deployment/example-nginx
-```
-
-================
-File: docs/partials/linter-rules/_config-option-invalid-regex-validator.mdx
-================
-**Correct**:
-
-```yaml
-spec:
-  groups:
-  - name: authentication
-    title: Authentication
-    description: Configure application authentication below.
-  - name: jwt_file
-    title: jwt_file
-    type: file
-    validation:
-      regex:
-        pattern: "^[A-Za-z0-9-_]+.[A-Za-z0-9-_]+.[A-Za-z0-9-_]*$" // valid RE2 regular expression
-        message: "JWT is invalid"
-```
-
-**Incorrect**:
-
-```yaml
-spec:
-  groups:
-  - name: authentication
-    title: Authentication
-    description: Configure application authentication below.
-  - name: jwt_file
-    title: jwt_file
-    type: file
-    validation:
-      regex:
-        pattern: "^/path/([A-Za-z0-9-_]+.[A-Za-z0-9-_]+.[A-Za-z0-9-_]*$" // invalid RE2 regular expression
-        message: "JWT is invalid"
-```
-
-================
-File: docs/partials/linter-rules/_config-option-invalid-type.mdx
-================
-**Correct**:
-
-```yaml
-spec:
-  groups:
-  - name: authentication
-    title: Authentication
-    description: Configure application authentication below.
-  - name: group_title
-    title: Group Title
-    items:
-    - name: http_enabled
-      title: HTTP Enabled
-      type: bool # bool is a valid type
-```
-
-**Incorrect**:
-
-```yaml
-spec:
-  groups:
-  - name: authentication
-    title: Authentication
-    description: Configure application authentication below.
-  - name: group_title
-    title: Group Title
-    items:
-    - name: http_enabled
-      title: HTTP Enabled
-      type: unknown_type # unknown_type is not a valid type
-```
-
-================
-File: docs/partials/linter-rules/_config-option-is-circular.mdx
-================
-**Incorrect**:
-
-```yaml
-spec:
-  groups:
-  - name: example_settings
-    items:
-    - name: example_default_value
-      type: text
-      value: repl{{ ConfigOption "example_default_value" }}
-```
-
-================
-File: docs/partials/linter-rules/_config-option-password-type.mdx
-================
-```yaml
-spec:
-  groups:
-  - name: ports
-    items:
-    - name: my_secret
-      type: password
-```
-
-================
-File: docs/partials/linter-rules/_config-option-regex-validator-invalid-type.mdx
-================
-**Correct**:
-
-```yaml
-spec:
-  groups:
-  - name: authentication
-    title: Authentication
-    description: Configure application authentication below.
-  - name: jwt_file
-    title: jwt_file
-    type: file // valid item type
-    validation:
-      regex:
-        pattern: "^[A-Za-z0-9-_]+.[A-Za-z0-9-_]+.[A-Za-z0-9-_]*$"
-        message: "JWT is invalid"
-```
-
-**Incorrect**:
-
-```yaml
-spec:
-  groups:
-  - name: authentication
-    title: Authentication
-    description: Configure application authentication below.
- - name: jwt_file - title: jwt_file - type: bool // invalid item type - validation: - regex: - pattern: "^[A-Za-z0-9-_]+.[A-Za-z0-9-_]+.[A-Za-z0-9-_]*$" - message: "JWT is invalid" -``` - -================ -File: docs/partials/linter-rules/_config-spec.mdx -================ -```yaml -apiVersion: kots.io/v1beta1 -kind: Config -``` - -================ -File: docs/partials/linter-rules/_container-image-latest-tag.mdx -================ -```yaml -spec: - containers: - - image: nginx:latest -``` - -================ -File: docs/partials/linter-rules/_container-image-local-image-name.mdx -================ -```yaml -spec: - containers: - - image: LocalImageName -``` - -================ -File: docs/partials/linter-rules/_container-resource-limits.mdx -================ -```yaml -spec: - containers: - - name: nginx - resources: - requests: - memory: '32Mi' - cpu: '100m' - # note the lack of a limit field -``` - -================ -File: docs/partials/linter-rules/_container-resource-requests.mdx -================ -```yaml -spec: - containers: - - name: nginx - resources: - limits: - memory: '256Mi' - cpu: '500m' - # note the lack of a requests field -``` - -================ -File: docs/partials/linter-rules/_container-resources.mdx -================ -```yaml -spec: - containers: - - name: nginx - # note the lack of a resources field -``` - -================ -File: docs/partials/linter-rules/_deprecated-kubernetes-installer-version.mdx -================ -**Correct**: - -```yaml -apiVersion: cluster.kurl.sh/v1beta1 -kind: Installer -``` - -**Incorrect**: - -```yaml -apiVersion: kurl.sh/v1beta1 -kind: Installer -``` - -================ -File: docs/partials/linter-rules/_hardcoded-namespace.mdx -================ -```yaml -metadata: - name: spline-reticulator - namespace: graphviz-pro -``` - -================ -File: docs/partials/linter-rules/_invalid_type.mdx -================ -**Correct**: - -```yaml -ports: - - serviceName: "example" - servicePort: 80 -``` - -**Incorrect**: - -```yaml -ports: - - serviceName: "example" - servicePort: "80" -``` - -================ -File: docs/partials/linter-rules/_invalid-helm-release-name.mdx -================ -```yaml -apiVersion: kots.io/v1beta1 -kind: HelmChart -spec: - chart: - releaseName: samplechart-release-1 -``` - -================ -File: docs/partials/linter-rules/_invalid-kubernetes-installer.mdx -================ -**Correct**: - -```yaml -apiVersion: cluster.kurl.sh/v1beta1 -kind: Installer -spec: - kubernetes: - version: 1.24.5 -``` - -**Incorrect**: - -```yaml -apiVersion: cluster.kurl.sh/v1beta1 -kind: Installer -spec: - kubernetes: - version: 1.24.x - ekco: - version: latest -``` - -================ -File: docs/partials/linter-rules/_invalid-min-kots-version.mdx -================ -```yaml -apiVersion: kots.io/v1beta1 -kind: Application -spec: - minKotsVersion: 1.0.0 -``` - -================ -File: docs/partials/linter-rules/_invalid-rendered-yaml.mdx -================ -**Example Helm Chart**: -```yaml -apiVersion: kots.io/v1beta1 -kind: HelmChart -metadata: - name: nginx-chart -spec: - chart: - name: nginx-chart - chartVersion: 0.1.0 - helmVersion: v3 - useHelmInstall: true - builder: {} - values: - image: repl{{ ConfigOption `nginx_image`}} -``` - -**Correct Config**: -```yaml -apiVersion: kots.io/v1beta1 -kind: Config -metadata: - name: nginx-config -spec: - groups: - - name: nginx-deployment-config - title: nginx deployment config - items: - - name: nginx_image - title: image - type: text - default: "nginx" -``` - -**Resulting Rendered Helm Chart**: 
-```yaml -apiVersion: kots.io/v1beta1 -kind: HelmChart -metadata: - name: nginx-chart -spec: - chart: - name: nginx-chart - chartVersion: 0.1.0 - helmVersion: v3 - useHelmInstall: true - builder: {} - values: - image: nginx -``` -**Incorrect Config**: -```yaml -apiVersion: kots.io/v1beta1 -kind: Config -metadata: - name: nginx-config -spec: - groups: - - name: nginx-deployment-config - items: - - name: nginx_image - title: image - type: text - default: "***HIDDEN***" -``` - -**Resulting Lint Error**: -```json -{ - "lintExpressions": [ - { - "rule": "invalid-rendered-yaml", - "type": "error", - "message": "yaml: did not find expected alphabetic or numeric character: image: ***HIDDEN***", - "path": "nginx-chart.yaml", - "positions": null - } - ], - "isLintingComplete": false -} -``` -**Incorrectly Rendered Helm Chart**: -```yaml -apiVersion: kots.io/v1beta1 -kind: HelmChart -metadata: - name: nginx-chart -spec: - chart: - name: nginx-chart - chartVersion: 0.1.0 - helmVersion: v3 - useHelmInstall: true - builder: {} - values: - image: ***HIDDEN*** -``` - -================ -File: docs/partials/linter-rules/_invalid-target-kots-version.mdx -================ -```yaml -apiVersion: kots.io/v1beta1 -kind: Application -spec: - targetKotsVersion: 1.0.0 -``` - -================ -File: docs/partials/linter-rules/_invalid-yaml.mdx -================ -**Correct**: - -```yaml -spec: - kubernetes: - version: 1.24.5 -``` - -**Incorrect**: - -```yaml -spec: - kubernetes: version 1.24.x -``` - -================ -File: docs/partials/linter-rules/_linter-definition.mdx -================ -The linter checks the manifest files in Replicated KOTS releases to ensure that there are no YAML syntax errors, that all required manifest files are present in the release to support installation with KOTS, and more. 
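-
-For example, the manifest files for a release can be linted from a local directory with the Replicated CLI. This sketch assumes that the CLI is installed and that the release files are in `./manifests`:
-
-```bash
-# Lint the release manifests before promoting a release
-replicated release lint --yaml-dir ./manifests
-```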
- -================ -File: docs/partials/linter-rules/_may-contain-secrets.mdx -================ -```yaml -data: - ENV_VAR_1: "y2X4hPiAKn0Pbo24/i5nlInNpvrL/HJhlSCueq9csamAN8g5y1QUjQnNL7btQ==" -``` - -================ -File: docs/partials/linter-rules/_missing-api-version-field.mdx -================ -```yaml -apiVersion: kots.io/v1beta1 -``` - -================ -File: docs/partials/linter-rules/_missing-kind-field.mdx -================ -```yaml -kind: Config -``` - -================ -File: docs/partials/linter-rules/_preflight-spec.mdx -================ -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: Preflight -``` - -================ -File: docs/partials/linter-rules/_privileged.mdx -================ -```yaml -spec: - privileged: true -``` - -================ -File: docs/partials/linter-rules/_repeat-option-malformed-yamlpath.mdx -================ -```yaml -spec: - groups: - - name: ports - items: - - name: service_port - yamlPath: 'spec.ports[0]' -``` - -================ -File: docs/partials/linter-rules/_repeat-option-missing-template.mdx -================ -```yaml -spec: - groups: - - name: ports - items: - - name: service_port - title: Service Port - type: text - repeatable: true - templates: - - apiVersion: v1 - kind: Service - name: my-service - namespace: my-app - yamlPath: 'spec.ports[0]' - - apiVersion: v1 - kind: Service - name: my-service - namespace: my-app -``` - -================ -File: docs/partials/linter-rules/_repeat-option-missing-valuesByGroup.mdx -================ -```yaml -spec: - groups: - - name: ports - items: - - name: service_port - title: Service Port - type: text - repeatable: true - valuesByGroup: - ports: - port-default-1: "80" -``` - -================ -File: docs/partials/linter-rules/_replicas-1.mdx -================ -```yaml -spec: - replicas: 1 -``` - -================ -File: docs/partials/linter-rules/_resource-limits-cpu.mdx -================ -```yaml -spec: - containers: - - name: nginx - resources: - limits: - memory: '256Mi' - # note the lack of a cpu field -``` - -================ -File: docs/partials/linter-rules/_resource-limits-memory.mdx -================ -```yaml -spec: - containers: - - name: nginx - resources: - limits: - cpu: '500m' - # note the lack of a memory field -``` - -================ -File: docs/partials/linter-rules/_resource-requests-cpu.mdx -================ -```yaml -spec: - containers: - - name: nginx - resources: - requests: - memory: '32Mi' - # note the lack of a cpu field -``` - -================ -File: docs/partials/linter-rules/_resource-requests-memory.mdx -================ -```yaml -spec: - containers: - - name: nginx - resources: - requests: - cpu: '100m' - # note the lack of a memory field -``` - -================ -File: docs/partials/linter-rules/_troubleshoot-spec.mdx -================ -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: SupportBundle -``` - -================ -File: docs/partials/linter-rules/_volume-docker-sock.mdx -================ -```yaml -spec: - volumes: - - hostPath: - path: /var/run/docker.sock -``` - -================ -File: docs/partials/linter-rules/_volumes-host-paths.mdx -================ -```yaml -spec: - volumes: - - hostPath: - path: /data -``` - -================ -File: docs/partials/monitoring/_limitation-ec.mdx -================ -Monitoring applications with Prometheus is not supported for installations with [Replicated Embedded Cluster](/vendor/embedded-overview). 
- -================ -File: docs/partials/monitoring/_overview-prom.mdx -================ -The KOTS Admin Console can use the open source systems monitoring tool Prometheus to collect metrics on an application and the cluster where the application is installed. Prometheus components include the main Prometheus server, which scrapes and stores time series data, an Alertmanager for alerting on metrics, and Grafana for visualizing metrics. For more information about Prometheus, see [What is Prometheus?](https://prometheus.io/docs/introduction/overview/) in the Prometheus documentation. - -The Admin Console exposes graphs with key metrics collected by Prometheus in the **Monitoring** section of the dashboard. By default, the Admin Console displays the following graphs: - -* Cluster disk usage -* Pod CPU usage -* Pod memory usage - -In addition to these default graphs, application developers can also expose business and application level metrics and alerts on the dashboard. - -The following screenshot shows an example of the **Monitoring** section on the Admin Console dashboard with the Disk Usage, CPU Usage, and Memory Usage default graphs: - -<img alt="Graphs on the Admin Console dashboard" src="/images/kotsadm-dashboard-graph.png" width="700px"/> - -[View a larger version of this image](/images/kotsadm-dashboard-graph.png) - -================ -File: docs/partials/preflights/_analyzers-note.mdx -================ -For basic examples of checking CPU, memory, and disk capacity, see [Node Resources Analyzer](https://troubleshoot.sh/reference/analyzers/node-resources/) in the Troubleshoot documentation. - -================ -File: docs/partials/preflights/_http-requests-cr.mdx -================ -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: Preflight -metadata: - name: preflight-checks -spec: - collectors: - - http: - collectorName: slack - get: - url: https://api.slack.com/methods/api.test - analyzers: - - textAnalyze: - checkName: Slack Accessible - fileName: slack.json - regex: '"status": 200,' - outcomes: - - pass: - when: "true" - message: "Can access the Slack API" - - fail: - when: "false" - message: "Cannot access the Slack API. Check that the server can reach the internet and check [status.slack.com](https://status.slack.com)." -``` - -================ -File: docs/partials/preflights/_http-requests-secret.mdx -================ -```yaml -apiVersion: v1 -kind: Secret -metadata: - labels: - troubleshoot.sh/kind: preflight - name: "{{ .Release.Name }}-preflight-config" -stringData: - preflight.yaml: | - apiVersion: troubleshoot.sh/v1beta2 - kind: Preflight - metadata: - name: preflight-sample - spec: - collectors: - - http: - collectorName: slack - get: - url: https://api.slack.com/methods/api.test - analyzers: - - textAnalyze: - checkName: Slack Accessible - fileName: slack.json - regex: '"status": 200,' - outcomes: - - pass: - when: "true" - message: "Can access the Slack API" - - fail: - when: "false" - message: "Cannot access the Slack API. Check that the server can reach the internet and check [status.slack.com](https://status.slack.com)." 
-``` - -================ -File: docs/partials/preflights/_k8s-distro-cr.mdx -================ -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: Preflight -metadata: - name: my-app -spec: - analyzers: - - distribution: - checkName: Kubernetes distribution - outcomes: - - fail: - when: "== docker-desktop" - message: The application does not support Docker Desktop Clusters - - fail: - when: "== microk8s" - message: The application does not support Microk8s Clusters - - fail: - when: "== minikube" - message: The application does not support Minikube Clusters - - pass: - when: "== eks" - message: EKS is a supported distribution - - pass: - when: "== gke" - message: GKE is a supported distribution - - pass: - when: "== aks" - message: AKS is a supported distribution - - pass: - when: "== kurl" - message: KURL is a supported distribution - - pass: - when: "== digitalocean" - message: DigitalOcean is a supported distribution - - warn: - message: Unable to determine the distribution of Kubernetes -``` - -================ -File: docs/partials/preflights/_k8s-distro-secret.mdx -================ -```yaml -apiVersion: v1 -kind: Secret -metadata: - labels: - troubleshoot.sh/kind: preflight - name: "{{ .Release.Name }}-preflight-config" -stringData: - preflight.yaml: | - apiVersion: troubleshoot.sh/v1beta2 - kind: Preflight - metadata: - name: preflight-sample - spec: - analyzers: - - distribution: - checkName: Kubernetes distribution - outcomes: - - fail: - when: "== docker-desktop" - message: The application does not support Docker Desktop Clusters - - fail: - when: "== microk8s" - message: The application does not support Microk8s Clusters - - fail: - when: "== minikube" - message: The application does not support Minikube Clusters - - pass: - when: "== eks" - message: EKS is a supported distribution - - pass: - when: "== gke" - message: GKE is a supported distribution - - pass: - when: "== aks" - message: AKS is a supported distribution - - pass: - when: "== kurl" - message: KURL is a supported distribution - - pass: - when: "== digitalocean" - message: DigitalOcean is a supported distribution - - warn: - message: Unable to determine the distribution of Kubernetes -``` - -================ -File: docs/partials/preflights/_k8s-version-cr.mdx -================ -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: Preflight -metadata: - name: my-app -spec: - analyzers: - - clusterVersion: - outcomes: - - fail: - when: "< 1.25.0" - message: The application requires Kubernetes 1.25.0 or later, and recommends 1.28.0. - uri: https://www.kubernetes.io - - warn: - when: "< 1.28.0" - message: Your cluster meets the minimum version of Kubernetes, but we recommend you update to 1.28.0 or later. - uri: https://kubernetes.io - - pass: - message: Your cluster meets the recommended and required versions of Kubernetes. -``` - -================ -File: docs/partials/preflights/_k8s-version-secret.mdx -================ -```yaml -apiVersion: v1 -kind: Secret -metadata: - labels: - troubleshoot.sh/kind: preflight - name: "{{ .Release.Name }}-preflight-config" -stringData: - preflight.yaml: | - apiVersion: troubleshoot.sh/v1beta2 - kind: Preflight - metadata: - name: preflight-sample - spec: - analyzers: - - clusterVersion: - outcomes: - - fail: - when: "< 1.25.0" - message: The application requires Kubernetes 1.25.0 or later, and recommends 1.28.0. - uri: https://www.kubernetes.io - - warn: - when: "< 1.28.0" - message: Your cluster meets the minimum version of Kubernetes, but we recommend you update to 1.28.0 or later. 
- uri: https://kubernetes.io - - pass: - message: Your cluster meets the recommended and required versions of Kubernetes. -``` - -================ -File: docs/partials/preflights/_mysql-cr.mdx -================ -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: Preflight -metadata: - name: my-app -spec: - collectors: - - mysql: - collectorName: mysql - uri: 'repl{{ ConfigOption "db_user" }}:repl{{ConfigOption "db_password" }}@tcp(repl{{ ConfigOption "db_host" }}:repl{{ConfigOption "db_port" }})/repl{{ ConfigOption "db_name" }}' - analyzers: - - mysql: - # `strict: true` prevents installation from continuing if the preflight check fails - strict: true - checkName: Must be MySQL 8.x or later - collectorName: mysql - outcomes: - - fail: - when: connected == false - message: Cannot connect to MySQL server - - fail: - when: version < 8.x - message: The MySQL server must be at least version 8 - - pass: - message: The MySQL server is ready -``` - -================ -File: docs/partials/preflights/_mysql-secret.mdx -================ -```yaml -apiVersion: v1 -kind: Secret -metadata: - labels: - troubleshoot.sh/kind: preflight - name: "{{ .Release.Name }}-preflight-config" -stringData: - preflight.yaml: | - apiVersion: troubleshoot.sh/v1beta2 - kind: Preflight - metadata: - name: preflight-sample - spec: - {{ if eq .Values.global.mysql.enabled true }} - collectors: - - mysql: - collectorName: mysql - uri: '{{ .Values.global.externalDatabase.user }}:{{ .Values.global.externalDatabase.password }}@tcp({{ .Values.global.externalDatabase.host }}:{{ .Values.global.externalDatabase.port }})/{{ .Values.global.externalDatabase.database }}?tls=false' - {{ end }} - analyzers: - {{ if eq .Values.global.mysql.enabled true }} - - mysql: - checkName: Must be MySQL 8.x or later - collectorName: mysql - outcomes: - - fail: - when: connected == false - message: Cannot connect to MySQL server - - fail: - when: version < 8.x - message: The MySQL server must be at least version 8 - - pass: - message: The MySQL server is ready - {{ end }} -``` - -================ -File: docs/partials/preflights/_node-count-secret.mdx -================ -```yaml -apiVersion: v1 -kind: Secret -metadata: - labels: - troubleshoot.sh/kind: preflight - name: "{{ .Release.Name }}-preflight-config" -stringData: - preflight.yaml: | - apiVersion: troubleshoot.sh/v1beta2 - kind: Preflight - metadata: - name: preflight-sample - spec: - analyzers: - - nodeResources: - checkName: Node Count Check - outcomes: - - fail: - when: 'count() > {{ .Values.global.maxNodeCount }}' - message: "The cluster has more than {{ .Values.global.maxNodeCount }} nodes." - - pass: - message: You have the correct number of nodes. 
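-      # count() is evaluated against the nodes that match any configured filters;
-      # with no filters (as here), it is the total number of nodes in the cluster,
-      # compared against the templated threshold global.maxNodeCount.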
-``` - -================ -File: docs/partials/preflights/_node-cpu-cr.mdx -================ -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: Preflight -metadata: - name: my-app -spec: - analyzers: - - nodeResources: - checkName: Total CPU Cores in the cluster is 4 or greater - outcomes: - - fail: - when: "sum(cpuCapacity) < 4" - message: The cluster must contain at least 4 cores - uri: https://kurl.sh/docs/install-with-kurl/system-requirements - - pass: - message: There are at least 4 cores in the cluster -``` - -================ -File: docs/partials/preflights/_node-cpu-secret.mdx -================ -```yaml -apiVersion: v1 -kind: Secret -metadata: - labels: - troubleshoot.sh/kind: preflight - name: "{{ .Release.Name }}-preflight-config" -stringData: - preflight.yaml: | - apiVersion: troubleshoot.sh/v1beta2 - kind: Preflight - metadata: - name: preflight-sample - spec: - analyzers: - - nodeResources: - checkName: Total CPU Cores in the cluster is 4 or greater - outcomes: - - fail: - when: "sum(cpuCapacity) < 4" - message: The cluster must contain at least 4 cores - uri: https://kurl.sh/docs/install-with-kurl/system-requirements - - pass: - message: There are at least 4 cores in the cluster -``` - -================ -File: docs/partials/preflights/_node-ephem-storage-cr.mdx -================ -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: Preflight -metadata: - name: my-app -spec: - analyzers: - - nodeResources: - checkName: Every node in the cluster must have at least 40 GB of ephemeral storage, with 100 GB recommended - outcomes: - - fail: - when: "min(ephemeralStorageCapacity) < 40Gi" - message: All nodes must have at least 40 GB of ephemeral storage. - uri: https://kurl.sh/docs/install-with-kurl/system-requirements - - warn: - when: "min(ephemeralStorageCapacity) < 100Gi" - message: All nodes are recommended to have at least 100 GB of ephemeral storage. - uri: https://kurl.sh/docs/install-with-kurl/system-requirements - - pass: - message: All nodes have at least 100 GB of ephemeral storage. -``` - -================ -File: docs/partials/preflights/_node-ephem-storage-secret.mdx -================ -```yaml -apiVersion: v1 -kind: Secret -metadata: - labels: - troubleshoot.sh/kind: preflight - name: "{{ .Release.Name }}-preflight-config" -stringData: - preflight.yaml: | - apiVersion: troubleshoot.sh/v1beta2 - kind: Preflight - metadata: - name: preflight-sample - spec: - analyzers: - - nodeResources: - checkName: Every node in the cluster must have at least 40 GB of ephemeral storage, with 100 GB recommended - outcomes: - - fail: - when: "min(ephemeralStorageCapacity) < 40Gi" - message: All nodes must have at least 40 GB of ephemeral storage. - uri: https://kurl.sh/docs/install-with-kurl/system-requirements - - warn: - when: "min(ephemeralStorageCapacity) < 100Gi" - message: All nodes are recommended to have at least 100 GB of ephemeral storage. - uri: https://kurl.sh/docs/install-with-kurl/system-requirements - - pass: - message: All nodes have at least 100 GB of ephemeral storage. -``` - -================ -File: docs/partials/preflights/_node-mem-cr.mdx -================ -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: Preflight -metadata: - name: my-app -spec: - analyzers: - - nodeResources: - checkName: Every node in the cluster must have at least 8 GB of memory, with 32 GB recommended - outcomes: - - fail: - when: "min(memoryCapacity) < 8Gi" - message: All nodes must have at least 8 GB of memory. 
- uri: https://kurl.sh/docs/install-with-kurl/system-requirements - - warn: - when: "min(memoryCapacity) < 32Gi" - message: All nodes are recommended to have at least 32 GB of memory. - uri: https://kurl.sh/docs/install-with-kurl/system-requirements - - pass: - message: All nodes have at least 32 GB of memory. -``` - -================ -File: docs/partials/preflights/_node-mem-secret.mdx -================ -```yaml -apiVersion: v1 -kind: Secret -metadata: - labels: - troubleshoot.sh/kind: preflight - name: "{{ .Release.Name }}-preflight-config" -stringData: - preflight.yaml: | - apiVersion: troubleshoot.sh/v1beta2 - kind: Preflight - metadata: - name: preflight-sample - spec: - analyzers: - - nodeResources: - checkName: Every node in the cluster must have at least 8 GB of memory, with 32 GB recommended - outcomes: - - fail: - when: "min(memoryCapacity) < 8Gi" - message: All nodes must have at least 8 GB of memory. - uri: https://kurl.sh/docs/install-with-kurl/system-requirements - - warn: - when: "min(memoryCapacity) < 32Gi" - message: All nodes are recommended to have at least 32 GB of memory. - uri: https://kurl.sh/docs/install-with-kurl/system-requirements - - pass: - message: All nodes have at least 32 GB of memory. -``` - -================ -File: docs/partials/preflights/_node-req-cr.mdx -================ -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: Preflight -metadata: - name: my-app -spec: - analyzers: - - nodeResources: - checkName: Node requirements - filters: - # Must have 1 node with 16 GB (available) memory and 5 cores (on a single node) with amd64 architecture - allocatableMemory: 16Gi - cpuArchitecture: amd64 - cpuCapacity: "5" - outcomes: - - fail: - when: "count() < 1" - message: This application requires at least 1 node with 16GB available memory and 5 cpu cores with amd64 architecture - - pass: - message: This cluster has a node with enough memory and cpu cores -``` - -================ -File: docs/partials/preflights/_node-req-secret.mdx -================ -```yaml -apiVersion: v1 -kind: Secret -metadata: - labels: - troubleshoot.sh/kind: preflight - name: "{{ .Release.Name }}-preflight-config" -stringData: - preflight.yaml: | - apiVersion: troubleshoot.sh/v1beta2 - kind: Preflight - metadata: - name: preflight-sample - spec: - analyzers: - - nodeResources: - checkName: Node requirements - filters: - # Must have 1 node with 16 GB (available) memory and 5 cores (on a single node) with amd64 architecture - allocatableMemory: 16Gi - cpuArchitecture: amd64 - cpuCapacity: "5" - outcomes: - - fail: - when: "count() < 1" - message: This application requires at least 1 node with 16GB available memory and 5 cpu cores with amd64 architecture - - pass: - message: This cluster has a node with enough memory and cpu cores -``` - -================ -File: docs/partials/preflights/_node-storage-cr.mdx -================ -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: Preflight -metadata: - name: my-app -spec: - analyzers: - - storageClass: - checkName: Required storage classes - storageClassName: "default" - outcomes: - - fail: - message: Could not find a storage class called "default". - - pass: - message: A storage class called "default" is present. 
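-      # The storageClass analyzer passes only when a StorageClass whose
-      # metadata.name exactly matches storageClassName ("default") exists in the cluster.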
-``` - -================ -File: docs/partials/preflights/_node-storage-secret.mdx -================ -```yaml -apiVersion: v1 -kind: Secret -metadata: - labels: - troubleshoot.sh/kind: preflight - name: "{{ .Release.Name }}-preflight-config" -stringData: - preflight.yaml: | - apiVersion: troubleshoot.sh/v1beta2 - kind: Preflight - metadata: - name: preflight-sample - spec: - analyzers: - - storageClass: - checkName: Required storage classes - storageClassName: "default" - outcomes: - - fail: - message: Could not find a storage class called "default". - - pass: - message: A storage class called "default" is present. -``` - -================ -File: docs/partials/preflights/_preflight-sb-helm-templates.mdx -================ -For more information about using Helm templates with collectors and analyzers, see [Using Helm Templates in Specifications](preflight-sb-helm-templates-about). - -================ -File: docs/partials/preflights/_preflights-add-analyzers.mdx -================ -You must add analyzers to analyze the data from the collectors that you specified. Define the criteria for the pass, fail, and warn outcomes, and specify custom messages for each. - -For example, you can set a `fail` outcome if the MySQL version is less than the minimum required. Then, specify a message to display that informs your customer of the reasons for the failure and steps they can take to fix the issue. - -================ -File: docs/partials/preflights/_preflights-define-xref.mdx -================ -For more information about defining collectors and analyzers, see [Collecting Data](https://troubleshoot.sh/docs/collect/) -and [Analyzing Data](https://troubleshoot.sh/docs/analyze/) in the Troubleshoot documentation. - -================ -File: docs/partials/preflights/_preflights-define.mdx -================ -Any preflight checks you run are dependent on your application needs. This section gives some guidance about how to think about using collectors and analyzers to design preflight checks. - -================ -File: docs/partials/preflights/_preflights-sb-about.mdx -================ -Preflight checks and support bundles are provided by the Troubleshoot open source project, which is maintained by Replicated. Troubleshoot is a kubectl plugin that provides diagnostic tools for Kubernetes applications. For more information, see the open source [Troubleshoot](https://troubleshoot.sh/docs/collect/) documentation. - -Preflight checks and support bundles analyze data from customer environments to provide insights that help users to avoid or troubleshoot common issues with an application: -* **Preflight checks** run before an application is installed to check that the customer environment meets the application requirements. -* **Support bundles** collect troubleshooting data from customer environments to help users diagnose problems with application deployments. - -================ -File: docs/partials/preflights/_preflights-sb-note.mdx -================ -For a comprehensive overview, see [About Preflight Checks and Support Bundles](preflight-support-bundle-about). - -================ -File: docs/partials/preflights/_preflights-spec-locations.mdx -================ -For more information about specifications, see [About Specifications](preflight-support-bundle-about#about-specifications) in _About Preflight Checks and Support Bundles_. 
-
-================
-File: docs/partials/preflights/_preflights-strict.mdx
-================
-If any strict preflight checks are configured, the `--skip-preflights` flag is not honored because the preflight checks must run and contain no failures before the application is deployed.
-
-When the `--deploy` option is provided and there are strict preflight checks, the preflight checks always run. The deployment waits for up to 15 minutes for the preflight checks to complete. If the checks complete without strict preflight failures, the release deploys. If the checks do not complete within 15 minutes, the release does not deploy. If there are one or more strict preflight failures, the release does not deploy.
-
-For more information about strict preflight checks, see [Defining Preflight Checks](/vendor/preflight-defining).
-
-================
-File: docs/partials/proxy-service/_step-creds.mdx
-================
-Provide read-only credentials for the external private registry in your Replicated account. This allows Replicated to access the images through the proxy registry. See [Add Credentials for an External Registry](packaging-private-images#add-credentials-for-an-external-registry) in _Connecting to an External Registry_.
-
-================
-File: docs/partials/proxy-service/_step-custom-domain.mdx
-================
-(Optional) Add a custom domain for the proxy registry instead of `proxy.replicated.com`. See [Using Custom Domains](custom-domains-using).
-
-================
-File: docs/partials/redactors/_redactors-about.mdx
-================
-Troubleshoot has built-in redactors to prevent sensitive data from being collected when support bundles are generated. You can add more redactors if needed. For more information, see [Redacting Data](https://troubleshoot.sh/docs/redact/) in the Troubleshoot documentation.
-
-================
-File: docs/partials/releases/_required-releases-description.mdx
-================
-When a release is required, KOTS requires users to upgrade to that version before they can upgrade to a later version. For example, if you select **Prevent this release from being skipped during upgrades** for release v2.0.0, users with v1.0.0 deployed must upgrade to v2.0.0 before they can upgrade to a version later than v2.0.0, such as v2.1.0.
-
-================
-File: docs/partials/releases/_required-releases-limitations.mdx
-================
-Required releases have the following limitations:
-
- * Required releases are supported in KOTS v1.68.0 and later.
- * After users deploy a required version, they can no longer redeploy (roll back to) versions earlier than the required version, even if `allowRollback` is true in the Application custom resource manifest (see the sketch below). For more information, see [`allowRollback`](/reference/custom-resource-application#allowrollback) in the Application custom resource topic.
- * If you change the channel an existing customer is assigned to, the Admin Console always fetches the latest release on the new channel, regardless of any required releases on the channel. For more information, see [Channel Assignment](licenses-about#channel-assignment) in _About Customers_.
- * Required releases are supported for KOTS installations only and are not supported for releases installed with Helm. The **Prevent this release from being skipped during upgrades** option has no effect if the user installs with Helm.
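-
-As context for the `allowRollback` limitation above, the field is set in the KOTS Application custom resource. A minimal sketch, with illustrative metadata:
-
-```yaml
-apiVersion: kots.io/v1beta1
-kind: Application
-metadata:
-  name: my-app
-spec:
-  # Lets users roll back to previously deployed versions; required releases
-  # still cannot be rolled past once they have been deployed
-  allowRollback: true
-```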
-
-================
-File: docs/partials/releases/_version-label-reqs-helm.mdx
-================
-* The version label for the release must match the version label from one of the `Chart.yaml` files in the release.
-* If there is one Helm chart in the release, Replicated automatically uses the version from the `Chart.yaml` file.
-* If there is more than one Helm chart in the release, Replicated uses the version label from one of the `Chart.yaml` files. You can edit the version label for the release to use the version label from a different `Chart.yaml` file.
-
-================
-File: docs/partials/replicated-cli/_app.mdx
-================
-<tr>
-  <td><code>--app</code></td>
-  <td>string</td>
-  <td>The app slug or app ID to use in all calls. The default uses the <code>$REPLICATED_APP</code> environment variable.</td>
-</tr>
-
-================
-File: docs/partials/replicated-cli/_authorize-with-token-note.mdx
-================
-:::note
-The `replicated login` command creates a token after you log in to your vendor account in a browser and saves it to a config file. Alternatively, if you do not have access to a browser, you can set the `REPLICATED_API_TOKEN` environment variable to authenticate. For more information, see [(Optional) Set Environment Variables](#env-var) below.
-:::
-
-================
-File: docs/partials/replicated-cli/_authtype.mdx
-================
-<tr>
-  <td><code>--authtype</code></td>
-  <td>string</td>
-  <td>Authorization type for the registry. <strong>Default:</strong> password</td>
-</tr>
-
-================
-File: docs/partials/replicated-cli/_chart-yaml-dir-reqs.mdx
-================
-:::note
-If your release supports installations with Replicated KOTS, `--yaml-dir` is required. If your release supports installations with the Helm CLI only, either `--yaml-dir` or `--chart` can be used.
-:::
-
-================
-File: docs/partials/replicated-cli/_help.mdx
-================
-<tr>
-  <td><code>-h, --help</code></td>
-  <td></td>
-  <td>Help for the command.</td>
-</tr>
-
-================
-File: docs/partials/replicated-cli/_login.mdx
-================
-Authorize the Replicated CLI:
-
-   ```
-   replicated login
-   ```
-
-   In the browser window that opens, complete the prompts to log in to your vendor account and authorize the CLI.
-
-   <img width="350" alt="Authorize replicated cli web page" src="/images/authorize-repl-cli.png"/>
-
-   [View a larger version of this image](/images/authorize-repl-cli.png)
-
-================
-File: docs/partials/replicated-cli/_logout.mdx
-================
-(Optional) When you are done using the Replicated CLI, remove any stored credentials created by the `replicated login` command:
-
-   ```
-   replicated logout
-   ```
-
-================
-File: docs/partials/replicated-cli/_output.mdx
-================
-<tr>
-  <td><code>--output</code></td>
-  <td>string</td>
-  <td>
-    <p>The output format to use. <strong>Valid values:</strong> <code>json</code> or <code>table</code>.
Some commands also support <code>wide</code>. <strong>Default:</strong> table</p>
-  </td>
-</tr>
-
-================
-File: docs/partials/replicated-cli/_password-stdin.mdx
-================
-<tr>
-  <td><code>--password-stdin</code></td>
-  <td></td>
-  <td>Takes the password from stdin.</td>
-</tr>
-
-================
-File: docs/partials/replicated-cli/_password.mdx
-================
-<tr>
-  <td><code>--password</code></td>
-  <td>string</td>
-  <td>The password to use when authenticating to the registry.</td>
-</tr>
-
-================
-File: docs/partials/replicated-cli/_skip-validation.mdx
-================
-<tr>
-  <td><code>--skip-validation</code></td>
-  <td></td>
-  <td>Skips the validation of the registry (not recommended).</td>
-</tr>
-
-================
-File: docs/partials/replicated-cli/_sudo-install.mdx
-================
-:::note
-If you do not have root access to the `/usr/local/bin` directory, you can install with sudo by running `sudo mv replicated /usr/local/bin/replicated` instead of `mv replicated /usr/local/bin/replicated`.
-:::
-
-================
-File: docs/partials/replicated-cli/_token-stdin.mdx
-================
-<tr>
-  <td><code>--token-stdin</code></td>
-  <td></td>
-  <td>Takes the token from stdin.</td>
-</tr>
-
-================
-File: docs/partials/replicated-cli/_token.mdx
-================
-<tr>
-  <td><code>--token</code></td>
-  <td>string</td>
-  <td>The API token used to access your application in the Vendor API. The default uses the <code>$REPLICATED_API_TOKEN</code> environment variable.</td>
-</tr>
-
-================
-File: docs/partials/replicated-cli/_username.mdx
-================
-<tr>
-  <td><code>--username</code></td>
-  <td>string</td>
-  <td>The username with which to authenticate to the registry.</td>
-</tr>
-
-================
-File: docs/partials/replicated-cli/_verify-install.mdx
-================
-Verify that the installation was successful:
-
-   ```
-   replicated --help
-   ```
-
-================
-File: docs/partials/replicated-cli/_yaml-dir.mdx
-================
-<tr>
-  <td><code>--yaml-dir</code></td>
-  <td>path</td>
-  <td>The directory containing multiple YAML manifest files for a release. <strong>(Required)</strong></td>
-</tr>
-
-================
-File: docs/partials/replicated-sdk/_401-unauthorized.mdx
-================
-:::note
-If you see a `401 Unauthorized` error message, log out of the Replicated registry by running `helm registry logout registry.replicated.com` and then run `helm package . --dependency-update` again.
-:::
-
-================
-File: docs/partials/replicated-sdk/_dependency-yaml.mdx
-================
-```yaml
-# Chart.yaml
-dependencies:
-- name: replicated
-  repository: oci://registry.replicated.com/library
-  version: 1.1.1
-```
-
-For the latest version information for the Replicated SDK, see the [replicated-sdk repository](https://github.com/replicatedhq/replicated-sdk/releases) in GitHub.
-
-================
-File: docs/partials/replicated-sdk/_integration-mode-install.mdx
-================
-You can install the Replicated SDK in integration mode to develop locally against the SDK API without needing to add the SDK to your application, create a release in the Replicated Vendor Portal, or make changes in your environment. You can also use integration mode to test sending instance data to the Vendor Portal, including any custom metrics that you configure.
-
-To use integration mode, install the Replicated SDK as a standalone component using a valid Development license created in the Vendor Portal.
After you install in integration mode, the SDK provides default mock data for requests to the SDK API `app` endpoints. Requests to the `license` endpoints use the real data from your Development license. - -To install the SDK in integration mode: - -1. Create a Development license that you can use to install the SDK in integration mode: - - 1. In the Vendor Portal, go to **Customers** and click **Create customer**. - - 1. Complete the following fields: - - 1. For **Customer name**, add a name for the customer. - - 1. For **Assigned channel**, assign the customer to the channel that you use for testing. For example, Unstable. - - 1. For **Customer type**, select **Development**. - - 1. For **Customer email**, add the email address that you want to use for the license. - - 1. For **Install types**, ensure that the **Existing Cluster (Helm CLI)** option is enabled. - - 1. (Optional) Add any license field values that you want to use for testing: - - 1. For **Expiration policy**, you can add an expiration date for the license. - - 1. For **Custom fields**, you can add values for any custom license fields in your application. For information about how to create custom license fields, see [Managing Customer License Fields](/vendor/licenses-adding-custom-fields). - - 1. Click **Save Changes**. - -1. On the **Manage customer** page for the customer you created, click **Helm install instructions**. - - <img alt="Helm install instructions button on the manage customer page" src="/images/helm-install-instructions-button.png" width="700px"/> - - [View a larger version of this image](/images/helm-install-instructions-button.png) - -1. In the **Helm install instructions** dialog, copy and run the command to log in to the Replicated registry. - - <img alt="Registry login command in the Helm install instructions dialog" src="/images/helm-install-instructions-registry-login.png" width="600px"/> - - [View a larger version of this image](/images/helm-install-instructions-registry-login.png) - -1. From the same dialog, copy and run the command to install the SDK in integration mode: - - <img alt="SDK integration mode install command in the Helm install instructions dialog" src="/images/helm-install-instructions-sdk-integration.png" width="600px"/> - - [View a larger version of this image](/images/helm-install-instructions-sdk-integration.png) - -1. Make requests to the SDK API from your application. You can access the SDK API for testing by forwarding the API service to your local machine. For more information, see [Port Forwarding the SDK API Service](/vendor/replicated-sdk-development#port-forward). - -================ -File: docs/partials/replicated-sdk/_kots-version-req.mdx -================ -To install the SDK with a Replicated installer, KOTS v1.104.0 or later and the SDK version 1.0.0-beta.12 or later are required. You can verify the version of KOTS installed with `kubectl kots version`. For Replicated Embedded Cluster installations, you can see the version of KOTS that is installed by your version of Embedded Cluster in the [Embedded Cluster Release Notes](/release-notes/rn-embedded-cluster). - -================ -File: docs/partials/replicated-sdk/_overview.mdx -================ -The Replicated SDK is a Helm chart that can be installed as a small service alongside your application. The SDK can be installed alongside applications packaged as Helm charts or Kubernetes manifests. The SDK can be installed using the Helm CLI or KOTS. 
-
-For information about how to distribute and install the SDK with your application, see [Installing the Replicated SDK](/vendor/replicated-sdk-installing).
-
-Replicated recommends that the SDK is distributed with all applications because it provides access to key Replicated functionality, such as:
-
-* Automatic access to insights and operational telemetry for instances running in customer environments, including granular details about the status of different application resources. For more information, see [About Instance and Event Data](/vendor/instance-insights-event-data).
-* An in-cluster API that you can use to embed Replicated features into your application, including:
-  * Collect custom metrics on instances running in online or air gap environments. See [Configuring Custom Metrics](/vendor/custom-metrics).
-  * Check customer license entitlements at runtime. See [Querying Entitlements with the Replicated SDK API](/vendor/licenses-reference-sdk) and [Verifying License Field Signatures with the Replicated SDK API](/vendor/licenses-verify-fields-sdk-api).
-  * Provide update checks to alert customers when new versions of your application are available for upgrade. See [Support Update Checks in Your Application](/reference/replicated-sdk-apis#support-update-checks-in-your-application) in _Replicated SDK API_.
-  * Programmatically name or tag instances from the instance itself. See [Programmatically Set Tags](/reference/replicated-sdk-apis#post-appinstance-tags).
-
-================
-File: docs/partials/replicated-sdk/_registry-logout.mdx
-================
-If you see a 401 Unauthorized error after running `helm dependency update`, run the following command to remove credentials from the Replicated registry, then re-run `helm dependency update`:
-
-```bash
-helm registry logout registry.replicated.com
-```
-
-For more information, see [401 Unauthorized Error When Updating Helm Dependencies](replicated-sdk-installing#401).
-
-================
-File: docs/partials/replicated-sdk/_sdk-values.mdx
-================
-When a user installs a Helm chart that includes the Replicated SDK as a dependency, a set of default SDK values are included in the `replicated` key of the parent chart's values file.
-
-For example:
-
-```yaml
-# values.yaml
-
-replicated:
-  enabled: true
-  appName: gitea
-  channelID: 2jKkegBMseH5w...
-  channelName: Beta
-  channelSequence: 33
-  integration:
-    enabled: true
-  license: {}
-  parentChartURL: oci://registry.replicated.com/gitea/beta/gitea
-  releaseCreatedAt: "2024-11-25T20:38:22Z"
-  releaseNotes: 'CLI release'
-  releaseSequence: 88
-  replicatedAppEndpoint: https://replicated.app
-  versionLabel: Beta-1234
-```
-
-These `replicated` values can be referenced by the application or set during installation as needed. For example, if users need to add labels or annotations to everything that runs in their cluster, then they can pass the labels or annotations to the relevant value in the SDK subchart.
-
-For the default Replicated SDK Helm chart values file, see [values.yaml.tmpl](https://github.com/replicatedhq/replicated-sdk/blob/main/chart/values.yaml.tmpl) in the [replicated-sdk](https://github.com/replicatedhq/replicated-sdk) repository in GitHub.
-
-The SDK Helm values also include a `replicated.license` field, which is a string that contains the YAML representation of the customer license. For more information about the built-in fields in customer licenses, see [Built-In License Fields](licenses-using-builtin-fields).
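-
-As an illustration, a template in the parent chart could reference one of these SDK values. The label key and template file below are hypothetical:
-
-```yaml
-# templates/deployment.yaml (excerpt)
-metadata:
-  labels:
-    # Exposes the release channel from the SDK subchart values shown above
-    example.com/release-channel: "{{ .Values.replicated.channelName }}"
-```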
-
-================
-File: docs/partials/snapshots/_checkVersion.mdx
-================
-Run `velero version --client-only` to check the version of the velero CLI that you installed as part of [Installing the Velero CLI](snapshots-velero-cli-installing).
-
-================
-File: docs/partials/snapshots/_installVelero.mdx
-================
-Run one of the following commands to install Velero, depending on the version of the velero CLI you are using:
-
- * **Velero v1.10 and later**:
-
-   ```bash
-   velero install \
-   --no-default-backup-location \
-   --no-secret \
-   --use-node-agent --uploader-type=restic \
-   --use-volume-snapshots=false \
-   --plugins velero/velero-plugin-for-aws:v1.5.3
-   ```
-
- * **Velero versions earlier than v1.10**:
-
-   ```bash
-   velero install \
-   --no-default-backup-location \
-   --no-secret \
-   --use-restic \
-   --use-volume-snapshots=false \
-   --plugins velero/velero-plugin-for-aws:v1.5.3
-   ```
-
-================
-File: docs/partials/snapshots/_limitation-cli-restores.mdx
-================
-Only full backups can be restored using the KOTS CLI. To restore an application from a partial backup, use the Admin Console. See [Restore the Application Only Using the Admin Console](/enterprise/snapshots-restoring-full#admin-console).
-
-================
-File: docs/partials/snapshots/_limitation-dr.mdx
-================
-Only full backups that include both the application and the Admin Console can be restored to a new cluster in disaster recovery scenarios. Partial backups that include the application only _cannot_ be restored to a new cluster, and are therefore not usable for disaster recovery.
-
-================
-File: docs/partials/snapshots/_limitation-install-method.mdx
-================
-Snapshots can be restored only to clusters that use the same installation method as the cluster the snapshot was taken from. For example, snapshots taken in an online (internet-connected) cluster must be restored to an online cluster.
-
-================
-File: docs/partials/snapshots/_limitation-no-ec-support.mdx
-================
-The KOTS Snapshots feature is supported for existing cluster installations with KOTS and Replicated kURL installations only. Snapshots is not supported for Replicated Embedded Cluster installations. For more information about configuring backup and restore for Embedded Cluster, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery).
-
-================
-File: docs/partials/snapshots/_limitation-os.mdx
-================
-Snapshots must be restored on the same operating system that the snapshot was taken on. For example, snapshots taken on a CentOS cluster must be restored on a CentOS cluster.
-
-================
-File: docs/partials/snapshots/_node-agent-mem-limit.mdx
-================
-Increase the default memory limit for the node-agent (restic) Pod if your application is particularly large. For more information about configuring Velero resource requests and limits, see [Customize resource requests and limits](https://velero.io/docs/v1.10/customize-installation/#customize-resource-requests-and-limits) in the Velero documentation.
-
-For example, the following kubectl commands increase the memory limit for the node-agent (restic) daemon set from the default of 1Gi to 2Gi.
-
-**Velero 1.10 and later**:
-
-```
-kubectl -n velero patch daemonset node-agent -p '{"spec":{"template":{"spec":{"containers":[{"name":"node-agent","resources":{"limits":{"memory":"2Gi"}}}]}}}}'
-```
-
-**Velero versions earlier than 1.10**:
-
-```
-kubectl -n velero patch daemonset restic -p '{"spec":{"template":{"spec":{"containers":[{"name":"restic","resources":{"limits":{"memory":"2Gi"}}}]}}}}'
-```
-
-Alternatively, you can potentially avoid the node-agent (restic) Pod reaching the memory limit during snapshot creation by running the following kubectl command to lower the memory garbage collection target percentage on the node-agent (restic) daemon set:
-
-**Velero 1.10 and later**:
-
-```
-kubectl -n velero set env daemonset/node-agent GOGC=1
-```
-
-**Velero versions earlier than 1.10**:
-
-```
-kubectl -n velero set env daemonset/restic GOGC=1
-```
-
-================
-File: docs/partials/snapshots/_registryCredentialsNote.mdx
-================
-:::note
-It is typical for the velero and node-agent (restic) Pods to be in the `ErrImagePull` or `ImagePullBackOff` state after you run the `velero install` command because Velero does not support passing registry credentials during installation. In Replicated KOTS v1.94.0 and later, this situation resolves itself after you complete the instructions to configure the storage destination.
-
-If you are on an earlier version of KOTS, Replicated recommends that you upgrade to KOTS v1.94.0 or later. Otherwise, you must patch the Velero deployment manually and add the image pull secret to access the registry.
-:::
-
-================
-File: docs/partials/snapshots/_resticDaemonSet.mdx
-================
-Configure the Restic DaemonSet specification if your cluster uses one of the following Kubernetes distributions:
- * RancherOS
- * OpenShift
- * Microsoft Azure
- * VMware Tanzu Kubernetes Grid Integrated Edition
-
-For information about how to configure the Restic DaemonSet for these distributions, see [Configure Restic DaemonSet spec](https://velero.io/docs/v1.9/restic/#configure-restic-daemonset-spec) in the Velero documentation.
-
-================
-File: docs/partials/snapshots/_restore-types.mdx
-================
-Snapshots supports the following types of restores:
-* Restore both the application and the KOTS Admin Console (also referred to as a _full_ restore)
-* Restore the KOTS Admin Console only
-* Restore the application only (also referred to as a _partial_ restore)
-
-================
-File: docs/partials/snapshots/_restoreTable.mdx
-================
-<table>
-  <tr>
-    <th width="25%">Restore Type</th>
-    <th width="50%">Description</th>
-    <th width="25%">Interface to Use</th>
-  </tr>
-  <tr>
-    <td>Full restore</td>
-    <td>Restores the Admin Console and the application.</td>
-    <td>KOTS CLI</td>
-  </tr>
-  <tr>
-    <td>Partial restore</td>
-    <td>Restores the application only.</td>
-    <td>KOTS CLI or Admin Console</td>
-  </tr>
-  <tr>
-    <td>Admin Console restore</td>
-    <td>Restores the Admin Console only.</td>
-    <td>KOTS CLI</td>
-  </tr>
-</table>
-
-================
-File: docs/partials/snapshots/_step-get-backups.mdx
-================
-Run the [`kubectl kots get backups`](/reference/kots-cli-get-backups) command to get the list of full backups for the instance.
-
-================
-File: docs/partials/snapshots/_step-restore.mdx
-================
-Run the following command to restore a full backup:
-
-   ```bash
-   kubectl kots restore --from-backup BACKUP
-   ```
-   Replace `BACKUP` with the name of the backup to restore from.
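-
-   For example, if `kubectl kots get backups` lists a full backup named `instance-4n2yw` (a hypothetical name), the restore command is:
-
-   ```bash
-   kubectl kots restore --from-backup instance-4n2yw
-   ```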
- - For more information about the available options for the `kots restore` command, including application-only and Admin Console-only options, see [restore](/reference/kots-cli-restore-index/). - -================ -File: docs/partials/snapshots/_updateDefaultStorage.mdx -================ -If Velero is already installed, you can update your storage destination in the Replicated Admin Console. - -For embedded clusters with the Velero add-on, you must update the default internal storage settings in the Admin Console because internal storage is insufficient for full backups. - -For more information about updating storage, see [Updating Settings in the Admin Console](snapshots-updating-with-admin-console). - -================ -File: docs/partials/status-informers/_aggregate-status-intro.mdx -================ -When you provide more than one Kubernetes resource, Replicated aggregates all resource statuses to display a single application status. - -Replicated uses the least available resource status to represent the aggregate application status. For example, if at least one resource has an Unavailable status, then the aggregate application status is Unavailable. - -================ -File: docs/partials/status-informers/_aggregateStatus.mdx -================ -The following table describes the resource statuses that define each aggregate application status: - -<table> - <tbody> - <tr> - <th>Resource Statuses</th> - <th>Aggregate Application Status</th> - </tr> - <tr> - <td>No status available for any resource</td> - <td>Missing</td> - </tr> - <tr> - <td>One or more resources Unavailable</td> - <td>Unavailable</td> - </tr> - <tr> - <td>One or more resources Degraded</td> - <td>Degraded</td> - </tr> - <tr> - <td>One or more resources Updating</td> - <td>Updating</td> - </tr> - <tr> - <td>All resources Ready</td> - <td>Ready</td> - </tr> - </tbody> -</table> - -================ -File: docs/partials/status-informers/_statusesTable.mdx -================ -<table> - <tbody> - <tr> - <th width="10%"></th> - <th width="15%">Deployment</th> - <th width="15%">StatefulSet</th> - <th width="15%">Service</th> - <th width="15%">Ingress</th> - <th width="15%">PVC</th> - <th width="15%">DaemonSet</th> - </tr> - <tr> - <th>Ready</th> - <td>Ready replicas equals desired replicas</td> - <td>Ready replicas equals desired replicas</td> - <td>All desired endpoints are ready, any load balancers have been assigned</td> - <td>All desired backend service endpoints are ready, any load balancers have been assigned</td> - <td>Claim is bound</td> - <td>Ready daemon pods equals desired scheduled daemon pods</td> - </tr> - <tr> - <th>Updating</th> - <td>The deployed replicas are from a different revision</td> - <td>The deployed replicas are from a different revision</td> - <td>N/A</td> - <td>N/A</td> - <td>N/A</td> - <td>The deployed daemon pods are from a different revision</td> - </tr> - <tr> - <th>Degraded</th> - <td>At least 1 replica is ready, but more are desired</td> - <td>At least 1 replica is ready, but more are desired</td> - <td>At least one endpoint is ready, but more are desired</td> - <td>At least one backend service endpoint is ready, but more are desired</td> - <td>N/A</td> - <td>At least one daemon pod is ready, but more are desired</td> - </tr> - <tr> - <th>Unavailable</th> - <td>No replicas are ready</td> - <td>No replicas are ready</td> - <td>No endpoints are ready, no load balancer has been assigned</td> - <td>No backend service endpoints are ready, no load balancer has been assigned</td> - <td>Claim is 
pending or lost</td> - <td>No daemon pods are ready</td> - </tr> - <tr> - <th>Missing</th> - <td colSpan="6">Missing is an initial deployment status indicating that informers have not reported their status because the application has just been deployed and the underlying resource has not been created yet. After the resource is created, the status changes. However, if a resource changes from another status to Missing, then the resource was either deleted or the informers failed to report a status.</td> - </tr> - </tbody> -</table> - -================ -File: docs/partials/support-bundles/_configmap-note.mdx -================ -:::note -Alternatively, you can use a ConfigMap (`kind: ConfigMap`) if the specification will not collect private information from the cluster. -::: - -================ -File: docs/partials/support-bundles/_customize-support-bundle-spec.mdx -================ -When customizing your support bundle specifications, consider the following guidelines: - -- The `clusterInfo` and `clusterResources` collectors are useful because they collect a large amount of data to help with installation and debugging. - - ```yaml - apiVersion: troubleshoot.sh/v1beta2 - kind: SupportBundle - metadata: - name: collectors - spec: - collectors: - - clusterInfo: - exclude: false - - clusterResources: - exclude: false - ``` -- You can edit the default collector properties. If `clusterResources` is defined in your specification, the default namespace cannot be removed, but you can add a namespace to the `namespaces` field. - - ```yaml - apiVersion: troubleshoot.sh/v1beta2 - kind: SupportBundle - metadata: - name: collectors - spec: - collectors: - - clusterInfo: - exclude: false - - clusterResources: - namespaces: - - default - - APP_NAMESPACE - ``` - Replace `APP_NAMESPACE` with the application namespace. - -- Add application Pod logs and set the collection limits for the number of lines logged. Typically the selector attribute is matched to the labels. - - 1. To get the labels for an application, inspect the Pods YAML. - - 1. Create collectors to include logs from these pods in a bundle. Depending on the complexity of an application's labeling schema, you might need a few different declarations of the logs collector. You can include the `logs` collector specification multiple times. - - The limits field can support `maxAge` or `maxLines`. This limits the output to the constraints provided. **Default:** `maxLines: 10000` - - **Example:** - - ```yaml - apiVersion: troubleshoot.sh/v1beta2 - kind: SupportBundle - metadata: - name: collectors - spec: - collectors: - - logs: - selector: - - app=api - namespace: default - limits: - maxLines: 10000 - ``` - -- Add any custom collectors to the file. Collectors that Replicated recommends considering are: - - - **Kubernetes resources:** Use for custom resource definitions (CRDs), secrets, and ConfigMaps, if they are required for your application to work. - - **Databases:** Return a selection of rows or entire tables. - - **Volumes:** Ensure that an application's persistent state files exist, are readable/writeable, and have the right permissions. - - **Pods:** Run a pod from a custom image. - - **Files:** Copy files from pods and hosts. - - **HTTP:** Consume your own application APIs with HTTP requests. If your application has its own API that serves status, metrics, performance data, and so on, this information can be collected and analyzed. - -- Add analyzers based on conditions that you expect for your application. 
For example, you might require that a cluster have at least 2 CPUs and 4GB memory available.
-
-  Good analyzers clearly identify failure modes. For example, if you can identify a log message from your database component that indicates a problem, you should write an analyzer that checks for that log.
-
-  At a minimum, include application log analyzers. A simple text analyzer can detect specific log lines and inform an end user of remediation steps.
-
-  Analyzers that Replicated recommends considering are:
-
-  - **Resource statuses:** Check the status of various resources, such as Deployments, StatefulSets, Jobs, and so on.
-  - **Regular expressions:** Analyze arbitrary data.
-  - **Databases:** Check the version and connection status.
-
-- If needed, you can add custom redactors to the default redactors. Disabling the redactors is not recommended.
-
-================
-File: docs/partials/support-bundles/_deploy-status-cr.mdx
-================
-```yaml
-apiVersion: troubleshoot.sh/v1beta2
-kind: SupportBundle
-metadata:
-  name: example
-spec:
-  collectors: []
-  analyzers:
-    - deploymentStatus:
-        name: api
-        namespace: default
-        outcomes:
-          - fail:
-              when: "< 1"
-              message: The API deployment does not have any ready replicas.
-          - warn:
-              when: "= 1"
-              message: The API deployment has only a single ready replica.
-          - pass:
-              message: There are multiple replicas of the API deployment ready.
-```
-
-================
-File: docs/partials/support-bundles/_deploy-status-secret.mdx
-================
-```yaml
-apiVersion: v1
-kind: Secret
-metadata:
-  name: example
-  labels:
-    troubleshoot.sh/kind: support-bundle
-stringData:
-  support-bundle-spec: |-
-    apiVersion: troubleshoot.sh/v1beta2
-    kind: SupportBundle
-    metadata:
-      name: example
-    spec:
-      collectors: []
-      analyzers:
-        - deploymentStatus:
-            name: api
-            namespace: default
-            outcomes:
-              - fail:
-                  when: "< 1"
-                  message: The API deployment does not have any ready replicas.
-              - warn:
-                  when: "= 1"
-                  message: The API deployment has only a single ready replica.
-              - pass:
-                  message: There are multiple replicas of the API deployment ready.
-```
-
-================
-File: docs/partials/support-bundles/_ec-support-bundle-intro.mdx
-================
-Embedded Cluster includes a default support bundle spec that collects both host- and cluster-level information.
-
-The host-level information is useful for troubleshooting failures related to host configuration like DNS, networking, or storage problems. Cluster-level information includes details about the components provided by Replicated, such as the Admin Console and Embedded Cluster operator that manage install and upgrade operations. If the cluster has not installed successfully and cluster-level information is not available, then it is excluded from the bundle.
-
-In addition to the host- and cluster-level details provided by the default Embedded Cluster spec, support bundles generated for Embedded Cluster installations also include app-level details provided by any custom support bundle specs that you included in the application release.
-
-================
-File: docs/partials/support-bundles/_generate-bundle-admin-console.mdx
-================
-The Replicated KOTS Admin Console includes a **Troubleshoot** page where you can generate a support bundle and review remediation suggestions for troubleshooting. You can also download the support bundle from the Admin Console.
-
-To generate a support bundle in the KOTS Admin Console:
-
-1. Log in to the Admin Console and go to the **Troubleshoot** tab.
-
-1. Click **Analyze** to start analyzing the application. Or, copy the command provided to generate a bundle from the CLI.
-
-   The analysis executes the support bundle plugin. After the analysis completes, the bundle is available on the **Troubleshoot** tab in the Admin Console. If any known issues are detected, they are highlighted with possible remediation suggestions.
-
-   :::note
-   No data leaves the cluster. Data is never sent across the internet or to anyone else.
-   :::
-
-1. (Optional) If enabled for your online installation, you might also see a **Send bundle to vendor** button. Clicking this button sends the support bundle directly to your vendor. Replicated recommends following up with your vendor to let them know the bundle has been provided.
-
-   <img alt="Send bundle to vendor screen" src="/images/send-bundle-to-vendor.png" width="800px"/>
-
-   [View a larger version of this image](/images/send-bundle-to-vendor.png)
-
-1. (Optional) Click **Download bundle** to download the support bundle. You can send the bundle to your vendor for assistance.
-
-================
-File: docs/partials/support-bundles/_generate-bundle-default-kots.mdx
-================
-For KOTS installations, you can generate a support bundle using the default KOTS spec. This is useful if the application does not have a support bundle spec included.
-
-#### Online Environments
-
-In an online environment, run the following command to generate a support bundle using the default KOTS spec:
-
-```
-kubectl support-bundle https://kots.io
-```
-
-#### Air Gap Environments
-
-For air gap environments, perform the following steps to generate a support bundle using the default KOTS spec:
-
-1. Run the following command from a computer with internet access to download the default KOTS spec:
-
-   ```
-   curl -o spec.yaml https://kots.io -H 'User-agent:Replicated_Troubleshoot/v1beta1'
-   ```
-
-1. Upload the `spec.yaml` file to your air gap server.
-
-1. Run the following command to create a support bundle using the uploaded `spec.yaml` file:
-
-   ```
-   kubectl support-bundle /path/to/spec.yaml
-   ```
-
-================
-File: docs/partials/support-bundles/_generate-bundle-ec.mdx
-================
-There are different steps to generate a support bundle depending on the version of Embedded Cluster installed.
-
-### For Versions 1.17.0 and Later
-
-For Embedded Cluster 1.17.0 and later, you can run the Embedded Cluster `support-bundle` command to generate a support bundle.
-
-The `support-bundle` command uses the default Embedded Cluster support bundle spec to collect both cluster- and host-level information. It also automatically includes any application-specific support bundle specs in the generated bundle.
-
-To generate a support bundle:
-
-1. SSH onto a controller node.
-
-   :::note
-   You can SSH onto a worker node to generate a support bundle that contains information specific to that node. However, when run on a worker node, the `support-bundle` command does not capture cluster-wide information.
-   :::
-
-1. Run the following command:
-
-   ```bash
-   sudo ./APP_SLUG support-bundle
-   ```
-
-   Where `APP_SLUG` is the unique slug for the application.
-
-### For Versions Earlier Than 1.17.0
-
-For Embedded Cluster versions earlier than 1.17.0, you can generate a support bundle from the shell using the kubectl support-bundle plugin.
-
-To generate a bundle with the support-bundle plugin, you pass the default Embedded Cluster spec to collect both cluster- and host-level information.
You also pass the `--load-cluster-specs` flag, which discovers all support bundle specs that are defined in Secrets or ConfigMaps in the cluster. This ensures that any application-specific specs are also included in the bundle. For more information, see [Discover Cluster Specs](https://troubleshoot.sh/docs/support-bundle/discover-cluster-specs/) in the Troubleshoot documentation. - -To generate a bundle: - -1. SSH onto a controller node. - -1. Use the Embedded Cluster shell command to start a shell with access to the cluster: - - ```bash - sudo ./APP_SLUG shell - ``` - Where `APP_SLUG` is the unique slug for the application. - - The output looks similar to the following: - - ```bash - __4___ - _ \ \ \ \ Welcome to APP_SLUG debug shell. - <'\ /_/_/_/ This terminal is now configured to access your cluster. - ((____!___/) Type 'exit' (or CTRL+d) to exit. - \0\0\0\0\/ Happy hacking. - ~~~~~~~~~~~ - root@alex-ec-2:/home/alex# export KUBECONFIG="/var/lib/embedded-cluster/k0s/pki/admin.conf" - root@alex-ec-2:/home/alex# export PATH="$PATH:/var/lib/embedded-cluster/bin" - root@alex-ec-2:/home/alex# source <(kubectl completion bash) - root@alex-ec-2:/home/alex# source /etc/bash_completion - ``` - - The appropriate kubeconfig is exported, and the location of useful binaries like kubectl and the preflight and support-bundle plugins is added to PATH. - - :::note - The shell command cannot be run on non-controller nodes. - ::: - -2. Generate the support bundle using the default Embedded Cluster spec and the `--load-cluster-specs` flag: - - ```bash - kubectl support-bundle --load-cluster-specs /var/lib/embedded-cluster/support/host-support-bundle.yaml - ``` - -================ -File: docs/partials/support-bundles/_generate-bundle-host.mdx -================ -To generate a kURL host support bundle: - -1. Do one of the following: - - - Save the host support bundle YAML file on the host. For more information about creating a YAML spec for a host support bundle, see [Create a Host Support Bundle Spec](/vendor/support-host-support-bundles#create-a-host-support-bundle-spec). - - - Run the following command to download the default kURL host support bundle YAML file from the Troubleshoot repository: - - ``` - kubectl support-bundle https://raw.githubusercontent.com/replicatedhq/troubleshoot-specs/main/host/default.yaml - ``` - - :::note - For air gap environments, download the YAML file and copy it to the air gap machine. - ::: - -1. Run the following command on the host to generate a support bundle: - - ``` - ./support-bundle --interactive=false PATH/FILE.yaml - ``` - - Replace: - - `PATH` with the path to the host support bundle YAML file. - - `FILE` with the name of the host support bundle YAML file from your vendor. - - :::note - Root access is typically not required to run the host collectors and analyzers. However, depending on what is being collected, you might need to run the support-bundle binary with elevated permissions. For example, if you run the `filesystemPerformance` host collector against `/var/lib/etcd` and the user running the binary does not have permissions on this directory, the collection process fails. - ::: - -1. Share the host support bundle with your vendor's support team, if needed. - -1. Repeat these steps for each node because there is no method to generate host support bundles on remote hosts. If you have a multi-node kURL cluster, you must run the support-bundle binary on each node to generate a host support bundle for each node. 
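-
-For reference, a minimal host support bundle spec is sketched below. The collectors, analyzer, and threshold shown are illustrative, not a default spec:
-
-```yaml
-apiVersion: troubleshoot.sh/v1beta2
-kind: SupportBundle
-metadata:
-  name: host-bundle
-spec:
-  hostCollectors:
-    # Collect basic CPU and memory details from the host
-    - cpu: {}
-    - memory: {}
-    # Collect disk usage for the root filesystem
-    - diskUsage:
-        collectorName: root-disk
-        path: /
-  hostAnalyzers:
-    - memory:
-        checkName: Amount of memory
-        outcomes:
-          - fail:
-              when: "< 8G"
-              message: The host must have at least 8 GB of memory.
-          - pass:
-              message: The host has sufficient memory.
-```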
- -================ -File: docs/partials/support-bundles/_generate-bundle.mdx -================ -Run the following command to generate a bundle: - -```bash -kubectl support-bundle --load-cluster-specs -``` - -The `--load-cluster-specs` flag automatically discovers all support bundle specs that are defined in Secrets or ConfigMaps in the cluster. For more information, see [Discover Cluster Specs](https://troubleshoot.sh/docs/support-bundle/discover-cluster-specs/) in the Troubleshoot documentation. - -For a complete list of options with the `kubectl support-bundle` command, run `kubectl support-bundle --help`. For more information, see [Collecting a Support Bundle](https://troubleshoot.sh/docs/support-bundle/collecting/) in the Troubleshoot documentation. - -================ -File: docs/partials/support-bundles/_http-requests-cr.mdx -================ -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: SupportBundle -metadata: - name: example -spec: - collectors: - - http: - collectorName: slack - get: - url: https://api.slack.com/methods/api.test - analyzers: - - textAnalyze: - checkName: Slack Accessible - fileName: slack.json - regex: '"status": 200,' - outcomes: - - pass: - when: "true" - message: "Can access the Slack API" - - fail: - when: "false" - message: "Cannot access the Slack API. Check that the server can reach the internet and check [status.slack.com](https://status.slack.com)." -``` - -================ -File: docs/partials/support-bundles/_http-requests-secret.mdx -================ -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: example - labels: - troubleshoot.sh/kind: support-bundle -stringData: - support-bundle-spec: |- - apiVersion: troubleshoot.sh/v1beta2 - kind: SupportBundle - metadata: - name: example - spec: - collectors: - - http: - collectorName: slack - get: - url: https://api.slack.com/methods/api.test - analyzers: - - textAnalyze: - checkName: Slack Accessible - fileName: slack.json - regex: '"status": 200,' - outcomes: - - pass: - when: "true" - message: "Can access the Slack API" - - fail: - when: "false" - message: "Cannot access the Slack API. Check that the server can reach the internet and check [status.slack.com](https://status.slack.com)." -``` - -================ -File: docs/partials/support-bundles/_install-plugin.mdx -================ -The support-bundle plugin (a kubectl plugin) is required to generate support bundles from the command line. - -You can install the support-bundle plugin using krew or install it manually from the release archives. - -:::note -For Replicated Embedded Cluster and Replicated kURL installations, the support-bundle plugin is automatically installed on all of the control plane nodes. You can skip this prerequisite. -::: - -#### Install or Upgrade using krew - -To install the support-bundle plugin using krew, do one of the following: - -* If krew is _not_ installed already, run the following command to install krew and the support-bundle plugin at the same time: - - ``` - curl https://krew.sh/support-bundle | bash - ``` - -* If krew is installed already, run the following command to install the plug-in: - - ``` - kubectl krew install support-bundle - ``` - -* To upgrade your existing support-bundle plugin using krew: - - ``` - kubectl krew upgrade support-bundle - ``` - -#### Install Manually - -If you do not want to install the plugin using krew or want an easier way to install the plugin in an air gap environment, you can install the plugin manually from the release archives. 
- -To install the support-bundle plugin manually: - -1. Run the following command to download and unarchive the latest release, and move the plugin to your $PATH: - - ``` - curl -L https://github.com/replicatedhq/troubleshoot/releases/latest/download/support-bundle_linux_amd64.tar.gz | tar xzvf - - sudo mv ./support-bundle /usr/local/bin/kubectl-support_bundle - ``` - :::note - If you do not have root access, or choose not to add the support-bundle plugin to your path, you can run the binary directly from where you unzipped it by executing `./support-bundle`. If you choose not to put the plugin into your $PATH, then replace all instances of `kubectl support-bundle` in these instructions with `./support-bundle` or with the absolute path to the binary. - ::: - -1. (Optional) Run the following command to test that the installation is working: - - ``` - kubectl support-bundle --help - ``` - -================ -File: docs/partials/support-bundles/_k8s-version-cr.mdx -================ -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: SupportBundle -metadata: - name: example -spec: - collectors: [] - analyzers: - - clusterVersion: - outcomes: - - fail: - message: This application relies on kubernetes features only present in 1.16.0 - and later. - uri: https://kubernetes.io - when: < 1.16.0 - - warn: - message: Your cluster is running a version of kubernetes that is out of support. - uri: https://kubernetes.io - when: < 1.24.0 - - pass: - message: Your cluster meets the recommended and required versions of Kubernetes. -``` - -================ -File: docs/partials/support-bundles/_k8s-version-secret.mdx -================ -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: example - labels: - troubleshoot.sh/kind: support-bundle -stringData: - support-bundle-spec: |- - apiVersion: troubleshoot.sh/v1beta2 - kind: SupportBundle - metadata: - name: example - spec: - collectors: [] - analyzers: - - clusterVersion: - outcomes: - - fail: - message: This application relies on kubernetes features only present in 1.16.0 - and later. - uri: https://kubernetes.io - when: < 1.16.0 - - warn: - message: Your cluster is running a version of kubernetes that is out of support. - uri: https://kubernetes.io - when: < 1.24.0 - - pass: - message: Your cluster meets the recommended and required versions of Kubernetes.
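- # Note added for clarity (assumed behavior of the clusterVersion analyzer): the when values - # are semver comparisons against the cluster's Kubernetes version, and outcomes are evaluated - # in order, with the first matching outcome reported.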
-``` - -================ -File: docs/partials/support-bundles/_logs-limits-cr.mdx -================ -```yaml -apiVersion: troubleshoot.replicated.com/v1beta1 -kind: SupportBundle -metadata: - name: example -spec: - collectors: - - logs: - selector: - - app.kubernetes.io/name=myapp - namespace: '{{repl Namespace }}' - limits: - maxAge: 720h - maxLines: 10000 -``` - -================ -File: docs/partials/support-bundles/_logs-limits-secret.mdx -================ -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: example - labels: - troubleshoot.sh/kind: support-bundle -stringData: - support-bundle-spec: |- - apiVersion: troubleshoot.sh/v1beta2 - kind: SupportBundle - metadata: - name: example - spec: - collectors: - - logs: - selector: - - app.kubernetes.io/name=myapp - namespace: {{ .Release.Namespace }} - limits: - maxAge: 720h - maxLines: 10000 -``` - -================ -File: docs/partials/support-bundles/_logs-selectors-cr.mdx -================ -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: SupportBundle -metadata: - name: example -spec: - collectors: - - logs: - namespace: example-namespace - selector: - - app=slackernews-nginx - - logs: - namespace: example-namespace - selector: - - app=slackernews-api - - logs: - namespace: example-namespace - selector: - - app=slackernews-frontend - - logs: - selector: - - app=postgres - analyzers: - - textAnalyze: - checkName: Axios Errors - fileName: slackernews-frontend-*/slackernews.log - regex: "error - AxiosError" - outcomes: - - pass: - when: "false" - message: "Axios errors not found in logs" - - fail: - when: "true" - message: "Axios errors found in logs" -``` - -================ -File: docs/partials/support-bundles/_logs-selectors-secret.mdx -================ -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: example - labels: - troubleshoot.sh/kind: support-bundle -stringData: - support-bundle-spec: |- - apiVersion: troubleshoot.sh/v1beta2 - kind: SupportBundle - metadata: - name: example - spec: - collectors: - - logs: - namespace: {{ .Release.Namespace }} - selector: - - app=slackernews-nginx - - logs: - namespace: {{ .Release.Namespace }} - selector: - - app=slackernews-api - - logs: - namespace: {{ .Release.Namespace }} - selector: - - app=slackernews-frontend - - logs: - selector: - - app=postgres - analyzers: - - textAnalyze: - checkName: Axios Errors - fileName: slackernews-frontend-*/slackernews.log - regex: "error - AxiosError" - outcomes: - - pass: - when: "false" - message: "Axios errors not found in logs" - - fail: - when: "true" - message: "Axios errors found in logs" -``` - -================ -File: docs/partials/support-bundles/_node-resources-cr.mdx -================ -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: SupportBundle -metadata: - name: example -spec: - collectors: [] - analyzers: - - nodeResources: - checkName: One node must have 2 GB RAM and 1 CPU Core - filters: - allocatableMemory: 2Gi - cpuCapacity: "1" - outcomes: - - fail: - when: count() < 1 - message: Cannot find a node with sufficient memory and cpu - - pass: - message: Sufficient CPU and memory is available - - nodeResources: - checkName: Must have at least 3 nodes in the cluster - outcomes: - - fail: - when: "count() < 3" - message: This application requires at least 3 nodes - - warn: - when: "count() < 5" - message: This application recommends at least 5 nodes. - - pass: - message: This cluster has enough nodes.
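- # Comment added for clarity (assumed behavior of the nodeResources analyzer): count() is the - # number of nodes that match the optional filters, and min(ephemeralStorageCapacity) below is - # the smallest value reported across those nodes.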
- - nodeResources: - checkName: Each node must have at least 40 GB of ephemeral storage - outcomes: - - fail: - when: "min(ephemeralStorageCapacity) < 40Gi" - message: Nodes in this cluster do not have at least 40 GB of ephemeral storage. - uri: https://kurl.sh/docs/install-with-kurl/system-requirements - - warn: - when: "min(ephemeralStorageCapacity) < 100Gi" - message: Nodes in this cluster are recommended to have at least 100 GB of ephemeral storage. - uri: https://kurl.sh/docs/install-with-kurl/system-requirements - - pass: - message: The nodes in this cluster have enough ephemeral storage. -``` - -================ -File: docs/partials/support-bundles/_node-resources-secret.mdx -================ -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: example - labels: - troubleshoot.sh/kind: support-bundle -stringData: - support-bundle-spec: |- - apiVersion: troubleshoot.sh/v1beta2 - kind: SupportBundle - metadata: - name: example - spec: - collectors: [] - analyzers: - - nodeResources: - checkName: One node must have 2 GB RAM and 1 CPU Core - filters: - allocatableMemory: 2Gi - cpuCapacity: "1" - outcomes: - - fail: - when: count() < 1 - message: Cannot find a node with sufficient memory and cpu - - pass: - message: Sufficient CPU and memory is available - - nodeResources: - checkName: Must have at least 3 nodes in the cluster - outcomes: - - fail: - when: "count() < 3" - message: This application requires at least 3 nodes - - warn: - when: "count() < 5" - message: This application recommends at least 5 nodes. - - pass: - message: This cluster has enough nodes. - - nodeResources: - checkName: Each node must have at least 40 GB of ephemeral storage - outcomes: - - fail: - when: "min(ephemeralStorageCapacity) < 40Gi" - message: Nodes in this cluster do not have at least 40 GB of ephemeral storage. - uri: https://kurl.sh/docs/install-with-kurl/system-requirements - - warn: - when: "min(ephemeralStorageCapacity) < 100Gi" - message: Nodes in this cluster are recommended to have at least 100 GB of ephemeral storage. - uri: https://kurl.sh/docs/install-with-kurl/system-requirements - - pass: - message: The nodes in this cluster have enough ephemeral storage. -``` - -================ -File: docs/partials/support-bundles/_node-status-cr.mdx -================ -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: SupportBundle -metadata: - name: example -spec: - collectors: [] - analyzers: - - nodeResources: - checkName: Node status check - outcomes: - - fail: - when: "nodeCondition(Ready) == False" - message: "Not all nodes are online." - - warn: - when: "nodeCondition(Ready) == Unknown" - message: "Not all nodes are online." - - pass: - message: "All nodes are online." -``` - -================ -File: docs/partials/support-bundles/_node-status-secret.mdx -================ -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: example - labels: - troubleshoot.sh/kind: support-bundle -stringData: - support-bundle-spec: |- - apiVersion: troubleshoot.sh/v1beta2 - kind: SupportBundle - metadata: - name: example - spec: - collectors: [] - analyzers: - - nodeResources: - checkName: Node status check - outcomes: - - fail: - when: "nodeCondition(Ready) == False" - message: "Not all nodes are online." - - warn: - when: "nodeCondition(Ready) == Unknown" - message: "Not all nodes are online." - - pass: - message: "All nodes are online."
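- # Comment added for clarity (assumed behavior): nodeCondition(Ready) checks each node's Ready - # condition; Unknown typically means the node has stopped reporting status to the control plane.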
-``` - -================ -File: docs/partials/support-bundles/_redis-mysql-cr.mdx -================ -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: SupportBundle -metadata: - name: example -spec: - collectors: - - mysql: - collectorName: mysql - uri: 'root:my-secret-pw@tcp(localhost:3306)/mysql' - parameters: - - character_set_server - - collation_server - - init_connect - - innodb_file_format - - innodb_large_prefix - - innodb_strict_mode - - log_bin_trust_function_creators - - redis: - collectorName: my-redis - uri: rediss://default:replicated@server:6380 -``` - -================ -File: docs/partials/support-bundles/_redis-mysql-secret.mdx -================ -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: example - labels: - troubleshoot.sh/kind: support-bundle -stringData: - support-bundle-spec: |- - apiVersion: troubleshoot.sh/v1beta2 - kind: SupportBundle - metadata: - name: example - spec: - collectors: - - mysql: - collectorName: mysql - uri: 'root:my-secret-pw@tcp(localhost:3306)/mysql' - parameters: - - character_set_server - - collation_server - - init_connect - - innodb_file_format - - innodb_large_prefix - - innodb_strict_mode - - log_bin_trust_function_creators - - redis: - collectorName: my-redis - uri: rediss://default:replicated@server:6380 -``` - -================ -File: docs/partials/support-bundles/_run-pods-cr.mdx -================ -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: SupportBundle -metadata: - name: example -spec: - collectors: - - runPod: - collectorName: "static-hi" - podSpec: - containers: - - name: static-hi - image: alpine:3 - command: ["echo", "hi static!"] - analyzers: - - textAnalyze: - checkName: Said hi! - fileName: /static-hi.log - regex: 'hi static' - outcomes: - - fail: - message: Didn't say hi. - - pass: - message: Said hi! -``` - -================ -File: docs/partials/support-bundles/_run-pods-secret.mdx -================ -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: example - labels: - troubleshoot.sh/kind: support-bundle -stringData: - support-bundle-spec: |- - apiVersion: troubleshoot.sh/v1beta2 - kind: SupportBundle - metadata: - name: example - spec: - collectors: - - runPod: - collectorName: "static-hi" - podSpec: - containers: - - name: static-hi - image: alpine:3 - command: ["echo", "hi static!"] - analyzers: - - textAnalyze: - checkName: Said hi! - fileName: /static-hi.log - regex: 'hi static' - outcomes: - - fail: - message: Didn't say hi. - - pass: - message: Said hi! -``` - -================ -File: docs/partials/support-bundles/_support-bundle-add-analyzers.mdx -================ -Add analyzers based on conditions that you expect for your application. For example, you might require that a cluster have at least 2 CPUs and 4GB memory available. - - Good analyzers clearly identify failure modes. For example, if you can identify a log message from your database component that indicates a problem, you should write an analyzer that checks for that log. - - At a minimum, include application log analyzers. A simple text analyzer can detect specific log lines and inform an end user of remediation steps. - - Analyzers that Replicated recommends considering are: - -- **Resource statuses:** Check the status of various resources, such as Deployments, StatefulSets, Jobs, and so on. -- **Regular expressions:** Analyze arbitrary data. -- **Databases:** Check the version and connection status. 
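- -For example, a minimal application log analyzer might look like the following sketch. It assumes a `logs` collector that captures files matching `myapp-*/myapp.log` and an error string that appears verbatim in your logs; substitute your own paths, patterns, and remediation text: - -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: SupportBundle -metadata: - name: example -spec: - analyzers: - - textAnalyze: - checkName: Database Connection Errors - # Path is relative to the files collected by the logs collector - fileName: myapp-*/myapp.log - regex: "connection refused" - outcomes: - - pass: - when: "false" - message: No database connection errors were found in the logs. - - fail: - when: "true" - message: Database connection errors were found. Check that the database is running and reachable from the application. -```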
- -================ -File: docs/partials/support-bundles/_support-bundle-add-logs.mdx -================ -Replicated recommends adding application Pod logs and setting the collection limits for the number of lines logged. Typically, the `selector` attribute is matched to the labels on the application Pods. - -To get the labels for an application, either inspect the YAML or run `kubectl get pods --show-labels`. - -After the labels are discovered, create collectors to include logs from these pods in a bundle. Depending on the complexity of an application's labeling schema, you might need a few different declarations of the logs collector. You can include the `logs` collector as many times as needed. - -The `limits` field can support `maxAge` or `maxLines`. This limits the output to the constraints provided. **Default:** `maxLines: 10000` - -================ -File: docs/partials/support-bundles/_support-bundle-custom-collectors.mdx -================ -Add any custom collectors to the file. Collectors that Replicated recommends considering are: - -- **Kubernetes resources:** Use for custom resource definitions (CRDs), Secrets, and ConfigMaps, if they are required for your application to work. -- **Databases:** Return a selection of rows or entire tables. -- **Volumes:** Ensure that an application's persistent state files exist, are readable/writeable, and have the right permissions. -- **Pods:** Run a Pod from a custom image. -- **Files:** Copy files from Pods and hosts. -- **HTTP:** Consume your own application APIs with HTTP requests. If your application has its own API that serves status, metrics, performance data, and so on, this information can be collected and analyzed. - -================ -File: docs/partials/template-functions/_go-sprig.mdx -================ -KOTS template functions are based on the Go text/template library. All functionality of the Go templating language, including if statements, loops, and variables, is supported with KOTS template functions. For more information, see [text/template](https://golang.org/pkg/text/template/) in the Go documentation. - -Additionally, KOTS template functions can be used with all functions in the Sprig library. Sprig provides several template functions for the Go templating language, such as type conversion, string, and integer math functions. For more information, see [Sprig Function Documentation](https://masterminds.github.io/sprig/).
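- -As an illustrative sketch (the `hostname` config item below is hypothetical), Sprig functions such as `trim` and `upper` can be combined with KOTS template functions inside template delimiters: - -```yaml -apiVersion: kots.io/v1beta1 -kind: Config -metadata: - name: config-sample -spec: - groups: - - name: example_group - title: Example Group - items: - - name: hostname - title: Hostname - type: text - - name: hostname_normalized - title: Normalized Hostname - type: text - readonly: true - # Sprig's trim and upper applied to a user-provided config value - value: 'repl{{ upper (trim (ConfigOption "hostname")) }}' -```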
- -================ -File: docs/partials/template-functions/_integer-comparison.mdx -================ -The following example uses: -* KOTS [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue) template function to evaluate the number of seats permitted by the license -* Sprig [atoi](https://masterminds.github.io/sprig/conversion.html) function to convert the string values returned by LicenseFieldValue to integers -* [Go binary comparison operators](https://pkg.go.dev/text/template#hdr-Functions) `gt`, `lt`, `ge`, and `le` to compare the integers - -```yaml -# KOTS Config custom resource -apiVersion: kots.io/v1beta1 -kind: Config -metadata: - name: config-sample -spec: - groups: - - name: example_group - title: Example Config - items: - - name: small - title: Small (100 or Fewer Seats) - type: text - default: Default for small teams - # Use le and atoi functions to display this config item - # only when the value of the numSeats entitlement is - # less than or equal to 100 - when: repl{{ le (atoi (LicenseFieldValue "numSeats")) 100 }} - - name: medium - title: Medium (101-1000 Seats) - type: text - default: Default for medium teams - # Use ge, le, and atoi functions to display this config item - # only when the value of the numSeats entitlement is - # greater than or equal to 101 and less than or equal to 1000 - when: repl{{ (and (ge (atoi (LicenseFieldValue "numSeats")) 101) (le (atoi (LicenseFieldValue "numSeats")) 1000)) }} - - name: large - title: Large (More Than 1000 Seats) - type: text - default: Default for large teams - # Use gt and atoi functions to display this config item - # only when the value of the numSeats entitlement is - # greater than 1000 - when: repl{{ gt (atoi (LicenseFieldValue "numSeats")) 1000 }} -``` - -As shown in the image below, if the user's license contains `numSeats: 150`, then the `medium` item is displayed on the **Config** page and the `small` and `large` items are not displayed: - -<img alt="Config page displaying the Medium (101-1000 Seats) item" src="/images/config-example-numseats.png" width="550px"/> - -[View a larger version of this image](/images/config-example-numseats.png) - -================ -File: docs/partials/template-functions/_ne-comparison.mdx -================ -In the example below, the `ingress_type` field is displayed on the **Config** page only when the distribution of the cluster is _not_ [Replicated Embedded Cluster](/vendor/embedded-overview). This ensures that only users deploying to their own existing cluster are able to select the method for ingress. - -The following example uses: -* KOTS [Distribution](/reference/template-functions-static-context#distribution) template function to return the Kubernetes distribution of the cluster where KOTS is running -* [ne](https://pkg.go.dev/text/template#hdr-Functions) (_not equal_) Go binary operator to compare the rendered value of the Distribution template function to a string, then return `true` if the values are not equal to one another - -```yaml -apiVersion: kots.io/v1beta1 -kind: Config -metadata: - name: config -spec: - groups: - # Ingress settings - - name: ingress_settings - title: Ingress Settings - description: Configure Ingress - items: - - name: ingress_type - title: Ingress Type - help_text: | - Select how traffic will ingress to the application.
- type: radio - items: - - name: ingress_controller - title: Ingress Controller - - name: load_balancer - title: Load Balancer - default: "ingress_controller" - required: true - when: 'repl{{ ne Distribution "embedded-cluster" }}' - # Database settings - - name: database_settings - title: Database - items: - - name: postgres_type - help_text: Would you like to use an embedded postgres instance, or connect to an external instance that you manage? - type: radio - title: Postgres - default: embedded_postgres - items: - - name: embedded_postgres - title: Embedded Postgres - - name: external_postgres - title: External Postgres -``` - -The following image shows how the `ingress_type` field is hidden when the distribution of the cluster is `embedded-cluster`. Only the `postgres_type` item is displayed: - -<img alt="Config page with a Postgres field" src="/images/config-example-distribution-not-ec.png" width="550px"/> - -[View a larger version of this image](/images/config-example-distribution-not-ec.png) - -Conversely, when the distribution of the cluster is not `embedded-cluster`, both fields are displayed: - -<img alt="Config page with Ingress and Postgres fields" src="/images/config-example-distribution-not-ec-2.png" width="550px"/> - -[View a larger version of this image](/images/config-example-distribution-not-ec-2.png) - -================ -File: docs/partials/template-functions/_string-comparison.mdx -================ -The following example uses: -* KOTS [Distribution](/reference/template-functions-static-context#distribution) template function to return the Kubernetes distribution of the cluster where KOTS is running -* [eq](https://pkg.go.dev/text/template#hdr-Functions) (_equal_) Go binary operator to compare the rendered value of the Distribution template function to a string, then return the boolean truth of the comparison - -```yaml -# KOTS Config custom resource -apiVersion: kots.io/v1beta1 -kind: Config -metadata: - name: config-sample -spec: - groups: - - name: example_settings - title: My Example Config - description: Example fields for using Distribution template function - items: - - name: gke_distribution - type: label - title: "You are deploying to GKE" - # Use the eq binary operator to check if the rendered value - # of the KOTS Distribution template function is equal to gke - when: repl{{ eq Distribution "gke" }} - - name: openshift_distribution - type: label - title: "You are deploying to OpenShift" - when: repl{{ eq Distribution "openShift" }} - - name: eks_distribution - type: label - title: "You are deploying to EKS" - when: repl{{ eq Distribution "eks" }} - ... 
-``` - -The following image shows how only the `gke_distribution` item is displayed on the **Config** page when KOTS is running in a GKE cluster: - -<img alt="Config page with the text You are deploying to GKE" src="/images/config-example-distribution-gke.png" width="550px"/> - -================ -File: docs/partials/template-functions/_use-cases.mdx -================ -Common use cases for KOTS template functions include rendering values during installation or upgrade, such as: -* Customer-specific license field values -* User-provided configuration values -* Information about the customer environment, such as the number of nodes or the Kubernetes version in the cluster where the application is installed -* Random strings - -KOTS template functions can also be used to work with integer, boolean, float, and string values, such as doing mathematical operations, trimming leading and trailing spaces, or converting string values to integers or booleans. - -================ -File: docs/partials/updating/_admin-console-air-gap.mdx -================ -import BuildAirGapBundle from "../install/_airgap-bundle-build.mdx" -import DownloadAirGapBundle from "../install/_airgap-bundle-download.mdx" -import ViewAirGapBundle from "../install/_airgap-bundle-view-contents.mdx" - -To perform an air gap update from the Admin Console: - -1. In the [Vendor Portal](https://vendor.replicated.com), go to the channel where the target release is promoted to build and download the new `.airgap` bundle: - - <BuildAirGapBundle/> - -1. <DownloadAirGapBundle/> -1. <ViewAirGapBundle/> -1. In the Admin Console, go to the **Version History** tab. -1. Click **Upload a new version**. - - A new upstream version displays in the list of available versions. - - ![New Version Available](/images/new-version-available.png) - -1. (Optional) When there are multiple versions of an application, you can compare -the changes between them by clicking **Diff releases** in the right corner. - - You can review changes between any two arbitrary releases by clicking the icon in the header - of the release column. Select the two versions to compare, and click **Diff releases** - to show the relative changes between the two releases. - - ![Diff Releases](/images/diff-releases.png) - ![New Changes](/images/new-changes.png) - -1. (Optional) Click the **View preflight checks** icon to view or re-run the preflight checks. - - ![Preflight Checks](/images/preflight-checks.png) - -1. Return to the **Version History** tab and click **Deploy** next to the target version. - -================ -File: docs/partials/updating/_admin-console.mdx -================ -To perform an update from the Admin Console: - -1. In the Admin Console, go to the **Version History** tab. -1. Click **Check for updates**. - - A new upstream version displays in the list of available versions. - - <img alt="New Version Available" src="/images/new-version-available.png" width="650px"/> - - [View a larger version of this image](/images/new-version-available.png) - -1. (Optional) When there are multiple versions of an application, you can compare -the changes between them by clicking **Diff releases** in the right corner. - - You can review changes between any two arbitrary releases by clicking the icon in the header - of the release column. Select the two versions to compare, and click **Diff releases** - to show the relative changes between the two releases.
- - <img alt="Diff Releases" src="/images/diff-releases.png" width="650px"/> - - [View a larger version of this image](/images/diff-releases.png) - - <img alt="New Changes" src="/images/new-changes.png" width="650px"/> - - [View a larger version of this image](/images/new-changes.png) - -1. (Optional) Click the **View preflight checks** icon to view or re-run the preflight checks. - - <img src="/images/preflight-checks.png" alt="Preflight checks" width="650px"/> - - [View a larger version of this image](/images/preflight-checks.png) - -1. Return to the **Version History** tab and click **Deploy** next to the target version. - -================ -File: docs/partials/updating/_installerRequirements.mdx -================ -* **installer-spec-file**: If you used the `installer-spec-file` flag to pass a `patch.yaml` file when you installed, you must pass the same `patch.yaml` file when you upgrade. This prevents the installer from overwriting any configuration from your `patch.yaml` file and making changes to the add-ons in your cluster. For example: `installer-spec-file="./patch.yaml"`. - -* **app-version-label**: By default, the script also upgrades your application to the latest version when you run the installation script. - - You can specify a target application version with the `app-version-label` flag. To avoid upgrading your application version, set the `app-version-label` flag to the currently installed application version. For example: `app-version-label=1.5.0`. - -================ -File: docs/partials/updating/_upgradePrompt.mdx -================ -(Kubernetes Upgrades Only) If a Kubernetes upgrade is required, the script automatically prints a `Drain local node and apply upgrade?` prompt. Confirm the prompt to drain the local primary node and apply the Kubernetes upgrade to the control plane. - - The script continues to drain and upgrade nodes sequentially. For each node, the script prints a command that you must run on the node to upgrade Kubernetes. For more information, see [About Kubernetes Updates](/enterprise/updating-kurl-about#kubernetes) in _About kURL Cluster Updates_. - -================ -File: docs/partials/vendor-api/_api-about.mdx -================ -The Vendor API is the API for the Vendor Portal. This API can be used to complete tasks programmatically, including all tasks for packaging and managing applications, and managing artifacts such as teams and license files. - -================ -File: docs/partials/vendor-api/_team-token-note.mdx -================ -:::note -Team API tokens are deprecated and cannot be generated. If you are already using team API tokens, Replicated recommends that you migrate to Service Accounts or User API tokens instead because these options provide better granular control over token access. -::: - -================ -File: docs/reference/cron-expressions.md -================ -# Cron Expressions - -This topic describes the supported cron expressions that you can use to schedule automatic application update checks and automatic backups in the KOTS Admin Console. - -For more information, see [Configuring Automatic Updates](/enterprise/updating-apps) and [Schedule Automatic Backups](/enterprise/snapshots-creating#schedule-automatic-backups) in _Creating and Scheduling Backups_. 
- -## Syntax - -``` -<minute> <hour> <day-of-month> <month> <day-of-week> -``` - -## Fields - -The following table lists the required cron fields and supported values: - -<table> - <tr> - <th width="30%">Required Field</th> - <th width="30%">Allowed Values</th> - <th width="40%">Allowed Special Characters</th> - </tr> - <tr> - <td>Minute</td> - <td>0 through 59</td> - <td>, - * </td> - </tr> - <tr> - <td>Hour</td> - <td>0 through 23</td> - <td>, - * </td> - </tr> - <tr> - <td>Day-of-month</td> - <td>1 through 31</td> - <td>, - * ? </td> - </tr> - <tr> - <td>Month</td> - <td>1 through 12 or JAN through DEC</td> - <td>, - * </td> - </tr> - <tr> - <td>Day-of-week</td> - <td>0 through 6 or SUN through SAT</td> - <td>, - * ?</td> - </tr> - </table> - -## Special Characters - -Replicated uses an external cron Go library. For more information about its usage, see [cron](https://pkg.go.dev/github.com/robfig/cron/v3). - -The following table describes the supported special characters: - -<table> - <tr> - <th width="20%">Special Character</th> - <th width="80%">Description</th> - </tr> - <tr> - <td>Comma (,)</td> - <td>Specifies a list or multiple values, which can be consecutive or not. For example, <code>1,2,4</code> in the Day-of-week field signifies every Monday, Tuesday, and Thursday.</td> - </tr> - <tr> - <td>Dash (-)</td> - <td>Specifies a contiguous range. For example, <code>4-6</code> in the Month field signifies April through June.</td> - </tr> - <tr> - <td>Asterisk (*)</td> - <td>Specifies that all of the values for the field are used. For example, using <code>*</code> in the Month field means that all of the months are included in the schedule.</td> - </tr> - <tr> - <td>Question mark (?)</td> - <td> Specifies that one or another value can be used. For example, enter <code>5</code> for Day-of-the-month and <code>?</code> for Day-of-the-week to check for updates on the 5th day of the month, regardless of which day of the week it is.</td> - </tr> -</table> - -## Predefined Schedules - -You can use one of the following predefined schedule values instead of a cron expression: - -<table> - <tr> - <th width="25%">Schedule Value</th> - <th width="50%">Description</th> - <th width="25%">Equivalent Cron Expression</th> - </tr> - <tr> - <td>@yearly (or @annually)</td> - <td>Runs once a year, at midnight on January 1.</td> - <td>0 0 1 1 *</td> - </tr> - <tr> - <td>@monthly</td> - <td>Runs once a month, at midnight on the first of the month.</td> - <td>0 0 1 * *</td> - </tr> - <tr> - <td>@weekly</td> - <td>Runs once a week, at midnight between Saturday and Sunday.</td> - <td>0 0 * * 0</td> - </tr> - <tr> - <td>@daily (or @midnight)</td> - <td>Runs once a day, at midnight.</td> - <td>0 0 * * *</td> - </tr> - <tr> - <td>@hourly</td> - <td>Runs once an hour, at the beginning of the hour.</td> - <td>0 * * * *</td> - </tr> - <tr> - <td>@never</td> - <td><p>Disables the schedule completely. Only used by KOTS.</p><p>This value can be useful when you are calling the API directly or are editing the KOTS configuration manually.</p></td> - <td>0 * * * *</td> - </tr> - <tr> - <td>@default</td> - <td><p>Selects the default schedule option (every 4 hours).
Begins when the Admin Console starts up.</p><p>This value can be useful when you are calling the API directly or are editing the KOTS configuration manually.</p></td> - <td>0 * * * *</td> - </tr> -</table> - -## Intervals - -You can also schedule the job to operate at fixed intervals, starting at the time the job is added or when cron is run: - -``` -@every DURATION -``` - -Replace `DURATION` with a string that is accepted by time.ParseDuration, with the exception of seconds. Seconds are not supported by KOTS. For more information about duration strings, see [time.ParseDuration](http://golang.org/pkg/time/#ParseDuration) in the Go Time documentation. - -As with standard cron expressions, the interval does not include the job runtime. For example, if a job is scheduled to run every 10 minutes, and the job takes 4 minutes to run, there are 6 minutes of idle time between each run. - -## Examples - -The following examples show valid cron expressions to schedule checking for updates: - -- At 11:30 AM every day: - - ``` - 30 11 * * * - ``` - -- After 1 hour and 45 minutes, and then every interval following that: - - ``` - @every 1h45m - ``` - -================ -File: docs/reference/custom-resource-about.md -================ -# About Custom Resources - -You can include custom resources in releases to control the experience for applications installed with Replicated KOTS. - -Custom resources are consumed by KOTS, the Admin Console, or by other kubectl plugins. Custom resources are packaged as part of the application, but are _not_ deployed to the cluster. - -## KOTS Custom Resources - -The following are custom resources in the `kots.io` API group: - -| API Group/Version | Kind | Description | -|---------------|------|-------------| -| kots.io/v1beta1 | [Application](custom-resource-application) | Adds additional metadata (branding, release notes and more) to an application | -| kots.io/v1beta1 | [Config](custom-resource-config)| Defines a user-facing configuration screen in the Admin Console | -| kots.io/v1beta2 | [HelmChart](custom-resource-helmchart-v2) | Identifies an instantiation of a Helm Chart | -| kots.io/v1beta1 | [LintConfig](custom-resource-lintconfig) | Customizes the default rule levels for the KOTS release linter | - -## Other Custom Resources - -The following are custom resources in API groups other than `kots.io` that can be included in a KOTS release to configure additional functionality: - -| API Group/Version | Kind | Description | -|---------------|------|-------------| -| app.k8s.io/v1beta1 | [SIG Application](https://github.com/kubernetes-sigs/application#kubernetes-applications) | Defines metadata about the application | -| cluster.kurl.sh/v1beta1 | [Installer](https://kurl.sh/docs/create-installer/) | Defines a Replicated kURL distribution | -| embeddedcluster.replicated.com/v1beta1 | [Config](/reference/embedded-config) | Defines a Replicated Embedded Cluster distribution | -| troubleshoot.replicated.com/v1beta2 | [Preflight](custom-resource-preflight) | Defines the data to collect and analyze for custom preflight checks | -| troubleshoot.replicated.com/v1beta2 | [Redactor](https://troubleshoot.sh/reference/redactors/overview/) | Defines custom redactors that apply to support bundles and preflight checks | -| troubleshoot.sh/v1beta2 | [Support Bundle](custom-resource-preflight) | Defines the data to collect and analyze for a support bundle | -| velero.io/v1 | [Backup](https://velero.io/docs/v1.10/api-types/backup/) | A Velero backup request, triggered when the user 
initiates a backup with Replicated snapshots | - -================ -File: docs/reference/custom-resource-application.mdx -================ -import Title from "../partials/custom-resource-application/_title.mdx" -import Icon from "../partials/custom-resource-application/_icon.mdx" -import ReleaseNotes from "../partials/custom-resource-application/_releaseNotes.mdx" -import AllowRollback from "../partials/custom-resource-application/_allowRollback.mdx" -import AdditionalNamespaces from "../partials/custom-resource-application/_additionalNamespaces.mdx" -import AdditionalImages from "../partials/custom-resource-application/_additionalImages.mdx" -import RequireMinimalRBACPrivileges from "../partials/custom-resource-application/_requireMinimalRBACPrivileges.mdx" -import SupportMinimalRBACPrivileges from "../partials/custom-resource-application/_supportMinimalRBACPrivileges.mdx" -import Ports from "../partials/custom-resource-application/_ports.mdx" -import StatusInformers from "../partials/custom-resource-application/_statusInformers.mdx" -import Graphs from "../partials/custom-resource-application/_graphs.mdx" -import GraphsTemplates from "../partials/custom-resource-application/_graphs-templates.mdx" -import TargetKotsVersion from "../partials/custom-resource-application/_targetKotsVersion.mdx" -import MinKotsVersion from "../partials/custom-resource-application/_minKotsVersion.mdx" -import ProxyRegistryDomain from "../partials/custom-resource-application/_proxyRegistryDomain.mdx" -import ReplicatedRegistryDomain from "../partials/custom-resource-application/_replicatedRegistryDomain.mdx" -import ServicePortNote from "../partials/custom-resource-application/_servicePort-note.mdx" -import PortsServiceName from "../partials/custom-resource-application/_ports-serviceName.mdx" -import PortsLocalPort from "../partials/custom-resource-application/_ports-localPort.mdx" -import PortsServicePort from "../partials/custom-resource-application/_ports-servicePort.mdx" -import PortsApplicationURL from "../partials/custom-resource-application/_ports-applicationURL.mdx" -import KurlNote from "../partials/custom-resource-application/_ports-kurl-note.mdx" - -# Application - -The Application custom resource enables features such as branding, release notes, port forwarding, dashboard buttons, app status indicators, and custom graphs. - -There is some overlap between the Application custom resource manifest file and the [Kubernetes SIG Application custom resource](https://github.com/kubernetes-sigs/application/blob/master/docs/api.md). For example, enabling features such as [adding a button to the dashboard](/vendor/admin-console-adding-buttons-links) requires the use of both the Application and SIG Application custom resources. 
- -The following is an example manifest file for the Application custom resource: - -```yaml -apiVersion: kots.io/v1beta1 -kind: Application -metadata: - name: my-application -spec: - title: My Application - icon: https://support.io/img/logo.png - releaseNotes: These are our release notes - allowRollback: false - targetKotsVersion: "1.60.0" - minKotsVersion: "1.40.0" - requireMinimalRBACPrivileges: false - additionalImages: - - jenkins/jenkins:lts - additionalNamespaces: - - "*" - ports: - - serviceName: web - servicePort: 9000 - localPort: 9000 - applicationUrl: "http://web" - statusInformers: - - deployment/my-web-svc - - deployment/my-worker - graphs: - - title: User Signups - query: 'sum(user_signup_events_total)' -``` - -## title - -<table> - <tr> - <th>Description</th> - <td>The application title. Used on the license upload and in various places in the Replicated Admin Console.</td> - </tr> - <tr> - <th>Example</th> - <td><Title/></td> - </tr> - <tr> - <th>Supports Go templates?</th> - <td>No</td> - </tr> - <tr> - <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th> - <td>Yes</td> - </tr> -</table> - -## icon - -<table> - <tr> - <th>Description</th> - <td>The icon file for the application. Used on the license upload, in various places in the Admin Console, and in the Download Portal. The icon can be a remote URL or a Base64 encoded image. Base64 encoded images are required to display the image in air gap installations with no outbound internet access.</td> - </tr> - <tr> - <th>Example</th> - <td><Icon/></td> - </tr> - <tr> - <th>Supports Go templates?</th> - <td>No</td> - </tr> - <tr> - <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th> - <td>Yes</td> - </tr> -</table> - - -## releaseNotes - -<table> - <tr> - <th>Description</th> - <td>The release notes for this version. These can also be set when promoting a release.</td> - </tr> - <tr> - <th>Example</th> - <td><ReleaseNotes/></td> - </tr> - <tr> - <th>Supports Go templates?</th> - <td>No</td> - </tr> - <tr> - <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th> - <td>Yes</td> - </tr> -</table> - -## allowRollback - -<table> - <tr> - <th>Description</th> - <td> - <p>Enable this flag to create a <strong>Rollback</strong> button on the Admin Console Version History page.</p> - <p>If an application is guaranteed not to introduce backwards-incompatible versions, such as through database migrations, then the <code>allowRollback</code> flag can allow end users to easily roll back to previous versions from the Admin Console.</p> - <p>Rollback does not revert any state. Rather, it recovers the YAML manifests that are applied to the cluster.</p> - </td> - </tr> - <tr> - <th>Example</th> - <td><AllowRollback/></td> - </tr> - <tr> - <th>Default</th> - <td><code>false</code></td> - </tr> - <tr> - <th>Supports Go templates?</th> - <td>No</td> - </tr> - <tr> - <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th> - <td>Embedded Cluster 1.17.0 and later supports partial rollbacks of the application version. Partial rollbacks are supported only when rolling back to a version where there is no change to the [Embedded Cluster Config](/reference/embedded-config) compared to the currently-installed version. 
For example, users can roll back to release version 1.0.0 after upgrading to 1.1.0 only if both 1.0.0 and 1.1.0 use the same Embedded Cluster Config.</td> - </tr> -</table> - - -## additionalNamespaces - -<table> - <tr> - <th>Description</th> - <td> - <p>An array of additional namespaces as strings that Replicated KOTS creates on the cluster. For more information, see <a href="/vendor/operator-defining-additional-namespaces">Defining Additional Namespaces</a>.</p> - <p>In addition to creating the additional namespaces, KOTS ensures that the application secret exists in the namespaces. KOTS also ensures that this application secret has access to pull the application images, including both images that are used and any images you add in the <code>additionalImages</code> field. This pull secret is automatically added to all manifest files that use private images.</p> - <p>For dynamically created namespaces, specify <code>"*"</code>.</p> - </td> - </tr> - <tr> - <th>Example</th> - <td><AdditionalNamespaces/></td> - </tr> - <tr> - <th>Supports Go templates?</th> - <td>No</td> - </tr> - <tr> - <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th> - <td>Yes</td> - </tr> -</table> - -## additionalImages - -<table> - <tr> - <th>Description</th> - <td><p>An array of strings that reference images to be included in air gap bundles and pushed to the local registry during installation.</p><p>KOTS detects images from the PodSpecs in the application. Some applications, such as Operators, might need to include additional images that are not referenced until runtime. For more information, see <a href="/vendor/operator-defining-additional-images">Defining Additional Images</a>.</p></td> - </tr> - <tr> - <th>Example</th> - <td><AdditionalImages/></td> - </tr> - <tr> - <th>Supports Go templates?</th> - <td>No</td> - </tr> - <tr> - <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th> - <td>Yes</td> - </tr> -</table> - -## requireMinimalRBACPrivileges - -<table> - <tr> - <th>Description</th> - <td><p><code>requireMinimalRBACPrivileges</code> applies to existing clusters only.</p><p>Requires that minimal role-based access control (RBAC) be used for all customer installations. When set to <code>true</code>, KOTS creates a namespace-scoped Role and RoleBinding, instead of the default cluster-scoped ClusterRole and ClusterRoleBinding.</p><p>For additional requirements and limitations related to using namespace-scoped RBAC, see <a href="/vendor/packaging-rbac#min-rbac">About Namespace-scoped RBAC</a> in <em>Configuring KOTS RBAC</em>.</p></td> - </tr> - <tr> - <th>Example</th> - <td><RequireMinimalRBACPrivileges/></td> - </tr> - <tr> - <th>Default</th> - <td><code>false</code></td> - </tr> - <tr> - <th>Supports Go templates?</th> - <td>No</td> - </tr> - <tr> - <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th> - <td>No</td> - </tr> -</table> - -## supportMinimalRBACPrivileges - -<table> - <tr> - <th>Description</th> - <td><p><code>supportMinimalRBACPrivileges</code> applies to existing clusters only.</p><p>Allows minimal role-based access control (RBAC) to be used for all customer installations. When set to <code>true</code>, KOTS supports creating a namespace-scoped Role and RoleBinding, instead of the default cluster-scoped ClusterRole and ClusterRoleBinding.</p><p> Minimal RBAC is not used by default.
It is only used when the <code>--use-minimal-rbac</code> flag is passed to the <code>kots install</code> command.</p><p>For additional requirements and limitations related to using namespace-scoped RBAC, see <a href="/vendor/packaging-rbac#min-rbac">About Namespace-scoped RBAC</a> in <em>Configuring KOTS RBAC</em>.</p></td> - </tr> - <tr> - <th>Example</th> - <td><SupportMinimalRBACPrivileges/></td> - </tr> - <tr> - <th>Default</th> - <td><code>false</code></td> - </tr> - <tr> - <th>Supports Go templates?</th> - <td>No</td> - </tr> - <tr> - <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th> - <td>No</td> - </tr> -</table> - -## ports - -<table> -<tr> - <th>Description</th> - <td> - <p>Extra ports (additional to the <code>8800</code> Admin Console port) that are port-forwarded when running the <code>kubectl kots admin-console</code> command. With ports specified, KOTS can establish port forwarding to simplify connections to the deployed application. When the application starts and the service is ready, the KOTS CLI will print a message in the terminal with the URL where the port-forwarded service can be accessed. For more information, see <a href="/vendor/admin-console-port-forward">Port Forwarding Services with KOTS</a>.</p> - <KurlNote/> - <p>The <code>ports</code> key has the following fields:</p> - <ul> - <PortsServiceName/> - <PortsServicePort/> - <ServicePortNote/> - <PortsLocalPort/> - <PortsApplicationURL/> - For more information about adding links to port forwarded services, see <a href="/vendor/admin-console-port-forward#add-link">Add a Link to a Port-Forwarded Service in the Admin Console</a>. - </ul> - </td> - </tr> - <tr> - <th>Example</th> - <td><Ports/></td> - </tr> - <tr> - <th>Supports Go templates?</th> - <td><p>Go templates are supported in the `serviceName` and `applicationUrl` fields only.</p><p>Using Go templates in the `localPort` or `servicePort` fields results in an installation error similar to the following: `json: cannot unmarshal string into Go struct field ApplicationPort.spec.ports.servicePort of type int`.</p></td> - </tr> - <tr> - <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th> - <td>Yes</td> - </tr> -</table> - -## statusInformers - -<table> - <tr> - <th>Description</th> - <td> - <p>Resources to watch and report application status back to the user. 
When you include <code>statusInformers</code>, the dashboard can indicate when the application deployment is complete and the application is ready for use.</p> - <p><code>statusInformers</code> use the format <code>[namespace/]type/name</code>, where namespace is optional.</p> - <p>For more information about including statusInformers, see <a href="/vendor/admin-console-display-app-status">Adding Resource Status Informers</a>.</p> - </td> - </tr> - <tr> - <th>Example</th> - <td><StatusInformers/></td> - </tr> - <tr> - <th>Supports Go templates?</th> - <td>Yes</td> - </tr> - <tr> - <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th> - <td>Yes</td> - </tr> -</table> - -## graphs - -<table> - <tr> - <th>Description</th> - <td><p>Custom graphs to include on the Admin Console application dashboard. For more information about how to create custom graphs, see <a href="/vendor/admin-console-prometheus-monitoring">Adding Custom Graphs</a>.</p><p><code>graphs</code> has the following fields:</p><ul><li><code>graphs.title</code>: The graph title.</li><li><code>graphs.query</code>: The Prometheus query.</li><li><code>graphs.legend</code>: The legend to use for the query line. You can use Prometheus templating in the <code>legend</code> fields with each element returned from the Prometheus query. <p><GraphsTemplates/></p></li><li><code>graphs.queries</code>: A list of queries containing a <code>query</code> and <code>legend</code>.</li> <li><code>graphs.yAxisFormat</code>: The format of the Y axis labels with support for all Grafana units. For more information, see <a href="https://grafana.com/docs/features/panels/graph/#left-y-right-y">Visualizations</a> in the Grafana documentation.</li><li><code>graphs.yAxisTemplate</code>: Y axis labels template.</li></ul></td> - </tr> - <tr> - <th>Example</th> - <td><Graphs/></td> - </tr> - <tr> - <th>Supports Go templates?</th> - <td> - <p>Yes</p> - </td> - </tr> - <tr> - <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th> - <td>No</td> - </tr> -</table> - -## proxyRegistryDomain - -:::important -`proxyRegistryDomain` is deprecated. For information about how to use a custom domain for the Replicated proxy registry, see [Using Custom Domains](/vendor/custom-domains-using). -::: - -<table> - <tr> - <th>Description</th> - <td><p>The custom domain used for proxy.replicated.com. For more information, see <a href="/vendor/custom-domains-using">Using Custom Domains</a>.</p> <p>Introduced in KOTS v1.91.1.</p> </td> - </tr> - <tr> - <th>Example</th> - <td><ProxyRegistryDomain/></td> - </tr> - <tr> - <th>Supports Go templates?</th> - <td>No</td> - </tr> -</table> - -## replicatedRegistryDomain - -:::important -`replicatedRegistryDomain` is deprecated. For information about how to use a custom domain for the Replicated registry, see [Using Custom Domains](/vendor/custom-domains-using). -::: - -<table> - <tr> - <th>Description</th> - <td><p>The custom domain used for registry.replicated.com. For more information, see <a href="/vendor/custom-domains-using">Using Custom Domains</a>.</p><p>Introduced in KOTS v1.91.1.</p> </td> - </tr> - <tr> - <th>Example</th> - <td><ReplicatedRegistryDomain/></td> - </tr> - <tr> - <th>Supports Go templates?</th> - <td>No</td> - </tr> - <tr> - <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th> - <td>Yes</td> - </tr> -</table> - -## targetKotsVersion - -<table> - <tr> - <th>Description</th> - <td><p>The KOTS version that is targeted by the release.
For more information, see <a href="/vendor/packaging-kots-versions">Setting Minimum and Target Versions for KOTS</a>.</p></td> - </tr> - <tr> - <th>Example</th> - <td><TargetKotsVersion/></td> - </tr> - <tr> - <th>Supports Go templates?</th> - <td>No</td> - </tr> - <tr> - <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th> - <td>No. Setting <code>targetKotsVersion</code> to a version earlier than the KOTS version included in the specified version of Embedded Cluster will cause Embedded Cluster installations to fail with an error message like: <code>Error: This version of App Name requires a different version of KOTS from what you currently have installed.</code>. To avoid installation failures, do not use <code>targetKotsVersion</code> in releases that support installation with Embedded Cluster.</td> - </tr> -</table> - -## minKotsVersion (Beta) - -<table> - <tr> - <th>Description</th> - <td><p>The minimum KOTS version that is required by the release. For more information, see <a href="/vendor/packaging-kots-versions">Setting Minimum and Target Versions for KOTS</a>.</p></td> - </tr> - <tr> - <th>Example</th> - <td><MinKotsVersion/></td> - </tr> - <tr> - <th>Supports Go templates?</th> - <td>No</td> - </tr> - <tr> - <th>Supported for <a href="/vendor/embedded-overview">Embedded Cluster</a>?</th> - <td>No. Setting <code>minKotsVersion</code> to a version later than the KOTS version included in the specified version of Embedded Cluster will cause Embedded Cluster installations to fail with an error message like: <code>Error: This version of App Name requires a different version of KOTS from what you currently have installed.</code>. To avoid installation failures, do not use <code>minKotsVersion</code> in releases that support installation with Embedded Cluster.</td> - </tr> -</table> - -================ -File: docs/reference/custom-resource-backup.md -================ -# Velero Backup Resource for Snapshots - -This topic provides information about the supported fields in the Velero Backup resource for the Replicated KOTS snapshots feature. - -## Overview - -The Velero Backup custom resource enables the KOTS snapshots backup and restore feature. The backend of this feature uses the Velero open source project to back up Kubernetes manifests and persistent volumes. - -## Example - -The following shows an example of the Velero Backup resource: - -```yaml -apiVersion: velero.io/v1 -kind: Backup -metadata: - name: backup - annotations: - # `pvc-volume` will be the only volume included in the backup - backup.velero.io/backup-volumes: pvc-volume -spec: - includedNamespaces: - - '*' - excludedNamespaces: - - some-namespace - orderedResources: - pods: mysql/mysql-cluster-replica-0,mysql/mysql-cluster-replica-1 - persistentvolumes: pvc-12345,pvc-67890 - ttl: 720h - hooks: - resources: - - - name: my-hook - includedNamespaces: - - '*' - excludedNamespaces: - - some-namespace - includedResources: - - pods - excludedResources: [] - labelSelector: - matchLabels: - app: velero - component: server - pre: - - - exec: - container: my-container - command: - - /bin/uname - - -a - onError: Fail - timeout: 10s - post: [] -``` - -## Supported Fields for Full Backups with Snapshots {#fields} - -For partial backups with the snapshots feature, you can use all of the fields that Velero supports. See [Backups](https://velero.io/docs/v1.10/api-types/backup/) in the Velero documentation. - -However, not all fields are supported for full backups. 
The table below lists the fields that are supported for full backups with snapshots: - -<table> - <tr> - <th width="50%">Field Name</th> - <th width="50%">Description</th> - </tr> - <tr> - <td><code>includedNamespaces</code></td> - <td>(Optional) Specifies an array of namespaces to include in the backup. If unspecified, all namespaces are included.</td> - </tr> - <tr> - <td><code>excludedNamespaces</code></td> - <td>(Optional) Specifies an array of namespaces to exclude from the backup.</td> - </tr> - <tr> - <td><code>orderedResources</code></td> - <td>(Optional) Specifies the order of the resources to collect during the backup process. This is a map that uses a key as the plural resource. Each resource name has the format NAMESPACE/OBJECTNAME. The object names are a comma delimited list. For cluster resources, use OBJECTNAME only.</td> - </tr> - <tr> - <td><code>ttl</code></td> - <td> Specifies the amount of time before this backup is eligible for garbage collection. <b>Default:</b><code>720h</code> (equivalent to 30 days). This value is configurable only by the customer.</td> - </tr> - <tr> - <td><code>hooks</code></td> - <td>(Optional) Specifies the actions to perform at different times during a backup. The only supported hook is executing a command in a container in a pod (uses the pod exec API). Supports <code>pre</code> and <code>post</code> hooks.</td> - </tr> - <tr> - <td><code>hooks.resources</code></td> - <td>(Optional) Specifies an array of hooks that are applied to specific resources.</td> - </tr> - <tr> - <td><code>hooks.resources.name</code></td> - <td>Specifies the name of the hook. This value displays in the backup log.</td> - </tr> - <tr> - <td><code>hooks.resources.includedNamespaces</code></td> - <td>(Optional) Specifies an array of namespaces that this hook applies to. If unspecified, the hook is applied to all namespaces.</td> - </tr> - <tr> - <td><code>hooks.resources.excludedNamespaces</code></td> - <td>(Optional) Specifies an array of namespaces to which this hook does not apply.</td> - </tr> - <tr> - <td><code>hooks.resources.includedResources</code></td> - <td>Specifies an array of pod resources to which this hook applies.</td> - </tr> - <tr> - <td><code>hooks.resources.excludedResources</code></td> - <td>(Optional) Specifies an array of resources to which this hook does not apply.</td> - </tr> - <tr> - <td><code>hooks.resources.labelSelector</code></td> - <td>(Optional) Specifies that this hook only applies to objects that match this label selector.</td> - </tr> - <tr> - <td><code>hooks.resources.pre</code></td> - <td>Specifies an array of <code>exec</code> hooks to run before executing custom actions.</td> - </tr> - <tr> - <td><code>hooks.resources.post</code></td> - <td>Specifies an array of <code>exec</code> hooks to run after executing custom actions. Supports the same arrays and fields as <code>pre</code> hooks.</td> - </tr> - <tr> - <td><code>hooks.resources.[post/pre].exec</code></td> - <td>Specifies the type of the hook. <code>exec</code> is the only supported type.</td> - </tr> - <tr> - <td><code>hooks.resources.[post/pre].exec.container</code></td> - <td>(Optional) Specifies the name of the container where the specified command will be executed. If unspecified, the first container in the pod is used.</td> - </tr> - <tr> - <td><code>hooks.resources.[post/pre].exec.command</code></td> - <td>Specifies the command to execute. 
The format is an array.</td> - </tr> - <tr> - <td><code>hooks.resources.[post/pre].exec.onError</code></td> - <td>(Optional) Specifies how to handle an error that might occur when executing the command. <b>Valid values:</b> <code>Fail</code> and <code>Continue</code> <b>Default:</b> <code>Fail</code></td> - </tr> - <tr> - <td><code>hooks.resources.[post/pre].exec.timeout</code></td> - <td>(Optional) Specifies how many seconds to wait for the command to finish executing before the action times out. <b>Default:</b> <code>30s</code></td> - </tr> -</table> - -## Limitations {#limitations} - -- The following top-level Velero fields, or children of `spec`, are not supported in full backups: - - - `snapshotVolumes` - - `volumeSnapshotLocations` - - `labelSelector` - - `includedResources` - - `excludedResources` - - :::note - Some of these fields are supported for hook arrays, as described in the previous field definition table. See [Supported Fields for Full Backups with Snapshots](#fields) above. - ::: - -- All resources are included in the backup by default. However, resources can be excluded by adding `velero.io/exclude-from-backup=true` to the manifest files that you want to exclude. For more information, see [Configuring Snapshots](/vendor/snapshots-configuring-backups). - -================ -File: docs/reference/custom-resource-config.mdx -================ -import ItemTypes from "../partials/config/_item-types.mdx" -import PropertyWhen from "../partials/config/_property-when.mdx" -import RandomStringNote from "../partials/config/_randomStringNote.mdx" -import NameExample from "../partials/config/_nameExample.mdx" -import TypeExample from "../partials/config/_typeExample.mdx" -import DefaultExample from "../partials/config/_defaultExample.mdx" -import ValueExample from "../partials/config/_valueExample.mdx" -import RequiredExample from "../partials/config/_requiredExample.mdx" -import RecommendedExample from "../partials/config/_recommendedExample.mdx" -import HiddenExample from "../partials/config/_hiddenExample.mdx" -import ReadonlyExample from "../partials/config/_readonlyExample.mdx" -import WhenExample from "../partials/config/_whenExample.mdx" -import AffixExample from "../partials/config/_affixExample.mdx" -import HelpTextExample from "../partials/config/_helpTextExample.mdx" -import RegexValidationExample from "../partials/config/_regexValidationExample.mdx" -import WhenRequirements from "../partials/config/_when-requirements.mdx" -import WhenNote from "../partials/config/_when-note.mdx" - -# Config - -The Config custom resource can be provided by a vendor to specify a Config page in the Replicated Admin Console for collecting customer-supplied values and template function rendering. - -The settings that appear on the Admin Console Config page are specified as an array of configuration _groups_ and _items_. - -The following example shows three groups defined in the Config custom resource manifest file, and how these groups are displayed on the Admin Console Config page. - -For more information about the properties of groups and items, see [Group Properties](#group-properties) and [Item Properties](#item-properties) below.
================ -File: docs/reference/custom-resource-config.mdx -================ -import ItemTypes from "../partials/config/_item-types.mdx" -import PropertyWhen from "../partials/config/_property-when.mdx" -import RandomStringNote from "../partials/config/_randomStringNote.mdx" -import NameExample from "../partials/config/_nameExample.mdx" -import TypeExample from "../partials/config/_typeExample.mdx" -import DefaultExample from "../partials/config/_defaultExample.mdx" -import ValueExample from "../partials/config/_valueExample.mdx" -import RequiredExample from "../partials/config/_requiredExample.mdx" -import RecommendedExample from "../partials/config/_recommendedExample.mdx" -import HiddenExample from "../partials/config/_hiddenExample.mdx" -import ReadonlyExample from "../partials/config/_readonlyExample.mdx" -import WhenExample from "../partials/config/_whenExample.mdx" -import AffixExample from "../partials/config/_affixExample.mdx" -import HelpTextExample from "../partials/config/_helpTextExample.mdx" -import RegexValidationExample from "../partials/config/_regexValidationExample.mdx" -import WhenRequirements from "../partials/config/_when-requirements.mdx" -import WhenNote from "../partials/config/_when-note.mdx" - -# Config - -The Config custom resource can be provided by a vendor to specify a Config page in the Replicated Admin Console for collecting customer-supplied values and rendering them with template functions. - -The settings that appear on the Admin Console Config page are specified as an array of configuration _groups_ and _items_. - -The following example shows three groups defined in the Config custom resource manifest file, and how these groups are displayed on the Admin Console Config page. - -For more information about the properties of groups and items, see [Group Properties](#group-properties) and [Item Properties](#item-properties) below. - -```yaml -apiVersion: kots.io/v1beta1 -kind: Config -metadata: - name: my-application -spec: - groups: - - name: example_group - title: First Group - items: - - name: http_enabled - title: HTTP Enabled - type: bool - default: "0" - - name: example_group_2 - title: Second Group - items: - - name: key - title: Key - type: textarea - - name: hostname - title: Hostname - type: text - - name: example_group_3 - title: Third Group - items: - - name: email-address - title: Email Address - type: text - - name: password_text - title: Password - type: password - value: '{{repl RandomString 10}}' -``` -![Three groups of items on the config page](/images/config-screen-groups.png) -[View a larger version of this image](/images/config-screen-groups.png) - -## Group Properties - -Groups have a `name`, `title`, `description`, and an array of `items`. - -### `description` - -Descriptive help text for the group that displays on the Admin Console Config page. Supports markdown formatting. - -To provide help text for individual items on the Config page, use the item `help_text` property. See [help_text](#help_text) below. - -```yaml -spec: - groups: - - name: example_group - title: First Group - # Provide a description of the input fields in the group - description: Select whether or not to enable HTTP. - items: - - name: http_enabled - title: HTTP Enabled - type: bool - default: "0" -``` - -### `name` - -A unique identifier for the group. - -```yaml -spec: - groups: - # The name must be unique - - name: example_group - title: First Group - items: - - name: http_enabled - title: HTTP Enabled - type: bool - default: "0" -``` - -### `title` - -The title of the group that displays on the Admin Console Config page. - -```yaml -spec: - groups: - - name: example_group - # First Group is the heading that appears on the Config page - title: First Group - items: - - name: http_enabled - title: HTTP Enabled - type: bool - default: "0" -``` - -### `when` - -The `when` property denotes groups that are displayed on the Admin Console **Config** page only when a condition evaluates to true. When the condition evaluates to false, the group is not displayed. - -<PropertyWhen/> - -:::note -`when` is a property of both groups and items. See [Item Properties > `when`](/reference/custom-resource-config#when-item) below. -::: - -#### Requirements and Limitations - -The `when` group property has the following requirements and limitations: - -<WhenRequirements/> - -#### Example - -In the following example, the `example_group_2` group of items is displayed on the **Config** page only when the user enables the `http_enabled` configuration field. This example uses the KOTS [ConfigOptionEquals](/reference/template-functions-config-context#configoptionequals) template function to evaluate the value of the `http_enabled` configuration field.
```yaml -spec: - groups: - - name: example_group - title: First Group - items: - - name: http_enabled - title: HTTP Enabled - type: bool - default: "0" - - name: example_group_2 - title: Second Group - # This group is displayed only when the `http_enabled` field is selected - when: repl{{ ConfigOptionEquals "http_enabled" "1" }} - items: - - name: key - title: Key - type: textarea - - name: hostname - title: Hostname - type: text - - name: example_group_3 - title: Third Group - items: - - name: email-address - title: Email Address - type: text - - name: password_text - title: Password - type: password - value: '{{repl RandomString 10}}' -``` - -![Only the first and third groups appear on the config screen](/images/config-screen-group-when-false.png) -[View a larger version of this image](/images/config-screen-group-when-false.png) - -For additional examples, see [Using Conditional Statements in Configuration Fields](/vendor/config-screen-conditional). - -### `items` - -Each group contains an array of items that map to input fields on the Admin Console Config screen. All items have `name`, `title`, and `type` properties and belong to a single group. - -For more information, see [Item Properties](#item-properties) and [Item Types](#item-types) below. - -## Item Properties - -Items have a `name`, `title`, `type`, and other optional properties. - -### `affix` - -<table> - <tr> - <th>Description</th> - <td> - <p>Items can be affixed <code>left</code> or <code>right</code>. Affixing items allows them to appear in the Admin Console on the same line.</p><p>Specify the <code>affix</code> field for all of the items in a particular group to preserve the line spacing and prevent crowded text.</p> - </td> - </tr> - <tr> - <th>Required?</th> - <td>No</td> - </tr> - <tr> - <th>Example</th> - <td><AffixExample/></td> - </tr> - <tr> - <th>Supports Go templates?</th> - <td>Yes</td> - </tr> -</table> - -### `default` - -<table> - <tr> - <th>Description</th> - <td> - <p>Defines the default value for the config item. If the user does not provide a value for the item, then the <code>default</code> value is applied.</p> - <p>If the <code>default</code> value is not associated with a <code>password</code> type config item, then it appears as placeholder text in the Admin Console.</p> - </td> - </tr> - <tr> - <th>Required?</th> - <td>No</td> - </tr> - <tr> - <th>Example</th> - <td><DefaultExample/></td> - </tr> - <tr> - <th>Supports Go templates?</th> - <td><p>Yes. Every time the user makes a change to their configuration settings for the application, any template functions used in the <code>default</code> property are reevaluated.</p></td> - </tr> -</table> - -### `help_text` - -<table> - <tr> - <th>Description</th> - <td> - <p>Displays a helpful message below the <code>title</code> for the config item in the Admin Console.</p> - <p>Markdown syntax is supported.
For more information about markdown syntax, see <a href="https://guides.github.com/features/mastering-markdown/">Basic writing and formatting syntax</a> in the GitHub Docs.</p> - </td> - </tr> - <tr> - <th>Required?</th> - <td>No</td> - </tr> - <tr> - <th>Example</th> - <td><HelpTextExample/></td> - </tr> - <tr> - <th>Supports Go templates?</th> - <td>Yes</td> - </tr> -</table> - -### `hidden` - -<table> - <tr> - <th>Description</th> - <td> - <p>Hidden items are not visible in the Admin Console.</p> - <p><RandomStringNote/></p> - </td> - </tr> - <tr> - <th>Required?</th> - <td>No</td> - </tr> - <tr> - <th>Example</th> - <td><HiddenExample/></td> - </tr> - <tr> - <th>Supports Go templates?</th> - <td>No</td> - </tr> -</table> - -### `name` - -<table> - <tr> - <th>Description</th> - <td><p>A unique identifier for the config item. Item names must be unique both within the group and across all groups. The item <code>name</code> is not displayed in the Admin Console.</p><p> The item <code>name</code> can be used with KOTS template functions in the Config context (such as ConfigOption or ConfigOptionEquals) to return the value of the item. For more information, see <a href="/reference/template-functions-config-context">Config Context</a>.</p></td> - </tr> - <tr> - <th>Required?</th> - <td>Yes</td> - </tr> - <tr> - <th>Example</th> - <td><NameExample/></td> - </tr> - <tr> - <th>Supports Go templates?</th> - <td>Yes</td> - </tr> -</table> - -### `readonly` - -<table> - <tr> - <th>Description</th> - <td> - <p>Readonly items are displayed in the Admin Console and users cannot edit their value.</p> - <p><RandomStringNote/></p> - </td> - </tr> - <tr> - <th>Required?</th> - <td>No</td> - </tr> - <tr> - <th>Example</th> - <td><ReadonlyExample/></td> - </tr> - <tr> - <th>Supports Go templates?</th> - <td>No</td> - </tr> -</table> - -### `recommended` - -<table> - <tr> - <th>Description</th> - <td><p>Displays a Recommended tag for the config item in the Admin Console.</p></td> - </tr> - <tr> - <th>Required?</th> - <td>No</td> - </tr> - <tr> - <th>Example</th> - <td> - <RecommendedExample/> - </td> - </tr> - <tr> - <th>Supports Go templates?</th> - <td>No</td> - </tr> -</table> - -### `required` - -<table> - <tr> - <th>Description</th> - <td><p>Displays a Required tag for the config item in the Admin Console. 
A required item prevents the application from starting until it has a value.</p></td> - </tr> - <tr> - <th>Required?</th> - <td>No</td> - </tr> - <tr> - <th>Example</th> - <td><RequiredExample/></td> - </tr> - <tr> - <th>Supports Go templates?</th> - <td>No</td> - </tr> -</table> - -### `title` - -<table> - <tr> - <th>Description</th> - <td><p>The title of the config item that displays in the Admin Console.</p></td> - </tr> - <tr> - <th>Required?</th> - <td>Yes</td> - </tr> - <tr> - <th>Example</th> - <td><HelpTextExample/></td> - </tr> - <tr> - <th>Supports Go templates?</th> - <td>Yes</td> - </tr> -</table> - -### `type` - -<table> - <tr> - <th>Description</th> - <td> - <p>Each item has a <code>type</code> property that defines the type of user input accepted by the field.</p> - <p>The <code>type</code> property supports the following values: <ItemTypes/></p> - <p>For information about each type, see <a href="#item-types">Item Types</a>.</p> - </td> - </tr> - <tr> - <th>Required?</th> - <td>Yes</td> - </tr> - <tr> - <th>Example</th> - <td><TypeExample/></td> - </tr> - <tr> - <th>Supports Go templates?</th> - <td>No</td> - </tr> -</table> - -### `value` - -<table> - <tr> - <th>Description</th> - <td> - <p>Defines the value of the config item. Data that you add to <code>value</code> appears as the HTML input value for the config item in the Admin Console.</p> - <p>If the config item is not readonly, then the data that you add to <code>value</code> is overwritten by any user input for the item. If the item is readonly, then the data that you add to <code>value</code> cannot be overwritten.</p> - </td> - </tr> - <tr> - <th>Required?</th> - <td>No</td> - </tr> - <tr> - <th>Example</th> - <td><ValueExample/></td> - </tr> - <tr> - <th>Supports Go templates?</th> - <td><p>Yes</p><RandomStringNote/></td> - </tr> -</table> - -### `when` {#when-item} - -<table> - <tr> - <th>Description</th> - <td><p>The <code>when</code> property denotes items that are displayed on the Admin Console <strong>Config</strong> page only when a condition evaluates to true. When the condition evaluates to false, the item is not displayed.</p><PropertyWhen/><p>The `when` item property has the following requirements and limitations:</p><WhenRequirements/><ul><li><code>when</code> cannot be applied to the items nested under a <code>radio</code>, <code>dropdown</code>, or <code>select_one</code> item. To conditionally show or hide <code>radio</code>, <code>dropdown</code>, or <code>select_one</code> items, apply the <code>when</code> property to the item itself.</li></ul><WhenNote/></td> - </tr> - <tr> - <th>Required?</th> - <td>No</td> - </tr> - <tr> - <th>Example</th> - <td> - <p>Display the <code>database_host</code> and <code>database_password</code> items only when the user selects <code>external</code> for the <code>db_type</code> item:</p><p><WhenExample/></p><p>For additional examples, see <a href="/vendor/config-screen-conditional">Using Conditional Statements in Configuration Fields</a>.</p> - </td> - </tr> - <tr> - <th>Supports Go templates?</th> - <td>Yes</td> - </tr> -</table> - -### `validation` - -<table> - <tr> - <th>Description</th> - <td><p>The <code>validation</code> property can be used to validate an item's value, allowing you to specify custom validation rules that determine whether the value is valid.</p></td> - </tr> - <tr> - <th>Required?</th> - <td>No</td> - </tr> - <tr> - <th>Example</th> - <td> - <p>Returns a validation error if the <code>password</code> value does not match the regex.
<br/>Because the <code>jwt_token</code> file is optional, its content is validated only if a file is uploaded.</p> - <RegexValidationExample/> - </td> - </tr> - <tr> - <th>Supports Go templates?</th> - <td>No</td> - </tr> -</table> - -For information about supported validation types, see [Item Validation](#item-validation). - -## Item Types - -This section describes each of the item types: -<ItemTypes/> - -### `bool` -The `bool` input type should use a "0" or "1" to set the value. -```yaml - - name: group_title - title: Group Title - items: - - name: http_enabled - title: HTTP Enabled - type: bool - default: "0" -``` - -![Boolean selector on the configuration screen](/images/config-screen-bool.png) - -[View a larger version of this image](/images/config-screen-bool.png) - -### `dropdown` - -> Introduced in KOTS v1.114.0 - -The `dropdown` item type includes one or more nested items that are displayed in a dropdown on the Admin Console config screen. Dropdowns are especially useful for displaying long lists of options. You can also use the [`radio`](#radio) item type to display radio buttons for items with shorter lists of options. - -To set a default value for `dropdown` items, set the `default` field to the name of the target nested item. - -```yaml -spec: - groups: - - name: example_settings - title: My Example Config - items: - - name: version - title: Version - default: version_latest - type: dropdown - items: - - name: version_latest - title: latest - - name: version_123 - title: 1.2.3 - - name: version_124 - title: 1.2.4 - - name: version_125 - title: 1.2.5 -``` - -![Dropdown item type on config screen](/images/config-screen-dropdown.png) - -[View a larger version of this image](/images/config-screen-dropdown.png) - -![Dropdown item type expanded](/images/config-screen-dropdown-open.png) - -[View a larger version of this image](/images/config-screen-dropdown-open.png) - -### `file` -A `file` is a special type of form field that renders an [`<input type="file" />`](https://www.w3schools.com/tags/tag_input.asp) HTML element. -Only the contents of the file, not the name, are captured. -See the [`ConfigOptionData`](template-functions-config-context#configoptiondata) template function for examples on how to use the file contents in your application. - -```yaml - - name: certs - title: TLS Configuration - items: - - name: tls_private_key_file - title: Private Key - type: file - - name: tls_certificate_file - title: Certificate - type: file -``` - -![File input field on the configuration screen](/images/config-screen-file.png) - -[View a larger version of this image](/images/config-screen-file.png) - -### `heading` -The `heading` type allows you to display a group heading as a sub-element within a group. -This is useful when you would like to use a config group to group items together, but still separate the items visually. - -```yaml - - name: ldap_settings - title: LDAP Server Settings - items: - ... - - name: ldap_schema - type: heading - title: LDAP schema - ... -``` - -![Heading on the configuration screen](/images/config-screen-heading.png) - -[View a larger version of this image](/images/config-screen-heading.png) - -### `label` -The `label` type allows you to display an input label. -```yaml - - name: email - title: Email - items: - - name: email-address - title: Email Address - type: text - - name: description - type: label - title: "Note: The system will send you an email every hour."
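- # label items are display-only: they render text and do not collect a value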
``` -![Email address label on the configuration screen](/images/config-screen-label.png) - -[View a larger version of this image](/images/config-screen-label.png) - -### `password` -The `password` type is a text field that hides the character input. - -```yaml - - name: password_text - title: Password Text - type: password - value: '{{repl RandomString 10}}' -``` - -![Password text field on the configuration screen](/images/config-screen-password.png) - -[View a larger version of this image](/images/config-screen-password.png) - -### `radio` - -> Introduced in KOTS v1.114.0 - -The `radio` item type includes one or more nested items that are displayed as radio buttons on the Admin Console config screen. Radio buttons are especially useful for displaying short lists of options. You can also use the [`dropdown`](#dropdown) item type for items with longer lists of options. - -To set a default value for `radio` items, set the `default` field to the name of the target nested item. - -```yaml -spec: - groups: - - name: example_settings - title: My Example Config - items: - - name: authentication_type - title: Authentication Type - default: authentication_type_anonymous - type: radio - items: - - name: authentication_type_anonymous - title: Anonymous - - name: authentication_type_password - title: Password -``` - -### `select_one` (Deprecated) - -:::important -The `select_one` item type is deprecated and is not recommended for use. To display config items with multiple options, use the [`radio`](#radio) or [`dropdown`](#dropdown) item types instead. -::: - -`select_one` items must contain nested items. The nested items are displayed as radio buttons in the Admin Console. - -You can use the `name` field of a `select_one` item with KOTS template functions in the Config context (such as ConfigOption or ConfigOptionEquals) to return the option selected by the user. - -For example, if the user selects the **Password** option for the `select_one` item shown below, then the template function `'{{repl ConfigOption "authentication_type"}}'` is rendered as `authentication_type_password`. For more information about working with template functions in the Config context, see [Config Context](/reference/template-functions-config-context). - -```yaml -spec: - groups: - - name: example_settings - title: My Example Config - description: Configuration to serve as an example for creating your own. See [https://kots.io/reference/v1beta1/config/](https://kots.io/reference/v1beta1/config/) for configuration docs. In this case, we provide example fields for configuring an Nginx welcome page. - items: - - name: authentication_type - title: Authentication Type - default: authentication_type_anonymous - type: select_one - items: - - name: authentication_type_anonymous - title: Anonymous - - name: authentication_type_password - title: Password -``` - -![Select one field on the configuration screen](/images/config-screen-selectone.png) - -### `text` -A `text` input field allows users to enter a string value. -All of the optional item properties are available for this input type. - -```yaml - - name: example_text_input - title: Example Text Input - type: text -``` - -![Text field on the configuration screen](/images/config-screen-text.png) - -:::important -Do not store secrets or passwords in `text` items because they are not encrypted or masked and can be easily accessed. Instead, use [`password`](#password) items.
-::: - -### `textarea` -A `textarea` item creates a multi-line text input for when users need to enter a sizeable amount of text. - -```yaml - - name: custom_key - title: Set your secret key for your app - description: Paste in your Custom Key - items: - - name: key - title: Key - type: textarea - - name: hostname - title: Hostname - type: text -``` -![Text area field on the configuration screen](/images/config-screen-textarea.png) - -## Item Validation - -A `validation` can be specified to validate the value of an item. `regex` is the only supported validation type. - -The item is validated against the specified validation rules, and a validation message is returned if a rule is not satisfied. If no custom validation message is specified, a default message is returned. - -The validation rules are as follows: - -- An item is validated only when its value is not empty. -- Items of types `text`, `textarea`, `password`, and `file` are validated, but `repeatable` items are not validated. -- If an item is marked as `hidden` or if its `when` condition is set to `false`, the item is not validated. -- If a group `when` condition is set to `false`, the items in the group are not validated. - -### `regex` -For applications installed with KOTS v1.98.0 or later, a `regex` can be used to validate whether an item's value matches the provided regular expression `pattern`. The regex pattern must use [RE2 regular expression](https://github.com/google/re2/wiki/Syntax) syntax and can validate the `text`, `textarea`, `password`, and `file` field types. - - The default validation message is `Value does not match regex`. - -<RegexValidationExample/> - -![Password validation error](/images/regex_password_validation_error.png) - -![File validation error only when uploaded](/images/regex_file_validation_error.png) - -## Repeatable Items - -A repeatable config item copies a YAML array entry or YAML document for as many values as are provided. Any number of values can be added to a repeatable item to generate additional copies. - -To make an item repeatable, set `repeatable` to true: - -```yaml - - name: ports_group - items: - - name: serviceport - title: Service Port - type: text - repeatable: true -``` - -Repeatable items do not use the `default` or `value` fields, but instead a `valuesByGroup` field. -`valuesByGroup` must have an entry for the parent Config Group name, with all of the default `key:value` pairs nested in the group. At least one default entry is required for the repeatable item: - -```yaml - valuesByGroup: - ports_group: - port-default-1: "80" -``` - -### Limitations - -* Repeatable items work only for text, textarea, and file types. -* Repeatable item names must consist only of lowercase alphanumeric characters. -* Repeatable items are only supported for Kubernetes manifests, not Helm charts. - -### Template Targets - -Repeatable items require that you provide at least one `template`. The `template` defines a YAML target in the manifest to duplicate for each repeatable item. - -Required fields for a template target are `apiVersion`, `kind`, and `name`. - -`namespace` is an optional template target field to match a YAML document's `metadata.namespace` property when the same filename is used across multiple namespaces. - -The entire YAML node at the target is duplicated, including nested fields. - -The `yamlPath` field of the `template` must denote index position for arrays using square brackets. For example, `spec.ports[0]` selects the first port entry for duplication.
All duplicate YAML is appended to the final array in the `yamlPath`. - -`yamlPath` must end with an array. - -**Example:** - -```yaml - templates: - - apiVersion: v1 - kind: Service - name: my-service - namespace: my-app - yamlPath: 'spec.ports[0]' -``` - -If the `yamlPath` field is not present, the entire YAML document matching the `template` is replaced with a copy for each of the repeatable item entries. The `metadata.name` field of the new document reflects the repeatable item `key`. - -### Templating - -Repeatable items are referenced using the delimiters `repl[[ .itemName ]]` or `[[repl .itemName ]]`. These delimiters can be placed anywhere inside of the `yamlPath` target node: - -```yaml - - port: repl{{ ConfigOption "[[repl .serviceport ]]" | ParseInt }} - name: '[[repl .serviceport ]]' -``` -This repeatable templating is not compatible with Sprig templating functions. It is designed for inserting repeatable `keys` into the manifest. Repeatable templating can be placed inside of Replicated config templating. - -### Ordering - -Repeatable templates are processed before config template rendering. - -Repeatable items are processed in order of the template targets in the Config Spec file. Effectively, this ordering is from the top of the Config Spec, by Config Group, by Config Item, and then by template target. - -```yaml - - name: ports_group - items: - - name: serviceport - title: Service Port - type: text - repeatable: true - templates: - - apiVersion: v1 #processed first - kind: Service - name: my-service - namespace: my-app - yamlPath: 'spec.ports[0]' - - apiVersion: v1 #processed second - kind: Service - name: my-service - namespace: my-app - {other item properties ...} - - name: other_ports - title: Other Service Port - type: text - repeatable: true - templates: - - apiVersion: v1 #processed third - kind: Service - name: my-other-service - namespace: my-app - {other item properties ...} - - name: deployments - items: - - name: deployment-name - title: Deployment Names - type: text - repeatable: true - templates: - - apiVersion: apps/v1 #processed fourth - kind: Deployment - name: my-deployment - namespace: my-app - {other item properties ...} -``` - -## Repeatable Examples - -In these examples, the default service port of "80" is included with the release. Port 443 is added as an additional port on the Admin Console configuration page, which is stored in the ConfigValues file. - -### Repeatable Item Example for a yamlPath - -**Config custom resource manifest file:** - -```yaml - - name: ports_group - items: - - name: serviceport - title: Service Port - type: text - repeatable: true - templates: - - apiVersion: v1 - kind: Service - name: my-service - namespace: my-app - yamlPath: spec.ports[0] - valuesByGroup: - ports_group: - port-default-1: "80" -``` - -**Config values:** -```yaml -apiVersion: kots.io/v1beta1 -kind: ConfigValues -metadata: - name: example_app -spec: - values: - port-default-1: - repeatableItem: serviceport - value: "80" - serviceport-8jdn2bgd: - repeatableItem: serviceport - value: "443" -``` - -**Template manifest:** -```yaml -apiVersion: v1 -kind: Service -metadata: - name: my-service - namespace: my-app -spec: - type: NodePort - ports: - - port: repl{{ ConfigOption "[[repl .serviceport ]]" | ParseInt }} - name: '[[repl .serviceport ]]' - selector: - app: repeat_example - component: my-deployment -``` - -**After repeatable config processing:** - -**Note**: This phase is internal to configuration rendering for KOTS.
This example is provided only to further explain the templating process. - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: my-service - namespace: my-app -spec: - type: NodePort - ports: - - port: repl{{ ConfigOption "port-default-1" | ParseInt }} - name: 'port-default-1' - - port: repl{{ ConfigOption "serviceport-8jdn2bgd" | ParseInt }} - name: 'serviceport-8jdn2bgd' - selector: - app: repeat_example - component: my-deployment -``` - -**Resulting manifest:** -```yaml -apiVersion: v1 -kind: Service -metadata: - name: my-service - namespace: my-app -spec: - type: NodePort - ports: - - port: 80 - name: port-default-1 - - port: 443 - name: serviceport-8jdn2bgd - selector: - app: repeat_example - component: my-deployment -``` - -### Repeatable Item Example for an Entire Document -**Config spec:** -```yaml - - name: ports_group - items: - - name: serviceport - title: Service Port - type: text - repeatable: true - templates: - - apiVersion: v1 - kind: Service - name: my-service - namespace: my-app - valuesByGroup: - ports_group: - port-default-1: "80" -``` - -**Config values:** -```yaml -apiVersion: kots.io/v1beta1 -kind: ConfigValues -metadata: - name: example_app -spec: - values: - port-default-1: - repeatableItem: serviceport - value: "80" - serviceport-8jdn2bgd: - repeatableItem: serviceport - value: "443" -``` - -**Template manifest:** -```yaml -apiVersion: v1 -kind: Service -metadata: - name: my-service - namespace: my-app -spec: - type: NodePort - ports: - - port: repl{{ ConfigOption "[[repl .serviceport ]]" | ParseInt }} - selector: - app: repeat_example - component: repl[[ .serviceport ]] -``` - -**After repeatable config processing:** - -**Note**: This phase is internal to configuration rendering for KOTS. This example is provided only to further explain the templating process. - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: port-default-1 - namespace: my-app -spec: - type: NodePort - ports: - - port: repl{{ ConfigOption "port-default-1" | ParseInt }} - selector: - app: repeat_example - component: port-default-1 ---- -apiVersion: v1 -kind: Service -metadata: - name: serviceport-8jdn2bgd - namespace: my-app -spec: - type: NodePort - ports: - - port: repl{{ ConfigOption "serviceport-8jdn2bgd" | ParseInt }} - selector: - app: repeat_example - component: serviceport-8jdn2bgd -``` - -**Resulting manifest:** -```yaml -apiVersion: v1 -kind: Service -metadata: - name: port-default-1 - namespace: my-app -spec: - type: NodePort - ports: - - port: 80 - selector: - app: repeat_example - component: port-default-1 ---- -apiVersion: v1 -kind: Service -metadata: - name: serviceport-8jdn2bgd - namespace: my-app -spec: - type: NodePort - ports: - - port: 443 - selector: - app: repeat_example - component: serviceport-8jdn2bgd -``` - -================ -File: docs/reference/custom-resource-helmchart-v2.mdx -================ -import VersionLimitation from "../partials/helm/_helm-version-limitation.mdx" -import HelmBuilderRequirements from "../partials/helm/_helm-builder-requirements.mdx" -import Chart from "../partials/helm/_helm-cr-chart.mdx" -import ChartName from "../partials/helm/_helm-cr-chart-name.mdx" -import ChartVersion from "../partials/helm/_helm-cr-chart-version.mdx" -import ChartReleaseName from "../partials/helm/_helm-cr-chart-release-name.mdx" -import HelmUpgradeFlags from "../partials/helm/_helm-cr-upgrade-flags.mdx" -import Values from "../partials/helm/_helm-cr-values.mdx" -import Weight from "../partials/helm/_helm-cr-weight.mdx" -import Exclude from
"../partials/helm/_helm-cr-exclude.mdx" -import OptionalValues from "../partials/helm/_helm-cr-optional-values.mdx" -import OptionalValuesWhen from "../partials/helm/_helm-cr-optional-values-when.mdx" -import OptionalValuesRecursiveMerge from "../partials/helm/_helm-cr-optional-values-recursive-merge.mdx" -import Namespace from "../partials/helm/_helm-cr-namespace.mdx" -import BuilderAirgapIntro from "../partials/helm/_helm-cr-builder-airgap-intro.mdx" -import BuilderExample from "../partials/helm/_helm-cr-builder-example.mdx" -import V2Example from "../partials/helm/_v2-native-helm-cr-example.mdx" -import KotsHelmCrDescription from "../partials/helm/_kots-helm-cr-description.mdx" - -# HelmChart v2 - -> Introduced in Replicated KOTS v1.99.0 - -<KotsHelmCrDescription/> - -For more information, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about). - -## Example - -The following is an example manifest file for the HelmChart v2 custom resource: - -<V2Example/> - -## chart - -<Chart/> - -### chart.name - -<ChartName/> - -### chart.chartVersion - -<ChartVersion/> - -## releaseName - -<ChartReleaseName/> - -## weight - -<Weight/> - -## helmUpgradeFlags - -<HelmUpgradeFlags/> - -## exclude - -<Exclude/> - -## values - -<Values/> - -For more information about using `values`, see [Setting Helm Chart Values with KOTS](/vendor/helm-optional-value-keys). - -## optionalValues - -<OptionalValues/> - -For more information about using `optionalValues`, see [Setting Helm Chart Values with KOTS](/vendor/helm-optional-value-keys). - -### optionalValues.when - -<OptionalValuesWhen/> - -### optionalValues.recursiveMerge - -<OptionalValuesRecursiveMerge/> - -**Default**: False - -For an example of recursive and non-recursive merging, see [About Recursive Merge](/vendor/helm-optional-value-keys#recursive-merge). - -## namespace - -<Namespace/> - -## builder - -The `builder` key is used to provide Helm values that are used during various stages of processing the Helm chart. - -The `builder` key is required for the following use cases: - -* To create an `.airgap` bundle for installations into air gap environments. - - <BuilderAirgapIntro/> - - For more information, see [Packaging Air Gap Bundles for Helm Charts](/vendor/helm-packaging-airgap-bundles). - -* To support online installations that use a local private registry, the `builder` field renders the Helm chart with all of the necessary images so that KOTS knows where to pull the images. - - You cannot prevent customers from configuring a local private registry in the Admin Console. If you think any of your customers will use a local private registry, you should use the `builder` key. For more information, see [Configuring Local Image Registries](/enterprise/image-registry-settings). - -<HelmBuilderRequirements/> - -* Use the same `builder` configuration to support the use of local registries in both online and air gap installations. If you already configured the `builder` key to support air gap installations, then no additional configuration is required. 
- -**Example:** - -<BuilderExample/> - -================ -File: docs/reference/custom-resource-helmchart.mdx -================ -import VersionLimitation from "../partials/helm/_helm-version-limitation.mdx" -import HelmBuilderRequirements from "../partials/helm/_helm-builder-requirements.mdx" -import Chart from "../partials/helm/_helm-cr-chart.mdx" -import ChartName from "../partials/helm/_helm-cr-chart-name.mdx" -import ChartVersion from "../partials/helm/_helm-cr-chart-version.mdx" -import ChartReleaseName from "../partials/helm/_helm-cr-chart-release-name.mdx" -import HelmUpgradeFlags from "../partials/helm/_helm-cr-upgrade-flags.mdx" -import Values from "../partials/helm/_helm-cr-values.mdx" -import Weight from "../partials/helm/_helm-cr-weight.mdx" -import WeightLimitation from "../partials/helm/_helm-cr-weight-limitation.mdx" -import Exclude from "../partials/helm/_helm-cr-exclude.mdx" -import OptionalValues from "../partials/helm/_helm-cr-optional-values.mdx" -import OptionalValuesWhen from "../partials/helm/_helm-cr-optional-values-when.mdx" -import OptionalValuesRecursiveMerge from "../partials/helm/_helm-cr-optional-values-recursive-merge.mdx" -import Namespace from "../partials/helm/_helm-cr-namespace.mdx" -import BuilderAirgapIntro from "../partials/helm/_helm-cr-builder-airgap-intro.mdx" -import BuilderExample from "../partials/helm/_helm-cr-builder-example.mdx" -import Deprecated from "../partials/helm/_replicated-deprecated.mdx" -import ReplicatedHelmMigration from "../partials/helm/_replicated-helm-migration.mdx" -import KotsHelmCrDescription from "../partials/helm/_kots-helm-cr-description.mdx" - - -# HelmChart v1 (Deprecated) - -:::important -<Deprecated/> -::: - -<KotsHelmCrDescription/> - -For more information, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about). - -## Example - -The following is an example manifest file for the HelmChart v1 custom resource: - -```yaml -apiVersion: kots.io/v1beta1 -kind: HelmChart -metadata: - name: samplechart -spec: - # chart identifies a matching chart from a .tgz - chart: - name: samplechart - chartVersion: 3.1.7 - releaseName: samplechart-release-1 - - exclude: "repl{{ ConfigOptionEquals `include_chart` `include_chart_no`}}" - - # helmVersion identifies the Helm Version used to render the chart. Default is v3. - helmVersion: v3 - - # useHelmInstall identifies the kots.io/v1beta1 installation method - useHelmInstall: true - - # weight determines the order that charts with "useHelmInstall: true" are applied, with lower weights first. - weight: 42 - - # helmUpgradeFlags specifies additional flags to pass to the `helm upgrade` command. 
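- # in the example below, flags that take a value are passed either as separate - # array entries (--timeout followed by 1200s) or in --flag=value form (--history-max=15)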
helmUpgradeFlags: - - --skip-crds - - --no-hooks - - --timeout - - 1200s - - --history-max=15 - - # values are used in the customer environment, as a pre-render step - # these values will be supplied to helm template - values: - postgresql: - enabled: repl{{ ConfigOptionEquals `postgres_type` `embedded_postgres`}} - - optionalValues: - - when: "repl{{ ConfigOptionEquals `postgres_type` `external_postgres`}}" - recursiveMerge: false - values: - postgresql: - postgresqlDatabase: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_database`}}repl{{ end}}" - postgresqlUsername: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_username`}}repl{{ end}}" - postgresqlHost: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_host`}}repl{{ end}}" - postgresqlPassword: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_password`}}repl{{ end}}" - postgresqlPort: "repl{{ if ConfigOptionEquals `postgres_type` `external_postgres`}}repl{{ ConfigOption `external_postgres_port`}}repl{{ end}}" - - # namespace allows for a chart to be installed in an alternate namespace to - # the default - namespace: samplechart-namespace - - # builder values provide a way to render the chart with all images - # and manifests. this is used in Replicated to create `.airgap` packages - builder: - postgresql: - enabled: true -``` - -## chart - -<Chart/> - -### chart.name - -<ChartName/> - -### chart.chartVersion - -<ChartVersion/> - -### chart.releaseName - -> Introduced in Replicated KOTS v1.73.0 - -<ChartReleaseName/> - -## helmVersion - -Identifies the Helm version used to render the chart. Acceptable values are `v2` or `v3`. `v3` is the default when no value is specified. - -:::note -<VersionLimitation/> -::: - -## useHelmInstall - -Identifies the method that KOTS uses to install the Helm chart: -* `useHelmInstall: true`: KOTS uses Kustomize to modify the chart, and then repackages the resulting manifests to install them. This was previously referred to as the _native Helm_ installation method. - -* `useHelmInstall: false`: KOTS renders the Helm templates and deploys them as standard Kubernetes manifests using `kubectl apply`. This was previously referred to as the _Replicated Helm_ installation method. - - :::note - <ReplicatedHelmMigration/> - ::: - -For more information about how KOTS deploys Helm charts when `useHelmInstall` is `true` or `false`, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about). - -## weight - -<WeightLimitation/> - -<Weight/> - -## helmUpgradeFlags - -The `helmUpgradeFlags` field is _not_ supported for HelmChart custom resources with `useHelmInstall: false`. - -<HelmUpgradeFlags/> - -## values - -<Values/> - -## exclude - -<Exclude/> - -## optionalValues - -<OptionalValues/> - -### optionalValues.when - -<OptionalValuesWhen/> - -### optionalValues.recursiveMerge - -:::note -`recursiveMerge` is available in KOTS v1.38.0 and later. -::: - -<OptionalValuesRecursiveMerge/> - -**Default**: False - -## namespace - -<Namespace/> - -## builder - -<BuilderAirgapIntro/> - -<HelmBuilderRequirements/> - -**Example:** - -<BuilderExample/> - -================ -File: docs/reference/custom-resource-identity.md -================ -:::important -This topic has been removed from the product documentation because this Beta feature is deprecated.
-::: - -# Identity (Beta) - -The Identity custom resource allows you to configure the Replicated identity service for your application. - -The following is an example manifest file for the Identity custom resource: - -```yaml -apiVersion: kots.io/v1beta1 -kind: Identity -metadata: - name: my-application -spec: - identityIssuerURL: https://{{repl ConfigOption "ingress_hostname"}}/dex - oidcRedirectUris: - - https://{{repl ConfigOption "ingress_hostname"}}/oidc/login/callback - supportedProviders: [ oidc ] - requireIdentityProvider: true - roles: - - id: member - name: Member - description: Can see every member and non-secret team in the organization. - - id: owner - name: Owner - description: Has full administrative access to the entire organization. - oauth2AlwaysShowLoginScreen: false - signingKeysExpiration: 6h - idTokensExpiration: 24h - webConfig: - title: My App - theme: - logoUrl: data:image/png;base64,<encoded_base64_stream> - logoBase64: <base64 encoded png file> - styleCssBase64: <base64 encoded [styles.css](https://github.com/dexidp/dex/blob/v2.27.0/web/themes/coreos/styles.css) file> - faviconBase64: <base64 encoded png file> -``` - -## identityIssuerURL -**(required)** This is the canonical URL that all clients must use to refer to the OIDC identity service. -If a path is provided, the HTTP service will listen at a non-root URL. - -## oidcRedirectUris -**(required)** A registered set of redirect URIs. -When the Replicated app manager identity OIDC server redirects to the client, the requested redirect URI must match one of these values. - -## supportedProviders -A list of supported identity providers. -If unspecified, all providers will be available. - -## requireIdentityProvider -If true, require the identity provider configuration to be set by the customer before the app can be deployed. - -## roles -**(`id` required)** A list of roles to be mapped to identity provider groups by the customer on the Replicated Admin Console identity service configuration page. - -## oauth2AlwaysShowLoginScreen -If true, show the identity provider selection screen even if there's only one configured. -Default `false`. - -## signingKeysExpiration -Defines the duration of time after which the SigningKeys will be rotated. -Default `6h`. - -## idTokensExpiration -Defines the duration of time for which the IdTokens will be valid. -Default `24h`. - -## webConfig -Can be used for branding the application identity login screen. - -================ -File: docs/reference/custom-resource-lintconfig.mdx -================ -import LinterDefinition from "../partials/linter-rules/_linter-definition.mdx" - -# LintConfig - -<LinterDefinition/> - -The linter runs automatically against releases that you create in the Replicated vendor portal and displays any error or warning messages in the vendor portal UI. - -The linter rules have default levels that can be overridden. You can configure custom levels by adding a LintConfig manifest file (`kind: LintConfig`) to the release. Specify the rule name and level you want the rule to have. Rules that are not included in the LintConfig manifest file keep their default level. For information about linter rules and their default levels, see [Linter Rules](/reference/linter).
The supported levels are: - -<table> - <tr> - <th width="20%">Level</th> - <th width="80%">Description</th> - </tr> - <tr> - <td>error</td> - <td>The rule is enabled and shows as an error.</td> - </tr> - <tr> - <td>warn</td> - <td>The rule is enabled and shows as a warning.</td> - </tr> - <tr> - <td>info</td> - <td>The rule is enabled and shows an informational message.</td> - </tr> - <tr> - <td>off</td> - <td>The rule is disabled.</td> - </tr> - </table> - - -## Example -The following example manifest file overrides the level for the `application-icon` rule, setting it to `off` to disable the rule. Additionally, the level for the `application-statusInformers` rule is changed to `error`, so instead of the default warning, it displays an error if the application is missing status informers. - -```yaml -apiVersion: kots.io/v1beta1 -kind: LintConfig -metadata: - name: default-lint-config -spec: - rules: - - name: application-icon - level: "off" - - name: application-statusInformers - level: "error" -``` - -================ -File: docs/reference/custom-resource-preflight.md -================ -# Preflight and Support Bundle - -You can define preflight checks and support bundle specifications for Replicated KOTS and Helm installations. - -Preflight collectors and analyzers provide cluster operators with clear feedback for any missing requirements or incompatibilities in the target environment before an application is deployed. Preflight checks are not automatically included in releases, so you must define them if you want to include them with a release. - -Support bundles collect and analyze troubleshooting data from a cluster and help diagnose problems with application deployments. For KOTS, default support bundles are automatically included with releases and can be customized. For Helm installations, support bundles are not pre-enabled and must be defined if you want to use them. - -Collectors and analyzers are configured in Preflight and Support Bundle custom resources. - -:::note -Built-in redactors run by default for preflight checks and support bundles to protect sensitive information. -::: - -## Defining Custom Resources - -To define preflight checks or customize the default support bundle settings, add the corresponding custom resource YAML to your release. Then add custom collector and analyzer specifications to the custom resource. For more information about these troubleshoot features and how to configure them, see [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about). - -The following sections show basic Preflight and Support Bundle custom resource definitions. - -### Preflight - -The Preflight custom resource uses `kind: Preflight`: - -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: Preflight -metadata: - name: sample -spec: - collectors: [] - analyzers: [] -``` - -### Support Bundle - -The Support Bundle custom resource uses `kind: SupportBundle`: - -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: SupportBundle -metadata: - name: sample -spec: - collectors: [] - analyzers: [] -``` - -## Global Fields - -Global fields, also known as shared properties, are fields that are supported on all collectors or all analyzers. The following sections list the global fields for [collectors](#collector-global-fields) and [analyzers](#analyzer-global-fields), respectively. - -Additionally, each collector and analyzer has its own fields. For more information about collector- and analyzer-specific fields, see the [Troubleshoot documentation](https://troubleshoot.sh/docs/).
### Collector Global Fields - -The following fields are supported on all optional collectors for preflights and support bundles. For a list of collectors, see [All Collectors](https://troubleshoot.sh/docs/collect/all/) in the Troubleshoot documentation. - -<table> - <tr> - <th width="30%">Field Name</th> - <th width="70%">Description</th> - </tr> - <tr> - <td><code>collectorName</code></td> - <td>(Optional) A collector can specify the <code>collectorName</code> field. In some collectors, this field controls the path where result files are stored in the support bundle.</td> - </tr> - <tr> - <td><code>exclude</code></td> - <td>(Optional) (KOTS Only) A condition based on the configuration available at runtime can be specified in the <code>exclude</code> field. This is useful for deployment techniques that allow templating for Replicated KOTS and the optional KOTS Helm component. When this value is <code>true</code>, the collector is not included.</td> - </tr> -</table> - -### KOTS Collector Example - -This is an example of a collector definition for a KOTS support bundle: - -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: SupportBundle -metadata: - name: sample -spec: - collectors: - - collectd: - collectorName: "collectd" - image: busybox:1 - namespace: default - hostPath: "/var/lib/collectd/rrd" - imagePullPolicy: IfNotPresent - imagePullSecret: - name: my-temporary-secret - data: - .dockerconfigjson: ewoJICJhdXRocyI6IHsKzCQksHR0cHM6Ly9pbmRleC5kb2NrZXIuaW8vdjEvIjoge30KCX0sCgkiSHR0cEhlYWRlcnMiOiB7CgkJIlVzZXItQWdlbnQiOiAiRG9ja2VyLUNsaWVudC8xOS4wMy4xMiAoZGFyd2luKSIKCX0sCgkiY3JlZHNTdG9yZSI6ICJkZXNrdG9wIiwKCSJleHBlcmltZW50YWwiOiAiZGlzYWJsZWQiLAoJInN0YWNrT3JjaGVzdHJhdG9yIjogInN3YXJtIgp9 - type: kubernetes.io/dockerconfigjson -``` - -### Analyzer Global Fields - -The following fields are supported on all optional analyzers for preflights and support bundles. For a list of analyzers, see [Analyzing Data](https://troubleshoot.sh/docs/analyze/) in the Troubleshoot documentation. - -<table> - <tr> - <th width="30%">Field Name</th> - <th width="70%">Description</th> - </tr> - <tr> - <td><code>collectorName</code></td> - <td>(Optional) An analyzer can specify the <code>collectorName</code> field.</td> - </tr> - <tr> - <td><code>exclude</code></td> - <td>(Optional) (KOTS Only) A condition based on the configuration available at runtime can be specified in the <code>exclude</code> field. This is useful for deployment techniques that allow templating for KOTS and the optional KOTS Helm component. When this value is <code>true</code>, the analyzer is not included.</td> - </tr> - <tr> - <td><code>strict</code></td> - <td>(Optional) (KOTS Only) An analyzer can be set to <code>strict: true</code> so that <code>fail</code> outcomes for that analyzer prevent the release from being deployed by KOTS until the vendor-specified requirements are met. When <code>exclude: true</code> is also specified, <code>exclude</code> overrides <code>strict</code> and the analyzer is not executed.</td> - </tr> -</table> - -### KOTS Analyzer Example - -This is an example of a KOTS analyzer definition with a strict preflight check and `exclude` set for installations that do not use Replicated kURL. In this case, the strict preflight is enforced on an embedded cluster but not on an existing cluster or air gap cluster.
- -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: Preflight -metadata: - name: check-kubernetes-version -spec: - analyzers: - - clusterVersion: - exclude: 'repl{{ (not IsKurl) }}' - strict: true - outcomes: - - fail: - when: "< 1.16.0" - message: The application requires Kubernetes 1.16.0 or later - uri: https://kubernetes.io - - warn: - when: "< 1.17.0" - message: Your cluster meets the minimum version of Kubernetes, but we recommend you update to 1.17.0 or later. - uri: https://kubernetes.io - - pass: - message: Your cluster meets the recommended and required versions of Kubernetes. -``` - -================ -File: docs/reference/custom-resource-redactor.md -================ -# Redactor (KOTS Only) - -This topic describes how to define redactors with the Redactor custom resource. - -:::note -Custom redactors defined with the Redactor resource apply only to installations with Replicated KOTS. -::: - -## Overview - -Preflight checks and support bundles include built-in redactors. These built-in redactors use regular expressions to identify and hide potentially sensitive data before it is analyzed. For example, the built-in redactors hide values that match common patterns for data sources, passwords, and user IDs that can be found in standard database connection strings. They also hide environment variables with names that begin with words like token, password, or user. To view the complete list of regex patterns for the built-in redactors, see [`redact.go`](https://github.com/replicatedhq/troubleshoot/blob/main/pkg/redact/redact.go#L204) in the open-source Troubleshoot GitHub repo. - -For Replicated KOTS installations, you can also add custom redactors to support bundles using the Redactor custom resource manifest file. For example, you can redact API keys or account numbers, depending on your customer needs. For more information about redactors, see [Redacting Data](https://troubleshoot.sh/docs/redact/) in the Troubleshoot documentation. - -## Defining Custom Redactors - -You can add custom redactors for KOTS installations using the following basic Redactor custom resource manifest file (`kind: Redactor`): - -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: Redactor -metadata: - name: sample -spec: - redactors: [] -``` - -## Objects and Fields - -A redactor supports two objects: `fileSelector` and `removals`. These objects specify the files the redactor applies to and how the redactions occur. For more information and examples of these fields, see [KOTS Redactor Example](#example) below and [Redactors](https://troubleshoot.sh/docs/redact/redactors/) in the Troubleshoot documentation. - -### fileSelector - -The `fileSelector` object determines which files the redactor is applied to. If this object is omitted from the manifest file, the redactor is applied to all files. This object supports the following optional fields: - -<table> - <tr> - <th width="30%">Field Name</th> - <th width="70%">Description</th> - </tr> - <tr> - <td><code>file</code></td> - <td>(Optional) Specifies a single file for redaction.</td> - </tr> - <tr> - <td><code>files</code></td> - <td>(Optional) Specifies multiple files for redaction.</td> - </tr> -</table> - -Globbing is used to match files. For example, <code>/my/test/glob/*</code> matches <code>/my/test/glob/file</code>, but does not match <code>/my/test/glob/subdir/file</code>. - -### removals - -The `removals` object is required and defines the redactions that occur. This object supports the following fields. 
At least one of these fields must be specified: - -<table> - <tr> - <th width="30%">Field Name</th> - <th width="70%">Description</th> - </tr> - <tr> - <td><code>regex</code></td> - <td>(Optional) Allows a regular expression to be applied for removal and redaction on lines that immediately follow a line that matches a filter. The <code>selector</code> field is used to identify lines, and the <code>redactor</code> field specifies a regular expression that runs on the line after any line identified by <code>selector</code>. If <code>selector</code> is empty, the redactor runs on every line. Using a <code>selector</code> is useful for removing values from pretty-printed JSON, where the value to be redacted is pretty-printed on the line beneath another value.<br></br><br></br>Matches to the regex are removed or redacted, depending on the construction of the regex. Any portion of a match not contained within a capturing group is removed entirely. The contents of capturing groups tagged <code>mask</code> are masked with <code>***HIDDEN***</code>. Capturing groups tagged <code>drop</code> are dropped.</td> - </tr> - <tr> - <td><code>values</code></td> - <td>(Optional) Specifies values to replace with the string <code>***HIDDEN***</code>.</td> - </tr> - <tr> - <td><code>yamlPath</code></td> - <td>(Optional) Specifies a <code>.</code>-delimited path to the items to be redacted from a YAML document. If an item in the path is the literal string <code>*</code>, the redactor is applied to all options at that level.<br></br><br></br>Files that fail to parse as YAML or do not contain any matches are not modified. Files that do contain matches are re-rendered, which removes comments and custom formatting. Multi-document YAML is not fully supported. Only the first document is checked for matches, and if a match is found, later documents are discarded entirely.</td> - </tr> -</table> - -## KOTS Redactor Example {#example} - -The following example shows `regex` and `yamlPath` redaction for a support bundle: - -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: Redactor -metadata: - name: my-redactor-name -spec: - redactors: - - name: all files # as no file is specified, this redactor will run against all files - removals: - regex: - - redactor: (another)(?P<mask>.*)(here) # this will replace anything between the strings `another` and `here` with `***HIDDEN***` - - selector: 'S3_ENDPOINT' # remove the value in lines immediately following those that contain the string `S3_ENDPOINT` - redactor: '("value": ").*(")' - yamlPath: - - "abc.xyz.*" # redact all items in the array at key `xyz` within key `abc` in YAML documents -``` - -================ -File: docs/reference/embedded-cluster-install.mdx -================ -import ProxyLimitations from "../partials/embedded-cluster/_proxy-install-limitations.mdx" -import ProxyRequirements from "../partials/embedded-cluster/_proxy-install-reqs.mdx" - - -# Embedded Cluster Install Command Options - -This topic describes the options available with the Embedded Cluster install command. For more information about how to install with Embedded Cluster, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded) or [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap).
## Usage - -```bash -sudo ./APP_SLUG install --license LICENSE_FILE [flags] -``` -Where: -* `APP_SLUG` is the unique application slug -* `LICENSE_FILE` is the customer's license - -## Flags - -<table> - <tr> - <th width="35%">Flag</th> - <th width="65%">Description</th> - </tr> - <tr> - <td>`--admin-console-password`</td> - <td> - <p>Set the password for the Admin Console. The password must be at least six characters in length. If not set, the user is prompted to provide an Admin Console password.</p> - </td> - </tr> - <tr> - <td>`--admin-console-port`</td> - <td> - <p>Port on which to run the KOTS Admin Console. **Default**: `30000`</p> - <p>**Limitation:** It is not possible to change the port for the Admin Console during a restore with Embedded Cluster. For more information, see [Disaster Recovery for Embedded Cluster (Alpha)](/vendor/embedded-disaster-recovery).</p> - </td> - </tr> - <tr> - <td>`--airgap-bundle`</td> - <td>The Embedded Cluster air gap bundle used for installations in air-gapped environments with no outbound internet access. For information about how to install in an air-gapped environment, see [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap).</td> - </tr> - <tr> - <td>`--cidr`</td> - <td> - <p>The range of IP addresses that can be assigned to Pods and Services, in CIDR notation. **Default:** `10.244.0.0/16`</p> - <p>**Requirement**: Embedded Cluster 1.16.0 or later.</p> - </td> - </tr> - <tr> - <td>`--config-values`</td> - <td> - <p>Path to the ConfigValues file for the application. The ConfigValues file can be used to pass the application configuration values from the command line during installation, such as when performing automated installations as part of CI/CD pipelines. For more information, see [Automating Installation with Embedded Cluster](/enterprise/installing-embedded-automation).</p> - <p><strong>Requirement:</strong> Embedded Cluster 1.18.0 and later.</p> - </td> - </tr> - <tr> - <td>`--data-dir`</td> - <td> - <p>The data directory used by Embedded Cluster. **Default**: `/var/lib/embedded-cluster`</p> - <p>**Requirement**: Embedded Cluster 1.16.0 or later.</p> - <p>**Limitations:**</p> - <ul> - <li>The data directory for Embedded Cluster cannot be changed after the cluster is installed.</li> - <li>For multi-node installations, the same data directory that is supplied at installation is used for all nodes joined to the cluster. You cannot choose a different data directory when joining nodes with the Embedded Cluster `join` command. For more information about joining nodes, see [Add Nodes to a Cluster](/enterprise/embedded-manage-nodes#add-nodes) in _Managing Multi-Node Clusters with Embedded Cluster_.</li> - <li>If you use the `--data-dir` flag to change the data directory during installation, then you must use the same location when restoring in a disaster recovery scenario. For more information about disaster recovery with Embedded Cluster, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery).</li> - <li>Replicated does not support using symlinks for the Embedded Cluster data directory.
Use the `--data-dir` flag instead of symlinking `/var/lib/embedded-cluster`.</li> - </ul> - </td> - </tr> - <tr> - <td>`--http-proxy`</td> - <td> - <p>Proxy server to use for HTTP.</p> - <ProxyRequirements/> - <ProxyLimitations/> - </td> - </tr> - <tr> - <td>`--https-proxy`</td> - <td> - <p>Proxy server to use for HTTPS.</p> - <ProxyRequirements/> - <ProxyLimitations/> - </td> - </tr> - <tr> - <td>`--local-artifact-mirror-port`</td> - <td> - <p>Port on which to run the Local Artifact Mirror (LAM). **Default**: By default, the LAM runs on port 50000.</p> - </td> - </tr> - <tr> - <td>`--network-interface`</td> - <td> - <p>The name of the network interface to bind to for the Kubernetes API. A common use case of `--network-interface` is for multi-node clusters where node communication should happen on a particular network. **Default**: If a network interface is not provided, the first valid, non-local network interface is used.</p> - </td> - </tr> - <tr> - <td>`--no-proxy`</td> - <td> - <p>Comma-separated list of hosts for which not to use a proxy.</p> - <p>For single-node installations, pass the IP address of the node where you are installing. For multi-node installations, when deploying the first node, pass the list of IP addresses for all nodes in the cluster (typically in CIDR notation). The network interface's subnet will automatically be added to the no-proxy list if the node's IP address is not already included.</p> - <p>The following are never proxied:</p> - <ul> - <li>Internal cluster communication (`localhost`, `127.0.0.1`, `.cluster.local`, `.svc`)</li> - <li>The CIDR block used for assigning IPs to Kubernetes Pods and Services. By default, the CIDR block is `10.244.0.0/16`. For information about how to change this default, see [Set IP Address Range for Pods and Services](#set-ip-address-range-for-pods-and-services).</li> - </ul> - <p>To ensure your application's internal cluster communication is not proxied, use fully qualified domain names like `my-service.my-namespace.svc` or `my-service.my-namespace.svc.cluster.local`.</p> - <ProxyRequirements/> - <ProxyLimitations/> - </td> - </tr> - <tr> - <td>`--private-ca`</td> - <td> - <p>The path to trusted certificate authority (CA) certificates. Using the `--private-ca` flag ensures that the CA is trusted by the installation. KOTS writes the CA certificates provided with the `--private-ca` flag to a ConfigMap in the cluster.</p> - <p>The KOTS [PrivateCACert](/reference/template-functions-static-context#privatecacert) template function returns the ConfigMap containing the private CA certificates supplied with the `--private-ca` flag. 
You can use this template function to mount the ConfigMap so your containers trust the CA too.</p> - </td> - </tr> -</table> - -## Examples - -### Air Gap Install - -```bash -sudo ./my-app install --license license.yaml --airgap-bundle myapp.airgap -``` - -### Change the Admin Console and LAM Ports - -```bash -sudo ./my-app install --license license.yaml --admin-console-port=20000 --local-artifact-mirror-port=40000 -``` - -### Change the Data Directory - -```bash -sudo ./my-app install --license license.yaml --data-dir /data/embedded-cluster -``` - -### Headless (Automated) Install - -```bash -sudo ./my-app install --license license.yaml \ - --config-values configvalues.yaml \ - --admin-console-password password -``` - -### Install Behind a Proxy - -```bash -sudo ./APP_SLUG install --license license.yaml \ - --http-proxy=HOST:PORT \ - --https-proxy=HOST:PORT \ - --no-proxy=LIST_OF_HOSTS -``` -Where: - -* `HOST:PORT` is the host and port of the proxy server -* `LIST_OF_HOSTS` is the list of hosts to not proxy. For example, the IP address of the node where you are installing. Or, for multi-node clusters, the list of IP addresses for all nodes in the cluster, typically in CIDR notation. - -### Install Behind an MITM Proxy - -```bash -sudo ./my-app install --license license.yaml --private-ca /path/to/private-ca-bundle \ - --http-proxy=http://10.128.0.0:3300 \ - --https-proxy=http://10.128.0.0:3300 \ - --no-proxy=123.89.46.4,10.96.0.0/16,*.example.com -``` - -### Set Admin Console Password - -```bash -sudo ./my-app install --license license.yaml --admin-console-password password -``` - -### Set IP Address Range for Pods and Services - -```bash -sudo ./my-app install --license license.yaml --cidr 172.16.136.0/16 -``` - -### Use a Specific Network Interface - -```bash -sudo ./my-app install --license license.yaml --network-interface eno167777 -``` - -================ -File: docs/reference/embedded-config.mdx -================ -# Embedded Cluster Config - -This topic is a reference for the Replicated Embedded Cluster Config custom resource. For more information about Embedded Cluster, see [Using Embedded Cluster](/vendor/embedded-overview). - -:::note -Embedded Cluster is in beta. If you are instead looking for information about creating Kubernetes Installers with Replicated kURL, see the [Replicated kURL](/vendor/packaging-embedded-kubernetes) section. -::: - -## Overview - -To install your application with Embedded Cluster, an Embedded Cluster Config must be created in a release. Embedded Cluster installation artifacts are available only for releases that include an Embedded Cluster Config. - -The Embedded Cluster Config lets you define several aspects of the Kubernetes cluster that will be created. - -### Limitations - -* The Embedded Cluster Config does not support the use of Go template functions, including [KOTS template functions](/reference/template-functions-about). - -For additional property-specific limitations, see the sections below. 
- -### Example - -```yaml -apiVersion: embeddedcluster.replicated.com/v1beta1 -kind: Config -spec: - version: 2.1.3+k8s-1.30 - roles: - controller: - name: management - labels: - management: "true" - custom: - - name: app - labels: - app: "true" - extensions: - helm: - repositories: - - name: ingress-nginx - url: https://kubernetes.github.io/ingress-nginx - charts: - - name: ingress-nginx - chartname: ingress-nginx/ingress-nginx - namespace: ingress-nginx - version: "4.8.3" - values: | - controller: - service: - type: NodePort - nodePorts: - http: "80" - https: "443" - # Known issue: Only use image tags for multi-architecture images. - # Set digest to empty string to ensure the air gap builder uses - # single-architecture images. - image: - digest: "" - digestChroot: "" - admissionWebhooks: - patch: - image: - digest: "" -``` - -## version - -You must specify which version of Embedded Cluster to install. Each version of Embedded Cluster includes particular versions of components like KOTS (Admin Console) and OpenEBS. - -For a full list of versions, see the Embedded Cluster [releases page](https://github.com/replicatedhq/embedded-cluster/releases) in GitHub. It's recommended to keep this version as up to date as possible because Embedded Cluster is changing rapidly. - -## roles - -You can optionally customize node roles in the Embedded Cluster Config using the `roles` key. - -If the `roles` key is configured, users select one or more roles to assign to a node when it is joined to the cluster. A single node can be assigned: -* The `controller` role, which designates nodes that run the Kubernetes control plane -* One or more `custom` roles -* Both the `controller` role _and_ one or more `custom` roles - -For more information about how to assign node roles in the Admin Console, see [Managing Multi-Node Clusters with Embedded Cluster](/enterprise/embedded-manage-nodes). - -If the `roles` key is _not_ configured, all nodes joined to the cluster are assigned the `controller` role. The `controller` role designates nodes that run the Kubernetes control plane. Controller nodes can also run other workloads, such as application or Replicated KOTS workloads. - -For more information, see the sections below. - -### controller - -By default, all nodes joined to a cluster are assigned the `controller` role. - -You can customize the `controller` role in the following ways: -* Change the `name` that is assigned to controller nodes. By default, controller nodes are named “controller”. If you plan to create any `custom` roles, Replicated recommends that you change the default name for the `controller` role to a term that is easy to understand, such as "management". This is because, when you add `custom` roles, both the name of the `controller` role and the names of any `custom` roles are displayed to the user when they join a node. -* Add one or more `labels` to be assigned to all controller nodes. See [labels](#labels). - -#### Example - -```yaml -apiVersion: embeddedcluster.replicated.com/v1beta1 -kind: Config -spec: - roles: - controller: - name: management - labels: - management: "true" # Label applied to "management" nodes -``` - -### custom - -You can add `custom` roles that users can assign to one or more nodes in the cluster. Each `custom` role that you add must have a `name` and can also have one or more `labels`. See [labels](#labels). - -Adding `custom` node roles is useful if you need to assign application workloads to specific nodes in multi-node clusters. 
For example, if your application has graphics processing unit (GPU) workloads, you could create a `custom` role that adds a `gpu=true` label to any node that is assigned the role. This allows you to then schedule GPU workloads on nodes labeled `gpu=true`. Or, if your application includes any resource-intensive workloads (such as a database) that must be run on dedicated nodes, you could create a `custom` role that adds a `db=true` label to the node. This way, the database workload could be assigned to a certain node or nodes. - -#### Example - -```yaml -apiVersion: embeddedcluster.replicated.com/v1beta1 -kind: Config -spec: - roles: - custom: - - name: app - labels: - app: "true" # Label applied to "app" nodes -``` - -### labels - -You can define Kubernetes labels for the default `controller` role and any `custom` roles that you add. When `labels` are defined, Embedded Cluster applies the label to any node in the cluster that is assigned the given role. Labels are useful for tasks like assigning workloads to nodes. - -#### Example - -```yaml -apiVersion: embeddedcluster.replicated.com/v1beta1 -kind: Config -spec: - roles: - controller: - name: management - labels: - management: "true" # Label applied to "management" nodes - custom: - - name: db - labels: - db: "true" # Label applied to "db" nodes - - name: gpu - labels: - gpu: "true" # Label applied to "gpu" nodes -``` - -## extensions - -If you need to install Helm charts before your application and as part of the Embedded Cluster itself, you can do this with Helm extensions. One situation where this is useful is if you want to ship an ingress controller, because Embedded Cluster does not yet include one. - -Helm extensions are updated when new versions of your application are deployed from the Admin Console. So, for example, you can change the values for a Helm extension from one release to another, and those changes will be applied to the cluster when the new release is deployed. - -Helm extensions are specified using the same format as k0s Helm extensions in the k0s configuration. For more information about these fields, see the [k0s documentation](https://docs.k0sproject.io/stable/helm-charts/#example). - -### Limitation - -If a Helm extension is removed from the Embedded Cluster Config, the associated Helm chart is not removed from the cluster. - -### Requirements - -* The `version` field is required. Failing to specify a chart version will cause problems for upgrades. - -* If you need to install multiple charts in a particular order, set the `order` field to a value greater than or equal to 10. Numbers below 10 are reserved for use by Embedded Cluster to deploy things like a storage provider and the Admin Console. If an `order` is not provided, Helm extensions are installed with order 10. - -### Example - -```yaml -apiVersion: embeddedcluster.replicated.com/v1beta1 -kind: Config -spec: - extensions: - helm: - repositories: - - name: ingress-nginx - url: https://kubernetes.github.io/ingress-nginx - charts: - - name: ingress-nginx - chartname: ingress-nginx/ingress-nginx - namespace: ingress-nginx - version: "4.8.3" - values: | - controller: - service: - type: NodePort - nodePorts: - http: "80" - https: "443" - # Known issue: Only use image tags for multi-architecture images. - # Set digest to empty string to ensure the air gap builder uses - # single-architecture images.
- image: - digest: "" - digestChroot: "" - admissionWebhooks: - patch: - image: - digest: "" -``` - -## unsupportedOverrides - -:::important -This feature should be used with caution by advanced users who understand the risks and ramifications of changing the default configuration. -::: - -Unsupported overrides allow you to override Embedded Cluster's default configuration, including the k0s config and the Helm values for extensions like KOTS and OpenEBS. This should be used with caution because changes here are untested and can disrupt or break Embedded Clusters. Any issues that are caused by unsupported overrides are not supported. - -While they should be used with caution, unsupported overrides are useful if you need to make changes that are not otherwise exposed by Embedded Cluster. - -### Override the k0s Config - -By default, Embedded Cluster uses a k0s config that is tested and known to work for Embedded Clusters. In some circumstances, you might want to change the k0s config. - -For more information on the k0s config, see [Configuration options](https://docs.k0sproject.io/stable/configuration/#configuration-file-reference) in the k0s documentation. - -For example, you can do the following to enable WireGuard-based encryption. Note that other configuration might be necessary. See [`spec.network.calico`](https://docs.k0sproject.io/stable/configuration/#specnetworkcalico) in the k0s documentation for more details. -```yaml -apiVersion: embeddedcluster.replicated.com/v1beta1 -kind: Config -spec: - unsupportedOverrides: - k0s: | - config: - spec: - network: - calico: - wireguard: true -``` - -#### Limitations - -* The `spec.api` and `spec.storage` keys in the k0s config cannot be changed after installation. Whereas most keys in the k0s config apply to the whole cluster, these two keys are set for each node. Embedded Cluster cannot update these keys on each individual node during updates, so they cannot be changed after installation. - -* Overrides overwrite the corresponding fields in the k0s configuration. They are not merged into Embedded Cluster’s default configuration. When using overrides to override a list, for example, ensure that you include other elements in the list that Embedded Cluster includes by default. - -### Override the Helm Values for Built-In Extensions - -Embedded Cluster deploys built-in extensions like KOTS and OpenEBS to provide capabilities like storage and application management. These extensions are deployed with Helm, and the Helm values for each can be modified if necessary. - -To modify these values, you can use the `unsupportedOverrides.builtInExtensions` key of the Embedded Cluster Config. Each chart you want to modify is an item in the array. The `name` key identifies the Helm chart that you want to modify, and the `values` key is a string where you specify your modified Helm values. Your modified values are merged into the values used by Embedded Cluster. - -The following are the built-in extensions available for modification: - -- `openebs` -- `admin-console` -- `velero` -- `embedded-cluster-operator` - -#### Example - -```yaml -apiVersion: embeddedcluster.replicated.com/v1beta1 -kind: Config -spec: - unsupportedOverrides: - builtInExtensions: - - name: openebs - values: | - key: value -``` - -================ -File: docs/reference/kots-cli-admin-console-garbage-collect-images.md -================ -# admin-console garbage-collect-images - -Starts image garbage collection.
-The KOTS Admin Console must be running and an application must be installed in order to use this command. - -### Usage -```bash -kubectl kots admin-console garbage-collect-images -n <namespace> -``` - -This command supports all [global flags](kots-cli-global-flags). - -| Flag | Type | Description | -|:--------------------|--------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `-h, --help` | | help for garbage-collect-images | -| `-n, --namespace` | string | the namespace where the Admin Console is running _(required)_ | -| `--ignore-rollback` | string | force image garbage collection even if rollback is enabled for the application (default false). Note: this may impact the ability to roll back the application to a previous version. | - -### Examples -```bash -kubectl kots admin-console garbage-collect-images -n default -``` - -================ -File: docs/reference/kots-cli-admin-console-generate-manifests.mdx -================ -import KotsadmNamespace from "../partials/kots-cli/_kotsadm-namespace.mdx" -import KotsadmRegistry from "../partials/kots-cli/_kotsadm-registry.mdx" -import RegistryPassword from "../partials/kots-cli/_registry-password.mdx" -import RegistryUsername from "../partials/kots-cli/_registry-username.mdx" -import StrictSecContext from "../partials/kots-cli/_strict-sec-context-yaml.mdx" - -# admin-console generate-manifests - -Running this command will create a directory on the workstation containing the Replicated Admin Console manifests. These assets can be used to deploy KOTS to a cluster through other workflows, such as kubectl, to provide additional customization of the Admin Console before deploying. - -### Limitations - -* `generate-manifests` does not support generating manifests for Red Hat OpenShift clusters or GKE Autopilot clusters if executed without a Kubernetes cluster context. - -* To upgrade a KOTS instance that has ever been on version 1.72.0 or earlier, you must run `generate-manifests` with a Kubernetes cluster context. - -* The `admin-console generate-manifests` command does not accept the [`--strict-security-context`](/reference/kots-cli-install#usage) flag, which deploys KOTS Pods with a security context.
To generate Admin Console manifests with a security context, add the following to the Pod templates for Deployments and StatefulSets deployed by KOTS: - - <StrictSecContext/> - -### Usage -```bash -kubectl kots admin-console generate-manifests [flags] -``` - -This command supports the following flags: - -<table> - <tr> - <td>Flag</td> - <td>Type</td> - <td>Description</td> - </tr> - <tr> - <td><code>--rootdir</code></td> - <td>string</td> - <td>Root directory where the YAML will be written (default `${HOME}` or `%USERPROFILE%`)</td> - </tr> - <tr> - <td><code>--namespace</code></td> - <td>string</td> - <td>Target namespace for the Admin Console</td> - </tr> - <tr> - <td><code>--shared-password</code></td> - <td>string</td> - <td>Shared password to use when deploying the Admin Console</td> - </tr> - <tr> - <td><code>--http-proxy</code></td> - <td>string</td> - <td>Sets HTTP_PROXY environment variable in all KOTS Admin Console components</td> - </tr> - <tr> - <td><code>--https-proxy</code></td> - <td>string</td> - <td>Sets HTTPS_PROXY environment variable in all KOTS Admin Console components</td> - </tr> - <KotsadmNamespace/> - <KotsadmRegistry/> - <tr> - <td><code>--no-proxy</code></td> - <td>string</td> - <td>Sets NO_PROXY environment variable in all KOTS Admin Console components</td> - </tr> - <tr> - <td><code>--private-ca-configmap</code></td> - <td>string</td> - <td>Name of a ConfigMap containing private CAs to add to the kotsadm deployment</td> - </tr> - <RegistryPassword/> - <RegistryUsername/> - <tr> - <td><code>--with-minio</code></td> - <td>bool</td> - <td>Set to true to include a local minio instance to be used for storage (default true)</td> - </tr> - <tr> - <td><code>--minimal-rbac</code></td> - <td>bool</td> - <td>Set to true to limit the Admin Console RBAC permissions to the target namespace (default false)</td> - </tr> - <tr> - <td><code>--additional-namespaces</code></td> - <td>string</td> - <td>Comma-delimited list of additional namespace(s) managed by KOTS outside of the namespace where it is deployed. Ignored unless <code>--minimal-rbac=true</code> is set</td> - </tr> - <tr> - <td><code>--storage-class</code></td> - <td>string</td> - <td>Sets the storage class to use for the KOTS Admin Console components. <strong>Default:</strong> unset, which means the default storage class will be used</td> - </tr> -</table> - -### Examples -```bash -kubectl kots admin-console generate-manifests -kubectl kots admin-console generate-manifests --rootdir ./manifests -kubectl kots admin-console generate-manifests --namespace kotsadm --minimal-rbac=true --additional-namespaces="app1,app3" -``` - -================ -File: docs/reference/kots-cli-admin-console-index.md -================ -# admin-console - -Enables access to the KOTS Admin Console from a local machine. - -This command opens localhost port 8800, which forwards to the `kotsadm` service. -Alternatively, you can use the `--port` flag to specify a port other than 8800. - -To access the Admin Console, browse to http://localhost:8800 after running this command. - -### Usage -```bash -kubectl kots admin-console [flags] -``` - -This command supports all [global flags](kots-cli-global-flags) and also: - -| Flag | Type | Description | -|:------------------|--------|---------------------------------------------------------------------------------| -| `-h, --help` | | Help for admin-console. | -| `-n, --namespace` | string | The namespace where the Admin Console is running.
**Default:** "default" | -| `--port` | string | Override the local port on which to access the Admin Console. **Default:** 8800 | - -### Examples -```bash -kubectl kots admin-console --namespace kots-sentry -``` - -================ -File: docs/reference/kots-cli-admin-console-push-images.md -================ -# admin-console push-images - -Pushes images from an air gap bundle to a private registry. -The air gap bundle can be either a KOTS Admin Console release or an application release. - -### Usage -```bash -kubectl kots admin-console push-images [airgap-bundle] [private-registry] [flags] -``` - -This command supports all [global flags](kots-cli-global-flags) and also: - -| Flag | Type | Description | -|:------------------------|--------|-------------------------------------| -| `-h, --help` | | Help for the command | -| `--registry-username` | string | username for the private registry | -| `--registry-password` | string | password for the private registry | -| `--skip-registry-check` | bool | Set to `true` to skip the connectivity test and validation of the provided registry information. **Default:** `false` | - -### Examples -```bash -kubectl kots admin-console push-images ./kotsadm.tar.gz private.registry.host/app-name \ - --registry-username rw-username \ - --registry-password rw-password -``` - -================ -File: docs/reference/kots-cli-admin-console-upgrade.mdx -================ -# admin-console upgrade - -import EnsureRBAC from "../partials/kots-cli/_ensure-rbac.mdx" -import Help from "../partials/kots-cli/_help.mdx" -import KotsadmNamespace from "../partials/kots-cli/_kotsadm-namespace.mdx" -import KotsadmRegistry from "../partials/kots-cli/_kotsadm-registry.mdx" -import RegistryPassword from "../partials/kots-cli/_registry-password.mdx" -import RegistryUsername from "../partials/kots-cli/_registry-username.mdx" -import SkipRBACCheck from "../partials/kots-cli/_skip-rbac-check.mdx" -import StrictSecurityContext from "../partials/kots-cli/_strict-security-context.mdx" -import WaitDuration from "../partials/kots-cli/_wait-duration.mdx" -import WithMinIO from "../partials/kots-cli/_with-minio.mdx" - -Upgrades the KOTS Admin Console to match the version of KOTS CLI. - - -### Usage -```bash -kubectl kots admin-console upgrade [flags] -``` - -This command supports all [global flags](kots-cli-global-flags) and also: -<table> - <tr> - <th width="30%">Flag</th> - <th>Type</th> - <th>Description</th> - </tr> - <EnsureRBAC/> - <Help/> - <KotsadmNamespace/> - <KotsadmRegistry/> - <RegistryPassword/> - <RegistryUsername/> - <SkipRBACCheck/> - <StrictSecurityContext/> - <WaitDuration/> - <WithMinIO/> -</table> - -### Examples -```bash -kubectl kots admin-console upgrade --namespace kots-sentry -kubectl kots admin-console upgrade --ensure-rbac=false -``` - -================ -File: docs/reference/kots-cli-backup-index.md -================ -# backup - -Create a full instance snapshot for disaster recovery. - -### Usage - -```bash -kubectl kots backup [flags] -``` - -This command supports the following flags: - -| Flag | Type | Description | -| :---------------- | ------ | ------------------------------------------------------------------------------- | -| `-h, --help` | | Help for `backup`. | -| `-n, --namespace` | string | The namespace where the Admin Console is running. **Default:** `default` | -| `-o, --output` | string | The output format. Supports JSON. Defaults to plain text if not set. | -| `--wait`. | bool | Wait for the backup to finish. 
**Default:** true | - -### Example - -```bash -kubectl kots backup --namespace kots-sentry -``` - -================ -File: docs/reference/kots-cli-backup-ls.md -================ -# backup ls - -:::note -This command is deprecated. Use [`kubectl kots get backups`](/reference/kots-cli-get-backups) instead. -::: - -Show a list of all the available instance snapshots for disaster recovery. - -### Usage - -```bash -kubectl kots backup ls [flags] -``` - -This command supports the following flags: - -| Flag | Type | Description | -| :---------------- | ------ | ------------------------------------------------------------------- | -| `-h, --help` | | Help for `backup ls`. | -| `-n, --namespace` | string | Filter by the namespace the Admin Console was installed in. | - -### Example - -```bash -kubectl kots backup ls --namespace kots-sentry -``` - -================ -File: docs/reference/kots-cli-docker-ensure-secret.md -================ -# docker ensure-secret - -Creates an image pull secret for Docker Hub that the Admin Console can use to avoid [rate limiting](/enterprise/image-registry-rate-limits). -The credentials are validated before creating the image pull secret. -Running this command creates a new application version, based on the latest version, with the new image pull secret added to all Kubernetes manifests that have images. -For this secret to take effect and avoid rate limiting, the new version must be deployed. - -### Usage - -```bash -kubectl kots docker ensure-secret [flags] -``` - -- _Provide `[flags]` according to the table below_ - -| Flag | Type | Description | -| ----------------- | ------ | ------------------------------------------------------------------- | -| `-h, --help` | | help for ensure-secret | -| `--dockerhub-username` | string | DockerHub username to be used _(required)_ | -| `--dockerhub-password` | string | DockerHub password to be used _(required)_ | -| `-n, --namespace` | string | the namespace where the Admin Console is running _(required)_ | - -### Example - -```bash -kubectl kots docker ensure-secret --dockerhub-username sentrypro --dockerhub-password password --namespace sentry-pro -``` - -================ -File: docs/reference/kots-cli-docker-index.md -================ -# docker - -KOTS Docker interface - -### Usage - -```bash -kubectl kots docker [command] -``` - -This command supports all [global flags](kots-cli-global-flags). - -================ -File: docs/reference/kots-cli-download.md -================ -# download - -Retrieves a copy of the application manifests from the cluster, and stores them in a specific directory structure on your workstation. -Requires a running application with the KOTS Admin Console. - -## Usage -```bash -kubectl kots download [app-slug] [flags] -``` - -* _Replace `[app-slug]` with the application slug provided by your software vendor (required)._ For more information, see [Get the Application Slug](/vendor/vendor-portal-manage-app#slug) in _Managing Applications_.
-* _Provide `[flags]` according to the table below_ - -This command supports all [global flags](kots-cli-global-flags) and also: - - -| Flag | Type | Description | -|:----------------------------|--------|-----------------------------------------------------------------------------------------------------------------------| -| `--decrypt-password-values` | bool | decrypt password values to plaintext | -| `--dest` | string | the directory to store the application in _(defaults to current working dir)_ | -| `--current` | bool | download the archive of the currently deployed app version | -| `--sequence` | int | sequence of the app version to download the archive for (defaults to the latest version unless --current flag is set) | -| `-h, --help` | | help for download | -| `-n, --namespace` | string | the namespace to download from _(default `"default"`)_ | -| `--overwrite` | | overwrite any local files, if present | -| `-o, --output` | string | output format (currently supported: json) _(defaults to plain text if not set)_ | - -## Example -```bash -kubectl kots download kots-sentry --namespace kots-sentry --dest ./manifests --overwrite -``` - -================ -File: docs/reference/kots-cli-enable-ha.md -================ -# enable-ha - -(Deprecated) Runs the rqlite StatefulSet as three replicas for data replication and high availability. - -This command is deprecated and will be removed in a future release. The EKCO add-on for Replicated kURL now scales up the rqlite StatefulSet automatically when three or more nodes are healthy and the OpenEBS localpv storage class is available. For more information, see [EKCO add-on](https://kurl.sh/docs/add-ons/ekco#kotsadm) in the kURL documentation. - -## Usage -```bash -kubectl kots enable-ha [flags] -``` - -* _Provide `[flags]` according to the table below_ - -This command supports all [global flags](kots-cli-global-flags) and also: - - -| Flag | Type | Description | -|:---------------------|--------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `--wait-duration` | string | Timeout used while waiting for individual components to be ready. Must be in Go duration format. For example, `10s` or `2m`. See [func ParseDuration](https://pkg.go.dev/time#ParseDuration) in the Go documentation. | -| `-h, --help` | | Help for `enable-ha`. | -| `-n, --namespace` | string | The namespace where the Admin Console is running _(required)_ | - -## Example -```bash -kubectl kots enable-ha --namespace kots-sentry -``` - -================ -File: docs/reference/kots-cli-get-apps.md -================ -# get apps - -The `kots get apps` command lists installed applications. - -### Usage - -```bash -kubectl kots get apps [flags] -``` - -- _Provide `[flags]` according to the table below_ - -| Flag | Type | Description | -| :---------------- | ------ | ------------------------------------------------------------------- | -| `-h, --help` | | help for get apps | -| `-n, --namespace` | string | the namespace where the Admin Console is running _(required)_ | - -### Example - -```bash -kubectl kots get apps -n default -``` - -================ -File: docs/reference/kots-cli-get-backups.md -================ -# get backups - -The `kots get backups` command lists available full snapshots (instance). 
- -### Usage - -```bash -kubectl kots get backups [flags] -``` - -- _Provide `[flags]` according to the table below_ - -| Flag | Type | Description | -| :---------------- | ------ | ------------------------------------------------------------------- | -| `-h, --help` | | help for get backups | -| `-n, --namespace` | string | filter by the namespace in which the Admin Console is/was installed | - -### Examples - -Basic - -```bash -kubectl kots get backups -``` - -Filtering by a namespace - -```bash -kubectl kots get backups -n default -``` - -================ -File: docs/reference/kots-cli-get-config.md -================ -# get config - -The `kots get config` command returns the `configValues` file for an application. - -### Usage - -```bash -kubectl kots get config [flags] -``` - -- _Provide `[flags]` according to the table below_ - -| Flag | Type | Description | -| :---------------- | ------ | ------------------------------------------------------------------- | -| `--appslug` | string | The slug of the target application. Required when more than one application is deployed. Your software vendor provides the application slug. For more information, see <a href="/vendor/vendor-portal-manage-app#slug">Get the Application Slug</a> in <em>Managing Applications</em>.| -| `--current` | bool | When set, the `configValues` file for the currently deployed version of the application is retrieved.| -| `--sequence` | int | Retrieves the `configValues` file for the specified application sequence. **Default**: Latest (unless the `--current` flag is set).| -| `--decrypt` | bool | Decrypts password items within the configuration.| -| `-h, --help` | | Help for `get config`.| -| `-n, --namespace` | string | (Required) The namespace where the Admin Console is running.| - -### Example - -```bash -kubectl kots get config -n default --sequence 5 --appslug myapp -``` - -================ -File: docs/reference/kots-cli-get-index.md -================ -# get - -The `kots get` command shows information about one or more resources. - -### Usage -```bash -kubectl kots get [resource] [flags] -``` - -This command supports all [global flags](kots-cli-global-flags) and also: - -| Flag | Type | Description | -|:----------------------|------|-------------| -| `-o, --output` | | Output format. **Supported formats**: `json`. | - -### Resources - -* `apps` lists installed applications. -* `backups` lists available full snapshots (instance). -* `config` lists the **configValues** for an application. -* `restores` lists created full snapshot restores. -* `versions` lists the versions available for a given `app-slug`. - -================ -File: docs/reference/kots-cli-get-restores.md -================ -# get restores - -The `kots get restores` command lists created full snapshot restores. - -### Usage - -```bash -kubectl kots get restores [flags] -``` - -- _Provide `[flags]` according to the table below_ - -| Flag | Type | Description | -| :---------------- | ------ | ------------------------------------------------------------------- | -| `-h, --help` | | help for get restores | -| `-n, --namespace` | string | filter by the namespace in which the Admin Console is/was installed | - -### Examples - -Basic - -```bash -kubectl kots get restores -``` - -Filtering by a namespace - -```bash -kubectl kots get restores -n default -``` - -================ -File: docs/reference/kots-cli-get-versions.md -================ -# get versions - -The `kots get versions` command lists all versions of an application. 
- -> Introduced in KOTS v1.61.0 - -### Usage - -```bash -kubectl kots get versions [app-slug] [flags] -``` - -- _Replace `[app-slug]` with the app slug for your KOTS application (required)._ -- _Provide `[flags]` according to the table below_ - -| Flag | Type | Description | -| :------------------------ | ------ | --------------------------------------------------------------------------------------------------- | -| `-h, --help` | | Help for `get versions`. | -| `-n, --namespace` | string | (Required) The namespace where the Admin Console is running. | -| `--current-page` | int | Offset, by page size, at which to start retrieving versions. **Default:** 0 | -| `--page-size` | int | Number of versions to return. **Default:** 20 | -| `--pin-latest` | bool | When set to true, always returns the latest version at the beginning. **Default:** false | -| `--pin-latest-deployable` | bool | When set to true, always returns the latest version that can be deployed. The latest deployable version can differ from the latest version if a required version, which cannot be skipped, is present. **Default:** false | -| `-o, --output` | string | Output format. **Supported formats:** `json`. **Default:** Plain text | - -### Example - -```bash -kubectl kots get versions kots-sentry -n default -``` - -================ -File: docs/reference/kots-cli-getting-started.md -================ -# Installing the KOTS CLI - -Users can interact with the Replicated KOTS CLI to install and manage applications with Replicated KOTS. The KOTS CLI is a kubectl plugin that runs locally on any computer. - - -## Prerequisite - -Install kubectl, the Kubernetes command-line tool. See [Install Tools](https://kubernetes.io/docs/tasks/tools/) in the Kubernetes documentation. - -:::note -If you are using a cluster created with Replicated kURL, kURL already installed both kubectl and the KOTS CLI when it provisioned the cluster. For more information, see [Online Installation with kURL](/enterprise/installing-kurl) and [Air Gap Installation with kURL](/enterprise/installing-kurl-airgap). -::: - -## Install - -To install the latest version of the KOTS CLI to `/usr/local/bin`, run: - -```bash -curl https://kots.io/install | bash -``` - -To install to a directory other than `/usr/local/bin`, run: - -```bash -curl https://kots.io/install | REPL_INSTALL_PATH=/path/to/cli bash -``` - -To install a specific version of the KOTS CLI, run: - -```bash -curl https://kots.io/install/<version> | bash -``` - -To verify your installation, run: - -```bash -kubectl kots --help -``` - -## Install without Root Access - -You can install the KOTS CLI on computers without root access or computers that cannot write to the `/usr/local/bin` directory. - -To install the KOTS CLI without root access, you can do any of the following: - -* (Online Only) [Install to a Different Directory](#install-to-a-different-directory) -* (Online Only) [Install Using Sudo](#install-using-sudo) -* (Online or Air Gap) [Manually Download and Install](#manually-download-and-install) - -### Install to a Different Directory - -You can set the `REPL_INSTALL_PATH` environment variable to install the KOTS CLI to a directory other than `/usr/local/bin` that does not require elevated permissions. - -**Example:** - -In the following example, the installation script installs the KOTS CLI to the `~/bin` directory. You can use the user home symbol `~` in the `REPL_INSTALL_PATH` environment variable. The script expands `~` to `$HOME`.
- -```bash -curl -L https://kots.io/install | REPL_INSTALL_PATH=~/bin bash -``` - -### Install Using Sudo - -If you have sudo access to the directory where you want to install the KOTS CLI, you can set the `REPL_USE_SUDO` environment variable so that the installation script prompts you for your sudo password. - -When you set the `REPL_USE_SUDO` environment variable to any value, the installation script uses sudo to create and write to the installation directory as needed. The script prompts for a sudo password if it is required for the user executing the script in the specified directory. - -**Example:** - -In the following example, the script uses sudo to install the KOTS CLI to the default `/usr/local/bin` directory. - -```bash -curl -L https://kots.io/install | REPL_USE_SUDO=y bash -``` - -**Example:** - -In the following example, the script uses sudo to install the KOTS CLI to the `/replicated/bin` directory. - -```bash -curl -L https://kots.io/install | REPL_INSTALL_PATH=/replicated/bin REPL_USE_SUDO=y bash -``` - -### Manually Download and Install - -You can manually download and install the KOTS CLI binary, rather than using the installation script, to install without root access. - -Users in air gap environments can also follow this procedure to install the KOTS CLI. - -To manually download and install the KOTS CLI: - -1. Download the KOTS CLI release for your operating system. - - You can run one of the following commands to download the latest version of the KOTS CLI from the [Releases](https://github.com/replicatedhq/kots/releases/latest) page in the KOTS GitHub repository: - - * **MacOS (AMD and ARM)**: - - ```bash - curl -LO https://github.com/replicatedhq/kots/releases/latest/download/kots_darwin_all.tar.gz - ``` - - * **Linux (AMD)**: - - ```bash - curl -LO https://github.com/replicatedhq/kots/releases/latest/download/kots_linux_amd64.tar.gz - ``` - - * **Linux (ARM)**: - - ```bash - curl -LO https://github.com/replicatedhq/kots/releases/latest/download/kots_linux_arm64.tar.gz - ``` - -1. Unarchive the `.tar.gz` file that you downloaded: - - * **MacOS (AMD and ARM)**: - - ```bash - tar xvf kots_darwin_all.tar.gz - ``` - * **Linux (AMD)**: - - ```bash - tar xvf kots_linux_amd64.tar.gz - ``` - * **Linux (ARM)**: - - ```bash - tar xvf kots_linux_arm64.tar.gz - ``` - -1. Rename the `kots` executable to `kubectl-kots` and move it to one of the directories that is in your PATH environment variable. This ensures that the system can access the executable when you run KOTS CLI commands. - - :::note - You can run `echo $PATH` to view the list of directories in your PATH. - ::: - - Run one of the following commands, depending on whether you have write access to the target directory: - - * **You have write access to the directory**: - - ```bash - mv kots /PATH_TO_TARGET_DIRECTORY/kubectl-kots - ``` - Replace `PATH_TO_TARGET_DIRECTORY` with the path to a directory that is in your PATH environment variable. For example, `/usr/local/bin`. - - * **You do _not_ have write access to the directory**: - - ```bash - sudo mv kots /PATH_TO_TARGET_DIRECTORY/kubectl-kots - ``` - Replace `PATH_TO_TARGET_DIRECTORY` with the path to a directory that is in your PATH environment variable. For example, `/usr/local/bin`. - -1. Verify the installation: - - ``` - kubectl kots --help - ``` - -## Uninstall - -The KOTS CLI is a plugin for the Kubernetes kubectl command line tool. The KOTS CLI plugin is named `kubectl-kots`.
- -For more information about working with kubectl, see [Command line tool (kubectl)](https://kubernetes.io/docs/reference/kubectl/) in the Kubernetes documentation. - -To uninstall the KOTS CLI: - -1. Find the location where the `kubectl-kots` plugin is installed on your `PATH`: - - ``` - kubectl plugin list - ``` - -2. Delete `kubectl-kots`: - - ``` - sudo rm PATH_TO_KOTS - ``` - Replace `PATH_TO_KOTS` with the location where `kubectl-kots` is installed. - - **Example**: - - ``` - sudo rm /usr/local/bin/kubectl-kots - ``` - -================ -File: docs/reference/kots-cli-global-flags.md -================ -# Global flags - -All KOTS CLI commands support a set of global flags to be used to connect to the cluster. - -| Flag | Type | Description | -|---|---|---| -| `--as` | string | Username to impersonate for the operation | -| `--as-group` | stringArray | Group to impersonate for the operation. This flag can be repeated to specify multiple groups. | -| `--cache-dir` | string | Default HTTP cache directory (default "~/.kube/http-cache") | -| `--certificate-authority` | string | Path to a cert file for the certificate authority | -| `--client-certificate` | string | Path to a client certificate file for TLS | -| `--client-key` | string | Path to a client key file for TLS | -| `--cluster` | string | The name of the kubeconfig cluster to use | -| `--context` | string | The name of the kubeconfig context to use | -| `--insecure-skip-tls-verify` | bool | If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure | -| `--kubeconfig` | string | Path to the kubeconfig file to use for CLI requests. | -| `-n, --namespace` | string | If present, the namespace scope for this CLI request | -| `--request-timeout` | string | The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0") | -| `-s, --server` | string | The address and port of the Kubernetes API server | -| `--token` | string | Bearer token for authentication to the API server | -| `--user` | string | The name of the kubeconfig user to use | - -================ -File: docs/reference/kots-cli-identity-service-enable-shared-password.md -================ -# identity-service enable-shared-password - -Enable the shared password login option in the KOTS Admin Console. - -### Usage - -```bash -kubectl kots identity-service enable-shared-password [flags] -``` - -This command supports all [global flags](kots-cli-global-flags) and also: - -| Flag | Type | Description | -| :---------------- | ------ | ------------------------------------------------ | -| `-n, --namespace` | string | the namespace where the Admin Console is running | - -NOTE: The `--namespace` flag is required. - -### Examples - -```bash -kubectl kots identity-service enable-shared-password --namespace kots-sentry -``` - -================ -File: docs/reference/kots-cli-identity-service-index.md -================ -# identity-service - -KOTS Identity Service - -### Usage - -```bash -kubectl kots identity-service [command] -``` - -This command supports all [global flags](kots-cli-global-flags).
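- -### Example - -The following is a minimal example (assuming the Admin Console is installed in a hypothetical `kots-sentry` namespace) that runs the `enable-shared-password` subcommand documented above: - -```bash -kubectl kots identity-service enable-shared-password --namespace kots-sentry -```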
- -================ -File: docs/reference/kots-cli-install.mdx -================ -import StrictSecurityContext from "../partials/kots-cli/_strict-security-context.mdx" -import KotsadmNamespace from "../partials/kots-cli/_kotsadm-namespace.mdx" -import KotsadmRegistry from "../partials/kots-cli/_kotsadm-registry.mdx" -import RegistryPassword from "../partials/kots-cli/_registry-password.mdx" -import RegistryUsername from "../partials/kots-cli/_registry-username.mdx" -import Help from "../partials/kots-cli/_help.mdx" - -# install - -Installs the application and the KOTS Admin Console directly to a cluster. -The `kots install` command pulls Kubernetes manifests from the remote upstream, deploys the manifests to the specified cluster, installs the Admin Console, and sets up port forwarding to make the Admin Console accessible on port 8800. -Alternatively, you can specify the `--port` flag to override the default port. - -### Usage - -```bash -kubectl kots install [upstream uri] [flags] -``` - -- _Replace [upstream-uri] with the URI for your KOTS application (required)._ -- _If the KOTS application has been packaged by Replicated Vendor, the `--license-file` flag must be provided._ -- _Provide [flags] according to the table below_ - -This command supports all [global flags](kots-cli-global-flags) and also: - -<table> - <tr> - <td>Flag</td> - <td>Type</td> - <td>Description</td> - </tr> - <tr> - <td><code>--additional-annotations</code></td> - <td>bool</td> - <td>Additional annotations to add to kotsadm pods.</td> - </tr> - <tr> - <td><code>--additional-labels</code></td> - <td>bool</td> - <td>Additional labels to add to kotsadm pods.</td> - </tr> - <tr> - <td><code>--airgap</code></td> - <td>bool</td> - <td>Set to <code>true</code> to run install in air gapped mode. Setting <code>--airgap-bundle</code> implies <code>--airgap=true</code>. <strong>Default:</strong> <code>false</code>. For more information, see <a href="/enterprise/installing-existing-cluster-airgapped">Air Gap Installation in Existing Clusters with KOTS</a>.</td> - </tr> - <tr> - <td><code>--airgap-bundle</code></td> - <td>string</td> - <td>Path to the application air gap bundle where application metadata will be loaded from. Setting <code>--airgap-bundle</code> implies <code>--airgap=true</code>. For more information, see <a href="/enterprise/installing-existing-cluster-airgapped">Air Gap Installation in Existing Clusters with KOTS</a>.</td> - </tr> - <tr> - <td><code>--app-version-label</code></td> - <td>string</td> - <td>The application version label to install. If not specified, the latest version is installed.</td> - </tr> - <tr> - <td><code>--config-values</code></td> - <td>string</td> - <td>Path to a manifest file containing configuration values. This manifest must be <code>apiVersion: kots.io/v1beta1</code> and <code>kind: ConfigValues</code>. For more information, see <a href="/enterprise/installing-existing-cluster-automation">Installing with the KOTS CLI</a>.</td> - </tr> - <tr> - <td><code>--copy-proxy-env</code></td> - <td>bool</td> - <td>Copy proxy environment variables from current environment into all Admin Console components. <strong>Default:</strong> <code>false</code></td> - </tr> - <tr> - <td><code>--disable-image-push</code></td> - <td>bool</td> - <td>Set to <code>true</code> to disable images from being pushed to private registry. 
<strong>Default:</strong> <code>false</code></td> - </tr> - <tr> - <td><code>--ensure-rbac</code></td> - <td>bool</td> - <td>When <code>false</code>, KOTS does not attempt to create the RBAC resources necessary to manage applications. <strong>Default:</strong> <code>true</code>. If a role specification is needed, use the [generate-manifests](kots-cli-admin-console-generate-manifests) command.</td> - </tr> - <Help/> - <tr> - <td><code>--http-proxy</code></td> - <td>string</td> - <td>Sets HTTP_PROXY environment variable in all Admin Console components.</td> - </tr> - <tr> - <td><code>--https-proxy</code></td> - <td>string</td> - <td>Sets HTTPS_PROXY environment variable in all Admin Console components.</td> - </tr> - <KotsadmNamespace/> - <KotsadmRegistry/> - <tr> - <td><code>--license-file</code></td> - <td>string</td> - <td>Path to a license file.</td> - </tr> - <tr> - <td><code>--local-path</code></td> - <td>string</td> - <td>Specify a local-path to test the behavior of rendering a Replicated application locally. Only supported on Replicated application types.</td> - </tr> - <tr> - <td><code>--name</code></td> - <td>string</td> - <td>Name of the application to use in the Admin Console.</td> - </tr> - <tr> - <td><code>--no-port-forward</code></td> - <td>bool</td> - <td>Set to <code>true</code> to disable automatic port forward. <strong>Default:</strong> <code>false</code></td> - </tr> - <tr> - <td><code>--no-proxy</code></td> - <td>string</td> - <td>Sets NO_PROXY environment variable in all Admin Console components.</td> - </tr> - <tr> - <td><code>--port</code></td> - <td>string</td> - <td>Override the local port to access the Admin Console. <strong>Default:</strong> 8800</td> - </tr> - <tr> - <td><code>--private-ca-configmap</code></td> - <td>string</td> - <td>Name of a ConfigMap containing private CAs to add to the kotsadm deployment.</td> - </tr> - <tr> - <td><code>--preflights-wait-duration</code></td> - <td>string</td> - <td>Timeout to be used while waiting for preflights to complete. Must be in [Go duration](https://pkg.go.dev/time#ParseDuration) format. For example, 10s, 2m. <strong>Default:</strong> 15m</td> - </tr> - <RegistryPassword/> - <RegistryUsername/> - <tr> - <td><code>--repo</code></td> - <td>string</td> - <td>Repo URI to use when installing a Helm chart.</td> - </tr> - <tr> - <td><code>--shared-password</code></td> - <td>string</td> - <td>Shared password to use when deploying the Admin Console.</td> - </tr> - <tr> - <td><code>--skip-compatibility-check</code></td> - <td>bool</td> - <td>Set to <code>true</code> to skip compatibility checks between the current KOTS version and the application. <strong>Default:</strong> <code>false</code></td> - </tr> - <tr> - <td><code>--skip-preflights</code></td> - <td>bool</td> - <td>Set to <code>true</code> to skip preflight checks. <strong>Default:</strong> <code>false</code>. If any strict preflight checks are configured, the <code>--skip-preflights</code> flag is not honored because strict preflight checks must run and contain no failures before the application is deployed. For more information, see [Defining Preflight Checks](/vendor/preflight-defining).</td> - </tr> - <tr> - <td><code>--skip-rbac-check</code></td> - <td>bool</td> - <td>Set to <code>true</code> to bypass RBAC check. <strong>Default:</strong> <code>false</code></td> - </tr> - <tr> - <td><code>--skip-registry-check</code></td> - <td>bool</td> - <td>Set to <code>true</code> to skip the connectivity test and validation of the provided registry information. 
<strong>Default:</strong> <code>false</code></td> - </tr> - <StrictSecurityContext/> - <tr> - <td><code>--use-minimal-rbac</code></td> - <td>bool</td> - <td>When set to <code>true</code>, KOTS RBAC permissions are limited to the namespace where it is installed. To use <code>--use-minimal-rbac</code>, the application must support namespace-scoped installations and the user must have the minimum RBAC permissions required by KOTS in the target namespace. For a complete list of requirements, see [Namespace-scoped RBAC Requirements](/enterprise/installing-general-requirements#namespace-scoped) in _Installation Requirements_. <strong>Default:</strong> <code>false</code></td> - </tr> - <tr> - <td><code>--wait-duration</code></td> - <td>string</td> - <td>Timeout to be used while waiting for individual components to be ready. Must be in [Go duration](https://pkg.go.dev/time#ParseDuration) format. For example, 10s, 2m. <strong>Default:</strong> 2m</td> - </tr> - <tr> - <td><code>--with-minio</code></td> - <td>bool</td> - <td>When set to <code>true</code>, KOTS deploys a local MinIO instance for storage and uses MinIO for host path and NFS snapshot storage. <strong>Default:</strong> <code>true</code></td> - </tr> - <tr> - <td><code>--storage-class</code></td> - <td>string</td> - <td>Sets the storage class to use for the KOTS Admin Console components. <strong>Default:</strong> unset, which means the default storage class will be used</td> - </tr> -</table> - - -### Examples - -```bash -kubectl kots install sentry/unstable --license-file ~/license.yaml -kubectl kots install kots-sentry/stable --shared-password IgqG5OBc9Gp --license-file ~/sentry-license.yaml --namespace sentry-namespace --config-values ~/config-values.yaml -kubectl kots install --ensure-rbac=false -``` - -================ -File: docs/reference/kots-cli-pull.md -================ -# pull - -Running this command will create a directory on the workstation containing the application and Kubernetes manifests. These assets can be used to deploy KOTS to a cluster through other workflows, such as kubectl. This command is necessary when managing an application without the use of the Admin Console.
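- -For example, the following sketch renders a hypothetical `sentry/unstable` application to a local directory and then deploys the rendered output with kubectl. The app slug, license path, and output paths are illustrative; the exact directory layout depends on the application, so inspect the generated files before applying: - -```bash -# Render the application and Kubernetes manifests locally -kubectl kots pull sentry/unstable \ -  --license-file ~/license.yaml \ -  --rootdir ./manifests \ -  --namespace sentry - -# Deploy the rendered kustomize overlay with kubectl (path is illustrative) -kubectl apply -k ./manifests/sentry/overlays/midstream -n sentry -```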
- -### Usage -```bash -kubectl kots pull [upstream uri] [flags] -``` -* _Replace `[upstream-uri]` with the URI for your KOTS application (required)._ -* _If the KOTS application has been packaged by Replicated Vendor, the `--license-file` flag must be provided._ -* _Provide `[flags]` according to the table below_ - -This command supports all [global flags](kots-cli-global-flags) and also: - -| Flag | Type | Description | -|:-----|------|-------------| -| `--downstream` | strings | the list of any downstreams to create/update | -| `--exclude-admin-console` | bool | set to true to exclude the Admin Console _(only valid when `[upstream-uri]` points to a replicated app)_ | -| `--exclude-kots-kinds` | bool | set to true to exclude rendering KOTS custom objects to the base directory _(default `true`)_ | -| `-h, --help` | | help for pull | -| `--image-namespace` | string | the namespace/org in the docker registry to push images to _(required when `--rewrite-images` is set)_ | -| `--license-file` | string | path to a license file _(required when `[upstream-uri]` points to a replicated app)_ | -| `--local-path` | string | specify a local-path to pull a locally available replicated app _(only valid when `[upstream-uri]` points to a replicated app)_ | -| `-n, --namespace` | string | namespace to render the upstream to in the base _(default `"default"`)_ | -| `--private-ca-configmap` | string | name of a ConfigMap containing private CAs to add to the kotsadm deployment | -| `--registry-endpoint` | string | the endpoint of the local docker registry to use when pushing images _(required when `--rewrite-images` is set)_ | -| `--rewrite-images` | bool | set to true to force all container images to be rewritten and pushed to a local registry | -| `--rootdir` | string | root directory that will be used to write the yaml to _(default `${HOME}` or `%USERPROFILE%`)_ | -| `--shared-password` | string | shared password to use when deploying the Admin Console | -| `--http-proxy` | string | sets HTTP_PROXY environment variable in all KOTS Admin Console components | -| `--https-proxy` | string | sets HTTPS_PROXY environment variable in all KOTS Admin Console components | -| `--no-proxy` | string | sets NO_PROXY environment variable in all KOTS Admin Console components | -| `--copy-proxy-env` | bool | copy proxy environment variables from current environment into all KOTS Admin Console components | -| `--config-values` | string | path to a manifest containing config values (must be apiVersion: kots.io/v1beta1, kind: ConfigValues) | -| `--with-minio` | bool | set to true to include a local minio instance to be used for storage _(default true)_ | -| `--storage-class` | string | sets the storage class to use for the KOTS Admin Console components. _(default unset, which means the default storage class will be used)_ | - -### Example -```bash -kubectl kots pull sentry/unstable --license-file ~/license.yaml -``` - -================ -File: docs/reference/kots-cli-remove.md -================ -# remove - -Remove application reference from the KOTS Admin Console. - -You can use the `kots remove` command to remove one or more installed applications from the Admin Console. -By default, the deployed application is not removed from the cluster. Only the reference for the application is removed from the Admin Console. To completely remove the application and delete its resources from the cluster, use the `--undeploy` flag.
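- -For example, to remove the reference for a hypothetical `sentry` application and also delete its resources from the cluster (see the flag descriptions below): - -```bash -kubectl kots remove sentry -n default --undeploy -```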
- -### Usage -```bash -kubectl kots remove [app-slug] -n [namespace] [flags] -``` -* _`[app-slug]` is the slug of the installed application to be removed (required)_ -* _Provide `[flags]` according to the table below_ - -This command supports all [global flags](kots-cli-global-flags) and also: - -<table> - <tr> - <th width="20%">Flag</th> - <th width="10%">Type</th> - <th width="70%">Description</th> - </tr> - <tr> - <td><code>--force</code></td> - <td><code>bool</code></td> - <td> - <p>Removes the reference even if the application has already been deployed.</p> - </td> - </tr> - <tr> - <td><code>--undeploy</code></td> - <td><code>bool</code></td> - <td> - <p>Un-deploys the application by deleting all its resources from the cluster. When <code>--undeploy</code> is set, the <code>--force</code> flag is set automatically.</p> - <p><strong>Note:</strong> <code>--undeploy</code> can remove application resources only from the namespace where KOTS is installed and from any namespaces provided in the <a href="custom-resource-application#additionalnamespaces">additionalNamespaces</a> field in the Application custom resource.</p> - <p>The following describes how <code>--undeploy</code> removes application resources:</p> - <ul> - <li>For applications deployed with <code>kubectl apply</code> (including standalone manifest files and Helm charts deployed with <a href="/vendor/helm-native-about#replicated">Replicated Helm</a>), <code>--undeploy</code> identifies and removes resources based on a <code>kots.io/app-slug: <app_slug></code> annotation that is applied to all application resources during deployment. </li> - <li>For Helm chart applications deployed with HelmChart custom resources with <code>apiVersion: kots.io/v1beta2</code> or <code>apiVersion: kots.io/v1beta1</code> and <code>useHelmInstall: true</code>, <code>--undeploy</code> runs <code>helm uninstall</code>.</li> - </ul> - </td> - </tr> - <tr> - <td><code>-n</code></td> - <td><code>string</code></td> - <td><p>The namespace where the target application is deployed. Use <code>default</code> for the default namespace.</p></td> - </tr> -</table> - -### Example -```bash -kubectl kots remove sentry -n default -``` - -================ -File: docs/reference/kots-cli-reset-password.md -================ -# reset-password - -If you deployed an application with the KOTS Admin Console, the `kots reset-password` command changes the bcrypt password hash in the cluster, allowing you to log in again. - -### Usage -```bash -kubectl kots reset-password [namespace] [flags] -``` -* _Replace `[namespace]` with the namespace where the Admin Console and your KOTS application reside (required)._ -* _Provide `[flags]` according to the table below_ - -This command supports all [global flags](kots-cli-global-flags) and also: - - -| Flag | Type | Description | -|:----------------------|------|-------------| -| `-h, --help` | | help for reset-password | -| `-n, --namespace`| string | the namespace where the Admin Console is running | - -### Examples -```bash -kubectl kots reset-password sentry-namespace -``` - -================ -File: docs/reference/kots-cli-reset-tls.md -================ -# reset-tls - -If a bad TLS certificate is uploaded to the KOTS Admin Console or the kotsadm-tls secret is missing, the `kots reset-tls` command reapplies a default self-signed TLS certificate.
-For more information about the certificates stored in this secret, see [Setting up TLS Certificates](https://kurl.sh/docs/install-with-kurl/setup-tls-certs) in the open source kURL documentation. - -### Usage -```bash -kubectl kots reset-tls [namespace] [flags] -``` -* _Replace `[namespace]` with the namespace where the Admin Console and your KOTS application reside (required)._ -* _Provide `[flags]` according to the table below_ - -This command supports all [global flags](kots-cli-global-flags) and also: - - -| Flag | Type | Description | -|:----------------------|------|-------------| -| `-h, --help` | | Help for `reset-tls`. | -| `-n, --namespace`| string | The namespace where the Admin Console is running. | -| `--accept-anonymous-uploads`| bool | Allow uploading a new certificate prior to authenticating. | - -### Examples -```bash -kubectl kots reset-tls sentry-namespace -``` - -================ -File: docs/reference/kots-cli-restore-index.md -================ -# restore - -Restore full snapshots for disaster recovery, or perform a partial restore of only the application or only the Replicated Admin Console. - -### Usage - -```bash -kubectl kots restore --from-backup [flags] -``` - -This command supports the following flags: - -| Flag | Type | Description | -| :-------------------------- | ------ | --------------------------------------------------------------------------------------------- | -| `--exclude-admin-console` | bool | Exclude restoring the Admin Console and only restore the applications. **Default:** false | -| `--exclude-apps` | bool | Exclude restoring the applications and only restore the Admin Console. **Default:** false | -| `--from-backup` | string | (Required) The name of the backup to restore from. | -| `-h, --help` | | Help for `restore`. | -| `-o, --output` | string | The output format. Supports JSON. Defaults to plain text if not set. | -| `--velero-namespace` | string | (Required for minimal RBAC installations) The namespace where Velero is installed. | -| `--wait-for-apps` | bool | Wait for all applications to be restored. **Default:** true | - -### Example - -```bash -kubectl kots restore --from-backup instance-942kf -``` - -================ -File: docs/reference/kots-cli-restore-ls.md -================ -# restore ls - -:::note -This command is deprecated. Use [`kubectl kots get restores`](/reference/kots-cli-get-restores) instead. -::: - -Show a list of all the available full snapshot restores for disaster recovery. - -### Usage - -```bash -kubectl kots restore ls [flags] -``` - -This command supports the following flags: - -| Flag | Type | Description | -| :---------------- | ------ | ------------------------------------------------------------------- | -| `-h, --help` | | Help for `restore ls`. | -| `-n, --namespace` | string | Filter by the namespace the Admin Console was installed in. | - -### Example - -```bash -kubectl kots restore ls --namespace kots-sentry -``` - -================ -File: docs/reference/kots-cli-set-config.mdx -================ -import PreflightsStrict from "../partials/preflights/_preflights-strict.mdx" - - -# set config - -The `kots set config` command allows setting values for application config items in the latest release version. - -> Introduced in KOTS v1.31.0 - -## Usage - -```bash -kubectl kots set config [appSlug] [KEY_1=VAL_1 ...
KEY_N=VAL_N] [flags] -``` - -- _Provide `[flags]` according to the table below_ - -| Flag | Type | Description | -| :-------------------| ------ | ------------------------------------------------------------------------------------------------------------------------------------- | -| `--config-file` | string | path to a manifest containing config values (must be `apiVersion: kots.io/v1beta1, kind: ConfigValues`) | -| `--merge` | bool | when set to true, only keys specified in the config file will be updated. This flag can only be used when the `--config-file` flag is used. | -|`--key` | string | name of a single key to set. This flag requires the `--value` or `--value-from-file` flag | -| `--value` | string | the value to set for the key specified in the `--key` flag. This flag cannot be used with the `--value-from-file` flag. | -| `--value-from-file` | string | path to the file containing the value to set for the key specified in the `--key` flag. This flag cannot be used with the `--value` flag. | -| `--deploy` | bool | when set, automatically deploy the latest version with the new configuration | -| `--skip-preflights` | bool | set to true to skip preflight checks when deploying the new version | -| `--current` | bool | set to true to use the currently deployed version of the app as the base for the new version | -| `--sequence` | int | sequence of the app version to use as the base for the new version (defaults to the latest version unless the `--current` flag is set) | -| `-n, --namespace` | string | the namespace where the Admin Console is running _(required)_ | - - -#### About Strict Preflight Checks - -<PreflightsStrict/> - - -## Examples - -```bash -kubectl kots set config myapp -n default --config-file /path/to/local/config.yaml -``` - -```bash -kubectl kots set config myapp -n default --key config-item-name --value-from-file /path/to/config/file/value.txt -``` - -```bash -kubectl kots set config myapp -n default config-item-name="config item value" -``` - -```bash -kubectl kots set config myapp -n default --key config-item-name --value "config item value" -``` - -================ -File: docs/reference/kots-cli-set-index.md -================ -# set - -Configure KOTS resources. - -### Usage - -```bash -kubectl kots set [resource] [flags] -``` - -This command supports all [global flags](kots-cli-global-flags). - -### Resources - -* `config` set config items for an application. - -================ -File: docs/reference/kots-cli-upload.mdx -================ -import PreflightsStrict from "../partials/preflights/_preflights-strict.mdx" - -# upload - -Upload Kubernetes manifests from the local filesystem, creating a new version of the application that can be deployed. -When you have a copy of an application that was created with `kots pull` or `kots download`, you can upload it back to the Admin Console using the `kots upload` command. - -## Usage -```bash -kubectl kots upload [source] [flags] -``` -* _Replace `[source]` with a directory containing the manifests of your KOTS application (required)._ -* _Provide `[flags]` according to the table below_ - -This command supports all [global flags](kots-cli-global-flags) and also: - - -| Flag | Type | Description | -|:----------------------|------|-------------| -| `-h, --help` | | help for upload | -| `--name`| string | the name of the kotsadm application to create | -| `-n, --namespace`| string | the namespace to upload to _(default `"default"`)_ | -| `--slug`| string | the application slug to use.
If not present, a new one will be created | -| `--upstream-uri`| string | the upstream uri that can be used to check for updates | -| `--deploy`| bool | when set, automatically deploy the uploaded version | -| `--skip-preflights`| bool | set to true to skip preflight checks | -| `-o, --output` | string | output format (currently supported: json) _(defaults to plain text if not set)_ | - - -Any `plainText` values in the `upstream/userdata/config.yaml` file will be re-encrypted automatically using the application cipher, if the matching config item is a password type. -If both an encrypted and a `plainText` value are provided for a single item and they differ, the `plainText` value overwrites the encrypted value. - -#### About Strict Preflight Checks - -<PreflightsStrict/> - - -## Examples - -```bash -kubectl kots upload ./manifests --name kots-sentry --namespace kots-sentry --slug kots-sentry --upstream-uri kots-sentry/unstable -``` - -================ -File: docs/reference/kots-cli-upstream-download.md -================ -# upstream download - -The `kots upstream download` command retries downloading a failed update of the upstream application. - -### Usage -```bash -kubectl kots upstream download [app-slug] [flags] -``` -* _Replace `[app-slug]` with the app slug for your KOTS application (required)._ -* _Provide `[flags]` according to the table below._ - -| Flag | Type | Description | -|:----------------------------------|--------|--------------------------------------------------------------------------------------------------| -| `-h, --help` | | Help for `upstream download`. | -| `--kubeconfig` | string | The kubeconfig to use. **Default**: `$KUBECONFIG`. If unset, then `$HOME/.kube/config`. | -| `-n, --namespace` | string | (Required) The namespace where the Admin Console is running. | -| `--sequence` | int | (Required) The local app sequence for the version to retry downloading. | -| `--skip-preflights` | bool | Set to `true` to skip preflight checks. | -| `--skip-compatibility-check` | bool | Set to `true` to skip compatibility checks between the current KOTS version and the update. | -| `--wait` | bool | Set to `false` to download the update in the background. **Default**: `true`. | -| `-o, --output` | string | Output format. **Supported formats**: `json`. **Default**: Plain text. | - -### Example -```bash -kubectl kots upstream download kots-sentry --namespace kots-sentry --sequence 8 -``` - -================ -File: docs/reference/kots-cli-upstream-upgrade.mdx -================ -import PreflightsStrict from "../partials/preflights/_preflights-strict.mdx" - -# upstream upgrade - -The `kots upstream upgrade` command fetches the latest version of the upstream application. -It is functionally equivalent to clicking "Check For Updates" in the Admin Console. - -## Usage -```bash -kubectl kots upstream upgrade [app-slug] [flags] -``` -* _Replace `[app-slug]` with the app slug for your KOTS application (required)._ -* _Provide `[flags]` according to the table below_ - -| Flag | Type | Description | -|:-------------------------|--------|--------------------------------------------------------------------------------------------------| -| `-h, --help` | | help for upstream | -| `--kubeconfig` | string | the kubeconfig to use. **Default:** `$KUBECONFIG`.
If unset, then `$HOME/.kube/config` | -| `-n, --namespace` | string | (Required) the namespace where the Admin Console is running | -| `--deploy` | bool | ensures the latest available release is deployed | -| `--deploy-version-label` | string | ensures the release with the provided version label is deployed | -| `--skip-preflights` | bool | set to true to skip preflight checks | -| `--airgap-bundle` | string | path to the application airgap bundle where application images and metadata will be loaded from | -| `--kotsadm-namespace` | string | the registry namespace to use for application images | -| `--kotsadm-registry` | string | the registry endpoint where application images will be pushed | -| `--registry-password` | string | the password to use to authenticate with the registry | -| `--registry-username` | string | the username to use to authenticate with the registry | -| `--disable-image-push` | bool | set to true to disable images from being pushed to a private registry. **Default:** `false` | -| `--skip-registry-check` | bool | Set to `true` to skip the connectivity test and validation of the provided registry information. **Default:** `false` | -| `--wait` | bool | set to false to download the updates in the background. **Default:** `true` | -| `-o, --output` | string | output format (currently supported: json). **Default:** Plain text if not set | - - -#### About Strict Preflight Checks - -<PreflightsStrict/> - - -## Example -```bash -kubectl kots upstream upgrade kots-sentry --namespace kots-sentry -``` - -================ -File: docs/reference/kots-cli-upstream.md -================ -# upstream - -KOTS Upstream interface. - -### Usage -```bash -kubectl kots upstream [command] [flags] -``` - -This command supports all [global flags](kots-cli-global-flags). - -================ -File: docs/reference/kots-cli-velero-configure-aws-s3.md -================ -# velero configure-aws-s3 - -Configures snapshots to use an AWS S3 Bucket as a storage destination. -This command supports authentication via [IAM User Access Keys](https://github.com/vmware-tanzu/velero-plugin-for-aws#option-1-set-permissions-with-an-iam-user) or IAM Instance Roles for the velero-plugin-for-aws.
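- -For example, the following sketch uses the `access-key` subcommand (detailed below) together with the optional `--path` flag to store backups under a subdirectory of the bucket; the bucket name, path, and credentials are placeholder values: - -```bash -kubectl kots velero configure-aws-s3 access-key --namespace default --region us-east-1 --bucket kots-snaps --path snapshots/prod --access-key-id XXXXXXXJTJB7M2XZUV7D --secret-access-key <secret access key here> -```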
- -Valid Subcommands: -* `access-key` -* `instance-role` - -### Usage - -```bash -kubectl kots velero configure-aws-s3 [subcommand] -``` - -| Flag | Type | Description | -|--------------|------|--------------------------| -| `-h, --help` | | help for configure-aws-s3 | - -### access-key - -```bash -kubectl kots velero configure-aws-s3 access-key [flags] -``` - -- _Provide `[flags]` according to the table below_ - -| Flag | Type | Description | -|------------------------|--------|-------------------------------------------------------------------------------| -| `-h, --help` | | help for access-key | -| `-n, --namespace` | string | the namespace of the Admin Console _(required)_ | -| `--access-key-id` | string | the aws access key id to use for accessing the bucket _(required)_ | -| `--bucket` | string | name of the object storage bucket where backups should be stored _(required)_ | -| `--path ` | string | path to a subdirectory in the object store bucket | -| `--region ` | string | the region where the bucket exists _(required)_ | -| `--secret-access-key ` | string | the aws secret access key to use for accessing the bucket _(required)_ | -| `--skip-validation` | bool | skip the validation of the S3 Bucket _(default `false`)_ | - -#### Example - -```bash -kubectl kots velero configure-aws-s3 access-key --namespace default --region us-east-1 --bucket kots-snaps --access-key-id XXXXXXXJTJB7M2XZUV7D --secret-access-key <secret access key here> -``` - -### instance-role - -```bash -kubectl kots velero configure-aws-s3 instance-role [flags] -``` - -- _Provide `[flags]` according to the table below_ - -| Flag | Type | Description | -|------------------------|--------|-------------------------------------------------------------------------------| -| `-h, --help` | | help for instance-role | -| `-n, --namespace` | string | the namespace of the Admin Console _(required)_ | -| `--bucket` | string | name of the object storage bucket where backups should be stored _(required)_ | -| `--path ` | string | path to a subdirectory in the object store bucket | -| `--region ` | string | the region where the bucket exists _(required)_ | -| `--skip-validation` | bool | skip the validation of the S3 Bucket _(default `false`)_ | - -#### Example - -```bash -kubectl kots velero configure-aws-s3 instance-role --namespace default --region us-east-1 --bucket kots-snaps -``` - -================ -File: docs/reference/kots-cli-velero-configure-azure.md -================ -# velero configure-azure - -Configures snapshots to use an Azure Blob Storage Container as a storage destination. -Currently, only the [service principal authentication method](https://github.com/vmware-tanzu/velero-plugin-for-microsoft-azure#option-1-create-service-principal) of the velero-plugin-for-microsoft-azure is supported.
- -Valid Subcommands: -* service-principle - -### Usage - -```bash -kubectl kots velero configure-azure [subcommand] -``` - -| Flag | Type | Description | -|--------------|------|--------------------------| -| `-h, --help` | | help for configure-azure | - -### service-principle - -```bash -kubectl kots velero configure-azure service-principle [flags] -``` - -- _Provide `[flags]` according to the table below_ - -| Flag | Type | Description | -|---------------------|--------|---------------------------------------------------------------------------------------------------------------------------------------------| -| `-h, --help` | | help for service-principle | -| `-n, --namespace` | string | the namespace of the Admin Console _(required)_ | -| `--client-id` | string | the client ID of a service principal with access to the blob storage container _(required)_ | -| `--client-secret` | string | the client secret of a service principal with access to the blob storage container _(required)_ | -| `--cloud-name` | string | the Azure cloud target. Options: AzurePublicCloud, AzureUSGovernmentCloud, AzureChinaCloud, AzureGermanCloud _(default `AzurePublicCloud`)_ | -| `--container` | string | name of the Azure blob storage container where backups should be stored _(required)_ | -| `--path ` | string | path to a subdirectory in the blob storage container | -| `--resource-group` | string | the resource group name of the blob storage container _(required)_ | -| `--skip-validation` | bool | skip the validation of the blob storage container _(default `false`)_ | -| `--storage-account` | string | the storage account name of the blob storage container _(required)_ | -| `--subscription-id` | string | the subscription id associated with the blob storage container _(required)_ | -| `--tenant-id ` | string | the tenant ID associated with the blob storage container _(required)_ | - -#### Example - -```bash -kubectl kots velero configure-azure service-principle --namespace default --container velero --resource-group Velero_Backups --storage-account velero1111362eb32b --subscription-id "1111111-1111-47a7-9671-c904d681c2b2" --tenant-id "1111111-1111-42e1-973b-ad2efc689308" --client-id "1111111-1111-4ac3-9e2b-bbea61392432" --client-secret "<secret here>" -``` - -================ -File: docs/reference/kots-cli-velero-configure-gcp.md -================ -# velero configure-gcp - -Configures snapshots to use a Google Cloud Platform Object Storage Bucket as a storage destination. -This command supports authentication via [Service Account Credentials](https://github.com/vmware-tanzu/velero-plugin-for-gcp#option-1-set-permissions-with-a-service-account) or [Workload Identity](https://github.com/vmware-tanzu/velero-plugin-for-gcp#option-2-set-permissions-with-using-workload-identity-optional).
- -Valid Subcommands: -* `service-account` -* `workload-identity` - -### Usage - -```bash -kubectl kots velero configure-gcp [subcommand] -``` - -| Flag | Type | Description | -|--------------|------|--------------------------| -| `-h, --help` | | help for configure-gcp | - -### service-account - -```bash -kubectl kots velero configure-gcp service-account [flags] -``` - -- _Provide `[flags]` according to the table below_ - -| Flag | Type | Description | -|---------------------|--------|-------------------------------------------------------------------------------| -| `-h, --help` | | help for service-account | -| `-n, --namespace` | string | the namespace of the Admin Console _(required)_ | -| `--bucket` | string | name of the object storage bucket where backups should be stored _(required)_ | -| `--json-file` | string | path to the JSON credentials file for velero _(required)_ | -| `--path ` | string | path to a subdirectory in the object store bucket | -| `--skip-validation` | bool | skip the validation of the GCP Bucket _(default `false`)_ | - -#### Example - -```bash -kubectl kots velero configure-gcp service-account --namespace default --bucket velero-backups --json-file sa-creds.json -``` - -### workload-identity - -```bash -kubectl kots velero configure-gcp workload-identity [flags] -``` - -- _Provide `[flags]` according to the table below_ - -| Flag | Type | Description | -|---------------------|--------|-------------------------------------------------------------------------------| -| `-h, --help` | | help for workload-identity | -| `-n, --namespace` | string | the namespace of the Admin Console _(required)_ | -| `--bucket` | string | name of the object storage bucket where backups should be stored _(required)_ | -| `--path ` | string | path to a subdirectory in the object store bucket | -| `--service-account` | string | the service account to use if using Google Cloud instance role _(required)_ | -| `--skip-validation` | bool | skip the validation of the GCP Bucket _(default `false`)_ | - -#### Example - -```bash -kubectl kots velero configure-gcp workload-identity --namespace default --bucket velero-backups --service-account ss-velero@gcp-project.iam.gserviceaccount.com -``` - -================ -File: docs/reference/kots-cli-velero-configure-hostpath.mdx -================ -import KotsadmNamespace from "../partials/kots-cli/_kotsadm-namespace.mdx" -import KotsadmRegistry from "../partials/kots-cli/_kotsadm-registry.mdx" -import RegistryPassword from "../partials/kots-cli/_registry-password.mdx" -import RegistryUsername from "../partials/kots-cli/_registry-username.mdx" -import Help from "../partials/kots-cli/_help.mdx" - -# velero configure-hostpath - -Configures snapshots to use a host path as a storage destination. - -### Usage - -```bash -kubectl kots velero configure-hostpath [flags] -``` - -- _Provide `[flags]` according to the table below_ - -<table> - <tr> - <td width="30%">Flag</td> - <td>Type</td> - <td>Description</td> - </tr> - <Help/> - <tr> - <td>`-n, --namespace`</td> - <td>string</td> - <td>The namespace of the Admin Console (required)</td> - </tr> - <tr> - <td>`--hostpath`</td> - <td>string</td> - <td>A local host path on the node</td> - </tr> - <KotsadmNamespace/> - <KotsadmRegistry/> - <RegistryPassword/> - <RegistryUsername/> - <tr> - <td>`--force-reset`</td> - <td>bool</td> - <td>Bypass the reset prompt and force resetting the host path. (default `false`)</td> - </tr> - <tr> - <td>`--output`</td> - <td>string</td> - <td>Output format.
Supported values: `json`</td> - </tr> -</table> - -### Examples - -Basic - -```bash -kubectl kots velero configure-hostpath --hostpath /mnt/kots-sentry-snapshots --namespace kots-sentry -``` - -Using a registry for airgapped installations - -```bash -kubectl kots velero configure-hostpath \ - --hostpath /mnt/kots-sentry-snapshots \ - --namespace kots-sentry \ - --kotsadm-registry private.registry.host/kots-sentry \ - --registry-username ro-username \ - --registry-password ro-password -``` - -================ -File: docs/reference/kots-cli-velero-configure-internal.md -================ -# velero configure-internal - -:::important -The following command is applicable only to embedded clusters created by Replicated kURL and is _not_ recommended for production usage. -Consider configuring one of the other available storage destinations. See [Configuring Other Storage Destinations](/enterprise/snapshots-storage-destinations). -::: - -Configures snapshots to use the internal object store in embedded clusters as a storage destination. - -### Usage - -```bash -kubectl kots velero configure-internal [flags] -``` - -- _Provide `[flags]` according to the table below_ - -| Flag | Type | Description | -|------------------------|--------|-------------------------------------------------------------------------------| -| `-h, --help` | | help for configure-internal | -| `--skip-validation` | bool | skip the validation of the S3 Bucket _(default `false`)_ | - -#### Example - -```bash -kubectl kots velero configure-internal -``` - -================ -File: docs/reference/kots-cli-velero-configure-nfs.mdx -================ -import KotsadmNamespace from "../partials/kots-cli/_kotsadm-namespace.mdx" -import KotsadmRegistry from "../partials/kots-cli/_kotsadm-registry.mdx" -import RegistryPassword from "../partials/kots-cli/_registry-password.mdx" -import RegistryUsername from "../partials/kots-cli/_registry-username.mdx" -import Help from "../partials/kots-cli/_help.mdx" - -# velero configure-nfs - -Configures snapshots to use NFS as a storage destination. - -### Usage - -```bash -kubectl kots velero configure-nfs [flags] -``` - -- _Provide `[flags]` according to the table below_ - -<table> - <tr> - <td width="30%">Flag</td> - <td>Type</td> - <td>Description</td> - </tr> - <Help/> - <tr> - <td>`-n, --namespace`</td> - <td>string</td> - <td>The namespace of the Admin Console (required)</td> - </tr> - <tr> - <td>`--nfs-server`</td> - <td>string</td> - <td>The hostname or IP address of the NFS server (required)</td> - </tr> - <tr> - <td>`--nfs-path`</td> - <td>string</td> - <td>The path that is exported by the NFS server (required)</td> - </tr> - <KotsadmNamespace/> - <KotsadmRegistry/> - <RegistryPassword/> - <RegistryUsername/> - <tr> - <td>`--force-reset`</td> - <td>bool</td> - <td>Bypass the reset prompt and force resetting the NFS path. (default `false`)</td> - </tr> - <tr> - <td>`--output`</td> - <td>string</td> - <td>Output format.
Supported values: `json`</td> - </tr> -</table> - -### Examples - -Basic - -```bash -kubectl kots velero configure-nfs --nfs-server 10.128.0.32 --nfs-path /mnt/nfs_share --namespace kots-sentry -``` - -Using a registry for airgapped installations - -```bash -kubectl kots velero configure-nfs \ - --nfs-server 10.128.0.32 \ - --nfs-path /mnt/nfs_share \ - --namespace kots-sentry \ - --kotsadm-registry private.registry.host/kots-sentry \ - --registry-username ro-username \ - --registry-password ro-password -``` - -================ -File: docs/reference/kots-cli-velero-configure-other-s3.mdx -================ -import KotsadmNamespace from "../partials/kots-cli/_kotsadm-namespace.mdx" -import KotsadmRegistry from "../partials/kots-cli/_kotsadm-registry.mdx" -import RegistryPassword from "../partials/kots-cli/_registry-password.mdx" -import RegistryUsername from "../partials/kots-cli/_registry-username.mdx" -import Help from "../partials/kots-cli/_help.mdx" - -# velero configure-other-s3 - -Configures snapshots to use an S3-compatible storage provider, such as MinIO, as a storage destination. - -### Usage - -```bash -kubectl kots velero configure-other-s3 [flags] -``` - -- _Provide `[flags]` according to the table below_ - -<table> - <tr> - <td width="30%">Flag</td> - <td>Type</td> - <td>Description</td> - </tr> - <Help/> - <tr> - <td>`-n, --namespace`</td> - <td>string</td> - <td>The namespace of the Admin Console (required)</td> - </tr> - <tr> - <td>`--access-key-id`</td> - <td>string</td> - <td>The AWS access key ID to use for accessing the bucket (required)</td> - </tr> - <tr> - <td>`--bucket`</td> - <td>string</td> - <td>Name of the object storage bucket where backups should be stored (required)</td> - </tr> - <tr> - <td>`--endpoint`</td> - <td>string</td> - <td>The S3 endpoint (for example, http://some-other-s3-endpoint) (required)</td> - </tr> - <tr> - <td>`--path`</td> - <td>string</td> - <td>Path to a subdirectory in the object store bucket</td> - </tr> - <tr> - <td>`--region`</td> - <td>string</td> - <td>The region where the bucket exists (required)</td> - </tr> - <tr> - <td>`--secret-access-key`</td> - <td>string</td> - <td>The AWS secret access key to use for accessing the bucket (required)</td> - </tr> - <tr> - <td>`--cacert`</td> - <td>string</td> - <td>File containing a certificate bundle to use when verifying TLS connections to the object store</td> - </tr> - <tr> - <td>`--skip-validation`</td> - <td>bool</td> - <td>Skip the validation of the S3 bucket (default `false`)</td> - </tr> - <KotsadmNamespace/> - <KotsadmRegistry/> - <RegistryPassword/> - <RegistryUsername/> -</table> - -#### Example - -```bash -kubectl kots velero configure-other-s3 --namespace default --endpoint http://minio --region us-east-1 --bucket kots-snaps --access-key-id XXXXXXXJTJB7M2XZUV7D --secret-access-key mysecretkey -``` - -================ -File: docs/reference/kots-cli-velero-ensure-permissions.md -================ -# velero ensure-permissions - -Ensures the necessary permissions that enable Replicated KOTS to access Velero.
- -### Usage - -```bash -kubectl kots velero ensure-permissions [flags] -``` - -- _Provide `[flags]` according to the table below_ - -| Flag | Type | Description | -| ----------------- | ------ | ------------------------------------------------------------------- | -| `-h, --help` | | help for ensure-permissions | -| `-n, --namespace` | string | the namespace where the Admin Console is running _(required)_ | -| `--velero-namespace` | string | the namespace where velero is running _(required)_ | - -### Example - -```bash -kubectl kots velero ensure-permissions --namespace kots-sentry --velero-namespace velero -``` - -================ -File: docs/reference/kots-cli-velero-index.md -================ -# velero - -The KOTS Velero interface, which configures storage destinations for backups (snapshots), sets up permissions, and prints setup instructions. - -### Usage - -```bash -kubectl kots velero [command] [global flags] -``` - -This command supports all [global flags](kots-cli-global-flags). - -The following `kots velero` commands are supported: - -- [`configure-aws-s3`](kots-cli-velero-configure-aws-s3): Configures an AWS S3 bucket as the storage destination. -- [`configure-azure`](kots-cli-velero-configure-azure): Configures an Azure Blob Storage Container as the storage destination. -- [`configure-gcp`](kots-cli-velero-configure-gcp): Configures a Google Cloud Platform Object Storage Bucket as the storage destination. -- [`configure-internal`](kots-cli-velero-configure-internal): (Embedded clusters only) Configures the internal object store in the cluster as the storage destination. -- [`configure-other-s3`](kots-cli-velero-configure-other-s3): Configures an S3-compatible storage provider as the storage destination. -- [`configure-nfs`](kots-cli-velero-configure-nfs): Configures NFS as the storage destination. -- [`configure-hostpath`](kots-cli-velero-configure-hostpath): Configures a host path as the storage destination. -- [`ensure-permissions`](kots-cli-velero-ensure-permissions): Allows the KOTS Admin Console to access Velero. - -================ -File: docs/reference/kots-cli-velero-print-fs-instructions.md -================ -# velero print-fs-instructions - -:::note -This command is deprecated. Use [`kubectl kots velero configure-hostpath`](/reference/kots-cli-velero-configure-hostpath) or [`kubectl kots velero configure-nfs`](/reference/kots-cli-velero-configure-nfs) instead. -::: - -Prints instructions for setting up a file system as the snapshots storage destination (such as NFS or host path).
- -### Usage - -```bash -kubectl kots velero print-fs-instructions [flags] -``` - -- _Provide `[flags]` according to the table below_ - -| Flag | Type | Description | -| ----------------- | ------ | ------------------------------------------------------------------- | -| `-h, --help` | | help for print-fs-instructions | -| `-n, --namespace` | string | the namespace of the Admin Console _(required)_ | - -### Example - -Basic - -```bash -kubectl kots velero print-fs-instructions --namespace kots-sentry -``` - -================ -File: docs/reference/linter.mdx -================ -import MissingKindField from "../partials/linter-rules/_missing-kind-field.mdx" -import MissingAPIVersionField from "../partials/linter-rules/_missing-api-version-field.mdx" -import PreflightSpec from "../partials/linter-rules/_preflight-spec.mdx" -import ConfigSpec from "../partials/linter-rules/_config-spec.mdx" -import TroubleshootSpec from "../partials/linter-rules/_troubleshoot-spec.mdx" -import ApplicationSpec from "../partials/linter-rules/_application-spec.mdx" -import ApplicationIcon from "../partials/linter-rules/_application-icon.mdx" -import ApplicationStatusInformers from "../partials/linter-rules/_application-statusInformers.mdx" -import InvalidTargetKOTS from "../partials/linter-rules/_invalid-target-kots-version.mdx" -import InvalidMinKOTS from "../partials/linter-rules/_invalid-min-kots-version.mdx" -import InvalidKubernetesInstaller from "../partials/linter-rules/_invalid-kubernetes-installer.mdx" -import DeprecatedKubernetesInstallerVersion from "../partials/linter-rules/_deprecated-kubernetes-installer-version.mdx" -import InvalidHelmReleaseName from "../partials/linter-rules/_invalid-helm-release-name.mdx" -import Replicas1 from "../partials/linter-rules/_replicas-1.mdx" -import Privileged from "../partials/linter-rules/_privileged.mdx" -import AllowPrivilegeEscalation from "../partials/linter-rules/_allow-privilege-escalation.mdx" -import ContainerImageLatestTag from "../partials/linter-rules/_container-image-latest-tag.mdx" -import ContainerImageLocalImageName from "../partials/linter-rules/_container-image-local-image-name.mdx" -import ContainerResources from "../partials/linter-rules/_container-resources.mdx" -import ContainerResourceLimits from "../partials/linter-rules/_container-resource-limits.mdx" -import ContainerResourceRequests from "../partials/linter-rules/_container-resource-requests.mdx" -import ResourceLimitsCPU from "../partials/linter-rules/_resource-limits-cpu.mdx" -import ResourceLimitsMemory from "../partials/linter-rules/_resource-limits-memory.mdx" -import ResourceRequestsCPU from "../partials/linter-rules/_resource-requests-cpu.mdx" -import ResourceRequestsMemory from "../partials/linter-rules/_resource-requests-memory.mdx" -import VolumesHostPaths from "../partials/linter-rules/_volumes-host-paths.mdx" -import VolumeDockerSock from "../partials/linter-rules/_volume-docker-sock.mdx" -import HardcodedNamespace from "../partials/linter-rules/_hardcoded-namespace.mdx" -import ConfigOptionInvalidType from "../partials/linter-rules/_config-option-invalid-type.mdx" -import ConfigOptionInvalidRegexValidator from "../partials/linter-rules/_config-option-invalid-regex-validator.mdx" -import ConfigOptionRegexValidatorInvalidType from "../partials/linter-rules/_config-option-regex-validator-invalid-type.mdx" -import RepeatOptionMissingTemplate from "../partials/linter-rules/_repeat-option-missing-template.mdx" -import RepeatOptionMissingValuesByGroup from
"../partials/linter-rules/_repeat-option-missing-valuesByGroup.mdx" -import RepeatOptionMalformedYAMLPath from "../partials/linter-rules/_repeat-option-malformed-yamlpath.mdx" -import ConfigOptionPasswordType from "../partials/linter-rules/_config-option-password-type.mdx" -import ConfigOptionIsCircular from "../partials/linter-rules/_config-option-is-circular.mdx" -import InvalidRenderedYaml from "../partials/linter-rules/_invalid-rendered-yaml.mdx" -import InvalidType from "../partials/linter-rules/_invalid_type.mdx" -import InvalidYaml from "../partials/linter-rules/_invalid-yaml.mdx" -import LinterDefinition from "../partials/linter-rules/_linter-definition.mdx" -import MayContainSecrets from "../partials/linter-rules/_may-contain-secrets.mdx" - -# Linter Rules - -This topic describes the release linter and the linter rules. - -## Overview - -<LinterDefinition/> - -The linter runs automatically against KOTS releases that you create in the Replicated vendor portal, and displays any error or warning messages in the vendor portal UI. - -To lint manifest files from the command line, you can run the Replicated CLI `replicated release lint` command against the root directory of your application manifest files. You can also use the `--lint` flag when you create a release with the `replicated release create` command. For more information, see [release lint](/reference/replicated-cli-release-lint) and [release create](/reference/replicated-cli-release-create) in the _Replicated CLI_ section. - -## Linter Rules - -This section lists the linter rules and the default rule levels (Info, Warn, Error). You can customize the default rule levels in the Replicated LinterConfig custom resource. -For more information, see [LintConfig](custom-resource-lintconfig). - -### allow-privilege-escalation - -<table> - <tr> - <th>Description</th> - <td>Notifies if any manifest file has <code>allowPrivilegeEscalation</code> set to <code>true</code>.</td> - </tr> - <tr> - <th>Level</th> - <td>Info</td> - </tr> - <tr> - <th>Applies To</th> - <td>All files</td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of matching YAML for this rule:</p><AllowPrivilegeEscalation/></td> - </tr> -</table> - -### application-icon - -<table> - <tr> - <th>Description</th> - <td> - Requires an application icon. - </td> - </tr> - <tr> - <th>Level</th> - <td>Warn</td> - </tr> - <tr> - <th>Applies To</th> - <td> - Files with <code>kind: Application</code> and <code>apiVersion: kots.io/v1beta1</code>. - </td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of correct YAML for this rule:</p><ApplicationIcon/></td> - </tr> -</table> - -### application-spec - -<table> - <tr> - <th>Description</th> - <td> - <p>Requires an Application custom resource manifest file.</p> - <p>Accepted value for <code>kind</code>: <code>Application</code></p> - </td> - </tr> - <tr> - <th>Level</th> - <td>Warn</td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of matching YAML for this rule:</p><ApplicationSpec/></td> - </tr> -</table> - -### application-statusInformers - -<table> - <tr> - <th>Description</th> - <td> - Requires <code>statusInformers</code>. - </td> - </tr> - <tr> - <th>Level</th> - <td>Warn</td> - </tr> - <tr> - <th>Applies To</th> - <td> - Files with <code>kind: Application</code> and <code>apiVersion: kots.io/v1beta1</code>. 
- </td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of correct YAML for this rule:</p><ApplicationStatusInformers/></td> - </tr> -</table> - -### config-option-invalid-type - -<table> - <tr> - <th>Description</th> - <td> - <p>Enforces valid types for Config items.</p> - <p>For more information, see <a href="/reference/custom-resource-config#items">Items</a> in <em>Config</em>.</p> - </td> - </tr> - <tr> - <th>Level</th> - <td>Error</td> - </tr> - <tr> - <th>Applies To</th> - <td>All files</td> - </tr> - <tr> - <th>Example</th> - <td><ConfigOptionInvalidType/></td> - </tr> -</table> - -### config-option-is-circular - -<table> - <tr> - <th>Description</th> - <td>Enforces that all ConfigOption items do not reference themselves.</td> - </tr> - <tr> - <th>Level</th> - <td>Error</td> - </tr> - <tr> - <th>Applies To</th> - <td> - Files with <code>kind: Config</code> and <code>apiVersion: kots.io/v1beta1</code>. - </td> - </tr> - <tr> - <th>Example</th> - <td> <ConfigOptionIsCircular/> </td> - </tr> -</table> - - -### config-option-not-found - -<table> - <tr> - <th>Description</th> - <td> - Requires all ConfigOption items to be defined in the <code>Config</code> custom resource manifest file. - </td> - </tr> - <tr> - <th>Level</th> - <td>Warn</td> - </tr> - <tr> - <th>Applies To</th> - <td>All files</td> - </tr> -</table> - - -### config-option-not-repeatable - -<table> - <tr> - <th>Description</th> - <td> - Enforces that sub-templated ConfigOption items must be repeatable. - </td> - </tr> - <tr> - <th>Level</th> - <td>Error</td> - </tr> - <tr> - <th>Applies To</th> - <td>All files</td> - </tr> -</table> - -### config-option-password-type - -<table> - <tr> - <th>Description</th> - <td> - <p>Requires ConfigOption items with any of the following names to have <code>type</code> set to <code>password</code>:</p> - <ul> - <li><code>password</code></li> - <li><code>secret</code></li> - <li><code>token</code></li> - </ul> - </td> - </tr> - <tr> - <th>Level</th> - <td>Warn</td> - </tr> - <tr> - <th>Applies To</th> - <td>All files</td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of correct YAML for this rule:</p><ConfigOptionPasswordType/></td> - </tr> -</table> - -### config-option-when-is-invalid - -<table> - <tr> - <th>Description</th> - <td> - <p>Enforces valid <code>ConfigOption.when</code>.</p> - <p>For more information, see <a href="/reference/custom-resource-config#when">when</a> in <em>Config</em>.</p> - </td> - </tr> - <tr> - <th>Level</th> - <td>Error</td> - </tr> - <tr> - <th>Applies To</th> - <td>Files with <code>kind: Config</code> and <code>apiVersion: kots.io/v1beta1</code>.</td> - </tr> -</table> - -### config-option-invalid-regex-validator - -<table> - <tr> - <th>Description</th> - <td> - <p>Enforces valid <a href="https://github.com/google/re2/wiki/Syntax">RE2 regular expressions</a> pattern when regex validation is present.</p> - <p>For more information, see <a href="/reference/custom-resource-config#validation">Validation</a> in <em>Config</em>.</p> - </td> - </tr> - <tr> - <th>Level</th> - <td>Error</td> - </tr> - <tr> - <th>Applies To</th> - <td>Files with <code>kind: Config</code> and <code>apiVersion: kots.io/v1beta1</code>.</td> - </tr> - <tr> - <th>Example</th> - <td><ConfigOptionInvalidRegexValidator/></td> - </tr> -</table> - -### config-option-regex-validator-invalid-type - -<table> - <tr> - <th>Description</th> - <td> - <p>Enforces valid item type when regex validation is present.</p> - <p>Item type should be 
<code>text</code>|<code>textarea</code>|<code>password</code>|<code>file</code></p> - <p>For more information, see <a href="/reference/custom-resource-config#validation">Validation</a> in <em>Config</em>.</p> - </td> - </tr> - <tr> - <th>Level</th> - <td>Error</td> - </tr> - <tr> - <th>Applies To</th> - <td>Files with <code>kind: Config</code> and <code>apiVersion: kots.io/v1beta1</code>.</td> - </tr> - <tr> - <th>Example</th> - <td><ConfigOptionRegexValidatorInvalidType/></td> - </tr> -</table> - -### config-spec - -<table> - <tr> - <th>Description</th> - <td> - <p>Requires a Config custom resource manifest file.</p> - <p>Accepted value for <code>kind</code>: <code>Config</code></p> - <p>Accepted value for <code>apiVersion</code>: <code>kots.io/v1beta1</code></p> - </td> - </tr> - <tr> - <th>Level</th> - <td>Warn</td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of matching YAML for this rule:</p><ConfigSpec/></td> - </tr> -</table> - -### container-image-latest-tag - -<table> - <tr> - <th>Description</th> - <td>Notifies if any manifest file has a container image tag appended with - <code>:latest</code>.</td> - </tr> - <tr> - <th>Level</th> - <td>Info</td> - </tr> - <tr> - <th>Applies To</th> - <td>All files</td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of matching YAML for this rule:</p><ContainerImageLatestTag/></td> - </tr> -</table> - -### container-image-local-image-name - -<table> - <tr> - <th>Description</th> - <td>Disallows any manifest file having a container image tag that includes <code>LocalImageName</code>.</td> - </tr> - <tr> - <th>Level</th> - <td>Error</td> - </tr> - <tr> - <th>Applies To</th> - <td>All files</td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of matching YAML for this rule:</p><ContainerImageLocalImageName/></td> - </tr> -</table> - -### container-resource-limits - -<table> - <tr> - <th>Description</th> - <td>Notifies if a <code>spec.container</code> has no <code>resources.limits</code> field.</td> - </tr> - <tr> - <th>Level</th> - <td>Info</td> - </tr> - <tr> - <th>Applies To</th> - <td>All files</td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of matching YAML for this rule:</p><ContainerResourceLimits/></td> - </tr> -</table> - - -### container-resource-requests - -<table> - <tr> - <th>Description</th> - <td>Notifies if a <code>spec.container</code> has no <code>resources.requests</code> field.</td> - </tr> - <tr> - <th>Level</th> - <td>Info</td> - </tr> - <tr> - <th>Applies To</th> - <td>All files</td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of matching YAML for this rule:</p><ContainerResourceRequests/></td> - </tr> -</table> - -### container-resources - -<table> - <tr> - <th>Description</th> - <td>Notifies if a manifest file has no <code>resources</code> field.</td> - </tr> - <tr> - <th>Level</th> - <td>Info</td> - </tr> - <tr> - <th>Applies To</th> - <td>All files</td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of matching YAML for this rule:</p><ContainerResources/></td> - </tr> -</table> - -### deprecated-kubernetes-installer-version - -<table> - <tr> - <th>Description</th> - <td> - <p>Disallows using the deprecated kURL installer <code>apiVersion</code>.</p> - <p><code>kurl.sh/v1beta1</code> is deprecated. Use <code>cluster.kurl.sh/v1beta1</code> instead.</p> - </td> - </tr> - <tr> - <th>Level</th> - <td>Warn</td> - </tr> - <tr> - <th>Applies To</th> - <td> - Files with <code>kind: Installer</code> and <code>apiVersion: kurl.sh/v1beta1</code>. 
- </td> - </tr> - <tr> - <th>Example</th> - <td><DeprecatedKubernetesInstallerVersion/></td> - </tr> -</table> - -### duplicate-helm-release-name - -<table> - <tr> - <th>Description</th> - <td> - <p>Enforces unique <code>spec.chart.releaseName</code> across all HelmChart custom resource manifest files.</p> - </td> - </tr> - <tr> - <th>Level</th> - <td>Error</td> - </tr> - <tr> - <th>Applies To</th> - <td> - Files with <code>kind: HelmChart</code> and <code>apiVersion: kots.io/v1beta1</code>. - </td> - </tr> -</table> - -### duplicate-kots-kind - -<table> - <tr> - <th>Description</th> - <td> - <p>Disallows duplicate Replicated custom resources. - A release can only include one of each <code>kind</code> of custom resource.</p> - <p>This rule disallows inclusion of more than one file with:</p> - <ul> - <li>The same <code>kind</code> and <code>apiVersion</code></li> - <li><code>kind: Troubleshoot</code> and any Troubleshoot <code>apiVersion</code></li> - <li><code>kind: Installer</code> and any Installer <code>apiVersion</code></li> - </ul> - </td> - </tr> - <tr> - <th>Level</th> - <td>Error</td> - </tr> - <tr> - <th>Applies To</th> - <td> - All files - </td> - </tr> -</table> - -### hardcoded-namespace - -<table> - <tr> - <th>Description</th> - <td> - <p>Notifies if any manifest file has a <code>metadata.namespace</code> set - to a static field.</p> - <p>Replicated strongly recommends not specifying a namespace to allow - for flexibility when deploying into end user environments.</p> - <p>For more information, see <a href="/vendor/namespaces">Managing Application Namespaces</a>.</p> - </td> - </tr> - <tr> - <th>Level</th> - <td>Info</td> - </tr> - <tr> - <th>Applies To</th> - <td>All files</td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of matching YAML for this rule:</p><HardcodedNamespace/></td> - </tr> -</table> - -### helm-archive-missing - -<table> - <tr> - <th>Description</th> - <td><p>Requires that a <code>*.tar.gz</code> file is present that matches what is in the HelmChart custom resource manifest file.</p></td> - </tr> - <tr> - <th>Level</th> - <td>Error</td> - </tr> - <tr> - <th>Applies To</th> - <td> - Releases with a HelmChart custom resource manifest file containing <code>kind: HelmChart</code> and <code>apiVersion: kots.io/v1beta1</code>. - </td> - </tr> -</table> - -### helm-chart-missing - -<table> - <tr> - <th>Description</th> - <td><p>Enforces that a HelmChart custom resource manifest file with <code>kind: HelmChart</code> is present if there is a <code>*.tar.gz</code> archive present.</p></td> - </tr> - <tr> - <th>Level</th> - <td>Error</td> - </tr> - <tr> - <th>Applies To</th> - <td> - Releases with a <code>*.tar.gz</code> archive file present. - </td> - </tr> -</table> - -### invalid-helm-release-name - -<table> - <tr> - <th>Description</th> - <td> - <p>Enforces valid <code>spec.chart.releaseName</code> in the HelmChart custom resource manifest file.</p> - <p><code>spec.chart.releaseName</code> must meet the following requirements:</p> - <ul> - <li>Begin and end with a lowercase letter or number</li> - <li>Contain only lowercase letters, numbers, periods, and hyphens (<code>-</code>)</li> - <li>Contain a lowercase letter or number between any two symbols (periods or hyphens)</li> - </ul> - </td> - </tr> - <tr> - <th>Level</th> - <td>Warn</td> - </tr> - <tr> - <th>Applies To</th> - <td> - Files with <code>kind: HelmChart</code> and <code>apiVersion: kots.io/v1beta1</code>. 
- </td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of correct YAML for this rule:</p><InvalidHelmReleaseName/></td> - </tr> -</table> - -### invalid-kubernetes-installer - -<table> - <tr> - <th>Description</th> - <td> - <p>Enforces valid Replicated kURL add-on versions.</p> - <p>kURL add-ons included in the kURL installer must pin specific versions rather than <code>latest</code> or x-ranges (1.2.x).</p> - </td> - </tr> - <tr> - <th>Level</th> - <td>Error</td> - </tr> - <tr> - <th>Applies To</th> - <td> - <p>Files with <code>kind: Installer</code> and one of the following values for <code>apiVersion</code>:</p> - <ul> - <li><code>cluster.kurl.sh/v1beta1</code></li> - <li><code>kurl.sh/v1beta1</code></li> - </ul> - </td> - </tr> - <tr> - <th>Example</th> - <td><InvalidKubernetesInstaller/></td> - </tr> -</table> - -### invalid-min-kots-version - -<table> - <tr> - <th>Description</th> - <td> - <p>Requires <code>minKotsVersion</code> in the Application custom resource to use valid Semantic Versioning. - See <a href="https://semver.org/">Semantic Versioning 2.0.0</a>.</p> - <p>Accepts a <code>v</code> as an optional prefix, so both <code>1.0.0</code> and <code>v1.0.0</code> are valid.</p> - </td> - </tr> - <tr> - <th>Level</th> - <td>Error</td> - </tr> - <tr> - <th>Applies To</th> - <td> - Files with <code>kind: Application</code> and <code>apiVersion: kots.io/v1beta1</code>. - </td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of correct YAML for this rule:</p><InvalidMinKOTS/></td> - </tr> -</table> - -### invalid-rendered-yaml - -<table> - <tr> - <th>Description</th> - <td><p>Enforces valid YAML after rendering the manifests using the Config spec.</p></td> - </tr> - <tr> - <th>Level</th> - <td>Error</td> - </tr> - <tr> - <th>Applies To</th> - <td> - YAML files - </td> - </tr> - <tr> - <th>Example</th> - <td><InvalidRenderedYaml/></td> - </tr> -</table> - -### invalid-target-kots-version - -<table> - <tr> - <th>Description</th> - <td> - <p>Requires <code>targetKotsVersion</code> in the Application custom resource to use valid Semantic Versioning. 
- See <a href="https://semver.org/">Semantic Versioning 2.0.0</a>.</p> - <p>Accepts a <code>v</code> as an optional prefix, so both <code>1.0.0</code> and <code>v1.0.0</code> are valid.</p> - </td> - </tr> - <tr> - <th>Level</th> - <td>Error</td> - </tr> - <tr> - <th>Applies To</th> - <td> - Files with <code>kind: Application</code> and <code>apiVersion: kots.io/v1beta1</code> - </td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of correct YAML for this rule:</p><InvalidTargetKOTS/></td> - </tr> -</table> - -### invalid-type - -<table> - <tr> - <th>Description</th> - <td><p>Requires that the value of a property matches that property's expected type.</p></td> - </tr> - <tr> - <th>Level</th> - <td>Error</td> - </tr> - <tr> - <th>Applies To</th> - <td> - All files - </td> - </tr> - <tr> - <th>Example</th> - <td><InvalidType/></td> - </tr> -</table> - -### invalid-yaml - -<table> - <tr> - <th>Description</th> - <td><p>Enforces valid YAML.</p></td> - </tr> - <tr> - <th>Level</th> - <td>Error</td> - </tr> - <tr> - <th>Applies To</th> - <td> - YAML files - </td> - </tr> - <tr> - <th>Example</th> - <td><InvalidYaml/></td> - </tr> -</table> - -### may-contain-secrets - -<table> - <tr> - <th>Description</th> - <td> Notifies if any manifest file may contain secrets.</td> - </tr> - <tr> - <th>Level</th> - <td>Info</td> - </tr> - <tr> - <th>Applies To</th> - <td>All files</td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of matching YAML for this rule:</p><MayContainSecrets/></td> - </tr> -</table> - -### missing-api-version-field - -<table> - <tr> - <th>Description</th> - <td>Requires the <code>apiVersion:</code> field in all files.</td> - </tr> - <tr> - <th>Level</th> - <td>Error</td> - </tr> - <tr> - <th>Applies To</th> - <td>All files</td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of correct YAML for this rule:</p><MissingAPIVersionField/></td> - </tr> -</table> - -### missing-kind-field - -<table> - <tr> - <th>Description</th> - <td>Requires the <code>kind:</code> field in all files.</td> - </tr> - <tr> - <th>Level</th> - <td>Error</td> - </tr> - <tr> - <th>Applies To</th> - <td>All files</td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of correct YAML for this rule:</p><MissingKindField/></td> - </tr> -</table> - -### nonexistent-status-informer-object - -<table> - <tr> - <th>Description</th> - <td> - <p>Requires that each <code>statusInformers</code> entry references an existing Kubernetes workload.</p> - <p>The linter cannot evaluate <code>statusInformers</code> for Helm-managed resources because it does not template Helm charts during analysis.</p> - <p>If you configure status informers for Helm-managed resources, you can ignore <code>nonexistent-status-informer-object</code> warnings for those workloads. To disable <code>nonexistent-status-informer-object</code> warnings, change the level for this rule to <code>info</code> or <code>off</code> in the LintConfig custom resource manifest file. 
See <a href="custom-resource-lintconfig">LintConfig</a> in <em>Custom Resources</em>.</p> - </td> - </tr> - <tr> - <th>Level</th> - <td>Warning</td> - </tr> - <tr> - <th>Applies To</th> - <td> - <p>Compares <code>statusInformer</code> values in files with <code>kind: Application</code> and <code>apiVersion: kots.io/v1beta1</code> to all manifests in the release.</p> - </td> - </tr> -</table> - -### preflight-spec - -<table> - <tr> - <th>Description</th> - <td> - <p>Requires a Preflight custom resource manifest file with:</p> - <p><code>kind: Preflight</code></p> - <p>and one of the following:</p> - <ul> - <li><code>apiVersion: troubleshoot.replicated.com/v1beta1</code></li> - <li><code>apiVersion: troubleshoot.sh/v1beta2</code></li> - </ul> - </td> - </tr> - <tr> - <th>Level</th> - <td>Warn</td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of matching YAML for this rule:</p><PreflightSpec/></td> - </tr> -</table> - -### privileged - -<table> - <tr> - <th>Description</th> - <td>Notifies if any manifest file has <code>privileged</code> set to <code>true</code>.</td> - </tr> - <tr> - <th>Level</th> - <td>Info</td> - </tr> - <tr> - <th>Applies To</th> - <td>All files</td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of matching YAML for this rule:</p><Privileged/></td> - </tr> -</table> - -### repeat-option-malformed-yamlpath - -<table> - <tr> - <th>Description</th> - <td> - <p>Enforces ConfigOption <code>yamlPath</code> ending with square brackets denoting index position.</p> - <p>For more information, see <a href="/reference/custom-resource-config#template-targets">Repeatable Item Template Targets</a> in <em>Config</em>.</p> - </td> - </tr> - <tr> - <th>Level</th> - <td>Error</td> - </tr> - <tr> - <th>Applies To</th> - <td>All files</td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of correct YAML for this rule:</p><RepeatOptionMalformedYAMLPath/></td> - </tr> -</table> - -### repeat-option-missing-template - -<table> - <tr> - <th>Description</th> - <td> - <p>Disallows repeating Config item with undefined <code>item.templates</code>.</p> - <p>For more information, see <a href="/reference/custom-resource-config#template-targets">Repeatable Item Template Targets</a> in <em>Config</em>.</p> - </td> - </tr> - <tr> - <th>Level</th> - <td>Error</td> - </tr> - <tr> - <th>Applies To</th> - <td>All files</td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of correct YAML for this rule:</p><RepeatOptionMissingTemplate/></td> - </tr> -</table> - - -### repeat-option-missing-valuesByGroup - -<table> - <tr> - <th>Description</th> - <td> - <p>Disallows repeating Config item with undefined <code>item.valuesByGroup</code>.</p> - <p>For more information, see <a href="/reference/custom-resource-config#repeatable-items">Repeatable Items</a> in <em>Config</em>.</p> - </td> - </tr> - <tr> - <th>Level</th> - <td>Error</td> - </tr> - <tr> - <th>Applies To</th> - <td>All files</td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of correct YAML for this rule:</p><RepeatOptionMissingValuesByGroup/></td> - </tr> -</table> - -### replicas-1 - -<table> - <tr> - <th>Description</th> - <td>Notifies if any manifest file has <code>replicas</code> set to <code>1</code>.</td> - </tr> - <tr> - <th>Level</th> - <td>Info</td> - </tr> - <tr> - <th>Applies To</th> - <td>All files</td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of matching YAML for this rule:</p><Replicas1/></td> - </tr> -</table> - -### resource-limits-cpu - -<table> - <tr> - <th>Description</th> - <td>Notifies if a 
<code>spec.container</code> has no <code>resources.limits.cpu</code> field.</td> - </tr> - <tr> - <th>Level</th> - <td>Info</td> - </tr> - <tr> - <th>Applies To</th> - <td>All files</td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of matching YAML for this rule:</p><ResourceLimitsCPU/></td> - </tr> -</table> - -### resource-limits-memory - -<table> - <tr> - <th>Description</th> - <td>Notifies if a <code>spec.container</code> has no <code>resources.limits.memory</code> field.</td> - </tr> - <tr> - <th>Level</th> - <td>Info</td> - </tr> - <tr> - <th>Applies To</th> - <td>All files</td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of matching YAML for this rule:</p><ResourceLimitsMemory/></td> - </tr> -</table> - -### resource-requests-cpu - -<table> - <tr> - <th>Description</th> - <td>Notifies if a <code>spec.container</code> has no <code>resources.requests.cpu</code> field.</td> - </tr> - <tr> - <th>Level</th> - <td>Info</td> - </tr> - <tr> - <th>Applies To</th> - <td>All files</td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of matching YAML for this rule:</p><ResourceRequestsCPU/></td> - </tr> -</table> - -### resource-requests-memory - -<table> - <tr> - <th>Description</th> - <td>Notifies if a <code>spec.container</code> has no <code>resources.requests.memory</code> field.</td> - </tr> - <tr> - <th>Level</th> - <td>Info</td> - </tr> - <tr> - <th>Applies To</th> - <td>All files</td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of matching YAML for this rule:</p><ResourceRequestsMemory/></td> - </tr> -</table> - -### troubleshoot-spec - -<table> - <tr> - <th>Description</th> - <td> - <p>Requires a Troubleshoot manifest file.</p> - <p>Accepted values for <code>kind</code>:</p> - <ul> - <li><code>Collector</code></li> - <li><code>SupportBundle</code></li> - </ul> - <p>Accepted values for <code>apiVersion</code>:</p> - <ul> - <li><code>troubleshoot.replicated.com/v1beta1</code></li> - <li><code>troubleshoot.sh/v1beta2</code></li> - </ul> - </td> - </tr> - <tr> - <th>Level</th> - <td>Warn</td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of matching YAML for this rule:</p><TroubleshootSpec/></td> - </tr> -</table> - -### volume-docker-sock - -<table> - <tr> - <th>Description</th> - <td>Notifies if a <code>spec.volumes</code> has <code>hostPath</code> - set to <code>/var/run/docker.sock</code>.</td> - </tr> - <tr> - <th>Level</th> - <td>Info</td> - </tr> - <tr> - <th>Applies To</th> - <td>All files</td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of matching YAML for this rule:</p><VolumeDockerSock/></td> - </tr> -</table> - -### volumes-host-paths - -<table> - <tr> - <th>Description</th> - <td>Notifies if a <code>spec.volumes</code> has defined a <code>hostPath</code>.</td> - </tr> - <tr> - <th>Level</th> - <td>Info</td> - </tr> - <tr> - <th>Applies To</th> - <td>All files</td> - </tr> - <tr> - <th>Example</th> - <td><p>Example of matching YAML for this rule:</p><VolumesHostPaths/></td> - </tr> -</table> - -================ -File: docs/reference/replicated-cli-api-get.mdx -================ -# replicated api get - -Make ad-hoc GET API calls to the Replicated API - -### Synopsis - -This is essentially like curl for the Replicated API, but -uses your local credentials and prints the response unmodified. - -We recommend piping the output to jq for easier reading. - -Pass the PATH of the request as the final argument. Do not include the host or version. 
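-
-For example, assuming jq is installed, you can fetch all apps and pretty-print the JSON response:
-
-```
-replicated api get /v3/apps | jq .
-```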
- -``` -replicated api get [flags] -``` - -### Examples - -``` -replicated api get /v3/apps -``` - -### Options - -``` - -h, --help help for get -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated api](replicated-cli-api) - Make ad-hoc API calls to the Replicated API - -================ -File: docs/reference/replicated-cli-api-patch.mdx -================ -# replicated api patch - -Make ad-hoc PATCH API calls to the Replicated API - -### Synopsis - -This is essentially like curl for the Replicated API, but -uses your local credentials and prints the response unmodified. - -We recommend piping the output to jq for easier reading. - -Pass the PATH of the request as the final argument. Do not include the host or version. - -``` -replicated api patch [flags] -``` - -### Examples - -``` -replicated api patch /v3/customer/2VffY549paATVfHSGpJhjh6Ehpy -b '{"name":"Valuable Customer"}' -``` - -### Options - -``` - -b, --body string JSON body to send with the request - -h, --help help for patch -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated api](replicated-cli-api) - Make ad-hoc API calls to the Replicated API - -================ -File: docs/reference/replicated-cli-api-post.mdx -================ -# replicated api post - -Make ad-hoc POST API calls to the Replicated API - -### Synopsis - -This is essentially like curl for the Replicated API, but -uses your local credentials and prints the response unmodified. - -We recommend piping the output to jq for easier reading. - -Pass the PATH of the request as the final argument. Do not include the host or version. - -``` -replicated api post [flags] -``` - -### Examples - -``` -replicated api post /v3/app/2EuFxKLDxKjPNk2jxMTmF6Vxvxu/channel -b '{"name":"marc-waz-here"}' -``` - -### Options - -``` - -b, --body string JSON body to send with the request - -h, --help help for post -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated api](replicated-cli-api) - Make ad-hoc API calls to the Replicated API - -================ -File: docs/reference/replicated-cli-api-put.mdx -================ -# replicated api put - -Make ad-hoc PUT API calls to the Replicated API - -### Synopsis - -This is essentially like curl for the Replicated API, but -uses your local credentials and prints the response unmodified. - -We recommend piping the output to jq for easier reading. - -Pass the PATH of the request as the final argument. Do not include the host or version. 
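-
-For longer payloads, one option is to keep the JSON body in a file and pass it to -b with shell command substitution (a sketch; channel.json is a hypothetical file holding the body):
-
-```
-replicated api put /v3/app/2EuFxKLDxKjPNk2jxMTmF6Vxvxu/channel/2QLPm10JPkta7jO3Z3Mk4aXTPyZ -b "$(cat channel.json)"
-```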
- -``` -replicated api put [flags] -``` - -### Examples - -``` -replicated api put /v3/app/2EuFxKLDxKjPNk2jxMTmF6Vxvxu/channel/2QLPm10JPkta7jO3Z3Mk4aXTPyZ -b '{"name":"marc-waz-here2"}' -``` - -### Options - -``` - -b, --body string JSON body to send with the request - -h, --help help for put -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated api](replicated-cli-api) - Make ad-hoc API calls to the Replicated API - -================ -File: docs/reference/replicated-cli-api.mdx -================ -# replicated api - -Make ad-hoc API calls to the Replicated API - -### Options - -``` - -h, --help help for api -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated -* [replicated api get](replicated-cli-api-get) - Make ad-hoc GET API calls to the Replicated API -* [replicated api patch](replicated-cli-api-patch) - Make ad-hoc PATCH API calls to the Replicated API -* [replicated api post](replicated-cli-api-post) - Make ad-hoc POST API calls to the Replicated API -* [replicated api put](replicated-cli-api-put) - Make ad-hoc PUT API calls to the Replicated API - -================ -File: docs/reference/replicated-cli-app-create.mdx -================ -# replicated app create - -Create a new application - -### Synopsis - -Create a new application in your Replicated account. - -This command allows you to initialize a new application that can be distributed -and managed using the KOTS platform. When you create a new app, it will be set up -with default configurations, which you can later customize. - -The NAME argument is required and will be used as the application's name. - -``` -replicated app create NAME [flags] -``` - -### Examples - -``` -# Create a new app named "My App" -replicated app create "My App" - -# Create a new app and output the result in JSON format -replicated app create "Another App" --output json - -# Create a new app with a specific name and view details in table format -replicated app create "Custom App" --output table -``` - -### Options - -``` - -h, --help help for create - --output string The output format to use. One of: json|table (default: table) (default "table") -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated app](replicated-cli-app) - Manage applications - -================ -File: docs/reference/replicated-cli-app-ls.mdx -================ -# replicated app ls - -List applications - -### Synopsis - -List all applications in your Replicated account, -or search for a specific application by name or ID. - -This command displays information about your applications, including their -names, IDs, and associated channels. If a NAME argument is provided, it will -filter the results to show only applications that match the given name or ID. - -The output can be customized using the --output flag to display results in -either table or JSON format. 
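-
-Because the JSON output is machine-readable, it composes well with tools like jq. For example, to count applications (this sketch assumes the response is a top-level JSON array; adjust the filter to the actual response shape):
-
-```
-replicated app ls --output json | jq length
-```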
-
-```
-replicated app ls [NAME] [flags]
-```
-
-### Aliases
-
-```
-ls, list
-```
-
-### Examples
-
-```
-# List all applications
-replicated app ls
-
-# Search for a specific application by name
-replicated app ls "My App"
-
-# List applications and output in JSON format
-replicated app ls --output json
-
-# Search for an application and display results in table format
-replicated app ls "App Name" --output table
-```
-
-### Options
-
-```
-  -h, --help            help for ls
-      --output string   The output format to use. One of: json|table (default: table) (default "table")
-```
-
-### Options inherited from parent commands
-
-```
-      --app string     The app slug or app id to use in all calls
-      --token string   The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated app](replicated-cli-app) - Manage applications
-
-================
-File: docs/reference/replicated-cli-app-rm.mdx
-================
-# replicated app rm
-
-Delete an application
-
-### Synopsis
-
-Delete an application from your Replicated account.
-
-This command allows you to permanently remove an application from your account.
-Once deleted, the application and all associated data will be irretrievably lost.
-
-Use this command with caution as there is no way to undo this operation.
-
-```
-replicated app rm NAME [flags]
-```
-
-### Aliases
-
-```
-rm, delete
-```
-
-### Examples
-
-```
-# Delete an app named "My App"
-replicated app delete "My App"
-
-# Delete an app and skip the confirmation prompt
-replicated app delete "Another App" --force
-
-# Delete an app and output the result in JSON format
-replicated app delete "Custom App" --output json
-```
-
-### Options
-
-```
-  -f, --force           Skip confirmation prompt. There is no undo for this action.
-  -h, --help            help for rm
-      --output string   The output format to use. One of: json|table (default: table) (default "table")
-```
-
-### Options inherited from parent commands
-
-```
-      --app string     The app slug or app id to use in all calls
-      --token string   The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated app](replicated-cli-app) - Manage applications
-
-================
-File: docs/reference/replicated-cli-app.mdx
-================
-# replicated app
-
-Manage applications
-
-### Synopsis
-
-The app command allows you to manage applications in your Replicated account.
-
-This command provides a suite of subcommands for creating, listing, updating, and
-deleting applications. You can perform operations such as creating new apps,
-viewing app details, modifying app settings, and removing apps from your account.
- -Use the various subcommands to: -- Create new applications -- List all existing applications -- View details of a specific application -- Update application settings -- Delete applications from your account - -### Examples - -``` -# List all applications -replicated app ls - -# Create a new application -replicated app create "My New App" - -# View details of a specific application -replicated app inspect "My App Name" - -# Delete an application -replicated app delete "App to Remove" - -# Update an application's settings -replicated app update "My App" --channel stable - -# List applications with custom output format -replicated app ls --output json -``` - -### Options - -``` - -h, --help help for app -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated -* [replicated app create](replicated-cli-app-create) - Create a new application -* [replicated app ls](replicated-cli-app-ls) - List applications -* [replicated app rm](replicated-cli-app-rm) - Delete an application - -================ -File: docs/reference/replicated-cli-channel-create.mdx -================ -# replicated channel create - -Create a new channel in your app - -### Synopsis - -Create a new channel in your app and print the channel on success. - -``` -replicated channel create [flags] -``` - -### Examples - -``` -replicated channel create --name Beta --description 'New features subject to change' -``` - -### Options - -``` - --description string A longer description of this channel - -h, --help help for create - --name string The name of this channel - --output string The output format to use. One of: json|table (default: table) (default "table") -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated channel](replicated-cli-channel) - List channels - -================ -File: docs/reference/replicated-cli-channel-demote.mdx -================ -# replicated channel demote - -Demote a release from a channel - -### Synopsis - -Demote a channel release from a channel using a channel sequence or release sequence. - -``` -replicated channel demote CHANNEL_ID_OR_NAME [flags] -``` - -### Examples - -``` - # Demote a release from a channel by channel sequence - replicated channel release demote Beta --channel-sequence 15 - - # Demote a release from a channel by release sequence - replicated channel release demote Beta --release-sequence 12 -``` - -### Options - -``` - --channel-sequence int The channel sequence to demote - -h, --help help for demote - --release-sequence int The release sequence to demote -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated channel](replicated-cli-channel) - List channels - -================ -File: docs/reference/replicated-cli-channel-disable-semantic-versioning.mdx -================ -# replicated channel disable-semantic-versioning - -Disable semantic versioning for CHANNEL_ID - -### Synopsis - -Disable semantic versioning for the CHANNEL_ID. 
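-
-The channel ID can be copied from the output of 'replicated channel ls'. For example (the ID below is a placeholder):
-
-```
-replicated channel ls
-replicated channel disable-semantic-versioning 2QLPm10JPkta7jO3Z3Mk4aXTPyZ
-```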
- -``` -replicated channel disable-semantic-versioning CHANNEL_ID [flags] -``` - -### Examples - -``` -replicated channel disable-semantic-versioning CHANNEL_ID -``` - -### Options - -``` - -h, --help help for disable-semantic-versioning -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated channel](replicated-cli-channel) - List channels - -================ -File: docs/reference/replicated-cli-channel-enable-semantic-versioning.mdx -================ -# replicated channel enable-semantic-versioning - -Enable semantic versioning for CHANNEL_ID - -### Synopsis - -Enable semantic versioning for the CHANNEL_ID. - -``` -replicated channel enable-semantic-versioning CHANNEL_ID [flags] -``` - -### Examples - -``` -replicated channel enable-semantic-versioning CHANNEL_ID -``` - -### Options - -``` - -h, --help help for enable-semantic-versioning -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated channel](replicated-cli-channel) - List channels - -================ -File: docs/reference/replicated-cli-channel-inspect.mdx -================ -# replicated channel inspect - -Show full details for a channel - -### Synopsis - -Show full details for a channel - -``` -replicated channel inspect CHANNEL_ID [flags] -``` - -### Options - -``` - -h, --help help for inspect - --output string The output format to use. One of: json|table (default: table) (default "table") -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated channel](replicated-cli-channel) - List channels - -================ -File: docs/reference/replicated-cli-channel-ls.mdx -================ -# replicated channel ls - -List all channels in your app - -### Synopsis - -List all channels in your app - -``` -replicated channel ls [flags] -``` - -### Aliases - -``` -ls, list -``` - -### Options - -``` - -h, --help help for ls - --output string The output format to use. 
One of: json|table (default: table) (default "table") -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated channel](replicated-cli-channel) - List channels - -================ -File: docs/reference/replicated-cli-channel-rm.mdx -================ -# replicated channel rm - -Remove (archive) a channel - -### Synopsis - -Remove (archive) a channel - -``` -replicated channel rm CHANNEL_ID [flags] -``` - -### Aliases - -``` -rm, delete -``` - -### Options - -``` - -h, --help help for rm -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated channel](replicated-cli-channel) - List channels - -================ -File: docs/reference/replicated-cli-channel-un-demote.mdx -================ -# replicated channel un-demote - -Un-demote a release from a channel - -### Synopsis - -Un-demote a channel release from a channel using a channel sequence or release sequence. - -``` -replicated channel un-demote CHANNEL_ID_OR_NAME [flags] -``` - -### Examples - -``` - # Un-demote a release from a channel by channel sequence - replicated channel release un-demote Beta --channel-sequence 15 - - # Un-demote a release from a channel by release sequence - replicated channel release un-demote Beta --release-sequence 12 -``` - -### Options - -``` - --channel-sequence int The channel sequence to un-demote - -h, --help help for un-demote - --release-sequence int The release sequence to un-demote -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated channel](replicated-cli-channel) - List channels - -================ -File: docs/reference/replicated-cli-channel.mdx -================ -# replicated channel - -List channels - -### Synopsis - -List channels - -### Options - -``` - -h, --help help for channel -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated -* [replicated channel create](replicated-cli-channel-create) - Create a new channel in your app -* [replicated channel demote](replicated-cli-channel-demote) - Demote a release from a channel -* [replicated channel disable-semantic-versioning](replicated-cli-channel-disable-semantic-versioning) - Disable semantic versioning for CHANNEL_ID -* [replicated channel enable-semantic-versioning](replicated-cli-channel-enable-semantic-versioning) - Enable semantic versioning for CHANNEL_ID -* [replicated channel inspect](replicated-cli-channel-inspect) - Show full details for a channel -* [replicated channel ls](replicated-cli-channel-ls) - List all channels in your app -* [replicated channel rm](replicated-cli-channel-rm) - Remove (archive) a channel -* [replicated channel un-demote](replicated-cli-channel-un-demote) - Un-demote a release from a channel - -================ -File: docs/reference/replicated-cli-cluster-addon-create-object-store.mdx -================ -# replicated cluster addon create 
object-store - -Create an object store bucket for a cluster. - -### Synopsis - -Creates an object store bucket for a cluster, requiring a bucket name prefix. The bucket name will be auto-generated using the format "[BUCKET_PREFIX]-[ADDON_ID]-cmx". This feature provisions an object storage bucket that can be used for storage in your cluster environment. - -``` -replicated cluster addon create object-store CLUSTER_ID --bucket-prefix BUCKET_PREFIX [flags] -``` - -### Examples - -``` -# Create an object store bucket with a specified prefix -replicated cluster addon create object-store 05929b24 --bucket-prefix mybucket - -# Create an object store bucket and wait for it to be ready (up to 5 minutes) -replicated cluster addon create object-store 05929b24 --bucket-prefix mybucket --wait 5m - -# Perform a dry run to validate inputs without creating the bucket -replicated cluster addon create object-store 05929b24 --bucket-prefix mybucket --dry-run - -# Create an object store bucket and output the result in JSON format -replicated cluster addon create object-store 05929b24 --bucket-prefix mybucket --output json - -# Create an object store bucket with a custom prefix and wait for 10 minutes -replicated cluster addon create object-store 05929b24 --bucket-prefix custom-prefix --wait 10m -``` - -### Options - -``` - --bucket-prefix string A prefix for the bucket name to be created (required) - --dry-run Simulate creation to verify that your inputs are valid without actually creating an add-on - -h, --help help for object-store - --output string The output format to use. One of: json|table|wide (default: table) (default "table") - --wait duration Wait duration for add-on to be ready before exiting (leave empty to not wait) -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated cluster addon create](replicated-cli-cluster-addon-create) - Create cluster add-ons. - -================ -File: docs/reference/replicated-cli-cluster-addon-create.mdx -================ -# replicated cluster addon create - -Create cluster add-ons. - -### Synopsis - -Create new add-ons for a cluster. This command allows you to add functionality or services to a cluster by provisioning the required add-ons. - -### Examples - -``` -# Create an object store bucket add-on for a cluster -replicated cluster addon create object-store CLUSTER_ID --bucket-prefix mybucket - -# Perform a dry run for creating an object store add-on -replicated cluster addon create object-store CLUSTER_ID --bucket-prefix mybucket --dry-run -``` - -### Options - -``` - -h, --help help for create -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated cluster addon](replicated-cli-cluster-addon) - Manage cluster add-ons. -* [replicated cluster addon create object-store](replicated-cli-cluster-addon-create-object-store) - Create an object store bucket for a cluster. - -================ -File: docs/reference/replicated-cli-cluster-addon-ls.mdx -================ -# replicated cluster addon ls - -List cluster add-ons for a cluster. - -### Synopsis - -The 'cluster addon ls' command allows you to list all add-ons for a specific cluster. 
This command provides a detailed overview of the add-ons currently installed on the cluster, including their status and any relevant configuration details. - -This can be useful for monitoring the health and configuration of add-ons or performing troubleshooting tasks. - -``` -replicated cluster addon ls CLUSTER_ID [flags] -``` - -### Aliases - -``` -ls, list -``` - -### Examples - -``` -# List add-ons for a cluster with default table output -replicated cluster addon ls CLUSTER_ID - -# List add-ons for a cluster with JSON output -replicated cluster addon ls CLUSTER_ID --output json - -# List add-ons for a cluster with wide table output -replicated cluster addon ls CLUSTER_ID --output wide -``` - -### Options - -``` - -h, --help help for ls - --output string The output format to use. One of: json|table|wide (default: table) (default "table") -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated cluster addon](replicated-cli-cluster-addon) - Manage cluster add-ons. - -================ -File: docs/reference/replicated-cli-cluster-addon-rm.mdx -================ -# replicated cluster addon rm - -Remove cluster add-on by ID. - -### Synopsis - -The 'cluster addon rm' command allows you to remove a specific add-on from a cluster by specifying the cluster ID and the add-on ID. - -This command is useful when you want to deprovision an add-on that is no longer needed or when troubleshooting issues related to specific add-ons. The add-on will be removed immediately, and you will receive confirmation upon successful removal. - -``` -replicated cluster addon rm CLUSTER_ID --id ADDON_ID [flags] -``` - -### Aliases - -``` -rm, delete -``` - -### Examples - -``` -# Remove an add-on with ID 'abc123' from cluster 'cluster456' -replicated cluster addon rm cluster456 --id abc123 -``` - -### Options - -``` - -h, --help help for rm - --id string The ID of the cluster add-on to remove (required) -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated cluster addon](replicated-cli-cluster-addon) - Manage cluster add-ons. - -================ -File: docs/reference/replicated-cli-cluster-addon.mdx -================ -# replicated cluster addon - -Manage cluster add-ons. - -### Synopsis - -The 'cluster addon' command allows you to manage add-ons installed on a test cluster. Add-ons are additional components or services that can be installed and configured to enhance or extend the functionality of the cluster. - -You can use various subcommands to create, list, remove, or check the status of add-ons on a cluster. This command is useful for adding databases, object storage, monitoring, security, or other specialized tools to your cluster environment. 
- -### Examples - -``` -# List all add-ons installed on a cluster -replicated cluster addon ls CLUSTER_ID - -# Remove an add-on from a cluster -replicated cluster addon rm CLUSTER_ID --id ADDON_ID - -# Create an object store bucket add-on for a cluster -replicated cluster addon create object-store CLUSTER_ID --bucket-prefix mybucket - -# List add-ons with JSON output -replicated cluster addon ls CLUSTER_ID --output json -``` - -### Options - -``` - -h, --help help for addon -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. -* [replicated cluster addon create](replicated-cli-cluster-addon-create) - Create cluster add-ons. -* [replicated cluster addon ls](replicated-cli-cluster-addon-ls) - List cluster add-ons for a cluster. -* [replicated cluster addon rm](replicated-cli-cluster-addon-rm) - Remove cluster add-on by ID. - -================ -File: docs/reference/replicated-cli-cluster-create.mdx -================ -# replicated cluster create - -Create test clusters. - -### Synopsis - -The 'cluster create' command provisions a new test cluster with the specified Kubernetes distribution and configuration. You can customize the cluster's size, version, node groups, disk space, IP family, and other parameters. - -This command supports creating clusters on multiple Kubernetes distributions, including setting up node groups with different instance types and counts. You can also specify a TTL (Time-To-Live) to automatically terminate the cluster after a set duration. - -Use the '--dry-run' flag to simulate the creation process and get an estimated cost without actually provisioning the cluster. 
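-
-Because the '--version' format is distribution dependent, it can help to first list the supported versions with 'replicated cluster versions'. For example:
-
-```
-# List supported versions for the distribution, then create a cluster with one of them
-replicated cluster versions --distribution eks
-replicated cluster create --distribution eks --version 1.31 --nodes 3 --ttl 24h
-```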
- -``` -replicated cluster create [flags] -``` - -### Examples - -``` -# Create a new cluster with basic configuration -replicated cluster create --distribution eks --version 1.21 --nodes 3 --instance-type t3.large --disk 100 --ttl 24h - -# Create a cluster with a custom node group -replicated cluster create --distribution eks --version 1.21 --nodegroup name=workers,instance-type=t3.large,nodes=5 --ttl 24h - -# Simulate cluster creation (dry-run) -replicated cluster create --distribution eks --version 1.21 --nodes 3 --disk 100 --ttl 24h --dry-run - -# Create a cluster with autoscaling configuration -replicated cluster create --distribution eks --version 1.21 --min-nodes 2 --max-nodes 5 --instance-type t3.large --ttl 24h - -# Create a cluster with multiple node groups -replicated cluster create --distribution eks --version 1.21 \ ---nodegroup name=workers,instance-type=t3.large,nodes=3 \ ---nodegroup name=cpu-intensive,instance-type=c5.2xlarge,nodes=2 \ ---ttl 24h - -# Create a cluster with custom tags -replicated cluster create --distribution eks --version 1.21 --nodes 3 --tag env=test --tag project=demo --ttl 24h - -# Create a cluster with addons -replicated cluster create --distribution eks --version 1.21 --nodes 3 --addon object-store --ttl 24h -``` - -### Options - -``` - --addon stringArray Addons to install on the cluster (can be specified multiple times) - --bucket-prefix string A prefix for the bucket name to be created (required by '--addon object-store') - --disk int Disk Size (GiB) to request per node (default 50) - --distribution string Kubernetes distribution of the cluster to provision - --dry-run Dry run - -h, --help help for create - --instance-type string The type of instance to use (e.g. m6i.large) - --ip-family string IP Family to use for the cluster (ipv4|ipv6|dual). - --license-id string License ID to use for the installation (required for Embedded Cluster distribution) - --max-nodes string Maximum Node count (non-negative number) (only for EKS, AKS and GKE clusters). - --min-nodes string Minimum Node count (non-negative number) (only for EKS, AKS and GKE clusters). - --name string Cluster name (defaults to random name) - --nodegroup stringArray Node group to create (name=?,instance-type=?,nodes=?,min-nodes=?,max-nodes=?,disk=? format, can be specified multiple times). For each nodegroup, at least one flag must be specified. The flags min-nodes and max-nodes are mutually dependent. - --nodes int Node count (default 1) - --output string The output format to use. One of: json|table|wide (default: table) (default "table") - --tag stringArray Tag to apply to the cluster (key=value format, can be specified multiple times) - --ttl string Cluster TTL (duration, max 48h) - --version string Kubernetes version to provision (format is distribution dependent) - --wait duration Wait duration for cluster to be ready (leave empty to not wait) -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. - -================ -File: docs/reference/replicated-cli-cluster-kubeconfig.mdx -================ -# replicated cluster kubeconfig - -Download credentials for a test cluster. - -### Synopsis - -The 'cluster kubeconfig' command downloads the credentials (kubeconfig) required to access a test cluster. 
You can either merge these credentials into your existing kubeconfig file or save them as a new file. - -This command ensures that the kubeconfig is correctly configured for use with your Kubernetes tools. You can specify the cluster by ID or by name. Additionally, the kubeconfig can be written to a specific file path or printed to stdout. - -You can also use this command to automatically update your current Kubernetes context with the downloaded credentials. - -``` -replicated cluster kubeconfig [ID] [flags] -``` - -### Examples - -``` -# Download and merge kubeconfig into your existing configuration -replicated cluster kubeconfig CLUSTER_ID - -# Save the kubeconfig to a specific file -replicated cluster kubeconfig CLUSTER_ID --output-path ./kubeconfig - -# Print the kubeconfig to stdout -replicated cluster kubeconfig CLUSTER_ID --stdout - -# Download kubeconfig for a cluster by name -replicated cluster kubeconfig --name "My Cluster" - -# Download kubeconfig for a cluster by ID -replicated cluster kubeconfig --id CLUSTER_ID -``` - -### Options - -``` - -h, --help help for kubeconfig - --id string id of the cluster to download credentials for (when name is not provided) - --name string name of the cluster to download credentials for (when id is not provided) - --output-path string path to kubeconfig file to write to, if not provided, it will be merged into your existing kubeconfig - --stdout write kubeconfig to stdout -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. - -================ -File: docs/reference/replicated-cli-cluster-ls.mdx -================ -# replicated cluster ls - -List test clusters. - -### Synopsis - -The 'cluster ls' command lists all test clusters. This command provides information about the clusters, such as their status, name, distribution, version, and creation time. The output can be formatted in different ways, depending on your needs. - -You can filter the list of clusters by time range and status (e.g., show only terminated clusters). You can also watch clusters in real-time, which updates the list every few seconds. - -Clusters that have been deleted will be shown with a 'deleted' status. - -``` -replicated cluster ls [flags] -``` - -### Aliases - -``` -ls, list -``` - -### Examples - -``` -# List all clusters with default table output -replicated cluster ls - -# Show clusters created after a specific date -replicated cluster ls --start-time 2023-01-01T00:00:00Z - -# Watch for real-time updates -replicated cluster ls --watch - -# List clusters with JSON output -replicated cluster ls --output json - -# List only terminated clusters -replicated cluster ls --show-terminated - -# List clusters with wide table output -replicated cluster ls --output wide -``` - -### Options - -``` - --end-time string end time for the query (Format: 2006-01-02T15:04:05Z) - -h, --help help for ls - --output string The output format to use. 
One of: json|table|wide (default: table) (default "table") - --show-terminated when set, only show terminated clusters - --start-time string start time for the query (Format: 2006-01-02T15:04:05Z) - -w, --watch watch clusters -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. - -================ -File: docs/reference/replicated-cli-cluster-nodegroup-ls.mdx -================ -# replicated cluster nodegroup ls - -List node groups for a cluster. - -### Synopsis - -The 'cluster nodegroup ls' command lists all the node groups associated with a given cluster. Each node group defines a specific set of nodes with particular configurations, such as instance types and scaling options. - -You can view information about the node groups within the specified cluster, including their ID, name, node count, and other configuration details. - -You must provide the cluster ID to list its node groups. - -``` -replicated cluster nodegroup ls [ID] [flags] -``` - -### Aliases - -``` -ls, list -``` - -### Examples - -``` -# List all node groups in a cluster with default table output -replicated cluster nodegroup ls CLUSTER_ID - -# List node groups with JSON output -replicated cluster nodegroup ls CLUSTER_ID --output json - -# List node groups with wide table output -replicated cluster nodegroup ls CLUSTER_ID --output wide -``` - -### Options - -``` - -h, --help help for ls - --output string The output format to use. One of: json|table|wide (default: table) (default "table") -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated cluster nodegroup](replicated-cli-cluster-nodegroup) - Manage node groups for clusters. - -================ -File: docs/reference/replicated-cli-cluster-nodegroup.mdx -================ -# replicated cluster nodegroup - -Manage node groups for clusters. - -### Synopsis - -The 'cluster nodegroup' command provides functionality to manage node groups within a cluster. This command allows you to list node groups in a Kubernetes or VM-based cluster. - -Node groups define a set of nodes with specific configurations, such as instance types, node counts, or scaling rules. You can use subcommands to perform various actions on node groups. - -### Examples - -``` -# List all node groups for a cluster -replicated cluster nodegroup ls CLUSTER_ID -``` - -### Options - -``` - -h, --help help for nodegroup -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. -* [replicated cluster nodegroup ls](replicated-cli-cluster-nodegroup-ls) - List node groups for a cluster. - -================ -File: docs/reference/replicated-cli-cluster-port-expose.mdx -================ -# replicated cluster port expose - -Expose a port on a cluster to the public internet. - -### Synopsis - -The 'cluster port expose' command is used to expose a specified port on a cluster to the public internet. 
When exposing a port, the command automatically creates a DNS entry and, if using the "https" protocol, provisions a TLS certificate for secure communication. - -You can also create a wildcard DNS entry and TLS certificate by specifying the "--wildcard" flag. Please note that creating a wildcard certificate may take additional time. - -This command supports different protocols including "http", "https", "ws", and "wss" for web traffic and web socket communication. - -NOTE: Currently, this feature only supports VM-based cluster distributions. - -``` -replicated cluster port expose CLUSTER_ID --port PORT [flags] -``` - -### Examples - -``` -# Expose port 8080 with HTTPS protocol and wildcard DNS -replicated cluster port expose CLUSTER_ID --port 8080 --protocol https --wildcard - -# Expose port 3000 with HTTP protocol -replicated cluster port expose CLUSTER_ID --port 3000 --protocol http - -# Expose port 8080 with multiple protocols -replicated cluster port expose CLUSTER_ID --port 8080 --protocol http,https - -# Expose port 8080 and display the result in JSON format -replicated cluster port expose CLUSTER_ID --port 8080 --protocol https --output json -``` - -### Options - -``` - -h, --help help for expose - --output string The output format to use. One of: json|table|wide (default: table) (default "table") - --port int Port to expose (required) - --protocol strings Protocol to expose (valid values are "http", "https", "ws" and "wss") (default [http,https]) - --wildcard Create a wildcard DNS entry and TLS certificate for this port -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated cluster port](replicated-cli-cluster-port) - Manage cluster ports. - -================ -File: docs/reference/replicated-cli-cluster-port-ls.mdx -================ -# replicated cluster port ls - -List cluster ports for a cluster. - -### Synopsis - -The 'cluster port ls' command lists all the ports configured for a specific cluster. You must provide the cluster ID to retrieve and display the ports. - -This command is useful for viewing the current port configurations, protocols, and other related settings of your test cluster. The output format can be customized to suit your needs, and the available formats include table, JSON, and wide views. - -``` -replicated cluster port ls CLUSTER_ID [flags] -``` - -### Aliases - -``` -ls, list -``` - -### Examples - -``` -# List ports for a cluster in the default table format -replicated cluster port ls CLUSTER_ID - -# List ports for a cluster in JSON format -replicated cluster port ls CLUSTER_ID --output json - -# List ports for a cluster in wide format -replicated cluster port ls CLUSTER_ID --output wide -``` - -### Options - -``` - -h, --help help for ls - --output string The output format to use. One of: json|table|wide (default: table) (default "table") -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated cluster port](replicated-cli-cluster-port) - Manage cluster ports. - -================ -File: docs/reference/replicated-cli-cluster-port-rm.mdx -================ -# replicated cluster port rm - -Remove cluster port by ID. - -### Synopsis - -The 'cluster port rm' command removes a specific port from a cluster. 
You must provide either the ID of the port or the port number and protocol(s) to remove. - -This command is useful for managing the network settings of your test clusters by allowing you to clean up unused or incorrect ports. After removing a port, the updated list of ports will be displayed. - -Note that you can only use either the port ID or port number when removing a port, not both at the same time. - -``` -replicated cluster port rm CLUSTER_ID --id PORT_ID [flags] -``` - -### Aliases - -``` -rm, delete -``` - -### Examples - -``` -# Remove a port using its ID -replicated cluster port rm CLUSTER_ID --id PORT_ID - -# Remove a port using its number (deprecated) -replicated cluster port rm CLUSTER_ID --port 8080 --protocol http,https - -# Remove a port and display the result in JSON format -replicated cluster port rm CLUSTER_ID --id PORT_ID --output json -``` - -### Options - -``` - -h, --help help for rm - --id string ID of the port to remove (required) - --output string The output format to use. One of: json|table|wide (default: table) (default "table") -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated cluster port](replicated-cli-cluster-port) - Manage cluster ports. - -================ -File: docs/reference/replicated-cli-cluster-port.mdx -================ -# replicated cluster port - -Manage cluster ports. - -### Synopsis - -The 'cluster port' command is a parent command for managing ports in a cluster. It allows users to list, remove, or expose specific ports used by the cluster. Use the subcommands (such as 'ls', 'rm', and 'expose') to manage port configurations effectively. - -This command provides flexibility for handling ports in various test clusters, ensuring efficient management of cluster networking settings. - -### Examples - -``` -# List all exposed ports in a cluster -replicated cluster port ls [CLUSTER_ID] - -# Remove an exposed port from a cluster -replicated cluster port rm [CLUSTER_ID] [PORT] - -# Expose a new port in a cluster -replicated cluster port expose [CLUSTER_ID] [PORT] -``` - -### Options - -``` - -h, --help help for port -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. -* [replicated cluster port expose](replicated-cli-cluster-port-expose) - Expose a port on a cluster to the public internet. -* [replicated cluster port ls](replicated-cli-cluster-port-ls) - List cluster ports for a cluster. -* [replicated cluster port rm](replicated-cli-cluster-port-rm) - Remove cluster port by ID. - -================ -File: docs/reference/replicated-cli-cluster-prepare.mdx -================ -# replicated cluster prepare - -Prepare cluster for testing. - -### Synopsis - -The 'cluster prepare' command provisions a Kubernetes cluster and installs an application using a Helm chart or KOTS YAML configuration. - -This command is designed to be used in CI environments to prepare a cluster for testing by deploying a Helm chart or KOTS application with entitlements and custom values. You can specify the cluster configuration, such as the Kubernetes distribution, version, node count, and instance type, and then install your application automatically. 
Alternatively, if you prefer deploying KOTS applications, you can specify YAML manifests for the release and use the '--shared-password' flag for the KOTS admin console.
-
-You can also pass entitlement values to configure the cluster's customer entitlements.
-
-Note:
-- The '--chart' flag cannot be used with '--yaml', '--yaml-file', or '--yaml-dir'.
-- If deploying a Helm chart, use the '--set' flags to pass chart values. When deploying a KOTS application, the '--shared-password' flag is required.
-
-```
-replicated cluster prepare [flags]
-```
-
-### Examples
-
-```
-replicated cluster prepare --distribution eks --version 1.27 --instance-type c6.xlarge --node-count 3 --chart ./your-chart.tgz --values ./values.yaml --set chart-key=value --set chart-key2=value2
-```
-
-### Options
-
-```
-      --app-ready-timeout duration    Timeout to wait for the application to be ready. Must be in Go duration format (e.g., 10s, 2m). (default 5m0s)
-      --chart string                  Path to the Helm chart package to deploy
-      --cluster-id string             The ID of an existing cluster to use instead of creating a new one.
-      --config-values-file string     Path to a manifest containing config values (must be apiVersion: kots.io/v1beta1, kind: ConfigValues).
-      --disk int                      Disk Size (GiB) to request per node. (default 50)
-      --distribution string           Kubernetes distribution of the cluster to provision
-      --entitlements strings          The entitlements to set on the customer. Can be specified multiple times.
-  -h, --help                          help for prepare
-      --instance-type string          The type of instance to use for clusters (e.g. x5.xlarge)
-      --name string                   Cluster name
-      --namespace string              The namespace into which to deploy the KOTS application or Helm chart. (default "default")
-      --node-count int                Node count. (default 1)
-      --set stringArray               Set values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2).
-      --set-file stringArray          Set values from respective files specified via the command line (can specify multiple or separate values with commas: key1=path1,key2=path2).
-      --set-json stringArray          Set JSON values on the command line (can specify multiple or separate values with commas: key1=jsonval1,key2=jsonval2).
-      --set-literal stringArray       Set a literal STRING value on the command line.
-      --set-string stringArray        Set STRING values on the command line (can specify multiple or separate values with commas: key1=val1,key2=val2).
-      --shared-password string        Shared password for the KOTS admin console.
-      --ttl string                    Cluster TTL (duration, max 48h)
-      --values strings                Specify values in a YAML file or a URL (can specify multiple).
-      --version string                Kubernetes version to provision (format is distribution dependent)
-      --wait duration                 Wait duration for cluster to be ready. (default 5m0s)
-      --yaml string                   The YAML config for this release. Use '-' to read from stdin. Cannot be used with the --yaml-file flag.
-      --yaml-dir string               The directory containing multiple yamls for a KOTS release. Cannot be used with the --yaml flag.
-      --yaml-file string              The YAML config for this release. Cannot be used with the --yaml flag.
-```
-
-### Options inherited from parent commands
-
-```
-      --app string     The app slug or app id to use in all calls
-      --token string   The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters.
-
-================
-File: docs/reference/replicated-cli-cluster-rm.mdx
-================
-# replicated cluster rm
-
-Remove test clusters.
- -### Synopsis - -The 'rm' command removes test clusters immediately. - -You can remove clusters by specifying a cluster ID, or by using other criteria such as cluster names or tags. Alternatively, you can remove all clusters in your account at once. - -This command can also be used in a dry-run mode to simulate the removal without actually deleting anything. - -You cannot mix the use of cluster IDs with other options like removing by name, tag, or removing all clusters at once. - -``` -replicated cluster rm ID [ID …] [flags] -``` - -### Aliases - -``` -rm, delete -``` - -### Examples - -``` -# Remove a specific cluster by ID -replicated cluster rm CLUSTER_ID - -# Remove all clusters -replicated cluster rm --all -``` - -### Options - -``` - --all remove all clusters - --dry-run Dry run - -h, --help help for rm - --name stringArray Name of the cluster to remove (can be specified multiple times) - --tag stringArray Tag of the cluster to remove (key=value format, can be specified multiple times) -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. - -================ -File: docs/reference/replicated-cli-cluster-shell.mdx -================ -# replicated cluster shell - -Open a new shell with kubeconfig configured. - -### Synopsis - -The 'shell' command opens a new shell session with the kubeconfig configured for the specified test cluster. This allows you to have immediate kubectl access to the cluster within the shell environment. - -You can either specify the cluster ID directly or provide the cluster name to resolve the corresponding cluster ID. The shell will inherit your existing environment and add the necessary kubeconfig context for interacting with the Kubernetes cluster. - -Once inside the shell, you can use 'kubectl' to interact with the cluster. To exit the shell, press Ctrl-D or type 'exit'. When the shell closes, the kubeconfig will be reset back to your default configuration. - -``` -replicated cluster shell [ID] [flags] -``` - -### Examples - -``` -# Open a shell for a cluster by ID -replicated cluster shell CLUSTER_ID - -# Open a shell for a cluster by name -replicated cluster shell --name "My Cluster" -``` - -### Options - -``` - -h, --help help for shell - --id string id of the cluster to have kubectl access to (when name is not provided) - --name string name of the cluster to have kubectl access to. -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. - -================ -File: docs/reference/replicated-cli-cluster-update-nodegroup.mdx -================ -# replicated cluster update nodegroup - -Update a nodegroup for a test cluster. - -### Synopsis - -The 'nodegroup' command allows you to update the configuration of a nodegroup within a test cluster. You can update attributes like the number of nodes, minimum and maximum node counts for autoscaling, and more. - -If you do not provide the nodegroup ID, the command will try to resolve it based on the nodegroup name provided. 
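-
-For example, to resolve the node group by name rather than by ID (using a hypothetical node group named 'workers'):
-
-```
-replicated cluster update nodegroup CLUSTER_ID --nodegroup-name workers --nodes 3
-```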
- -``` -replicated cluster update nodegroup [ID] [flags] -``` - -### Examples - -``` -# Update the number of nodes in a nodegroup -replicated cluster update nodegroup CLUSTER_ID --nodegroup-id NODEGROUP_ID --nodes 3 - -# Update the autoscaling limits for a nodegroup -replicated cluster update nodegroup CLUSTER_ID --nodegroup-id NODEGROUP_ID --min-nodes 2 --max-nodes 5 -``` - -### Options - -``` - -h, --help help for nodegroup - --max-nodes string The maximum number of nodes in the nodegroup - --min-nodes string The minimum number of nodes in the nodegroup - --nodegroup-id string The ID of the nodegroup to update - --nodegroup-name string The name of the nodegroup to update - --nodes int The number of nodes in the nodegroup - --output string The output format to use. One of: json|table|wide (default: table) (default "table") -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --id string id of the cluster to update (when name is not provided) - --name string Name of the cluster to update. - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated cluster update](replicated-cli-cluster-update) - Update cluster settings. - -================ -File: docs/reference/replicated-cli-cluster-update-ttl.mdx -================ -# replicated cluster update ttl - -Update TTL for a test cluster. - -### Synopsis - -The 'ttl' command allows you to update the Time-To-Live (TTL) of a test cluster. The TTL represents the duration for which the cluster will remain active before it is automatically terminated. The duration starts from the moment the cluster becomes active. You must provide a valid duration, with a maximum limit of 48 hours. - -``` -replicated cluster update ttl [ID] [flags] -``` - -### Examples - -``` -# Update the TTL for a specific cluster -replicated cluster update ttl CLUSTER_ID --ttl 24h -``` - -### Options - -``` - -h, --help help for ttl - --output string The output format to use. One of: json|table|wide (default: table) (default "table") - --ttl string Update TTL which starts from the moment the cluster is running (duration, max 48h). -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --id string id of the cluster to update (when name is not provided) - --name string Name of the cluster to update. - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated cluster update](replicated-cli-cluster-update) - Update cluster settings. - -================ -File: docs/reference/replicated-cli-cluster-update.mdx -================ -# replicated cluster update - -Update cluster settings. - -### Synopsis - -The 'update' command allows you to update various settings of a test cluster, such as its name or ID. - -You can either specify the cluster ID directly or provide the cluster name, and the command will resolve the corresponding cluster ID. This allows you to modify the cluster's configuration based on the unique identifier or the name of the cluster. - -### Examples - -``` -# Update a cluster using its ID -replicated cluster update --id <cluster-id> [subcommand] - -# Update a cluster using its name -replicated cluster update --name <cluster-name> [subcommand] -``` - -### Options - -``` - -h, --help help for update - --id string id of the cluster to update (when name is not provided) - --name string Name of the cluster to update. 
-``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. -* [replicated cluster update nodegroup](replicated-cli-cluster-update-nodegroup) - Update a nodegroup for a test cluster. -* [replicated cluster update ttl](replicated-cli-cluster-update-ttl) - Update TTL for a test cluster. - -================ -File: docs/reference/replicated-cli-cluster-upgrade.mdx -================ -# replicated cluster upgrade - -Upgrade a test cluster. - -### Synopsis - -The 'upgrade' command upgrades a Kubernetes test cluster to a specified version. You must provide a cluster ID and the version to upgrade to. The upgrade can be simulated with a dry-run option, or you can choose to wait for the cluster to be fully upgraded. - -``` -replicated cluster upgrade [ID] [flags] -``` - -### Examples - -``` -# Upgrade a cluster to a new Kubernetes version -replicated cluster upgrade [CLUSTER_ID] --version 1.31 - -# Perform a dry run of a cluster upgrade without making any changes -replicated cluster upgrade [CLUSTER_ID] --version 1.31 --dry-run - -# Upgrade a cluster and wait for it to be ready -replicated cluster upgrade [CLUSTER_ID] --version 1.31 --wait 30m -``` - -### Options - -``` - --dry-run Dry run - -h, --help help for upgrade - --output string The output format to use. One of: json|table|wide (default: table) (default "table") - --version string Kubernetes version to upgrade to (format is distribution dependent) - --wait duration Wait duration for cluster to be ready (leave empty to not wait) -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. - -================ -File: docs/reference/replicated-cli-cluster-versions.mdx -================ -# replicated cluster versions - -List cluster versions. - -### Synopsis - -The 'versions' command lists available Kubernetes versions for supported distributions. You can filter the versions by specifying a distribution and choose between different output formats. - -``` -replicated cluster versions [flags] -``` - -### Examples - -``` -# List all available Kubernetes cluster versions -replicated cluster versions - -# List available versions for a specific distribution (e.g., eks) -replicated cluster versions --distribution eks - -# Output the versions in JSON format -replicated cluster versions --output json -``` - -### Options - -``` - --distribution string Kubernetes distribution to filter by. - -h, --help help for versions - --output string The output format to use. One of: json|table (default: table) (default "table") -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters. - -================ -File: docs/reference/replicated-cli-cluster.mdx -================ -# replicated cluster - -Manage test Kubernetes clusters. - -### Synopsis - -The 'cluster' command allows you to manage and interact with Kubernetes clusters used for testing purposes. 
With this command, you can create, list, remove, and manage node groups within clusters, as well as retrieve information about available clusters. - -### Examples - -``` -# Create a single-node EKS cluster -replicated cluster create --distribution eks --version 1.31 - -# List all clusters -replicated cluster ls - -# Remove a specific cluster by ID -replicated cluster rm <cluster-id> - -# List all nodegroups in a specific cluster -replicated cluster nodegroup ls <cluster-id> -``` - -### Options - -``` - -h, --help help for cluster -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated -* [replicated cluster addon](replicated-cli-cluster-addon) - Manage cluster add-ons. -* [replicated cluster create](replicated-cli-cluster-create) - Create test clusters. -* [replicated cluster kubeconfig](replicated-cli-cluster-kubeconfig) - Download credentials for a test cluster. -* [replicated cluster ls](replicated-cli-cluster-ls) - List test clusters. -* [replicated cluster nodegroup](replicated-cli-cluster-nodegroup) - Manage node groups for clusters. -* [replicated cluster port](replicated-cli-cluster-port) - Manage cluster ports. -* [replicated cluster prepare](replicated-cli-cluster-prepare) - Prepare cluster for testing. -* [replicated cluster rm](replicated-cli-cluster-rm) - Remove test clusters. -* [replicated cluster shell](replicated-cli-cluster-shell) - Open a new shell with kubeconfig configured. -* [replicated cluster update](replicated-cli-cluster-update) - Update cluster settings. -* [replicated cluster upgrade](replicated-cli-cluster-upgrade) - Upgrade a test cluster. -* [replicated cluster versions](replicated-cli-cluster-versions) - List cluster versions. - -================ -File: docs/reference/replicated-cli-completion.mdx -================ -# replicated completion - -Generate completion script - -``` -replicated completion [bash|zsh|fish|powershell] -``` - -### Examples - -``` -To load completions: - -Bash: - - This script depends on the 'bash-completion' package. - If it is not installed already, you can install it via your OS's package manager. - - $ source <(replicated completion bash) - - # To load completions for each session, execute once: - # Linux: - $ replicated completion bash > /etc/bash_completion.d/replicated - # macOS: - $ replicated completion bash > $(brew --prefix)/etc/bash_completion.d/replicated - -Zsh: - - # If shell completion is not already enabled in your environment, - # you will need to enable it. You can execute the following once: - - $ echo "autoload -U compinit; compinit" >> ~/.zshrc - - # To load completions for each session, execute once: - $ replicated completion zsh > "${fpath[1]}/_replicated" - - # You will need to start a new shell for this setup to take effect. - -fish: - - $ replicated completion fish | source - - # To load completions for each session, execute once: - $ replicated completion fish > ~/.config/fish/completions/replicated.fish - -PowerShell: - - PS> replicated completion powershell | Out-String | Invoke-Expression - - # To load completions for every new session, run: - PS> replicated completion powershell > replicated.ps1 - # and source this file from your PowerShell profile. 
- -``` - -### Options - -``` - -h, --help help for completion -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated - -================ -File: docs/reference/replicated-cli-customer-archive.mdx -================ -# replicated customer archive - -Archive a customer - -### Synopsis - -Archive a customer for the current application. - -This command allows you to archive a customer record. Archiving a customer -will make their license inactive and remove them from active customer lists. -This action is reversible - you can unarchive a customer later if needed. - -The customer can be specified by either their name or ID. - -``` -replicated customer archive <customer_name_or_id> [flags] -``` - -### Examples - -``` -# Archive a customer by name -replicated customer archive "Acme Inc" - -# Archive a customer by ID -replicated customer archive cus_abcdef123456 - -# Archive multiple customers by ID -replicated customer archive cus_abcdef123456 cus_xyz9876543210 - -# Archive a customer in a specific app (if you have multiple apps) -replicated customer archive --app myapp "Acme Inc" -``` - -### Options - -``` - --app string The app to archive the customer in (not required when using a customer id) - -h, --help help for archive -``` - -### Options inherited from parent commands - -``` - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated customer](replicated-cli-customer) - Manage customers - -================ -File: docs/reference/replicated-cli-customer-create.mdx -================ -# replicated customer create - -Create a new customer for the current application - -### Synopsis - -Create a new customer for the current application with specified attributes. - -This command allows you to create a customer record with various properties such as name, -custom ID, channels, license type, and feature flags. You can set expiration dates, -enable or disable specific features, and assign the customer to one or more channels. - -The --app flag must be set to specify the target application. - -``` -replicated customer create [flags] -``` - -### Examples - -``` -# Create a basic customer with a name and assigned to a channel -replicated customer create --app myapp --name "Acme Inc" --channel stable - -# Create a customer with multiple channels and a custom ID -replicated customer create --app myapp --name "Beta Corp" --custom-id "BETA123" --channel beta --channel stable - -# Create a paid customer with specific features enabled -replicated customer create --app myapp --name "Enterprise Ltd" --type paid --channel enterprise --airgap --snapshot - -# Create a trial customer with an expiration date -replicated customer create --app myapp --name "Trial User" --type trial --channel stable --expires-in 720h - -# Create a customer with all available options -replicated customer create --app myapp --name "Full Options Inc" --custom-id "FULL001" \ - --channel stable --channel beta --default-channel stable --type paid \ - --email "contact@fulloptions.com" --expires-in 8760h \ - --airgap --snapshot --kots-install --embedded-cluster-download \ - --support-bundle-upload --ensure-channel -``` - -### Options - -``` - --airgap If set, the license will allow airgap installs. 
- --channel stringArray Release channel to which the customer should be assigned (can be specified multiple times) - --custom-id string Set a custom customer ID to more easily tie this customer record to your external data systems - --default-channel string Which of the specified channels should be the default channel. if not set, the first channel specified will be the default channel. - --developer-mode If set, Replicated SDK installed in dev mode will use mock data. - --email string Email address of the customer that is to be created. - --embedded-cluster-download If set, the license will allow embedded cluster downloads. - --ensure-channel If set, channel will be created if it does not exist. - --expires-in duration If set, an expiration date will be set on the license. Supports Go durations like '72h' or '3600m' - --geo-axis If set, the license will allow Geo Axis usage. - --gitops If set, the license will allow the GitOps usage. - --helm-install If set, the license will allow Helm installs. - --helmvm-cluster-download If set, the license will allow helmvm cluster downloads. - -h, --help help for create - --identity-service If set, the license will allow Identity Service usage. - --installer-support If set, the license will allow installer support. - --kots-install If set, the license will allow KOTS install. Otherwise license will allow Helm CLI installs only. (default true) - --kurl-install If set, the license will allow kURL installs. - --name string Name of the customer - --output string The output format to use. One of: json|table (default: table) (default "table") - --snapshot If set, the license will allow Snapshots. - --support-bundle-upload If set, the license will allow uploading support bundles. - --type string The license type to create. One of: dev|trial|paid|community|test (default: dev) (default "dev") -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated customer](replicated-cli-customer) - Manage customers - -================ -File: docs/reference/replicated-cli-customer-download-license.mdx -================ -# replicated customer download-license - -Download a customer's license - -### Synopsis - -The download-license command allows you to retrieve and save a customer's license. - -This command fetches the license for a specified customer and either outputs it -to stdout or saves it to a file. The license contains crucial information about -the customer's subscription and usage rights. - -You must specify the customer using either their name or ID with the --customer flag. - -``` -replicated customer download-license [flags] -``` - -### Examples - -``` -# Download license for a customer by ID and output to stdout -replicated customer download-license --customer cus_abcdef123456 - -# Download license for a customer by name and save to a file -replicated customer download-license --customer "Acme Inc" --output license.yaml - -# Download license for a customer in a specific app (if you have multiple apps) -replicated customer download-license --app myapp --customer "Acme Inc" --output license.yaml -``` - -### Options - -``` - --customer string The Customer Name or ID - -h, --help help for download-license - -o, --output string Path to output license to. 
Defaults to stdout (default "-") -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated customer](replicated-cli-customer) - Manage customers - -================ -File: docs/reference/replicated-cli-customer-inspect.mdx -================ -# replicated customer inspect - -Show detailed information about a specific customer - -### Synopsis - -The inspect command provides comprehensive details about a customer. - - This command retrieves and displays full information about a specified customer, - including their assigned channels, registry information, and other relevant attributes. - It's useful for getting an in-depth view of a customer's configuration and status. - - You must specify the customer using either their name or ID with the --customer flag. - -``` -replicated customer inspect [flags] -``` - -### Examples - -``` -# Inspect a customer by ID -replicated customer inspect --customer cus_abcdef123456 - -# Inspect a customer by name -replicated customer inspect --customer "Acme Inc" - -# Inspect a customer and output in JSON format -replicated customer inspect --customer cus_abcdef123456 --output json - -# Inspect a customer for a specific app (if you have multiple apps) -replicated customer inspect --app myapp --customer "Acme Inc" -``` - -### Options - -``` - --customer string The Customer Name or ID - -h, --help help for inspect - --output string The output format to use. One of: json|table (default: table) (default "table") -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated customer](replicated-cli-customer) - Manage customers - -================ -File: docs/reference/replicated-cli-customer-ls.mdx -================ -# replicated customer ls - -List customers for the current application - -### Synopsis - -List customers associated with the current application. - -This command displays information about customers linked to your application. -By default, it shows all non-test customers. You can use flags to: -- Filter customers by a specific app version -- Include test customers in the results -- Change the output format (table or JSON) - -The command requires an app to be set using the --app flag. 
-
-```
-replicated customer ls [flags]
-```
-
-### Aliases
-
-```
-ls, list
-```
-
-### Examples
-
-```
-# List all customers for the current application
-replicated customer ls --app myapp
-
-# Output results in JSON format
-replicated customer ls --app myapp --output json
-
-# Combine multiple flags
-replicated customer ls --app myapp --include-test --output json
-```
-
-### Options
-
-```
-      --app-version string   Filter customers by a specific app version
-  -h, --help                 help for ls
-      --include-test         Include test customers in the results
-      --output string        Output format: json|table (default: table) (default "table")
-```
-
-### Options inherited from parent commands
-
-```
-      --app string     The app slug or app id to use in all calls
-      --token string   The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated customer](replicated-cli-customer) - Manage customers
-
-================
-File: docs/reference/replicated-cli-customer-update.mdx
-================
-# replicated customer update
-
-Update an existing customer
-
-### Synopsis
-
-Update an existing customer's information and settings.
-
-This command allows you to modify various attributes of a customer, including their name,
-custom ID, assigned channels, license type, and feature flags. You can update expiration dates,
-enable or disable specific features, and change channel assignments.
-
-The --customer flag is required to specify which customer to update.
-
-```
-replicated customer update --customer <id> --name <name> [options] [flags]
-```
-
-### Examples
-
-```
-# Update a customer's name
-replicated customer update --customer cus_abcdef123456 --name "New Company Name"
-
-# Change a customer's channel and make it the default
-replicated customer update --customer cus_abcdef123456 --channel stable --default-channel stable
-
-# Enable airgap installations for a customer
-replicated customer update --customer cus_abcdef123456 --airgap
-
-# Update multiple attributes at once
-replicated customer update --customer cus_abcdef123456 --name "Updated Corp" --type paid --channel enterprise --airgap --snapshot
-
-# Set an expiration date for a customer's license
-replicated customer update --customer cus_abcdef123456 --expires-in 8760h
-
-# Update a customer and output the result in JSON format
-replicated customer update --customer cus_abcdef123456 --name "JSON Corp" --output json
-```
-
-### Options
-
-```
-      --airgap                      If set, the license will allow airgap installs.
-      --channel stringArray         Release channel to which the customer should be assigned (can be specified multiple times)
-      --custom-id string            Set a custom customer ID to more easily tie this customer record to your external data systems
-      --customer string             The ID of the customer to update
-      --default-channel string      Which of the specified channels should be the default channel. if not set, the first channel specified will be the default channel.
-      --developer-mode              If set, Replicated SDK installed in dev mode will use mock data.
-      --email string                Email address of the customer that is to be updated.
-      --embedded-cluster-download   If set, the license will allow embedded cluster downloads.
-      --ensure-channel              If set, channel will be created if it does not exist.
-      --expires-in duration         If set, an expiration date will be set on the license. Supports Go durations like '72h' or '3600m'
-      --geo-axis                    If set, the license will allow Geo Axis usage.
-      --gitops                      If set, the license will allow the GitOps usage.
-      --helm-install                If set, the license will allow Helm installs.
-      --helmvm-cluster-download     If set, the license will allow helmvm cluster downloads.
-  -h, --help                        help for update
-      --identity-service            If set, the license will allow Identity Service usage.
-      --kots-install                If set, the license will allow KOTS install. Otherwise license will allow Helm CLI installs only. (default true)
-      --kurl-install                If set, the license will allow kURL installs.
-      --name string                 Name of the customer
-      --output string               The output format to use. One of: json|table (default: table) (default "table")
-      --snapshot                    If set, the license will allow Snapshots.
-      --support-bundle-upload       If set, the license will allow uploading support bundles.
-      --type string                 The license type to update. One of: dev|trial|paid|community|test (default: dev) (default "dev")
-```
-
-### Options inherited from parent commands
-
-```
-      --app string     The app slug or app id to use in all calls
-      --token string   The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated customer](replicated-cli-customer) - Manage customers
-
-================
-File: docs/reference/replicated-cli-customer.mdx
-================
-# replicated customer
-
-Manage customers
-
-### Synopsis
-
-The customers command allows vendors to create, display, and modify end customer records.
-
-### Options
-
-```
-  -h, --help   help for customer
-```
-
-### Options inherited from parent commands
-
-```
-      --app string     The app slug or app id to use in all calls
-      --token string   The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated
-* [replicated customer archive](replicated-cli-customer-archive) - Archive a customer
-* [replicated customer create](replicated-cli-customer-create) - Create a new customer for the current application
-* [replicated customer download-license](replicated-cli-customer-download-license) - Download a customer's license
-* [replicated customer inspect](replicated-cli-customer-inspect) - Show detailed information about a specific customer
-* [replicated customer ls](replicated-cli-customer-ls) - List customers for the current application
-* [replicated customer update](replicated-cli-customer-update) - Update an existing customer
-
-================
-File: docs/reference/replicated-cli-default-clear-all.mdx
-================
-# replicated default clear-all
-
-Clear all default values
-
-### Synopsis
-
-Clears all default values that are used by other commands.
-
-This command removes all default values that are used by other commands run by the current user.
-
-```
-replicated default clear-all [flags]
-```
-
-### Examples
-
-```
-# Clear all default values
-replicated default clear-all
-```
-
-### Options
-
-```
-  -h, --help   help for clear-all
-```
-
-### Options inherited from parent commands
-
-```
-      --app string     The app slug or app id to use in all calls
-      --token string   The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated default](replicated-cli-default) - Manage default values used by other commands
-
-================
-File: docs/reference/replicated-cli-default-clear.mdx
-================
-# replicated default clear
-
-Clear default value for a key
-
-### Synopsis
-
-Clears default value for the specified key.
-
-This command removes default values that are used by other commands run by the current user.
-
-Supported keys:
-- app: the default application to use
-
-```
-replicated default clear KEY [flags]
-```
-
-### Examples
-
-```
-# Clear default application
-replicated default clear app
-```
-
-### Options
-
-```
-  -h, --help   help for clear
-```
-
-### Options inherited from parent commands
-
-```
-      --app string     The app slug or app id to use in all calls
-      --token string   The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated default](replicated-cli-default) - Manage default values used by other commands
-
-================
-File: docs/reference/replicated-cli-default-set.mdx
-================
-# replicated default set
-
-Set default value for a key
-
-### Synopsis
-
-Sets default value for the specified key.
-
-This command sets default values that will be used by other commands run by the current user.
-
-Supported keys:
-- app: the default application to use
-
-The output can be customized using the --output flag to display results in
-either table or JSON format.
-
-```
-replicated default set KEY VALUE [flags]
-```
-
-### Examples
-
-```
-# Set default application
-replicated default set app my-app-slug
-```
-
-### Options
-
-```
-  -h, --help            help for set
-      --output string   The output format to use. One of: json|table (default: table) (default "table")
-```
-
-### Options inherited from parent commands
-
-```
-      --app string     The app slug or app id to use in all calls
-      --token string   The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated default](replicated-cli-default) - Manage default values used by other commands
-
-================
-File: docs/reference/replicated-cli-default-show.mdx
-================
-# replicated default show
-
-Show default value for a key
-
-### Synopsis
-
-Shows default values for the specified key.
-
-This command shows default values that will be used by other commands run by the current user.
-
-Supported keys:
-- app: the default application to use
-
-The output can be customized using the --output flag to display results in
-either table or JSON format.
-
-```
-replicated default show KEY [flags]
-```
-
-### Examples
-
-```
-# Show default application
-replicated default show app
-```
-
-### Options
-
-```
-  -h, --help            help for show
-      --output string   The output format to use.
One of: json|table (default: table) (default "table") -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated default](replicated-cli-default) - Manage default values used by other commands - -================ -File: docs/reference/replicated-cli-default.mdx -================ -# replicated default - -Manage default values used by other commands - -### Options - -``` - -h, --help help for default -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated -* [replicated default clear](replicated-cli-default-clear) - Clear default value for a key -* [replicated default clear-all](replicated-cli-default-clear-all) - Clear all default values -* [replicated default set](replicated-cli-default-set) - Set default value for a key -* [replicated default show](replicated-cli-default-show) - Show default value for a key - -================ -File: docs/reference/replicated-cli-installer-create.mdx -================ -# replicated installer create - -Create a new installer spec - -### Synopsis - -Create a new installer spec by providing YAML configuration for a https://kurl.sh cluster. - -``` -replicated installer create [flags] -``` - -### Options - -``` - --auto generate default values for use in CI - -y, --confirm-auto auto-accept the configuration generated by the --auto flag - --ensure-channel When used with --promote <channel>, will create the channel if it doesn't exist - -h, --help help for create - --promote string Channel name or id to promote this installer to - --yaml string The YAML config for this installer. Use '-' to read from stdin. Cannot be used with the --yaml-file flag. - --yaml-file string The file name with YAML config for this installer. Cannot be used with the --yaml flag. -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated installer](replicated-cli-installer) - Manage Kubernetes installers - -================ -File: docs/reference/replicated-cli-installer-ls.mdx -================ -# replicated installer ls - -List an app's Kubernetes Installers - -### Synopsis - -List an app's https://kurl.sh Kubernetes Installers - -``` -replicated installer ls [flags] -``` - -### Aliases - -``` -ls, list -``` - -### Options - -``` - -h, --help help for ls - --output string The output format to use. One of: json|table (default: table) (default "table") -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated installer](replicated-cli-installer) - Manage Kubernetes installers - -================ -File: docs/reference/replicated-cli-installer.mdx -================ -# replicated installer - -Manage Kubernetes installers - -### Synopsis - -The installers command allows vendors to create, display, modify and promote kurl.sh specs for managing the installation of Kubernetes. 
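-
-### Examples
-
-The following is a minimal sketch of an installer workflow. The spec file name (kurl-installer.yaml) and channel name (Stable) are placeholders:
-
-```
-# Create an installer spec from a YAML file and promote it to a channel
-replicated installer create --yaml-file kurl-installer.yaml --promote Stable --ensure-channel
-
-# List the app's existing Kubernetes installers
-replicated installer ls
-```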
- -### Options - -``` - -h, --help help for installer -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated -* [replicated installer create](replicated-cli-installer-create) - Create a new installer spec -* [replicated installer ls](replicated-cli-installer-ls) - List an app's Kubernetes Installers - -================ -File: docs/reference/replicated-cli-installing.mdx -================ -import Verify from "../partials/replicated-cli/_verify-install.mdx" -import Sudo from "../partials/replicated-cli/_sudo-install.mdx" -import Login from "../partials/replicated-cli/_login.mdx" -import Logout from "../partials/replicated-cli/_logout.mdx" -import AuthToken from "../partials/replicated-cli/_authorize-with-token-note.mdx" - -# Installing the Replicated CLI - -Vendors can use the Replicated CLI to manage their applications with Replicated programmatically, rather than using the Replicated vendor portal. - -## Prerequisites - -Complete the following prerequisites before installing the Replicated CLI: - -- Create a vendor account. See [Creating a Vendor Account](/vendor/vendor-portal-creating-account). -- To run on Linux or Mac, install [curl](https://curl.haxx.se/). -- To run through a Docker container, install [docker](https://www.docker.com). - -## Install and Run - -You can install and run the Replicated CLI in the following environments: - -* Directly on MacOS -* Directly on Linux -* Through Docker (Useful for Windows, GitHub Actions, or computers without sufficient access) - -### MacOS - -To install and run the latest Replicated CLI on MacOS: - -1. Run one of the following commands: - - - With Brew: - - ```shell - brew install replicatedhq/replicated/cli - ``` - - - Without Brew: - - ```shell - curl -s https://api.github.com/repos/replicatedhq/replicated/releases/latest \ - | grep "browser_download_url.*darwin_all.tar.gz" \ - | cut -d : -f 2,3 \ - | tr -d \" \ - | wget -O replicated.tar.gz -qi - - tar xf replicated.tar.gz replicated && rm replicated.tar.gz - mv replicated /usr/local/bin/replicated - ``` - - <Sudo/> - -1. <Verify/> - -1. <Login/> - - <AuthToken/> - -1. <Logout/> - -### Linux - -To install and run the latest Replicated CLI on Linux: - -1. Run the following command: - - ```shell - curl -s https://api.github.com/repos/replicatedhq/replicated/releases/latest \ - | grep "browser_download_url.*linux_amd64.tar.gz" \ - | cut -d : -f 2,3 \ - | tr -d \" \ - | wget -O replicated.tar.gz -qi - - tar xf replicated.tar.gz replicated && rm replicated.tar.gz - mv replicated /usr/local/bin/replicated - ``` - - <Sudo/> - -1. <Verify/> - -1. <Login/> - - <AuthToken/> - -1. <Logout/> - -### Docker / Windows - -Installing in Docker environments requires that you set the `REPLICATED_API_TOKEN` environment variable to authorize the Replicated CLI with an API token. For more information, see [(Optional) Set Environment Variables](#env-var) below. - -To install and run the latest Replicated CLI in Docker environments: - -1. Generate a service account or user API token in the vendor portal. To create new releases, the token must have `Read/Write` access. See [Generating API Tokens](/vendor/replicated-api-tokens). - -1. 
Get the latest Replicated CLI installation files from the [replicatedhq/replicated repository](https://github.com/replicatedhq/replicated/releases) on GitHub.
-
-   Download and install the files. For simplicity, the usage in the next step assumes that the CLI was downloaded and installed to the desktop.
-
-1. Authorize the Replicated CLI:
-
-   - Through a Docker container:
-
-     ```shell
-     docker run \
-       -e REPLICATED_API_TOKEN=$TOKEN \
-       replicated/vendor-cli --help
-     ```
-
-     Replace `TOKEN` with your API token.
-
-   - On Windows:
-
-     ```dos
-     docker.exe run \
-       -e REPLICATED_API_TOKEN=%TOKEN% \
-       replicated/vendor-cli --help
-     ```
-
-     Replace `TOKEN` with your API token.
-
-   For more information about the `docker run` command, see [docker run](https://docs.docker.com/engine/reference/commandline/run/) in the Docker documentation.
-
-## (Optional) Set Environment Variables {#env-var}
-
-The Replicated CLI supports setting the following environment variables:
-
-* **`REPLICATED_API_TOKEN`**: A service account or user API token generated from a vendor portal team or individual account. The `REPLICATED_API_TOKEN` environment variable has the following use cases:
-
-  * To use Replicated CLI commands as part of automation (such as from continuous integration and continuous delivery pipelines), authenticate by providing the `REPLICATED_API_TOKEN` environment variable.
-
-  * To authorize the Replicated CLI when installing and running the CLI in Docker containers.
-
-  * Optionally set the `REPLICATED_API_TOKEN` environment variable instead of using the `replicated login` command to authorize the Replicated CLI in MacOS or Linux environments.
-
-* **`REPLICATED_APP`**: The slug of the target application.
-
-  When using the Replicated CLI to manage applications through your vendor account (including channels, releases, customers, or other objects associated with an application), you can set the `REPLICATED_APP` environment variable to avoid passing the application slug with each command.
-
-### `REPLICATED_API_TOKEN`
-
-To set the `REPLICATED_API_TOKEN` environment variable:
-
-1. Generate a service account or user API token in the vendor portal. To create new releases, the token must have `Read/Write` access. See [Generating API Tokens](/vendor/replicated-api-tokens).
-
-1. Set the environment variable, replacing `TOKEN` with the token you generated in the previous step:
-
-   * **MacOS or Linux**:
-
-     ```
-     export REPLICATED_API_TOKEN=TOKEN
-     ```
-
-   * **Docker**:
-
-     ```
-     docker run \
-       -e REPLICATED_API_TOKEN=$TOKEN \
-       replicated/vendor-cli --help
-     ```
-
-   * **Windows**:
-
-     ```
-     docker.exe run \
-       -e REPLICATED_API_TOKEN=%TOKEN% \
-       replicated/vendor-cli --help
-     ```
-
-### `REPLICATED_APP`
-
-To set the `REPLICATED_APP` environment variable:
-
-1. In the [vendor portal](https://vendor.replicated.com), go to the **Application Settings** page and copy the slug for the target application. For more information, see [Get the Application Slug](/vendor/vendor-portal-manage-app#slug) in _Managing Applications_.
-
-1. Set the environment variable, replacing `APP_SLUG` with the slug for the target application that you retrieved in the previous step:
-
-   * **MacOS or Linux**:
-
-     ```
-     export REPLICATED_APP=APP_SLUG
-     ```
-
-   * **Docker**:
-
-     ```
-     docker run \
-       -e REPLICATED_APP=$APP_SLUG \
-       replicated/vendor-cli --help
-     ```
-
-   * **Windows**:
-
-     ```
-     docker.exe run \
-       -e REPLICATED_APP=%APP_SLUG% \
-       replicated/vendor-cli --help
-     ```
-
-================
-File: docs/reference/replicated-cli-instance-inspect.mdx
-================
-# replicated instance inspect
-
-Show full details for a customer instance
-
-### Synopsis
-
-Show full details for a customer instance
-
-```
-replicated instance inspect [flags]
-```
-
-### Options
-
-```
-      --customer string   Customer Name or ID
-  -h, --help              help for inspect
-      --instance string   Instance Name or ID
-      --output string     The output format to use. One of: json|table (default: table) (default "table")
-```
-
-### Options inherited from parent commands
-
-```
-      --app string     The app slug or app id to use in all calls
-      --token string   The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated instance](replicated-cli-instance) - Manage instances
-
-================
-File: docs/reference/replicated-cli-instance-ls.mdx
-================
-# replicated instance ls
-
-list customer instances
-
-### Synopsis
-
-list customer instances
-
-```
-replicated instance ls [flags]
-```
-
-### Aliases
-
-```
-ls, list
-```
-
-### Options
-
-```
-      --customer string   Customer Name or ID
-  -h, --help              help for ls
-      --output string     The output format to use. One of: json|table (default: table) (default "table")
-      --tag stringArray   Tags to use to filter instances (key=value format, can be specified multiple times). Only one tag needs to match (an OR operation)
-```
-
-### Options inherited from parent commands
-
-```
-      --app string     The app slug or app id to use in all calls
-      --token string   The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated instance](replicated-cli-instance) - Manage instances
-
-================
-File: docs/reference/replicated-cli-instance-tag.mdx
-================
-# replicated instance tag
-
-tag an instance
-
-### Synopsis
-
-remove or add instance tags
-
-```
-replicated instance tag [flags]
-```
-
-### Options
-
-```
-      --customer string   Customer Name or ID
-  -h, --help              help for tag
-      --instance string   Instance Name or ID
-      --output string     The output format to use. One of: json|table (default: table) (default "table")
-      --tag stringArray   Tags to apply to instance. Leave value empty to remove tag. Tags not specified will not be removed.
-```
-
-### Options inherited from parent commands
-
-```
-      --app string     The app slug or app id to use in all calls
-      --token string   The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated instance](replicated-cli-instance) - Manage instances
-
-================
-File: docs/reference/replicated-cli-instance.mdx
-================
-# replicated instance
-
-Manage instances
-
-### Synopsis
-
-The instance command allows vendors to display and tag customer instances.
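-
-### Examples
-
-The following is a minimal sketch of an instance workflow. The customer name, instance ID, and tag values are placeholders:
-
-```
-# List instances for a customer
-replicated instance ls --customer "Acme Inc"
-
-# Add a tag to an instance
-replicated instance tag --customer "Acme Inc" --instance INSTANCE_ID --tag env=production
-
-# Show full details for an instance
-replicated instance inspect --customer "Acme Inc" --instance INSTANCE_ID
-```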
-
-### Options
-
-```
-  -h, --help   help for instance
-```
-
-### Options inherited from parent commands
-
-```
-      --app string     The app slug or app id to use in all calls
-      --token string   The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated
-* [replicated instance inspect](replicated-cli-instance-inspect) - Show full details for a customer instance
-* [replicated instance ls](replicated-cli-instance-ls) - list customer instances
-* [replicated instance tag](replicated-cli-instance-tag) - tag an instance
-
-================
-File: docs/reference/replicated-cli-login.mdx
-================
-# replicated login
-
-Log in to Replicated
-
-### Synopsis
-
-This command will open your browser to ask you for authentication details and create / retrieve an API token for the CLI to use.
-
-```
-replicated login [flags]
-```
-
-### Options
-
-```
-  -h, --help   help for login
-```
-
-### Options inherited from parent commands
-
-```
-      --app string     The app slug or app id to use in all calls
-      --token string   The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated
-
-================
-File: docs/reference/replicated-cli-logout.mdx
-================
-# replicated logout
-
-Logout from Replicated
-
-### Synopsis
-
-This command will remove any stored credentials from the CLI.
-
-```
-replicated logout [flags]
-```
-
-### Options
-
-```
-  -h, --help   help for logout
-```
-
-### Options inherited from parent commands
-
-```
-      --app string     The app slug or app id to use in all calls
-      --token string   The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated
-
-================
-File: docs/reference/replicated-cli-registry-add-dockerhub.mdx
-================
-# replicated registry add dockerhub
-
-Add a DockerHub registry
-
-### Synopsis
-
-Add a DockerHub registry using a username/password or an account token
-
-```
-replicated registry add dockerhub [flags]
-```
-
-### Options
-
-```
-      --authtype string   Auth type for the registry (default "password")
-  -h, --help              help for dockerhub
-      --output string     The output format to use. One of: json|table (default: table) (default "table")
-      --password string   The password to authenticate to the registry with
-      --password-stdin    Take the password from stdin
-      --token string      The token to authenticate to the registry with
-      --token-stdin       Take the token from stdin
-      --username string   The username to authenticate to the registry with
-```
-
-### Options inherited from parent commands
-
-```
-      --app string        The app slug or app id to use in all calls
-      --skip-validation   Skip validation of the registry (not recommended)
-```
-
-### SEE ALSO
-
-* [replicated registry add](replicated-cli-registry-add) - add
-
-================
-File: docs/reference/replicated-cli-registry-add-ecr.mdx
-================
-# replicated registry add ecr
-
-Add an ECR registry
-
-### Synopsis
-
-Add an ECR registry using an Access Key ID and Secret Access Key
-
-```
-replicated registry add ecr [flags]
-```
-
-### Options
-
-```
-      --accesskeyid string   The access key id to authenticate to the registry with
-      --endpoint string      The ECR endpoint
-  -h, --help                 help for ecr
-      --output string        The output format to use.
One of: json|table (default: table) (default "table") - --secretaccesskey string The secret access key to authenticate to the registry with - --secretaccesskey-stdin Take the secret access key from stdin -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --skip-validation Skip validation of the registry (not recommended) - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated registry add](replicated-cli-registry-add) - add - -================ -File: docs/reference/replicated-cli-registry-add-gar.mdx -================ -# replicated registry add gar - -Add a Google Artifact Registry - -### Synopsis - -Add a Google Artifact Registry using a service account key - -``` -replicated registry add gar [flags] -``` - -### Options - -``` - --authtype string Auth type for the registry (default "serviceaccount") - --endpoint string The GAR endpoint - -h, --help help for gar - --output string The output format to use. One of: json|table (default: table) (default "table") - --serviceaccountkey string The service account key to authenticate to the registry with - --serviceaccountkey-stdin Take the service account key from stdin - --token string The token to use to auth to the registry with - --token-stdin Take the token from stdin -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --skip-validation Skip validation of the registry (not recommended) -``` - -### SEE ALSO - -* [replicated registry add](replicated-cli-registry-add) - add - -================ -File: docs/reference/replicated-cli-registry-add-gcr.mdx -================ -# replicated registry add gcr - -Add a Google Container Registry - -### Synopsis - -Add a Google Container Registry using a service account key - -``` -replicated registry add gcr [flags] -``` - -### Options - -``` - --endpoint string The GCR endpoint - -h, --help help for gcr - --output string The output format to use. One of: json|table (default: table) (default "table") - --serviceaccountkey string The service account key to authenticate to the registry with - --serviceaccountkey-stdin Take the service account key from stdin -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --skip-validation Skip validation of the registry (not recommended) - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated registry add](replicated-cli-registry-add) - add - -================ -File: docs/reference/replicated-cli-registry-add-ghcr.mdx -================ -# replicated registry add ghcr - -Add a GitHub Container Registry - -### Synopsis - -Add a GitHub Container Registry using a username and personal access token (PAT) - -``` -replicated registry add ghcr [flags] -``` - -### Options - -``` - -h, --help help for ghcr - --output string The output format to use. 
One of: json|table (default: table) (default "table")
-      --token string    The token to use to auth to the registry with
-      --token-stdin     Take the token from stdin
-```
-
-### Options inherited from parent commands
-
-```
-      --app string        The app slug or app id to use in all calls
-      --skip-validation   Skip validation of the registry (not recommended)
-```
-
-### SEE ALSO
-
-* [replicated registry add](replicated-cli-registry-add) - add
-
-================
-File: docs/reference/replicated-cli-registry-add-other.mdx
-================
-# replicated registry add other
-
-Add a generic registry
-
-### Synopsis
-
-Add a generic registry using a username/password
-
-```
-replicated registry add other [flags]
-```
-
-### Options
-
-```
-      --endpoint string   endpoint for the registry
-  -h, --help              help for other
-      --output string     The output format to use. One of: json|table (default: table) (default "table")
-      --password string   The password to authenticate to the registry with
-      --password-stdin    Take the password from stdin
-      --username string   The username to authenticate to the registry with
-```
-
-### Options inherited from parent commands
-
-```
-      --app string        The app slug or app id to use in all calls
-      --skip-validation   Skip validation of the registry (not recommended)
-      --token string      The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated registry add](replicated-cli-registry-add) - add
-
-================
-File: docs/reference/replicated-cli-registry-add-quay.mdx
-================
-# replicated registry add quay
-
-Add a quay.io registry
-
-### Synopsis
-
-Add a quay.io registry using a username/password (or a robot account)
-
-```
-replicated registry add quay [flags]
-```
-
-### Options
-
-```
-  -h, --help              help for quay
-      --output string     The output format to use. One of: json|table (default: table) (default "table")
-      --password string   The password to authenticate to the registry with
-      --password-stdin    Take the password from stdin
-      --username string   The username to authenticate to the registry with
-```
-
-### Options inherited from parent commands
-
-```
-      --app string        The app slug or app id to use in all calls
-      --skip-validation   Skip validation of the registry (not recommended)
-      --token string      The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated registry add](replicated-cli-registry-add) - add
-
-================
-File: docs/reference/replicated-cli-registry-add.mdx
-================
-# replicated registry add
-
-add
-
-### Synopsis
-
-add
-
-### Options
-
-```
-  -h, --help              help for add
-      --skip-validation   Skip validation of the registry (not recommended)
-```
-
-### Options inherited from parent commands
-
-```
-      --app string     The app slug or app id to use in all calls
-      --token string   The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated registry](replicated-cli-registry) - Manage registries
-* [replicated registry add dockerhub](replicated-cli-registry-add-dockerhub) - Add a DockerHub registry
-* [replicated registry add ecr](replicated-cli-registry-add-ecr) - Add an ECR registry
-* [replicated registry add gar](replicated-cli-registry-add-gar) - Add a Google Artifact Registry
-* [replicated registry add gcr](replicated-cli-registry-add-gcr) - Add a Google Container Registry
-* [replicated registry add ghcr](replicated-cli-registry-add-ghcr) - Add a GitHub Container Registry
-* [replicated registry add other](replicated-cli-registry-add-other) - Add a generic registry
-* [replicated registry add quay](replicated-cli-registry-add-quay) - Add a quay.io registry
-
-================
-File: docs/reference/replicated-cli-registry-ls.mdx
-================
-# replicated registry ls
-
-list registries
-
-### Synopsis
-
-list registries, or a single registry by name
-
-```
-replicated registry ls [NAME] [flags]
-```
-
-### Aliases
-
-```
-ls, list
-```
-
-### Options
-
-```
-  -h, --help            help for ls
-      --output string   The output format to use.
One of: json|table (default: table) (default "table") -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated registry](replicated-cli-registry) - Manage registries - -================ -File: docs/reference/replicated-cli-registry-rm.mdx -================ -# replicated registry rm - -remove registry - -### Synopsis - -remove registry by endpoint - -``` -replicated registry rm [ENDPOINT] [flags] -``` - -### Aliases - -``` -rm, delete -``` - -### Options - -``` - -h, --help help for rm -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated registry](replicated-cli-registry) - Manage registries - -================ -File: docs/reference/replicated-cli-registry-test.mdx -================ -# replicated registry test - -test registry - -### Synopsis - -test registry - -``` -replicated registry test HOSTNAME [flags] -``` - -### Options - -``` - -h, --help help for test - --image string The image to test pulling -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated registry](replicated-cli-registry) - Manage registries - -================ -File: docs/reference/replicated-cli-registry.mdx -================ -# replicated registry - -Manage registries - -### Synopsis - -registry can be used to manage existing registries and add new registries to a team - -### Options - -``` - -h, --help help for registry -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated -* [replicated registry add](replicated-cli-registry-add) - add -* [replicated registry ls](replicated-cli-registry-ls) - list registries -* [replicated registry rm](replicated-cli-registry-rm) - remove registry -* [replicated registry test](replicated-cli-registry-test) - test registry - -================ -File: docs/reference/replicated-cli-release-compatibility.mdx -================ -# replicated release compatibility - -Report release compatibility - -### Synopsis - -Report release compatibility for a kubernetes distribution and version - -``` -replicated release compatibility SEQUENCE [flags] -``` - -### Options - -``` - --distribution string Kubernetes distribution of the cluster to report on. - --failure If set, the compatibility will be reported as a failure. - -h, --help help for compatibility - --notes string Additional notes to report. - --success If set, the compatibility will be reported as a success. 
-      --version string        Kubernetes version of the cluster to report on (format is distribution dependent)
-```
-
-### Options inherited from parent commands
-
-```
-      --app string     The app slug or app id to use in all calls
-      --token string   The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated release](replicated-cli-release) - Manage app releases
-
-================
-File: docs/reference/replicated-cli-release-create.mdx
-================
-# replicated release create
-
-Create a new release
-
-### Synopsis
-
-Create a new release by providing application manifests for the next release in
-your sequence.
-
-```
-replicated release create [flags]
-```
-
-### Options
-
-```
-      --auto                   generate default values for use in CI
-  -y, --confirm-auto           auto-accept the configuration generated by the --auto flag
-      --ensure-channel         When used with --promote <channel>, will create the channel if it doesn't exist
-      --fail-on string         The minimum severity to cause the command to exit with a non-zero exit code. Supported values are [info, warn, error, none]. (default "error")
-  -h, --help                   help for create
-      --lint                   Lint a manifests directory prior to creation of the KOTS Release.
-      --promote string         Channel name or id to promote this release to
-      --release-notes string   When used with --promote <channel>, sets the **markdown** release notes
-      --version string         When used with --promote <channel>, sets the version label for the release in this channel
-      --yaml-dir string        The directory containing multiple yamls for a Kots release. Cannot be used with the --yaml flag.
-```
-
-### Options inherited from parent commands
-
-```
-      --app string     The app slug or app id to use in all calls
-      --token string   The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated release](replicated-cli-release) - Manage app releases
-
-================
-File: docs/reference/replicated-cli-release-download.mdx
-================
-# replicated release download
-
-Download application manifests for a release.
-
-### Synopsis
-
-Download application manifests for a release to a specified directory.
-
-For non-KOTS applications, this is equivalent to the 'release inspect' command.
-
-```
-replicated release download RELEASE_SEQUENCE [flags]
-```
-
-### Examples
-
-```
-replicated release download 1 --dest ./manifests
-```
-
-### Options
-
-```
-  -d, --dest string   Directory to which release manifests should be downloaded
-  -h, --help          help for download
-```
-
-### Options inherited from parent commands
-
-```
-      --app string     The app slug or app id to use in all calls
-      --token string   The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated release](replicated-cli-release) - Manage app releases
-
-================
-File: docs/reference/replicated-cli-release-inspect.mdx
-================
-# replicated release inspect
-
-Show information about a release
-
-### Synopsis
-
-Show information about the specified application release.
-
-This command displays detailed information about a specific release of an application.
-
-The output can be customized using the --output flag to display results in
-either table or JSON format.
- - -``` -replicated release inspect RELEASE_SEQUENCE [flags] -``` - -### Examples - -``` -# Display information about a release -replicated release inspect 123 - -# Display information about a release in JSON format -replicated release inspect 123 --output json -``` - -### Options - -``` - -h, --help help for inspect - --output string The output format to use. One of: json|table (default: table) (default "table") -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated release](replicated-cli-release) - Manage app releases - -================ -File: docs/reference/replicated-cli-release-lint.mdx -================ -# replicated release lint - -Lint a directory of KOTS manifests - -### Synopsis - -Lint a directory of KOTS manifests - -``` -replicated release lint [flags] -``` - -### Options - -``` - --fail-on string The minimum severity to cause the command to exit with a non-zero exit code. Supported values are [info, warn, error, none]. (default "error") - -h, --help help for lint - --output string The output format to use. One of: json|table (default: table) (default "table") - --yaml-dir yaml The directory containing multiple yamls for a Kots release. Cannot be used with the yaml flag. -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated release](replicated-cli-release) - Manage app releases - -================ -File: docs/reference/replicated-cli-release-ls.mdx -================ -# replicated release ls - -List all of an app's releases - -### Synopsis - -List all of an app's releases - -``` -replicated release ls [flags] -``` - -### Aliases - -``` -ls, list -``` - -### Options - -``` - -h, --help help for ls - --output string The output format to use. 
One of: json|table (default: table) (default "table") -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated release](replicated-cli-release) - Manage app releases - -================ -File: docs/reference/replicated-cli-release-promote.mdx -================ -# replicated release promote - -Set the release for a channel - -### Synopsis - -Set the release for a channel - -``` -replicated release promote SEQUENCE CHANNEL_ID [flags] -``` - -### Examples - -``` -replicated release promote 15 fe4901690971757689f022f7a460f9b2 -``` - -### Options - -``` - -h, --help help for promote - --optional If set, this release can be skipped - --release-notes string The **markdown** release notes - --required If set, this release can't be skipped - --version string A version label for the release in this channel -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated release](replicated-cli-release) - Manage app releases - -================ -File: docs/reference/replicated-cli-release-test.mdx -================ -# replicated release test - -Test the application release - -### Synopsis - -Test the application release - -``` -replicated release test SEQUENCE [flags] -``` - -### Options - -``` - -h, --help help for test -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated release](replicated-cli-release) - Manage app releases - -================ -File: docs/reference/replicated-cli-release-update.mdx -================ -# replicated release update - -Updated a release's yaml config - -### Synopsis - -Updated a release's yaml config - -``` -replicated release update SEQUENCE [flags] -``` - -### Options - -``` - -h, --help help for update - --yaml string The new YAML config for this release. Use '-' to read from stdin. Cannot be used with the --yaml-file flag. - --yaml-dir string The directory containing multiple yamls for a Kots release. Cannot be used with the --yaml flag. - --yaml-file string The file name with YAML config for this release. Cannot be used with the --yaml flag. -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated release](replicated-cli-release) - Manage app releases - -================ -File: docs/reference/replicated-cli-release.mdx -================ -# replicated release - -Manage app releases - -### Synopsis - -The release command allows vendors to create, display, and promote their releases. 
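-
-### Examples
-
-The following is an illustrative workflow using the subcommands documented below; the manifest directory and channel name are placeholders:
-
-```
-# Create a release from a directory of manifests and promote it to a channel
-replicated release create --yaml-dir ./manifests --promote Beta
-
-# List all of the app's releases
-replicated release ls
-```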
-
-### Options
-
-```
- -h, --help help for release
-```
-
-### Options inherited from parent commands
-
-```
- --app string The app slug or app id to use in all calls
- --token string The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated
-* [replicated release compatibility](replicated-cli-release-compatibility) - Report release compatibility
-* [replicated release create](replicated-cli-release-create) - Create a new release
-* [replicated release download](replicated-cli-release-download) - Download application manifests for a release.
-* [replicated release inspect](replicated-cli-release-inspect) - Show information about a release
-* [replicated release lint](replicated-cli-release-lint) - Lint a directory of KOTS manifests
-* [replicated release ls](replicated-cli-release-ls) - List all of an app's releases
-* [replicated release promote](replicated-cli-release-promote) - Set the release for a channel
-* [replicated release test](replicated-cli-release-test) - Test the application release
-* [replicated release update](replicated-cli-release-update) - Update a release's yaml config
-
-================
-File: docs/reference/replicated-cli-version-upgrade.mdx
-================
-# replicated version upgrade
-
-Upgrade the replicated CLI to the latest version
-
-### Synopsis
-
-Download, verify, and upgrade the Replicated CLI to the latest version
-
-```
-replicated version upgrade [flags]
-```
-
-### Options
-
-```
- -h, --help help for upgrade
-```
-
-### Options inherited from parent commands
-
-```
- --app string The app slug or app id to use in all calls
- --token string The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated version](replicated-cli-version) - Print the current version and exit
-
-================
-File: docs/reference/replicated-cli-version.mdx
-================
-# replicated version
-
-Print the current version and exit
-
-### Synopsis
-
-Print the current version and exit
-
-```
-replicated version [flags]
-```
-
-### Options
-
-```
- -h, --help help for version
- --json output version info in json
-```
-
-### Options inherited from parent commands
-
-```
- --app string The app slug or app id to use in all calls
- --token string The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated
-* [replicated version upgrade](replicated-cli-version-upgrade) - Upgrade the replicated CLI to the latest version
-
-================
-File: docs/reference/replicated-cli-vm-create.mdx
-================
-# replicated vm create
-
-Create one or more test VMs with specified distribution, version, and configuration options.
-
-### Synopsis
-
-Create one or more test VMs with a specified distribution, version, and a variety of customizable configuration options.
-
-This command allows you to provision VMs with different distributions (e.g., Ubuntu, RHEL), versions, instance types, and more. You can set the number of VMs to create, disk size, and specify the network to use. If no network is provided, a new network will be created automatically. You can also assign tags to your VMs and use a TTL (Time-To-Live) to define how long the VMs should live.
-
-By default, the command provisions one VM, but you can customize the number of VMs to create by using the "--count" flag. Additionally, you can use the "--dry-run" flag to simulate the creation without actually provisioning the VMs.
-
-The command also supports a "--wait" flag to wait for the VMs to be ready before returning control, with a customizable timeout duration.
-
-```
-replicated vm create [flags]
-```
-
-### Examples
-
-```
-# Create a single Ubuntu 20.04 VM
-replicated vm create --distribution ubuntu --version 20.04
-
-# Create 3 Ubuntu 22.04 VMs
-replicated vm create --distribution ubuntu --version 22.04 --count 3
-
-# Create 5 Ubuntu VMs with a custom instance type and disk size
-replicated vm create --distribution ubuntu --version 20.04 --count 5 --instance-type r1.medium --disk 100
-```
-
-### Options
-
-```
- --count int Number of matching VMs to create (default 1)
- --disk int Disk Size (GiB) to request per node (default 50)
- --distribution string Distribution of the vm to provision
- --dry-run Dry run
- -h, --help help for create
- --instance-type string The type of instance to use (e.g. r1.medium)
- --name string VM name (defaults to random name)
- --network string The network to use for the VM(s). If not supplied, create a new network
- --output string The output format to use. One of: json|table|wide (default: table) (default "table")
- --tag stringArray Tag to apply to the VM (key=value format, can be specified multiple times)
- --ttl string VM TTL (duration, max 48h)
- --version string Version to provision (format is distribution dependent)
- --wait duration Wait duration for VM(s) to be ready (leave empty to not wait)
-```
-
-### Options inherited from parent commands
-
-```
- --app string The app slug or app id to use in all calls
- --token string The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated vm](replicated-cli-vm) - Manage test virtual machines.
-
-================
-File: docs/reference/replicated-cli-vm-ls.mdx
-================
-# replicated vm ls
-
-List test VMs and their status, with optional filters for start/end time and terminated VMs.
-
-### Synopsis
-
-List all test VMs in your account, including their current status, distribution, version, and more. You can use optional flags to filter the output based on VM termination status, start time, or end time. This command can also watch the VM status in real-time.
-
-By default, the command will return a table of all VMs, but you can switch to JSON or wide output formats for more detailed information. The command supports filtering to show only terminated VMs or to specify a time range for the query.
-
-You can use the '--watch' flag to monitor VMs continuously. This will refresh the list of VMs every 2 seconds, displaying any updates in real-time, such as new VMs being created or existing VMs being terminated.
-
-The command also allows you to customize the output format, supporting 'json', 'table', and 'wide' views for flexibility based on your needs.
-
-```
-replicated vm ls [flags]
-```
-
-### Aliases
-
-```
-ls, list
-```
-
-### Examples
-
-```
-# List all active VMs
-replicated vm ls
-
-# List all VMs that were created after a specific start time
-replicated vm ls --start-time 2024-10-01T00:00:00Z
-
-# Show only terminated VMs
-replicated vm ls --show-terminated
-
-# Watch VM status changes in real-time
-replicated vm ls --watch
-```
-
-### Options
-
-```
- --end-time string end time for the query (Format: 2006-01-02T15:04:05Z)
- -h, --help help for ls
- --output string The output format to use.
One of: json|table|wide (default: table) (default "table") - --show-terminated when set, only show terminated vms - --start-time string start time for the query (Format: 2006-01-02T15:04:05Z) - -w, --watch watch vms -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated vm](replicated-cli-vm) - Manage test virtual machines. - -================ -File: docs/reference/replicated-cli-vm-port-expose.mdx -================ -# replicated vm port expose - -Expose a port on a vm to the public internet. - -### Synopsis - -The 'vm port expose' command is used to expose a specified port on a vm to the public internet. When exposing a port, the command automatically creates a DNS entry and, if using the "https" protocol, provisions a TLS certificate for secure communication. - -You can also create a wildcard DNS entry and TLS certificate by specifying the "--wildcard" flag. Please note that creating a wildcard certificate may take additional time. - -This command supports different protocols including "http", "https", "ws", and "wss" for web traffic and web socket communication. - -``` -replicated vm port expose VM_ID --port PORT [flags] -``` - -### Examples - -``` -# Expose port 8080 with HTTPS protocol and wildcard DNS -replicated vm port expose VM_ID --port 8080 --protocol https --wildcard - -# Expose port 3000 with HTTP protocol -replicated vm port expose VM_ID --port 3000 --protocol http - -# Expose port 8080 with multiple protocols -replicated vm port expose VM_ID --port 8080 --protocol http,https - -# Expose port 8080 and display the result in JSON format -replicated vm port expose VM_ID --port 8080 --protocol https --output json -``` - -### Options - -``` - -h, --help help for expose - --output string The output format to use. One of: json|table|wide (default: table) (default "table") - --port int Port to expose (required) - --protocol strings Protocol to expose (valid values are "http", "https", "ws" and "wss") (default [http,https]) - --wildcard Create a wildcard DNS entry and TLS certificate for this port -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated vm port](replicated-cli-vm-port) - Manage VM ports. - -================ -File: docs/reference/replicated-cli-vm-port-ls.mdx -================ -# replicated vm port ls - -List vm ports for a vm. - -### Synopsis - -The 'vm port ls' command lists all the ports configured for a specific vm. You must provide the vm ID to retrieve and display the ports. - -This command is useful for viewing the current port configurations, protocols, and other related settings of your test vm. The output format can be customized to suit your needs, and the available formats include table, JSON, and wide views. - -``` -replicated vm port ls VM_ID [flags] -``` - -### Examples - -``` -# List ports for a vm in the default table format -replicated vm port ls VM_ID - -# List ports for a vm in JSON format -replicated vm port ls VM_ID --output json - -# List ports for a vm in wide format -replicated vm port ls VM_ID --output wide -``` - -### Options - -``` - -h, --help help for ls - --output string The output format to use. 
One of: json|table|wide (default: table) (default "table")
-```
-
-### Options inherited from parent commands
-
-```
- --app string The app slug or app id to use in all calls
- --token string The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated vm port](replicated-cli-vm-port) - Manage VM ports.
-
-================
-File: docs/reference/replicated-cli-vm-port-rm.mdx
-================
-# replicated vm port rm
-
-Remove vm port by ID.
-
-### Synopsis
-
-The 'vm port rm' command removes a specific port from a vm. You must provide the ID of the port to remove.
-
-This command is useful for managing the network settings of your test vms by allowing you to clean up unused or incorrect ports. After removing a port, the updated list of ports will be displayed.
-
-```
-replicated vm port rm VM_ID --id PORT_ID [flags]
-```
-
-### Examples
-
-```
-# Remove a port using its ID
-replicated vm port rm VM_ID --id PORT_ID
-
-# Remove a port and display the result in JSON format
-replicated vm port rm VM_ID --id PORT_ID --output json
-```
-
-### Options
-
-```
- -h, --help help for rm
- --id string ID of the port to remove (required)
- --output string The output format to use. One of: json|table|wide (default: table) (default "table")
-```
-
-### Options inherited from parent commands
-
-```
- --app string The app slug or app id to use in all calls
- --token string The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated vm port](replicated-cli-vm-port) - Manage VM ports.
-
-================
-File: docs/reference/replicated-cli-vm-port.mdx
-================
-# replicated vm port
-
-Manage VM ports.
-
-### Synopsis
-
-The 'vm port' command is a parent command for managing ports in a vm. It allows users to list, remove, or expose specific ports used by the vm. Use the subcommands (such as 'ls', 'rm', and 'expose') to manage port configurations effectively.
-
-This command provides flexibility for handling ports in various test vms, ensuring efficient management of vm networking settings.
-
-### Examples
-
-```
-# List all exposed ports in a vm
-replicated vm port ls [VM_ID]
-
-# Remove an exposed port from a vm by port ID
-replicated vm port rm [VM_ID] --id [PORT_ID]
-
-# Expose a new port in a vm
-replicated vm port expose [VM_ID] --port [PORT]
-```
-
-### Options
-
-```
- -h, --help help for port
-```
-
-### Options inherited from parent commands
-
-```
- --app string The app slug or app id to use in all calls
- --token string The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated vm](replicated-cli-vm) - Manage test virtual machines.
-* [replicated vm port expose](replicated-cli-vm-port-expose) - Expose a port on a vm to the public internet.
-* [replicated vm port ls](replicated-cli-vm-port-ls) - List vm ports for a vm.
-* [replicated vm port rm](replicated-cli-vm-port-rm) - Remove vm port by ID.
-
-================
-File: docs/reference/replicated-cli-vm-rm.mdx
-================
-# replicated vm rm
-
-Remove test VM(s) immediately, with options to filter by name, tag, or remove all VMs.
-
-### Synopsis
-
-The 'rm' command allows you to remove test VMs from your account immediately. You can specify one or more VM IDs directly, or use flags to filter which VMs to remove based on their name, tags, or simply remove all VMs at once.
-
-This command supports multiple filtering options, including removing VMs by their name, by specific tags, or by specifying the '--all' flag to remove all VMs in your account.
- -You can also use the '--dry-run' flag to simulate the removal without actually deleting the VMs. - -``` -replicated vm rm ID [ID …] [flags] -``` - -### Aliases - -``` -rm, delete -``` - -### Examples - -``` -# Remove a VM by ID -replicated vm rm aaaaa11 - -# Remove multiple VMs by ID -replicated vm rm aaaaa11 bbbbb22 ccccc33 - -# Remove all VMs with a specific name -replicated vm rm --name test-vm - -# Remove all VMs with a specific tag -replicated vm rm --tag env=dev - -# Remove all VMs -replicated vm rm --all - -# Perform a dry run of removing all VMs -replicated vm rm --all --dry-run -``` - -### Options - -``` - --all remove all vms - --dry-run Dry run - -h, --help help for rm - --name stringArray Name of the vm to remove (can be specified multiple times) - --tag stringArray Tag of the vm to remove (key=value format, can be specified multiple times) -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated vm](replicated-cli-vm) - Manage test virtual machines. - -================ -File: docs/reference/replicated-cli-vm-update-ttl.mdx -================ -# replicated vm update ttl - -Update TTL for a test VM. - -### Synopsis - -The 'ttl' command allows you to update the Time to Live (TTL) for a test VM. This command modifies the lifespan of a running VM by updating its TTL, which is a duration starting from the moment the VM is provisioned. - -The TTL specifies how long the VM will run before it is automatically terminated. You can specify a duration up to a maximum of 48 hours. - -The command accepts a VM ID as an argument and requires the '--ttl' flag to specify the new TTL value. - -You can also specify the output format (json, table, wide) using the '--output' flag. - -``` -replicated vm update ttl [ID] [flags] -``` - -### Examples - -``` -# Update the TTL of a VM to 2 hours -replicated vm update ttl aaaaa11 --ttl 2h - -# Update the TTL of a VM to 30 minutes -replicated vm update ttl aaaaa11 --ttl 30m -``` - -### Options - -``` - -h, --help help for ttl - --output string The output format to use. One of: json|table|wide (default: table) (default "table") - --ttl string Update TTL which starts from the moment the vm is running (duration, max 48h). -``` - -### Options inherited from parent commands - -``` - --app string The app slug or app id to use in all calls - --id string id of the vm to update (when name is not provided) - --name string Name of the vm to update. - --token string The API token to use to access your app in the Vendor API -``` - -### SEE ALSO - -* [replicated vm update](replicated-cli-vm-update) - Update VM settings. - -================ -File: docs/reference/replicated-cli-vm-update.mdx -================ -# replicated vm update - -Update VM settings. - -### Synopsis - -The 'vm update' command allows you to modify the settings of a virtual machine. You can update a VM either by providing its ID or by specifying its name. This command supports updating various VM settings, which will be handled by specific subcommands. - -- To update the VM by its ID, use the '--id' flag. -- To update the VM by its name, use the '--name' flag. 
-
-Subcommands will allow for more specific updates, such as TTL.
-
-### Examples
-
-```
-# Update a VM by specifying its ID
-replicated vm update --id aaaaa11 --ttl 12h
-
-# Update a VM by specifying its name
-replicated vm update --name test-vm --ttl 12h
-```
-
-### Options
-
-```
- -h, --help help for update
- --id string id of the vm to update (when name is not provided)
- --name string Name of the vm to update.
-```
-
-### Options inherited from parent commands
-
-```
- --app string The app slug or app id to use in all calls
- --token string The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated vm](replicated-cli-vm) - Manage test virtual machines.
-* [replicated vm update ttl](replicated-cli-vm-update-ttl) - Update TTL for a test VM.
-
-================
-File: docs/reference/replicated-cli-vm-versions.mdx
-================
-# replicated vm versions
-
-List available VM versions.
-
-### Synopsis
-
-The 'vm versions' command lists all the available versions of virtual machines that can be provisioned. This includes the available distributions and their respective versions.
-
-- You can filter the list by a specific distribution using the '--distribution' flag.
-- The output can be formatted as a table or in JSON format using the '--output' flag.
-
-```
-replicated vm versions [flags]
-```
-
-### Examples
-
-```
-# List all available VM versions
-replicated vm versions
-
-# List VM versions for a specific distribution (e.g., Ubuntu)
-replicated vm versions --distribution ubuntu
-
-# Display the output in JSON format
-replicated vm versions --output json
-```
-
-### Options
-
-```
- --distribution string VM distribution to filter by.
- -h, --help help for versions
- --output string The output format to use. One of: json|table (default: table) (default "table")
-```
-
-### Options inherited from parent commands
-
-```
- --app string The app slug or app id to use in all calls
- --token string The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated vm](replicated-cli-vm) - Manage test virtual machines.
-
-================
-File: docs/reference/replicated-cli-vm.mdx
-================
-# replicated vm
-
-Manage test virtual machines.
-
-### Synopsis
-
-The 'vm' command allows you to manage and interact with virtual machines (VMs) used for testing purposes. With this command, you can create, list, remove, update, and manage VMs, as well as retrieve information about available VM versions.
-
-### Examples
-
-```
-# Create a single Ubuntu VM
-replicated vm create --distribution ubuntu --version 20.04
-
-# List all VMs
-replicated vm ls
-
-# Remove a specific VM by ID
-replicated vm rm <vm-id>
-
-# Update TTL for a specific VM
-replicated vm update ttl <vm-id> --ttl 24h
-```
-
-### Options
-
-```
- -h, --help help for vm
-```
-
-### Options inherited from parent commands
-
-```
- --app string The app slug or app id to use in all calls
- --token string The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated](replicated) - Manage your Commercial Software Distribution Lifecycle using Replicated
-* [replicated vm create](replicated-cli-vm-create) - Create one or more test VMs with specified distribution, version, and configuration options.
-* [replicated vm ls](replicated-cli-vm-ls) - List test VMs and their status, with optional filters for start/end time and terminated VMs.
-* [replicated vm port](replicated-cli-vm-port) - Manage VM ports.
-* [replicated vm rm](replicated-cli-vm-rm) - Remove test VM(s) immediately, with options to filter by name, tag, or remove all VMs. -* [replicated vm update](replicated-cli-vm-update) - Update VM settings. -* [replicated vm versions](replicated-cli-vm-versions) - List available VM versions. - -================ -File: docs/reference/replicated-sdk-apis.md -================ -# Replicated SDK API - -The Replicated SDK provides an API that you can use to embed Replicated functionality in your Helm chart application. - -For example, if your application includes a UI where users manage their application instance, then you can use the `/api/v1/app/updates` endpoint to include messages in the UI that encourage users to upgrade when new versions are available. You could also revoke access to the application during runtime when a license expires using the `/api/v1/license/fields` endpoint. - -For more information about how to get started with the Replicated SDK, see [About the Replicated SDK](/vendor/replicated-sdk-overview). - -For information about how to develop against the Replicated SDK API with mock data, see [Developing Against the Replicated SDK](/vendor/replicated-sdk-development). - -## app - -### GET /app/info - -List details about an application instance, including the app name, location of the Helm chart in the Replicated OCI registry, and details about the current application release that the instance is running. - -```bash -GET http://replicated:3000/api/v1/app/info -``` - -Response: - -```json -{ - "instanceID": "8dcdb181-5cc4-458c-ad95-c0a1563cb0cb", - "appSlug": "my-app", - "appName": "My App", - "appStatus": "ready", - "helmChartURL": "oci://registry.replicated.com/my-app/beta/my-helm-chart", - "currentRelease": { - "versionLabel": "0.1.72", - "channelID": "2CBDxNwDH1xyYiIXRTjiB7REjKX", - "channelName": "Beta", - "createdAt": "2023-05-28T16:31:21Z", - "releaseNotes": "", - "helmReleaseName": "my-helm-chart", - "helmReleaseRevision": 5, - "helmReleaseNamespace": "my-helm-chart" - }, - "channelID": "2CBDxNwDH1xyYiIXRTjiB7REjKX", - "channelName": "Beta", - "channelSequence": 4, - "releaseSequence": 30 -} -``` - -### GET /app/status - -List details about an application status, including the list of individual resource states and the overall application state. - -```bash -GET http://replicated:3000/api/v1/app/status -``` - -Response: - -```json -{ - "appStatus": { - "appSlug": "my-app", - "resourceStates": [ - { - "kind": "deployment", - "name": "api", - "namespace": "default", - "state": "ready" - } - ], - "updatedAt": "2024-12-19T23:01:52.207162284Z", - "state": "ready", - "sequence": 268 - } -} -``` - -### GET /app/updates - -List details about the releases that are available to an application instance for upgrade, including the version label, created timestamp, and release notes. - -```bash -GET http://replicated:3000/api/v1/app/updates -``` - -Response: - -```json -[ - { - "versionLabel": "0.1.15", - "createdAt": "2023-05-12T15:48:45.000Z", - "releaseNotes": "Awesome new features!" - } -] -``` - -### GET /app/history - -List details about the releases that an application instance has installed previously. 
-
-```bash
-GET http://replicated:3000/api/v1/app/history
-```
-
-Response:
-
-```json
-{
-  "releases": [
-    {
-      "versionLabel": "0.1.70",
-      "channelID": "2CBDxNwDH1xyYiIXRTjiB7REjKX",
-      "channelName": "Stable",
-      "createdAt": "2023-05-12T17:43:51Z",
-      "releaseNotes": "",
-      "helmReleaseName": "echo-server",
-      "helmReleaseRevision": 2,
-      "helmReleaseNamespace": "echo-server-helm"
-    }
-  ]
-}
-```
-
-### POST /app/custom-metrics
-
-Send custom application metrics. For more information and examples, see [Configuring Custom Metrics](/vendor/custom-metrics).
-
-### PATCH /app/custom-metrics
-
-Send partial custom application metrics for upserting.
-
-```bash
-PATCH http://replicated:3000/api/v1/app/custom-metrics
-```
-Request:
-
-```json
-{
-  "data": {
-    "numProjects": 20
-  }
-}
-```
-
-Response: Status `200` OK
-
-### DELETE /app/custom-metrics/\{metric_name\}
-
-Delete an application custom metric.
-
-```bash
-DELETE http://replicated:3000/api/v1/app/custom-metrics/numProjects
-```
-
-Response: Status `204` No Content
-
-### POST /app/instance-tags
-
-Programmatically set new instance tags or overwrite existing tags. Instance tags are key-value pairs, where the key and the value are strings.
-
-Setting a tag with the `name` key will set the instance's name in the vendor portal.
-
-The `force` parameter defaults to `false`. If `force` is `false`, conflicting pre-existing tags will not be overwritten and the existing tags take precedence. If the `force` parameter is set to `true`, any conflicting pre-existing tags will be overwritten.
-
-To delete a particular tag, set the key's value to an empty string `""`.
-
-```bash
-POST http://replicated:3000/api/v1/app/instance-tags
-```
-Request:
-
-```json
-{
-  "data": {
-    "force": false,
-    "tags": {
-      "name": "my-instance-name",
-      "preExistingKey": "will-not-be-overwritten",
-      "cpuCores": "10",
-      "supportTier": "basic"
-    }
-  }
-}
-```
-
-Response: Status `200` OK
-
-## license
-
-### GET /license/info
-
-List details about the license that was used to install, including the license ID, type, the customer name, and the channel the customer is assigned to.
-
-```bash
-GET http://replicated:3000/api/v1/license/info
-```
-
-Response:
-
-```json
-{
-  "licenseID": "YiIXRTjiB7R...",
-  "appSlug": "my-app",
-  "channelID": "2CBDxNwDH1xyYiIXRTjiB7REjKX",
-  "channelName": "Stable",
-  "customerName": "Example Customer",
-  "customerEmail": "username@example.com",
-  "licenseType": "dev",
-  "licenseSequence": 1,
-  "isAirgapSupported": false,
-  "isGitOpsSupported": false,
-  "isIdentityServiceSupported": false,
-  "isGeoaxisSupported": false,
-  "isSnapshotSupported": false,
-  "isSupportBundleUploadSupported": false,
-  "isSemverRequired": true,
-  "endpoint": "https://replicated.app",
-  "entitlements": {
-    "expires_at": {
-      "title": "Expiration",
-      "description": "License Expiration",
-      "value": "",
-      "valueType": "String"
-    },
-    "numSeats": {
-      "title": "Number of Seats",
-      "value": 10,
-      "valueType": "Integer"
-    }
-  }
-}
-```
-
-### GET /license/fields
-
-List details about all the fields in the license that was used to install, including the field names, descriptions, values, and signatures.
- -```bash -GET http://replicated:3000/api/v1/license/fields -``` - -Response: - -```json -{ - "expires_at": { - "name": "expires_at", - "title": "Expiration", - "description": "License Expiration", - "value": "2023-05-30T00:00:00Z", - "valueType": "String", - "signature": { - "v1": "Vs+W7+sF0RA6UrFEJcyHAbC5YCIT67hdsDdqtJTRBd4ZitTe4pr1D/SZg2k0NRIozrBP1mXuTgjQgeI8PyQJc/ctQwZDikIEKFW0sVv0PFPQV7Uf9fy7wRgadfUxkagcCS8O6Tpcm4WqlhEcgiJGvPBki3hZLnMO9Ol9yOepZ7UtrUMVsBUKwcTJWCytpFpvvOLfSNoHxMnPuSgpXumbHZjvdXrJoJagoRDXPiXXKGh02DOr58ncLofYqPzze+iXWbE8tqdFBZc72lLayT1am3MN0n3ejCNWNeX9+CiBJkqMqLLkjN4eugUmU/gBiDtJgFUB2gq8ejVVcohqos69WA==" - } - }, - "numSeats": { - "name": "numSeats", - "title": "Number of Seats", - "value": 10, - "valueType": "Integer", - "signature": { - "v1": "UmsYlVr4+Vg5TWsJV6goagWUM4imdj8EUUcdau7wIzfcU0MuZnv3UNVlwVE/tCuROCMcbei6ygjm4j5quBdkAGUyq86BCtohg/SqRsgVoNV6BN0S+tnqJ7w4/nqRVBc2Gsn7wTYNXiszLMkmfeNOrigLgsrtaGJmZ4IsczwI1V5Tr+AMAgrACL/UyLg78Y6EitKFW4qvJ9g5Q8B3uVmT+h9xTBxJFuKTQS6qFcDx9XCu+bKqoSmJDZ8lwgwpJDAiBzIhxiAd66lypHX9cpOg5A7cKEW+FLdaBKQdNRcPHQK2O9QwFj/NKEeCJEufuD3OeV8MSbN2PCehMzbj7tXSww==" - } - } -} -``` - -### GET /license/fields/\{field_name\} - -List details about one of the fields in the license that was used to install, including the field name, description, value, and signature. - -```bash -GET http://replicated:3000/api/v1/license/fields/\{field_name\} -``` - -Example request: - -```bash -curl replicated:3000/api/v1/license/fields/expires_at -``` - -Response: - -```json -{ - "name": "expires_at", - "title": "Expiration", - "description": "License Expiration", - "value": "2023-05-30T00:00:00Z", - "valueType": "String", - "signature": { - "v1": "c6rsImpilJhW0eK+Kk37jeRQvBpvWgJeXK2MD0YBlIAZEs1zXpmvwLdfcoTsZMOj0lZbxkPN5dPhEPIVcQgrzfzwU5HIwQbwc2jwDrLBQS4hGOKdxOWXnBUNbztsHXMqlAYQsmAhspRLDhBiEoYpFV/8oaaAuNBrmRu/IVAW6ahB4KtP/ytruVdBup3gn1U/uPAl5lhzuBifaW+NDFfJxAXJrhdTxMBxzfdKa6dGmlGu7Ou/xqDU1bNF3AuWoP3C78GzSBQrD1ZPnu/d+nuEjtakKSX3EK6VUisNucm8/TFlEVKUuX7hex7uZ9Of+UgS1GutQXOhXzfMZ7u+0zHXvQ==" - } -} -``` - -## Integration - -### GET /api/v1/integration/status - -Get status of Development Mode. When this mode is enabled, the `app` API will use mock data. This value cannot be set programmatically. It is controlled by the installed license. - -```json -{ - "isEnabled": true -} -``` - -### GET /api/v1/integration/mock-data - -Get mock data that is used when Development Mode is enabled. 
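-
-The request follows the same pattern as the other SDK API endpoints:
-
-```bash
-GET http://replicated:3000/api/v1/integration/mock-data
-```
-
-Response: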
-
-```json
-{
-  "appStatus": "ready",
-  "helmChartURL": "oci://registry.replicated.com/dev-app/dev-channel/dev-parent-chart",
-  "currentRelease": {
-    "versionLabel": "0.1.3",
-    "releaseNotes": "release notes 0.1.3",
-    "createdAt": "2023-05-23T20:58:07Z",
-    "deployedAt": "2023-05-23T21:58:07Z",
-    "helmReleaseName": "dev-parent-chart",
-    "helmReleaseRevision": 3,
-    "helmReleaseNamespace": "default"
-  },
-  "deployedReleases": [
-    {
-      "versionLabel": "0.1.1",
-      "releaseNotes": "release notes 0.1.1",
-      "createdAt": "2023-05-21T20:58:07Z",
-      "deployedAt": "2023-05-21T21:58:07Z",
-      "helmReleaseName": "dev-parent-chart",
-      "helmReleaseRevision": 1,
-      "helmReleaseNamespace": "default"
-    },
-    {
-      "versionLabel": "0.1.2",
-      "releaseNotes": "release notes 0.1.2",
-      "createdAt": "2023-05-22T20:58:07Z",
-      "deployedAt": "2023-05-22T21:58:07Z",
-      "helmReleaseName": "dev-parent-chart",
-      "helmReleaseRevision": 2,
-      "helmReleaseNamespace": "default"
-    },
-    {
-      "versionLabel": "0.1.3",
-      "releaseNotes": "release notes 0.1.3",
-      "createdAt": "2023-05-23T20:58:07Z",
-      "deployedAt": "2023-05-23T21:58:07Z",
-      "helmReleaseName": "dev-parent-chart",
-      "helmReleaseRevision": 3,
-      "helmReleaseNamespace": "default"
-    }
-  ],
-  "availableReleases": [
-    {
-      "versionLabel": "0.1.4",
-      "releaseNotes": "release notes 0.1.4",
-      "createdAt": "2023-05-24T20:58:07Z",
-      "deployedAt": "2023-05-24T21:58:07Z",
-      "helmReleaseName": "",
-      "helmReleaseRevision": 0,
-      "helmReleaseNamespace": ""
-    },
-    {
-      "versionLabel": "0.1.5",
-      "releaseNotes": "release notes 0.1.5",
-      "createdAt": "2023-06-01T20:58:07Z",
-      "deployedAt": "2023-06-01T21:58:07Z",
-      "helmReleaseName": "",
-      "helmReleaseRevision": 0,
-      "helmReleaseNamespace": ""
-    }
-  ]
-}
-```
-
-### POST /api/v1/integration/mock-data
-
-Programmatically set mock data that is used when Development Mode is enabled. The payload will overwrite the existing mock data. Any data that is not included in the payload will be removed. For example, to remove release data, simply include empty arrays:
-
-```bash
-POST http://replicated:3000/api/v1/integration/mock-data
-```
-
-Request:
-
-```json
-{
-  "appStatus": "ready",
-  "helmChartURL": "oci://registry.replicated.com/dev-app/dev-channel/dev-parent-chart",
-  "currentRelease": {
-    "versionLabel": "0.1.3",
-    "releaseNotes": "release notes 0.1.3",
-    "createdAt": "2023-05-23T20:58:07Z",
-    "deployedAt": "2023-05-23T21:58:07Z",
-    "helmReleaseName": "dev-parent-chart",
-    "helmReleaseRevision": 3,
-    "helmReleaseNamespace": "default"
-  },
-  "deployedReleases": [],
-  "availableReleases": []
-}
-```
-
-Response: Status `201` Created
-
-## Examples
-
-This section provides example use cases for the Replicated SDK API.
-
-### Support Update Checks in Your Application
-
-The `api/v1/app/updates` endpoint returns details about new releases that are available to an instance for upgrade. You could use the `api/v1/app/updates` endpoint to allow your users to easily check for available updates from your application.
-
-Additionally, to make it easier for users to upgrade to new versions of your application, you could provide customer-specific upgrade instructions in your application by injecting values returned by the `/api/v1/license/info` and `/api/v1/app/info` endpoints.
-
-The following examples show how you could include a page in your application that lists available updates and also provides customer-specific upgrade instructions:
-
-![a user interface showing a list of available releases](/images/slackernews-update-page.png)
-[View a larger version of this image](/images/slackernews-update-page.png)
-
-![user-specific application upgrade instructions displayed in a dialog](/images/slackernews-update-instructions.png)
-[View a larger version of this image](/images/slackernews-update-instructions.png)
-
-To use the SDK API to check for available application updates and provide customer-specific upgrade instructions:
-
-1. From your application, call the `api/v1/app/updates` endpoint to return available updates for the application instance. Use the response to display available upgrades for the customer.
-
-   ```bash
-   curl replicated:3000/api/v1/app/updates
-   ```
-
-   **Example response**:
-
-   ```json
-   [
-     {
-       "versionLabel": "0.1.15",
-       "createdAt": "2023-05-12T15:48:45.000Z",
-       "releaseNotes": "Awesome new features!"
-     }
-   ]
-   ```
-
-1. For each available release, add logic that displays the required upgrade commands with customer-specific values. To upgrade, users must first run `helm registry login` to authenticate to the Replicated registry. Then, they can run `helm upgrade`:
-
-   1. Inject customer-specific values into the `helm registry login` command:
-
-      ```bash
-      helm registry login REGISTRY_DOMAIN --username EMAIL --password LICENSE_ID
-      ```
-
-      The `helm registry login` command requires the following components:
-
-      * `REGISTRY_DOMAIN`: The domain for the registry where your Helm chart is pushed. The registry domain is either `registry.replicated.com` or a custom domain that you added.
-
-      * `EMAIL`: The customer email address is available from the `/api/v1/license/info` endpoint in the `customerEmail` field.
-
-      * `LICENSE_ID`: The customer license ID is available from the `/api/v1/license/info` endpoint in the `licenseID` field.
-
-   1. Inject customer-specific values into the `helm upgrade` command:
-
-      ```bash
-      helm upgrade -n NAMESPACE RELEASE_NAME HELM_CHART_URL
-      ```
-
-      The following describes where the values in the `helm upgrade` command are available:
-
-      * `NAMESPACE`: The release namespace is available from the `/api/v1/app/info` endpoint in the `currentRelease.helmReleaseNamespace` field.
-
-      * `RELEASE_NAME`: The release name is available from the `/api/v1/app/info` endpoint in the `currentRelease.helmReleaseName` field.
-
-      * `HELM_CHART_URL`: The URL of the Helm chart at the OCI registry is available from the `/api/v1/app/info` endpoint in the `helmChartURL` field.
-
-### Revoke Access at Runtime When a License Expires
-
-You can use the Replicated SDK API `/api/v1/license/fields/{field_name}` endpoint to revoke a customer's access to your application during runtime when their license expires.
-
-To revoke access to your application when a license expires:
-
-1. In the vendor portal, click **Customers**. Select the target customer and click the **Manage customer** tab. Alternatively, click **+ Create customer** to create a new customer.
-
-1. Under **Expiration policy**:
-
-   1. Enable **Customer's license has an expiration date**.
-
-   1. For **When does this customer expire?**, use the calendar to set an expiration date for the license.
-
-   <img alt="expiration policy field in the manage customer page" src="/images/customer-expiration-policy.png" width="500px"/>
-
-   [View a larger version of this image](/images/customer-expiration-policy.png)
-
-1. Install the Replicated SDK as a standalone component in your cluster. This is called _integration mode_. Installing in integration mode allows you to develop locally against the SDK API without needing to create releases for your application in the vendor portal. See [Developing Against the SDK API](/vendor/replicated-sdk-development).
-
-1. In your application, use the `/api/v1/license/fields/expires_at` endpoint to get the `expires_at` field that you defined in the previous step.
-
-   **Example:**
-
-   ```bash
-   curl replicated:3000/api/v1/license/fields/expires_at
-   ```
-
-   ```json
-   {
-     "name": "expires_at",
-     "title": "Expiration",
-     "description": "License Expiration",
-     "value": "2023-05-30T00:00:00Z",
-     "valueType": "String",
-     "signature": {
-       "v1": "c6rsImpilJhW0eK+Kk37jeRQvBpvWgJeXK2M..."
-     }
-   }
-   ```
-
-1. Add logic to your application to revoke access if the current date and time is more recent than the expiration date of the license.
-
-1. (Recommended) Use signature verification in your application to ensure the integrity of the license field. See [Verifying License Field Signatures with the Replicated SDK API](/vendor/licenses-verify-fields-sdk-api).
-
-================
-File: docs/reference/replicated.mdx
-================
-# replicated
-
-Manage your Commercial Software Distribution Lifecycle using Replicated
-
-### Synopsis
-
-The 'replicated' CLI allows Replicated customers (vendors) to manage their Commercial Software Distribution Lifecycle (CSDL) using the Replicated API.
-
-### Options
-
-```
- --app string The app slug or app id to use in all calls
- -h, --help help for replicated
- --token string The API token to use to access your app in the Vendor API
-```
-
-### SEE ALSO
-
-* [replicated api](replicated-cli-api) - Make ad-hoc API calls to the Replicated API
-* [replicated app](replicated-cli-app) - Manage applications
-* [replicated channel](replicated-cli-channel) - List channels
-* [replicated cluster](replicated-cli-cluster) - Manage test Kubernetes clusters.
-* [replicated completion](replicated-cli-completion) - Generate completion script
-* [replicated customer](replicated-cli-customer) - Manage customers
-* [replicated default](replicated-cli-default) - Manage default values used by other commands
-* [replicated installer](replicated-cli-installer) - Manage Kubernetes installers
-* [replicated instance](replicated-cli-instance) - Manage instances
-* [replicated login](replicated-cli-login) - Log in to Replicated
-* [replicated logout](replicated-cli-logout) - Logout from Replicated
-* [replicated registry](replicated-cli-registry) - Manage registries
-* [replicated release](replicated-cli-release) - Manage app releases
-* [replicated version](replicated-cli-version) - Print the current version and exit
-* [replicated vm](replicated-cli-vm) - Manage test virtual machines.
-
-================
-File: docs/reference/template-functions-about.mdx
-================
-import UseCases from "../partials/template-functions/_use-cases.mdx"
-
-# About Template Functions
-
-This topic describes Replicated KOTS template functions, including information about use cases, syntax, and template function contexts.
- -## Overview - -For Kubernetes manifest files for applications deployed by Replicated KOTS, Replicated provides a set of custom template functions based on the Go text/template library. - -<UseCases/> - -All functionality of the Go templating language, including if statements, loops, and variables, is supported with KOTS template functions. For more information about the Go library, see [text/template](https://golang.org/pkg/text/template/) in the Go documentation. - -### Supported File Types - -You can use KOTS template functions in Kubernetes manifest files for applications deployed by KOTS, such as: -* Custom resources in the `kots.io` API group like Application, Config, or HelmChart -* Custom resources in other API groups like Preflight, SupportBundle, or Backup -* Kubernetes objects like Deployments, Services, Secrets, or ConfigMaps -* Kubernetes Operators - -### Limitations - -* Not all fields in the Config and Application custom resources support templating. For more information, see [Application](/reference/custom-resource-application) and [Item Properties](/reference/custom-resource-config#item-properties) in _Config_. - -* Templating is not supported in the [Embedded Cluster Config](/reference/embedded-config) resource. - -* KOTS template functions are not directly supported in Helm charts. For more information, see [Helm Charts](#helm-charts) below. - -### Helm Charts - -KOTS template functions are _not_ directly supported in Helm charts. However, the HelmChart custom resource provides a way to map values rendered by KOTS template functions to Helm chart values. This allows you to use KOTS template functions with Helm charts without making changes to those Helm charts. - -For information about how to map values from the HelmChart custom resource to Helm chart `values.yaml` files, see [Setting Helm Chart Values with KOTS](/vendor/helm-optional-value-keys). - -### Template Function Rendering - -During application installation and upgrade, KOTS templates all Kubernetes manifest files in a release (except for the Config custom resource) at the same time during a single process. - -For the [Config](/reference/custom-resource-config) custom resource, KOTS templates each item separately so that config items can be used in templates for other items. For examples of this, see [Using Conditional Statements in Configuration Fields](/vendor/config-screen-conditional) and [Template Function Examples](/reference/template-functions-examples). - -## Syntax {#syntax} - -The KOTS template function syntax supports the following functionally equivalent delimiters: -* [`repl{{ ... }}`](#syntax-integer) -* [`{{repl ... }}`](#syntax-string) - -### Syntax Requirements - -KOTS template function syntax has the following requirements: -* In both the `repl{{ ... }}` and `{{repl ... }}` syntaxes, there must be no whitespace between `repl` and the `{{` delimiter. -* The manifests where KOTS template functions are used must be valid YAML. This is because the YAML manifests are linted before KOTS template functions are rendered. - -### `repl{{ ... }}` {#syntax-integer} - -This syntax is recommended for most use cases. - -Any quotation marks wrapped around this syntax are stripped during rendering. If you need the rendered value to be quoted, you can pipe into quote (`| quote`) or use the [`{{repl ... }}`](#syntax-string) syntax instead. 
- -#### Integer Example - -```yaml -http: - port: repl{{ ConfigOption "load_balancer_port" }} -``` -```yaml -http: - port: 8888 -``` - -#### Example with `| quote` - -```yaml -customTag: repl{{ ConfigOption "tag" | quote }} -``` -```yaml -customTag: 'key: value' -``` - -#### If-Else Example - -```yaml -http: - port: repl{{ if ConfigOptionEquals "ingress_type" "load_balancer" }}repl{{ ConfigOption "load_balancer_port" }}repl{{ else }}8081repl{{ end }} -``` -```yaml -http: - port: 8081 -``` - -For more examples, see [Template Function Examples](/reference/template-functions-examples). - -### `{{repl ... }}` {#syntax-string} - -This syntax can be useful when having the delimiters outside the template function improves readability of the YAML, such as in multi-line statements or if-else statements. - -To use this syntax at the beginning of a value in YAML, it _must_ be wrapped in quotes because you cannot start a YAML value with the `{` character and manifests consumed by KOTS must be valid YAML. When this syntax is wrapped in quotes, the rendered value is also wrapped in quotes. - -#### Example With Quotes - -The following example is wrapped in quotes because it is used at the beginning of a statement in YAML: - -```yaml -customTag: '{{repl ConfigOption "tag" }}' -``` -```yaml -customTag: 'key: value' -``` - -#### If-Else Example -```yaml -my-service: - type: '{{repl if ConfigOptionEquals "ingress_type" "load_balancer" }}LoadBalancer{{repl else }}ClusterIP{{repl end }}' -``` -```yaml -my-service: - type: 'LoadBalancer' -``` - -For more examples, see [Template Function Examples](/reference/template-functions-examples). - -## Contexts {#contexts} - -KOTS template functions are grouped into different contexts, depending on the phase of the application lifecycle when the function is available and the context of the data that is provided. - -### Static Context - -The context necessary to render the static template functions is always available. - -The static context also includes the Masterminds Sprig function library. For more information, see [Sprig Function Documentation](http://masterminds.github.io/sprig/) on the sprig website. - -For a list of all KOTS template functions available in the static context, see [Static context](template-functions-static-context). - -### Config Context - -Template functions in the config context are available when rendering an application that includes a Config custom resource. -At execution time, template functions in the config context also can use the static context functions. - -For a list of all KOTS template functions available in the config context, see [Config context](template-functions-config-context). - -### License Context - -Template functions in the license context have access to customer license and version data. - -For a list of all KOTS template functions available in the license context, see [License context](template-functions-license-context). - -### kURL Context - -Template functions in the kURL context have access to information about applications installed in embedded clusters created by Replicated kURL. - -For a list of all KOTS template functions available in the kURL context, see [kURL context](template-functions-kurl-context). - -### Identity Context - -Template functions in the Identity context have access to Replicated identity service information. - -For a list of all KOTS template functions available in the identity context, see [Identity context](template-functions-identity-context). 
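-
-For example, a Deployment might enable an OIDC integration only when the identity service is turned on. The following is a minimal sketch, assuming the `IdentityServiceEnabled` function from the identity context; the environment variable name is illustrative:
-
-```yaml
-env:
-  # Renders to 'true' or 'false' based on the identity service configuration
-  - name: IDENTITY_SERVICE_ENABLED
-    value: '{{repl IdentityServiceEnabled }}'
-```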
-
-================
-File: docs/reference/template-functions-config-context.md
-================
-# Config Context
-
-## ConfigOption
-
-```go
-func ConfigOption(optionName string) string
-```
-
-Returns the value of the config option as a string.
-
-For information about the config screen and associated options, see [Config](custom-resource-config) in the _Custom Resources_ section.
-
-```yaml
-'{{repl ConfigOption "hostname" }}'
-```
-
-For `file` config options, `ConfigOption` returns the base64 **encoded** value of the file:
-
-```yaml
-'{{repl ConfigOption "ssl_key"}}'
-```
-
-To use files in a Secret, use `ConfigOption`:
-```yaml
-apiVersion: v1
-kind: Secret
-metadata:
-  name: tls-secret
-type: kubernetes.io/tls
-data:
-  tls.crt: '{{repl ConfigOption "tls_certificate_file" }}'
-  tls.key: '{{repl ConfigOption "tls_private_key_file" }}'
-```
-
-For more information about using TLS certificates, see [Using TLS Certificates](../vendor/packaging-using-tls-certs).
-
-## ConfigOptionData
-
-```go
-func ConfigOptionData(optionName string) string
-```
-
-`ConfigOptionData` returns the base64 **decoded** value of a `file` config option.
-
-```yaml
-'{{repl ConfigOptionData "ssl_key"}}'
-```
-
-To use files in a ConfigMap, use `ConfigOptionData`:
-```yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: tls-config
-data:
-  tls.crt: |
-    repl{{- ConfigOptionData "tls_certificate_file" | nindent 4 }}
-
-  tls.key: |
-    repl{{- ConfigOptionData "tls_private_key_file" | nindent 4 }}
-```
-
-## ConfigOptionFilename
-
-```go
-func ConfigOptionFilename(optionName string) string
-```
-
-`ConfigOptionFilename` returns the filename associated with a `file` config option.
-It will return an empty string if used erroneously with other types.
-
-```yaml
-'{{repl ConfigOptionFilename "pom_file"}}'
-```
-
-As an example, if you have the following Config Spec defined:
-
-```yaml
-apiVersion: kots.io/v1beta1
-kind: Config
-metadata:
-  name: my-application
-spec:
-  groups:
-    - name: java_settings
-      title: Java Settings
-      description: Configures the Java Server build parameters
-      items:
-        - name: pom_file
-          type: file
-          required: true
-```
-
-You can use `ConfigOptionFilename` in a Pod Spec to mount a file like so:
-```yaml
-apiVersion: v1
-kind: Pod
-metadata:
-  name: configmap-demo-pod
-spec:
-  containers:
-    - name: some-java-app
-      image: busybox
-      command: ["/bin/sh"]
-      args:
-        - "-c"
-        - 'cat /config/{{repl ConfigOptionFilename "pom_file"}}'
-      volumeMounts:
-        - name: config
-          mountPath: "/config"
-          readOnly: true
-  volumes:
-    - name: config
-      configMap:
-        name: demo-configmap
-        items:
-          - key: data_key_one
-            path: repl{{ ConfigOptionFilename "pom_file" }}
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: demo-configmap
-data:
-  data_key_one: repl{{ ConfigOptionData "pom_file" }}
-```
-
-## ConfigOptionEquals
-
-```go
-func ConfigOptionEquals(optionName string, expectedValue string) bool
-```
-
-Returns true if the configuration option value is equal to the supplied value.
-
-```yaml
-'{{repl ConfigOptionEquals "http_enabled" "1" }}'
-```
-
-## ConfigOptionNotEquals
-
-```go
-func ConfigOptionNotEquals(optionName string, expectedValue string) bool
-```
-
-Returns true if the configuration option value is not equal to the supplied value.
-
-```yaml
-'{{repl ConfigOptionNotEquals "http_enabled" "1" }}'
-```
-
-## LocalRegistryAddress
-
-```go
-func LocalRegistryAddress() string
-```
-
-Returns the local registry host or host/namespace that's configured.
-This will always return everything before the image name and tag.
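-
-For example, if the configured local registry is `registry.somebigbank.com/my-app`, the following sketch (the image name and tag are illustrative) renders as `registry.somebigbank.com/my-app/nginx:1.25`:
-
-```yaml
-image: '{{repl LocalRegistryAddress }}/nginx:1.25'
-```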
- -## LocalRegistryHost - -```go -func LocalRegistryHost() string -``` - -Returns the host of the local registry that the user configured. Alternatively, for air gap installations with Replicated Embedded Cluster or Replicated kURL, LocalRegistryHost returns the host of the built-in registry. - -Includes the port if one is specified. - -## LocalRegistryNamespace - -```go -func LocalRegistryNamespace() string -``` - -Returns the namespace of the local registry that the user configured. Alternatively, for air gap installations with Embedded Cluster or kURL, LocalRegistryNamespace returns the namespace of the built-in registry. - -## LocalImageName - -```go -func LocalImageName(remoteImageName string) string -``` - -Given a `remoteImageName`, rewrite the `remoteImageName` so that it can be pulled to local hosts. - -A common use case for the `LocalImageName` function is to ensure that a Kubernetes Operator can determine the names of container images on Pods created at runtime. For more information, see [Referencing Images](/vendor/operator-referencing-images) in the _Packaging a Kubernetes Operator Application_ section. - -`LocalImageName` rewrites the `remoteImageName` in one of the following ways, depending on if a private registry is configured and if the image must be proxied: - -* If there is a private registry configured in the customer's environment, such as in air gapped environments, rewrite `remoteImageName` to reference the private registry locally. For example, rewrite `elasticsearch:7.6.0` as `registry.somebigbank.com/my-app/elasticsearch:7.6.0`. - -* If there is no private registry configured in the customer's environment, but the image must be proxied, rewrite `remoteImageName` so that the image can be pulled through the proxy registry. For example, rewrite `"quay.io/orgname/private-image:v1.2.3"` as `proxy.replicated.com/proxy/app-name/quay.io/orgname/private-image:v1.2.3`. - -* If there is no private registry configured in the customer's environment and the image does not need to be proxied, return `remoteImageName` without changes. - -For more information about the Replicated proxy registry, see [About the Proxy Registry](/vendor/private-images-about). - -## LocalRegistryImagePullSecret - -```go -func LocalRegistryImagePullSecret() string -``` - -Returns the base64 encoded local registry image pull secret value. -This is often needed when an operator is deploying images to a namespace that is not managed by Replicated KOTS. -Image pull secrets must be present in the namespace of the pod. - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: my-image-pull-secret - namespace: my-namespace -type: kubernetes.io/dockerconfigjson -data: - .dockerconfigjson: '{{repl LocalRegistryImagePullSecret }}' ---- -apiVersion: v1 -kind: Pod -metadata: - name: dynamic-pod - namespace: my-namespace -spec: - containers: - - image: '{{repl LocalImageName "registry.replicated.com/my-app/my-image:abcdef" }}' - name: my-container - imagePullSecrets: - - name: my-image-pull-secret -``` - -## ImagePullSecretName - -```go -func ImagePullSecretName() string -``` - -Returns the name of the image pull secret that can be added to pod specs that use private images. -The secret will be automatically created in all application namespaces. -It will contain authentication information for any private registry used with the application. 
- -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: my-deployment -spec: - template: - spec: - imagePullSecrets: - - name: repl{{ ImagePullSecretName }} -``` - -## HasLocalRegistry - -```go -func HasLocalRegistry() bool -``` - -Returns true if the environment is configured to rewrite images to a local registry. -HasLocalRegistry is always true for air gap installations. HasLocalRegistry is true in online installations if the user pushed images to a local registry. - -================ -File: docs/reference/template-functions-examples.mdx -================ -import IntegerComparison from "../partials/template-functions/_integer-comparison.mdx" -import StringComparison from "../partials/template-functions/_string-comparison.mdx" -import NeComparison from "../partials/template-functions/_ne-comparison.mdx" -import GoSprig from "../partials/template-functions/_go-sprig.mdx" -import UseCases from "../partials/template-functions/_use-cases.mdx" - -# Template Function Examples - -This topic provides examples of how to use Replicated KOTS template functions in various common use cases. For more information about working with KOTS template functions, including the supported syntax and the types of files where KOTS template functions can be used, see [About Template Functions](template-functions-about). - -## Overview - -<GoSprig/> - -<UseCases/> - -For examples demonstrating these use cases and more, see the sections below. - -## Comparison Examples - -This section includes examples of how to use KOTS template functions to compare different types of data. - -### Boolean Comparison - -Boolean values can be used in comparisons to evaluate if a given statement is true or false. Because many KOTS template functions return string values, comparing boolean values often requires using the KOTS [ParseBool](/reference/template-functions-static-context#parsebool) template function to return the boolean represented by the string. - -One common use case for working with boolean values is to check that a given field is present in the customer's license. For example, you might need to show a configuration option on the KOTS Admin Console **Config** page only when the customer's license has a certain entitlement. - -The following example creates a conditional statement in the KOTS Config custom resource that evaluates to true when a specified license field is present in the customer's license _and_ the customer enables a specified configuration option on the Admin Console **Config** page. - -```yaml -# KOTS Config custom resource -apiVersion: kots.io/v1beta1 -kind: Config -metadata: - name: config-sample -spec: - groups: - - name: example_group - title: Example Config - items: - - name: radio_example - title: Select One - type: radio - items: - - name: option_one - title: Option One - - name: option_two - title: Option Two - - name: conditional_item - title: Conditional Item - type: text - # Display this item only when the customer enables the option_one config field *and* - # has the feature-1 entitlement in their license - when: repl{{ and (LicenseFieldValue "feature-1" | ParseBool) (ConfigOptionEquals "radio_example" "option_one")}} -``` - -This example uses the following KOTS template functions: -* [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue) to return the string value of a boolean type license field named `feature-1` - :::note - The LicenseFieldValue template function always returns a string, regardless of the license field type. 
- ::: -* [ParseBool](/reference/template-functions-static-context#parsebool) to convert the string returned by the LicenseFieldValue template function to a boolean -* [ConfigOptionEquals](/reference/template-functions-config-context#configoptionequals) to return a boolean that evaluates to true if the configuration option value is equal to the supplied value - -### Integer Comparison - -Integer values can be compared using operators such as greater than, less than, equal to, and so on. Because many KOTS template functions return string values, working with integer values often requires using another function to return the integer represented by the string, such as: -* KOTS [ParseInt](/reference/template-functions-static-context#parseint), which returns the integer value represented by the string with the option to provide a `base` other than 10 -* Sprig [atoi](https://masterminds.github.io/sprig/conversion.html), which is equivalent to ParseInt(s, 10, 0), converted to type integer - -A common use case for comparing integer values with KOTS template functions is to display different configuration options on the KOTS Admin Console **Config** page depending on integer values from the customer's license. For example, licenses might include an entitlement that defines the number of seats the customer is entitled to. In this case, it can be useful to conditionally display or hide certain fields on the **Config** page depending on the customer's team size. - -<IntegerComparison/> - -### String Comparison - -A common use case for string comparison is to compare the rendered value of a KOTS template function against a string to conditionally show or hide fields on the KOTS Admin Console **Config** page depending on details about the customer's environment. For example, a string comparison can be used to check the Kubernetes distribution of the cluster where an application is deployed. - -<StringComparison/> - -### Not Equal To Comparison - -It can be useful to compare the rendered value of a KOTS template function against another value to check if the two values are different. For example, you can conditionally show fields on the KOTS Admin Console **Config** page only when the Kubernetes distribution of the cluster where the application is deployed is _not_ [Replicated embedded cluster](/vendor/embedded-overview). - -<NeComparison/> - -### Logical AND Comparison - -Logical comparisons such as AND, OR, and NOT can be used with KOTS template functions. A common use case for logical AND comparisons is to construct more complex conditional statements where it is necessary that two different conditions are both true. - -The following example shows how to use an `and` operator that evaluates to true when two different configuration options on the Admin Console **Config** page are both enabled. This example uses the KOTS [ConfigOptionEquals](/reference/template-functions-config-context#configoptionequals) template function to return a boolean that evaluates to true if the configuration option value is equal to the supplied value. 
- -```yaml -# KOTS Config custom resource -apiVersion: kots.io/v1beta1 -kind: Config -metadata: - name: config-sample -spec: - groups: - - name: example_group - title: Example Config - items: - - name: radio_example - title: Select One Example - type: radio - items: - - name: option_one - title: Option One - - name: option_two - title: Option Two - - name: boolean_example - title: Boolean Example - type: bool - default: "0" - - name: conditional_item - title: Conditional Item - type: text - # Display this item only when *both* specified config options are enabled - when: repl{{ and (ConfigOptionEquals "radio_example" "option_one") (ConfigOptionEquals "boolean_example" "1")}} -``` - -As shown below, when both `Option One` and `Boolean Example` are selected, the conditional statement evaluates to true and the `Conditional Item` field is displayed: - -<img alt="Conditional item displayed" src="/images/conditional-item-true.png" width="550px"/> - -[View a larger version of this image](/images/conditional-item-true.png) - -Alternatively, if either `Option One` or `Boolean Example` is not selected, then the conditional statement evaluates to false and the `Conditional Item` field is not displayed: - -<img alt="Option two selected" src="/images/conditional-item-false-option-two.png" width="550px"/> - -[View a larger version of this image](/images/conditional-item-false-option-two.png) - -<img alt="Boolean field deselected" src="/images/conditional-item-false-boolean.png" width="550px"/> - -[View a larger version of this image](/images/conditional-item-false-boolean.png) - -## Conditional Statement Examples - -This section includes examples of using KOTS template functions to construct conditional statements. Conditional statements can be used with KOTS template functions to render different values depending on a given condition. - -### If-Else Statements - -A common use case for if-else statements with KOTS template functions is to set values for resources or objects deployed by your application, such as custom annotations or service types, based on user-specific data. - -This section includes examples of both single line and multi-line if-else statements. Using multi-line formatting can be useful to improve the readability of YAML files when longer or more complex if-else statements are needed. - -Multi-line if-else statements can be constructed using YAML block scalars and block chomping characters to ensure the rendered result is valid YAML. A _folded_ block scalar style is denoted using the greater than (`>`) character. With the folded style, single line breaks in the string are treated as a space. Additionally, the block chomping minus (`-`) character is used to remove all the line breaks at the end of a string. For more information about working with these characters, see [Block Style Productions](https://yaml.org/spec/1.2.2/#chapter-8-block-style-productions) in the YAML documentation. - -:::note -For Helm-based applications that need to use more complex or nested if-else statements, you can alternatively use templating within your Helm chart `templates` rather than in the KOTS HelmChart custom resource. For more information, see [If/Else](https://helm.sh/docs/chart_template_guide/control_structures/#ifelse) in the Helm documentation. 
-:::
-
-#### Single Line
-
-The following example shows if-else statements used in the KOTS HelmChart custom resource `values` field to render different values depending on whether the user selects a load balancer or an ingress controller as the ingress type for the application. This example uses the KOTS [ConfigOptionEquals](/reference/template-functions-config-context#configoptionequals) template function to return a boolean that evaluates to true if the configuration option value is equal to the supplied value.
-
-```yaml
-# KOTS HelmChart custom resource
-apiVersion: kots.io/v1beta2
-kind: HelmChart
-metadata:
-  name: my-app
-spec:
-  chart:
-    name: my-app
-    chartVersion: 0.23.0
-  values:
-    services:
-      my-service:
-        enabled: true
-        appName: ["my-app"]
-        # Render the service type based on the user's selection
-        # '{{repl ...}}' syntax is used for `type` to improve readability of the if-else statement and render a string
-        type: '{{repl if ConfigOptionEquals "ingress_type" "load_balancer" }}LoadBalancer{{repl else }}ClusterIP{{repl end }}'
-        ports:
-          http:
-            enabled: true
-            # Render the HTTP port for the service depending on the user's selection
-            # repl{{ ... }} syntax is used for `port` to render an integer value
-            port: repl{{ if ConfigOptionEquals "ingress_type" "load_balancer" }}repl{{ ConfigOption "load_balancer_port" }}repl{{ else }}8081repl{{ end }}
-            protocol: HTTP
-            targetPort: 8081
-```
-
-#### Multi-Line in KOTS HelmChart Values
-
-The following example uses a multi-line if-else statement in the KOTS HelmChart custom resource to render the path to the Replicated SDK image depending on whether the user pushed images to a local private registry.
-
-This example uses the following KOTS template functions:
-* [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry) to return true if the environment is configured to rewrite images to a local registry
-* [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost) to return the local registry host configured by the user
-* [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace) to return the local registry namespace configured by the user
-
-:::note
-This example uses the `{{repl ...}}` syntax rather than the `repl{{ ... }}` syntax to improve readability in the YAML file. However, both syntaxes are supported for this use case. For more information, see [Syntax](/reference/template-functions-about#syntax) in _About Template Functions_.
-:::
-
-```yaml
-# KOTS HelmChart custom resource
-apiVersion: kots.io/v1beta2
-kind: HelmChart
-metadata:
-  name: samplechart
-spec:
-  values:
-    images:
-      replicated-sdk: >-
-        {{repl if HasLocalRegistry -}}
-        {{repl LocalRegistryHost }}/{{repl LocalRegistryNamespace }}/replicated-sdk:1.0.0-beta.29
-        {{repl else -}}
-        docker.io/replicated/replicated-sdk:1.0.0-beta.29
-        {{repl end}}
-```
-
-Given the example above, if the user is _not_ using a local registry, then the `replicated-sdk` value in the Helm chart is set to the location of the image on the default Docker registry, as shown below:
-
-```yaml
-# Helm chart values file
-
-images:
-  replicated-sdk: 'docker.io/replicated/replicated-sdk:1.0.0-beta.29'
-```
-
-#### Multi-Line in Secret Object
-
-The following example uses multi-line if-else statements in a Secret object deployed by KOTS to conditionally set the database hostname, port, username, and password depending on whether the customer uses the database embedded with the application or brings their own external database.
-
-This example uses the following KOTS template functions:
-* [ConfigOptionEquals](/reference/template-functions-config-context#configoptionequals) to return a boolean that evaluates to true if the configuration option value is equal to the supplied value
-* [ConfigOption](/reference/template-functions-config-context#configoption) to return the user-supplied value for the specified configuration option
-* [Base64Encode](/reference/template-functions-static-context#base64encode) to encode the string with base64
-
-:::note
-This example uses the `{{repl ...}}` syntax rather than the `repl{{ ... }}` syntax to improve readability in the YAML file. However, both syntaxes are supported for this use case. For more information, see [Syntax](/reference/template-functions-about#syntax) in _About Template Functions_.
-:::
-
-```yaml
-# Postgres Secret
-apiVersion: v1
-kind: Secret
-metadata:
-  name: postgres
-data:
-  # Render the value for the database hostname depending on whether an embedded
-  # or external db is used.
-  # Also, base64 encode the rendered value.
-  DB_HOST: >-
-    {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}}
-    {{repl Base64Encode "postgres" }}
-    {{repl else -}}
-    {{repl ConfigOption "external_postgres_host" | Base64Encode }}
-    {{repl end}}
-  DB_PORT: >-
-    {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}}
-    {{repl Base64Encode "5432" }}
-    {{repl else -}}
-    {{repl ConfigOption "external_postgres_port" | Base64Encode }}
-    {{repl end}}
-  DB_USER: >-
-    {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}}
-    {{repl Base64Encode "postgres" }}
-    {{repl else -}}
-    {{repl ConfigOption "external_postgres_user" | Base64Encode }}
-    {{repl end}}
-  DB_PASSWORD: >-
-    {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}}
-    {{repl ConfigOption "embedded_postgres_password" | Base64Encode }}
-    {{repl else -}}
-    {{repl ConfigOption "external_postgres_password" | Base64Encode }}
-    {{repl end}}
-```
-
-### Ternary Operators
-
-Ternary operators are useful for templating strings where certain values within the string must be rendered differently depending on a given condition. Compared to if-else statements, ternary operators are best suited to cases where only a small portion of a string needs to be conditionally rendered. For example, a common use case for ternary operators is to template the path to an image repository based on user-supplied values.
-
-The following example uses ternary operators to render the registry and repository for a private nginx image depending on whether a local image registry is used. This example uses the following KOTS template functions:
-* [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry) to return true if the environment is configured to rewrite images to a local registry
-* [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost) to return the local registry host configured by the user
-* [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace) to return the local registry namespace configured by the user
-
-```yaml
-# KOTS HelmChart custom resource
-apiVersion: kots.io/v1beta2
-kind: HelmChart
-metadata:
-  name: samplechart
-spec:
-  values:
-    image:
-      # If a local registry is configured, use the local registry host.
-      # Otherwise, use proxy.replicated.com
-      registry: repl{{ HasLocalRegistry | ternary LocalRegistryHost "proxy.replicated.com" }}
-      # If a local registry is configured, use the local registry's namespace.
-      # Otherwise, use proxy/my-app/quay.io/my-org
-      repository: repl{{ HasLocalRegistry | ternary LocalRegistryNamespace "proxy/my-app/quay.io/my-org" }}/nginx
-      tag: v1.0.1
-```
-
-## Formatting Examples
-
-This section includes examples of how to format the rendered output of KOTS template functions.
-
-In addition to the examples in this section, KOTS template functions in the Static context include several options for formatting values, such as converting strings to upper or lower case and trimming leading and trailing space characters. For more information, see [Static Context](/reference/template-functions-static-context).
-
-### Indentation
-
-When using template functions within nested YAML, it is important that the rendered template functions are indented correctly so that the resulting YAML is valid. A common use case for adding indentation to KOTS template functions is when templating annotations in the metadata of resources or objects deployed by your application based on user-supplied values.
-
-The [nindent](https://masterminds.github.io/sprig/strings.html) function can be used to prepend a new line to the beginning of the string and indent the string by a specified number of spaces.
-
-#### Indent Templated Helm Chart Values
-
-The following example shows templating a Helm chart value that sets annotations for an Ingress object. This example uses the KOTS [ConfigOption](/reference/template-functions-config-context#configoption) template function to return user-supplied annotations from the Admin Console **Config** page. It also uses [nindent](https://masterminds.github.io/sprig/strings.html) to indent the rendered value ten spaces.
-
-```yaml
-# KOTS HelmChart custom resource
-
-apiVersion: kots.io/v1beta2
-kind: HelmChart
-metadata:
-  name: myapp
-spec:
-  values:
-    services:
-      myservice:
-        annotations: repl{{ ConfigOption "additional_annotations" | nindent 10 }}
-```
-
-#### Indent Templated Annotations in Manifest Files
-
-The following example shows templating annotations for an Ingress object. This example uses the KOTS [ConfigOption](/reference/template-functions-config-context#configoption) template function to return user-supplied annotations from the Admin Console **Config** page. It also uses [nindent](https://masterminds.github.io/sprig/strings.html) to indent the rendered value four spaces.
-
-```yaml
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: example-ingress
-  annotations:
-    kots.io/placeholder: |-
-      repl{{ ConfigOption "ingress_annotations" | nindent 4 }}
-```
-
-### Render Quoted Values
-
-To wrap a rendered value in quotes, you can pipe the result of a KOTS template function in the `repl{{ ... }}` syntax through `| quote`. Or, you can use the `'{{repl ... }}'` syntax instead.
-
-One use case for quoted values in YAML is when indicator characters are included in values. In YAML, indicator characters (`-`, `?`, `:`) have special semantics and must be escaped if used in values. For more information, see [Indicator Characters](https://yaml.org/spec/1.2.2/#53-indicator-characters) in the YAML documentation.
-
-#### Example with `'{{repl ... 
}}'` Syntax - -```yaml -customTag: '{{repl ConfigOption "tag" }}' -``` -#### Example with `| quote` - -```yaml -customTag: repl{{ ConfigOption "tag" | quote }} -``` - -The result for both examples is: - -```yaml -customTag: 'key: value' -``` - -## Variables Example - -This section includes an example of using variables with KOTS template functions. For more information, see [Variables](https://pkg.go.dev/text/template#hdr-Variables) in the Go documentation. - -### Using Variables to Generate TLS Certificates in JSON - -You can use the Sprig [genCA](https://masterminds.github.io/sprig/crypto.html) and [genSignedCert](https://masterminds.github.io/sprig/crypto.html) functions with KOTS template functions to generate certificate authorities (CAs) and signed certificates in JSON. One use case for this is to generate default CAs, certificates, and keys that users can override with their own values on the Admin Console **Config** page. - -The Sprig [genCA](https://masterminds.github.io/sprig/crypto.html) and [genSignedCert](https://masterminds.github.io/sprig/crypto.html) functions require the subject's common name and the certificate's validity duration in days. The `genSignedCert` function also requires the CA that will sign the certificate. You can use variables and KOTS template functions to provide the necessary parameters when calling these functions. - -The following example shows how to use variables and KOTS template functions in the `default` property of a [`hidden`](/reference/custom-resource-config#hidden) item to pass parameters to the `genCA` and `genSignedCert` functions and generate a CA, certificate, and key. This example uses a `hidden` item (which is an item that is not displayed on the **Config** page) to generate the certificate chain because variables used in the KOTS Config custom resource can only be accessed from the same item where they were declared. For this reason, `hidden` items can be useful for evaluating complex templates. - -This example uses the following: -* KOTS [ConfigOption](/reference/template-functions-config-context#configoption) template function to render the user-supplied value for the ingress hostname. This is passed as a parameter to the [genCA](https://masterminds.github.io/sprig/crypto.html) and [genSignedCert](https://masterminds.github.io/sprig/crypto.html) functions -* Sprig [genCA](https://masterminds.github.io/sprig/crypto.html) and [genSignedCert](https://masterminds.github.io/sprig/crypto.html) functions to generate a CA and a certificate signed by the CA -* Sprig [dict](https://masterminds.github.io/sprig/dicts.html), [set](https://masterminds.github.io/sprig/dicts.html), and [dig](https://masterminds.github.io/sprig/dicts.html) dictionary functions to create a dictionary with entries for both the CA and the certificate, then traverse the dictionary to return the values of the CA, certificate, and key. -* [toJson](https://masterminds.github.io/sprig/defaults.html) and [fromJson](https://masterminds.github.io/sprig/defaults.html) Sprig functions to encode the CA and certificate into a JSON string, then decode the JSON for the purpose of displaying the values on the **Config** page as defaults - -:::important -Default values are treated as ephemeral. The following certificate chain is recalculated each time the application configuration is modified. Before using this example with your application, be sure that your application can handle updating these parameters dynamically. 
-::: - -```yaml -apiVersion: kots.io/v1beta1 -kind: Config -metadata: - name: config-sample -spec: - groups: - - name: example_settings - title: My Example Config - items: - - name: ingress_hostname - title: Ingress Hostname - help_text: Enter a DNS hostname to use as the cert's CN. - type: text - - name: tls_json - title: TLS JSON - type: textarea - hidden: true - default: |- - repl{{ $ca := genCA (ConfigOption "ingress_hostname") 365 }} - repl{{ $tls := dict "ca" $ca }} - repl{{ $cert := genSignedCert (ConfigOption "ingress_hostname") (list ) (list (ConfigOption "ingress_hostname")) 365 $ca }} - repl{{ $_ := set $tls "cert" $cert }} - repl{{ toJson $tls }} - - name: tls_ca - title: Signing Authority - type: textarea - default: repl{{ fromJson (ConfigOption "tls_json") | dig "ca" "Cert" "" }} - - name: tls_cert - title: TLS Cert - type: textarea - default: repl{{ fromJson (ConfigOption "tls_json") | dig "cert" "Cert" "" }} - - name: tls_key - title: TLS Key - type: textarea - default: repl{{ fromJson (ConfigOption "tls_json") | dig "cert" "Key" "" }} -``` - -The following image shows how the default values for the CA, certificate, and key are displayed on the **Config** page: - -<img alt="Default values for CA, certificate, and key on the Config page" src="/images/certificate-chain-default-values.png" width="550px"/> - -[View a larger version of this image](/images/certificate-chain-default-values.png) - -## Additional Examples - -The following topics include additional examples of using KOTS template functions in Kubernetes manifests deployed by KOTS or in KOTS custom resources: - -* [Add Status Informers](/vendor/admin-console-display-app-status#add-status-informers) in _Adding Resource Status Informers_ -* [Conditionally Including or Excluding Resources](/vendor/packaging-include-resources) -* [Example: Including Optional Helm Charts](/vendor/helm-optional-charts) -* [Example: Adding Database Configuration Options](/vendor/tutorial-adding-db-config) -* [Templating Annotations](/vendor/resources-annotations-templating) -* [Tutorial: Set Helm Chart Values with KOTS](/vendor/tutorial-config-setup) - -================ -File: docs/reference/template-functions-identity-context.md -================ -# Identity Context - -## IdentityServiceEnabled - -```go -func IdentityServiceEnabled() bool -``` - -Returns true if the Replicated identity service has been enabled and configured by the end customer. - -```yaml -apiVersion: apps/v1 -kind: Deployment -... - env: - - name: IDENTITY_ENABLED - value: repl{{ IdentityServiceEnabled }} -``` - - -## IdentityServiceClientID - -```go -func IdentityServiceClientID() string -``` - -Returns the client ID required for the application to connect to the identity service OIDC server. - -```yaml -apiVersion: apps/v1 -kind: Deployment -... - env: - - name: CLIENT_ID - value: repl{{ IdentityServiceClientID }} -``` - - -## IdentityServiceClientSecret - -```go -func IdentityServiceClientSecret() (string, error) -``` - -Returns the client secret required for the application to connect to the identity service OIDC server. - -```yaml -apiVersion: v1 -kind: Secret -... -data: - CLIENT_SECRET: repl{{ IdentityServiceClientSecret | b64enc }} -``` - - -## IdentityServiceRoles - -```go -func IdentityServiceRoles() map[string][]string -``` - -Returns a list of groups specified by the customer mapped to a list of roles as defined in the Identity custom resource manifest file. 
-
-For more information about roles in the Identity custom resource, see [Identity](custom-resource-identity#roles) in the _Custom resources_ section.
-
-```yaml
-apiVersion: apps/v1
-kind: Deployment
-...
-    env:
-      - name: RESTRICTED_GROUPS
-        value: repl{{ IdentityServiceRoles | keys | toJson }}
-```
-
-
-## IdentityServiceName
-
-```go
-func IdentityServiceName() string
-```
-
-Returns the Service name for the identity service OIDC server.
-
-```yaml
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-...
-    - path: /dex
-      backend:
-        service:
-          name: repl{{ IdentityServiceName }}
-          port:
-            number: repl{{ IdentityServicePort }}
-```
-
-
-## IdentityServicePort
-
-```go
-func IdentityServicePort() string
-```
-
-Returns the Service port number for the identity service OIDC server.
-
-```yaml
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-...
-    - path: /dex
-      backend:
-        service:
-          name: repl{{ IdentityServiceName }}
-          port:
-            number: repl{{ IdentityServicePort }}
-```
-
-================
-File: docs/reference/template-functions-kurl-context.md
-================
-# kURL Context
-
-## kURL Context Functions
-
-For applications installed in embedded clusters created with Replicated kURL, you can use template functions to show all options that the cluster was installed with.
-
-The Installer custom resource reflects both changes made to the install script by posting YAML to the kURL API and changes made with `-s` flags at runtime. These functions are not available on the config page.
-
-KurlBool, KurlInt, KurlString, and KurlOption all take a string `yamlPath` as a parameter.
-This path is the path within the Installer manifest file, with the add-on and subfield delineated by a period (`.`).
-For example, the kURL Kubernetes version can be accessed as `{{repl KurlString "Kubernetes.Version" }}`.
-
-KurlBool, KurlInt, and KurlString return a bool, integer, and string value respectively.
-If used on a valid field of the wrong type, these functions return the zero value for their type: false, 0, and "" respectively.
-The `KurlOption` function converts all bool, int, and string fields to string.
-All functions return their zero values if there is nothing at the specified yamlPath, or if these functions are run in a cluster without an Installer custom resource (that is, a cluster not created by kURL).
-
-The following sections describe each of the kURL context template functions:
-
-## KurlBool
-
-```go
-func KurlBool(yamlPath string) bool
-```
-
-Returns the value at the yamlPath if there is a valid boolean there, or false if there is not.
-
-```yaml
-'{{repl KurlBool "Docker.NoCEonEE" }}'
-```
-
-
-## KurlInt
-
-```go
-func KurlInt(yamlPath string) int
-```
-
-Returns the value at the yamlPath if there is a valid integer there, or 0 if there is not.
-
-```yaml
-'{{repl KurlInt "Rook.CephReplicaCount" }}'
-```
-
-
-## KurlString
-
-```go
-func KurlString(yamlPath string) string
-```
-
-Returns the value at the yamlPath if there is a valid string there, or "" if there is not.
-
-```yaml
-'{{repl KurlString "Kubernetes.Version" }}'
-```
-
-
-## KurlOption
-
-```go
-func KurlOption(yamlPath string) string
-```
-
-Returns the value at the yamlPath if there is a valid string, int, or bool value there, or "" if there is not.
-Int and Bool values will be converted to string values.
-
-```yaml
-'{{repl KurlOption "Rook.CephReplicaCount" }}'
-```
-
-
-## KurlAll
-
-```go
-func KurlAll() string
-```
-
-Returns all values in the Installer custom resource as key:value pairs, sorted by key.
-
-```yaml
-'{{repl KurlAll }}'
-```
-
-================
-File: docs/reference/template-functions-license-context.md
-================
-# License Context
-
-## LicenseFieldValue
-```go
-func LicenseFieldValue(name string) string
-```
-LicenseFieldValue returns the value of the specified license field. LicenseFieldValue accepts custom license fields and all built-in license fields. For a list of all built-in fields, see [Built-In License Fields](/vendor/licenses-using-builtin-fields).
-
-LicenseFieldValue always returns a string, regardless of the license field type. To return integer or boolean values, you need to use the [ParseInt](/reference/template-functions-static-context#parseint) or [ParseBool](/reference/template-functions-static-context#parsebool) template function to convert the string value.
-
-#### String License Field
-
-The following example returns the value of the built-in `customerName` license field:
-
-```yaml
-customerName: '{{repl LicenseFieldValue "customerName" }}'
-```
-#### Integer License Field
-
-The following example returns the value of a custom integer license field named `numSeats`:
-
-```yaml
-numSeats: repl{{ LicenseFieldValue "numSeats" | ParseInt }}
-```
-This example uses [ParseInt](/reference/template-functions-static-context#parseint) to convert the returned value to an integer.
-
-#### Boolean License Field
-
-The following example returns the value of a custom boolean license field named `feature-1`:
-
-```yaml
-feature-1: repl{{ LicenseFieldValue "feature-1" | ParseBool }}
-```
-This example uses [ParseBool](/reference/template-functions-static-context#parsebool) to convert the returned value to a boolean.
-
-## LicenseDockerCfg
-```go
-func LicenseDockerCfg() string
-```
-LicenseDockerCfg returns a value that can be written to an image pull secret when the secret needs to be created manually.
-Replicated KOTS creates and injects this secret automatically under normal conditions, but some deployments (for example, deployments with static, additional namespaces) might need to include this secret themselves.
-
-```yaml
-apiVersion: v1
-kind: Secret
-type: kubernetes.io/dockerconfigjson
-metadata:
-  name: myapp-registry
-  namespace: my-other-namespace
-data:
-  .dockerconfigjson: repl{{ LicenseDockerCfg }}
-```
-
-## Sequence
-
-```go
-func Sequence() int64
-```
-Sequence is the deployment sequence number of the application.
-The sequence starts at 0 for each installation, and increases with every app update, config change, license update, and registry setting change.
-
-```yaml
-'{{repl Sequence }}'
-```
-
-## Cursor
-
-```go
-func Cursor() string
-```
-Cursor is the channel sequence of the app.
-For example, if 5 releases have been promoted to the channel that the app is running from, then this function returns the string `5`.
-
-```yaml
-'{{repl Cursor }}'
-```
-
-## ChannelName
-
-```go
-func ChannelName() string
-```
-ChannelName is the name of the deployed channel of the app.
-
-```yaml
-'{{repl ChannelName }}'
-```
-
-## VersionLabel
-
-```go
-func VersionLabel() string
-```
-VersionLabel is the semantic version of the app, as specified when promoting a release to a channel.
-
-```yaml
-'{{repl VersionLabel }}'
-```
-
-## ReleaseNotes
-
-```go
-func ReleaseNotes() string
-```
-ReleaseNotes is the release notes of the current version of the app.
-
-```yaml
-'{{repl ReleaseNotes }}'
-```
-
-## IsAirgap
-
-```go
-func IsAirgap() bool
-```
-IsAirgap is `true` when the app was installed by uploading an air gap package, and false otherwise.
-
-```yaml
-'{{repl IsAirgap }}'
-```
-
-================
-File: docs/reference/template-functions-static-context.md
-================
-# Static Context
-
-## About Mastermind Sprig
-
-Many of the utility functions provided come from sprig, a third-party library of Go template functions.
-For more information, see [Sprig Function Documentation](https://masterminds.github.io/sprig/) on the sprig website.
-
-## Certificate Functions
-
-### PrivateCACert
-
-> Introduced in KOTS v1.117.0
-
-```go
-func PrivateCACert() string
-```
-
-PrivateCACert returns the name of a ConfigMap that contains private CA certificates provided by the end user. For Embedded Cluster installations, these certificates are provided with the `--private-ca` flag for the `install` command. For KOTS installations, the user provides the ConfigMap using the `--private-ca-configmap` flag for the `install` command.
-
-You can use this template function to mount the specified ConfigMap so your containers can access the internet through enterprise proxies that issue their own TLS certificates in order to inspect traffic.
-
-:::note
-This function will return the name of the ConfigMap even if the ConfigMap has no entries. If no ConfigMap exists, this function returns the empty string.
-:::
-
-## Cluster Information Functions
-
-### Distribution
-```go
-func Distribution() string
-```
-Distribution returns the Kubernetes distribution detected. The possible return values are:
-
-* aks
-* digitalOcean
-* dockerDesktop
-* eks
-* embedded-cluster
-* gke
-* ibm
-* k0s
-* k3s
-* kind
-* kurl
-* microk8s
-* minikube
-* oke
-* openShift
-* rke2
-
-:::note
-[IsKurl](#iskurl) can also be used to detect kURL instances.
-:::
-
-#### Detect the Distribution
-```yaml
-repl{{ Distribution }}
-```
-#### Equal To Comparison
-```yaml
-repl{{ eq Distribution "gke" }}
-```
-#### Not Equal To Comparison
-```yaml
-repl{{ ne Distribution "embedded-cluster" }}
-```
-See [Functions](https://pkg.go.dev/text/template#hdr-Functions) in the Go documentation.
-
-### IsKurl
-```go
-func IsKurl() bool
-```
-IsKurl returns true if running within a kURL-based installation.
-#### Detect kURL Installations
-```yaml
-repl{{ IsKurl }}
-```
-#### Detect Non-kURL Installations
-```yaml
-repl{{ not IsKurl }}
-```
-See [Functions](https://pkg.go.dev/text/template#hdr-Functions) in the Go documentation.
-
-### KotsVersion
-
-```go
-func KotsVersion() string
-```
-
-KotsVersion returns the current version of KOTS.
-
-```yaml
-repl{{ KotsVersion }}
-```
-
-You can compare the KOTS version as follows:
-```yaml
-repl{{KotsVersion | semverCompare ">= 1.19"}}
-```
-
-This returns `true` if the KOTS version is greater than or equal to `1.19`.
-
-For more complex comparisons, see [Semantic Version Functions](https://masterminds.github.io/sprig/semver.html) in the sprig documentation.
-
-### KubernetesMajorVersion
-
-> Introduced in KOTS v1.92.0
-
-```go
-func KubernetesMajorVersion() string
-```
-
-KubernetesMajorVersion returns the Kubernetes server *major* version.
-
-```yaml
-repl{{ KubernetesMajorVersion }}
-```
-
-You can compare the Kubernetes major version as follows:
-```yaml
-repl{{lt (KubernetesMajorVersion | ParseInt) 2 }}
-```
-
-This returns `true` if the Kubernetes major version is less than `2`.
-
-### KubernetesMinorVersion
-
-> Introduced in KOTS v1.92.0
-
-```go
-func KubernetesMinorVersion() string
-```
-
-KubernetesMinorVersion returns the Kubernetes server *minor* version.
-
-```yaml
-repl{{ KubernetesMinorVersion }}
-```
-
-You can compare the Kubernetes minor version as follows:
-```yaml
-repl{{gt (KubernetesMinorVersion | ParseInt) 19 }}
-```
-
-This returns `true` if the Kubernetes minor version is greater than `19`.
-
-### KubernetesVersion
-
-> Introduced in KOTS v1.92.0
-
-```go
-func KubernetesVersion() string
-```
-
-KubernetesVersion returns the Kubernetes server version.
-
-```yaml
-repl{{ KubernetesVersion }}
-```
-
-You can compare the Kubernetes version as follows:
-```yaml
-repl{{KubernetesVersion | semverCompare ">= 1.19"}}
-```
-
-This returns `true` if the Kubernetes version is greater than or equal to `1.19`.
-
-For more complex comparisons, see [Semantic Version Functions](https://masterminds.github.io/sprig/semver.html) in the sprig documentation.
-
-### Namespace
-```go
-func Namespace() string
-```
-Namespace returns the Kubernetes namespace that the application belongs to.
-```yaml
-'{{repl Namespace}}'
-```
-
-### NodeCount
-```go
-func NodeCount() int
-```
-NodeCount returns the number of nodes detected within the Kubernetes cluster.
-```yaml
-repl{{ NodeCount }}
-```
-
-### Lookup
-
-> Introduced in KOTS v1.103.0
-
-```go
-func Lookup(apiversion string, resource string, namespace string, name string) map[string]interface{}
-```
-
-Lookup searches resources in a running cluster and returns a resource or resource list.
-
-Lookup uses the Helm lookup function and has the same functionality. For more information, see [lookup](https://helm.sh/docs/chart_template_guide/functions_and_pipelines/#using-the-lookup-function) in the Helm documentation.
-
-```yaml
-repl{{ Lookup "API_VERSION" "KIND" "NAMESPACE" "NAME" }}
-```
-
-Both `NAME` and `NAMESPACE` are optional and can be passed as an empty string ("").
-
-The following combinations of parameters are possible:
-
-<table>
-  <tr>
-    <th>Behavior</th>
-    <th>Lookup function</th>
-  </tr>
-  <tr>
-    <td style={{ fontSize: 14 }}><code>kubectl get pod mypod -n mynamespace</code></td>
-    <td style={{ fontSize: 14 }}><code>repl{{ Lookup "v1" "Pod" "mynamespace" "mypod" }}</code></td>
-  </tr>
-  <tr>
-    <td style={{ fontSize: 14 }}><code>kubectl get pods -n mynamespace</code></td>
-    <td style={{ fontSize: 14 }}><code>repl{{ Lookup "v1" "Pod" "mynamespace" "" }}</code></td>
-  </tr>
-  <tr>
-    <td style={{ fontSize: 14 }}><code>kubectl get pods --all-namespaces</code></td>
-    <td style={{ fontSize: 14 }}><code>repl{{ Lookup "v1" "Pod" "" "" }}</code></td>
-  </tr>
-  <tr>
-    <td style={{ fontSize: 14 }}><code>kubectl get namespace mynamespace</code></td>
-    <td style={{ fontSize: 14 }}><code>repl{{ Lookup "v1" "Namespace" "" "mynamespace" }}</code></td>
-  </tr>
-  <tr>
-    <td style={{ fontSize: 14 }}><code>kubectl get namespaces</code></td>
-    <td style={{ fontSize: 14 }}><code>repl{{ Lookup "v1" "Namespace" "" "" }}</code></td>
-  </tr>
-</table>
-
-The following describes working with values returned by the Lookup function:
-
-* When Lookup finds an object, it returns a dictionary with the key-value pairs from the object. This dictionary can be navigated to extract specific values. For example, the following returns the annotations for the `mynamespace` object:
-
-  ```
-  repl{{ (Lookup "v1" "Namespace" "" "mynamespace").metadata.annotations }}
-  ```
-
-* When Lookup returns a list of objects, it is possible to access the object list through the `items` field.
For example:
-
-  ```
-  services: |
-    repl{{- range $index, $service := (Lookup "v1" "Service" "mynamespace" "").items }}
-      - repl{{ $service.metadata.name }}
-    repl{{- end }}
-  ```
-
-  For an array value type, omit the `|`. For example:
-
-  ```
-  services:
-    repl{{- range $index, $service := (Lookup "v1" "Service" "mynamespace" "").items }}
-      - repl{{ $service.metadata.name }}
-    repl{{- end }}
-  ```
-
-* When no object is found, Lookup returns an empty value. This can be used to check for the existence of an object.
-
-## Date Functions
-
-### Now
-```go
-func Now() string
-```
-Returns the current timestamp as an RFC3339 formatted string.
-```yaml
-'{{repl Now }}'
-```
-
-### NowFmt
-```go
-func NowFmt(format string) string
-```
-Returns the current timestamp as a formatted string.
-For information about Go time formatting guidelines, see [Constants](https://golang.org/pkg/time/#pkg-constants) in the Go documentation.
-```yaml
-'{{repl NowFmt "20060102" }}'
-```
-
-## Encoding Functions
-
-### Base64Decode
-```go
-func Base64Decode(stringToDecode string) string
-```
-Returns the decoded string from a Base64 encoded value.
-```yaml
-'{{repl ConfigOption "base_64_encoded_name" | Base64Decode }}'
-```
-
-### Base64Encode
-```go
-func Base64Encode(stringToEncode string) string
-```
-Returns a Base64 encoded string.
-```yaml
-'{{repl ConfigOption "name" | Base64Encode }}'
-```
-
-### UrlEncode
-```go
-func UrlEncode(stringToEncode string) string
-```
-Returns the string, URL encoded.
-Equivalent to the `QueryEscape` function within the golang `net/url` library. For more information, see [func QueryEscape](https://godoc.org/net/url#QueryEscape) in the Go documentation.
-```yaml
-'{{repl ConfigOption "smtp_email" | UrlEncode }}:{{repl ConfigOption "smtp_password" | UrlEncode }}@smtp.example.com:587'
-```
-
-### UrlPathEscape
-
-```go
-func UrlPathEscape(stringToEncode string) string
-```
-Returns the string, URL *path* encoded.
-Equivalent to the `PathEscape` function within the golang `net/url` library. For more information, see [func PathEscape](https://godoc.org/net/url#PathEscape) in the Go documentation.
-```yaml
-'{{repl ConfigOption "smtp_email" | UrlPathEscape }}:{{repl ConfigOption "smtp_password" | UrlPathEscape }}@smtp.example.com:587'
-```
-
-## Encryption Functions
-
-### KubeSeal
-```go
-func KubeSeal(certData string, namespace string, name string, value string) string
-```
-KubeSeal returns the `value` encrypted ("sealed") with the provided Sealed Secrets certificate (`certData`), scoped to the given `namespace` and `name`, so that the result can be used in a SealedSecret resource.
-
-## Integer and Float Functions
-
-### HumanSize
-```go
-func HumanSize(size interface{}) string
-```
-HumanSize returns a human-readable approximation of a size in bytes, capped at 4 significant digits (for example, "2.746 MB", "796 KB").
-The size must be an integer or floating point number.
-```yaml
-'{{repl ConfigOption "min_size_bytes" | HumanSize }}'
-```
-
-## Proxy Functions
-
-### HTTPProxy
-
-```go
-func HTTPProxy() string
-```
-HTTPProxy returns the address of the HTTP proxy that the Admin Console is configured to use.
-```yaml
-repl{{ HTTPProxy }}
-```
-
-### HTTPSProxy
-
-```go
-func HTTPSProxy() string
-```
-HTTPSProxy returns the address of the HTTPS proxy that the Admin Console is configured to use.
-```yaml
-repl{{ HTTPSProxy }}
-```
-
-### NoProxy
-
-```go
-func NoProxy() string
-```
-NoProxy returns the comma-separated list of no-proxy addresses that the Admin Console is configured to use.
-```yaml
-repl{{ NoProxy }}
-```
-
-## Math Functions
-### Add
-```go
-func Add(x interface{}, y interface{}) interface{}
-```
-Adds x and y.
- -If at least one of the operands is a floating point number, the result will be a floating point number. - -If both operands are integers, the result will be an integer. -```yaml -'{{repl Add (ConfigOption "maximum_users") 1}}' -``` - -### Div -```go -func Div(x interface{}, y interface{}) interface{} -``` -Divides x by y. - -If at least one of the operands is a floating point number, the result will be a floating point number. - -If both operands are integers, the result will be an integer and will be rounded down. -```yaml -'{{repl Div (ConfigOption "maximum_users") 2.0}}' -``` - -### Mult -```go -func Mult(x interface{}, y interface{}) interface{} -``` -Multiplies x and y. - -Both operands must be either an integer or a floating point number. - -If at least one of the operands is a floating point number, the result will be a floating point number. - -If both operands are integers, the result will be an integer. -```yaml -'{{repl Mult (NodePrivateIPAddressAll "DB" "redis" | len) 2}}' -``` - -If a template function returns a string, the value must be converted to an integer or a floating point number first: -```yaml -'{{repl Mult (ConfigOption "session_cookie_age" | ParseInt) 86400}}' -``` - -### Sub -```go -func Sub(x interface{}, y interface{}) interface{} -``` -Subtracts y from x. - -If at least one of the operands is a floating point number, the result will be a floating point number. - -If both operands are integers, the result will be an integer. -```yaml -'{{repl Sub (ConfigOption "maximum_users") 1}}' -``` - -## String Functions - -### ParseBool -```go -func ParseBool(str string) bool -``` -ParseBool returns the boolean value represented by the string. -```yaml -'{{repl ConfigOption "str_value" | ParseBool }}' -``` - -### ParseFloat -```go -func ParseFloat(str string) float64 -``` -ParseFloat returns the float value represented by the string. -```yaml -'{{repl ConfigOption "str_value" | ParseFloat }}' -``` - -### ParseInt -```go -func ParseInt(str string, args ...int) int64 -``` -ParseInt returns the integer value represented by the string with optional base (default 10). -```yaml -'{{repl ConfigOption "str_value" | ParseInt }}' -``` - -### ParseUint -```go -func ParseUint(str string, args ...int) uint64 -``` -ParseUint returns the unsigned integer value represented by the string with optional base (default 10). -```yaml -'{{repl ConfigOption "str_value" | ParseUint }}' -``` - -### RandomString -```go -func RandomString(length uint64, providedCharset ...string) string -``` -Returns a random string with the desired length and charset. -Provided charsets must be Perl formatted and match individual characters. -If no charset is provided, `[_A-Za-z0-9]` will be used. - -#### Examples - -The following example generates a 64-character random string: - -```yaml -'{{repl RandomString 64}}' -``` -The following example generates a 64-character random string that contains `a`s and `b`s: - -```yaml -'{{repl RandomString 64 "[ab]" }}' -``` -#### Generating Persistent and Ephemeral Strings - -When you assign the RandomString template function to a `value` key in the Config custom resource, you can use the `hidden` and `readonly` properties to control the behavior of the RandomString function each time it is called. The RandomString template function is called each time the user deploys a change to the configuration settings for the application. 
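-
-For example, the following shows a minimal sketch of a Config item that assigns the RandomString template function to a `value` key. The group context is omitted, and the item name, type, and string length are illustrative:
-
-```yaml
-# KOTS Config custom resource (excerpt)
-items:
-  - name: generated_password
-    title: Generated Password
-    type: password
-    # hidden and readonly control whether the generated value
-    # persists between configuration changes (see the table below)
-    hidden: true
-    readonly: false
-    value: '{{repl RandomString 32}}'
-```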
-
-Depending on whether the `hidden` and `readonly` properties are `true` or `false`, the random string generated by a RandomString template function in a `value` key is either ephemeral or persistent between configuration changes:
-
-* **Ephemeral**: The value of the random string _changes_ when the user deploys a change to the configuration settings for the application.
-* **Persistent**: The value of the random string does _not_ change when the user deploys a change to the configuration settings for the application.
-
-For more information about these properties, see [`hidden`](custom-resource-config#hidden) and [`readonly`](custom-resource-config#readonly) in _Config_.
-
-:::note
-If you assign the RandomString template function to a `default` key in the Config custom resource rather than a `value` key, then the `hidden` and `readonly` properties do _not_ affect the behavior of the RandomString template function. For more information about the behavior of the `default` key in the Config custom resource, see [`default`](custom-resource-config#default) in _Config_.
-:::
-
-The following table describes the behavior of the RandomString template function when it is assigned to a `value` key in the Config custom resource and the `hidden` and `readonly` properties are `true` or `false`:
-
-<table>
-  <tr>
-    <th width="15%">readonly</th>
-    <th width="15%">hidden</th>
-    <th width="15%">Outcome</th>
-    <th width="55%">Use Case</th>
-  </tr>
-  <tr>
-    <td>false</td>
-    <td>true</td>
-    <td>Persistent</td>
-    <td>
-      <p>Set <code>readonly</code> to <code>false</code> and <code>hidden</code> to <code>true</code> if:</p>
-      <ul>
-        <li>The random string must <em>not</em> change each time the user deploys a change to the application's configuration settings.</li>
-        <li>The user does <em>not</em> need to see or change, or must be prevented from seeing or changing, the value of the random string.</li>
-      </ul>
-    </td>
-  </tr>
-  <tr>
-    <td>true</td>
-    <td>false</td>
-    <td>Ephemeral</td>
-    <td>
-      <p>Set <code>readonly</code> to <code>true</code> and <code>hidden</code> to <code>false</code> if:</p>
-      <ul>
-        <li>The random string <em>must</em> change each time the user deploys a change to the application's configuration settings.</li>
-        <li>The user does <em>not</em> need to change, or must be prevented from changing, the value of the random string.</li>
-        <li>The user <em>must</em> be able to see the value of the random string.</li>
-      </ul>
-    </td>
-  </tr>
-  <tr>
-    <td>true</td>
-    <td>true</td>
-    <td>Ephemeral</td>
-    <td>
-      <p>Set <code>readonly</code> to <code>true</code> and <code>hidden</code> to <code>true</code> if:</p>
-      <ul>
-        <li>The random string <em>must</em> change each time the user deploys a change to the application's configuration settings.</li>
-        <li>The user does <em>not</em> need to see or change, or must be prevented from seeing or changing, the value of the random string.</li>
-      </ul>
-    </td>
-  </tr>
-  <tr>
-    <td>false</td>
-    <td>false</td>
-    <td>Persistent</td>
-    <td>
-      <p>Set <code>readonly</code> to <code>false</code> and <code>hidden</code> to <code>false</code> if:</p>
-      <ul>
-        <li>The random string must <em>not</em> change each time the user deploys a change to the application's configuration settings.</li>
-        <li>The user <em>must</em> be able to see and change the value of the random string.</li>
-      </ul>
-      <p>For example, set both <code>readonly</code> and <code>hidden</code> to <code>false</code> to generate a random password that users must be able to see and then change to a different
value that they choose.</p>
-    </td>
-  </tr>
-</table>
-
-### Split
-```go
-func Split(s string, sep string) []string
-```
-Split slices s into all substrings separated by sep and returns an array of the substrings between those separators.
-```yaml
-'{{repl Split "A,B,C" "," }}'
-```
-
-Combining `Split` and `index`:
-Assuming the `github_url` parameter is set to `https://github.mycorp.internal:3131`, the following sets
-`GITHUB_HOSTNAME` to `github.mycorp.internal`:
-```yaml
-'{{repl index (Split (index (Split (ConfigOption "github_url") "/") 2) ":") 0}}'
-```
-
-### ToLower
-```go
-func ToLower(stringToAlter string) string
-```
-Returns the string, in lowercase.
-```yaml
-'{{repl ConfigOption "company_name" | ToLower }}'
-```
-
-### ToUpper
-```go
-func ToUpper(stringToAlter string) string
-```
-Returns the string, in uppercase.
-```yaml
-'{{repl ConfigOption "company_name" | ToUpper }}'
-```
-
-### Trim
-```go
-func Trim(s string, args ...string) string
-```
-Trim returns a string with all leading and trailing strings contained in the optional args removed (default space).
-```yaml
-'{{repl Trim (ConfigOption "str_value") "." }}'
-```
-
-### TrimSpace
-```go
-func TrimSpace(s string) string
-```
-TrimSpace returns a string with all leading and trailing spaces removed.
-```yaml
-'{{repl ConfigOption "str_value" | TrimSpace }}'
-```
-
-### YamlEscape
-```go
-func YamlEscape(input string) string
-```
-
-YamlEscape returns an escaped and quoted version of the input string, suitable for use within a YAML document.
-This can be useful when dealing with user-uploaded files that may include null bytes and other nonprintable characters. For more information about printable characters, see [Character Set](https://yaml.org/spec/1.2.2/#51-character-set) in the YAML documentation.
-
-```yaml
-repl{{ ConfigOptionData "my_file_upload" | YamlEscape }}
-```
-
-================
-File: docs/reference/vendor-api-using.md
-================
-import ApiAbout from "../partials/vendor-api/_api-about.mdx"
-
-# Using the Vendor API v3
-
-This topic describes how to use Replicated Vendor API authentication tokens to make API calls.
-
-## About the Vendor API
-
-<ApiAbout/>
-
-## API Token Requirement
-
-To use the Vendor API v3, you need a token for authorization. You provide the token as the value of the `Authorization` header of Vendor API calls. For example, to pass a token as the authorization header in a request:
-
-```
-curl --request GET \
-  --url https://api.replicated.com/vendor/v3/customers \
-  --header 'Accept: application/json' \
-  --header 'Authorization: my-token'
-```
-
-Generate a service account or user API token in the Vendor Portal. The token must have `Read/Write` access to create new releases. See [Generating API Tokens](/vendor/replicated-api-tokens).
-
-## Vendor API v3 Documentation
-
-For Vendor API documentation and an interactive API console, see [Vendor API v3 Reference](https://replicated-vendor-api.readme.io/v3/reference/createapp).
-
-For the Vendor API swagger specification, see [vendor-api-v3.json](https://api.replicated.com/vendor/v3/spec/vendor-api-v3.json).
-
-![vendor api documentation page](/images/vendor-api-docs.png)
-
-[View a larger version of this image](/images/vendor-api-docs.png)
-
-================
-File: docs/vendor/admin-console-adding-buttons-links.mdx
-================
-# Adding Links to the Dashboard
-
-This topic describes how to use the Kubernetes SIG Application custom resource to add links to the Replicated KOTS Admin Console dashboard.
- -## Overview - -Replicated recommends that every application include a Kubernetes SIG Application custom resource. The Kubernetes SIG Application custom resource provides a standard API for creating, viewing, and managing applications. For more information, see [Kubernetes Applications](https://github.com/kubernetes-sigs/application#kubernetes-applications) in the kubernetes-sigs GitHub repository. - -You can include the Kubernetes SIG Application custom resource in your releases to add links to the Admin Console dashboard. Common use cases include adding links to documentation, dashboards, or a landing page for the application. - -For example, the following shows an **Open App** button on the dashboard of the Admin Console for an application named Gitea: - -<img alt="Admin Console dashboard with Open App link" src="/images/gitea-open-app.png" width="700px"/> - -[View a larger version of this image](/images/gitea-open-app.png) - -:::note -KOTS uses the Kubernetes SIG Application custom resource as metadata and does not require or use an in-cluster controller to handle this custom resource. An application that follows best practices does not require cluster admin privileges or any cluster-wide components to be installed. -::: - -## Add a Link - -To add a link to the Admin Console dashboard, include a [Kubernetes SIG Application](https://github.com/kubernetes-sigs/application#kubernetes-applications) custom resource in the release with a `spec.descriptor.links` field. The `spec.descriptor.links` field is an array of links that are displayed on the Admin Console dashboard after the application is deployed. - -Each link in the `spec.descriptor.links` array contains two fields: -* `description`: The link text that will appear on the Admin Console dashboard. -* `url`: The target URL. - -For example: - -```yaml -# app.k8s.io/v1beta1 Application Custom resource - -apiVersion: app.k8s.io/v1beta1 -kind: Application -metadata: - name: "gitea" -spec: - descriptor: - links: - - description: About Wordpress - url: "https://wordpress.org/" -``` - -When the application is deployed, the "About Wordpress" link is displayed on the Admin Console dashboard as shown below: - -<img alt="About Wordpress link on the Admin Console dashboard" src="/images/dashboard-link-about-wordpress.png" width="450px"/> - -[View a larger version of this image](/images/dashboard-link-about-wordpress.png) - -For an additional example of a Kubernetes SIG Application custom resource, see [application.yaml](https://github.com/kubernetes-sigs/application/blob/master/docs/examples/wordpress/application.yaml) in the kubernetes-sigs GitHub repository. - -### Create URLs with User-Supplied Values Using KOTS Template Functions {#url-template} - -You can use KOTS template functions to template URLs in the Kubernetes SIG Application custom resource. This can be useful when all or some of the URL is a user-supplied value. For example, an application might allow users to provide their own ingress controller or load balancer. In this case, the URL can be templated to render the hostname that the user provides on the Admin Console Config screen. 
- -The following examples show how to use the KOTS [ConfigOption](/reference/template-functions-config-context#configoption) template function in the Kubernetes SIG Application custom resource `spec.descriptor.links.url` field to render one or more user-supplied values: - -* In the example below, the URL hostname is a user-supplied value for an ingress controller that the user configures during installation. - - ```yaml - apiVersion: app.k8s.io/v1beta1 - kind: Application - metadata: - name: "my-app" - spec: - descriptor: - links: - - description: Open App - url: 'http://{{repl ConfigOption "ingress_host" }}' - ``` -* In the example below, both the URL hostname and a node port are user-supplied values. It might be necessary to include a user-provided node port if you are exposing NodePort services for installations on VMs or bare metal servers with [Replicated Embedded Cluster](/vendor/embedded-overview) or [Replicated kURL](/vendor/kurl-about). - - ```yaml - apiVersion: app.k8s.io/v1beta1 - kind: Application - metadata: - name: "my-app" - spec: - descriptor: - links: - - description: Open App - url: 'http://{{repl ConfigOption "hostname" }}:{{repl ConfigOption "node_port"}}' - ``` - -For more information about working with KOTS template functions, see [About Template Functions](/reference/template-functions-about). - -================ -File: docs/vendor/admin-console-customize-app-icon.md -================ -# Customizing the Application Icon - -You can add a custom application icon that displays in the Replicated Admin Console and the download portal. Adding a custom icon helps ensure that your brand is reflected for your customers. - -:::note -You can also use a custom domain for the download portal. For more information, see [About Custom Domains](custom-domains). -::: - -## Add a Custom Icon - -For information about how to choose an image file for your custom application icon that displays well in the Admin Console, see [Icon Image File Recommendations](#icon-image-file-recommendations) below. - -To add a custom application icon: - -1. In the [Vendor Portal](https://vendor.replicated.com/apps), click **Releases**. Click **Create release** to create a new release, or click **Edit YAML** to edit an existing release. -1. Create or open the Application custom resource manifest file. An Application custom resource manifest file has `apiVersion: kots.io/v1beta1` and `kind: Application`. - -1. In the preview section of the Help pane: - - 1. If your Application manifest file is already populated with an `icon` key, the icon displays in the preview. Click **Preview a different icon** to access the preview options. - - 1. Drag and drop an icon image file to the drop zone. Alternatively, paste a link or Base64 encoded data URL in the text box. Click **Preview**. - - ![Application icon preview](/images/app-icon-preview.png) - - 1. (Air gap only) If you paste a link to the image in the text box, click **Preview** and **Base64 encode icon** to convert the image to a Base64 encoded data URL. An encoded URL displays that you can copy and paste into the Application manifest. Base64 encoding is required for images used with air gap installations. - - :::note - If you pasted a Base64 encoded data URL into the text box, the **Base64 encode icon** button does not display because the image is already encoded. If you drag and drop an icon, the icon is automatically encoded for you. - ::: - - ![Base64 encode image button](/images/app-icon-preview-base64.png) - - 1. 
Click **Preview a different icon** if you want to preview another image.

1. In the Application manifest, under `spec`, add an `icon` key that includes a link or the Base64 encoded data URL to the desired image.

    **Example**:

    ```yaml
    apiVersion: kots.io/v1beta1
    kind: Application
    metadata:
      name: my-application
    spec:
      title: My Application
      icon: https://kots.io/images/kotsadm-logo-large@2x.png
    ```
1. Click **Save Release**.


## Icon Image File Recommendations

For your custom application icon to look best in the Admin Console, consider the following recommendations:

* Use a PNG or JPG file.
* Use an image that is at least 250 by 250 pixels.
* Export the image file at 2x.

================
File: docs/vendor/admin-console-customize-config-screen.md
================
# Creating and Editing Configuration Fields

This topic describes how to use the KOTS Config custom resource manifest file to add and edit fields in the KOTS Admin Console configuration screen.

## About the Config Custom Resource

Applications distributed with Replicated KOTS can include a configuration screen in the Admin Console to collect required or optional values from your users that are used to run your application. For more information about the configuration screen, see [About the Configuration Screen](config-screen-about).

To include a configuration screen in the Admin Console for your application, you add a Config custom resource manifest file to a release for the application.

You define the fields that appear on the configuration screen as an array of `groups` and `items` in the Config custom resource:
* `groups`: A set of `items`. Each group must have a `name`, `title`, `description`, and `items`. For example, you can create a group of several user input fields that are all related to configuring an SMTP mail server.
* `items`: An array of user input fields. Each item in the array must have a `name`, `title`, and `type`. You can also include several optional properties. For example, in a group for configuring an SMTP mail server, you can have user input fields under `items` for the SMTP hostname, port, username, and password.

  There are several types of `items` supported in the Config manifest that allow you to collect different types of user inputs. For example, you can use the `password` input type to create a text field on the configuration screen that hides user input.

For more information about the syntax of the Config custom resource manifest, see [Config](/reference/custom-resource-config).

## About Regular Expression Validation

You can use [RE2 regular expressions](https://github.com/google/re2/wiki/Syntax) (regex) to validate user input for config items, ensuring conformity to certain standards, such as valid email addresses, password complexity rules, IP addresses, and URLs. This prevents users from deploying an application with a verifiably invalid configuration.

You add the `validation`, `regex`, `pattern`, and `message` fields to items in the Config custom resource. Validation is supported for the `text`, `textarea`, `password`, and `file` config item types. For more information about regex validation fields, see [Item Validation](/reference/custom-resource-config#item-validation) in _Config_.
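For example, a `text` item could validate an email address with a pattern like the following. This is an illustrative sketch, and the group and item names are hypothetical:

```yaml
- name: notifications
  title: Notifications
  items:
    - name: admin_email
      title: Admin Email
      help_text: Email address for system notifications
      type: text
      validation:
        regex:
          # Simple RE2-compatible email check; tighten or relax as needed
          pattern: ^[\w.+-]+@[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)+$
          message: Enter a valid email address.
```
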
The following example shows a common password complexity rule:

```yaml
- name: smtp-settings
  title: SMTP Settings
  items:
  - name: smtp_password
    title: SMTP Password
    type: password
    help_text: Set SMTP password
    validation:
      regex:
        pattern: ^(?:[\w@#$%^&+=!*()_\-{}[\]:;"'<>,.?\/|]){8,16}$
        message: The password must be between 8 and 16 characters long and can contain a combination of uppercase letters, lowercase letters, digits, and special characters.
```

## Add Fields to the Configuration Screen

To add fields to the Admin Console configuration screen:

1. In the [Vendor Portal](https://vendor.replicated.com/apps), click **Releases**. Then, either click **Create release** to create a new release, or click **Edit YAML** to edit an existing release.
1. Create or open the Config custom resource manifest file in the desired release. A Config custom resource manifest file has `kind: Config`.
1. In the Config custom resource manifest file, define custom user-input fields in an array of `groups` and `items`.

    **Example**:

    ```yaml
    apiVersion: kots.io/v1beta1
    kind: Config
    metadata:
      name: my-application
    spec:
      groups:
        - name: smtp_settings
          title: SMTP Settings
          description: Configure SMTP Settings
          items:
            - name: enable_smtp
              title: Enable SMTP
              help_text: Enable SMTP
              type: bool
              default: "0"
            - name: smtp_host
              title: SMTP Hostname
              help_text: Set SMTP Hostname
              type: text
            - name: smtp_port
              title: SMTP Port
              help_text: Set SMTP Port
              type: text
            - name: smtp_user
              title: SMTP User
              help_text: Set SMTP User
              type: text
            - name: smtp_password
              title: SMTP Password
              type: password
              default: 'password'
    ```

    The example above includes a single group with the name `smtp_settings`.

    The `items` array for the `smtp_settings` group includes the following user-input fields: `enable_smtp`, `smtp_host`, `smtp_port`, `smtp_user`, and `smtp_password`. Additional item properties are available, such as `affix` to make items appear horizontally on the same line. For more information about item properties, see [Item Properties](/reference/custom-resource-config#item-properties) in _Config_.

    The following screenshot shows how the SMTP Settings group from the example YAML above displays in the Admin Console configuration screen during application installation:

    ![User input fields on the configuration screen for the SMTP settings](/images/config-screen-smtp-example-large.png)

1. (Optional) Add default values for the fields. You can add default values using one of the following properties:
    * **With the `default` property**: When you include the `default` key, KOTS uses this value when rendering the manifest files for your application. The value then displays as a placeholder on the configuration screen in the Admin Console for your users. KOTS only uses the default value if the user does not provide a different value.

      :::note
      If you change the `default` value in a later release of your application, installed instances of your application receive the updated value only if your users did not change the default from what it was when they initially installed the application.

      If a user did change a field from its default, the Admin Console does not overwrite the value they provided.
      :::

    * **With the `value` property**: When you include the `value` key, KOTS does not overwrite this value during an application update.
      The value that you provide for the `value` key is visually indistinguishable from other values that your user provides on the Admin Console configuration screen. KOTS treats user-supplied values and the value that you provide for the `value` key as the same.

1. (Optional) Add regular expressions to validate user input for the `text`, `textarea`, `password`, and `file` config item types. For more information, see [About Regular Expression Validation](#about-regular-expression-validation).

    **Example**:

    ```yaml
    - name: smtp_host
      title: SMTP Hostname
      help_text: Set SMTP Hostname
      type: text
      validation:
        regex:
          pattern: ^[a-zA-Z]([a-zA-Z0-9\-]+[\.]?)*[a-zA-Z0-9]$
          message: Valid hostname starts with a letter (uppercase/lowercase), followed by zero or more groups of letters (uppercase/lowercase), digits, or hyphens, optionally followed by a period. Ends with a letter or digit.
    ```
1. (Optional) Mark fields as required by including `required: true`. When there are required fields, the user is prevented from proceeding with the installation until they provide a valid value for each required field.

    **Example**:

    ```yaml
    - name: smtp_password
      title: SMTP Password
      type: password
      required: true
    ```

1. Save and promote the release to a development environment to test your changes.

## Next Steps

After you add user input fields to the configuration screen, you use template functions to map the user-supplied values to manifest files in your release. If you use a Helm chart for your application, you map the values to the Helm chart `values.yaml` file using the HelmChart custom resource.

For more information, see [Mapping User-Supplied Values](config-screen-map-inputs).

================
File: docs/vendor/admin-console-display-app-status.md
================
import StatusesTable from "../partials/status-informers/_statusesTable.mdx"
import AggregateStatus from "../partials/status-informers/_aggregateStatus.mdx"
import AggregateStatusIntro from "../partials/status-informers/_aggregate-status-intro.mdx"
import SupportedResources from "../partials/instance-insights/_supported-resources-status.mdx"

# Adding Resource Status Informers

This topic describes how to add status informers for your application. Status informers apply only to applications installed with Replicated KOTS. For information about how to collect application status data for applications installed with Helm, see [Enabling and Understanding Application Status](insights-app-status).

## About Status Informers

_Status informers_ are a feature of KOTS that report on the status of supported Kubernetes resources deployed as part of your application. You enable status informers by listing the target resources under the `statusInformers` property in the Replicated Application custom resource. KOTS watches all of the resources that you add to the `statusInformers` property for changes in state.

Possible resource statuses are Ready, Updating, Degraded, Unavailable, and Missing. For more information, see [Understanding Application Status](#understanding-application-status).

When you add one or more status informers to your application, KOTS automatically does the following:

* Displays application status for your users on the dashboard of the Admin Console. This can help users diagnose and troubleshoot problems with their instance.
  The following shows an example of how an Unavailable status displays on the Admin Console dashboard:

  <img src="/images/kotsadm-dashboard-appstatus.png" alt="Unavailable status on the Admin Console dashboard" width="500px"/>

* Sends application status data to the Vendor Portal. This is useful for viewing insights on instances of your application running in customer environments, such as the current status and the average uptime. For more information, see [Instance Details](instance-insights-details).

  The following shows an example of the Vendor Portal **Instance details** page with data about the status of an instance over time:

  <img src="/images/instance-details.png" alt="Instance details full page" width="700px"/>

  [View a larger version of this image](/images/instance-details.png)

## Add Status Informers

To create status informers for your application, add one or more supported resource types to the `statusInformers` property in the Application custom resource. See [`statusInformers`](/reference/custom-resource-application#statusinformers) in _Application_.

<SupportedResources/>

You can target resources of the supported types that are deployed in any of the following ways:

* Deployed by KOTS.
* Deployed by a Kubernetes Operator that is deployed by KOTS. For more information, see [About Packaging a Kubernetes Operator Application](operator-packaging-about).
* Deployed by Helm. For more information, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about).

### Examples

Status informers are in the format `[namespace/]type/name`, where `namespace` is optional and defaults to the current namespace.

**Example**:

```yaml
apiVersion: kots.io/v1beta1
kind: Application
metadata:
  name: my-application
spec:
  statusInformers:
    - deployment/my-web-svc
    - deployment/my-worker
```

The `statusInformers` property also supports template functions. Using template functions allows you to include or exclude a status informer based on a customer-provided configuration value:

**Example**:

```yaml
statusInformers:
  - deployment/my-web-svc
  - '{{repl if ConfigOptionEquals "option" "value"}}deployment/my-worker{{repl else}}{{repl end}}'
```

In the example above, the `deployment/my-worker` status informer is excluded unless the statement in the `ConfigOptionEquals` template function evaluates to true.

For more information about using template functions in application manifest files, see [About Template Functions](/reference/template-functions-about).

## Understanding Application Status

This section provides information about how Replicated interprets and aggregates the status of Kubernetes resources for your application to report an application status.

### Resource Statuses

Possible resource statuses are Ready, Updating, Degraded, Unavailable, and Missing.
- -The following table lists the supported Kubernetes resources and the conditions that contribute to each status: - -<StatusesTable/> - -### Aggregate Application Status - -<AggregateStatusIntro/> - -<AggregateStatus/> - -================ -File: docs/vendor/admin-console-port-forward.mdx -================ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import ServicePortNote from "../partials/custom-resource-application/_servicePort-note.mdx" -import GiteaKotsApp from "../partials/getting-started/_gitea-kots-app-cr.mdx" -import GiteaHelmChart from "../partials/getting-started/_gitea-helmchart-cr.mdx" -import GiteaK8sApp from "../partials/getting-started/_gitea-k8s-app-cr.mdx" -import PortsApplicationURL from "../partials/custom-resource-application/_ports-applicationURL.mdx" -import NginxKotsApp from "../partials/application-links/_nginx-kots-app.mdx" -import NginxK8sApp from "../partials/application-links/_nginx-k8s-app.mdx" -import NginxService from "../partials/application-links/_nginx-service.mdx" -import NginxDeployment from "../partials/application-links/_nginx-deployment.mdx" - -# Port Forwarding Services with KOTS - -This topic describes how to add one or more ports to the Replicated KOTS port forward tunnel by configuring the `ports` key in the KOTS Application custom resource. - -The information in this topic applies to existing cluster installations. For information about exposing services for Replicated kURL or Replicated Embedded Cluster installations, see [Exposing Services Using NodePorts](kurl-nodeport-services). - -## Overview - -For installations into existing clusters, KOTS automatically creates a port forward tunnel and exposes the Admin Console on port 8800 where it can be accessed by users. In addition to the 8800 Admin Console port, you can optionally add one or more extra ports to the port forward tunnel. - -Adding ports to the port forward tunnel allows you to port forward application services without needing to manually run the `kubectl port-forward` command. You can also add a link to the Admin Console dashboard that points to port-forwarded services. - -This can be particularly useful when developing and testing KOTS releases for your application, because it provides a quicker way to access an application after installation compared to setting up an ingress controller or adding a load balancer. - -## Port Forward a Service with the KOTS Application `ports` Key - -To port forward a service with KOTS for existing cluster installations: - -1. In a new release, configure the [`ports`](/reference/custom-resource-application#ports) key in the KOTS Application custom resource with details for the target service. For example: - - ```yaml - apiVersion: kots.io/v1beta1 - kind: Application - metadata: - name: my-application - spec: - ports: - - serviceName: my-service - servicePort: 3000 - localPort: 8888 - ``` - - 1. For `ports.serviceName`, add the name of the service. KOTS can create a port forward to ClusterIP, NodePort, or LoadBalancer services. For more information about Kubernetes service types, see [Service](https://kubernetes.io/docs/concepts/services-networking/service/) in the Kubernetes documentation. - - 1. For `ports.servicePort`, add the `containerPort` of the Pod where the service is running. This is the port where KOTS forwards traffic. - - <ServicePortNote/> - - 1. For `ports.localPort`, add the port to map on the local workstation. - -1. 
Promote the release to the channel that you use for internal testing, then install in a development environment to test your changes.

    When the application is in a Ready state and the KOTS port forward is running, you will see output similar to the following:

    ```bash
    • Press Ctrl+C to exit
    • Go to http://localhost:8800 to access the Admin Console
    • Go to http://localhost:8888 to access the application
    ```
    Confirm that you can access the service at the URL provided in the KOTS CLI output.

1. (Optional) Add a link to the service on the Admin Console dashboard. See [Add a Link to a Port-Forwarded Service on the Admin Console Dashboard](#add-link) below.

## Add a Link to a Port-Forwarded Service on the Admin Console Dashboard {#add-link}

After you add a service to the KOTS port forward tunnel, you can also optionally add a link to the port-forwarded service on the Admin Console dashboard.

To add a link to a port-forwarded service, add the _same_ URL in the KOTS Application custom resource `ports.applicationUrl` field and the Kubernetes SIG Application custom resource `spec.descriptor.links.url` field. When the URLs in these fields match, KOTS adds a link on the Admin Console dashboard where the given service can be accessed. This process automatically links to the hostname in the browser (where the Admin Console is being accessed) and appends the specified `localPort`.

To add a link to a port-forwarded service on the Admin Console dashboard:

1. In a new release, open the KOTS Application custom resource and add a URL to the `ports.applicationUrl` field. For example:

    ```yaml
    apiVersion: kots.io/v1beta1
    kind: Application
    metadata:
      name: my-application
    spec:
      ports:
        - serviceName: my-service
          servicePort: 3000
          localPort: 8888
          applicationUrl: "http://my-service"
    ```

    Consider the following guidelines for this URL:
    * Use HTTP instead of HTTPS unless TLS termination takes place in the application Pod.
    * KOTS rewrites the URL with the hostname in the browser during deployment. So, you can use any hostname for the URL, such as the name of the service. For example, `http://my-service`.

1. Add a Kubernetes SIG Application custom resource in the release. For example:

    ```yaml
    # app.k8s.io/v1beta1 Application custom resource

    apiVersion: app.k8s.io/v1beta1
    kind: Application
    metadata:
      name: "my-application"
    spec:
      descriptor:
        links:
          - description: Open App
            # url matches ports.applicationUrl in the KOTS Application custom resource
            url: "http://my-service"
    ```

    1. For `spec.descriptor.links.description`, add the link text that will appear on the Admin Console dashboard. For example, `Open App`.

    1. For `spec.descriptor.links.url`, add the _same_ URL that you used in the `ports.applicationUrl` field in the KOTS Application custom resource.

1. Promote the release to the channel that you use for internal testing, then install in a development environment to test your changes.

    When the application is in a Ready state, confirm that you can access the service by clicking the link that appears on the dashboard. For example:

    <img alt="Admin Console dashboard with Open App link" src="/images/gitea-open-app.png" width="700px"/>

    [View a larger version of this image](/images/gitea-open-app.png)

## Access Port-Forwarded Services

This section describes how to access port-forwarded services.
### Command Line

Run [`kubectl kots admin-console`](/reference/kots-cli-admin-console-index) to open the KOTS port forward tunnel.

The `kots admin-console` command runs the equivalent of `kubectl port-forward svc/myapplication-service <local-port>:<remote-port>`, then prints a message with the URLs where the Admin Console and any port-forwarded services can be accessed. For more information about the `kubectl port-forward` command, see [port-forward](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#port-forward) in the Kubernetes documentation.

For example:

```bash
kubectl kots admin-console --namespace gitea
```
```bash
• Press Ctrl+C to exit
• Go to http://localhost:8800 to access the Admin Console
• Go to http://localhost:8888 to access the application
```

### Admin Console

You can optionally add a link to a port-forwarded service from the Admin Console dashboard. This requires additional configuration. For more information, see [Add a Link to a Port-Forwarded Service on the Admin Console Dashboard](#add-link).

The following example shows an **Open App** link on the dashboard of the Admin Console for an application named Gitea:

<img alt="Admin Console dashboard with Open App link" src="/images/gitea-open-app.png" width="700px"/>

[View a larger version of this image](/images/gitea-open-app.png)

## Examples

This section provides examples of how to configure the `ports` key to port-forward a service in existing cluster installations and add links to services on the Admin Console dashboard.

### Example: Bitnami Gitea Helm Chart with LoadBalancer Service

This example uses a KOTS Application custom resource and a Kubernetes SIG Application custom resource to configure port forwarding for the Bitnami Gitea Helm chart in existing cluster installations, and add a link to the port-forwarded service on the Admin Console dashboard. To view the Gitea Helm chart source, see [bitnami/gitea](https://github.com/bitnami/charts/blob/main/bitnami/gitea) in GitHub.

To test this example:

1. Pull version 1.0.6 of the Gitea Helm chart from Bitnami:

    ```
    helm pull oci://registry-1.docker.io/bitnamicharts/gitea --version 1.0.6
    ```

1. Add the `gitea-1.0.6.tgz` chart archive to a new, empty release in the Vendor Portal along with the `kots-app.yaml`, `k8s-app.yaml`, and `gitea.yaml` files provided below. Promote to the channel that you use for internal testing. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases).

    <Tabs>
    <TabItem value="kots-app" label="kots-app.yaml" default>
    <h5>Description</h5>
    <p>Based on the <a href="https://github.com/bitnami/charts/blob/main/bitnami/gitea/templates/svc.yaml">templates/svc.yaml</a> and <a href="https://github.com/bitnami/charts/blob/main/bitnami/gitea/values.yaml">values.yaml</a> files in the Gitea Helm chart, the following KOTS Application custom resource adds port 3000 to the port forward tunnel and maps local port 8888. Port 3000 is the container port of the Pod where the <code>gitea</code> service runs.</p>
    <h5>YAML</h5>
    <GiteaKotsApp/>
    </TabItem>
    <TabItem value="k8s-app" label="k8s-app.yaml" default>
    <h5>Description</h5>
    <p>The Kubernetes Application custom resource lists the same URL as the `ports.applicationUrl` field in the KOTS Application custom resource (`"http://gitea"`). This adds a link to the port-forwarded service from the Admin Console dashboard.
It also triggers KOTS to rewrite the URL to use the hostname in the browser and append the specified `localPort`. The label to be used for the link in the Admin Console is "Open App".</p> - <h5>YAML</h5> - <GiteaK8sApp/> - </TabItem> - <TabItem value="helmchart" label="gitea.yaml" default> - <h5>Description</h5> - <p>The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart. The <code>name</code> and <code>chartVersion</code> listed in the HelmChart custom resource must match the name and version of a Helm chart archive in the release. Each Helm chart archive in a release requires a unique HelmChart custom resource.</p> - <h5>YAML</h5> - <GiteaHelmChart/> - </TabItem> - </Tabs> - -1. Install the release to confirm that the service was port-forwarded successfully. To test the port forward, click **Open App** on the Admin Console dashboard after the application reaches a Ready state. For more information, see [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster). - -### Example: NGINX Application with ClusterIP and NodePort Services - -The following example demonstrates how to link to a port-forwarded ClusterIP service for existing cluster installations. - -It also shows how to use the `ports` key to add a link to a NodePort service for kURL installations. Although the primary purpose of the `ports` key is to port forward services for existing cluster installations, it is also possible to use the `ports` key so that links to NodePort services for Embedded Cluster or kURL installations use the hostname in the browser. For information about exposing NodePort services for Embedded Cluster or kURL installations, see [Exposing Services Using NodePorts](kurl-nodeport-services). - -To test this example: - -1. Add the `example-service.yaml`, `example-deployment.yaml`, `kots-app.yaml`, and `k8s-app.yaml` files provided below to a new, empty release in the Vendor Portal. Promote to the channel that you use for internal testing. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases). - - <Tabs> - <TabItem value="service" label="example-service.yaml" default> - <h5>Description</h5> - <p>The YAML below contains ClusterIP and NodePort specifications for a service named <code>nginx</code>. Each specification uses the <code>kots.io/when</code> annotation with the Replicated IsKurl template function to conditionally include the service based on the installation type (existing cluster or kURL cluster). For more information, see <a href="/vendor/packaging-include-resources">Conditionally Including or Excluding Resources</a> and <a href="/reference/template-functions-static-context#iskurl">IsKurl</a>.</p> - <p>As shown below, both the ClusterIP and NodePort <code>nginx</code> services are exposed on port 80.</p> - <h5>YAML</h5> - <NginxService/> - </TabItem> - <TabItem value="deployment" label="example-deployment.yaml" default> - <h5>Description</h5> - <p>A basic Deployment specification for the NGINX application.</p> - <h5>YAML</h5> - <NginxDeployment/> - </TabItem> - <TabItem value="kots-app" label="kots-app.yaml" default> - <h5>Description</h5> - <p>The KOTS Application custom resource below adds port 80 to the KOTS port forward tunnel and maps port 8888 on the local machine. 
The specification also includes <code>applicationUrl: "http://nginx"</code> so that a link to the service can be added to the Admin Console dashboard.</p> - <h5>YAML</h5> - <NginxKotsApp/> - </TabItem> - <TabItem value="k8s-app" label="k8s-app.yaml" default> - <h5>Description</h5> - <p>The Kubernetes Application custom resource lists the same URL as the `ports.applicationUrl` field in the KOTS Application custom resource (`"http://nginx"`). This adds a link to the port-forwarded service on the Admin Console dashboard that uses the hostname in the browser and appends the specified `localPort`. The label to be used for the link in the Admin Console is "Open App".</p> - <h5>YAML</h5> - <NginxK8sApp/> - </TabItem> - </Tabs> - -1. Install the release into an existing cluster and confirm that the service was port-forwarded successfully by clicking **Open App** on the Admin Console dashboard. For more information, see [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster). - -1. If there is not already a kURL installer promoted to the channel, add a kURL installer to the release to support kURL installs. For more information, see [Creating a kURL Installer](/vendor/packaging-embedded-kubernetes). - -1. Install the release on a VM and confirm that the service was exposed successfully. To test the port forward, click **Open App** on the Admin Console dashboard after the application reaches a Ready state. For more information, see [Online Installation with kURL](/enterprise/installing-kurl). - - :::note - Ensure that the VM where you install allows HTTP traffic. - ::: - -================ -File: docs/vendor/admin-console-prometheus-monitoring.mdx -================ -import OverviewProm from "../partials/monitoring/_overview-prom.mdx" -import LimitationEc from "../partials/monitoring/_limitation-ec.mdx" - -# Adding Custom Graphs - -This topic describes how to customize the graphs that are displayed on the Replicated Admin Console dashboard. - -## Overview of Monitoring with Prometheus - -<OverviewProm/> - -## About Customizing Graphs - -If your application exposes Prometheus metrics, you can add custom graphs to the Admin Console dashboard to expose these metrics to your users. You can also modify or remove the default graphs. - -To customize the graphs that are displayed on the Admin Console, edit the [`graphs`](/reference/custom-resource-application#graphs) property in the KOTS Application custom resource manifest file. At a minimum, each graph in the `graphs` property must include the following fields: -* `title`: Defines the graph title that is displayed on the Admin Console. -* `query`: A valid PromQL Prometheus query. You can also include a list of multiple queries by using the `queries` property. For more information about querying Prometheus with PromQL, see [Querying Prometheus](https://prometheus.io/docs/prometheus/latest/querying/basics/) in the Prometheus documentation. - -:::note -By default, a kURL cluster exposes the Prometheus expression browser at NodePort 30900. For more information, see [Expression Browser](https://prometheus.io/docs/visualization/browser/) in the Prometheus documentation. -::: - -## Limitation - -<LimitationEc/> - -## Add and Modify Graphs - -To customize graphs on the Admin Console dashboard: - -1. In the [Vendor Portal](https://vendor.replicated.com/), click **Releases**. Then, either click **Create release** to create a new release, or click **Edit YAML** to edit an existing release. - -1. 
Create or open the [KOTS Application](/reference/custom-resource-application) custom resource manifest file. - -1. In the Application manifest file, under `spec`, add a `graphs` property. Edit the `graphs` property to modify or remove existing graphs or add a new custom graph. For more information, see [graphs](/reference/custom-resource-application#graphs) in _Application_. - - **Example**: - - The following example shows the YAML for adding a custom graph that displays the total number of user signups for an application. - - ```yaml - apiVersion: kots.io/v1beta1 - kind: Application - metadata: - name: my-application - spec: - graphs: - - title: User Signups - query: 'sum(user_signup_events_total)' - ``` - -1. (Optional) Under `graphs`, copy and paste the specs for the default Disk Usage, CPU Usage, and Memory Usage Admin Console graphs provided in the YAML below. - - Adding these default graphs to the Application custom resource manifest ensures that they are not overwritten when you add one or more custom graphs. When the default graphs are included in the Application custom resource, the Admin Console displays them in addition to any custom graphs. - - Alternatively, you can exclude the YAML specs for the default graphs to remove them from the Admin Console dashboard. - - ```yaml - apiVersion: kots.io/v1beta1 - kind: Application - metadata: - name: my-application - spec: - graphs: - - title: User Signups - query: 'sum(user_signup_events_total)' - # Disk Usage, CPU Usage, and Memory Usage below are the default graphs - - title: Disk Usage - queries: - - query: 'sum((node_filesystem_size_bytes{job="node-exporter",fstype!="",instance!=""} - node_filesystem_avail_bytes{job="node-exporter", fstype!=""})) by (instance)' - legend: 'Used: {{ instance }}' - - query: 'sum((node_filesystem_avail_bytes{job="node-exporter",fstype!="",instance!=""})) by (instance)' - legend: 'Available: {{ instance }}' - yAxisFormat: bytes - - title: CPU Usage - query: 'sum(rate(container_cpu_usage_seconds_total{namespace="{{repl Namespace}}",container!="POD",pod!=""}[5m])) by (pod)' - legend: '{{ pod }}' - - title: Memory Usage - query: 'sum(container_memory_usage_bytes{namespace="{{repl Namespace}}",container!="POD",pod!=""}) by (pod)' - legend: '{{ pod }}' - yAxisFormat: bytes - ``` -1. Save and promote the release to a development environment to test your changes. - -================ -File: docs/vendor/ci-overview.md -================ -import TestRecs from "../partials/ci-cd/_test-recs.mdx" - -# About Integrating with CI/CD - -This topic provides an introduction to integrating Replicated CLI commands in your continuous integration and continuous delivery (CI/CD) pipelines, including Replicated's best practices and recommendations. - -## Overview - -Using CI/CD workflows to automatically compile code and run tests improves the speed at which teams can test, iterate on, and deliver releases to customers. When you integrate Replicated CLI commands into your CI/CD workflows, you can automate the process of deploying your application to clusters for testing, rather than needing to manually create and then archive channels, customers, and environments for testing. - -You can also include continuous delivery workflows to automatically promote a release to a shared channel in your Replicated team. This allows you to more easily share releases with team members for internal testing and iteration, and then to promote releases when they are ready to be shared with customers. 
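For example, a minimal CI job could call the Replicated CLI directly to create a cluster with Compatibility Matrix, run a test suite, and then clean up. The following GitHub Actions-style sketch assumes the Replicated CLI is installed on the runner and that a `REPLICATED_API_TOKEN` secret is configured; the cluster name and `make smoke-test` target are placeholders, and you should confirm the current flags in the Replicated CLI reference:

```yaml
# Sketch: create a Compatibility Matrix cluster, run tests, then remove it.
# The cluster name and the `make smoke-test` target are placeholders.
smoke-test:
  runs-on: ubuntu-22.04
  env:
    REPLICATED_API_TOKEN: ${{ secrets.REPLICATED_API_TOKEN }}
  steps:
    - uses: actions/checkout@v4
    - name: Create cluster, run tests, and clean up
      run: |
        replicated cluster create --name ci-smoke --distribution kind --version 1.27 --wait 10m
        CLUSTER_ID=$(replicated cluster ls --output json | jq -r '.[] | select(.name == "ci-smoke") | .id')
        replicated cluster kubeconfig "$CLUSTER_ID"
        make smoke-test
        replicated cluster rm "$CLUSTER_ID"
```
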
## Best Practices and Recommendations

The following are Replicated's best practices and recommendations for CI/CD:

* Include unique workflows for development and for releasing your application. This allows you to run tests on every commit, and then to promote releases to internal and customer-facing channels only when ready. For more information about the workflows that Replicated recommends, see [Recommended CI/CD Workflows](ci-workflows).

* Integrate Replicated Compatibility Matrix into your CI/CD workflows to quickly create multiple different types of clusters where you can deploy and test your application. Supported distributions include OpenShift, GKE, EKS, and more. For more information, see [About Compatibility Matrix](testing-about).

* If you use the GitHub Actions CI/CD platform, integrate the custom GitHub actions that Replicated maintains to replace repetitive tasks related to distributing your application with Replicated or using Compatibility Matrix. For more information, see [Integrating Replicated GitHub Actions](/vendor/ci-workflows-github-actions).

* To help show you are conforming to a secure supply chain, sign all commits and container images. Additionally, provide a verification mechanism for container images.

* Use custom RBAC policies to control the actions that can be performed in your CI/CD workflows. For example, you can create a policy that blocks the ability to promote releases to your production channel. For more information about creating custom RBAC policies in the Vendor Portal, see [Configuring RBAC Policies](/vendor/team-management-rbac-configuring). For a full list of available RBAC resources, see [RBAC Resource Names](/vendor/team-management-rbac-resource-names).

* Incorporating code tests into your CI/CD workflows is important for ensuring that developers receive quick feedback and can make updates in small iterations. Replicated recommends that you create and run all of the following test types as part of your CI/CD workflows:
  <TestRecs/>

================
File: docs/vendor/ci-workflows-github-actions.md
================
# Integrating Replicated GitHub Actions

This topic describes how to integrate Replicated's custom GitHub actions into continuous integration and continuous delivery (CI/CD) workflows that use the GitHub Actions platform.

## Overview

Replicated maintains a set of custom GitHub actions that are designed to replace repetitive tasks related to distributing your application with Replicated and to using the Compatibility Matrix, such as:
* Creating and removing customers, channels, and clusters
* Promoting releases
* Creating a matrix of clusters for testing based on the Kubernetes distributions and versions where your customers are running application instances
* Reporting the success or failure of tests

If you use GitHub Actions as your CI/CD platform, you can include these custom actions in your workflows rather than using Replicated CLI commands. Integrating the Replicated GitHub actions into your CI/CD pipeline helps you quickly build workflows with the required inputs and outputs, without needing to manually construct the corresponding CLI commands for each step.

To view all the available GitHub actions that Replicated maintains, see the [replicatedhq/replicated-actions](https://github.com/replicatedhq/replicated-actions/) repository in GitHub.
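For example, a workflow job might call the [create-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/create-cluster) action to provision a short-lived test cluster. The following is a minimal sketch; the input names are based on the action's README at the time of writing, so verify them against the repository, and pin to a released tag rather than `main` in real workflows:

```yaml
# Sketch: provision a short-lived kind cluster for testing.
# Input names are assumptions based on the action's README; verify before use.
jobs:
  smoke-test:
    runs-on: ubuntu-22.04
    steps:
      - name: Create test cluster
        id: create-cluster
        uses: replicatedhq/replicated-actions/create-cluster@main # pin a release tag instead
        with:
          api-token: ${{ secrets.REPLICATED_API_TOKEN }}
          kubernetes-distribution: kind
          kubernetes-version: "1.27"
          cluster-name: smoke-test-${{ github.run_id }}
          ttl: 1h
```
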
## GitHub Actions Workflow Examples

The [replicatedhq/replicated-actions](https://github.com/replicatedhq/replicated-actions#examples) repository in GitHub contains example workflows that use the Replicated GitHub actions. You can use these workflows as a template for your own GitHub Actions CI/CD workflows:

* For a simplified development workflow, see [development-helm-prepare-cluster.yaml](https://github.com/replicatedhq/replicated-actions/blob/main/example-workflows/development-helm-prepare-cluster.yaml).
* For a customizable development workflow for applications installed with the Helm CLI, see [development-helm.yaml](https://github.com/replicatedhq/replicated-actions/blob/main/example-workflows/development-helm.yaml).
* For a customizable development workflow for applications installed with KOTS, see [development-kots.yaml](https://github.com/replicatedhq/replicated-actions/blob/main/example-workflows/development-kots.yaml).
* For a release workflow, see [release.yaml](https://github.com/replicatedhq/replicated-actions/blob/main/example-workflows/release.yaml).

## Integrate GitHub Actions

The following table lists GitHub actions maintained by Replicated that you can integrate into your CI/CD workflows. The table also describes when to use the action in a workflow and indicates the related Replicated CLI command where applicable.

:::note
For an up-to-date list of the available custom GitHub actions, see the [replicatedhq/replicated-actions](https://github.com/replicatedhq/replicated-actions/) repository in GitHub.
:::

<table>
  <tr>
    <th width="25%">GitHub Action</th>
    <th width="50%">When to Use</th>
    <th width="25%">Related Replicated CLI Commands</th>
  </tr>
  <tr>
    <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/archive-channel">archive-channel</a></td>
    <td>
      <p>In release workflows, a temporary channel is created to promote a release for testing. This action archives the temporary channel after tests complete.</p>
      <p>See <a href="/vendor/ci-workflows#rel-cleanup">Archive the temporary channel and customer</a> in <em>Recommended CI/CD Workflows</em>.</p>
    </td>
    <td><a href="/reference/replicated-cli-channel-delete"><code>channel delete</code></a></td>
  </tr>
  <tr>
    <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/archive-customer">archive-customer</a></td>
    <td>
      <p>In release workflows, a temporary customer is created so that a release can be installed for testing.
This action archives the temporary customer after tests complete.</p> - <p>See <a href="/vendor/ci-workflows#rel-cleanup">Archive the temporary channel and customer</a> in <em>Recommended CI/CD Workflows</em>.</p> - </td> - <td>N/A</td> - </tr> - <tr> - <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/create-cluster">create-cluster</a></td> - <td> - <p>In release workflows, use this action to create one or more clusters for testing.</p> - <p>See <a href="/vendor/ci-workflows#rel-deploy">Create cluster matrix, deploy, and test</a> in <em>Recommended CI/CD Workflows</em>.</p> - </td> - <td><a href="/reference/replicated-cli-cluster-create"><code>cluster create</code></a></td> - </tr> - <tr> - <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/create-release">create-release</a></td> - <td> - <p>In release workflows, use this action to create a release to be installed and tested, and optionally to be promoted to a shared channel after tests complete.</p> - <p>See <a href="/vendor/ci-workflows#rel-release">Create a release and promote to a temporary channel</a> in <em>Recommended CI/CD Workflows</em>. </p> - </td> - <td><a href="/reference/replicated-cli-release-create"><code>release create</code></a></td> - </tr> - <tr> - <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/get-customer-instances">get-customer-instances</a></td> - <td> - <p>In release workflows, use this action to create a matrix of clusters for running tests based on the Kubernetes distributions and versions of active instances of your application running in customer environments.</p> - <p>See <a href="/vendor/ci-workflows#rel-deploy">Create cluster matrix, deploy, and test</a> in <em>Recommended CI/CD Workflows</em>.</p> - </td> - <td>N/A</td> - </tr> - <tr> - <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/helm-install">helm-install</a></td> - <td> - <p>In development or release workflows, use this action to install a release using the Helm CLI in one or more clusters for testing.</p> - <p>See <a href="/vendor/ci-workflows#rel-deploy">Create cluster matrix, deploy, and test</a> in <em>Recommended CI/CD Workflows</em>.</p> - </td> - <td>N/A</td> - </tr> - <tr> - <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/kots-install">kots-install</a></td> - <td> - <p>In development or release workflows, use this action to install a release with Replicated KOTS in one or more clusters for testing.</p> - <p>See <a href="/vendor/ci-workflows#rel-deploy">Create cluster matrix, deploy, and test</a> in <em>Recommended CI/CD Workflows</em>.</p> - </td> - <td>N/A</td> - </tr> - <tr> - <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/prepare-cluster">prepare-cluster</a></td> - <td> - <p>In development workflows, use this action to create a cluster, create a temporary customer of type <code>test</code>, and install an application in the cluster.</p> - <p>See <a href="/vendor/ci-workflows#dev-deploy">Prepare clusters, deploy, and test</a> in <em>Recommended CI/CD Workflows</em>.</p> - </td> - <td><a href="/reference/replicated-cli-cluster-prepare"><code>cluster prepare</code></a></td> - </tr> - <tr> - <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/promote-release">promote-release</a></td> - <td> - <p>In release workflows, use this action to promote a release to an internal or customer-facing channel (such as Unstable, Beta, or Stable) after tests pass.</p> - <p>See <a 
href="/vendor/ci-workflows#rel-promote">Promote to a shared channel</a> in <em>Recommended CI/CD Workflows</em>.</p> - </td> - <td><a href="/reference/replicated-cli-release-promote"><code>release promote</code></a></td> - </tr> - <tr> - <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/remove-cluster">remove-cluster</a></td> - <td> - <p>In development or release workflows, use this action to remove a cluster after running tests if no <code>ttl</code> was set for the cluster.</p> - <p>See <a href="/vendor/ci-workflows#dev-deploy">Prepare clusters, deploy, and test</a> and <a href="/vendor/ci-workflows#rel-deploy">Create cluster matrix, deploy, and test</a> in <em>Recommended CI/CD Workflows</em>.</p> - </td> - <td><a href="/reference/replicated-cli-cluster-rm"><code>cluster rm</code></a></td> - </tr> - <tr> - <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/report-compatibility-result">report-compatibility-result</a></td> - <td>In development or release workflows, use this action to report the success or failure of tests that ran in clusters provisioned by the Compatibility Matrix.</td> - <td><code>release compatibility</code></td> - </tr> - <tr> - <td><a href="https://github.com/replicatedhq/replicated-actions/tree/main/upgrade-cluster">upgrade-cluster</a></td> - <td>In release workflows, use this action to test your application's compatibility with Kubernetes API resource version migrations after upgrading.</td> - <td><a href="/reference/replicated-cli-cluster-upgrade"><code>cluster upgrade</code></a></td> - </tr> -</table> - -================ -File: docs/vendor/ci-workflows.mdx -================ -import Build from "../partials/ci-cd/_build-source-code.mdx" - -# Recommended CI/CD Workflows - -This topic provides Replicated's recommended development and release workflows for your continuous integration and continuous delivery (CI/CD) pipelines. - -## Overview - -Replicated recommends that you maintain unique CI/CD workflows for development (continuous integration) and for releasing your software (continuous delivery). The development and release workflows in this topic describe the recommended steps and jobs to include in your own workflows, including how to integrate Replicated Compatibility Matrix into your workflows for testing. For more information about Compatibility Matrix, see [About Compatibility Matrix](testing-about). - -For each step, the corresponding Replicated CLI command is provided. Additionally, for users of the GitHub Actions platform, a corresponding custom GitHub action that is maintained by Replicated is also provided. For more information about using the Replicated CLI, see [Installing the Replicated CLI](/reference/replicated-cli-installing). For more information about the Replicated GitHub actions, see [Integrating Replicated GitHub Actions](ci-workflows-github-actions). - -:::note -How you implement CI/CD workflows varies depending on the platform, such as GitHub, GitLab, CircleCI, TravisCI, or Jenkins. Refer to the documentation for your CI/CD platform for additional guidance on how to create jobs and workflows. -::: - -## About Creating RBAC Policies for CI/CD - -Replicated recommends using custom RBAC policies to control the actions that can be performed in your CI/CD workflows. For example, you can create a policy using the [`kots/app/[]/channel/[]/promote`](/vendor/team-management-rbac-resource-names#kotsappchannelpromote) resource that blocks the ability to promote releases to your production channel. 
This allows you to use CI/CD for testing without accidentally releasing to customers.

For more information about creating custom RBAC policies in the Vendor Portal, including examples, see [Configuring RBAC Policies](/vendor/team-management-rbac-configuring).

For a full list of available RBAC resources, see [RBAC Resource Names](/vendor/team-management-rbac-resource-names).

## Development Workflow

In a development workflow (which runs multiple times per day and is triggered by a commit to the application code repository), the source code is built and the application is deployed to clusters for testing. Additionally, for applications managed in the Replicated Vendor Portal, a release is created and promoted to a channel where it can be shared with internal teams.

The following diagram shows the recommended development workflow, where a commit to the application code repository triggers the source code to be built and the application to be deployed to clusters for testing:

![Development CI workflow](/images/ci-workflow-dev.png)

[View a larger version of this image](/images/ci-workflow-dev.png)

The following describes the recommended steps to include in development workflows, as shown in the diagram above:
1. [Define workflow triggers](#dev-triggers)
1. [Build source code](#dev-build)
1. [Prepare clusters, deploy, and test](#dev-deploy)

### Define workflow triggers {#dev-triggers}

Run a development workflow on every commit to a branch in your code repository that is _not_ `main`.

The following example shows defining a workflow trigger in GitHub Actions that runs the workflow when a commit is pushed to any branch other than `main`:

```yaml
name: development-workflow-example

on:
  push:
    branches:
      - '*' # matches every branch that doesn't contain a '/'
      - '*/*' # matches every branch containing a single '/'
      - '**' # matches every branch
      - '!main' # excludes main

jobs:
  ...
```

### Build source code {#dev-build}

<Build/>

### Prepare clusters, deploy, and test {#dev-deploy}

Add a job with the following steps to prepare clusters with Replicated Compatibility Matrix, deploy the application, and run tests:

1. Use Replicated Compatibility Matrix to prepare one or more clusters and deploy the application. Consider the following recommendations:

    * For development workflows, Replicated recommends that you use the `cluster prepare` command to provision one or more clusters with Compatibility Matrix. The `cluster prepare` command creates a cluster, creates a release, and installs the release in the cluster, without the need to promote the release to a channel or create a temporary customer. See the [`cluster prepare`](/reference/replicated-cli-cluster-prepare) Replicated CLI command. Or, for GitHub Actions workflows, see the [prepare-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/prepare-cluster) GitHub action.

      :::note
      The `cluster prepare` command is Beta. It is recommended for development only and is not recommended for production releases. For production releases, Replicated recommends that you use the `cluster create` command instead. For more information, see [Create cluster matrix, deploy, and test](#rel-deploy) in _Replicated Platform Release Workflow_ below.
      :::

    * The type and number of clusters that you choose to provision as part of a development workflow depends on how frequently you intend the workflow to run.
For example, for workflows that run multiple times a day, you might prefer to provision cluster distributions that can be created quickly, such as kind clusters.

1. Run tests, such as integration, smoke, and canary tests. For more information about recommended types of tests to run, see [Best Practices and Recommendations](/vendor/ci-overview#best-practices-and-recommendations) in _About Integrating with CI/CD_.

1. After the tests complete, remove the cluster. Alternatively, if you used the `--ttl` flag with the `cluster prepare` command, the cluster is automatically removed when the time period provided is reached. See the [`cluster rm`](/reference/replicated-cli-cluster-rm) Replicated CLI command. Or, for GitHub Actions workflows, see the [remove-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/remove-cluster) action.

## Compatibility Matrix-Only Development Workflow

In a development workflow (which runs multiple times per day and is triggered by a commit to the application code repository), the source code is built and the application is deployed to clusters for testing.

This example development workflow does _not_ create releases or customers in the Replicated vendor platform. This workflow is useful for applications that are not distributed or managed in the Replicated platform.

The following describes the recommended steps to include in a development workflow using Compatibility Matrix:

1. [Define workflow triggers](#cmx-triggers)
1. [Build source code](#cmx-build)
1. [Create cluster matrix, deploy, and test](#cmx-deploy)

### Define workflow triggers {#cmx-triggers}

Run a development workflow on every commit to a branch in your code repository that is _not_ `main`.

The following example shows defining a workflow trigger in GitHub Actions that runs the workflow when a commit is pushed to any branch other than `main`:

```yaml
name: development-workflow-example

on:
  push:
    branches:
      - '*' # matches every branch that doesn't contain a '/'
      - '*/*' # matches every branch containing a single '/'
      - '**' # matches every branch
      - '!main' # excludes main

jobs:
  ...
```

### Build source code {#cmx-build}

<Build/>


### Create cluster matrix, deploy, and test {#cmx-deploy}

Add a job with the following steps to provision clusters with Compatibility Matrix, deploy your application to the clusters, and run tests:

1. Use Compatibility Matrix to create a matrix of different Kubernetes cluster distributions and versions to run tests against. See the [cluster create](/reference/replicated-cli-cluster-create) Replicated CLI command. Or, for GitHub Actions workflows, see the [create-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/create-cluster) action.

    The following example shows creating a matrix of clusters of different distributions and versions using GitHub Actions:

    ```yaml
    # github actions cluster matrix example

    compatibility-matrix-example:
      runs-on: ubuntu-22.04
      strategy:
        matrix:
          cluster:
            - {distribution: kind, version: "1.25"}
            - {distribution: kind, version: "1.26"}
            - {distribution: eks, version: "1.26"}
            - {distribution: gke, version: "1.27"}
            - {distribution: openshift, version: "4.13.0-okd"}
    ```

1. For each cluster created, use the cluster's kubeconfig to update the Kubernetes context and then install the target application in the cluster.
For more information about accessing the kubeconfig for clusters created with Compatibility Matrix, see [cluster kubeconfig](/reference/replicated-cli-cluster-kubeconfig).

1. Run tests, such as integration, smoke, and canary tests. For more information about recommended types of tests to run, see [Best Practices and Recommendations](/vendor/ci-overview#best-practices-and-recommendations) in _About Integrating with CI/CD_.

1. Delete the cluster when the tests complete. See the [cluster rm](/reference/replicated-cli-cluster-rm) Replicated CLI command. Or, for GitHub Actions workflows, see the [remove-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/remove-cluster) action.

## Replicated Platform Release Workflow

In a release workflow (which is triggered by an action such as a commit to `main` or a tag being pushed to the repository), the source code is built, the application is deployed to clusters for testing, and then the application is made available to customers. In this example release workflow, a release is created and promoted to a channel in the Replicated vendor platform so that it can be installed by internal teams or by customers.

The following diagram demonstrates a release workflow that promotes a release to the Beta channel when a tag with the format `"v*.*.*-beta.*"` is pushed:

![Workflow that promotes to Beta channel](/images/ci-workflow-beta.png)

[View a larger version of this image](/images/ci-workflow-beta.png)

The following describes the recommended steps to include in release workflows, as shown in the diagram above:

1. [Define workflow triggers](#rel-triggers)
1. [Build source code](#rel-build)
1. [Create a release and promote to a temporary channel](#rel-release)
1. [Create cluster matrix, deploy, and test](#rel-deploy)
1. [Promote to a shared channel](#rel-promote)
1. [Archive the temporary channel and customer](#rel-cleanup)

### Define workflow triggers {#rel-triggers}

Create unique workflows for promoting releases to your team's internal-only, beta, and stable channels. Define unique event triggers for each of your release workflows so that releases are only promoted to a channel when a given condition is met:

* On every commit to the `main` branch in your code repository, promote a release to the channel that your team uses for internal testing (such as the default Unstable channel).

  The following example shows a workflow trigger in GitHub Actions that runs the workflow on commits to `main`:

  ```yaml
  name: unstable-release-example

  on:
    push:
      branches:
        - 'main'

  jobs:
    ...
  ```

* On pushing a tag that contains a version label with the semantic versioning format `x.y.z-beta.n` (such as `1.0.0-beta.1` or `v1.0.0-beta.2`), promote a release to your team's Beta channel.

  The following example shows a workflow trigger in GitHub Actions that runs the workflow when a tag that matches the format `v*.*.*-beta.*` is pushed:

  ```yaml
  name: beta-release-example

  on:
    push:
      tags:
        - "v*.*.*-beta.*"

  jobs:
    ...
  ```

* On pushing a tag that contains a version label with the semantic versioning format `x.y.z` (such as `1.0.0` or `v1.0.1`), promote a release to your team's Stable channel.

  The following example shows a workflow trigger in GitHub Actions that runs the workflow when a tag that matches the format `v*.*.*` is pushed:

  ```yaml
  name: stable-release-example

  on:
    push:
      tags:
        - "v*.*.*"

  jobs:
    ...
  ```
- ``` - -### Build source code {#rel-build} - -<Build/> - -### Create a release and promote to a temporary channel {#rel-release} - -Add a job that creates and promotes a release to a temporary channel. This allows the release to be installed for testing in the next step. See the [release create](/reference/replicated-cli-release-create) Replicated CLI command. Or, for GitHub Actions workflows, see [create-release](https://github.com/replicatedhq/replicated-actions/tree/main/create-release). - -Consider the following requirements and recommendations: - -* Use a consistent naming pattern for the temporary channels. Additionally, configure the workflow so that a new temporary channel with a unique name is created each time that the release workflow runs. - -* Use semantic versioning for the release version label. - - :::note - If semantic versioning is enabled on the channel where you promote the release, then the release version label _must_ be a valid semantic version number. See [Semantic Versioning](releases-about#semantic-versioning) in _About Channels and Releases_. - ::: - -* For Helm chart-based applications, the release version label must match the version in the `version` field of the Helm chart `Chart.yaml` file. To automatically update the `version` field in the `Chart.yaml` file, you can define a step in this job that updates the version label before packaging the Helm chart into a `.tgz` archive. - -* For releases that will be promoted to a customer-facing channel such as Beta or Stable, Replicated recommends that the version label for the release matches the tag that triggered the release workflow. For example, if the tag `1.0.0-beta.1` was used to trigger the workflow, then the version label for the release is also `1.0.0-beta.1`. - -### Create cluster matrix, deploy, and test {#rel-deploy} - -Add a job with the following steps to provision clusters with Compatibility Matrix, deploy the release to the clusters, and run tests: - -1. Create a temporary customer for installing the release. See the [customer create](/reference/replicated-cli-customer-create) Replicated CLI command. Or, for GitHub Actions workflows, see the [create-customer](https://github.com/replicatedhq/replicated-actions/tree/main/create-customer) action. - -1. Use Compatibility Matrix to create a matrix of different Kubernetes cluster distributions and versions to run tests against. See the [cluster create](/reference/replicated-cli-cluster-create) Replicated CLI command. Or, for GitHub Actions workflows, see the [create-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/create-cluster) action. - - Consider the following recommendations: - - * For release workflows, Replicated recommends that you run tests against multiple clusters of different Kubernetes distributions and versions. To help build the matrix, you can review the most common Kubernetes distributions and versions used by your customers on the **Customers > Reporting** page in the Replicated vendor portal. For more information, see [Customer Reporting](/vendor/customer-reporting). - - * When using the Replicated CLI, a list of representative customer instances can be obtained using the `api get` command. For example, `replicated api get /v3/app/[APP_ID]/cluster-usage | jq .` You can further filter these results by `channel_id`, `channel_sequence`, and `version_label`. 
- - * GitHub Actions users can also use the `get-customer-instances` action to automate the creation of a cluster matrix based on the distributions of clusters where instances of your application are installed and running. For more information, see the [example workflow](https://github.com/replicatedhq/replicated-actions/blob/main/example-workflows/development-dynamic.yaml) that makes use of [get-customer-instances](https://github.com/replicatedhq/replicated-actions/tree/main/get-customer-instances) in GitHub. - - The following example shows creating a matrix of clusters of different distributions and versions using GitHub Actions: - - ```yaml - # github actions cluster matrix example - - compatibility-matrix-example: - runs-on: ubuntu-22.04 - strategy: - matrix: - cluster: - - {distribution: kind, version: "1.25.3"} - - {distribution: kind, version: "1.26.3"} - - {distribution: eks, version: "1.26"} - - {distribution: gke, version: "1.27"} - - {distribution: openshift, version: "4.13.0-okd"} - ``` - -1. For each cluster created, use the cluster's kubeconfig to update Kubernetes context and then install the target application in the cluster. For more information about accessing the kubeconfig for clusters created with Compatibility Matrix, see [cluster kubeconfig](/reference/replicated-cli-cluster-kubeconfig). - - For more information about installing in an existing cluster, see: - * [Installing with Helm](/vendor/install-with-helm) - * [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster) - -1. Run tests, such as integration, smoke, and canary tests. For more information about recommended types of tests to run, see [Best Practices and Recommendations](/vendor/ci-overview#best-practices-and-recommendations) in _About Integrating with CI/CD_. - -1. Delete the cluster when the tests complete. See the [cluster rm](/reference/replicated-cli-cluster-rm) Replicated CLI command. Or, for GitHub Actions workflows, see the [remove-cluster](https://github.com/replicatedhq/replicated-actions/tree/main/remove-cluster) action. - -### Promote to a shared channel {#rel-promote} - -Add a job that promotes the release to a shared internal-only or customer-facing channel, such as the default Unstable, Beta, or Stable channel. See the [release promote](/reference/replicated-cli-release-promote) Replicated CLI command. Or, for GitHub Actions workflows, see the [promote-release](https://github.com/replicatedhq/replicated-actions/tree/main/promote-release) action. - -Consider the following requirements and recommendations: - -* Replicated recommends that you include the `--version` flag with the `release promote` command to explicitly declare the version label for the release. Use the same version label that was used when the release was created as part of [Create a release and promote to a temporary channel](#rel-release) above. Although the `--version` flag is not required, declaring the same release version label during promotion provides additional consistency that makes the releases easier to track. - -* The channel to which the release is promoted depends on the event triggers that you defined for the workflow. For example, if the workflow runs on every commit to the `main` branch, then promote the release to an internal-only channel, such as Unstable. For more information, see [Define Workflow Triggers](#rel-triggers) above. - -* Use the `--release-notes` flag to include detailed release notes in markdown. 
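-
-For reference, the following is a minimal sketch of a promote step using the Replicated CLI. The release sequence, channel, and version values are placeholders taken from hypothetical workflow variables; see the `release promote` reference linked above for the exact flags:
-
-```bash
-# Hypothetical sketch: promote the release created earlier in the workflow,
-# reusing the version label from the tag that triggered the run.
-replicated release promote "$RELEASE_SEQUENCE" "$CHANNEL" \
-  --version "$GITHUB_REF_NAME" \
-  --release-notes "Promoted automatically by the release workflow."
-```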
-
-### Archive the temporary channel and customer {#rel-cleanup}
-
-Finally, add a job to archive the temporary channel and customer that you created. This ensures that these artifacts are removed from your Replicated team and that they do not have to be manually archived after the release is promoted.
-
-See the [channel rm](/reference/replicated-cli-channel-rm) Replicated CLI command and the [customer/\{customer_id\}/archive](https://replicated-vendor-api.readme.io/reference/archivecustomer) endpoint in the Vendor API v3 documentation. Or, for GitHub Actions workflows, see the [archive-channel](https://github.com/replicatedhq/replicated-actions/tree/main/archive-channel) and [archive-customer](https://github.com/replicatedhq/replicated-actions/tree/main/archive-customer) actions.
-
-================
-File: docs/vendor/compatibility-matrix-usage.md
-================
-# Viewing Compatibility Matrix Usage History
-This topic describes using the Replicated Vendor Portal to understand
-Compatibility Matrix usage across your team.
-
-## View Historical Usage
-The **Compatibility Matrix > History** page provides
-historical information about both clusters and VMs, as shown below:
-
-![Compatibility Matrix History Page](/images/compatibility-matrix-history.png)
-[View a larger version of this image](/images/compatibility-matrix-history.png)
-
-Only _terminated_ clusters and VMs (that is, resources that have been deleted or that errored) are displayed on the **History** page.
-
-The top of the **History** page displays the total number of terminated clusters and VMs
-in the selected time period as well as the total cost and usage time for
-the terminated resources.
-
-The table includes cluster and VM entries with the following columns:
-- **Name:** The name of the cluster or VM.
-- **By:** The actor that created the resource.
-- **Cost:** The cost of the resource. This is calculated at termination and is
-  based on the time the resource was running.
-- **Distribution:** The distribution and version of the resource. For example,
-  `kind 1.32.1`.
-- **Type:** The distribution type of the resource. Kubernetes clusters
-  are listed as `kubernetes` and VMs are listed as `vm`.
-- **Status:** The status of the resource. For example, `terminated` or `error`.
-- **Instance:** The instance type of the resource. For example, `r1.small`.
-- **Nodes:** The node count for `kubernetes` resources. VMs do not use this
-  field.
-- **Node Groups:** The node group count for `kubernetes` resources. VMs do not
-  use this field.
-- **Created At:** The time the resource was created.
-- **Running At:** The time the resource started running. For billing purposes,
-  this is the time when Replicated began charging for the resource.
-- **Terminated At:** The time the resource was terminated. For billing
-  purposes, this is the time when Replicated stopped charging for the resource.
-- **TTL:** The time-to-live for the resource. This is the maximum amount of
-  time the resource can run before it is automatically terminated.
-- **Duration:** The total time the resource was running. This is the time
-  between the `running` and `terminated` states.
-- **Tag:** Any tags that were applied to the resource.
-
-## Filter and Sort Usage History
-
-Each of the fields on the **History** page can be filtered and sorted. To sort by a specific field, click on the column header.
-
-To filter by a specific field, click on the filter icon in the column header, then use the filter inputs to narrow the results, as shown below:
-
-![Compatibility Matrix History Page, filter input](/images/compatibility-matrix-column-filter-input.png)
-[View a larger version of this image](/images/compatibility-matrix-column-filter-input.png)
-
-## Get Usage History with the Vendor API v3
-
-For more information about using the Vendor API v3 to get Compatibility Matrix
-usage history information, see the following API endpoints within the
-Vendor API v3 documentation:
-
-* [/v3/cmx/stats](https://replicated-vendor-api.readme.io/reference/getcmxstats)
-* [/v3/vms](https://replicated-vendor-api.readme.io/reference/listvms)
-* [/v3/clusters](https://replicated-vendor-api.readme.io/reference/listclusters)
-* [/v3/cmx/history](https://replicated-vendor-api.readme.io/reference/listcmxhistory)
-
-For examples of using these endpoints, see the sections below.
-
-### Credit Balance and Summarized Usage
-You can use the `/v3/cmx/stats` endpoint to get summarized usage information as well as your Compatibility Matrix
-credit balance.
-
-This endpoint returns:
-
-- **`cluster_count`:** The total number of terminated clusters.
-- **`vm_count`:** The total number of terminated VMs.
-- **`usage_minutes`:** The total number of billed usage minutes.
-- **`cost`:** The total cost of the terminated clusters and VMs in cents.
-- **`credit_balance`:** The remaining credit balance in cents.
-
-```shell
-curl --request GET \
-  --url https://api.replicated.com/vendor/v3/cmx/stats \
-  --header 'Accept: application/json' \
-  --header "Authorization: $REPLICATED_API_TOKEN"
-{"cluster_count":2,"vm_count":4,"usage_minutes":152,"cost":276,"credit_balance":723}
-```
-
-The `v3/cmx/stats` endpoint also supports filtering by `start-time` and
-`end-time`.
For example, the following request gets usage information for January 2025:
-
-```shell
-curl --request GET \
-  --url 'https://api.replicated.com/vendor/v3/cmx/stats?start-time=2025-01-01T00:00:00Z&end-time=2025-01-31T23:59:59Z' \
-  --header "Authorization: $REPLICATED_API_TOKEN" \
-  --header 'accept: application/json'
-```
-
-### Currently Active Clusters
-To get a list of active clusters:
-
-```shell
-curl --request GET \
-  --url 'https://api.replicated.com/vendor/v3/clusters' \
-  --header "Authorization: $REPLICATED_API_TOKEN" \
-  --header 'accept: application/json'
-```
-
-You can also use a tool such as `jq` to filter and iterate over the output:
-
-```shell
-curl --request GET \
-  --url 'https://api.replicated.com/vendor/v3/clusters' \
-  --header "Authorization: $REPLICATED_API_TOKEN" \
-  --header 'accept: application/json' | \
-  jq '.clusters[] | {name: .name, ttl: .ttl, distribution: .distribution, version: .version}'
-
-{
-  "name": "friendly_brown",
-  "ttl": "1h",
-  "distribution": "kind",
-  "version": "1.32.1"
-}
-```
-
-### Currently Active Virtual Machines
-To get a list of active VMs:
-
-```shell
-curl --request GET \
-  --url 'https://api.replicated.com/vendor/v3/vms' \
-  --header "Authorization: $REPLICATED_API_TOKEN" \
-  --header 'accept: application/json'
-```
-
-### Historical Usage
-To fetch historical usage information:
-
-```shell
-curl --request GET \
-  --url 'https://api.replicated.com/vendor/v3/cmx/history' \
-  --header "Authorization: $REPLICATED_API_TOKEN" \
-  --header 'accept: application/json'
-```
-
-You can also filter the response from the `/v3/cmx/history` endpoint by `distribution-type`, which
-allows you to get a list of either clusters or VMs:
-
-- **For clusters, use `distribution-type=kubernetes`:**
-  ```shell
-  curl --request GET \
-    --url 'https://api.replicated.com/vendor/v3/cmx/history?distribution-type=kubernetes' \
-    --header "Authorization: $REPLICATED_API_TOKEN" \
-    --header 'accept: application/json'
-  ```
-
-- **For VMs, use `distribution-type=vm`:**
-  ```shell
-  curl --request GET \
-    --url 'https://api.replicated.com/vendor/v3/cmx/history?distribution-type=vm' \
-    --header "Authorization: $REPLICATED_API_TOKEN" \
-    --header 'accept: application/json'
-  ```
-
-### Filtering Endpoint Results
-Each of these endpoints supports pagination and filtering. You can use the
-following query parameters to filter the results.
-
-:::note
-Each of the examples below
-uses the `v3/cmx/history` endpoint, but the same query parameters can be used
-with the other endpoints as well.
-:::
-
-- **Pagination:** Use the `pageSize` and `currentPage` query parameters to
-  paginate through the results:
-
-  ```shell
-  curl --request GET \
-    --url 'https://api.replicated.com/vendor/v3/cmx/history?pageSize=10&currentPage=1' \
-    --header "Authorization: $REPLICATED_API_TOKEN" \
-    --header 'accept: application/json'
-  ```
-
-- **Filter by date:** Use the `start-time` and `end-time` query parameters to
-  filter the results by a specific date range:
-
-  ```shell
-  curl --request GET \
-    --url 'https://api.replicated.com/vendor/v3/cmx/history?start-time=2025-01-01T00:00:00Z&end-time=2025-01-31T23:59:59Z' \
-    --header "Authorization: $REPLICATED_API_TOKEN" \
-    --header 'accept: application/json'
-  ```
-
-- **Sort by:** Use the `tag-sort-key` query parameter to sort the results by a
-  specific field. The field can be any of the fields returned in the response.
-
-  By default, the results are sorted in ascending order. Use
-  `sortDesc=true` to sort in descending order:
-
-  ```shell
-  curl --request GET \
-    --url 'https://api.replicated.com/vendor/v3/cmx/history?tag-sort-key=created_at&sortDesc=true' \
-    --header "Authorization: $REPLICATED_API_TOKEN" \
-    --header 'accept: application/json'
-  ```
-
-- **Tag filters:** Use the `tag-filter` query parameter to filter the results by
-  a specific tag:
-
-  ```shell
-  curl --request GET \
-    --url 'https://api.replicated.com/vendor/v3/cmx/history?tag-filter=tag1' \
-    --header "Authorization: $REPLICATED_API_TOKEN" \
-    --header 'accept: application/json'
-  ```
-
-- **Actor filters:** Use the `actor-filter` query parameter to filter by the actor
-  that created the resource, or by the type of actor such as `Web UI` or
-  `Replicated CLI`:
-
-  ```shell
-  curl --request GET \
-    --url 'https://api.replicated.com/vendor/v3/cmx/history?actor-filter=name' \
-    --header "Authorization: $REPLICATED_API_TOKEN" \
-    --header 'accept: application/json'
-  ```
-
-  :::note
-  If any filter is passed for an object that does not exist, no warning is given.
-  For example, if you filter by `actor-filter=name` and there are no results,
-  the response will be empty.
-  :::
-
-================
-File: docs/vendor/config-screen-about.md
-================
-# About the Configuration Screen
-
-This topic describes the configuration screen on the Config tab in the Replicated Admin Console.
-
-## About Collecting Configuration Values
-
-When you distribute your application with Replicated KOTS, you can include a configuration screen in the Admin Console. This configuration screen is used to collect required or optional values from your users that are used to run your application. You can use regular expressions to validate user input for some fields, such as passwords and email addresses. For more information about how to add custom fields to the configuration screen, see [Creating and Editing Configuration Fields](admin-console-customize-config-screen).
-
-If you use a Helm chart for your application, your users provide any values specific to their environment from the configuration screen, rather than in a Helm chart `values.yaml` file. This means that your users can provide configuration values through a user interface, rather than having to edit a YAML file or use `--set` CLI commands. The Admin Console configuration screen also allows you to control which options you expose to your users.
-
-For example, you can use the configuration screen to provide database configuration options for your application. Your users could connect your application to an external database by providing required values in the configuration screen, such as the host, port, and a username and password for the database.
-
-Alternatively, you can use the configuration screen to provide a database option that runs in the cluster as part of your application. For an example of this use case, see [Example: Adding Database Configuration Options](tutorial-adding-db-config).
-
-## Viewing the Configuration Screen
-
-If you include a configuration screen with your application, users of your application can access the configuration screen from the Admin Console:
-* During application installation.
-* At any time after application installation on the Admin Console Config tab.
-
-### Application Installation
-
-The Admin Console displays the configuration screen when the user installs the application, after they upload their license file.
- -The following shows an example of how the configuration screen displays during installation: - -![configuration screen that displays during application install](/images/config-screen-sentry-enterprise-app-install.png) - -[View a larger version of this image](/images/config-screen-sentry-enterprise-app-install.png) - -### Admin Console Config Tab - -Users can access the configuration screen any time after they install the application by going to the Config tab in the Admin Console. - -The following shows an example of how the configuration screen displays in the Admin Console Config tab: - -![configuration screen that displays in the Config tab](/images/config-screen-sentry-enterprise.png) - -[View a larger version of this image](/images/config-screen-sentry-enterprise.png) - -================ -File: docs/vendor/config-screen-conditional.mdx -================ -import IntegerComparison from "../partials/template-functions/_integer-comparison.mdx" -import PropertyWhen from "../partials/config/_property-when.mdx" -import DistroCheck from "../partials/template-functions/_string-comparison.mdx" -import NeComparison from "../partials/template-functions/_ne-comparison.mdx" - -# Using Conditional Statements in Configuration Fields - -This topic describes how to use Replicated KOTS template functions in the Config custom resource to conditionally show or hide configuration fields for your application on the Replicated KOTS Admin Console **Config** page. - -## Overview - -The `when` property in the Config custom resource denotes configuration groups or items that are displayed on the Admin Console **Config** page only when a condition evaluates to true. When the condition evaluates to false, the group or item is not displayed. - -<PropertyWhen/> - -For more information about the Config custom resource `when` property, see [when](/reference/custom-resource-config#when) in _Config_. - -## Conditional Statement Examples - -This section includes examples of common types of conditional statements used in the `when` property of the Config custom resource. - -For additional examples of using conditional statements in the Config custom resource, see [Applications](https://github.com/replicatedhq/platform-examples/tree/main/applications) in the platform-examples repository in GitHub. - -### Cluster Distribution Check - -It can be useful to show or hide configuration fields depending on the distribution of the cluster because different distributions often have unique requirements. - -In the following example, the `when` properties use the [Distribution](/reference/template-functions-static-context#distribution) template function to return the Kubernetes distribution of the cluster where Replicated KOTS is running. If the distribution of the cluster matches the specified distribution, then the `when` property evaluates to true. - -<DistroCheck/> - -### Embedded Cluster Distribution Check - -It can be useful to show or hide configuration fields if the distribution of the cluster is [Replicated Embedded Cluster](/vendor/embedded-overview) because you can include extensions in embedded cluster distributions to manage functionality such as ingress and storage. This means that embedded clusters frequently have fewer configuration options for the user. - -<NeComparison/> - -### kURL Distribution Check - -It can be useful to show or hide configuration fields if the cluster was provisioned by Replicated kURL because kURL distributions often include add-ons to manage functionality such as ingress and storage. 
This means that kURL clusters frequently have fewer configuration options for the user.
-
-In the following example, the `when` property of the `not_kurl` group uses the IsKurl template function to evaluate if the cluster was provisioned by kURL. For more information about the IsKurl template function, see [IsKurl](/reference/template-functions-static-context#iskurl) in _Static Context_.
-
-```yaml
-# Config custom resource
-apiVersion: kots.io/v1beta1
-kind: Config
-metadata:
-  name: config-sample
-spec:
-  groups:
-    - name: all_distributions
-      title: Example Group
-      description: This group always displays.
-      items:
-        - name: example_item
-          title: This item always displays.
-          type: text
-    - name: not_kurl
-      title: Non-kURL Cluster Group
-      description: This group displays only if the cluster is not provisioned by kURL.
-      when: 'repl{{ not IsKurl }}'
-      items:
-        - name: example_item_non_kurl
-          title: The cluster is not provisioned by kURL.
-          type: label
-```
-
-As shown in the image below, both the `all_distributions` and `not_kurl` groups are displayed on the **Config** page when KOTS is _not_ running in a kURL cluster:
-
-![Config page displays both groups from the example](/images/config-example-iskurl-false.png)
-
-[View a larger version of this image](/images/config-example-iskurl-false.png)
-
-However, when KOTS is running in a kURL cluster, only the `all_distributions` group is displayed, as shown below:
-
-![Config page displaying only the first group from the example](/images/config-example-iskurl-true.png)
-
-[View a larger version of this image](/images/config-example-iskurl-true.png)
-
-### License Field Value Equality Check
-
-You can show or hide configuration fields based on the values in a license to ensure that users only see configuration options for the features and entitlements granted by their license.
-
-In the following example, the `when` property of the `new_feature_config` item uses the LicenseFieldValue template function to determine if the user's license contains a `newFeatureEntitlement` field that is set to `true`. For more information about the LicenseFieldValue template function, see [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue) in _License Context_.
-
-```yaml
-apiVersion: kots.io/v1beta1
-kind: Config
-metadata:
-  name: config-sample
-spec:
-  groups:
-    - name: example_settings
-      title: My Example Config
-      description: Example fields for using LicenseFieldValue template function
-      items:
-        - name: new_feature_config
-          type: label
-          title: "You have the new feature entitlement"
-          when: '{{repl (LicenseFieldValue "newFeatureEntitlement") }}'
-```
-
-As shown in the image below, the **Config** page displays the `new_feature_config` item when the user's license contains `newFeatureEntitlement: true`:
-
-![Config page displaying the text "You have the new feature entitlement"](/images/config-example-newfeature.png)
-
-[View a larger version of this image](/images/config-example-newfeature.png)
-
-### License Field Value Integer Comparison
-
-You can show or hide configuration fields based on the values in a license to ensure that users only see configuration options for the features and entitlements granted by their license. You can also compare integer values from license fields to control the configuration experience for your users.
-
-<IntegerComparison/>
-
-### User-Supplied Value Check
-
-You can show or hide configuration fields based on user-supplied values on the **Config** page to ensure that users only see options that are relevant to their selections.
-
-In the following example, the `database_host` and `database_password` items use the ConfigOptionEquals template function to evaluate if the user selected the `external` database option for the `db_type` item. For more information about the ConfigOptionEquals template function, see [ConfigOptionEquals](/reference/template-functions-config-context#configoptionequals) in _Config Context_.
-
-```yaml
-apiVersion: kots.io/v1beta1
-kind: Config
-metadata:
-  name: config-sample
-spec:
-  groups:
-    - name: database_settings_group
-      title: Database Settings
-      items:
-        - name: db_type
-          title: Database Type
-          type: radio
-          default: external
-          items:
-            - name: external
-              title: External Database
-            - name: embedded
-              title: Embedded Database
-        - name: database_host
-          title: Database Hostname
-          type: text
-          when: '{{repl (ConfigOptionEquals "db_type" "external")}}'
-        - name: database_password
-          title: Database Password
-          type: password
-          when: '{{repl (ConfigOptionEquals "db_type" "external")}}'
-```
-
-As shown in the images below, when the user selects the external database option, the `database_host` and `database_password` items are displayed. Alternatively, when the user selects the embedded database option, the items are _not_ displayed:
-
-![Config page displaying the database host and password fields](/images/config-example-external-db.png)
-
-[View a larger version of this image](/images/config-example-external-db.png)
-
-![Config page with embedded database option selected](/images/config-example-embedded-db.png)
-
-[View a larger version of this image](/images/config-example-embedded-db.png)
-
-## Use Multiple Conditions in the `when` Property
-
-You can use more than one template function in the `when` property to create more complex conditional statements. This allows you to show or hide configuration fields based on multiple conditions being true.
-
-The following example includes `when` properties that use both the ConfigOptionEquals and IsKurl template functions:
-
-```yaml
-apiVersion: kots.io/v1beta1
-kind: Config
-metadata:
-  name: config-sample
-spec:
-  groups:
-    - name: ingress_settings
-      title: Ingress Settings
-      description: Configure Ingress
-      items:
-        - name: ingress_type
-          title: Ingress Type
-          help_text: |
-            Select how traffic will ingress to the application.
-          type: radio
-          items:
-            - name: ingress_controller
-              title: Ingress Controller
-            - name: load_balancer
-              title: Load Balancer
-          default: "ingress_controller"
-          required: true
-          when: 'repl{{ not IsKurl }}'
-        - name: ingress_host
-          title: Hostname
-          help_text: Hostname used to access the application.
-          type: text
-          default: "hostname.example.com"
-          required: true
-          when: 'repl{{ and (not IsKurl) (ConfigOptionEquals "ingress_type" "ingress_controller") }}'
-        - name: ingress_annotations
-          type: textarea
-          title: Ingress Annotations
-          help_text: See your ingress controller’s documentation for the required annotations.
-          when: 'repl{{ and (not IsKurl) (ConfigOptionEquals "ingress_type" "ingress_controller") }}'
-        - name: ingress_tls_type
-          title: Ingress TLS Type
-          type: radio
-          items:
-            - name: self_signed
-              title: Self Signed (Generate Self Signed Certificate)
-            - name: user_provided
-              title: User Provided (Upload a TLS Certificate and Key Pair)
-          required: true
-          default: self_signed
-          when: 'repl{{ and (not IsKurl) (ConfigOptionEquals "ingress_type" "ingress_controller") }}'
-        - name: ingress_tls_cert
-          title: TLS Cert
-          type: file
-          when: '{{repl and (ConfigOptionEquals "ingress_type" "ingress_controller") (ConfigOptionEquals "ingress_tls_type" "user_provided") }}'
-          required: true
-        - name: ingress_tls_key
-          title: TLS Key
-          type: file
-          when: '{{repl and (ConfigOptionEquals "ingress_type" "ingress_controller") (ConfigOptionEquals "ingress_tls_type" "user_provided") }}'
-          required: true
-        - name: load_balancer_port
-          title: Load Balancer Port
-          help_text: Port used to access the application through the Load Balancer.
-          type: text
-          default: "443"
-          required: true
-          when: 'repl{{ and (not IsKurl) (ConfigOptionEquals "ingress_type" "load_balancer") }}'
-        - name: load_balancer_annotations
-          type: textarea
-          title: Load Balancer Annotations
-          help_text: See your cloud provider’s documentation for the required annotations.
-          when: 'repl{{ and (not IsKurl) (ConfigOptionEquals "ingress_type" "load_balancer") }}'
-```
-
-As shown in the image below, the configuration fields that are specific to the ingress controller display only when the user selects the ingress controller option and KOTS is _not_ running in a kURL cluster:
-
-![Config page displaying the ingress controller options](/images/config-example-ingress-controller.png)
-
-[View a larger version of this image](/images/config-example-ingress-controller.png)
-
-Additionally, the options relevant to the load balancer display when the user selects the load balancer option and KOTS is _not_ running in a kURL cluster:
-
-![Config page displaying the load balancer options](/images/config-example-ingress-load-balancer.png)
-
-[View a larger version of this image](/images/config-example-ingress-load-balancer.png)
-
-================
-File: docs/vendor/config-screen-map-inputs.md
-================
-# Mapping User-Supplied Values
-
-This topic describes how to map the values that your users provide in the Replicated Admin Console configuration screen to your application.
-
-This topic assumes that you have already added custom fields to the Admin Console configuration screen by editing the Config custom resource. For more information, see [Creating and Editing Configuration Fields](admin-console-customize-config-screen).
-
-## Overview of Mapping Values
-
-You use the values that your users provide in the Admin Console configuration screen to render YAML in the manifest files for your application.
-
-For example, if you provide an embedded database with your application, you might add a field on the Admin Console configuration screen where users input a password for the embedded database. You can then map the password that your user supplies in this field to the Secret manifest file for the database in your application.
-
-For an example of mapping database configuration options in a sample application, see [Example: Adding Database Configuration Options](tutorial-adding-db-config).
-
-You can also conditionally deploy custom resources depending on the user input for a given field.
For example, if a customer chooses to use their own database with your application rather than an embedded database option, it is not desirable to deploy the optional database resources such as a StatefulSet and a Service.
-
-For more information about including optional resources conditionally based on user-supplied values, see [Conditionally Including or Excluding Resources](packaging-include-resources).
-
-## About Mapping Values with Template Functions
-
-To map user-supplied values, you use Replicated KOTS template functions. The template functions are based on the Go text/template libraries. To use template functions, you add them as strings in the custom resource manifest files in your application.
-
-For more information about template functions, including use cases and examples, see [About Template Functions](/reference/template-functions-about).
-
-For more information about the syntax of the template functions for mapping configuration values, see [Config Context](/reference/template-functions-config-context) in the _Template Functions_ section.
-
-## Map User-Supplied Values
-
-Follow one of these procedures to map user inputs from the configuration screen, depending on if you use a Helm chart for your application:
-
-* **Without Helm**: See [Map Values to Manifest Files](#map-values-to-manifest-files).
-* **With Helm**: See [Map Values to a Helm Chart](#map-values-to-a-helm-chart).
-
-### Map Values to Manifest Files
-
-To map user-supplied values from the configuration screen to manifest files in your application:
-
-1. In the [Vendor Portal](https://vendor.replicated.com/apps), click **Releases**. Then, click **View YAML** next to the desired release.
-
-1. Open the Config custom resource manifest file that you created in the [Add Fields to the Configuration Screen](admin-console-customize-config-screen#add-fields-to-the-configuration-screen) procedure. The Config custom resource manifest file has `kind: Config`.
-
-1. In the Config manifest file, locate the name of the user-input field that you want to map.
-
-    **Example**:
-
-    ```yaml
-    apiVersion: kots.io/v1beta1
-    kind: Config
-    metadata:
-      name: my-application
-    spec:
-      groups:
-        - name: smtp_settings
-          title: SMTP Settings
-          description: Configure SMTP Settings
-          items:
-            - name: smtp_host
-              title: SMTP Hostname
-              help_text: Set SMTP Hostname
-              type: text
-    ```
-
-    In the example above, the field name to map is `smtp_host`.
-
-1. In the same release in the Vendor Portal, open the manifest file where you want to map the value for the field that you selected.
-
-1. In the manifest file, use the ConfigOption template function to map the user-supplied value as a key-value pair. For example:
-
-    ```yaml
-    hostname: '{{repl ConfigOption "smtp_host"}}'
-    ```
-
-    For more information about the ConfigOption template function, see [Config Context](../reference/template-functions-config-context#configoption) in the _Template Functions_ section.
-
-    **Example**:
-
-    The following example shows mapping user-supplied TLS certificate and TLS private key files to the `tls.crt` and `tls.key` keys in a Secret custom resource manifest file.
-
-    For more information about working with TLS secrets, including a strategy for re-using the certificates uploaded for the Admin Console itself, see the [Configuring Cluster Ingress](packaging-ingress) example.
- - ```yaml - apiVersion: v1 - kind: Secret - metadata: - name: tls-secret - type: kubernetes.io/tls - data: - tls.crt: '{{repl ConfigOption "tls_certificate_file" }}' - tls.key: '{{repl ConfigOption "tls_private_key_file" }}' - ``` - -1. Save and promote the release to a development environment to test your changes. - -### Map Values to a Helm Chart - -The `values.yaml` file in a Helm chart defines parameters that are specific to each environment in which the chart will be deployed. With Replicated KOTS, your users provide these values through the configuration screen in the Admin Console. You customize the configuration screen based on the required and optional configuration fields that you want to expose to your users. - -To map the values that your users provide in the Admin Console configuration screen to your Helm chart `values.yaml` file, you create a HelmChart custom resource. - -For a tutorial that shows how to set values in a sample Helm chart during installation with KOTS, see [Set Helm Chart Values with KOTS](/vendor/tutorial-config-setup). - -To map user inputs from the configuration screen to the `values.yaml` file: - -1. In the [Vendor Portal](https://vendor.replicated.com/apps), click **Releases**. Then, click **View YAML** next to the desired release. - -1. Open the Config custom resource manifest file that you created in the [Add Fields to the Configuration Screen](admin-console-customize-config-screen#add-fields-to-the-configuration-screen) procedure. The Config custom resource manifest file has `kind: Config`. - -1. In the Config manifest file, locate the name of the user-input field that you want to map. - - **Example**: - - ```yaml - apiVersion: kots.io/v1beta1 - kind: Config - metadata: - name: my-application - spec: - groups: - - name: smtp_settings - title: SMTP Settings - description: Configure SMTP Settings - items: - - name: smtp_host - title: SMTP Hostname - help_text: Set SMTP Hostname - type: text - ``` - - In the example above, the field name to map is `smtp_host`. - -1. In the same release, create a HelmChart custom resource manifest file. A HelmChart custom resource manifest file has `kind: HelmChart`. - - For more information about the HelmChart custom resource, see [HelmChart](../reference/custom-resource-helmchart) in the _Custom Resources_ section. - -1. In the HelmChart manifest file, copy and paste the name of the property from your `values.yaml` file that corresponds to the field that you selected from the Config manifest file under `values`: - - ```yaml - values: - HELM_VALUE_KEY: - ``` - Replace `HELM_VALUE_KEY` with the property name from the `values.yaml` file. - -1. Use the ConfigOption template function to set the property from the `values.yaml` file equal to the corresponding configuration screen field: - - ```yaml - values: - HELM_VALUE_KEY: '{{repl ConfigOption "CONFIG_SCREEN_FIELD_NAME" }}' - ``` - Replace `CONFIG_SCREEN_FIELD_NAME` with the name of the field that you created in the Config custom resource. - - For more information about the KOTS ConfigOption template function, see [Config Context](../reference/template-functions-config-context#configoption) in the _Template Functions_ section. - - **Example:** - - ```yaml - apiVersion: kots.io/v1beta1 - kind: HelmChart - metadata: - name: samplechart - spec: - chart: - name: samplechart - chartVersion: 3.1.7 - helmVersion: v3 - useHelmInstall: true - values: - hostname: '{{repl ConfigOption "smtp_host" }}' - ``` - -1. 
Save and promote the release to a development environment to test your changes. - -================ -File: docs/vendor/custom-domains-using.md -================ -# Using Custom Domains - -This topic describes how to use the Replicated Vendor Portal to add and manage custom domains to alias the Replicated registry, the Replicated proxy registry, the Replicated app service, and the download portal. - -For information about adding and managing custom domains with the Vendor API v3, see the [customHostnames](https://replicated-vendor-api.readme.io/reference/createcustomhostname) section in the Vendor API v3 documentation. - -For an overview about custom domains and limitations, see [About Custom Domains](custom-domains). - -## Configure a Custom Domain - -Before you assign a custom domain for a registry or the download portal, you must first configure and verify the ownership and TLS certificate. - -To add and configure a custom domain: - -1. In the [Vendor Portal](https://vendor.replicated.com), go to **Custom Domains**. - -1. In the **Add custom domain** dropdown, select the target Replicated endpoint. - - The **Configure a custom domain** wizard opens. - - <img src="/images/custom-domains-download-configure.png" alt="custom domain wizard" width="500"/> - - [View a larger version of this image](/images/custom-domains-download-configure.png) - -1. For **Domain**, enter the custom domain. Click **Save & continue**. - -1. For **Create CNAME**, copy the text string and use it to create a CNAME record in your DNS account. Click **Continue**. - -1. For **Verify ownership**, copy the text string and use it to create a TXT record in your DNS account if displayed. If a TXT record is not displayed, ownership will be validated automatically using an HTTP token. Click **Validate & continue**. - - Your changes can take up to 24 hours to propagate. - -1. For **TLS cert creation verification**, copy the text string and use it to create a TXT record in your DNS account if displayed. If a TXT record is not displayed, ownership will be validated automatically using an HTTP token. Click **Validate & continue**. - - Your changes can take up to 24 hours to propagate. - - :::note - If you set up a [CAA record](https://letsencrypt.org/docs/caa/) for this hostname, you must include all Certificate Authorities (CAs) that Cloudflare partners with. The following CAA records are required to ensure proper certificate issuance and renewal: - - ```dns - @ IN CAA 0 issue "letsencrypt.org" - @ IN CAA 0 issue "pki.goog; cansignhttpexchanges=yes" - @ IN CAA 0 issue "ssl.com" - @ IN CAA 0 issue "amazon.com" - @ IN CAA 0 issue "cloudflare.com" - @ IN CAA 0 issue "google.com" - ``` - - Failing to include any of these CAs might prevent certificate issuance or renewal, which can result in downtime for your customers. For additional security, you can add an IODEF record to receive notifications about certificate requests: - - ```dns - @ IN CAA 0 iodef "mailto:your-security-team@example.com" - ``` - ::: - -1. For **Use Domain**, to set the new domain as the default, click **Yes, set as default**. Otherwise, click **Not now**. - - :::note - Replicated recommends that you do _not_ set a domain as the default until you are ready for it to be used by customers. - ::: - -The Vendor Portal marks the domain as **Configured** after the verification checks for ownership and TLS certificate creation are complete. 
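-
-For reference, the records created by this procedure follow the general shape below. This is an illustrative sketch only: the hostname is a placeholder, and the actual record names and values are the ones provided by the wizard:
-
-```dns
-; Hypothetical example: alias the Replicated registry with a CNAME.
-registry.example.com.    IN CNAME registry.replicated.com.
-; Ownership and TLS verification use TXT records whose names and
-; values are copied from the wizard.
-<txt-name-from-wizard>   IN TXT   "<txt-value-from-wizard>"
-```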
-
-## Use Custom Domains
-
-After you configure one or more custom domains in the Vendor Portal, you assign a custom domain by setting it as the default for all channels and customers or by assigning it to an individual release channel.
-
-### Set a Default Domain
-
-Setting a default domain is useful for ensuring that the same domain is used across channels for all your customers.
-
-When you set a custom domain as the default, it is used by default for all new releases promoted to any channel, as long as the channel does not have a different domain assigned in its channel settings.
-
-Only releases that are promoted to a channel _after_ you set a default domain use the new default domain. Any existing releases that were promoted before you set the default continue to use the same domain that they used previously.
-
-To set a custom domain as the default:
-
-1. In the Vendor Portal, go to **Custom Domains**.
-
-1. Next to the target domain, click **Set as default**.
-
-1. In the confirmation dialog that opens, click **Yes, set as default**.
-
-### Assign a Domain to a Channel {#channel-domain}
-
-You can assign a domain to an individual channel by editing the channel settings. When you specify a domain in the channel settings, new releases promoted to the channel use the selected domain even if there is a different domain set as the default on the **Custom Domains** page.
-
-Assigning a domain to a release channel is useful when you need to override either the default Replicated domain or a default custom domain for a specific channel. For example:
-* You need to use a different domain for releases promoted to your Beta and Stable channels.
-* You need to test a domain in a development environment before you set the domain as the default for all channels.
-
-To assign a custom domain to a channel:
-
-1. In the Vendor Portal, go to **Channels** and click the settings icon for the target channel.
-
-1. Under **Custom domains**, in the drop-down for the target Replicated endpoint, select the domain to use for the channel. For more information about channel settings, see [Settings](releases-about#settings) in _About Channels and Releases_.
-
-    <img alt="channel settings dialog" src="/images/channel-settings.png" width="500px"/>
-
-    [View a larger version of this image](/images/channel-settings.png)
-
-## Reuse a Custom Domain for Another Application
-
-If you have configured a custom domain for one application, you can reuse the custom domain for another application in the same team without going through the ownership and TLS certificate verification process again.
-
-To reuse a custom domain for another application:
-
-1. In the Vendor Portal, select the application from the dropdown list.
-
-1. Click **Custom Domains**.
-
-1. In the section for the target endpoint, click **Add your first custom domain** for your first domain, or click **Add new domain** for additional domains.
-
-    The **Configure a custom domain** wizard opens.
-
-1. In the text box, enter the custom domain name that you want to reuse. Click **Save & continue**.
-
-    The last page of the wizard opens because the custom domain was verified previously.
-
-1. Do one of the following:
-
-    - Click **Set as default**. In the confirmation dialog that opens, click **Yes, set as default**.
-
-    - Click **Not now**. You can come back later to set the domain as the default.
The Vendor Portal shows that the domain has a **Configured** status because it was configured for a previous application, though it is not yet assigned as the default for this application.
-
-
-## Remove a Custom Domain
-
-You can remove a custom domain at any time, but you should plan the transition so that you do not break any existing installations or documentation.
-
-Removing a custom domain for the Replicated registry, proxy registry, or Replicated app service will break existing installations that use the custom domain. Existing installations need to be upgraded to a version that does not use the custom domain before it can be removed safely.
-
-If you remove a custom domain for the download portal, it is no longer accessible using the custom URL. You will need to point customers to an updated URL.
-
-To remove a custom domain:
-
-1. Log in to the [Vendor Portal](https://vendor.replicated.com) and click **Custom Domains**.
-
-1. Verify that the domain is neither set as the default nor in use on any channels. You can edit the domains in use on a channel in the channel settings. For more information, see [Settings](releases-about#settings) in _About Channels and Releases_.
-
-    :::important
-    When you remove a registry or Replicated app service custom domain, any installations that reference that custom domain will break. Ensure that the custom domain is no longer in use before you remove it from the Vendor Portal.
-    :::
-
-1. Click **Remove** next to the unused domain in the list, and then click **Yes, remove domain**.
- **Download Portal:** The Download Portal can be used to share customer license files, air gap bundles, and so on. By default, the Download Portal uses the domain `get.replicated.com`. We suggest using a CNAME such as `portal.{your app name}.com` or `enterprise.{your app name}.com`.
-
-## Limitations
-
-Using custom domains has the following limitations:
-
-- A single custom domain cannot be used for multiple endpoints. For example, a single domain can map to `registry.replicated.com` for any number of applications, but cannot map to both `registry.replicated.com` and `proxy.replicated.com`, even if the applications are different.
-
-- Custom domains cannot be used to alias api.replicated.com (legacy customer-facing APIs) or kURL.
-
-- Multiple custom domains can be configured, but only one custom domain can be the default for each Replicated endpoint. All configured custom domains work whether or not they are the default.
-
-- A particular custom domain can only be used by one team.
-
-================
-File: docs/vendor/custom-metrics.md
-================
-# Configuring Custom Metrics (Beta)
-
-This topic describes how to configure an application to send custom metrics to the Replicated Vendor Portal.
-
-## Overview
-
-In addition to the built-in insights displayed in the Vendor Portal by default (such as uptime and time to install), you can also configure custom metrics to measure instances of your application running in customer environments. Custom metrics can be collected for application instances running in online or air gap environments.
-
-Custom metrics can be used to generate insights on customer usage and adoption of new features, which can help your team to make more informed prioritization decisions. For example:
-* Decreased or plateaued usage for a customer can indicate a potential churn risk
-* Increased usage for a customer can indicate the opportunity to invest in growth, co-marketing, and upsell efforts
-* Low feature usage and adoption overall can indicate the need to invest in usability, discoverability, documentation, education, or in-product onboarding
-* High usage volume for a customer can indicate that the customer might need help in scaling their instance infrastructure to keep up with projected usage
-
-## How the Vendor Portal Collects Custom Metrics
-
-The Vendor Portal collects custom metrics through the Replicated SDK that is installed in the cluster alongside the application.
-
-The SDK exposes an in-cluster API where you can configure your application to POST metric payloads. When an application instance sends data to the API, the SDK sends the data (including any custom and built-in metrics) to the Replicated app service. The app service is located at `replicated.app` or at your custom domain.
-
-If any values in the metric payload are different from the current values for the instance, then a new event is generated and displayed in the Vendor Portal. For more information about how the Vendor Portal generates events, see [How the Vendor Portal Generates Events and Insights](/vendor/instance-insights-event-data#about-events) in _About Instance and Event Data_.
-
-The following diagram demonstrates how a custom `activeUsers` metric is sent to the in-cluster API and ultimately displayed in the Vendor Portal, as described above:
-
-<img alt="Custom metrics flowing from customer environment to Vendor Portal" src="/images/custom-metrics-flow.png" width="800px"/>
-
-[View a larger version of this image](/images/custom-metrics-flow.png)
-
-## Requirements
-
-To support the collection of custom metrics in online and air gap environments, the Replicated SDK version 1.0.0-beta.12 or later must be running in the cluster alongside the application instance.
-
-The `PATCH` and `DELETE` methods described below are available in the Replicated SDK version 1.0.0-beta.23 or later.
-
-For more information about the Replicated SDK, see [About the Replicated SDK](/vendor/replicated-sdk-overview).
-
-If you have any customers running earlier versions of the SDK, Replicated recommends that you add logic to your application to gracefully handle a 404 from the in-cluster APIs.
-
-## Limitations
-
-Custom metrics have the following limitations:
-
-* The label that is used to display metrics in the Vendor Portal cannot be customized. Metrics are sent to the Vendor Portal with the same name that is sent in the `POST` or `PATCH` payload. The Vendor Portal then converts camel case to title case: for example, `activeUsers` is displayed as **Active Users**.
-
-* The in-cluster APIs accept only JSON scalar values for metrics. Any requests containing nested objects or arrays are rejected.
-
-* When using the `POST` method, any existing keys that are not included in the payload will be deleted. To create new metrics or update existing ones without sending the entire dataset, use the `PATCH` method instead.
-
-## Configure Custom Metrics
-
-You can configure your application to `POST` or `PATCH` a set of metrics as key-value pairs to the API that is running in the cluster alongside the application instance.
-
-To remove an existing custom metric, use the `DELETE` endpoint with the custom metric name.
-
-The Replicated SDK provides an in-cluster API custom metrics endpoint at `http://replicated:3000/api/v1/app/custom-metrics`.
-
-**Example:**
-
-```bash
-POST http://replicated:3000/api/v1/app/custom-metrics
-```
-
-```json
-{
-  "data": {
-    "num_projects": 5,
-    "weekly_active_users": 10
-  }
-}
-```
-
-```bash
-PATCH http://replicated:3000/api/v1/app/custom-metrics
-```
-
-```json
-{
-  "data": {
-    "num_projects": 54,
-    "num_error": 2
-  }
-}
-```
-
-```bash
-DELETE http://replicated:3000/api/v1/app/custom-metrics/num_projects
-```
-
-### POST vs PATCH
-
-The `POST` method will always replace the existing data with the most recent payload received. Any existing keys not included in the most recent payload will still be accessible in the instance events API, but they will no longer appear in the instance summary.
-
-The `PATCH` method will accept partial updates, or add new custom metrics if a key:value pair that does not currently exist is passed.
-
-In most cases, using the `PATCH` method is recommended.
-
-For example, if a component of your application sends the following via the `POST` method:
-
-```json
-{
-  "numProjects": 5,
-  "activeUsers": 10
-}
-```
-
-Then, the component later sends the following, also via the `POST` method:
-
-```json
-{
-  "activeUsers": 10,
-  "usingCustomReports": false
-}
-```
-
-The instance detail will show `Active Users: 10` and `Using Custom Reports: false`, which represents the most recent payload received.
The previously sent `numProjects` value is discarded from the instance summary and is available in the instance events payload. To preserve `numProjects` from the initial payload and upsert `usingCustomReports` and `activeUsers`, use the `PATCH` method instead of `POST` on subsequent calls to the endpoint.
-
-For example, if a component of your application initially sends the following via the `POST` method:
-
-```json
-{
-  "numProjects": 5,
-  "activeUsers": 10
-}
-```
-
-Then, the component later sends the following via the `PATCH` method:
-```json
-{
-  "usingCustomReports": false
-}
-```
-
-The instance detail will show `Num Projects: 5`, `Active Users: 10`, and `Using Custom Reports: false`, which represents the merged and upserted payload.
-
-### NodeJS Example
-
-The following example shows a NodeJS application that sends metrics on a weekly interval to the in-cluster API exposed by the SDK:
-
-```javascript
-async function sendMetrics(db) {
-
-  const projectsQuery = "SELECT COUNT(*) as num_projects from projects";
-  const numProjects = (await db.getConnection().queryOne(projectsQuery)).num_projects;
-
-  const usersQuery =
-    "SELECT COUNT(*) as active_users from users where DATEDIFF('day', last_active, CURRENT_TIMESTAMP) < 7";
-  const activeUsers = (await db.getConnection().queryOne(usersQuery)).active_users;
-
-  const metrics = { data: { numProjects, activeUsers }};
-
-  // the SDK's in-cluster custom metrics endpoint is served over HTTP
-  const res = await fetch('http://replicated:3000/api/v1/app/custom-metrics', {
-    method: 'POST',
-    headers: {
-      "Content-Type": "application/json",
-    },
-    body: JSON.stringify(metrics),
-  });
-  if (res.status !== 200) {
-    throw new Error(`Failed to send metrics: ${res.statusText}`);
-  }
-}
-
-async function startMetricsLoop(db) {
-
-  const ONE_WEEK_IN_MS = 1000 * 60 * 60 * 24 * 7
-
-  // send metrics once on startup
-  await sendMetrics(db)
-    .catch((e) => { console.log("error sending metrics: ", e) });
-
-  // schedule weekly metrics payload
-
-  setInterval( () => {
-    sendMetrics(db)
-      .catch((e) => { console.log("error sending metrics: ", e) });
-  }, ONE_WEEK_IN_MS);
-}
-
-startMetricsLoop(getDatabase());
-```
-
-## View Custom Metrics
-
-You can view the custom metrics that you configure for each active instance of your application on the **Instance Details** page in the Vendor Portal.
-
-The following shows an example of an instance with custom metrics:
-
-<img alt="Custom Metrics section of Instance details page" src="/images/instance-custom-metrics.png" width="700px"/>
-
-[View a larger version of this image](/images/instance-custom-metrics.png)
-
-As shown in the image above, the **Custom Metrics** section of the **Instance Details** page includes the following information:
-* The timestamp when the custom metric data was last updated.
-* Each custom metric that you configured, along with the most recent value for the metric.
-* A time-series graph depicting the historical data trends for the selected metric.
-
-Custom metrics are also included in the **Instance activity** stream of the **Instance Details** page. For more information, see [Instance Activity](/vendor/instance-insights-details#instance-activity) in _Instance Details_.
-
-## Export Custom Metrics
-
-You can use the Vendor API v3 `/app/{app_id}/events` endpoint to programmatically access historical time series data containing instance level events, including any custom metrics that you have defined. For more information about the endpoint, see [Export Customer and Instance Data](/vendor/instance-data-export).
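-
-For reference, the following is a minimal sketch of calling the events endpoint with `curl`, following the same conventions as the other Vendor API v3 examples in this documentation. The `APP_ID` value is a placeholder, and any supported query parameters are described in the linked endpoint documentation:
-
-```bash
-# Hypothetical sketch: replace APP_ID with your application ID.
-curl --request GET \
-  --url "https://api.replicated.com/vendor/v3/app/APP_ID/events" \
-  --header "Authorization: $REPLICATED_API_TOKEN" \
-  --header 'accept: application/json'
-```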
-
-================
-File: docs/vendor/customer-adoption.md
-================
-# Adoption Report
-
-This topic describes the insights in the **Adoption** section on the Replicated Vendor Portal **Dashboard** page.
-
-## About Adoption Rate
-
-The **Adoption** section on the **Dashboard** provides insights about the rate at which your customers upgrade their instances and adopt the latest versions of your application. As an application vendor, you can use these adoption rate metrics to learn whether your customers are completing upgrades regularly, which is a key indicator of the discoverability and ease of application upgrades.
-
-The Vendor Portal generates adoption rate data from all of your customers' application instances that have checked in during the selected time period. For more information about instance check-ins, see [How the Vendor Portal Collects Instance Data](instance-insights-event-data#about-reporting) in _About Instance and Event Data_.
-
-The following screenshot shows an example of the **Adoption** section on the **Dashboard**:
-
-![Adoption report section on dashboard](/images/customer_adoption_rates.png)
-
-[View a larger version of this image](/images/customer_adoption_rates.png)
-
-As shown in the screenshot above, the **Adoption** report includes a graph and key adoption rate metrics. For more information about how to interpret this data, see [Adoption Graph](#graph) and [Adoption Metrics](#metrics) below.
-
-The **Adoption** report also displays the number of customers assigned to the selected channel and a link to the report that you can share with other members of your team.
-
-You can filter the graph and metrics in the **Adoption** report by:
-* License type (Paid, Trial, Dev, or Community)
-* Time period (the previous month, three months, six months, or twelve months)
-* Release channel to which instance licenses are assigned, such as Stable or Beta
-
-## Adoption Graph {#graph}
-
-The **Adoption** report includes a graph that shows the percent of active instances that are running different versions of your application within the selected time period.
-
-The following shows an example of an adoption rate graph with three months of data:
-
-![Adoption report graph showing three months of data](/images/adoption_rate_graph.png)
-
-[View a larger version of this image](/images/adoption_rate_graph.png)
-
-As shown in the image above, the graph plots the number of active instances in each week in the selected time period, grouped by the version each instance is running. The key to the left of the graph shows the unique color that is assigned to each application version. You can use this color-coding to see at a glance the percent of active instances that were running different versions of your application across the selected time period.
-
-Newer versions enter at the bottom of the area chart, with older versions shown higher up.
-
-You can also hover over a color-coded section in the graph to view the number and percentage of active instances that were running the version in a given period.
-
-If there are no active instances of your application, then the adoption rate graph displays a "No Instances" message.
-
-## Adoption Metrics {#metrics}
-
-The **Adoption** section includes metrics that show how frequently your customers discover and complete upgrades to new versions of your application. It is important that your users adopt new versions of your application so that they have access to the latest features and bug fixes. Additionally, when most of your users are on the latest versions, you can also reduce the number of versions for which you provide support and maintain documentation.
-
-The following shows an example of the metrics in the **Adoption** section:
-
-![Adoption rate metrics](/images/adoption_rate_metrics.png)
-
-[View a larger version of this image](/images/adoption_rate_metrics.png)
-
-As shown in the image above, the **Adoption** section displays the following metrics:
-* Instances on last three versions
-* Unique versions
-* Median relative age
-* Upgrades completed
-
-Based on the time period selected, each metric includes an arrow that shows the change in value compared to the previous period. For example, if the median relative age today is 68 days, the selected time period is three months, and three months ago the median relative age was 55 days, then the metric would show an upward-facing arrow with an increase of 13 days.
-
-The following table describes each metric in the **Adoption** section, including the formula used to calculate its value and the recommended trend for the metric over time:
-
-<table>
-  <tbody>
-    <tr>
-      <th width="25%">Metric</th>
-      <th width="45%">Description</th>
-      <th width="30%">Target Trend</th>
-    </tr>
-    <tr>
-      <td>Instances on last three versions</td>
-      <td>
-        <p>Percent of active instances that are running one of the latest three versions of your application.</p>
-        <p><strong>Formula</strong>: <code>count(instances on last 3 versions) / count(instances)</code></p>
-      </td>
-      <td>Increase towards 100%</td>
-    </tr>
-    <tr>
-      <td>Unique versions</td>
-      <td>
-        <p>Number of unique versions of your application running in active instances.</p>
-        <p><strong>Formula</strong>: <code>count(distinct instance_version)</code></p>
-      </td>
-      <td>Decrease towards less than or equal to three</td>
-    </tr>
-    <tr>
-      <td>Median relative age</td>
-      <td>
-        <p>The <em>relative age</em> of a single instance is the number of days between the date that the instance's version was promoted to the channel and the date when the latest available application version was promoted to the channel.</p>
-        <p><em>Median relative age</em> is the median value across all active instances for the selected time period and channel.</p>
-        <p><strong>Formula</strong>: <code>median(relative_age(instance_version))</code></p>
-      </td>
-      <td><p>Depends on release cadence. For vendors who ship every four to eight weeks, decrease the median relative age towards 60 days or fewer.</p></td>
-    </tr>
-    <tr>
-      <td>Upgrades completed</td>
-      <td>
-        <p>Total number of completed upgrades across active instances for the selected time period and channel.</p>
-        <p>An upgrade is a single version change for an instance. An upgrade is considered complete when the instance deploys the new application version.</p>
-        <p>The instance does <em>not</em> need to become available (as indicated by reaching a Ready state) after deploying the new version for the upgrade to be counted as complete.</p>
-        <p><strong>Formula</strong>: <code>sum(instance.upgrade_count) across all instances</code></p>
-      </td>
-      <td>Increase compared to any previous period, unless you reduce your total number of live instances.</td>
-    </tr>
-  </tbody>
-</table>
-
-================
-File: docs/vendor/customer-reporting.md
-================
-# Customer Reporting
-
-This topic describes the customer and instance data displayed in the **Customers > Reporting** page of the Replicated Vendor Portal.
-
-## About the Customer Reporting Page {#reporting-page}
-
-The **Customers > Reporting** page displays data about the active application instances associated with each customer. The following shows an example of the **Reporting** page:
-
-![Customer reporting page showing two active instances](/images/customer-reporting-page.png)
-
-[View a larger version of this image](/images/customer-reporting-page.png)
-
-As shown in the image above, the **Reporting** page has the following main sections:
-* [Manage Customer](#manage-customer)
-* [Time to Install](#time-to-install)
-* [Download Portal](#download-portal)
-* [Instances](#instances)
-
-### Manage Customer
-
-The **Manage customer** section displays the following information about the customer:
-
-* The customer name
-* The channel the customer is assigned to
-* Details about the customer license:
-  * The license type
-  * The date the license was created
-  * The expiration date of the license
-* The features the customer has enabled, including:
-  * GitOps
-  * Air gap
-  * Identity
-  * Snapshots
-
-In this section, you can also view the Helm CLI installation instructions for the customer and download the customer license.
-
-### Time to Install
-
-If the customer has one or more application instances that have reached a Ready status at least one time, then the **Time to install** section displays _License time to install_ and _Instance time to install_ metrics:
-
-* **License time to install**: The time between when you create the customer license in the Vendor Portal, and when the application instance reaches a Ready status in the customer environment.
-* **Instance time to install**: The time between when the Vendor Portal records the first event for the application instance in the customer environment, and when the instance reaches a Ready status.
-
-A _Ready_ status indicates that all Kubernetes resources for the application are Ready. For example, a Deployment resource is considered Ready when the number of Ready replicas equals the total desired number of replicas. For more information, see [Enabling and Understanding Application Status](insights-app-status).
-
-If the customer has no application instances that have ever reported a Ready status, or if you have not configured your application to deliver status data to the Vendor Portal, then the **Time to install** section displays a **No Ready Instances** message.
-
-If the customer has more than one application instance that has previously reported a Ready status, then the **Time to install** section displays metrics for the instance that most recently reported a Ready status for the first time.
-
-For example, Instance A reported its first Ready status at 9:00 AM today. Instance B reported its first Ready status at 8:00 AM today, moved to a Degraded status, then reported a Ready status again at 10:00 AM today. In this case, the Vendor Portal displays the time to install metrics for Instance A, which reported its _first_ Ready status most recently.
-
-For more information about how to interpret the time to install metrics, see [Time to Install](instance-insights-details#time-to-install) in _Instance Details_.
-
-### Download Portal
-
-From the **Download portal** section, you can:
-* Manage the password for the Download Portal
-* Access the unique Download Portal URL for the customer
-
-You can use the Download Portal to give your customers access to the files they need to install your application, such as their license file or air gap bundles. For more information, see [Downloading Assets from the Download Portal](releases-share-download-portal).
-
-### Instances
-
-The **Instances** section displays details about the active application instances associated with the customer.
-
-You can click any of the rows in the **Instances** section to open the **Instance details** page. The **Instance details** page displays additional event data and computed metrics to help you understand the performance and status of each active application instance. For more information, see [Instance Details](instance-insights-details).
-
-The following shows an example of a row for an active instance in the **Instances** section:
-
-![Row in the Instances section](/images/instance-row.png)
-[View a larger version of this image](/images/instance-row.png)
-
-The **Instances** section displays the following details about each active instance:
-* The first seven characters of the instance ID.
-* The status of the instance. Possible statuses are Missing, Unavailable, Degraded, Ready, and Updating. For more information, see [Enabling and Understanding Application Status](insights-app-status).
-* The application version.
-* Details about the cluster where the instance is installed, including:
-  * The Kubernetes distribution for the cluster, if applicable.
-  * The Kubernetes version running in the cluster.
-  * Whether the instance is installed in a Replicated kURL cluster.
-  * (kURL Clusters Only) The number of nodes ready in the cluster.
-  * (KOTS Only) The KOTS version running in the cluster.
-  * The Replicated SDK version running in the cluster.
-  * The cloud provider and region, if applicable.
-* Instance uptime data, including:
-  * The timestamp of the last recorded check-in for the instance. For more information about what triggers an instance check-in, see [How the Vendor Portal Collects Instance Data](instance-insights-event-data#about-reporting) in _About Instance and Event Data_.
-  * An uptime graph of the previous two weeks. For more information about how the Vendor Portal determines uptime, see [Instance Uptime](instance-insights-details#instance-uptime) in _Instance Details_.
-  * The uptime ratio in the previous two weeks.
-
-================
-File: docs/vendor/data-availability.md
-================
-# Data Availability and Continuity
-
-Replicated uses redundancy and a cloud-native architecture in support of availability and continuity of vendor data.
-
-## Data Storage Architecture
-
-To ensure availability and continuity of necessary vendor data, Replicated uses a cloud-native architecture. This cloud-native architecture includes clustering and network redundancies to eliminate single points of failure.
-
-Replicated stores vendor data in various Amazon Web Services (AWS) S3 buckets and multiple databases. Data stored in the AWS S3 buckets includes registry images and air gap build data.
-
-The following diagram shows the flow of air gap build data and registry images from vendors to enterprise customers.
-
-![Architecture diagram of Replicated vendor data storage](/images/data-storage.png)
-
-[View a larger version of this image](/images/data-storage.png)
-
-As shown in the diagram above, vendors push application images to an image registry. Replicated stores this registry image data in AWS S3 buckets, which are logically isolated by Vendor Portal team. Instances of the vendor's application that are installed by enterprise customers pull data from the image registry.
-
-For more information about how Replicated secures images pushed to the Replicated registry, see [Replicated Registry Security](packaging-private-registry-security).
-
-The diagram also shows how enterprise customers access air gap build data from the customer download portal. Replicated stores this air gap build data in AWS S3 buckets.
-
-## Data Recovery
-
-Our service provider's platform automatically restores customer applications and databases in the case of an outage. The provider's platform is designed to dynamically deploy applications within its cloud, monitor for failures, and recover failed platform components, including customer applications and databases.
-
-For more information, see the [Replicated Security White Paper](https://www.replicated.com/downloads/Replicated-Security-Whitepaper.pdf).
-
-## Data Availability
-
-Replicated availability is continuously monitored. For availability reports, see https://status.replicated.com.
-
-## Offsite Data Backup Add-on
-
-For additional data redundancy, an offsite data backup add-on is available to copy customer data to a separate cloud provider. This add-on mitigates the risk of data loss by our primary service provider. For more information, see [Offsite Data Backup](offsite-backup).
-
-================
-File: docs/vendor/database-config-adding-options.md
-================
-# About Managing Stateful Services
-
-This topic provides recommendations for managing stateful services that you install into existing clusters.
-
-## Preflight Checks for Stateful Services
-
-If you expect to also install stateful services into existing clusters, you will likely want to expose [preflight analyzers that check for the existence of a storage class](https://troubleshoot.sh/reference/analyzers/storage-class/).
-
-If you are allowing end users to provide connection details for external databases, you can often use a troubleshoot.sh built-in [collector](https://troubleshoot.sh/docs/collect/) and [analyzer](https://troubleshoot.sh/docs/analyze/) to validate the connection details for [Postgres](https://troubleshoot.sh/docs/analyze/postgresql/), [Redis](https://troubleshoot.sh/docs/collect/redis/), and many other common datastores. These can be included in both `Preflight` and `SupportBundle` specifications.
-
-## About Adding Persistent Datastores
-
-You can integrate persistent stores, such as databases, queues, and caches. There are options to give an end user, such as embedding an instance alongside the application or connecting the application to an external instance that they will manage.
-
-For an example of integrating persistent datastores, see [Example: Adding Database Configuration Options](tutorial-adding-db-config).
-
-================
-File: docs/vendor/embedded-disaster-recovery.mdx
-================
-# Disaster Recovery for Embedded Cluster (Alpha)
-
-This topic describes the disaster recovery feature for Replicated Embedded Cluster, including how to enable disaster recovery for your application. It also describes how end users can configure disaster recovery in the Replicated KOTS Admin Console and restore from a backup.
-
-:::important
-Embedded Cluster disaster recovery is an Alpha feature. This feature is subject to change, including breaking changes. To get access to this feature, reach out to Alex Parker at [alexp@replicated.com](mailto:alexp@replicated.com).
-:::
-
-:::note
-Embedded Cluster does not support backup and restore with the KOTS snapshots feature.
For more information about using snapshots for existing cluster installations with KOTS, see [About Backup and Restore with Snapshots](/vendor/snapshots-overview). -::: - -## Overview - -The Embedded Cluster disaster recovery feature allows your customers to take backups from the Admin Console and perform restores from the command line. Disaster recovery for Embedded Cluster is implemented with Velero. For more information about Velero, see the [Velero](https://velero.io/docs/latest/) documentation. - -The backups that your customers take from the Admin Console will include both the Embedded Cluster infrastructure and the application resources that you specify. - -The Embedded Cluster infrastructure that is backed up includes components such as the KOTS Admin Console and the built-in registry that is deployed for air gap installations. No configuration is required to include Embedded Cluster infrastructure in backups. Vendors specify the application resources to include in backups by configuring a Velero Backup resource in the application release. - -## Requirements - -Embedded Cluster disaster recovery has the following requirements: - -* The disaster recovery feature flag must be enabled for your account. To get access to disaster recovery, reach out to Alex Parker at [alexp@replicated.com](mailto:alexp@replicated.com). -* Embedded Cluster version 1.22.0 or later -* Backups must be stored in S3-compatible storage - -## Limitations and Known Issues - -Embedded Cluster disaster recovery has the following limitations and known issues: - -* During a restore, the version of the Embedded Cluster installation assets must match the version of the application in the backup. So if version 0.1.97 of your application was backed up, the Embedded Cluster installation assets for 0.1.97 must be used to perform the restore. Use `./APP_SLUG version` to check the version of the installation assets, where `APP_SLUG` is the unique application slug. For example: - - <img alt="version command" src="/images/ec-version-command.png" width="450px"/> - - [View a larger version of this image](/images/ec-version-command.png) - -* Any Helm extensions included in the `extensions` field of the Embedded Cluster Config are _not_ included in backups. Helm extensions are reinstalled as part of the restore process. To include Helm extensions in backups, configure the Velero Backup resource to include the extensions using namespace-based or label-based selection. For more information, see [Configure the Velero Custom Resources](#config-velero-resources) below. - -* Users can only restore from the most recent backup. - -* Velero is installed only during the initial installation process. Enabling the disaster recovery license field for customers after they have already installed will not do anything. - -* If the `--admin-console-port` flag was used during install to change the port for the Admin Console, note that during a restore the Admin Console port will be used from the backup and cannot be changed. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install). - -## Configure Disaster Recovery - -This section describes how to configure disaster recovery for Embedded Cluster installations. It also describes how to enable access to the disaster recovery feature on a per-customer basis. 
- -### Configure the Velero Custom Resources {#config-velero-resources} - -This section describes how to set up Embedded Cluster disaster recovery for your application by configuring Velero [Backup](https://velero.io/docs/latest/api-types/backup/) and [Restore](https://velero.io/docs/latest/api-types/restore/) custom resources in a release. - -To configure Velero Backup and Restore custom resources for Embedded Cluster disaster recovery: - -1. In a new release containing your application files, add a Velero Backup resource. In the Backup resource, use namespace-based or label-based selection to indicate the application resources that you want to be included in the backup. For more information, see [Backup API Type](https://velero.io/docs/latest/api-types/backup/) in the Velero documentation. - - :::important - If you use namespace-based selection to include all of your application resources deployed in the `kotsadm` namespace, ensure that you exclude the Replicated resources that are also deployed in the `kotsadm` namespace. Because the Embedded Cluster infrastructure components are always included in backups automatically, this avoids duplication. - ::: - - **Example:** - - The following Backup resource uses namespace-based selection to include application resources deployed in the `kotsadm` namespace: - - ```yaml - apiVersion: velero.io/v1 - kind: Backup - metadata: - name: backup - spec: - # Back up the resources in the kotsadm namespace - includedNamespaces: - - kotsadm - orLabelSelectors: - - matchExpressions: - # Exclude Replicated resources from the backup - - { key: kots.io/kotsadm, operator: NotIn, values: ["true"] } - ``` - -1. In the same release, add a Velero Restore resource. In the `backupName` field of the Restore resource, include the name of the Backup resource that you created. For more information, see [Restore API Type](https://velero.io/docs/latest/api-types/restore/) in the Velero documentation. - - **Example**: - - ```yaml - apiVersion: velero.io/v1 - kind: Restore - metadata: - name: restore - spec: - # the name of the Backup resource that you created - backupName: backup - includedNamespaces: - - '*' - ``` - -1. For any image names that you include in your Backup and Restore resources, rewrite the image name using the Replicated KOTS [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry), [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost), and [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace) template functions. This ensures that the image name is rendered correctly during deployment, allowing the image to be pulled from the user's local image registry (such as in air gap installations) or through the Replicated proxy registry. 
-
-    **Example:**
-
-    ```yaml
-    apiVersion: velero.io/v1
-    kind: Restore
-    metadata:
-      name: restore
-    spec:
-      hooks:
-        resources:
-        - name: restore-hook-1
-          includedNamespaces:
-          - kotsadm
-          labelSelector:
-            matchLabels:
-              app: example
-          postHooks:
-          - init:
-              initContainers:
-              - name: restore-hook-init1
-                image:
-                  # Use HasLocalRegistry, LocalRegistryHost, and LocalRegistryNamespace
-                  # to template the image name
-                  registry: '{{repl HasLocalRegistry | ternary LocalRegistryHost "proxy.replicated.com" }}'
-                  repository: '{{repl HasLocalRegistry | ternary LocalRegistryNamespace "proxy/my-app/quay.io/my-org" }}/nginx'
-                  tag: 1.24-alpine
-    ```
-    For more information about how to rewrite image names using the KOTS [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry), [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost), and [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace) template functions, including additional examples, see [Task 1: Rewrite Image Names](helm-native-v2-using#rewrite-image-names) in _Configuring the HelmChart v2 Custom Resource_.
-
-1. If you support air gap installations, add any images that are referenced in your Backup and Restore resources to the `additionalImages` field of the KOTS Application custom resource. This ensures that the images are included in the air gap bundle for the release so they can be used during the backup and restore process in environments with limited or no outbound internet access. For more information, see [additionalImages](/reference/custom-resource-application#additionalimages) in _Application_.
-
-    **Example:**
-
-    ```yaml
-    apiVersion: kots.io/v1beta1
-    kind: Application
-    metadata:
-      name: my-app
-    spec:
-      additionalImages:
-        - elasticsearch:7.6.0
-        - quay.io/orgname/private-image:v1.2.3
-    ```
-
-1. (Optional) Use Velero functionality like [backup](https://velero.io/docs/main/backup-hooks/) and [restore](https://velero.io/docs/main/restore-hooks/) hooks to customize the backup and restore process as needed.
-
-    **Example:**
-
-    A Postgres database might be backed up using `pg_dump` to extract the database into a file as part of a backup hook. It can then be restored using the file in a restore hook:
-
-    ```yaml
-    podAnnotations:
-      backup.velero.io/backup-volumes: backup
-      pre.hook.backup.velero.io/command: '["/bin/bash", "-c", "PGPASSWORD=$POSTGRES_PASSWORD pg_dump -U {{repl ConfigOption "postgresql_username" }} -d {{repl ConfigOption "postgresql_database" }} -h 127.0.0.1 > /scratch/backup.sql"]'
-      pre.hook.backup.velero.io/timeout: 3m
-      post.hook.restore.velero.io/command: '["/bin/bash", "-c", "[ -f \"/scratch/backup.sql\" ] && PGPASSWORD=$POSTGRES_PASSWORD psql -U {{repl ConfigOption "postgresql_username" }} -h 127.0.0.1 -d {{repl ConfigOption "postgresql_database" }} -f /scratch/backup.sql && rm -f /scratch/backup.sql;"]'
-      post.hook.restore.velero.io/wait-for-ready: 'true' # waits for the pod to be ready before running the post-restore hook
-    ```
-
-1. Save and promote the release to a development channel for testing.
-
-### Enable the Disaster Recovery Feature for Your Customers
-
-After configuring disaster recovery for your application, you can enable it on a per-customer basis with the **Allow Disaster Recovery (Alpha)** license field.
-
-To enable disaster recovery for a customer:
-
-1. In the Vendor Portal, go to the [Customers](https://vendor.replicated.com/customers) page and select the target customer.
-
-1. On the **Manage customer** page, under **License options**, enable the **Allow Disaster Recovery (Alpha)** field.
-
-    When your customer installs with Embedded Cluster, Velero will be deployed if the **Allow Disaster Recovery (Alpha)** license field is enabled.
-
-## Take Backups and Restore
-
-This section describes how your customers can configure backup storage, take backups, and restore from backups.
-
-### Configure Backup Storage and Take Backups in the Admin Console
-
-Customers with the **Allow Disaster Recovery (Alpha)** license field can configure their backup storage location and take backups from the Admin Console.
-
-To configure backup storage and take backups:
-
-1. After installing the application and logging in to the Admin Console, click the **Disaster Recovery** tab at the top of the Admin Console.
-
-1. For the desired S3-compatible backup storage location, enter the bucket, prefix (optional), access key ID, access key secret, endpoint, and region. Click **Update storage settings**.
-
-    <img alt="backup storage settings" src="/images/dr-backup-storage-settings.png" width="400px"/>
-
-    [View a larger version of this image](/images/dr-backup-storage-settings.png)
-
-1. (Optional) From this same page, configure scheduled backups and a retention policy for backups.
-
-    <img src="/images/dr-scheduled-backups.png" width="400px" alt="scheduled backups"/>
-
-    [View a larger version of this image](/images/dr-scheduled-backups.png)
-
-1. In the **Disaster Recovery** submenu, click **Backups**. Backups can be taken from this screen.
-
-    <img src="/images/dr-backups.png" alt="backups page" width="600px"/>
-
-    [View a larger version of this image](/images/dr-backups.png)
-
-### Restore from a Backup
-
-To restore from a backup:
-
-1. SSH onto a new machine where you want to restore from a backup.
-
-1. Download the Embedded Cluster installation assets for the version of the application that was included in the backup. You can find the command for downloading Embedded Cluster installation assets in the **Embedded Cluster install instructions dialog** for the customer. For more information, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded).
-
-    :::note
-    The version of the Embedded Cluster installation assets must match the version that is in the backup. For more information, see [Limitations and Known Issues](#limitations-and-known-issues).
-    :::
-
-1. Run the restore command:
-
-    ```bash
-    sudo ./APP_SLUG restore
-    ```
-    Where `APP_SLUG` is the unique application slug.
-
-    Note the following requirements and guidance for the `restore` command:
-
-    * If the installation is behind a proxy, the same proxy settings provided during install must be provided to the restore command using `--http-proxy`, `--https-proxy`, and `--no-proxy`. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install).
-
-    * If the `--cidr` flag was used during install to set the IP address ranges for Pods and Services, this flag must be provided with the same CIDR during the restore. If this flag is not provided or is provided with a different CIDR, the restore will fail with an error message telling you to rerun with the appropriate value. However, it will take some time before that error occurs. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install).
- - * If the `--local-artifact-mirror-port` flag was used during install to change the port for the Local Artifact Mirror (LAM), you can optionally use the `--local-artifact-mirror-port` flag to choose a different LAM port during restore. For example, `restore --local-artifact-mirror-port=50000`. If no LAM port is provided during restore, the LAM port that was supplied during installation will be used. For more information, see [Embedded Cluster Install Command Options](/reference/embedded-cluster-install). - - You will be guided through the process of restoring from a backup. - -1. When prompted, enter the information for the backup storage location. - - ![Restore prompts on the command line](/images/dr-restore.png) - [View a larger version of this image](/images/dr-restore.png) - -1. When prompted, confirm that you want to restore from the detected backup. - - ![Restore from detected backup prompt on the command line](/images/dr-restore-from-backup-confirmation.png) - [View a larger version of this image](/images/dr-restore-from-backup-confirmation.png) - - After some time, the Admin console URL is displayed: - - ![Restore from detected backup prompt on the command line](/images/dr-restore-admin-console-url.png) - [View a larger version of this image](/images/dr-restore-admin-console-url.png) - -1. (Optional) If the cluster should have multiple nodes, go to the Admin Console to get a join command and join additional nodes to the cluster. For more information, see [Managing Multi-Node Clusters with Embedded Cluster](/enterprise/embedded-manage-nodes). - -1. Type `continue` when you are ready to proceed with the restore process. - - ![Type continue when you are done adding nodes](/images/dr-restore-continue.png) - [View a larger version of this image](/images/dr-restore-continue.png) - - After some time, the restore process completes. - - If the `restore` command is interrupted during the restore process, you can resume by rerunning the `restore` command and selecting to resume the previous restore. This is useful if your SSH session is interrupted during the restore. - -================ -File: docs/vendor/embedded-overview.mdx -================ -import EmbeddedCluster from "../partials/embedded-cluster/_definition.mdx" -import Requirements from "../partials/embedded-cluster/_requirements.mdx" -import EmbeddedClusterPortRequirements from "../partials/embedded-cluster/_port-reqs.mdx" -import HaArchitecture from "../partials/embedded-cluster/_multi-node-ha-arch.mdx" - -# Embedded Cluster Overview - -This topic provides an introduction to Replicated Embedded Cluster, including a description of the built-in extensions installed by Embedded Cluster, an overview of the Embedded Cluster single-node and multi-node architecture, and requirements and limitations. - -:::note -If you are instead looking for information about creating Kubernetes Installers with Replicated kURL, see the [Replicated kURL](/vendor/packaging-embedded-kubernetes) section. -::: - -## Overview - -<EmbeddedCluster/> - -## Architecture - -This section describes the Embedded Cluster architecture, including the built-in extensions deployed by Embedded Cluster. 
-
-### Single-Node Architecture
-
-The following diagram shows the architecture of a single-node Embedded Cluster installation for an application named Gitea:
-
-![Embedded Cluster single-node architecture](/images/embedded-architecture-single-node.png)
-
-[View a larger version of this image](/images/embedded-architecture-single-node.png)
-
-As shown in the diagram above, the user downloads the Embedded Cluster installation assets as a `.tgz` in their installation environment. These installation assets include the Embedded Cluster binary, the user's license file, and (for air gap installations) an air gap bundle containing the images needed to install and run the release in an environment with limited or no outbound internet access.
-
-When the user runs the Embedded Cluster install command, the Embedded Cluster binary first installs the k0s cluster as a systemd service.
-
-After all the Kubernetes components for the cluster are available, the Embedded Cluster binary then installs the Embedded Cluster built-in extensions. For more information about these extensions, see [Built-In Extensions](#built-in-extensions) below.
-
-Any Helm extensions that were included in the [`extensions`](/reference/embedded-config#extensions) field of the Embedded Cluster Config are also installed. The namespace or namespaces where Helm extensions are installed are defined by the vendor in the Embedded Cluster Config.
-
-Finally, Embedded Cluster also installs the Local Artifact Mirror (LAM). In air gap installations, LAM is used to store and update images.
-
-### Multi-Node Architecture
-
-The following diagram shows the architecture of a multi-node Embedded Cluster installation:
-
-![Embedded Cluster multi-node architecture](/images/embedded-architecture-multi-node.png)
-
-[View a larger version of this image](/images/embedded-architecture-multi-node.png)
-
-As shown in the diagram above, in multi-node installations, the Embedded Cluster Operator, KOTS, and the image registry for air gap installations are all installed on one controller node.
-
-For installations that include disaster recovery with Velero, the Velero Node Agent runs on each node in the cluster. The Node Agent is a Kubernetes DaemonSet that performs backup and restore tasks such as creating snapshots and transferring data during restores.
-
-Additionally, any Helm [`extensions`](/reference/embedded-config#extensions) that you include in the Embedded Cluster Config are installed in the cluster depending on the given chart and how it is configured to be deployed.
-
-### Multi-Node Architecture with High Availability
-
-:::note
-High availability (HA) for multi-node installations with Embedded Cluster is Alpha and is not enabled by default. For more information about enabling HA, see [Enable High Availability for Multi-Node Clusters (Alpha)](/enterprise/embedded-manage-nodes#ha).
-:::
-
-<HaArchitecture/>
-
-## Built-In Extensions {#built-in-extensions}
-
-Embedded Cluster includes several built-in extensions. The built-in extensions provide capabilities such as application management and storage. Each built-in extension is installed in its own namespace.
-
-The built-in extensions installed by Embedded Cluster include:
-
-* **Embedded Cluster Operator**: The Operator is used for reporting purposes as well as some clean up operations.
-
-* **KOTS:** Embedded Cluster installs the KOTS Admin Console in the kotsadm namespace. End customers use the Admin Console to configure and install the application. Rqlite is also installed in the kotsadm namespace alongside KOTS. Rqlite is a distributed relational database that uses SQLite as its storage engine. KOTS uses rqlite to store information such as support bundles, version history, application metadata, and other small amounts of data needed to manage the application. For more information about rqlite, see the [rqlite](https://rqlite.io/) website.
-
-* **OpenEBS:** Embedded Cluster uses OpenEBS to provide local PersistentVolume (PV) storage, including the PV storage for rqlite used by KOTS. For more information, see the [OpenEBS](https://openebs.io/docs/) documentation.
-
-* **(Disaster Recovery Only) Velero:** If the installation uses the Embedded Cluster disaster recovery feature, Embedded Cluster installs Velero, which is an open-source tool that provides backup and restore functionality. For more information about Velero, see the [Velero](https://velero.io/docs/latest/) documentation. For more information about the disaster recovery feature, see [Disaster Recovery for Embedded Cluster (Alpha)](/vendor/embedded-disaster-recovery).
-
-* **(Air Gap Only) Image registry:** For air gap installations in environments with limited or no outbound internet access, Embedded Cluster installs an image registry where the images required to install and run the application are pushed. For more information about installing in air-gapped environments, see [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap).
-
-## Comparison to kURL
-
-Embedded Cluster is a successor to Replicated kURL. Compared to kURL, Embedded Cluster offers several improvements such as:
-* Significantly faster installation, updates, and node joins
-* A redesigned Admin Console UI for managing the cluster
-* Improved support for multi-node clusters
-* One-click updates of both the application and the cluster at the same time
-
-Additionally, Embedded Cluster automatically deploys several built-in extensions like KOTS and OpenEBS to provide capabilities such as application management and storage. This represents an improvement over kURL because vendors distributing their application with Embedded Cluster no longer need to choose and define various add-ons in the installer spec. For additional functionality that is not included in the built-in extensions, such as an ingress controller, vendors can provide their own [`extensions`](/reference/embedded-config#extensions) that will be deployed alongside the application.
-
-## Requirements
-
-### System Requirements
-
-<Requirements/>
-
-### Port Requirements
-
-<EmbeddedClusterPortRequirements/>
-
-## Limitations
-
-Embedded Cluster has the following limitations:
-
-* **Reach out about migrating from kURL**: We are helping several customers migrate from kURL to Embedded Cluster. Reach out to Alex Parker at alexp@replicated.com for more information.
-
-* **Multi-node support is in beta**: Support for multi-node embedded clusters is in beta, and enabling high availability for multi-node clusters is in alpha. Only single-node embedded clusters are generally available. For more information, see [Managing Multi-Node Clusters with Embedded Cluster](/enterprise/embedded-manage-nodes).
-
-* **Disaster recovery is in alpha**: Disaster Recovery for Embedded Cluster installations is in alpha. For more information, see [Disaster Recovery for Embedded Cluster (Alpha)](/vendor/embedded-disaster-recovery).
- -* **Partial rollback support**: In Embedded Cluster 1.17.0 and later, rollbacks are supported only when rolling back to a version where there is no change to the [Embedded Cluster Config](/reference/embedded-config) compared to the currently-installed version. For example, users can roll back to release version 1.0.0 after upgrading to 1.1.0 only if both 1.0.0 and 1.1.0 use the same Embedded Cluster Config. For more information about how to enable rollbacks for your application in the KOTS Application custom resource, see [allowRollback](/reference/custom-resource-application#allowrollback) in _Application_. - -* **Changing node hostnames is not supported**: After a host is added to a Kubernetes cluster, Kubernetes assumes that the hostname and IP address of the host will not change. If you need to change the hostname or IP address of a node, you must first remove the node from the cluster. For more information about the requirements for naming nodes, see [Node name uniqueness](https://kubernetes.io/docs/concepts/architecture/nodes/#node-name-uniqueness) in the Kubernetes documentation. - -* **Automatic updates not supported**: Configuring automatic updates from the Admin Console so that new versions are automatically deployed is not supported for Embedded Cluster installations. For more information, see [Configuring Automatic Updates](/enterprise/updating-apps). - -* **Embedded Cluster installation assets not available through the Download Portal**: The assets required to install with Embedded Cluster cannot be shared with users through the Download Portal. Users can follow the Embedded Cluster installation instructions to download and extract the installation assets. For more information, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded). - -* **`minKotsVersion` and `targetKotsVersion` not supported**: The [`minKotsVersion`](/reference/custom-resource-application#minkotsversion-beta) and [`targetKotsVersion`](/reference/custom-resource-application#targetkotsversion) fields in the KOTS Application custom resource are not supported for Embedded Cluster installations. This is because each version of Embedded Cluster includes a particular version of KOTS. Setting `targetKotsVersion` or `minKotsVersion` to a version of KOTS that does not coincide with the version that is included in the specified version of Embedded Cluster will cause Embedded Cluster installations to fail with an error message like: `Error: This version of App Name requires a different version of KOTS from what you currently have installed`. To avoid installation failures, do not use targetKotsVersion or minKotsVersion in releases that support installation with Embedded Cluster. - -* **Support bundles over 100MB in the Admin Console**: Support bundles are stored in rqlite. Bundles over 100MB could cause rqlite to crash, causing errors in the installation. You can still generate a support bundle from the command line. For more information, see [Generating Support Bundles for Embedded Cluster](/vendor/support-bundle-embedded). - -* **Kubernetes version template functions not supported**: The KOTS [KubernetesVersion](/reference/template-functions-static-context#kubernetesversion), [KubernetesMajorVersion](/reference/template-functions-static-context#kubernetesmajorversion), and [KubernetesMinorVersion](/reference/template-functions-static-context#kubernetesminorversion) template functions do not provide accurate Kubernetes version information for Embedded Cluster installations. 
This is because these template functions are rendered before the Kubernetes cluster has been updated to the intended version. However, `KubernetesVersion` is not necessary for Embedded Cluster because vendors specify the Embedded Cluster version, which includes a known Kubernetes version. - -* **Custom domains not supported**: Embedded Cluster does not support the use of custom domains, even if custom domains are configured. We intend to add support for custom domains. For more information about custom domains, see [About Custom Domains](/vendor/custom-domains). - -* **KOTS Auto-GitOps workflow not supported**: Embedded Cluster does not support the KOTS Auto-GitOps workflow. If an end-user is interested in GitOps, consider the Helm install method instead. For more information, see [Installing with Helm](/vendor/install-with-helm). - -* **Downgrading Kubernetes not supported**: Embedded Cluster does not support downgrading Kubernetes. The admin console will not prevent end-users from attempting to downgrade Kubernetes if a more recent version of your application specifies a previous Embedded Cluster version. You must ensure that you do not promote new versions with previous Embedded Cluster versions. - -* **Templating not supported in Embedded Cluster Config**: The [Embedded Cluster Config](/reference/embedded-config) resource does not support the use of Go template functions, including [KOTS template functions](/reference/template-functions-about). This only applies to the Embedded Cluster Config. You can still use template functions in the rest of your release as usual. - -* **Policy enforcement on Embedded Cluster workloads is not supported**: The Embedded Cluster runs workloads that require higher levels of privilege. If your application installs a policy enforcement engine such as Gatekeeper or Kyverno, ensure that its policies are not enforced in the namespaces used by Embedded Cluster. - -* **Installing on STIG- and CIS-hardened OS images is not supported**: Embedded Cluster isn't tested on these images, and issues have arisen when trying to install on them. - -================ -File: docs/vendor/embedded-using.mdx -================ -import UpdateOverview from "../partials/embedded-cluster/_update-overview.mdx" -import SupportBundleIntro from "../partials/support-bundles/_ec-support-bundle-intro.mdx" -import EmbeddedClusterSupportBundle from "../partials/support-bundles/_generate-bundle-ec.mdx" -import EcConfig from "../partials/embedded-cluster/_ec-config.mdx" - -# Using Embedded Cluster - -This topic provides information about using Replicated Embedded Cluster, including how to get started, configure Embedded Cluster, access the cluster using kubectl, and more. For an introduction to Embedded Cluster, see [Embedded Cluster Overview](embedded-overview). - -## Quick Start - -You can use the following steps to get started quickly with Embedded Cluster. More detailed documentation is available below. - -1. Create a new customer or edit an existing customer and select the **Embedded Cluster Enabled** license option. Save the customer. - -1. Create a new release that includes your application. In that release, create an Embedded Cluster Config that includes, at minimum, the Embedded Cluster version you want to use. See the Embedded Cluster [GitHub repo](https://github.com/replicatedhq/embedded-cluster/releases) to find the latest version. - - Example Embedded Cluster Config: - - <EcConfig/> - -1. Save the release and promote it to the channel the customer is assigned to. - -1. 
Return to the customer page where you enabled Embedded Cluster. At the top right, click **Install instructions** and choose **Embedded Cluster**. A dialog appears with instructions on how to download the Embedded Cluster installation assets and install your application.
-
-    ![Customer install instructions drop down button](/images/customer-install-instructions-dropdown.png)
-
-    [View a larger version of this image](/images/customer-install-instructions-dropdown.png)
-
-1. On your VM, run the commands in the **Embedded Cluster install instructions** dialog.
-
-    <img alt="Embedded cluster install instruction dialog" src="/images/embedded-cluster-install-dialog-latest.png" width="550px"/>
-
-    [View a larger version of this image](/images/embedded-cluster-install-dialog-latest.png)
-
-1. Enter an Admin Console password when prompted.
-
-    The Admin Console URL is printed when the installation finishes. Access the Admin Console to begin installing your application. During the installation process in the Admin Console, you have the opportunity to add nodes if you want a multi-node cluster. Then you can provide application config, run preflights, and deploy your application.
-
-## About Configuring Embedded Cluster
-
-To install an application with Embedded Cluster, an Embedded Cluster Config must be present in the application release. The Embedded Cluster Config lets you define several characteristics about the cluster that will be created.
-
-For more information, see [Embedded Cluster Config](/reference/embedded-config).
-
-## About Installing with Embedded Cluster
-
-This section provides an overview of installing applications with Embedded Cluster.
-
-### Installation Overview
-
-The following diagram demonstrates how Kubernetes and an application are installed into a customer environment using Embedded Cluster:
-
-![Embedded Cluster installs an app in a customer environment](/images/embedded-cluster-install.png)
-
-[View a larger version of this image](/images/embedded-cluster-install.png)
-
-As shown in the diagram above, the Embedded Cluster Config is included in the application release in the Replicated Vendor Portal and is used to generate the Embedded Cluster installation assets. Users can download these installation assets from the Replicated app service (`replicated.app`) on the command line, then run the Embedded Cluster installation command to install Kubernetes and the KOTS Admin Console. Finally, users access the Admin Console to optionally add nodes to the cluster and to configure and install the application.
-
-### Installation Options
-
-Embedded Cluster supports installations in online (internet-connected) environments and air gap environments with no outbound internet access.
-
-For online installations, Embedded Cluster also supports installing behind a proxy server.
-
-For more information about how to install with Embedded Cluster, see:
-* [Online Installation with Embedded Cluster](/enterprise/installing-embedded)
-* [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap)
-
-### Customer-Specific Installation Instructions
-
-To install with Embedded Cluster, you can follow the customer-specific instructions provided on the **Customer** page in the Vendor Portal.
For example:
-
-<img alt="Embedded cluster install instruction dialog" src="/images/embedded-cluster-install-dialog.png" width="500px"/>
-
-[View a larger version of this image](/images/embedded-cluster-install-dialog.png)
-
-### (Optional) Serve Installation Assets Using the Vendor API
-
-To install with Embedded Cluster, you need to download the Embedded Cluster installer binary and a license. Air gap installations also require an air gap bundle. Some vendors already have a portal where their customers can log in to access documentation or download artifacts. In cases like this, you can serve the Embedded Cluster installation assets yourself using the Replicated Vendor API, rather than having customers download the assets from the Replicated app service using a curl command during installation.
-
-To serve Embedded Cluster installation assets with the Vendor API:
-
-1. If you have not done so already, create an API token for the Vendor API. See [Using the Vendor API v3](/reference/vendor-api-using#api-token-requirement).
-
-1. Call the [Get an Embedded Cluster release](https://replicated-vendor-api.readme.io/reference/getembeddedclusterrelease) endpoint to download the assets needed to install your application with Embedded Cluster. Your customers must take this binary and their license and copy them to the machine where they will install your application.
-
-    Note the following:
-
-    * (Recommended) Provide the `customerId` query parameter so that the customer’s license is included in the downloaded tarball. This mirrors what is returned when a customer downloads the binary directly using the Replicated app service and is the most useful option. Excluding the `customerId` is useful if you plan to distribute the license separately.
-
-    * If you do not provide any query parameters, this endpoint downloads the Embedded Cluster binary for the latest release on the specified channel. You can provide the `channelSequence` query parameter to download the binary for a particular release.
-
-### About Host Preflight Checks
-
-During installation, Embedded Cluster automatically runs a default set of _host preflight checks_. The default host preflight checks are designed to verify that the installation environment meets the requirements for Embedded Cluster, such as:
-* The system has sufficient disk space
-* The system has at least 2G of memory and 2 CPU cores
-* The system clock is synchronized
-
-For Embedded Cluster requirements, see [Embedded Cluster Installation Requirements](/enterprise/installing-embedded-requirements). For the full default host preflight spec for Embedded Cluster, see [`host-preflight.yaml`](https://github.com/replicatedhq/embedded-cluster/blob/main/pkg/preflights/host-preflight.yaml) in the `embedded-cluster` repository in GitHub.
-
-If any of the host preflight checks fail, installation is blocked and a message describing the failure is displayed. For more information about host preflight checks for installations on VMs or bare metal servers, see [About Host Preflights](preflight-support-bundle-about#host-preflights).
-
-#### Limitations
-
-Embedded Cluster host preflight checks have the following limitations:
-
-* The default host preflight checks for Embedded Cluster cannot be modified, and vendors cannot provide their own custom host preflight spec for Embedded Cluster.
-* Host preflight checks do not check that any application-specific requirements are met.
For more information about defining preflight checks for your application, see [Defining Preflight Checks](/vendor/preflight-defining).
-
-#### Skip Host Preflight Checks
-
-You can skip host preflight checks by passing the `--skip-host-preflights` flag with the Embedded Cluster `install` command. For example:
-
-```bash
-sudo ./my-app install --license license.yaml --skip-host-preflights
-```
-
-When you skip host preflight checks, the Admin Console still runs any application-specific preflight checks that are defined in the release before the application is deployed.
-
-:::note
-Skipping host preflight checks is _not_ recommended for production installations.
-:::
-
-## About Managing Multi-Node Clusters with Embedded Cluster
-
-This section describes managing nodes in multi-node clusters created with Embedded Cluster.
-
-### Defining Node Roles for Multi-Node Clusters
-
-You can optionally define node roles in the Embedded Cluster Config. For multi-node clusters, roles can be useful for the purpose of assigning specific application workloads to nodes. If node roles are defined, users access the Admin Console to assign one or more roles to a node when it is joined to the cluster.
-
-For more information, see [roles](/reference/embedded-config#roles) in _Embedded Cluster Config_.
-
-### Adding Nodes
-
-Users can add nodes to a cluster with Embedded Cluster from the Admin Console. The Admin Console provides the join command used to add nodes to the cluster.
-
-For more information, see [Managing Multi-Node Clusters with Embedded Cluster](/enterprise/embedded-manage-nodes).
-
-### High Availability for Multi-Node Clusters (Alpha)
-
-Multi-node clusters are not highly available by default. Enabling high availability (HA) requires that at least three controller nodes are present in the cluster. Users can enable HA when joining the third node.
-
-For more information about creating HA multi-node clusters with Embedded Cluster, see [Enable High Availability for Multi-Node Clusters (Alpha)](/enterprise/embedded-manage-nodes#ha) in _Managing Multi-Node Clusters with Embedded Cluster_.
-
-## About Performing Updates with Embedded Cluster
-
-<UpdateOverview/>
-
-For more information about updating, see [Performing Updates with Embedded Cluster](/enterprise/updating-embedded).
-
-## Access the Cluster
-
-With Embedded Cluster, end users should rarely need to use the CLI. Typical workflows, like updating the application and the cluster, are driven through the Admin Console.
-
-Nonetheless, there are times when vendors or their customers need to use the CLI for development or troubleshooting.
-
-To access the cluster and use other included binaries:
-
-1. SSH onto a controller node.
-
-1. Use the Embedded Cluster shell command to start a shell with access to the cluster:
-
-    ```
-    sudo ./APP_SLUG shell
-    ```
-
-    The output looks similar to the following:
-    ```
-        __4___
-     _  \ \ \ \   Welcome to APP_SLUG debug shell.
-    <'\ /_/_/_/   This terminal is now configured to access your cluster.
-     ((____!___/) Type 'exit' (or CTRL+d) to exit.
-      \0\0\0\0\/  Happy hacking.
- ~~~~~~~~~~~ - root@alex-ec-2:/home/alex# export KUBECONFIG="/var/lib/embedded-cluster/k0s/pki/admin.conf" - root@alex-ec-2:/home/alex# export PATH="$PATH:/var/lib/embedded-cluster/bin" - root@alex-ec-2:/home/alex# source <(kubectl completion bash) - root@alex-ec-2:/home/alex# source /etc/bash_completion - ``` - - The appropriate kubeconfig is exported, and the location of useful binaries like kubectl and Replicated’s preflight and support-bundle plugins is added to PATH. - - :::note - You cannot run the `shell` command on worker nodes. - ::: - -1. Use the available binaries as needed. - - **Example**: - - ```bash - kubectl version - ``` - ``` - Client Version: v1.29.1 - Kustomize Version: v5.0.4-0.20230601165947-6ce0bf390ce3 - Server Version: v1.29.1+k0s - ``` - -1. Type `exit` or **Ctrl + D** to exit the shell. - - :::note - If you encounter a typical workflow where your customers have to use the Embedded Cluster shell, reach out to Alex Parker at alexp@replicated.com. These workflows might be candidates for additional Admin Console functionality. - ::: - -## Reset a Node - -Resetting a node removes the cluster and your application from that node. This is useful for iteration, development, and when mistakes are made, so you can reset a machine and reuse it instead of having to procure another machine. - -If you want to completely remove a cluster, you need to reset each node individually. - -When resetting a node, OpenEBS PVCs on the node are deleted. Only PVCs created as part of a StatefulSet will be recreated automatically on another node. To recreate other PVCs, the application will need to be redeployed. - -To reset a node: - -1. SSH onto the machine. Ensure that the Embedded Cluster binary is still available on that machine. - -1. Run the following command to reset the node and automatically reboot the machine to ensure that transient configuration is also reset: - - ``` - sudo ./APP_SLUG reset - ``` - Where `APP_SLUG` is the unique slug for the application. - - :::note - Pass the `--no-prompt` flag to disable interactive prompts. Pass the `--force` flag to ignore any errors encountered during the reset. - ::: - -## Additional Use Cases - -This section outlines some additional use cases for Embedded Cluster. These are not officially supported features from Replicated, but are ways of using Embedded Cluster that we or our customers have experimented with that might be useful to you. - -### NVIDIA GPU Operator - -The NVIDIA GPU Operator uses the operator framework within Kubernetes to automate the management of all NVIDIA software components needed to provision GPUs. For more information about this operator, see the [NVIDIA GPU Operator](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/overview.html) documentation. - -You can include the NVIDIA GPU Operator in your release as an additional Helm chart, or using Embedded Cluster Helm extensions. For information about adding Helm extensions, see [extensions](/reference/embedded-config#extensions) in _Embedded Cluster Config_. 

Using the NVIDIA GPU Operator with Embedded Cluster requires configuring the containerd options in the operator as follows:

```yaml
# Embedded Cluster Config

extensions:
  helm:
    repositories:
      - name: nvidia
        url: https://nvidia.github.io/gpu-operator
    charts:
      - name: gpu-operator
        chartname: nvidia/gpu-operator
        namespace: gpu-operator
        version: "v24.9.1"
        values: |
          # configure the containerd options
          toolkit:
            env:
              - name: CONTAINERD_CONFIG
                value: /etc/k0s/containerd.d/nvidia.toml
              - name: CONTAINERD_SOCKET
                value: /run/k0s/containerd.sock
```

When the containerd options are configured as shown above, the NVIDIA GPU Operator automatically creates the required configurations in the `/etc/k0s/containerd.d/nvidia.toml` file. It is not necessary to create this file manually or to modify any other configuration on the hosts.

:::note
If you include the NVIDIA GPU Operator as a Helm extension, remove any existing containerd services that are running on the host (such as those deployed by Docker) before attempting to install the release with Embedded Cluster. If there are any containerd services on the host, the NVIDIA GPU Operator will generate an invalid containerd config, causing the installation to fail.
:::

## Troubleshoot with Support Bundles

<SupportBundleIntro/>

<EmbeddedClusterSupportBundle/>

================
File: docs/vendor/helm-image-registry.mdx
================
import StepCreds from "../partials/proxy-service/_step-creds.mdx"
import StepCustomDomain from "../partials/proxy-service/_step-custom-domain.mdx"

# Using the Proxy Registry with Helm Installations

This topic describes how to use the Replicated proxy registry to proxy images for installations with the Helm CLI. For more information about the proxy registry, see [About the Replicated Proxy Registry](private-images-about).

## Overview

With the Replicated proxy registry, each customer's unique license can grant proxy access to images in an external private registry. To enable the proxy registry for Helm installations, you must create a Secret with `type: kubernetes.io/dockerconfigjson` to authenticate with the proxy registry.

During Helm installations, after customers provide their license ID, a `global.replicated.dockerconfigjson` field that contains a base64 encoded Docker configuration file is automatically injected in the Helm chart values. You can use this `global.replicated.dockerconfigjson` field to create the required pull secret.

For information about how Kubernetes uses the `kubernetes.io/dockerconfigjson` Secret type to authenticate to a private image registry, see [Pull an Image from a Private Registry](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) in the Kubernetes documentation.

## Enable the Proxy Registry

This section describes how to enable the proxy registry for applications deployed with Helm, including how to use the `global.replicated.dockerconfigjson` field that is injected during application deployment to create the required pull secret.

To enable the proxy registry:

1. <StepCreds/>

1. <StepCustomDomain/>

1. 
In your Helm chart templates, create a Kubernetes Secret to evaluate if the `global.replicated.dockerconfigjson` value is set, and then write the rendered value into a Secret on the cluster:

   ```yaml
   # /templates/replicated-pull-secret.yaml

   {{ if .Values.global.replicated.dockerconfigjson }}
   apiVersion: v1
   kind: Secret
   metadata:
     name: replicated-pull-secret
   type: kubernetes.io/dockerconfigjson
   data:
     .dockerconfigjson: {{ .Values.global.replicated.dockerconfigjson }}
   {{ end }}
   ```

   :::note
   If you use the Replicated SDK, do not use `replicated` for the name of the image pull secret because the SDK automatically creates a Secret named `replicated`. Using the same name causes an error.
   :::

1. Ensure that you have a field in your Helm chart values file for your image repository URL, and that any references to the image in your Helm chart access the field from your values file.

   **Example**:

   ```yaml
   # values.yaml
   ...
   dockerconfigjson: '{{ .Values.global.replicated.dockerconfigjson }}'
   images:
     myapp:
       # Add image URL in the values file
       apiImageRepository: quay.io/my-org/api
       apiImageTag: v1.0.1
   ```
   ```yaml
   # /templates/deployment.yaml

   apiVersion: apps/v1
   kind: Deployment
   metadata:
     name: example
   spec:
     template:
       spec:
         containers:
         - name: api
           # Access the apiImageRepository field from the values file
           image: {{ .Values.images.myapp.apiImageRepository }}:{{ .Values.images.myapp.apiImageTag }}
   ```

1. In your Helm chart templates, add the image pull secret that you created to any manifests that reference the private image:

   ```yaml
   # /templates/example.yaml
   ...
   {{ if .Values.global.replicated.dockerconfigjson }}
   imagePullSecrets:
     - name: replicated-pull-secret
   {{ end }}
   ```

   **Example:**

   ```yaml
   # /templates/deployment.yaml
   ...
   image: "{{ .Values.images.myapp.apiImageRepository }}:{{ .Values.images.myapp.apiImageTag }}"
   {{ if .Values.global.replicated.dockerconfigjson }}
   imagePullSecrets:
     - name: replicated-pull-secret
   {{ end }}
   name: myapp
   ports:
     - containerPort: 3000
       name: http
   ```

1. Package your Helm chart and add it to a release. Promote the release to a development channel. See [Managing Releases with Vendor Portal](releases-creating-releases).

1. Install the chart in a development environment to test your changes:

   1. Create a local `values.yaml` file to override the default external registry image URL with the URL for the image on `proxy.replicated.com`.

      The proxy registry URL has the following format: `proxy.replicated.com/proxy/APP_SLUG/EXTERNAL_REGISTRY_IMAGE_URL`

      Where:
      * `APP_SLUG` is the slug of your Replicated application.
      * `EXTERNAL_REGISTRY_IMAGE_URL` is the path to the private image on your external registry.

      **Example**
      ```yaml
      # A local values.yaml file
      ...
      images:
        myapp:
          apiImageRepository: proxy.replicated.com/proxy/my-app/quay.io/my-org/api
          apiImageTag: v1.0.1
      ```

      :::note
      If you configured a custom domain for the proxy registry, use the custom domain instead of `proxy.replicated.com`. For more information, see [Using Custom Domains](custom-domains-using).
      :::

   1. Log in to the Replicated registry and install the chart, passing the local `values.yaml` file you created with the `--values` flag. See [Installing with Helm](install-with-helm).
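
      To see these final commands end to end, a minimal sketch follows. The email address, license ID, app slug, channel, and chart name are all placeholders for your own values:

      ```bash
      # Log in to the Replicated registry with the customer's email and license ID
      helm registry login registry.replicated.com \
        --username customer@example.com \
        --password LICENSE_ID

      # Install the chart, passing the local values file that points
      # image URLs at the proxy registry
      helm install my-app oci://registry.replicated.com/my-app-slug/unstable/my-chart \
        --values values.yaml
      ```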

================
File: docs/vendor/helm-install-airgap.mdx
================
import Prerequisites from "../partials/helm/_helm-install-prereqs.mdx"

# Installing and Updating with Helm in Air Gap Environments

## Overview

Replicated supports installing and updating Helm charts in air gap environments with no outbound internet access. In air gap Helm installations, customers are guided through the process with instructions provided in the [Replicated Download Portal](/vendor/releases-share-download-portal).

When air gap Helm installations are enabled, an **Existing cluster with Helm** option is displayed in the left nav of the Download Portal. When selected, **Existing cluster with Helm** displays three tabs (**Install**, **Manual Update**, **Automate Updates**), as shown in the screenshot below:

![download helm option](/images/download-helm.png)

[View a larger version of this image](/images/download-helm.png)

Each tab provides instructions for how to install, perform a manual update, or configure automatic updates, respectively.

These installation and update instructions assume that your customer is accessing the Download Portal from a workstation that can access the internet and their internal private registry. Direct access to the target cluster is not required.

Each method assumes that your customer is familiar with `curl`, `docker`, `helm`, `kubernetes`, and a bit of `bash`, particularly for automating updates.

## Prerequisites

Before you install, complete the following prerequisites:

* Reach out to your account rep to enable the Helm air gap installation feature.

<Prerequisites/>

## Install

The installation instructions provided in the Download Portal are designed to walk your customer through the first installation of your chart in an air gap environment.

To install with Helm in an air gap environment:

1. In the [Vendor Portal](https://vendor.replicated.com), go to **Customers > [Customer Name] > Reporting**.

1. In the **Download portal** section, click **Visit download portal** to log in to the Download Portal for the customer.

1. In the Download Portal left nav, click **Existing cluster with Helm**.

   ![download helm option](/images/download-helm.png)

   [View a larger version of this image](/images/download-helm.png)

1. On the **Install** tab, in the **App version** dropdown, select the target application version to install.

1. Run the first command to authenticate into the Replicated proxy registry with the customer's credentials (the `license_id`).

1. Under **Get the list of images**, run the command provided to generate the list of images needed to install.

1. For **(Optional) Specify registry URI**, provide the URI for an internal image registry where you want to push images. If a registry URI is provided, Replicated automatically updates the commands for tagging and pushing images with the URI.

1. For **Pull, tag, and push each image to your private registry**, copy and paste the docker commands provided to pull, tag, and push each image to your internal registry.

   :::note
   If you did not provide a URI in the previous step, ensure that you manually replace the image names in the `tag` and `push` commands with the target registry URI.
   :::

1. Run the command to authenticate into the OCI registry that contains your Helm chart.

1. Run the command to install the `preflight` plugin.
This allows you to run preflight checks before installing to ensure that the installation environment meets the requirements for the application.

1. For **Download a copy of the values.yaml file** and **Edit the values.yaml file**, run the `helm show values` command provided to download the values file for the Helm chart. Then, edit the values file as needed to customize the configuration of the given chart.

   If you are installing a release that contains multiple Helm charts, repeat these steps to download and edit each values file.

   :::note
   For installations with multiple charts where two or more of the top-level charts in the release use the same name, ensure that each values file has a unique name to avoid installation errors. For more information, see [Installation Fails for Release With Multiple Helm Charts](helm-install-troubleshooting#air-gap-values-file-conflict) in _Troubleshooting Helm Installations_.
   :::

1. For **Determine install method**, select one of the options depending on your ability to access the internet and the cluster from your workstation.

1. Use the commands provided and the values file or files that you edited to run preflight checks and then install the release.

## Perform Updates

This section describes the processes of performing manual and automatic updates with Helm in air gap environments using the instructions provided in the Download Portal.

### Manual Updates

The manual update instructions provided in the Download Portal are similar to the installation instructions.

However, the first step prompts the customer to select their current version and the target version to install. This step takes [required releases](/vendor/releases-about#properties) into consideration, thereby guiding the customer to the versions that are upgradable from their current version.

The additional steps are consistent with the installation process until the `preflight` and `install` commands, where customers provide the existing values from the cluster with the `helm get values` command. Your customer will then need to edit the `values.yaml` to reference the new image tags.

If the new version introduces new images or other values, Replicated recommends that you explain this at the top of your release notes so that customers know they will need to make additional edits to the `values.yaml` before installing.

### Automate Updates

The instructions in the Download Portal for automating updates use API endpoints that your customers can automate against.

The instructions in the Download Portal provide customers with example commands that can be put into a script that they run periodically (nightly, weekly) using GitHub Actions, Jenkins, or other platforms.

This method assumes that the customer has already done a successful manual installation, including the configuration of the appropriate `values`.

After logging into the registry, the customer exports their current version and uses that to query an endpoint that provides the latest installable version number (either the next required release, or the latest release) and exports it as the target version. With the target version, they can now query an API for the list of images.

With the list of images, the provided `bash` script automates the process of pulling updated images from the repository, tagging them with a name for an internal registry, and then pushing the newly tagged images to their internal registry.
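
For illustration only, a script along the following lines could drive that loop. The `images.txt` file name and `REGISTRY` value are hypothetical; the Download Portal provides the actual commands and the image list for the target version:

```bash
#!/bin/bash
set -euo pipefail

# Hypothetical internal registry and image list for illustration
REGISTRY="registry.internal.example.com/my-app"

while read -r image; do
  docker pull "$image"
  # Re-tag for the internal registry, keeping the final name:tag
  # component of the source reference
  docker tag "$image" "$REGISTRY/$(basename "$image")"
  docker push "$REGISTRY/$(basename "$image")"
done < images.txt
```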

Unless the customer has set up the `values` to preserve the updated tag (for example, by using the `latest` tag), they need to edit the `values.yaml` to reference the new image tags. After doing so, they can log in to the OCI registry and perform the commands to install the updated chart.

## Use a Harbor or Artifactory Registry Proxy

You can integrate the Replicated proxy registry with an existing Harbor or JFrog Artifactory instance to proxy and cache images on demand. For more information, see [Using a Registry Proxy for Helm Air Gap Installations](using-third-party-registry-proxy).

================
File: docs/vendor/helm-install-overview.mdx
================
import Helm from "../partials/helm/_helm-definition.mdx"

# About Helm Installations with Replicated

This topic provides an introduction to Helm installations for applications distributed with Replicated.

## Overview

<Helm/>

Replicated strongly recommends that all applications are packaged using Helm because many enterprise users expect to be able to install an application with the Helm CLI.

Existing releases in the Replicated Platform that already support installation with Replicated KOTS and Replicated Embedded Cluster (and that include one or more Helm charts) can also be installed with the Helm CLI; it is not necessary to create and manage separate releases or channels for each installation method.

For information about how to install with Helm, see:
* [Installing with Helm](/vendor/install-with-helm)
* [Installing and Updating with Helm in Air Gap Environments (Alpha)](helm-install-airgap)

The following diagram shows how Helm charts distributed with Replicated are installed with Helm in online (internet-connected) customer environments:

<img src="/images/helm-install-diagram.png" alt="diagram of a helm chart in a custom environment" width="700px"/>

[View a larger version of this image](/images/helm-install-diagram.png)

As shown in the diagram above, when a release containing one or more Helm charts is promoted to a channel, the Replicated Vendor Portal automatically extracts any Helm charts included in the release. These charts are pushed as OCI objects to the Replicated registry. The Replicated registry is a private OCI registry hosted by Replicated at `registry.replicated.com`. For information about security for the Replicated registry, see [Replicated Registry Security](packaging-private-registry-security).

For example, if your application in the Vendor Portal is named My App and you promote a release containing a Helm chart with `name: my-chart` to a channel with the slug `beta`, then the Vendor Portal pushes the chart to the following location: `oci://registry.replicated.com/my-app/beta/my-chart`.

Customers can install your Helm chart by first logging in to the Replicated registry with their unique license ID. This step ensures that any customer who installs your chart from the registry has a valid, unexpired license. After the customer logs in to the Replicated registry, they can run `helm install` to install the chart from the registry.

During installation, the Replicated registry injects values into the `global.replicated` key of the parent Helm chart's values file. For more information about the values schema, see [Helm global.replicated Values Schema](helm-install-values-schema).

## Limitations

Helm installations have the following limitations:

* Installing with Helm in air gap environments is a Beta feature.
For more information, see [Installing and Updating with Helm in Air Gap Environments](/vendor/helm-install-airgap).
* Helm CLI installations do not provide access to any of the features of the Replicated KOTS installer, such as:
  * The KOTS Admin Console
  * Strict preflight checks that block installation
  * Backup and restore with snapshots
  * Required releases with the **Prevent this release from being skipped during upgrades** option

================
File: docs/vendor/helm-install-release.md
================
import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx"
import RegistryLogout from "../partials/replicated-sdk/_registry-logout.mdx"
import HelmPackage from "../partials/helm/_helm-package.mdx"

# Packaging a Helm Chart for a Release

This topic describes how to package a Helm chart and the Replicated SDK into a chart archive that can be added to a release.

## Overview

To add a Helm chart to a release, you first add the Replicated SDK as a dependency of the Helm chart and then package the chart and its dependencies as a `.tgz` chart archive.

The Replicated SDK is a Helm chart that can be installed as a small service alongside your application. The SDK provides access to key Replicated features, such as support for collecting custom metrics on application instances. For more information, see [About the Replicated SDK](replicated-sdk-overview).

## Requirements and Recommendations

This section includes requirements and recommendations for Helm charts.

### Chart Version Requirement

The chart version in your Helm chart must comply with image tag format requirements. A valid tag can contain only lowercase and uppercase letters, digits, underscores, periods, and dashes.

The chart version must also comply with the Semantic Versioning (SemVer) specification. When you run the `helm install` command without the `--version` flag, Helm retrieves the list of all available image tags for the chart from the registry and compares them using the SemVer comparison rules described in the SemVer specification. The version that is installed is the version with the largest tag value. For more information about the SemVer specification, see the [Semantic Versioning](https://semver.org) documentation.

### Chart Naming

For releases that contain more than one Helm chart, Replicated recommends that you use unique names for each top-level Helm chart in the release. This aligns with Helm best practices and also avoids potential conflicts in filenames during installation that could cause the installation to fail. For more information, see [Installation Fails for Release With Multiple Helm Charts](helm-install-troubleshooting#air-gap-values-file-conflict) in _Troubleshooting Helm Installations_.

### Helm Best Practices

Replicated recommends that you review the [Best Practices](https://helm.sh/docs/chart_best_practices/) guide in the Helm documentation to ensure that your Helm chart or charts follow the required and recommended conventions.

## Package a Helm Chart {#release}

This procedure shows how to create a Helm chart archive to add to a release. For more information about the Helm CLI commands in this procedure, see the [Helm Commands](https://helm.sh/docs/helm/helm/) section in the Helm documentation.

To package a Helm chart so that it can be added to a release:

1. In your application Helm chart `Chart.yaml` file, add the YAML below to declare the SDK as a dependency.
If your application is installed as multiple charts, declare the SDK as a dependency of the chart that customers install first. Do not declare the SDK in more than one chart.

   <DependencyYaml/>

   For additional guidelines related to adding the SDK as a dependency, see [Install the SDK as a Subchart](replicated-sdk-installing#install-the-sdk-as-a-subchart) in _Installing the Replicated SDK_.

1. Update dependencies and package the chart as a `.tgz` file:

   <HelmPackage/>

   :::note
   <RegistryLogout/>
   :::

1. Add the `.tgz` file to a release. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Managing Releases with the CLI](releases-creating-cli).

   After the release is promoted, your Helm chart is automatically pushed to the Replicated registry. For information about how to install a release with the Helm CLI, see [Installing with Helm](install-with-helm). For information about how to install Helm charts with KOTS, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about).

================
File: docs/vendor/helm-install-troubleshooting.mdx
================
# Troubleshooting Helm Installations with Replicated

This topic provides troubleshooting information for common issues related to performing installations and upgrades with the Helm CLI.

## Installation Fails for Release With Multiple Helm Charts {#air-gap-values-file-conflict}

#### Symptom

When installing a release that contains multiple Helm charts, the installation fails. You might also see the following error message:

```
Error: INSTALLATION FAILED: cannot re-use a name that is still in use
```

#### Cause

In the Download Portal, each chart's values file is named according to the chart's name. For example, the values file for the Helm chart Gitea would be named `gitea-values.yaml`.

If any top-level charts in the release use the same name, the associated values files are also assigned the same name. This causes each new values file downloaded with the `helm show values` command to overwrite any previously downloaded values file of the same name.

#### Solution

Replicated recommends that you use unique names for top-level Helm charts in the same release.

Alternatively, if a release contains charts that must use the same name, convert one or both of the charts into subcharts and use Helm conditions to differentiate them. See [Conditions and Tags](https://helm.sh/docs/chart_best_practices/dependencies/#conditions-and-tags) in the Helm documentation.

================
File: docs/vendor/helm-install-values-schema.mdx
================
import SdkValues from "../partials/replicated-sdk/_sdk-values.mdx"

# Helm global.replicated Values Schema

This topic describes the `global.replicated` values that are injected in the values file of an application's parent Helm chart during Helm installations with Replicated.

## Overview

When a user installs a Helm application with the Helm CLI, the Replicated registry injects a set of customer-specific values into the `global.replicated` key of the parent Helm chart's values file.

The values in the `global.replicated` field include the following:

* The fields in the customer's license, such as the field names, descriptions, signatures, values, and any custom license fields that you define. Vendors can use this license information to check entitlements before the application is installed.
For more information, see [Checking Entitlements in Helm Charts Before Deployment](/vendor/licenses-reference-helm).

* A base64 encoded Docker configuration file. To proxy images from an external private registry with the Replicated proxy registry, you can use the `global.replicated.dockerconfigjson` field to create an image pull secret for the proxy registry. For more information, see [Proxying Images for Helm Installations](/vendor/helm-image-registry).

The following is an example of a Helm values file containing the `global.replicated` values:

```yaml
# Helm values.yaml
global:
  replicated:
    channelName: Stable
    customerEmail: username@example.com
    customerName: Example Customer
    dockerconfigjson: eyJhdXRocyI6eyJd1dIRk5NbEZFVGsxd2JGUmFhWGxYWm5scloyNVRSV1pPT2pKT2NGaHhUVEpSUkU1...
    licenseFields:
      expires_at:
        description: License Expiration
        name: expires_at
        signature:
          v1: iZBpESXx7fpdtnbMKingYHiJH42rP8fPs0x8izy1mODckGBwVoA...
        title: Expiration
        value: "2023-05-30T00:00:00Z"
        valueType: String
    licenseID: YiIXRTjiB7R...
    licenseType: dev
```

## `global.replicated` Values Schema

The `global.replicated` values schema contains the following fields:

| Field | Type | Description |
| --- | --- | --- |
| `channelName` | String | The name of the release channel |
| `customerEmail` | String | The email address of the customer |
| `customerName` | String | The name of the customer |
| `dockerconfigjson` | String | Base64 encoded Docker config JSON for pulling images |
| `licenseFields` | Object | A list containing each license field in the customer's license. Each element under `licenseFields` has the following properties: `description`, `signature`, `title`, `value`, `valueType`. `expires_at` is the default license field that all licenses include. Other elements under `licenseFields` include the custom license fields added by vendors in the Vendor Portal. For more information, see [Managing Customer License Fields](/vendor/licenses-adding-custom-fields). |
| `licenseFields.[FIELD_NAME].description` | String | Description of the license field |
| `licenseFields.[FIELD_NAME].signature.v1` | Object | Signature of the license field |
| `licenseFields.[FIELD_NAME].title` | String | Title of the license field |
| `licenseFields.[FIELD_NAME].value` | String | Value of the license field |
| `licenseFields.[FIELD_NAME].valueType` | String | Type of the license field value |
| `licenseID` | String | The unique identifier for the license |
| `licenseType` | String | The type of license, such as "dev" or "prod". For more information, see [Customer Types](/vendor/licenses-about#customer-types) in _About Customers and Licensing_. 
|

## Replicated SDK Helm Values

<SdkValues/>

================
File: docs/vendor/helm-native-about.mdx
================
import GitOpsLimitation from "../partials/helm/_gitops-limitation.mdx"
import GitOpsNotRecommended from "../partials/gitops/_gitops-not-recommended.mdx"
import TemplateLimitation from "../partials/helm/_helm-template-limitation.mdx"
import VersionLimitation from "../partials/helm/_helm-version-limitation.mdx"
import HooksLimitation from "../partials/helm/_hooks-limitation.mdx"
import HookWeightsLimitation from "../partials/helm/_hook-weights-limitation.mdx"
import Deprecated from "../partials/helm/_replicated-deprecated.mdx"
import KotsHelmCrDescription from "../partials/helm/_kots-helm-cr-description.mdx"
import ReplicatedHelmMigration from "../partials/helm/_replicated-helm-migration.mdx"
import Helm from "../partials/helm/_helm-definition.mdx"

# About Distributing Helm Charts with KOTS

This topic provides an overview of how Replicated KOTS deploys Helm charts, including an introduction to the KOTS HelmChart custom resource, limitations of deploying Helm charts with KOTS, and more.

## Overview

<Helm/>

KOTS can install applications that include:
* One or more Helm charts
* More than a single instance of any chart
* A combination of Helm charts and Kubernetes manifests

Replicated strongly recommends that all applications are packaged as Helm charts because many enterprise users expect to be able to install an application with the Helm CLI.

Deploying Helm charts with KOTS provides additional functionality not directly available with the Helm CLI, such as:
* The KOTS Admin Console
* Backup and restore with snapshots
* Support for air gap installations
* Support for embedded cluster installations on VMs or bare metal servers

Additionally, for applications packaged as Helm charts, you can support Helm CLI and KOTS installations from the same release without having to maintain separate sets of Helm charts and application manifests. The following diagram demonstrates how a single release containing one or more Helm charts can be installed using the Helm CLI and KOTS:

<img src="/images/helm-kots-install-options.png" width="650px" alt="One release being installed into three different customer environments"/>

[View a larger version of this image](/images/helm-kots-install-options.png)

For a tutorial that demonstrates how to add a sample Helm chart to a release and then install the release using both KOTS and the Helm CLI, see [Install a Helm Chart with KOTS and the Helm CLI](/vendor/tutorial-kots-helm-setup).

## How KOTS Deploys Helm Charts

This section describes how KOTS uses the HelmChart custom resource to deploy Helm charts.

### About the HelmChart Custom Resource

<KotsHelmCrDescription/>

The HelmChart custom resource with `apiVersion: kots.io/v1beta2` (HelmChart v2) is supported with KOTS v1.99.0 and later. For more information, see [About the HelmChart kots.io/v1beta2 Installation Method](#v2-install) below.

KOTS versions earlier than v1.99.0 can install Helm charts with `apiVersion: kots.io/v1beta1` of the HelmChart custom resource. The `kots.io/v1beta1` HelmChart custom resource is deprecated. For more information, see [Deprecated HelmChart kots.io/v1beta1 Installation Methods](#deprecated-helmchart-kotsiov1beta1-installation-methods) below.
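
For orientation, a minimal `kots.io/v1beta2` HelmChart custom resource is sketched below. The chart name, version, and release name are placeholders; see the reference linked later in this topic for the full schema:

```yaml
apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: samplechart
spec:
  # Identifies the chart archive in the release by name and version
  chart:
    name: samplechart
    chartVersion: 3.1.7
  # The release name Helm uses for the installation
  releaseName: samplechart-release
  # Values to map into the chart's values.yaml at deploy time
  values: {}
```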

### About the HelmChart v2 Installation Method {#v2-install}

When you include a HelmChart custom resource with `apiVersion: kots.io/v1beta2` in a release, KOTS v1.99.0 or later does a Helm install or upgrade of the associated Helm chart directly.

The `kots.io/v1beta2` HelmChart custom resource does _not_ modify the chart during installation. This results in Helm chart installations that are consistent, reliable, and easy to troubleshoot. For example, you can reproduce the exact installation outside of KOTS by downloading a copy of the application files from the cluster with `kots download`, then using those files to install with `helm install`. And, you can use `helm get values` to view the values that were used to install.

The `kots.io/v1beta2` HelmChart custom resource requires configuration. For more information, see [Configuring the HelmChart Custom Resource v2](helm-native-v2-using).

For information about the fields and syntax of the HelmChart custom resource, see [HelmChart v2](/reference/custom-resource-helmchart-v2).

### Limitations

The following limitations apply when deploying Helm charts with the `kots.io/v1beta2` HelmChart custom resource:

* Available only for Helm v3.

* Available only for KOTS v1.99.0 and later.

* The rendered manifests shown in the `rendered` directory might not reflect the final manifests that will be deployed to the cluster. This is because the manifests in the `rendered` directory are generated using `helm template`, which is not run with cluster context. So values returned by the `lookup` function and the built-in `Capabilities` object might differ.

* When updating the HelmChart custom resource in a release from `kots.io/v1beta1` to `kots.io/v1beta2`, the diff viewer shows a large diff because the underlying file structure of the rendered manifests is different.

* Editing downstream Kustomization files to make changes to the application before deploying is not supported. This is because KOTS does not use Kustomize when installing Helm charts with the `kots.io/v1beta2` HelmChart custom resource. For more information about patching applications with Kustomize, see [Patching with Kustomize](/enterprise/updating-patching-with-kustomize).

* <GitOpsLimitation/>

  <GitOpsNotRecommended/>

  For more information, see [KOTS Auto-GitOps Workflow](/enterprise/gitops-workflow).

## Support for Helm Hooks {#hooks}

KOTS supports the following hooks for Helm charts:
* `pre-install`: Executes after resources are rendered but before any resources are installed.
* `post-install`: Executes after resources are installed.
* `pre-upgrade`: Executes after resources are rendered but before any resources are upgraded.
* `post-upgrade`: Executes after resources are upgraded.
* `pre-delete`: Executes before any resources are deleted.
* `post-delete`: Executes after resources are deleted.

The following limitations apply to using hooks with Helm charts deployed by KOTS:

* <HooksLimitation/>

* <HookWeightsLimitation/>

For more information about Helm hooks, see [Chart Hooks](https://helm.sh/docs/topics/charts_hooks/) in the Helm documentation.

## Air Gap Installations

KOTS supports installation of Helm charts into air gap environments when the HelmChart custom resource [`builder`](/reference/custom-resource-helmchart-v2#builder) key is configured. The `builder` key specifies the Helm values to use when building the air gap bundle for the application.
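
For example, a `builder` key might enable an optional component so that its images are rendered and included in the air gap bundle. A minimal sketch follows; the chart and value names are placeholders:

```yaml
apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: samplechart
spec:
  chart:
    name: samplechart
    chartVersion: 3.1.7
  # Helm values used only when rendering the chart to discover the
  # images that must be included in the air gap bundle
  builder:
    postgresql:
      enabled: true
```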

For more information about how to configure the `builder` key to support air gap installations, see [Packaging Air Gap Bundles for Helm Charts](/vendor/helm-packaging-airgap-bundles).

## Resource Deployment Order

When installing an application that includes one or more Helm charts, KOTS always deploys standard Kubernetes manifests to the cluster _before_ deploying any Helm charts. For example, if your release contains a Helm chart, a CRD, and a ConfigMap, then the CRD and ConfigMap resources are deployed before the Helm chart.

For information about how to set the deployment order for Helm charts with KOTS, see [Orchestrating Resource Deployment](/vendor/orchestrating-resource-deployment).

## Deprecated HelmChart kots.io/v1beta1 Installation Methods

This section describes the deprecated Helm chart installation methods that use the HelmChart custom resource `apiVersion: kots.io/v1beta1`.

:::important
<Deprecated/>
:::

### useHelmInstall: true {#v1beta1}

:::note
This method was previously referred to as _Native Helm_.
:::

When you include version `kots.io/v1beta1` of the HelmChart custom resource with `useHelmInstall: true`, KOTS uses Kustomize to render the chart with configuration values, license field values, and rewritten image names. KOTS then packages the resulting manifests into a new Helm chart to install. For more information about Kustomize, see the [Kustomize documentation](https://kubectl.docs.kubernetes.io/).

The following diagram shows how KOTS processes Helm charts for deployment with the `kots.io/v1beta1` method:

![Flow chart of a v1beta1 Helm chart deployment to a cluster](/images/native-helm-flowchart.png)

[View a larger image](/images/native-helm-flowchart.png)

As shown in the diagram above, when given a Helm chart, KOTS:

- Uses Kustomize to apply instructions from KOTS and the end user to the chart resources (see steps 2 - 4 below)
- Packages the resulting manifest files into a new Helm chart (see step 5 below)
- Deploys the new Helm chart (see step 5 below)

To deploy Helm charts with version `kots.io/v1beta1` of the HelmChart custom resource, KOTS does the following:

1. **Checks for previous installations of the chart**: If the Helm chart has already been deployed with a HelmChart custom resource that has `useHelmInstall: false`, then KOTS does not attempt to install the chart. The following error message is displayed if this check fails: `Deployment method for chart <chart_name> has changed`. For more information, see [HelmChart kots.io/v1beta1 (useHelmInstall: false)](#v1beta1-false) below.

1. **Writes base files**: KOTS extracts Helm manifests, renders them with Replicated templating, and then adds all files from the original Helm tarball to a `base/charts/` directory.

   Under `base/charts/`, KOTS adds a Kustomization file named `kustomization.yaml` in the directories for each chart and subchart. KOTS uses these Kustomization files later in the deployment process to apply instructions from Kustomize to the chart resources. For more information about Kustomize, see the [Kustomize website](https://kustomize.io).

   The following screenshot from the Replicated Admin Console shows a `base/charts/` directory for a deployed application.
The `base/charts/` directory contains a Helm chart named postgresql with one subchart:

   ![Base directory in the Admin Console](/images/native-helm-base.png)

   In the screenshot above, a Kustomization file that targets the resources from the postgresql Helm chart appears in the `base/charts/postgresql/` directory:

   ```yaml
   apiVersion: kustomize.config.k8s.io/v1beta1
   kind: Kustomization
   resources:
   - secrets.yaml
   - statefulset.yaml
   - svc-headless.yaml
   - svc.yaml
   ```

1. **Writes midstream files with Kustomize instructions from KOTS**: KOTS then copies the directory structure from `base/charts/` to an `overlays/midstream/charts/` directory. The following screenshot shows an example of the midstream directory for the postgresql Helm chart:

   ![Midstream directory in the Admin Console UI](/images/native-helm-midstream.png)

   As shown in the screenshot above, the midstream directory also contains a Kustomization file with instructions from KOTS for all deployed resources, such as image pull secrets, image rewrites, and backup labels. For example, in the midstream Kustomization file, KOTS rewrites any private images to pull from the Replicated proxy registry.

   The following shows an example of a midstream Kustomization file for the postgresql Helm chart:

   ```yaml
   apiVersion: kustomize.config.k8s.io/v1beta1
   bases:
   - ../../../../base/charts/postgresql
   commonAnnotations:
     kots.io/app-slug: helm-test
   images:
   - name: gcr.io/replicated-qa/postgresql
     newName: proxy.replicated.com/proxy/helm-test/gcr.io/replicated-qa/postgresql
   kind: Kustomization
   patchesStrategicMerge:
   - pullsecrets.yaml
   resources:
   - secret.yaml
   transformers:
   - backup-label-transformer.yaml
   ```

   As shown in the example above, all midstream Kustomization files have a `bases` entry that references the corresponding Kustomization file from the `base/charts/` directory.

1. **Writes downstream files for end user Kustomize instructions**: KOTS then creates an `overlays/downstream/this-cluster/charts` directory and again copies the directory structure of `base/charts/` to this downstream directory:

   ![Downstream directory in the Admin Console UI](/images/native-helm-downstream.png)

   As shown in the screenshot above, each chart and subchart directory in the downstream directory also contains a Kustomization file. These downstream Kustomization files contain only a `bases` entry that references the corresponding Kustomization file from the midstream directory. For example:

   ```yaml
   apiVersion: kustomize.config.k8s.io/v1beta1
   bases:
   - ../../../../midstream/charts/postgresql
   kind: Kustomization
   ```

   End users can edit the downstream Kustomization files to make changes before deploying the application. Any instructions that users add to the Kustomization files in the downstream directory take priority over midstream and base Kustomization files. For more information about how users can make changes before deploying, see [Patching with Kustomize](/enterprise/updating-patching-with-kustomize).

1. **Deploys the Helm chart**: KOTS runs `kustomize build` for any Kustomization files in the `overlays/downstream/charts` directory. KOTS then packages the resulting manifests into a new chart for Helm to consume.

   Finally, KOTS runs `helm upgrade -i <release-name> <chart> --timeout 3600s -n <namespace>`.
The Helm binary processes hooks and weights, applies manifests to the Kubernetes cluster, and saves a release secret similar to `sh.helm.release.v1.chart-name.v1`. Helm uses this secret to track upgrades and rollbacks of applications.

### useHelmInstall: false {#v1beta1-false}

:::note
This method was previously referred to as _Replicated Helm_.
:::

When you use version `kots.io/v1beta1` of the HelmChart custom resource with `useHelmInstall: false`, KOTS renders the Helm templates and deploys them as standard Kubernetes manifests using `kubectl apply`. KOTS also has additional functionality for specific Helm hooks. For example, when KOTS encounters an upstream Helm chart with a `helm.sh/hook-delete-policy` annotation, it automatically adds the same `kots.io/hook-delete-policy` to the Job object.

The resulting deployment consists of standard Kubernetes manifests. Therefore, cluster operators can view the exact differences between what is currently deployed and what an update will deploy.

### Limitations {#replicated-helm-limitations}

This section lists the limitations for version `kots.io/v1beta1` of the HelmChart custom resource.

#### kots.io/v1beta1 (useHelmInstall: true) Limitations

The following limitations apply when using version `kots.io/v1beta1` of the HelmChart custom resource with `useHelmInstall: true`:

* <Deprecated/>

* Available only for Helm v3.

* <GitOpsLimitation/>

  For more information, see [KOTS Auto-GitOps Workflow](/enterprise/gitops-workflow).

* <HooksLimitation/>

* <HookWeightsLimitation/>

* <TemplateLimitation/>

* <VersionLimitation/>

  For more information, see [helmVersion](/reference/custom-resource-helmchart#helmversion) in _HelmChart_.

#### kots.io/v1beta1 (useHelmInstall: false) Limitations {#v1beta1-false-limitations}

The following limitations apply when using version `kots.io/v1beta1` of the HelmChart custom resource with `useHelmInstall: false`:

* <ReplicatedHelmMigration/>

* <TemplateLimitation/>

* <VersionLimitation/>

  For more information, see [helmVersion](/reference/custom-resource-helmchart#helmversion) in _HelmChart_.

================
File: docs/vendor/helm-native-v2-using.md
================
import KotsHelmCrDescription from "../partials/helm/_kots-helm-cr-description.mdx"

# Configuring the HelmChart Custom Resource v2

This topic describes how to configure the Replicated HelmChart custom resource version `kots.io/v1beta2` to support Helm chart installations with Replicated KOTS.

## Workflow

To support Helm chart installations with the KOTS `kots.io/v1beta2` HelmChart custom resource, do the following:
1. Rewrite image names to use the Replicated proxy registry. See [Rewrite Image Names](#rewrite-image-names).
1. Inject a KOTS-generated image pull secret that grants proxy access to private images. See [Inject Image Pull Secrets](#inject-image-pull-secrets).
1. Add a pull secret for any Docker Hub images that could be rate limited. See [Add Pull Secret for Rate-Limited Docker Hub Images](#docker-secret).
1. Configure the `builder` key to allow your users to push images to their own local registries. See [Support Local Image Registries](#local-registries).
1. (KOTS Existing Cluster and kURL Installations Only) Add backup labels to your resources to support backup and restore with the KOTS snapshots feature. See [Add Backup Labels for Snapshots](#add-backup-labels-for-snapshots).
   :::note
   Snapshots are not supported for installations with Replicated Embedded Cluster. For more information about configuring disaster recovery for Embedded Cluster, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery).
   :::

## Task 1: Rewrite Image Names {#rewrite-image-names}

Configure the KOTS HelmChart custom resource `values` key so that KOTS rewrites the names for both private and public images in your Helm values during deployment. This allows images to be accessed at one of the following locations, depending on where they were pushed:
* The [Replicated proxy registry](private-images-about) (`proxy.replicated.com` or your custom domain)
* A public image registry
* Your customer's local registry
* The built-in registry used in Replicated Embedded Cluster or Replicated kURL installations in air-gapped environments

You will use the following KOTS template functions to conditionally rewrite image names depending on where the given image should be accessed:
* [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry): Returns true if the installation environment is configured to use a local image registry. HasLocalRegistry is always true in air gap installations. HasLocalRegistry is also true in online installations if the user configured a local private registry.
* [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost): Returns the host of the local registry that the user configured. Alternatively, for air gap installations with Embedded Cluster or kURL, LocalRegistryHost returns the host of the built-in registry.
* [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace): Returns the namespace of the local registry that the user configured. Alternatively, for air gap installations with Embedded Cluster or kURL, LocalRegistryNamespace returns the namespace of the built-in registry.

  <details>
  <summary>What is the registry namespace?</summary>

  The registry namespace is the path between the registry and the image name. For example, `images.mycompany.com/namespace/image:tag`.
  </details>

### Task 1a: Rewrite Private Image Names

For any private images used by your application, configure the HelmChart custom resource so that image names are rewritten to either the Replicated proxy registry (for online installations) or to the local registry in the user's installation environment (for air gap installations or online installations where the user configured a local registry).

To rewrite image names to the location of the image in the proxy registry, use the format `<proxy-domain>/proxy/<app-slug>/<image>`, where:
* `<proxy-domain>` is `proxy.replicated.com` or your custom domain. For more information about configuring a custom domain for the proxy registry, see [Using Custom Domains](/vendor/custom-domains-using).
* `<app-slug>` is the unique application slug in the Vendor Portal.
* `<image>` is the path to the image in your registry.

For example, if the private image is `quay.io/my-org/nginx:v1.0.1` and `images.mycompany.com` is the custom proxy registry domain, then the image name should be rewritten to `images.mycompany.com/proxy/my-app-slug/quay.io/my-org/nginx:v1.0.1`.

For more information, see the example below.

#### Example

The following HelmChart custom resource uses the KOTS [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry), [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost), and [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace) template functions to conditionally rewrite an image registry and repository depending on whether a local registry is used:

```yaml
# kots.io/v1beta2 HelmChart custom resource

apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: samplechart
spec:
  ...
  values:
    image:
      # If a registry is configured by the user or by Embedded Cluster/kURL, use that registry's hostname
      # Else use proxy.replicated.com or your custom proxy registry domain
      registry: '{{repl HasLocalRegistry | ternary LocalRegistryHost "images.mycompany.com" }}'
      # If a registry is configured by the user or by Embedded Cluster/kURL, use that registry namespace
      # Else use the image's namespace at the proxy registry domain
      repository: '{{repl HasLocalRegistry | ternary LocalRegistryNamespace "proxy/my-app/quay.io/my-org" }}/nginx'
      tag: v1.0.1
```

The `spec.values.image.registry` and `spec.values.image.repository` fields in the HelmChart custom resource above correspond to the `image.registry` and `image.repository` fields in the Helm chart `values.yaml` file, as shown below:

```yaml
# Helm chart values.yaml file

image:
  registry: quay.io
  repository: my-org/nginx
  tag: v1.0.1
```

During installation, KOTS renders the template functions and sets the `image.registry` and `image.repository` fields in the Helm chart `values.yaml` file based on the value of the corresponding fields in the HelmChart custom resource.

Any templates in the Helm chart that access the `image.registry` and `image.repository` fields are updated to use the appropriate value, as shown in the example below:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
  - name: nginx
    image: {{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}
```

### Task 1b: Rewrite Public Image Names

For any public images used by your application, configure the HelmChart custom resource so that image names are rewritten to either the location of the image in the public registry (for online installations) or the local registry (for air gap installations or online installations where the user configured a local registry).

For more information, see the example below.

#### Example

The following HelmChart custom resource uses the KOTS [HasLocalRegistry](/reference/template-functions-config-context#haslocalregistry), [LocalRegistryHost](/reference/template-functions-config-context#localregistryhost), and [LocalRegistryNamespace](/reference/template-functions-config-context#localregistrynamespace) template functions to conditionally rewrite an image registry and repository depending on whether a local registry is used:

```yaml
# kots.io/v1beta2 HelmChart custom resource

apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: samplechart
spec:
  ...
  values:
    image:
      # If a local registry is used, use that registry's hostname
      # Else, use the public registry host (ghcr.io)
      registry: '{{repl HasLocalRegistry | ternary LocalRegistryHost "ghcr.io" }}'
      # If a local registry is used, use the registry namespace provided
      # Else, use the path to the image in the public registry
      repository: '{{repl HasLocalRegistry | ternary LocalRegistryNamespace "cloudnative-pg" }}/cloudnative-pg'
      tag: catalog-1.24.0
```

The `spec.values.image.registry` and `spec.values.image.repository` fields in the HelmChart custom resource correspond to the `image.registry` and `image.repository` fields in the Helm chart `values.yaml` file, as shown in the example below:

```yaml
# Helm chart values.yaml file

image:
  registry: ghcr.io
  repository: cloudnative-pg/cloudnative-pg
  tag: catalog-1.24.0
```

During installation, KOTS renders the template functions and sets the `image.registry` and `image.repository` fields in your Helm chart `values.yaml` file based on the value of the corresponding fields in the HelmChart custom resource. Any templates in the Helm chart that access the `image.registry` and `image.repository` fields are updated to use the appropriate value, as shown in the example below:

```yaml
apiVersion: v1
kind: Pod
spec:
  containers:
  - name: cloudnative-pg
    image: {{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}
```

## Task 2: Inject Image Pull Secrets {#inject-image-pull-secrets}

Kubernetes requires a Secret of type `kubernetes.io/dockerconfigjson` to authenticate with a registry and pull a private image. When you reference a private image in a Pod definition, you also provide the name of the Secret in an `imagePullSecrets` key in the Pod definition. For more information, see [Specifying imagePullSecrets on a Pod](https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod) in the Kubernetes documentation.

During installation, KOTS creates a `kubernetes.io/dockerconfigjson` type Secret that is based on the customer license. This pull secret grants access to the private image through the Replicated proxy registry or the Replicated registry. Additionally, if the user configured a local image registry, then the pull secret contains the credentials for the local registry. You must provide the name of this KOTS-generated pull secret in any Pod definitions that reference the private image.

You can inject the name of this pull secret into a field in the HelmChart custom resource using the Replicated ImagePullSecretName template function. During installation, KOTS sets the value of the corresponding field in your Helm chart `values.yaml` file with the rendered value of the ImagePullSecretName template function.

#### Example

The following example shows a `spec.values.image.pullSecrets` array in the HelmChart custom resource that uses the ImagePullSecretName template function to inject the name of the KOTS-generated pull secret:

```yaml
# kots.io/v1beta2 HelmChart custom resource

apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: samplechart
spec:
  values:
    image:
      # Note: Use proxy.replicated.com or your custom domain
      registry: '{{repl HasLocalRegistry | ternary LocalRegistryHost "proxy.replicated.com" }}'
      repository: '{{repl HasLocalRegistry | ternary LocalRegistryNamespace "proxy/my-app/ecr.us-east-1.amazonaws.com/my-org" }}/api'
      pullSecrets:
        - name: '{{repl ImagePullSecretName }}'
```

The `spec.values.image.pullSecrets` array in the HelmChart custom resource corresponds to an `image.pullSecrets` array in the Helm chart `values.yaml` file, as shown in the example below:

```yaml
# Helm chart values.yaml file

image:
  registry: ecr.us-east-1.amazonaws.com
  repository: my-org/api/nginx
  pullSecrets:
    - name: my-org-secret
```

During installation, KOTS renders the ImagePullSecretName template function and adds the rendered pull secret name to the `image.pullSecrets` array in the Helm chart `values.yaml` file.

Any templates in the Helm chart that access the `image.pullSecrets` field are updated to use the name of the KOTS-generated pull secret, as shown in the example below:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
  - name: nginx
    image: {{ .Values.image.registry }}/{{ .Values.image.repository }}
    {{- with .Values.image.pullSecrets }}
    imagePullSecrets:
      {{- toYaml . | nindent 2 }}
    {{- end }}
```

## Task 3: Add Pull Secret for Rate-Limited Docker Hub Images {#docker-secret}

Docker Hub enforces rate limits for Anonymous and Free users. To avoid errors caused by reaching the rate limit, your users can run the `kots docker ensure-secret` command, which creates an `<app-slug>-kotsadm-dockerhub` secret for pulling Docker Hub images and applies the secret to Kubernetes manifests that have images. For more information, see [Avoiding Docker Hub Rate Limits](/enterprise/image-registry-rate-limits).

If you are deploying a Helm chart with Docker Hub images that could be rate limited, to support the use of the `kots docker ensure-secret` command, any Pod definitions in your Helm chart templates that reference the rate-limited image must be updated to access the `<app-slug>-kotsadm-dockerhub` pull secret, where `<app-slug>` is your application slug. For more information, see [Get the Application Slug](/vendor/vendor-portal-manage-app#slug).

You can do this by adding the `<app-slug>-kotsadm-dockerhub` pull secret to a field in the `values` key of the HelmChart custom resource, along with a matching field in your Helm chart `values.yaml` file. During installation, KOTS sets the value of the matching field in the `values.yaml` file with the `<app-slug>-kotsadm-dockerhub` pull secret, and any Helm chart templates that access the value are updated.

For more information about Docker Hub rate limiting, see [Understanding Docker Hub rate limiting](https://www.docker.com/increase-rate-limits) on the Docker website.
#### Example

The following Helm chart `values.yaml` file includes `image.registry`, `image.repository`, and `image.pullSecrets` for a rate-limited Docker Hub image:

```yaml
# Helm chart values.yaml file

image:
  registry: docker.io
  repository: my-org/example-docker-hub-image
  pullSecrets: []
```

The following HelmChart custom resource includes `spec.values.image.registry`, `spec.values.image.repository`, and `spec.values.image.pullSecrets`, which correspond to those in the Helm chart `values.yaml` file above.

The `spec.values.image.pullSecrets` array lists the `<app-slug>-kotsadm-dockerhub` pull secret, where the slug for the application is `example-app-slug`:

```yaml
# kots.io/v1beta2 HelmChart custom resource

apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: samplechart
spec:
  values:
    image:
      registry: docker.io
      repository: my-org/example-docker-hub-image
      pullSecrets:
        - name: example-app-slug-kotsadm-dockerhub
```

During installation, KOTS adds the `example-app-slug-kotsadm-dockerhub` secret to the `image.pullSecrets` array in the Helm chart `values.yaml` file. Any templates in the Helm chart that access `image.pullSecrets` are updated to use `example-app-slug-kotsadm-dockerhub`:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: example
spec:
  containers:
    - name: example
      image: {{ .Values.image.registry }}/{{ .Values.image.repository }}
  {{- with .Values.image.pullSecrets }}
  imagePullSecrets:
  {{- toYaml . | nindent 2 }}
  {{- end }}
```

## Task 4: Support the Use of Local Image Registries {#local-registries}

Local image registries are required for KOTS installations in air-gapped environments with no outbound internet connection. Users in online environments can also optionally use a local registry. For more information about how users configure a local image registry with KOTS, see [Configuring Local Image Registries](/enterprise/image-registry-settings).

To support the use of local registries, configure the `builder` key. For more information about how to configure the `builder` key, see [`builder`](/reference/custom-resource-helmchart-v2#builder) in _HelmChart v2_.

## Task 5: Add Backup Labels for Snapshots (KOTS Existing Cluster and kURL Installations Only) {#add-backup-labels-for-snapshots}

:::note
The Replicated [snapshots](snapshots-overview) feature for backup and restore is supported only for existing cluster installations with KOTS. Snapshots are not supported for installations with Embedded Cluster. For more information about disaster recovery for installations with Embedded Cluster, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery.mdx).
:::

The snapshots feature requires the following labels on all resources in your Helm chart that you want to be included in the backup:
* `kots.io/backup: velero`
* `kots.io/app-slug: APP_SLUG`, where `APP_SLUG` is the slug of your Replicated application.

For more information about snapshots, see [Understanding Backup and Restore](snapshots-overview).

To support backup and restore with snapshots, add the `kots.io/backup: velero` and `kots.io/app-slug: APP_SLUG` labels to fields under the HelmChart custom resource `optionalValues` key. Add a `when` statement that evaluates to true only when the customer license has the `isSnapshotSupported` entitlement.

The fields that you create under the `optionalValues` key must map to fields in your Helm chart `values.yaml` file.
For more information about working with the `optionalValues` key, see [optionalValues](/reference/custom-resource-helmchart-v2#optionalvalues) in _HelmChart v2_.

#### Example

The following example shows how to add backup labels for snapshots in the `optionalValues` key of the HelmChart custom resource:

```yaml
# kots.io/v1beta2 HelmChart custom resource

apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: samplechart
spec:
  ...
  optionalValues:
    # add backup labels only if the license supports snapshots
    - when: "repl{{ LicenseFieldValue `isSnapshotSupported` }}"
      recursiveMerge: true
      values:
        mariadb:
          commonLabels:
            kots.io/backup: velero
            kots.io/app-slug: repl{{ LicenseFieldValue "appSlug" }}
          podLabels:
            kots.io/backup: velero
            kots.io/app-slug: repl{{ LicenseFieldValue "appSlug" }}
```

## Additional Information

### About the HelmChart Custom Resource

<KotsHelmCrDescription/>

For more information about the HelmChart custom resource, including the unique requirements and limitations for the keys described in this topic, see [HelmChart v2](/reference/custom-resource-helmchart-v2).

### HelmChart v1 and v2 Differences

To support the use of local registries with version `kots.io/v1beta2` of the HelmChart custom resource, provide the necessary values in the `builder` field to render the Helm chart with all of the necessary images so that KOTS knows where to pull the images from to push them into the local registry.

For more information about how to configure the `builder` key, see [Packaging Air Gap Bundles for Helm Charts](/vendor/helm-packaging-airgap-bundles) and [`builder`](/reference/custom-resource-helmchart-v2#builder) in _HelmChart v2_.

The `kots.io/v1beta2` HelmChart custom resource has the following differences from `kots.io/v1beta1`:

<table>
  <tr>
    <th>HelmChart v1beta2</th>
    <th>HelmChart v1beta1</th>
    <th>Description</th>
  </tr>
  <tr>
    <td><code>apiVersion: kots.io/v1beta2</code></td>
    <td><code>apiVersion: kots.io/v1beta1</code></td>
    <td><code>apiVersion</code> is updated to <code>kots.io/v1beta2</code></td>
  </tr>
  <tr>
    <td><code>releaseName</code></td>
    <td><code>chart.releaseName</code></td>
    <td><code>releaseName</code> is a top level field under <code>spec</code></td>
  </tr>
  <tr>
    <td>N/A</td>
    <td><code>helmVersion</code></td>
    <td><code>helmVersion</code> field is removed</td>
  </tr>
  <tr>
    <td>N/A</td>
    <td><code>useHelmInstall</code></td>
    <td><code>useHelmInstall</code> field is removed</td>
  </tr>
</table>

### Migrate Existing KOTS Installations to HelmChart v2

Existing KOTS installations can be migrated to use the KOTS HelmChart v2 method, without having to reinstall the application.

There are different steps for migrating to HelmChart v2 depending on the application deployment method used previously. For more information, see [Migrating Existing Installations to HelmChart v2](helm-v2-migrate).

================
File: docs/vendor/helm-optional-charts.md
================
# Example: Including Optional Helm Charts

This topic describes using optional Helm charts in your application. It also provides an example of how to configure the Replicated HelmChart custom resource to exclude optional Helm charts from your application when a given condition is met.

## About Optional Helm Charts

By default, KOTS creates an instance of a Helm chart for every HelmChart custom resource manifest file in the upstream application manifests.
However, you can configure your application so that KOTS excludes certain Helm charts based on a conditional statement.

To create this conditional statement, you add a Replicated KOTS template function to an `exclude` field in the HelmChart custom resource file. For example, you can add a template function that evaluates to `true` or `false` depending on the user's selection for a configuration field on the KOTS Admin Console Config page. KOTS renders the template function in the `exclude` field, and excludes the chart if the template function evaluates to `true`.

For all optional components, Replicated recommends that you add a configuration option to allow the user to optionally enable or disable the component. This lets you support enterprises that want everything to run in the cluster and those that want to bring their own services for stateful components.

For more information about template functions, see [About Template Functions](/reference/template-functions-about).

## Example

This example uses an application that has a Postgres database. The community-supported Postgres Helm chart is available at https://github.com/bitnami/charts/tree/main/bitnami/postgresql.

In this example, you create a configuration field on the Admin Console Config page that lets the user provide their own Postgres instance or use a Postgres service that is embedded with the application. Then, you configure the HelmChart custom resource in a release for an application in the Replicated Vendor Portal to conditionally exclude the optional Postgres component.

### Step 1: Create the Configuration Fields

To start, define the Admin Console Config page that gives the user a choice of "Embedded Postgres" or "External Postgres", where "External Postgres" is user-supplied.

1. Log in to the [Vendor Portal](https://vendor.replicated.com). Create a new application for this example, or open an existing application. Then, click **Releases > Create release** to create a new release for the application.

1. In the Config custom resource manifest file in the release, add the following YAML to create the "Embedded Postgres" or "External Postgres" configuration options:

   ```yaml
   apiVersion: kots.io/v1beta1
   kind: Config
   metadata:
     name: example-application
   spec:
     groups:
       - name: database
         title: Database
         description: Database Options
         items:
           - name: postgres_type
             type: radio
             title: Postgres
             default: embedded_postgres
             items:
               - name: embedded_postgres
                 title: Embedded Postgres
               - name: external_postgres
                 title: External Postgres
           - name: embedded_postgres_password
             type: password
             value: "{{repl RandomString 32}}"
             hidden: true
           - name: external_postgres_uri
             type: text
             title: External Postgres Connection String
             help_text: Connection string for a Postgres 10.x server
             when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
   ```

   The YAML above does the following:
   * Creates a field with "Embedded Postgres" or "External Postgres" radio buttons
   * Uses the Replicated RandomString template function to generate a unique default password for the embedded Postgres instance at installation time
   * Creates fields for the Postgres password and connection string, if the user selects the External Postgres option

   The following shows how this Config custom resource manifest file displays on the Admin Console Config page:

   ![Postgres Config Screen](/images/postgres-config-screen.gif)

### Step 2: Create a Secret for Postgres

The application has a few components that use Postgres, and they all mount the Postgres connection string from a single Secret.

Define a Secret for Postgres that renders differently if the user selects the Embedded Postgres or External Postgres option:

1. In the release, create a Secret file and add the following YAML:

   ```yaml
   apiVersion: v1
   kind: Secret
   metadata:
     name: postgresql-secret
   stringData:
     uri: postgres://username:password@postgresql:5432/database?sslmode=disable
   ```

1. Edit the `uri` field in the Secret to add a conditional statement that renders either a connection string to the embedded Postgres chart or to the user-supplied instance:

   ```yaml
   apiVersion: v1
   kind: Secret
   metadata:
     name: postgresql-secret
   stringData:
     uri: repl{{ if ConfigOptionEquals "postgres_type" "embedded_postgres" }}postgres://myapplication:repl{{ ConfigOption "embedded_postgres_password" }}@postgres:5432/mydatabase?sslmode=disablerepl{{ else }}repl{{ ConfigOption "external_postgres_uri" }}repl{{ end }}
   ```

   As shown above, you must use a single line for the conditional statement. Optionally, you can use the Replicated Base64Encode function to pipe a string through. See [Base64Encode](/reference/template-functions-static-context#base64encode) in _Static Context_.

### Step 3: Add the Helm Chart

Next, package the Helm chart and add it to the release in the Vendor Portal:

1. Run the following commands to generate a `.tgz` package of the Helm chart:

   ```
   helm repo add bitnami https://charts.bitnami.com/bitnami
   helm fetch bitnami/postgresql
   ```

1. Drag and drop the `.tgz` file into the file tree of the release. The Vendor Portal automatically creates a new HelmChart custom resource named `postgresql.yaml`, which references the `.tgz` file you uploaded.

   For more information about adding Helm charts to a release in the Vendor Portal, see [Managing Releases with the Vendor Portal](releases-creating-releases).
### Step 4: Edit the HelmChart Custom Resource

Finally, edit the HelmChart custom resource:

1. In the HelmChart custom resource, add a mapping to the `values` key so that it uses the password you created. Also, add an `exclude` field to specify that the Postgres Helm chart must only be included when the user selects the embedded Postgres option on the Config page:

   ```yaml
   apiVersion: kots.io/v1beta2
   kind: HelmChart
   metadata:
     name: postgresql
   spec:
     exclude: 'repl{{ ConfigOptionEquals `postgres_type` `external_postgres` }}'
     chart:
       name: postgresql
       chartVersion: 12.1.7

     releaseName: samplechart-release-1

     # values are used in the customer environment, as a pre-render step
     # these values will be supplied to helm template
     values:
       auth:
         username: username
         password: "repl{{ ConfigOption `embedded_postgres_password` }}"
         database: mydatabase
   ```

1. Save and promote the release. Then, install the release in a development environment to test the embedded and external Postgres options. For more information, see [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster).

================
File: docs/vendor/helm-optional-value-keys.md
================
import Values from "../partials/helm/_helm-cr-values.mdx"
import OptionalValues from "../partials/helm/_helm-cr-optional-values.mdx"
import OptionalValuesWhen from "../partials/helm/_helm-cr-optional-values-when.mdx"
import OptionalValuesRecursiveMerge from "../partials/helm/_helm-cr-optional-values-recursive-merge.mdx"
import ConfigExample from "../partials/helm/_set-values-config-example.mdx"
import LicenseExample from "../partials/helm/_set-values-license-example.mdx"
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

# Setting Helm Values with KOTS

This topic describes how to use the Replicated KOTS HelmChart custom resource to set and delete values in `values.yaml` files for Helm charts deployed with Replicated KOTS.

For a tutorial that demonstrates how to set Helm values in a sample Helm chart using the KOTS HelmChart custom resource, see [Tutorial: Set Helm Chart Values with KOTS](/vendor/tutorial-config-setup).

## Overview

The KOTS HelmChart custom resource [`values`](/reference/custom-resource-helmchart-v2#values) and [`optionalValues`](/reference/custom-resource-helmchart-v2#optionalvalues) keys create a mapping between KOTS and the `values.yaml` file for the corresponding Helm chart. This allows you to set or delete Helm values during installation or upgrade with KOTS, without having to make any changes to the Helm chart itself.

You can create this mapping by adding a value under `values` or `optionalValues` that uses the exact same key name as a value in the corresponding Helm chart `values.yaml` file. During installation or upgrade, KOTS sets the Helm chart `values.yaml` file with any matching values from the `values` or `optionalValues` keys.

The `values` and `optionalValues` keys also support the use of Replicated KOTS template functions. When you use KOTS template functions in the `values` and `optionalValues` keys, KOTS renders the template functions and then sets any matching values in the corresponding Helm chart `values.yaml` with the rendered values. For more information, see [About Template Functions](/reference/template-functions-about).
Common use cases for the HelmChart custom resource `values` and `optionalValues` keys include:
* Setting Helm values based on user-supplied values from the KOTS Admin Console configuration page
* Setting values based on the user's unique license entitlements
* Conditionally setting values when a given condition is met
* Deleting a default value key from the `values.yaml` file that should not be included for KOTS installations

For more information about the syntax for these fields, see [`values`](/reference/custom-resource-helmchart-v2#values) and [`optionalValues`](/reference/custom-resource-helmchart-v2#optionalvalues) in _HelmChart v2_.

## Set Values

This section describes how to use KOTS template functions or static values in the HelmChart custom resource `values` key to set existing Helm values.

### Using a Static Value

You can use static values in the HelmChart custom resource `values` key when a given Helm value must be set to the same value for all KOTS installations. This allows you to set values for KOTS installations only, without affecting values for any installations that use the Helm CLI.

For example, the following Helm chart `values.yaml` file contains `kotsOnlyValue.enabled`, which is set to `false` by default:

```yaml
# Helm chart values.yaml
kotsOnlyValue:
  enabled: false
```

The following HelmChart custom resource contains a mapping to `kotsOnlyValue.enabled` in its `values` key, which is set to `true`:

```yaml
# KOTS HelmChart custom resource

apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: samplechart
spec:
  chart:
    name: samplechart
    chartVersion: 3.1.7

  releaseName: samplechart-release-1

  values:
    kotsOnlyValue:
      enabled: true
```

During installation or upgrade with KOTS, KOTS sets `kotsOnlyValue.enabled` in the Helm chart `values.yaml` file to `true` so that the KOTS-only value is enabled for the installation. For installations that use the Helm CLI instead of KOTS, `kotsOnlyValue.enabled` remains `false`.

### Using KOTS Template Functions

You can use KOTS template functions in the HelmChart custom resource `values` key to set Helm values with the rendered template functions. For more information, see [About Template Functions](/reference/template-functions-about).
<Tabs>
  <TabItem value="config" label="Config Context Example" default>
    <ConfigExample/>
  </TabItem>
  <TabItem value="license" label="License Context Example" default>
    <LicenseExample/>
  </TabItem>
</Tabs>

## Conditionally Set Values

<OptionalValues/>

For example, the following HelmChart custom resource uses the `optionalValues` key and the [ConfigOptionEquals](/reference/template-functions-config-context#configoptionequals) template function to set user-supplied values for an external MariaDB database:

```yaml
# KOTS HelmChart custom resource

apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: wordpress
spec:
  chart:
    name: wordpress
    chartVersion: 15.3.2

  releaseName: sample-release-1

  optionalValues:
    - when: "repl{{ ConfigOptionEquals `mariadb_type` `external`}}"
      recursiveMerge: false
      values:
        externalDatabase:
          host: "repl{{ ConfigOption `external_db_host`}}"
          user: "repl{{ ConfigOption `external_db_user`}}"
          password: "repl{{ ConfigOption `external_db_password`}}"
          database: "repl{{ ConfigOption `external_db_database`}}"
          port: "repl{{ ConfigOption `external_db_port`}}"
```

During installation, KOTS renders the template functions and sets the `externalDatabase` values in the HelmChart `values.yaml` file only when the user selects the `external` option for `mariadb_type` on the Admin Console configuration page.

### About Recursive Merge for optionalValues {#recursive-merge}

<OptionalValuesRecursiveMerge/>

For example, the following HelmChart custom resource has both `values` and `optionalValues`:

```yaml
values:
  favorite:
    drink:
      hot: tea
      cold: soda
    dessert: ice cream
    day: saturday

optionalValues:
  - when: '{{repl ConfigOptionEquals "example_config_option" "1" }}'
    recursiveMerge: false
    values:
      example_config_option:
        enabled: true
      favorite:
        drink:
          cold: lemonade
```

The `values.yaml` file for the associated Helm chart defines the following key value pairs:

```yaml
favorite:
  drink:
    hot: coffee
    cold: soda
  dessert: pie
```

The `templates/configmap.yaml` file for the Helm chart maps these values to the following fields:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: test-configmap
data:
  favorite_day: {{ .Values.favorite.day }}
  favorite_dessert: {{ .Values.favorite.dessert }}
  favorite_drink_cold: {{ .Values.favorite.drink.cold }}
  favorite_drink_hot: {{ .Values.favorite.drink.hot }}
```

When `recursiveMerge` is set to `false`, the ConfigMap for the deployed application includes the following key value pairs:

```yaml
favorite_day: null
favorite_dessert: pie
favorite_drink_cold: lemonade
favorite_drink_hot: coffee
```

In this case, the top level keys in `optionalValues` override the top level keys in `values`. KOTS then uses the values from the Helm chart `values.yaml` to populate the remaining fields in the ConfigMap: `favorite_day`, `favorite_dessert`, and `favorite_drink_hot`.

When `recursiveMerge` is set to `true`, the ConfigMap for the deployed application includes the following key value pairs:

```yaml
favorite_day: saturday
favorite_dessert: ice cream
favorite_drink_cold: lemonade
favorite_drink_hot: tea
```

In this case, all keys from `values` and `optionalValues` are merged. Because both include `favorite.drink.cold`, KOTS uses `lemonade` from `optionalValues`.
## Delete a Default Key

If the Helm chart `values.yaml` contains a static value that must be deleted when deploying with KOTS, you can set the value to `"null"` (including the quotation marks) in the `values` key of the HelmChart custom resource.

A common use case for deleting default value keys is when you include a community Helm chart as a dependency. Because you cannot control how the community chart is built and structured, you might want to change some of the default behavior.

For example, the following HelmChart custom resource sets an `exampleKey` value to `"null"` when the chart is deployed with KOTS:

```yaml
# KOTS HelmChart custom resource

apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: samplechart
spec:
  chart:
    name: samplechart
    chartVersion: 3.1.7

  releaseName: samplechart-release-1

  values:
    exampleKey: "null"
```

For more information about using a `null` value to delete a key, see [Deleting a Default Key](https://helm.sh/docs/chart_template_guide/values_files/#deleting-a-default-key) in the Helm documentation.

================
File: docs/vendor/helm-packaging-airgap-bundles.mdx
================
import HelmBuilderRequirements from "../partials/helm/_helm-builder-requirements.mdx"
import BuilderAirgapIntro from "../partials/helm/_helm-cr-builder-airgap-intro.mdx"
import BuilderExample from "../partials/helm/_helm-cr-builder-example.mdx"
import AirGapBundle from "../partials/airgap/_airgap-bundle.mdx"

# Packaging Air Gap Bundles for Helm Charts

This topic describes how to package and build air gap bundles for releases that contain one or more Helm charts. This topic applies to applications deployed with Replicated KOTS.

## Overview

<AirGapBundle/>

When building the `.airgap` bundle for a release that contains one or more Helm charts, the Vendor Portal renders the Helm chart templates in the release using values supplied in the KOTS HelmChart custom resource [`builder`](/reference/custom-resource-helmchart-v2#builder) key.

## Configure the `builder` Key

You should configure the `builder` key if you need to change any default values in your Helm chart so that the `.airgap` bundle for the release includes all images needed to successfully deploy the chart. For example, you can change the default Helm values so that images for any conditionally-deployed components are always included in the air gap bundle. Additionally, you can use the `builder` key to set any `required` values in your Helm chart that must be set for the chart to render.

The values in the `builder` key map to values in the given Helm chart's `values.yaml` file. For example, `spec.builder.postgres.enabled` in the example HelmChart custom resource below would map to a `postgres.enabled` field in the `values.yaml` file for the `samplechart` chart:

```yaml
# KOTS HelmChart custom resource

apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: samplechart
spec:
  chart:
    name: samplechart
    chartVersion: 3.1.7
  builder:
    postgres:
      enabled: true
```

For requirements, recommendations, and examples of common use cases for the `builder` key, see the sections below.
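To make the mapping concrete, the following is a minimal sketch of what the corresponding `values.yaml` file for the `samplechart` chart above might contain. The `postgres.enabled` default shown here is an assumption for illustration:

```yaml
# Helm chart values.yaml file (sketch)

postgres:
  # Assumed default. The builder key sets this to true when the Vendor Portal
  # renders the chart to build the .airgap bundle, so the conditionally-deployed
  # Postgres images are discovered and included in the bundle.
  enabled: false
```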
### Requirements and Recommendations

<HelmBuilderRequirements/>

### Example: Set the Image Registry for Air Gap Installations

For air gap installations, if the [Replicated proxy registry](/vendor/private-images-about) domain `proxy.replicated.com` is used as the default image name for any images, you need to rewrite the image to the upstream image name so that it can be processed and included in the air gap bundle. You can use the `builder` key to do this by hardcoding the upstream location of the image (image registry, repository, and tag), as shown in the example below:

```yaml
apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: samplechart
spec:
  chart:
    name: samplechart
    chartVersion: 3.1.7
  builder:
    my-service:
      image:
        registry: 12345.dkr.ecr.us-west-1.amazonaws.com
        repository: my-app
        tag: "1.0.2"
```

When building the `.airgap` bundle for the release, the Vendor Portal uses the registry, repository, and tag values supplied in the `builder` key to template the Helm chart, rather than the default values defined in the Helm `values.yaml` file. This ensures that the image is pulled from the upstream registry using the credentials supplied in the Vendor Portal, without requiring any changes to the Helm chart directly.

### Example: Include Conditional Images

Many applications have images that are included or excluded based on a given condition. For example, enterprise users might have the option to deploy an embedded database with the application or bring their own database. To support this use case for air gap installations, the images for any conditionally-deployed components must always be included in the air gap bundle.

<BuilderExample/>

## Related Topics

* [builder](/reference/custom-resource-helmchart-v2#builder)
* [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap)
* [Air Gap Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster-airgapped)

================
File: docs/vendor/helm-v2-migrate.md
================
# Migrating Existing Installations to HelmChart v2

This topic describes how to migrate existing Replicated KOTS installations to the KOTS HelmChart `kots.io/v1beta2` (HelmChart v2) installation method, without having to reinstall the application. It also includes information about how to support both HelmChart v1 and HelmChart v2 installations from a single release, and lists frequently-asked questions (FAQs) related to migrating to HelmChart v2.

## Migrate to HelmChart v2

### Requirements

* The HelmChart v2 custom resource is supported with KOTS v1.99.0 and later. If any of your customers are running a version of KOTS earlier than v1.99.0, see [Support Customers on KOTS Versions Earlier Than v1.99.0](#support-both-v1-v2) below for more information about how to support both HelmChart v1 and HelmChart v2 installations from the same release.

* The Helm `--take-ownership` flag is supported with KOTS v1.124.0 and later.

* The `kots.io/keep` annotation is supported with KOTS v1.122.0 and later.

### Migrate From HelmChart v1 with `useHelmInstall: true`

To migrate existing installations from HelmChart v1 with `useHelmInstall: true` to HelmChart v2:

1. In a development environment, install an application release using the KOTS HelmChart v1 with `useHelmInstall: true` method. You will use this installation to test the migration to HelmChart v2.

1. Create a new release containing your application files.

1. For each Helm chart in the release, find the corresponding HelmChart custom resource and update `apiVersion` to `kots.io/v1beta2`. Then update it to rewrite images, inject image pull secrets, and add backup labels. See [Configuring the HelmChart Custom Resource v2](helm-native-v2-using).

1. Promote the release to an internal-only channel that your team uses for testing.

1. In your development environment, log in to the Admin Console and confirm that you can upgrade to the new HelmChart v2 release.

1. When you are done testing, promote the release to one or more of your customer-facing channels. Customers can follow the standard upgrade process in the Admin Console to update their instance.

### Migrate From HelmChart v1 with `useHelmInstall: false`

This section describes how to migrate existing HelmChart v1 installations with `useHelmInstall: false`.

:::note
When the `useHelmInstall` field is _not_ set in the HelmChart custom resource, `false` is the default value.
:::

These migration steps ensure that KOTS does not uninstall any resources that were previously deployed without Helm, and that Helm takes ownership of these existing resources.

To migrate existing installations from HelmChart v1 and `useHelmInstall: false` to HelmChart v2:

1. Create a new release containing your application files:

   1. In the release, for any resources defined in Kubernetes manifests or in your Helm `templates` that were previously installed with HelmChart v1 and `useHelmInstall: false`, add the `kots.io/keep` annotation. The `kots.io/keep` annotation prevents KOTS from uninstalling these resources when upgrading using the HelmChart v2 method.

      **Example:**

      ```yaml
      apiVersion: apps/v1
      kind: StatefulSet
      metadata:
        name: postgresql
        # Add the kots.io/keep annotation
        annotations:
          kots.io/keep: "true"
      ```

   1. Save the release.

1. Create another new release:

   1. For each Helm chart in the release, find the corresponding HelmChart custom resource and update `apiVersion` to `kots.io/v1beta2`. Then update it to rewrite images, inject image pull secrets, and add backup labels. See [Configuring the HelmChart Custom Resource v2](helm-native-v2-using).

   1. In the HelmChart custom resource, under the `helmUpgradeFlags` field, add the `--take-ownership` flag:

      ```yaml
      # HelmChart v2
      apiVersion: kots.io/v1beta2
      kind: HelmChart
      metadata:
        name: samplechart
      spec:
        helmUpgradeFlags:
          - --take-ownership
      ```

      When the `--take-ownership` upgrade flag is enabled, Helm automatically takes ownership of resources that were previously deployed without Helm.

   1. Save the release.

1. Test the migration process:

   1. Promote the first release to an internal-only channel that your team uses for testing.

   1. In a development environment, install the first release.

   1. Promote the second release to the same channel.

   1. In your development environment, access the Admin Console to upgrade to the second release.

1. When you are done testing, promote the first release to one or more of your customer-facing channels. Replicated recommends that you mark the release as required by enabling **Prevent this release from being skipped during upgrades**. For more information about required releases, see [Properties](/vendor/releases-about#properties) in _About Channels and Releases_.

1. Promote the second release to the same customer-facing channel(s).
Replicated recommends that you mark the release as required by enabling **Prevent this release from being skipped during upgrades**.

1. Instruct customers to migrate by first upgrading to the release where the `kots.io/keep` annotation is applied to your resources, then upgrading to the release with HelmChart v2.

1. In subsequent releases, remove the `--take-ownership` flag from the `helmUpgradeFlags` field and remove the `kots.io/keep` annotation from resources in your Helm templates.

### Migrate From Standard Kubernetes Manifests

This section describes how to migrate existing KOTS installations of applications that were previously packaged as standard Kubernetes manifests and are now packaged as one or more Helm charts. This migration path involves performing two upgrades to ensure that KOTS does not uninstall any resources that were adopted into Helm charts, and that Helm can take ownership of resources that were previously deployed without Helm.

To migrate applications that were previously packaged as standard Kubernetes manifests:

1. Create a new release containing the Kubernetes manifests for your application:

   1. For each of the application manifests in the release, add the `kots.io/keep` annotation. The `kots.io/keep` annotation prevents KOTS from uninstalling resources that were previously installed without Helm when upgrading using the HelmChart v2 method.

      **Example:**

      ```yaml
      apiVersion: apps/v1
      kind: StatefulSet
      metadata:
        name: postgresql
        annotations:
          kots.io/keep: "true"
      ```

   1. Save the release.

1. Create another new release:

   1. In the release, add your application Helm chart(s). Remove the application manifests for resources that were adopted into the Helm chart(s).

   1. For each Helm chart in the release, add a corresponding KOTS HelmChart custom resource with `apiVersion` set to `kots.io/v1beta2`. Configure the resource to rewrite images, inject image pull secrets, and add backup labels. See [Configuring the HelmChart Custom Resource v2](helm-native-v2-using).

   1. In the HelmChart custom resource, under the `helmUpgradeFlags` field, add the `--take-ownership` flag:

      ```yaml
      # HelmChart v2
      apiVersion: kots.io/v1beta2
      kind: HelmChart
      metadata:
        name: samplechart
      spec:
        helmUpgradeFlags:
          - --take-ownership
      ```

      When the `--take-ownership` upgrade flag is enabled, Helm automatically takes ownership of resources that were previously deployed without Helm.

   1. Save the release.

1. Test the migration process:

   1. Promote the first release to an internal-only channel that your team uses for testing.

   1. In a development environment, install the first release.

   1. Promote the second release to the same channel.

   1. In your development environment, access the Admin Console to upgrade to the second release. Upgrading to the second release migrates the installation to HelmChart v2.

1. After you are done testing the migration process, promote the first release containing your application manifests with the `kots.io/keep` annotation to one or more customer-facing channels. Replicated recommends that you mark the release as required by enabling **Prevent this release from being skipped during upgrades**. For more information about required releases, see [Properties](/vendor/releases-about#properties) in _About Channels and Releases_.

1. Promote the second release containing your Helm chart(s) to the same channels.
Replicated recommends that you mark the release as required by enabling **Prevent this release from being skipped during upgrades**.

1. Instruct customers to migrate by first upgrading to the release containing the standard manifests, then upgrading to the release packaged with Helm.

1. In subsequent releases, remove the `--take-ownership` flag from the `helmUpgradeFlags` field and remove the `kots.io/keep` annotation from resources in your Helm templates.

## Support Customers on KOTS Versions Earlier Than v1.99.0 {#support-both-v1-v2}

The HelmChart v2 installation method requires KOTS v1.99.0 or later. If you have existing customers that have not yet upgraded to KOTS v1.99.0 or later, Replicated recommends that you support both the HelmChart v2 and v1 installation methods from the same release until all installations are running KOTS v1.99.0 or later.

To support both installation methods from the same release, include both versions of the HelmChart custom resource for each Helm chart in your application releases (HelmChart `kots.io/v1beta2` and HelmChart `kots.io/v1beta1` with `useHelmInstall: true`).

When you include both versions of the HelmChart custom resource for a Helm chart, installations with KOTS v1.98.0 or earlier use the v1 method, while installations with KOTS v1.99.0 or later use v2.

After all customers are using KOTS v1.99.0 or later, you can remove the HelmChart v1 custom resources so that all customers are using the HelmChart v2 method.

## HelmChart v2 Migration FAQs

This section includes FAQs related to migrating existing installations to the KOTS HelmChart v2 method.

### Which migration scenarios require the `kots.io/keep` annotation?

When applied to a resource in a release, the `kots.io/keep` annotation prevents the given resource from being uninstalled. The `kots.io/keep` annotation can be used to prevent KOTS from deleting resources that were adopted into Helm charts or otherwise previously deployed without Helm.

To prevent existing resources from being uninstalled during upgrade, the `kots.io/keep` annotation is required for the following types of migrations:
* Applications previously packaged as Kubernetes manifests migrating to HelmChart v2
* HelmChart v1 with `useHelmInstall: false` migrating to HelmChart v2

`kots.io/keep` is _not_ needed when migrating from HelmChart v1 with `useHelmInstall: true` to HelmChart v2.

### Which migration scenarios require the `--take-ownership` flag?

When the `--take-ownership` flag is enabled, Helm automatically takes ownership of resources that were previously deployed to the cluster without Helm.

The `--take-ownership` flag is required for the following types of migrations:
* Applications previously packaged as Kubernetes manifests migrating to HelmChart v2
* HelmChart v1 with `useHelmInstall: false` migrating to HelmChart v2

`--take-ownership` is _not_ needed when migrating from HelmChart v1 with `useHelmInstall: true` to HelmChart v2.

### What is the difference between HelmChart v1 with `useHelmInstall: false` and `useHelmInstall: true`?

With HelmChart v1 and `useHelmInstall: false`, KOTS renders the Helm templates and deploys them as standard Kubernetes manifests using `kubectl apply`. This differs from both the HelmChart v1 with `useHelmInstall: true` and HelmChart v2 methods, where KOTS installs the application using Helm.
Because the HelmChart v1 with `useHelmInstall: false` method does not deploy resources with Helm, it is necessary to use the `kots.io/keep` annotation and the Helm `--take-ownership` flag when migrating to the HelmChart v2 installation method. These ensure that Helm can take ownership of existing resources and that the resources are not uninstalled during upgrade.

For more information about how KOTS deploys Helm charts, including information about the deprecated HelmChart v1 installation methods, see [About Distributing Helm Charts with KOTS](helm-native-about).

================
File: docs/vendor/identity-service-configuring.md
================
:::important
This topic is deleted from the product documentation because this Beta feature is deprecated.
:::

# Enabling and Configuring Identity Service (Beta)

This topic describes how to enable the identity service (Beta) feature, and how to regulate access to application resources using role-based access control (RBAC).

## About Identity Service

When you enable the identity service for an application, the Replicated app manager deploys [Dex](https://dexidp.io/) as an intermediary that can be configured to control access to the application. Dex implements an array of protocols for querying other user-management systems, known as connectors. For more information about connectors, see [Connectors](https://dexidp.io/docs/connectors/) in the Dex documentation.

## Limitations and Requirements

Identity service has the following limitations and requirements:

* Requires that the identity service option is enabled in customer licenses.
* Is available only for embedded cluster installations with the kURL installer.
* Is available only through the Replicated Admin Console.

## Enable and Configure Identity Service

Use the Identity custom resource to enable and configure the identity service for your application. For an example application that demonstrates how to configure the identity service, see the [`kots-idp-example-app`](https://github.com/replicatedhq/kots-idp-example-app) on GitHub.

To begin, create a new release in the [Vendor Portal](https://vendor.replicated.com). Add an Identity custom resource file and customize the file for your application. For more information about the Identity custom resource, see [Identity (Beta)](/reference/custom-resource-identity) in _Reference_.

**Example:**

```YAML
apiVersion: kots.io/v1beta1
kind: Identity
metadata:
  name: identity
spec:
  requireIdentityProvider: true
  identityIssuerURL: https://{{repl ConfigOption "ingress_hostname"}}/oidcserver
  oidcRedirectUris:
    - https://{{repl ConfigOption "ingress_hostname"}}/callback
  roles:
    - id: access
      name: Access
      description: Restrict access to IDP Example App
```

Make the identity service accessible from the browser by configuring the service name and port. The app manager provides the service name and port to the application through the identity template functions so that the application can configure ingress for the identity service. For more information about the identity template functions, see [Identity Context](/reference/template-functions-identity-context) in _Reference_.
**Example:**

```YAML
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: idp-app
  annotations:
    kubernetes.io/ingress.allow-http: 'false'
    ingress.kubernetes.io/force-ssl-redirect: 'true'
    kots.io/placeholder: repl{{ printf "'true'" }}repl{{ ConfigOption "annotations" | nindent 4 }}
  labels:
    app: idp-app
spec:
  tls:
    - hosts:
        - repl{{ ConfigOption "ingress_hostname" }}
      secretName: idp-ingress-tls
  rules:
    - host: repl{{ or (ConfigOption "ingress_hostname") "~" }}
      http:
        paths:
          - path: /
            backend:
              serviceName: idp-app
              servicePort: 80
          - path: /oidcserver
            backend:
              serviceName: repl{{ IdentityServiceName }}
              servicePort: repl{{ IdentityServicePort }}
```

In your Deployment manifest file, add environment variables to configure all of the information that your application needs to communicate and integrate with the identity service.

**Example:**

```YAML
apiVersion: apps/v1
kind: Deployment
metadata:
  name: idp-app
  labels:
    app: idp-app
spec:
  replicas: 1
  selector:
    matchLabels:
      app: idp-app
  template:
    metadata:
      labels:
        app: idp-app
    spec:
      containers:
        - name: idp-app
          image: replicated/kots-idp-example-app:latest
          imagePullPolicy: Always
          ports:
            - containerPort: 5555
          volumeMounts:
            - name: tls-ca-volume
              mountPath: /idp-example
              readOnly: true
          args: ["--issuer-root-ca=/idp-example/tls.ca"]
          env:
            - name: CERT_SHA
              value: repl{{ sha256sum (ConfigOption "tls_cert") }}
            - name: LISTEN
              value: http://0.0.0.0:5555
            - name: ISSUER
              value: https://{{repl ConfigOption "ingress_hostname"}}/oidcserver
            - name: CLIENT_ID
              value: repl{{ IdentityServiceClientID }}
            - name: CLIENT_SECRET
              value: repl{{ IdentityServiceClientSecret }} # TODO: secret
            - name: REDIRECT_URI
              value: https://{{repl ConfigOption "ingress_hostname"}}/callback
            - name: EXTRA_SCOPES
              value: groups
            - name: RESTRICTED_GROUPS
              value: |
                {{repl IdentityServiceRoles | keys | toJson }}
      hostAliases:
        - ip: 172.17.0.1
          hostnames:
            - myapp.kotsadmdevenv.com
      volumes:
        - name: tls-ca-volume
          secret:
            secretName: idp-app-ca
```

## Configuring Access with RBAC

You can also regulate access to your application resources using role-based access control (RBAC).

In the Identity custom resource, provide a list of the available roles within your application in the `roles` section. For more information, see [`roles`](/reference/custom-resource-identity#roles) in _Reference_.

**Example:**

```YAML
apiVersion: kots.io/v1beta1
kind: Identity
metadata:
  name: identity
spec:
  requireIdentityProvider: true
  identityIssuerURL: https://{{repl ConfigOption "ingress_hostname"}}/oidcserver
  oidcRedirectUris:
    - https://{{repl ConfigOption "ingress_hostname"}}/callback
  roles:
    - id: access
      name: Access
      description: Restrict access to IDP Example App
```

Then, using the Admin Console, your customer has the ability to create groups and assign specific roles to each group. This mapping of roles to groups is returned to your application through the `IdentityServiceRoles` template function that you configure in your Deployment manifest file under the environment variable `RESTRICTED_GROUPS`. For more information, see [`IdentityServiceRoles`](/reference/template-functions-identity-context#identityserviceroles) in _Reference_.
**Example:**

```YAML
apiVersion: apps/v1
kind: Deployment
metadata:
  name: idp-app
  labels:
    app: idp-app
spec:
  replicas: 1
  selector:
    matchLabels:
      app: idp-app
  template:
    metadata:
      labels:
        app: idp-app
    spec:
      containers:
        - name: idp-app
          image: replicated/kots-idp-example-app:latest
          imagePullPolicy: Always
          ports:
            - containerPort: 5555
          volumeMounts:
            - name: tls-ca-volume
              mountPath: /idp-example
              readOnly: true
          args: ["--issuer-root-ca=/idp-example/tls.ca"]
          env:
            - name: CERT_SHA
              value: repl{{ sha256sum (ConfigOption "tls_cert") }}
            - name: LISTEN
              value: http://0.0.0.0:5555
            - name: ISSUER
              value: https://{{repl ConfigOption "ingress_hostname"}}/oidcserver
            - name: CLIENT_ID
              value: repl{{ IdentityServiceClientID }}
            - name: CLIENT_SECRET
              value: repl{{ IdentityServiceClientSecret }} # TODO: secret
            - name: REDIRECT_URI
              value: https://{{repl ConfigOption "ingress_hostname"}}/callback
            - name: EXTRA_SCOPES
              value: groups
            - name: RESTRICTED_GROUPS
              value: |
                {{repl IdentityServiceRoles | keys | toJson }}
      hostAliases:
        - ip: 172.17.0.1
          hostnames:
            - myapp.kotsadmdevenv.com
      volumes:
        - name: tls-ca-volume
          secret:
            secretName: idp-app-ca
```

================
File: docs/vendor/insights-app-status.md
================
import StatusesTable from "../partials/status-informers/_statusesTable.mdx"
import AggregateStatus from "../partials/status-informers/_aggregateStatus.mdx"
import AggregateStatusIntro from "../partials/status-informers/_aggregate-status-intro.mdx"
import SupportedResources from "../partials/instance-insights/_supported-resources-status.mdx"

# Enabling and Understanding Application Status

This topic describes how to configure your application so that you can view the status of application instances in the Replicated Vendor Portal. It also describes the meaning of the different application statuses.

## Overview

The Vendor Portal displays data on the status of instances of your application that are running in customer environments, including the current state (such as Ready or Degraded), the instance uptime, and the average amount of time it takes your application to reach a Ready state during installation. For more information about viewing instance data, see [Instance Details](instance-insights-details).

To compute and display these insights, the Vendor Portal interprets and aggregates the state of one or more of the supported Kubernetes resources that are deployed to the cluster as part of your application.

<SupportedResources/>

For more information about how instance data is sent to the Vendor Portal, see [About Instance and Event Data](instance-insights-event-data).

## Enable Application Status Insights

To display insights on application status, the Vendor Portal requires that your application has one or more _status informers_. Status informers indicate the Kubernetes resources deployed as part of your application that are monitored for changes in state.

To enable status informers for your application, do one of the following, depending on the installation method:
* [Helm Installations](#helm-installations)
* [KOTS Installations](#kots-installations)

### Helm Installations

To get instance status data for applications installed with Helm, the Replicated SDK must be installed alongside the application. For information about how to distribute and install the SDK with your application, see [Installing the Replicated SDK](/vendor/replicated-sdk-installing).
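For context, the SDK is distributed as a Helm chart and is typically declared as a dependency in your application chart's `Chart.yaml`. The following is a minimal sketch; the version shown is an assumption, so check the Replicated SDK release notes for the current version:

```yaml
# Chart.yaml (sketch)
dependencies:
  - name: replicated
    repository: oci://registry.replicated.com/library
    version: 1.0.0  # assumed version; use the latest Replicated SDK release
```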
After you include the SDK as a dependency, the requirements for enabling status informers vary depending on how your application is installed:

* For applications installed by running `helm install` or `helm upgrade`, the Replicated SDK automatically detects and reports the status of the resources that are part of the Helm release. No additional configuration is required to get instance status data.

* For applications installed by running `helm template` then `kubectl apply`, the SDK cannot automatically detect and report the status of resources. You must configure custom status informers by overriding the `statusInformers` value in the Replicated SDK chart. For example:

  ```yaml
  # Helm chart values.yaml file

  replicated:
    statusInformers:
      - deployment/nginx
      - statefulset/mysql
  ```

  :::note
  Applications installed by running `helm install` or `helm upgrade` can also use custom status informers. When the `replicated.statusInformers` field is set, the SDK detects and reports the status of only the resources included in the `replicated.statusInformers` field.
  :::

### KOTS Installations

For applications installed with Replicated KOTS, configure one or more status informers in the KOTS Application custom resource. For more information, see [Adding Resource Status Informers](admin-console-display-app-status).

When Helm-based applications that include the Replicated SDK are deployed by KOTS, the SDK inherits the status informers configured in the KOTS Application custom resource. In this case, the SDK does _not_ automatically report the status of the resources that are part of the Helm release. This prevents discrepancies in the instance data in the vendor platform.

## View Resource Status Insights {#resource-status}

For applications that include the Replicated SDK, the Vendor Portal also displays granular resource status insights in addition to the aggregate application status. For example, you can hover over the **App status** field on the **Instance details** page to view the statuses of the individual resources deployed by the application, as shown below:

<img src="/images/resource-status-hover-current-state.png" alt="resource status pop up" width="400px"/>

[View a larger version of this image](/images/resource-status-hover-current-state.png)

Viewing these resource status details is helpful for understanding which resources are contributing to the aggregate application status. For example, when an application has an Unavailable status, that means that one or more resources are Unavailable. By viewing the resource status insights on the **Instance details** page, you can quickly understand which resource or resources are Unavailable for the purpose of troubleshooting.

Granular resource status details are automatically available when the Replicated SDK is installed alongside the application. For information about how to distribute and install the SDK with your application, see [Installing the Replicated SDK](/vendor/replicated-sdk-installing).

## Understanding Application Status

This section provides information about how Replicated interprets and aggregates the status of Kubernetes resources for your application to report an application status.

### About Resource Statuses {#resource-statuses}

Possible resource statuses are Ready, Updating, Degraded, Unavailable, and Missing.
The following table lists the supported Kubernetes resources and the conditions that contribute to each status:

<StatusesTable/>

### Aggregate Application Status

<AggregateStatusIntro/>

<AggregateStatus/>

================
File: docs/vendor/install-with-helm.mdx
================
import Prerequisites from "../partials/helm/_helm-install-prereqs.mdx"
import FirewallOpeningsIntro from "../partials/install/_firewall-openings-intro.mdx"

# Installing with Helm

This topic describes how to use Helm to install releases that contain one or more Helm charts. For more information about the `helm install` command, including how to override values in a chart during installation, see [Helm Install](https://helm.sh/docs/helm/helm_install/) in the Helm documentation.

## Prerequisites

Before you install, complete the following prerequisites:

<Prerequisites/>

## Firewall Openings for Online Installations with Helm {#firewall}

<FirewallOpeningsIntro/>

<table>
  <tr>
    <th width="50%">Domain</th>
    <th>Description</th>
  </tr>
  <tr>
    <td>`replicated.app` *</td>
    <td><p>Upstream application YAML and metadata is pulled from `replicated.app`. The current running version of the application (if any), as well as a license ID and application ID to authenticate, are all sent to `replicated.app`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p>For the range of IP addresses for `replicated.app`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L60-L65) in GitHub.</p></td>
  </tr>
  <tr>
    <td>`registry.replicated.com`</td>
    <td><p>Some applications host private images in the Replicated registry at this domain. The on-prem docker client uses a license ID to authenticate to `registry.replicated.com`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p>For the range of IP addresses for `registry.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L20-L25) in GitHub.</p></td>
  </tr>
  <tr>
    <td>`proxy.replicated.com`</td>
    <td><p>Private Docker images are proxied through `proxy.replicated.com`. This domain is owned by Replicated, Inc., which is headquartered in Los Angeles, CA.</p><p>For the range of IP addresses for `proxy.replicated.com`, see [replicatedhq/ips](https://github.com/replicatedhq/ips/blob/main/ip_addresses.json#L52-L57) in GitHub.</p></td>
  </tr>
</table>

* Required only if the [Replicated SDK](/vendor/replicated-sdk-overview) is included as a dependency of the application Helm chart.

## Install

To install a Helm chart:

1. In the Vendor Portal, go to **Customers** and click on the target customer.

1. Click **Helm install instructions**.

   <img alt="Helm install button" src="/images/helm-install-button.png" width="700px"/>

   [View a larger image](/images/helm-install-button.png)

1. In the **Helm install instructions** dialog, run the first command to log in to the Replicated registry:

   ```bash
   helm registry login registry.replicated.com --username EMAIL_ADDRESS --password LICENSE_ID
   ```

   Where:
   * `EMAIL_ADDRESS` is the customer's email address
   * `LICENSE_ID` is the ID of the customer's license

   :::note
   You can safely ignore the following warning message: `WARNING: Using --password via the CLI is insecure.` This message is displayed because using the `--password` flag stores the password in bash history. This login method is not insecure.
   Alternatively, to avoid the warning message, you can click **(show advanced)** in the **Helm install instructions** dialog to display a login command that excludes the `--password` flag. With the advanced login command, you are prompted for the password after running the command.
   :::

1. (Optional) Run the second and third commands to install the preflight plugin and run preflight checks. If no preflight checks are defined, these commands are not displayed. For more information about defining and running preflight checks, see [About Preflight Checks and Support Bundles](preflight-support-bundle-about).

1. Run the fourth command to install using Helm:

   ```bash
   helm install RELEASE_NAME oci://registry.replicated.com/APP_SLUG/CHANNEL/CHART_NAME
   ```

   Where:
   * `RELEASE_NAME` is the name of the Helm release.
   * `APP_SLUG` is the slug for the application. For information about how to find the application slug, see [Get the Application Slug](/vendor/vendor-portal-manage-app#slug).
   * `CHANNEL` is the lowercased name of the channel where the release was promoted, such as `beta` or `unstable`. Channel is not required for releases promoted to the Stable channel.
   * `CHART_NAME` is the name of the Helm chart.

   :::note
   To install the SDK with custom RBAC permissions, include the `--set` flag with the `helm install` command to override the value of the `replicated.serviceAccountName` field with a custom service account. For more information, see [Customizing RBAC for the SDK](/vendor/replicated-sdk-customizing#customize-rbac-for-the-sdk).
   :::

1. (Optional) In the Vendor Portal, click **Customers**. You can see that the customer you used to install is marked as **Active** and the details about the application instance are listed under the customer name.

   **Example**:

   ![example customer in the Vendor Portal with an active instance](/images/sdk-customer-active-example.png)

   [View a larger version of this image](/images/sdk-customer-active-example.png)

================
File: docs/vendor/installer-history.mdx
================
import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"

# Installer History

<KurlAvailability/>

This topic describes how to access the installation commands for all active and inactive kURL installers promoted to a channel.

## About Using Inactive Installers

Each release channel in the Replicated Vendor Portal saves the history of kURL installers that were promoted to the channel. You can view the list of historical installers on the **kURL Installer History** page for each channel. For more information, see [About the Installer History Page](#about) below.

It can be useful to access the installation commands for inactive installers to reproduce an issue that a user is experiencing for troubleshooting purposes. For example, if the user's cluster is running the inactive installer version 1.0.0, then you can install with version 1.0.0 in a test environment to troubleshoot.

You can also send the installation commands for inactive installers to your users as needed. For example, a user might have unique requirements for specific versions of Kubernetes or add-ons.

## About the Installer History Page {#about}

The **kURL Installer History** page for each channel includes a list of all the kURL installers that have been promoted to the channel, including the active installer and any inactive installers.
-
-To access the **kURL Installer History** page, go to **Channels** and click the **Installer history** button on the target channel.
-
-The following image shows an example **kURL Installer History** page with three installers listed:
-
-![Installer History page in the Vendor Portal](/images/installer-history-page.png)
-
-[View a larger version of this image](/images/installer-history-page.png)
-
-The installers are listed in the order in which they were promoted to the channel. The installer at the top of the list is the active installer for the channel.
-
-The **kURL Installer History** page includes the following information for each installer listed:
-
-* Version label, if provided when the installer was promoted
-* Sequence number
-* Installation command
-* Installer YAML content
-
-================
-File: docs/vendor/instance-data-export.md
-================
-import Download from "../partials/customers/_download.mdx"
-
-# Export Customer and Instance Data
-
-This topic describes how to download and export customer and instance data from the Replicated Vendor Portal.
-
-## Overview
-
-While you can always consume customer and instance insight data directly in the Replicated Vendor Portal, the data is also available in a CSV format so that it can be imported into any other system, such as:
-* Customer Relationship Management (CRM) systems like Salesforce or Gainsight
-* Data warehouses like Redshift, Snowflake, or BigQuery
-* Business intelligence (BI) tools like Looker, Tableau, or PowerBI
-
-By collecting and organizing this data wherever it is most visible and valuable, you can enable your team to make better decisions about where to focus efforts across product, sales, engineering, and customer success.
-
-## Bulk Export Instance Event Timeseries Data
-
-You can use the Vendor API v3 `/app/{app_id}/events` endpoint to programmatically access historical timeseries data containing instance level events, including any custom metrics that you have defined. For more information about the endpoint, see [Get instance events in either JSON or CSV format](https://replicated-vendor-api.readme.io/reference/listappinstanceevents) in the Vendor API v3 documentation.
-
-The `/app/{app_id}/events` endpoint returns data scoped to a given application identifier. It also allows filtering based on time periods, instance identifiers, customer identifiers, and event types. You must provide at least **one** query parameter to scope the query in order to receive a response.
-
-By bulk exporting this instance event data with the `/app/{app_id}/events` endpoint, you can:
-* Identify trends and potential problem areas
-* Demonstrate the impact, adoption, and usage of recent product features
-
-### Filter Bulk Data Exports
-
-You can use the following types of filters to filter timeseries data for bulk export:
-
-- **Filter by date**:
-  - Get instance events recorded _at or before_ the query date. For example:
-    ```bash
-    curl -H "Authorization: $REPLICATED_API_TOKEN" \
-      "https://api.replicated.com/vendor/v3/app/:appID/events?before=2023-10-15"
-    ```
-  - Get instance events recorded _at or after_ the query date. For example:
-    ```bash
-    curl -H "Authorization: $REPLICATED_API_TOKEN" \
-      "https://api.replicated.com/vendor/v3/app/:appID/events?after=2023-10-15"
-    ```
-  - Get instance events recorded within a range of dates [after, before]. 
For example:
-    ```bash
-    curl -H "Authorization: $REPLICATED_API_TOKEN" \
-      "https://api.replicated.com/vendor/v3/app/:appID/events?after=2023-05-02&before=2023-10-15"
-    ```
-- **Filter by customer**: Get instance events from one or more customers using a comma-separated list of customer IDs. For example:
-  ```bash
-  curl -H "Authorization: $REPLICATED_API_TOKEN" \
-    "https://api.replicated.com/vendor/v3/app/:appID/events?customerIDs=1b13241,2Rjk2923481"
-  ```
-- **Filter by event type**: Get instance events by event type using a comma-separated list of event types. For example:
-  ```bash
-  curl -H "Authorization: $REPLICATED_API_TOKEN" \
-    "https://api.replicated.com/vendor/v3/app/:appID/events?eventTypes=numUsers,numProjects"
-  ```
-
-:::note
-If any filter is passed for an object that does not exist, no warning is given. For example, if a `customerIDs` filter is passed for an ID that does not exist, or for an ID that the user does not have access to, then an empty array is returned.
-:::
-
-## Download Customer Instance Data CSVs
-
-<Download/>
-
-### Data Dictionary
-
-The following table lists the data fields that can be included in the customers and instances CSV downloads, including the label, data type, and description.
-
-<table>
-  <tr>
-    <th>Label</th>
-    <th>Type</th>
-    <th>Description</th>
-  </tr>
-  <tr>
-    <td>customer_id</td>
-    <td>string</td>
-    <td>Customer identifier</td>
-  </tr>
-  <tr>
-    <td>customer_name</td>
-    <td>string</td>
-    <td>The customer name</td>
-  </tr>
-  <tr>
-    <td>customer_created_date</td>
-    <td>timestamptz</td>
-    <td>The date the license was created</td>
-  </tr>
-  <tr>
-    <td>customer_license_expiration_date</td>
-    <td>timestamptz</td>
-    <td>The expiration date of the license</td>
-  </tr>
-  <tr>
-    <td>customer_channel_id</td>
-    <td>string</td>
-    <td>The ID of the channel the customer is assigned to</td>
-  </tr>
-  <tr>
-    <td>customer_channel_name</td>
-    <td>string</td>
-    <td>The name of the channel the customer is assigned to</td>
-  </tr>
-  <tr>
-    <td>customer_app_id</td>
-    <td>string</td>
-    <td>App identifier</td>
-  </tr>
-  <tr>
-    <td>customer_last_active</td>
-    <td>timestamptz</td>
-    <td>The date the customer was last active</td>
-  </tr>
-  <tr>
-    <td>customer_type</td>
-    <td>string</td>
-    <td>One of prod, trial, dev, or community</td>
-  </tr>
-  <tr>
-    <td>customer_status</td>
-    <td>string</td>
-    <td>The current status of the customer</td>
-  </tr>
-  <tr>
-    <td>customer_is_airgap_enabled</td>
-    <td>boolean</td>
-    <td>The feature the customer has enabled - Airgap</td>
-  </tr>
-  <tr>
-    <td>customer_is_geoaxis_supported</td>
-    <td>boolean</td>
-    <td>The feature the customer has enabled - GeoAxis</td>
-  </tr>
-  <tr>
-    <td>customer_is_gitops_supported</td>
-    <td>boolean</td>
-    <td>The feature the customer has enabled - KOTS Auto-GitOps</td>
-  </tr>
-  <tr>
-    <td>customer_is_embedded_cluster_download_enabled</td>
-    <td>boolean</td>
-    <td>The feature the customer has enabled - Embedded Cluster</td>
-  </tr>
-  <tr>
-    <td>customer_is_identity_service_supported</td>
-    <td>boolean</td>
-    <td>The feature the customer has enabled - Identity</td>
-  </tr>
-  <tr>
-    <td>customer_is_snapshot_supported</td>
-    <td>boolean</td>
-    <td>The feature the customer has enabled - Snapshot</td>
-  </tr>
-  <tr>
-    <td>customer_has_entitlements</td>
-    <td>boolean</td>
-    <td>Indicates the presence or absence of entitlements and entitlement_* columns</td>
-  </tr>
-  <tr>
-    <td>customer_entitlement__*</td>
-    <td>string/integer/boolean</td>
-    <td>The values of any custom license fields configured for the customer. 
For example, customer_entitlement__active-users.</td>
-  </tr>
-  <tr>
-    <td>customer_created_by_id</td>
-    <td>string</td>
-    <td>The ID of the actor that created this customer: user ID or a hashed value of a token.</td>
-  </tr>
-  <tr>
-    <td>customer_created_by_type</td>
-    <td>string</td>
-    <td>The type of the actor that created this customer: user or service-account.</td>
-  </tr>
-  <tr>
-    <td>customer_created_by_description</td>
-    <td>string</td>
-    <td>The description of the actor that created this customer. Includes username or token name depending on actor type.</td>
-  </tr>
-  <tr>
-    <td>customer_created_by_link</td>
-    <td>string</td>
-    <td>The link to the actor that created this customer.</td>
-  </tr>
-  <tr>
-    <td>customer_created_by_timestamp</td>
-    <td>timestamptz</td>
-    <td>The date the customer was created by this actor. When available, matches the value in the customer_created_date column.</td>
-  </tr>
-  <tr>
-    <td>customer_updated_by_id</td>
-    <td>string</td>
-    <td>The ID of the actor that updated this customer: user ID or a hashed value of a token.</td>
-  </tr>
-  <tr>
-    <td>customer_updated_by_type</td>
-    <td>string</td>
-    <td>The type of the actor that updated this customer: user or service-account.</td>
-  </tr>
-  <tr>
-    <td>customer_updated_by_description</td>
-    <td>string</td>
-    <td>The description of the actor that updated this customer. Includes username or token name depending on actor type.</td>
-  </tr>
-  <tr>
-    <td>customer_updated_by_link</td>
-    <td>string</td>
-    <td>The link to the actor that updated this customer.</td>
-  </tr>
-  <tr>
-    <td>customer_updated_by_timestamp</td>
-    <td>timestamptz</td>
-    <td>The date the customer was updated by this actor.</td>
-  </tr>
-  <tr>
-    <td>instance_id</td>
-    <td>string</td>
-    <td>Instance identifier</td>
-  </tr>
-  <tr>
-    <td>instance_is_active</td>
-    <td>boolean</td>
-    <td>The instance has pinged within the last 24 hours</td>
-  </tr>
-  <tr>
-    <td>instance_first_reported_at</td>
-    <td>timestamptz</td>
-    <td>The timestamp of the first recorded check-in for the instance.</td>
-  </tr>
-  <tr>
-    <td>instance_last_reported_at</td>
-    <td>timestamptz</td>
-    <td>The timestamp of the last recorded check-in for the instance.</td>
-  </tr>
-  <tr>
-    <td>instance_first_ready_at</td>
-    <td>timestamptz</td>
-    <td>The timestamp of when the cluster was considered ready</td>
-  </tr>
-  <tr>
-    <td>instance_kots_version</td>
-    <td>string</td>
-    <td>The version of KOTS or the Replicated SDK that the instance is running. The version is displayed as a Semantic Versioning compliant string.</td>
-  </tr>
-  <tr>
-    <td>instance_k8s_version</td>
-    <td>string</td>
-    <td>The version of Kubernetes running in the cluster.</td>
-  </tr>
-  <tr>
-    <td>instance_is_airgap</td>
-    <td>boolean</td>
-    <td>The cluster is air gapped</td>
-  </tr>
-  <tr>
-    <td>instance_is_kurl</td>
-    <td>boolean</td>
-    <td>The instance is installed in a Replicated kURL cluster (embedded cluster)</td>
-  </tr>
-  <tr>
-    <td>instance_last_app_status</td>
-    <td>string</td>
-    <td>The instance's last reported app status</td>
-  </tr>
-  <tr>
-    <td>instance_client</td>
-    <td>string</td>
-    <td>Indicates whether this instance is managed by KOTS or is a Helm CLI-deployed instance using the SDK.</td>
-  </tr>
-  <tr>
-    <td>instance_kurl_node_count_total</td>
-    <td>integer</td>
-    <td>Total number of nodes in the cluster. 
Applies only to kURL clusters.</td>
-  </tr>
-  <tr>
-    <td>instance_kurl_node_count_ready</td>
-    <td>integer</td>
-    <td>Number of nodes in the cluster that are in a healthy state and ready to run Pods. Applies only to kURL clusters.</td>
-  </tr>
-  <tr>
-    <td>instance_cloud_provider</td>
-    <td>string</td>
-    <td>The cloud provider where the instance is running. Cloud provider is determined by the IP address that makes the request.</td>
-  </tr>
-  <tr>
-    <td>instance_cloud_provider_region</td>
-    <td>string</td>
-    <td>The cloud provider region where the instance is running. For example, us-central1-b.</td>
-  </tr>
-  <tr>
-    <td>instance_app_version</td>
-    <td>string</td>
-    <td>The current application version</td>
-  </tr>
-  <tr>
-    <td>instance_version_age</td>
-    <td>string</td>
-    <td>The age (in days) of the currently deployed release. This is relative to the latest available release on the channel.</td>
-  </tr>
-  <tr>
-    <td>instance_is_gitops_enabled</td>
-    <td>boolean</td>
-    <td>Reflects whether the end user has enabled KOTS Auto-GitOps for deployments in their environment</td>
-  </tr>
-  <tr>
-    <td>instance_gitops_provider</td>
-    <td>string</td>
-    <td>If KOTS Auto-GitOps is enabled, reflects the GitOps provider in use. For example, GitHub Enterprise.</td>
-  </tr>
-  <tr>
-    <td>instance_is_skip_preflights</td>
-    <td>boolean</td>
-    <td>Indicates whether an end user elected to skip preflight check warnings or errors</td>
-  </tr>
-  <tr>
-    <td>instance_preflight_status</td>
-    <td>string</td>
-    <td>The last reported preflight check status for the instance</td>
-  </tr>
-  <tr>
-    <td>instance_k8s_distribution</td>
-    <td>string</td>
-    <td>The Kubernetes distribution of the cluster.</td>
-  </tr>
-  <tr>
-    <td>instance_has_custom_metrics</td>
-    <td>boolean</td>
-    <td>Indicates the presence or absence of custom metrics and custom_metric__* columns</td>
-  </tr>
-  <tr>
-    <td>instance_custom_metrics_reported_at</td>
-    <td>timestamptz</td>
-    <td>Timestamp of the latest custom metric</td>
-  </tr>
-  <tr>
-    <td>custom_metric__*</td>
-    <td>string/integer/boolean</td>
-    <td>The values of any custom metrics that have been sent by the instance. For example, custom_metric__active_users.</td>
-  </tr>
-  <tr>
-    <td>instance_has_tags</td>
-    <td>boolean</td>
-    <td>Indicates the presence or absence of instance tags and instance_tag__* columns</td>
-  </tr>
-  <tr>
-    <td>instance_tag__*</td>
-    <td>string/integer/boolean</td>
-    <td>The values of any instance tags that have been set by the vendor. For example, instance_tag__name.</td>
-  </tr>
-</table>
-
-================
-File: docs/vendor/instance-insights-details.md
-================
-# Instance Details
-
-This topic describes using the Replicated Vendor Portal to quickly understand the recent events and performance of application instances installed in your customers' environments.
-
-## About the Instance Details Page {#about-page}
-
-The Vendor Portal provides insights about the health, status, and performance of the active application instances associated with each customer license on the **Instance details** page. You can use the insights on the **Instance details** page to more quickly troubleshoot issues with your customers' active instances, helping to reduce support burden. 
-
-For example, you can use the **Instance details** page to track the following events for each instance:
-
-* Recent performance degradation or downtime
-* Length of instance downtime
-* Recent changes to the cluster or infrastructure
-* Changes in the number of nodes, such as nodes lost or added
-* Changes in the cluster's Kubernetes version
-* Changes in the application version that the instance is running
-
-To access the **Instance details** page, go to **Customers** and click the **Customer reporting** button for the customer that you want to view:
-
-![Customer reporting button on the Customers page](/images/customer-reporting-button.png)
-
-From the **Reporting** page for the selected customer, click the **View details** button for the desired application instance.
-
-The following shows an example of the **Instance details** page:
-
-![Instance details full page](/images/instance-details.png)
-
-[View a larger version of this image](/images/instance-details.png)
-
-As shown in the image above, the **Instance details** page includes the following sections:
-
-* **Current State**: Information about the state of the instance, such as the current application version. See [Current State](#current-state) below.
-* **Instance Insights**: Key performance indicators (KPIs) related to health, performance, and adoption. See [Insights](#insights) below.
-* **Instance Information**: Information about the cluster where the instance is installed, such as the version of Kubernetes running on the cluster. See [Instance Information](#instance-information) below.
-* **Custom Metrics**: The values for any custom metrics that are configured for the application, from the most recent check-in. For more information about configuring custom metrics, see [Configuring Custom Metrics](/vendor/custom-metrics).
-* **Instance Uptime**: Details about instance uptime over time. See [Instance Uptime](#instance-uptime) below.
-* **Instance Activity**: Event data stream. See [Instance Activity](#instance-activity) below.
-
-### Current State
-
-The **Current State** section displays the following event data about the status and version of the instance:
-
-* **App status**: The status of the application. Possible statuses are Ready, Updating, Degraded, Unavailable, and Missing. For more information about enabling application status insights and how to interpret the different statuses, see [Enabling and Understanding Application Status](insights-app-status).
-
-   Additionally, for applications that include the [Replicated SDK](/vendor/replicated-sdk-overview), you can hover over the **App status** field to view the statuses of the individual resources deployed by the application, as shown in the example below:
-
-   <img src="/images/resource-status-hover-current-state.png" alt="resource status pop up" width="400px"/>
-
-   [View a larger version of this image](/images/resource-status-hover-current-state.png)
-
-* **App version**: The version label of the currently running release. You define the version label in the release properties when you promote the release. For more information about defining release properties, see [Properties](releases-about#properties) in _About Channels and Releases_.
-
-   If there is no version label for the release, then the Vendor Portal displays the release sequence in the **App version** field. You can find the sequence number associated with a release by running the `replicated release ls` command. 
See [release ls](/reference/replicated-cli-release-ls) in the _Replicated CLI_ documentation. - -* **Version age**: The absolute and relative ages of the instance: - - * **Absolute age**: `now - current_release.promoted_date` - - The number of days since the currently running application version was promoted to the channel. For example, if the instance is currently running version 1.0.0, and version 1.0.0 was promoted to the channel 30 days ago, then the absolute age is 30. - - * **Relative age (Days Behind Latest)**: `channel.latest_release.promoted_date - current_release.promoted_date` - - The number of days between when the currently running application version was promoted to the channel and when the latest available version on the channel was promoted. - - For example, the instance is currently running version 1.0.0, which was promoted to the Stable channel. The latest version available on the Stable channel is 1.5.0. If 1.0.0 was promoted 30 days ago and 1.5.0 was promoted 10 days ago, then the relative age of the application instance is 20 days. - -* **Versions behind**: The number of versions between the currently running version and the latest version available on the channel where the instance is assigned. - - For example, the instance is currently running version 1.0.0, which was promoted to the Stable channel. If the later versions 1.1.0, 1.2.0, 1.3.0, 1.4.0, and 1.5.0 were also promoted to the Stable channel, then the instance is five versions behind. - -* **Last check-in**: The timestamp when the instance most recently sent data to the Vendor Portal. - -### Instance Insights {#insights} - -The **Insights** section includes the following metrics computed by the Vendor Portal: - -* [Uptime](#uptime) -* [Time to Install](#time-to-install) - -#### Uptime - -The Vendor Portal computes the total uptime for the instance as the fraction of time that the instance spends with a Ready, Updating, or Degraded status. The Vendor Portal also provides more granular details about uptime in the **Instance Uptime** graph. See [Instance Uptime](#instance-uptime) below. - -High uptime indicates that the application is reliable and able to handle the demands of the customer environment. Low uptime might indicate that the application is prone to errors or failures. By measuring the total uptime, you can better understand the performance of your application. - -The following table lists the application statuses that are associated with an Up or Down state in the total uptime calculation: - -<table> - <tr> - <th>Uptime State</th> - <th>Application Statuses</th> - </tr> - <tr> - <td>Up</td> - <td>Ready, Updating, or Degraded</td> - </tr> - <tr> - <td>Down</td> - <td>Missing or Unavailable</td> - </tr> -</table> - -:::note -The Vendor Portal includes time spent in a Degraded status in the total uptime for an instance because an app may still be capable of serving traffic when some subset of desired replicas are available. Further, it is possible that a Degraded state is expected during upgrade. -::: - -#### Time to Install - -The Vendor Portal computes both _License time to install_ and _Instance time to install_ metrics to represent how quickly the customer was able to deploy the application to a Ready state in their environment. - -Replicated recommends that you use Time to Install as an indicator of the quality of the packaging, configuration, and documentation of your application. 
- -If the installation process for your application is challenging, poorly documented, lacks appropriate preflight checks, or relies heavily on manual steps, then it can take days or weeks to deploy the application in customer environments. A longer Time to Install generally represents a significantly increased support burden and a degraded customer installation experience. - -The following describes the _License time to install_ and _Instance time to install_ metrics: - -* **License time to install**: The time between when you create the customer license in the Vendor Portal, and when the application instance reaches a Ready status in the customer environment. - - License time to install represents the time that it takes for a customer to successfully deploy your application after you intend to distribute the application to the customer. Replicated uses the timestamp of when you create the customer license in the Vendor Portal to represent your intent to distribute the application because creating the license file is generally the final step before you share the installation materials with the customer. - - License time to install includes several activities that are involved in deploying the application, including the customer receiving the necessary materials and documentation, downloading the assets, provisioning the required hardware, networking, external systems, completing the preflight checks, and finally installing, configuring, and deploying the application. - -* **Instance time to install**: The time between when the Vendor Portal records the first event for the application instance in the customer environment, and when the instance reaches a Ready status. - - Instance time to install is the length of time that it takes for the application to reach a Ready state after the customer starts a deployment attempt in their environment. Replicated considers a deployment attempt started when the Vendor Portal first records an event for the instance. - - For more information about how the Vendor Portal generates events, see [About Events](instance-insights-event-data#about-events) in _Event Data_. - - :::note - Instance time to install does _not_ include any deployment attempts that a customer might have made that did not generate an event. For example, time spent by the customer discarding the server used in a failed attempt before attempting to deploy the instance again on a new server. - ::: - -### Instance Information - -The **Instance Information** section displays the following details about the cluster infrastructure where the application is installed as well as vendor-defined metadata about the instance: - -* The Kubernetes distribution for the cluster. For example, GKE or EKS. -* The version of Kubernetes running in the cluster. -* The version of KOTS or the Replicated SDK installed in the cluster. -* For **First Seen**, the timestamp of the first event that the Vendor Portal generated for the instance. For more information about how the Vendor Portal generates events, see [About Events](instance-insights-event-data#about-events) in _Event Data_. -* If detected, the cloud provider and region where the cluster is running. For example, `GCP: us-central1`. -* An optional vendor-defined name for the instance. -* Optional vendor-defined instance tags in the form of key-value pairs. Each instance can have a maximum of 10 tags. 
-
-In addition to the details listed above, the **Instance Information** section also displays the following for embedded clusters provisioned by Replicated kURL:
-* Node operating systems
-* Node operating system versions
-* Total number of cluster nodes
-* Number of cluster nodes in a Ready state
-* ID of the kURL installer specification
-
-### Instance Uptime
-
-The **Instance Uptime** graph shows the percentage of a given time period that the instance was in an Up, Degraded, or Down state.
-
-To determine if the instance is Up, Degraded, or Down, the Vendor Portal uses the application status. Possible application statuses are Ready, Updating, Degraded, Unavailable, and Missing. The following table lists the application statuses that are associated with each state in the **Instance Uptime** graph:
-
-<table>
-  <tr>
-    <th>Uptime State</th>
-    <th>Application Statuses</th>
-  </tr>
-  <tr>
-    <td>Up</td>
-    <td>Ready or Updating</td>
-  </tr>
-  <tr>
-    <td>Degraded</td>
-    <td>Degraded</td>
-  </tr>
-  <tr>
-    <td>Down</td>
-    <td>Missing or Unavailable</td>
-  </tr>
-</table>
-
-The following shows an example of an **Instance Uptime** graph:
-
-![Uptime Graph on the Instance details page](/images/instance-uptime-graph.png)
-
-You can hover over the bars in the **Instance Uptime** graph to view more detail about the percent of time that the instance was in each state during the given time period.
-
-![Uptime Graph with event markers on the Instance details page](/images/instance-uptime-graph-event-markers.png)
-
-You can hover over the event markers in the **Instance Uptime** graph to view more detail about the events that occurred during that given interval on the graph. If more than two events occurred in that period, the event marker displays the number of events that occurred during that period. If you click the event marker or the event in the tooltip, the **Instance Activity** section highlights the event or the first event in the group.
-
-### Instance Activity
-
-The **Instance Activity** section displays recent events for the instance. The data stream is updated each time an instance _check-in_ occurs. For more information about what triggers an instance check-in, see [How the Vendor Portal Collects Instance Data](instance-insights-event-data#about-reporting) in _About Instance and Event Data_.
-
-The timestamp of events displayed in the **Instance Activity** stream is the timestamp when the Replicated Vendor API received data from the instance. The timestamp of events does not necessarily reflect the timestamp of when the event occurred. 
-
-The following shows an example of the **Instance Activity** data stream:
-
-![Instance Activity section of Instance details page](/images/instance-activity.png)
-
-You can filter the **Instance Activity** stream by the following categories:
-
-* [App install/upgrade](#app-install-upgrade)
-* [App status](#app-status)
-* [Cluster status](#cluster)
-* [Custom metrics](#custom-metrics)
-* [Infrastructure status](#infrastructure)
-* [KOTS version](#kots)
-* [Replicated SDK version](#sdk)
-* [Upstream update](#upstream)
-
-The following tables describe the events that can be displayed in the **Instance Activity** stream for each of the categories above:
-
-#### App install/upgrade {#app-install-upgrade}
-
-<table>
-  <tr>
-    <th>Label</th>
-    <th>Description</th>
-  </tr>
-  <tr>
-    <td>App Channel</td>
-    <td>The ID of the channel the application instance is assigned.</td>
-  </tr>
-  <tr>
-    <td>App Version</td>
-    <td>The version label of the release that the instance is currently running. The version label is the version that you assigned to the release when promoting it to a channel.</td>
-  </tr>
-</table>
-
-#### App status {#app-status}
-
-<table>
-  <tr>
-    <th>Label</th>
-    <th>Description</th>
-  </tr>
-  <tr>
-    <td>App Status</td>
-    <td>
-      <p>A string that represents the status of the application. Possible values: Ready, Updating, Degraded, Unavailable, Missing. For applications that include the <a href="/vendor/replicated-sdk-overview">Replicated SDK</a>, hover over the application status to view the statuses of the individual resources deployed by the application.</p>
-      <p>For more information, see <a href="insights-app-status">Enabling and Understanding Application Status</a>.</p>
-    </td>
-  </tr>
-</table>
-
-#### Cluster status {#cluster}
-
-<table>
-  <tr>
-    <th>Label</th>
-    <th>Description</th>
-  </tr>
-  <tr>
-    <td>Cluster Type</td>
-    <td>
-      <p>Indicates if the cluster was provisioned by kURL.</p>
-      <p>Possible values:</p>
-      <ul>
-        <li><code>kURL</code>: The cluster is provisioned by kURL.</li>
-        <li><code>Existing</code>: The cluster is <em>not</em> provisioned by kURL.</li>
-      </ul>
-      <p>For more information about kURL clusters, see <a href="packaging-embedded-kubernetes">Creating a kURL installer</a>.</p>
-    </td>
-  </tr>
-  <tr>
-    <td>Kubernetes Version</td>
-    <td>The version of Kubernetes running in the cluster.</td>
-  </tr>
-  <tr>
-    <td>Kubernetes Distribution</td>
-    <td>
-      <p>The Kubernetes distribution of the cluster.</p>
-      <p>Possible values:</p>
-      <ul>
-        <li>EKS</li>
-        <li>GKE</li>
-        <li>K3S</li>
-        <li>RKE2</li>
-      </ul>
-    </td>
-  </tr>
-  <tr>
-    <td>kURL Nodes Total</td>
-    <td>
-      <p>Total number of nodes in the cluster.</p>
-      <p><strong>Note:</strong> Applies only to kURL clusters.</p>
-    </td>
-  </tr>
-  <tr>
-    <td>kURL Nodes Ready</td>
-    <td>
-      <p>Number of nodes in the cluster that are in a healthy state and ready to run Pods.</p>
-      <p><strong>Note:</strong> Applies only to kURL clusters.</p>
-    </td>
-  </tr>
-  <tr>
-    <td>New kURL Installer</td>
-    <td>
-      <p>The ID of the kURL installer specification that kURL used to provision the cluster. Indicates that a new Installer specification was added. An installer specification is a manifest file that has <code>apiVersion: cluster.kurl.sh/v1beta1</code> and <code>kind: Installer</code>. 
</p>
-      <p>For more information about installer specifications for kURL, see <a href="packaging-embedded-kubernetes">Creating a kURL installer</a>.</p>
-      <p><strong>Note:</strong> Applies only to kURL clusters.</p>
-    </td>
-  </tr>
-</table>
-
-#### Custom metrics {#custom-metrics}
-
-You can filter the activity feed by any custom metrics that are configured for the application. The labels for the custom metrics vary depending on the custom key value pairs included in the data set that is sent to the Vendor Portal. For example, the key value pair `"num_projects": 5` is displayed as **Num Projects: 5** in the activity feed.
-
-For more information about configuring custom metrics, see [Configuring Custom Metrics](/vendor/custom-metrics).
-
-#### Infrastructure status {#infrastructure}
-
-<table>
-  <tr>
-    <th>Label</th>
-    <th>Description</th>
-  </tr>
-  <tr>
-    <td>Cloud Provider</td>
-    <td>
-      <p>The cloud provider where the instance is running. Cloud provider is determined by the IP address that makes the request.</p>
-      <p>Possible values:</p>
-      <ul>
-        <li>AWS</li>
-        <li>GCP</li>
-        <li>DigitalOcean</li>
-      </ul>
-    </td>
-  </tr>
-  <tr>
-    <td>Cloud Region</td>
-    <td>
-      <p>The cloud provider region where the instance is running. For example, <code>us-central1-b</code></p>
-    </td>
-  </tr>
-</table>
-
-#### KOTS version {#kots}
-
-<table>
-  <tr>
-    <th>Label</th>
-    <th>Description</th>
-  </tr>
-  <tr>
-    <td>KOTS Version</td>
-    <td>The version of KOTS that the instance is running. KOTS version is displayed as a Semantic Versioning compliant string.</td>
-  </tr>
-</table>
-
-#### Replicated SDK version {#sdk}
-
-<table>
-  <tr>
-    <th>Label</th>
-    <th>Description</th>
-  </tr>
-  <tr>
-    <td>Replicated SDK Version</td>
-    <td>The version of the Replicated SDK that the instance is running. SDK version is displayed as a Semantic Versioning compliant string.</td>
-  </tr>
-</table>
-
-#### Upstream update {#upstream}
-
-<table>
-  <tr>
-    <th>Label</th>
-    <th>Description</th>
-  </tr>
-  <tr>
-    <td>Versions Behind</td>
-    <td>
-      <p>The number of versions between the version that the instance is currently running and the latest version available on the channel.</p>
-      <p>Computed by the Vendor Portal each time it receives instance data.</p>
-    </td>
-  </tr>
-</table>
-
-================
-File: docs/vendor/instance-insights-event-data.mdx
-================
-import AirGapTelemetry from "../partials/instance-insights/_airgap-telemetry.mdx"
-
-# About Instance and Event Data
-
-This topic provides an overview of the customer and instance insights that you can view in the Replicated Vendor Portal. It includes information about how the Vendor Portal accesses data as well as requirements and limitations.
-
-## How the Vendor Portal Collects Instance Data {#about-reporting}
-
-This section describes how the Vendor Portal collects instance data from online and air gap environments.
-
-### Online Instances
-
-For instances running in online (internet-connected) environments, either Replicated KOTS or the Replicated SDK periodically sends a small amount of data to the Vendor Portal, depending on which is installed in the cluster alongside the application. If both KOTS and the SDK are installed in the cluster (such as when a Helm chart that includes the SDK is installed by KOTS), then both send instance data.
-
-The data sent to the Vendor Portal includes properties such as the current version and status of the instance. 
For a full overview of what data might be included, see the [Replicated Data Transmission Policy](https://docs.replicated.com/vendor/policies-data-transmission).
-
-The following diagram shows the flow of different types of data from customer environments to the Vendor Portal:
-
-![Telemetry sent from instances to vendor platform](/images/telemetry-diagram.png)
-
-[View a larger version of this image](/images/telemetry-diagram.png)
-
-As shown in the diagram above, application instance data, application status data, and details about the KOTS and the SDK instances running in the cluster are all sent to the Vendor Portal through the Replicated app service:
-* When both KOTS and the SDK are installed in the cluster, they both send application instance data, including information about the cluster where the instance is running.
-* KOTS and the SDK both send information about themselves, including the version of KOTS or the SDK running in the cluster.
-* Any custom metrics configured by the software vendor are sent to the Vendor Portal through the Replicated SDK API. For more information, see [Configuring Custom Metrics](/vendor/custom-metrics).
-* Application status data, such as if the instance is ready or degraded, is sent by KOTS. If KOTS is not installed in the cluster, then the SDK sends the application status data. For more information, see [Enabling and Understanding Application Status](/vendor/insights-app-status).
-
-### Air Gap Instances
-
-<AirGapTelemetry/>
-
-For more information, see [Collecting Telemetry for Air Gap Instances](/vendor/telemetry-air-gap).
-
-## Frequency of Data Sent to the Vendor Portal
-
-This section describes how frequently data is sent to the Vendor Portal for online and air gap instances.
-
-### From the Replicated SDK (Online Instances Only)
-
-When installed alongside the application in an online environment, the SDK automatically sends instance data to the Vendor Portal when any of the following occur:
-
-* Four hours have passed since the SDK last sent data.
-
-* The instance checks for updates. An update check occurs when the instance makes a request to the `/api/v1/app/updates` SDK API endpoint. See [app](/reference/replicated-sdk-apis#app) in _Replicated SDK API (Alpha)_. For a sketch of calling this endpoint in-cluster, see the example at the end of this section.
-
-* The instance completes a Helm update to a new application version. After the update completes, the SDK sends data when it restarts.
-
-* The status of an instance changes. For example, an instance can change from a Ready to Degraded status. For more information, see [Enabling and Understanding Application Status](insights-app-status).
-
-### From KOTS (Online Instances Only)
-
-When installed alongside the application in an online environment, KOTS automatically sends instance data to the Vendor Portal when any of the following occur:
-
-* The instance checks for updates. By default, KOTS checks for updates every four hours. Additionally, an update check can occur when a user clicks the **Check for updates** button in the Replicated Admin Console.
-
-   :::note
-   KOTS users can modify or disable automatic update checks from the Admin Console. For more information, see [Configuring Automatic Updates](/enterprise/updating-apps).
-   :::
-
-* The status of an instance changes. For example, an instance can change from a Ready to Degraded status. For more information, see [Enabling and Understanding Application Status](insights-app-status).
-
-* (KOTS v1.92 and later only) The instance deploys a new application version. 
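-
-For reference, one way to trigger the SDK update check described above is to call the SDK API from inside the cluster. The following is a minimal sketch that assumes the SDK's default in-cluster service name (`replicated`) and port (`3000`); the deployment name is hypothetical:
-
-```bash
-# Call the SDK API update-check endpoint from an application workload in the same namespace
-kubectl exec deploy/my-app -- curl -s http://replicated:3000/api/v1/app/updates
-```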
-
-### From Air Gap Instances
-
-For air gap instances, the frequency of data sent to the Vendor Portal depends on how frequently support bundles are collected in the customer environment and uploaded to the Vendor Portal.
-
-For more information, see [Collecting Telemetry for Air Gap Instances](/vendor/telemetry-air-gap).
-
-## How the Vendor Portal Generates Events and Insights {#about-events}
-
-When the Vendor Portal receives instance data, it evaluates each data field to determine if there was a change in its value. For each field that changes in value, the Vendor Portal creates an _event_ to record the change. For example, a change from Ready to Degraded in the application status generates an event.
-
-In addition to creating events for changes in data sent by the instance, the Vendor Portal also generates events for changes in values of computed metrics. The Vendor Portal updates the values of computed metrics each time it receives instance data. For example, the Vendor Portal computes a _Versions behind_ metric that tracks the number of versions behind the latest available version for the instance. When the instance checks for updates and a new update is available, the value of this metric changes and the Vendor Portal generates an event.
-
-The Vendor Portal uses events to display insights for each active instance in an **Instance details** dashboard. For more information about using the Vendor Portal **Instance details** page to monitor active instances of your application, see [Instance Details](instance-insights-details).
-
-## Requirements
-
-The following requirements apply to collecting instance telemetry:
-
-* Replicated KOTS or the Replicated SDK must be installed in the cluster where the application instance is running.
-
-* For KOTS installations and for Helm CLI installations that use `helm template` then `kubectl apply`, additional configuration is required to get application status data. For more information, see [Enabling and Understanding Application Status](/vendor/insights-app-status).
-
-* To view resource status details for an instance on the **Instance details** page, the Replicated SDK must be installed in the cluster alongside the application. For more information, see [View Resource Status Insights](insights-app-status#resource-status) in _Enabling and Understanding Application Status_.
-
-* There are additional requirements for collecting telemetry from air gap instances. For more information, see [Collecting Telemetry for Air Gap Instances](/vendor/telemetry-air-gap).
-
-## Limitations
-
-The Vendor Portal has the following limitations for reporting instance data and generating events:
-
-* **Active instances**: Instance data is available for _active_ instances. An instance is considered inactive when its most recent check-in was more than 24 hours ago. An instance can become inactive if it is decommissioned, stops checking for updates, or otherwise stops reporting.
-
-   The Vendor Portal continues to display data for an inactive instance from its most recently seen state. This means that data for an inactive instance might continue to show a Ready status after the instance becomes inactive. Replicated recommends that you use the timestamp in the **Last Check-in** field to understand if an instance might have become inactive, causing its data to be out of date.
-* **Air gap instances**: There are additional limitations for air gap telemetry. For more information, see [Collecting Telemetry for Air Gap Instances](/vendor/telemetry-air-gap). 
-* **Instance data freshness**: The rate at which data is updated in the Vendor Portal varies depending on how often the Vendor Portal receives instance data.
-* **Event timestamps**: The timestamp of events displayed on the **Instance details** page is the timestamp when the Replicated Vendor API received the data from the instance. The timestamp of events does not necessarily reflect the timestamp of when the event occurred.
-* **Caching for kURL cluster data**: For clusters created with Replicated kURL (embedded clusters), KOTS stores the counts of total nodes and ready nodes in a cache for five minutes. If KOTS sends instance data to the Vendor Portal within the five minute window, then the reported data for total nodes and ready nodes reflects the data in the cache. This means that events displayed on the **Instance details** page for the total nodes and ready nodes can show values that differ from the current values of these fields.
-
-================
-File: docs/vendor/instance-notifications-config.mdx
-================
-import NotificationsAbout from "../partials/instance-insights/_notifications-about.mdx"
-
-# Configuring Instance Notifications (Beta)
-
-<NotificationsAbout/>
-
-This topic describes how to configure Slack or email notifications in the Replicated Vendor Portal for instances of your application.
-
-For information about creating and managing instance notifications with the Vendor API v3, see the [notifications](https://replicated-vendor-api.readme.io/reference/subscribeinstanceevents) section in the Vendor API v3 documentation.
-
-## Overview
-
-Teams can receive notifications about customer instances through a Slack channel. Individual users can also receive email notifications.
-
-Instance notifications can be disabled when they are no longer needed. For example, a team member can turn off their email notifications for a customer instance when they are no longer responsible for supporting that customer.
-
-## Prerequisite
-
-For Slack notifications, you must configure a Slack webhook in the Vendor Portal at the Team level before you can turn on instance notifications. For more information, see [Configuring a Slack Webhook (Beta)](team-management-slack-config).
-
-For email notifications, no prior configuration is required. The email address listed in your Vendor Portal account settings is used.
-
-## Configure Notifications
-
-Follow this procedure to configure Slack or email notifications for application instances. You can enable notifications for application status changes, system events such as Kubernetes upgrades, or changes in the values of any custom metrics configured for the application.
-
-To configure notifications:
-
-1. Go to **Applications > Customers**, and click an active customer instance that you want to receive notifications for.
-
-   <img src="/images/customer-instances.png" alt="Customer instances list in the Vendor Portal" width="600"/>
-
-1. On the Instance Details page, click **Notifications**.
-
-   <img width="600px" src="/images/instance-notifications.png" />
-
-1. From the **Configure Instance Notifications** dialog, select the types of notifications to enable.
-
-   ![Configure Instance Notifications dialog](/images/instance-notifications-dialog.png)
-
-   [View a larger version of this image](/images/instance-notifications-dialog.png)
-
-1. Click **Save**.
-
-1. Repeat these steps to configure notifications for other application instances. 
-
-## Test Notifications
-
-After you enable notifications for a running development instance, test that your notifications are working as expected.
-
-Do this by forcing your application into a non-ready state. For example, you can delete one or more application Pods and wait for a ReplicationController to recreate them.
-
-Then, look for notifications in the assigned Slack channel. You also receive an email if you enabled email notifications.
-
-:::note
-There is a 30-second buffer between event detection and notifications being sent. This buffer provides better roll-ups and reduces noise.
-:::
-
-================
-File: docs/vendor/kots-faq.mdx
-================
-import SDKOverview from "../partials/replicated-sdk/_overview.mdx"
-import EmbeddedKubernetes from "../partials/kots/_embedded-kubernetes-definition.mdx"
-import Helm from "../partials/helm/_helm-definition.mdx"
-import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"
-
-# Replicated FAQs
-
-This topic lists frequently asked questions (FAQs) for different components of the Replicated Platform.
-
-## Getting Started FAQs
-
-### What are the supported application packaging options?
-
-Replicated strongly recommends that all applications are packaged using Helm.
-
-<Helm/>
-
-Many enterprise customers expect to be able to install an application with Helm in their own cluster. Packaging with Helm allows you to support installation with the Helm CLI and with the Replicated installers (Replicated Embedded Cluster and Replicated KOTS) from a single release in the Replicated Platform.
-
-For vendors that do not want to use Helm, applications distributed with Replicated can be packaged as Kubernetes manifest files.
-
-### How do I get started with Replicated?
-
-Replicated recommends that new users start by completing one or more labs or tutorials to get familiar with the processes of creating, installing, and iterating on releases for an application with the Replicated Platform.
-
-Then, when you are ready to begin onboarding your own application to the Replicated Platform, see [Replicated Onboarding](replicated-onboarding) for a list of Replicated features to begin integrating.
-
-#### Labs
-
-The following labs in Instruqt provide a hands-on introduction to working with Replicated features, without needing your own sample application or development environment:
-
-* [Distributing Your Application with Replicated](https://play.instruqt.com/embed/replicated/tracks/distributing-with-replicated?token=em_VHOEfNnBgU3auAnN): Learn how to quickly get value from the Replicated Platform for your application.
-* [Delivering Your Application as a Kubernetes Appliance](https://play.instruqt.com/embed/replicated/tracks/delivering-as-an-appliance?token=em_lUZdcv0LrF6alIa3): Use Embedded Cluster to distribute Kubernetes and an application together as a single appliance.
-* [Avoiding Installation Pitfalls](https://play.instruqt.com/embed/replicated/tracks/avoiding-installation-pitfalls?token=em_gJjtIzzTTtdd5RFG): Learn how to use preflight checks to avoid common installation issues and assure your customer is installing into a supported environment.
-* [Closing the Support Information Gap](https://play.instruqt.com/embed/replicated/tracks/closing-information-gap?token=em_MO2XXCz3bAgwtEca): Learn how to use support bundles to close the information gap between your customers and your support team. 
-* [Protecting Your Assets](https://play.instruqt.com/embed/replicated/tracks/protecting-your-assets?token=em_7QjY34G_UHKoREBd): Assure your customers have the right access to your application artifacts and features using Replicated licensing.
-
-#### Tutorials
-
-The following getting started tutorials demonstrate how to integrate key Replicated features with a sample Helm chart application:
-* [Install a Helm Chart on a VM with Embedded Cluster](/vendor/tutorial-embedded-cluster-setup): Create a release that can be installed on a VM with the Embedded Cluster installer.
-* [Install a Helm Chart with KOTS and the Helm CLI](/vendor/tutorial-kots-helm-setup): Create a release that can be installed with both the KOTS installer and the Helm CLI.
-* [Set Helm Chart Values with KOTS](/vendor/tutorial-config-setup): Configure the Admin Console Config screen to collect user-supplied values.
-* [Add Preflight Checks to a Helm Chart](/vendor/tutorial-preflight-helm-setup): Create preflight checks for your application by adding a spec for preflight checks to a Secret in the Helm templates.
-
-### What are air gap installations?
-
-_Air gap_ refers to a computer or network that does not have outbound internet access. Air-gapped environments are common for enterprises that require high security, such as government agencies or financial institutions.
-
-Traditionally, air-gapped systems are physically isolated from the network. For example, an air-gapped server might be stored in a separate location away from network-connected servers. Physical access to air-gapped servers is often restricted as well.
-
-It is also possible to use _virtual_ or _logical_ air gaps, in which security controls such as firewalls, role-based access control (RBAC), and encryption are used to logically isolate a device from a network. In this way, network access is still restricted, but there is not a physical air gap that disconnects the device from the network.
-
-Replicated supports installations into air-gapped environments. In an air gap installation, users first download the images and other assets required for installation on an internet-connected device. These installation assets are usually provided in an _air gap bundle_ that ISVs can build in the Replicated Vendor Portal. Then, users transfer the installation assets to their air-gapped machine where they can push the images to an internal private registry and install.
-
-For more information, see:
-* [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap)
-* [Installing and Updating with Helm in Air Gap Environments](/vendor/helm-install-airgap)
-
-### What is the Commercial Software Distribution Lifecycle?
-
-Commercial software distribution is the business process that independent software vendors (ISVs) use to enable enterprise customers to self-host a fully private instance of the vendor's application in an environment controlled by the customer.
-
-Replicated has developed the Commercial Software Distribution Lifecycle to represent the stages that are essential for every company that wants to deliver their software securely and reliably to customer-controlled environments.
-
-This lifecycle was inspired by the DevOps lifecycle and the Software Development Lifecycle (SDLC), but it focuses on the unique requirements for successfully distributing commercial software to tens, hundreds, or thousands of enterprise customers. 
-
-The phases are:
-* Develop
-* Test
-* Release
-* License
-* Install
-* Report
-* Support
-
-For more information about the Replicated features that enhance each phase of the lifecycle, see [Introduction to Replicated](../intro-replicated).
-
-## Compatibility Matrix FAQs
-
-### What types of clusters can I create with Compatibility Matrix?
-
-You can use Compatibility Matrix to get kubectl access to running clusters within minutes. Compatibility Matrix supports a variety of VM and cloud distributions, including Red Hat OpenShift, Replicated Embedded Cluster, and Oracle Container Engine for Kubernetes (OKE). For a complete list, see [Supported Compatibility Matrix Cluster Types](/vendor/testing-supported-clusters).
-
-### How does billing work?
-
-Clusters created with Compatibility Matrix are billed by the minute. Per-minute billing begins when the cluster reaches a running status and ends when the cluster is deleted. For more information, see [Billing and Credits](/vendor/testing-about#billing-and-credits).
-
-### How do I buy credits?
-
-To create clusters with Compatibility Matrix, you must have credits in your Vendor Portal account. If you have a contract, you can purchase credits by logging in to the Vendor Portal and going to **[Compatibility Matrix > Buy additional credits](https://vendor.replicated.com/compatibility-matrix)**. Otherwise, to request credits, log in to the Vendor Portal and go to **[Compatibility Matrix > Request more credits](https://vendor.replicated.com/compatibility-matrix)**.
-
-### How do I add Compatibility Matrix to my CI/CD pipelines?
-
-You can use Replicated CLI commands to integrate Compatibility Matrix into your CI/CD development and production workflows. This allows you to programmatically create different types of clusters where you can deploy and test your application before releasing.
-
-For more information, see [About Integrating with CI/CD](/vendor/ci-overview).
-
-## KOTS and Embedded Cluster FAQs
-
-### What is the Admin Console?
-
-The Admin Console is the user interface deployed by the Replicated KOTS installer. Users log in to the Admin Console to configure and install the application. Users also access the Admin Console after installation to complete application management tasks such as performing updates, syncing their license, and generating support bundles. For installations with Embedded Cluster, the Admin Console also includes a **Cluster Management** tab where users can manage the nodes in the cluster.
-
-The Admin Console is available in installations with Replicated Embedded Cluster and Replicated KOTS.
-
-The following shows an example of the Admin Console dashboard for an Embedded Cluster installation of an application named "Gitea":
-
-<img src="/images/gitea-ec-ready.png" width="800px" alt="admin console dashboard"/>
-
-[View a larger version of this image](/images/gitea-ec-ready.png)
-
-### How do Embedded Cluster installations work?
-
-To install with Embedded Cluster, users first download and extract the Embedded Cluster installation assets for the target application release on their VM or bare metal server. Then, they run an Embedded Cluster installation command to provision the cluster. During installation, Embedded Cluster also installs Replicated KOTS in the cluster, which deploys the Admin Console. 
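-
-As a sketch, that download, extract, and install sequence generally looks like the following, using placeholder values for the application slug, channel, and license ID:
-
-```bash
-# Download the Embedded Cluster installation assets for the release
-curl -f "https://replicated.app/embedded/APP_SLUG/CHANNEL" -H "Authorization: LICENSE_ID" -o APP_SLUG-CHANNEL.tgz
-# Extract the installer binary and license file
-tar -xvzf APP_SLUG-CHANNEL.tgz
-# Provision the cluster and install KOTS
-sudo ./APP_SLUG install --license license.yaml
-```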
-
-After the installation command finishes, users log in to the Admin Console to provide application configuration values, optionally join more nodes to the cluster, run preflight checks, and deploy the application.
-
-Customer-specific Embedded Cluster installation instructions are provided in the Replicated Vendor Portal. For more information, see [Installing with Embedded Cluster](/enterprise/installing-embedded).
-
-### Does Replicated support installations into air gap environments?
-
-Yes. The Embedded Cluster and KOTS installers support installation in _air gap_ environments with no outbound internet access.
-
-To support air gap installations, vendors can build air gap bundles for their application in the Vendor Portal that contain all the required assets for a specific release of the application. Additionally, Replicated provides bundles that contain the assets for the Replicated installers.
-
-For more information about how to install with Embedded Cluster and KOTS in air gap environments, see [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap) and [Air Gap Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster-airgapped).
-
-### Can I deploy Helm charts with KOTS?
-
-Yes. An application deployed with KOTS can use one or more Helm charts, can include Helm charts as components, and can use more than a single instance of any Helm chart. Each Helm chart requires a unique HelmChart custom resource (`apiVersion: kots.io/v1beta2`) in the release.
-
-For more information, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about).
-
-### What's the difference between Embedded Cluster and kURL?
-
-Replicated Embedded Cluster is a successor to Replicated kURL. Compared to kURL, Embedded Cluster offers significantly faster installation, updates, and node joins, a redesigned Admin Console UI, improved support for multi-node clusters, one-click updates that update the application and the cluster at the same time, and more.
-
-<KurlAvailability/>
-
-For more information, see [Embedded Cluster Overview](/vendor/embedded-overview).
-
-### How do I enable Embedded Cluster and KOTS installations for my application?
-
-Releases that support installation with KOTS include the manifests required by KOTS to define the Admin Console experience and install the application.
-
-In addition to the KOTS manifests, releases that support installation with Embedded Cluster also include the Embedded Cluster Config. The Embedded Cluster Config defines aspects of the cluster that will be provisioned and also sets the version of KOTS that will be installed.
-
-For more information, see [Embedded Cluster Overview](/vendor/embedded-overview).
-
-### Can I use my own branding?
-
-The KOTS Admin Console and the Replicated Download Portal support the use of a custom logo. Additionally, software vendors can use custom domains to alias the endpoints for Replicated services.
-
-For more information, see [Customizing the Admin Console and Download Portal](/vendor/admin-console-customize-app-icon) and [About Custom Domains](custom-domains).
-
-## Replicated SDK FAQs
-
-### What is the SDK?
-
-<SDKOverview/>
-
-### Is the SDK supported in air gap environments?
-
-Yes. The Replicated SDK has an _air gap mode_ that allows it to run in environments with no outbound internet access. When installed in air gap mode, the SDK does not attempt to connect to the internet. 
This avoids any failures that would occur when the SDK is unable to make outbound requests in air gap environments. - -For more information, see [Installing the SDK in Air Gap Environments](/vendor/replicated-sdk-airgap). - -### How do I develop against the SDK API? - -You can use the Replicated SDK in _integration mode_ to develop locally against the SDK API without needing to make real changes in the Replicated Vendor Portal or in your environment. - -For more information, see [Developing Against the SDK API](/vendor/replicated-sdk-development). - -### How does the Replicated SDK work with KOTS? - -The Replicated SDK is a Helm chart that can be installed as a small service alongside an application, or as a standalone component. The SDK can be installed using the Helm CLI or KOTS. - -Replicated recommends that all applications include the SDK because it provides access to key functionality not available through KOTS, such as support for sending custom metrics from application instances. When both the SDK and KOTS are installed in a cluster alongside an application, both send instance telemetry to the Vendor Portal. - -For more information about the SDK installation options, see [Installing the Replicated SDK](/vendor/replicated-sdk-installing). - -## Vendor Portal FAQs - -### How do I add and remove team members? - -Admins can add, remove, and manage team members from the Vendor Portal. For more information, see [Managing Team Members](/vendor/team-management). - -### How do I manage RBAC policies for my team members? - -By default, every team has two policies created automatically: Admin and Read Only. If you have an Enterprise plan, you will also have the Sales and Support policies created automatically. These default policies are not configurable. - -You can also configure custom RBAC policies if you are on the Enterprise pricing plan. Creating custom RBAC policies lets you limit which areas of the Vendor Portal are accessible to team members, and control read and read/write privileges to groups based on their role. - -For more information, see [Configuring RBAC Policies](/vendor/team-management-rbac-configuring). - -### Can I alias Replicated endpoints? - -Yes. Replicated supports the use of custom domains to alias the endpoints for Replicated services, such as the Replicated app service and the Replicated proxy registry. - -Replicated domains are external to your domain and can require additional security reviews by your customer. Using custom domains as aliases can bring the domains inside an existing security review and reduce your exposure. - -For more information, see [Using Custom Domains](/vendor/custom-domains-using). - -### How does Replicated collect telemetry from instances of my application? - -For instances running in online (internet-connected) customer environments, either Replicated KOTS or the Replicated SDK periodically sends a small amount of data to the Vendor Portal, depending on which is installed in the cluster alongside the application. If both KOTS and the SDK are installed in the cluster, then both send instance data. - -For air gap instances, Replicated KOTS and the Replicated SDK collect and store instance telemetry in a Kubernetes Secret in the customer environment. The telemetry stored in the Secret is collected when a support bundle is generated in the environment. 
When the support bundle is uploaded to the Vendor Portal, the telemetry is associated with the correct customer and instance ID, and the Vendor Portal updates the instance insights and event data accordingly.
-
-For more information, see [About Instance and Event Data](/vendor/instance-insights-event-data).
-
-================
-File: docs/vendor/kurl-about.mdx
-================
-import KurlDefinition from "../partials/kurl/_kurl-definition.mdx"
-import Installers from "../partials/kurl/_installers.mdx"
-import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"
-
-# Introduction to kURL
-
-<KurlAvailability/>
-
-This topic provides an introduction to the Replicated kURL installer, including information about kURL specifications and installations.
-
-:::note
-The Replicated KOTS entitlement is required to install applications with KOTS and kURL. For more information, see [Pricing](https://www.replicated.com/pricing) on the Replicated website.
-:::
-
-## Overview
-
-<KurlDefinition/>
-
-### kURL Installers
-
-<Installers/>
-
-To distribute a kURL installer alongside your application, you can promote the installer to a channel or include the installer as a manifest file within a given release. For more information about creating kURL installers, see [Creating a kURL Installer](/vendor/packaging-embedded-kubernetes).
-
-### kURL Installations
-
-To install with kURL, users run a kURL installation script on their VM or bare metal server to provision a cluster.
-
-When the KOTS add-on is included in the kURL installer spec, the kURL installation script installs the KOTS CLI and KOTS Admin Console in the cluster. After the installation script completes, users can access the Admin Console at the URL provided in the output of the command to configure and deploy the application with KOTS.
-
-The following shows an example of the output of the kURL installation script:
-
-```bash
-   Installation
-     Complete ✔
-
-Kotsadm: http://10.128.0.35:8800
-Login with password (will not be shown again): 3Hy8WYYid
-
-This password has been set for you by default. It is recommended that you change
-this password; this can be done with the following command:
-kubectl kots reset-password default
-```
-
-kURL installations are supported in online (internet-connected) and air-gapped environments.
-
-For information about how to install applications with kURL, see [Online Installation with kURL](/enterprise/installing-kurl).
-
-## About the Open Source kURL Documentation
-
-The open source documentation for the kURL project is available at [kurl.sh](https://kurl.sh/docs/introduction/).
-
-The open source kURL documentation contains additional information including kURL installation options, kURL add-ons, and procedural content such as how to add and manage nodes in kURL clusters. Software vendors can use the open source kURL documentation to find detailed reference information when creating kURL installer specs or testing installation.
-
-================
-File: docs/vendor/kurl-nodeport-services.mdx
-================
-import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"
-
-# Exposing Services Using NodePorts
-
-<KurlAvailability/>
-
-This topic describes how to expose NodePort services in [Replicated Embedded Cluster](/vendor/embedded-overview) or [Replicated kURL](/vendor/kurl-about) installations on VMs or bare metal servers.
-
-## Overview
-
-For installations into existing clusters, KOTS automatically creates a port forward tunnel to expose the Admin Console.
Unlike installations into existing clusters, KOTS does _not_ automatically open the port forward tunnel for installations in embedded clusters provisioned on virtual machines (VMs) or bare metal servers. This is because it cannot be verified that the ports are secure and authenticated. For more information about the KOTS port forward tunnel, see [Port Forwarding Services with KOTS](/vendor/admin-console-port-forward).
-
-Instead, to expose the Admin Console in installations with [Embedded Cluster](/vendor/embedded-overview) or [kURL](/vendor/kurl-about), KOTS creates the Admin Console as a NodePort service so it can be accessed at the node's IP address on a node port (port 8800 for kURL installations and port 30000 for Embedded Cluster installations). Additionally, for kURL installations, the UIs of Prometheus, Grafana, and Alertmanager are also exposed using NodePorts.
-
-For installations on VMs or bare metal servers where your application must be accessible from the user's local machine rather than from inside the cluster, you can expose application services as NodePorts to provide access to the application after installation.
-
-## Add a NodePort Service
-
-Services with `type: NodePort` can be contacted from outside the cluster by connecting to any node using the appropriate protocol and port. For more information about working with the NodePort service type, see [type: NodePort](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport) in the Kubernetes documentation.
-
-The following shows an example of a NodePort type service:
-
-```yaml
-apiVersion: v1
-kind: Service
-metadata:
-  name: sentry
-  labels:
-    app: sentry
-spec:
-  type: NodePort
-  ports:
-  - port: 9000
-    targetPort: 9000
-    nodePort: 9000
-    protocol: TCP
-    name: sentry
-  selector:
-    app: sentry
-    role: web
-```
-
-After configuring a NodePort service for your application, you can add a link to the service on the Admin Console dashboard where it can be accessed by users after the application is installed. For more information, see [About Accessing NodePort Services](#about-accessing-nodeport-services) below.
-
-### Use KOTS Annotations to Conditionally Deploy NodePort Services
-
-You can use the KOTS [`kots.io/when`](/vendor/packaging-include-resources#kotsiowhen) annotation to conditionally deploy a service. This is useful when you want to deploy a ClusterIP or LoadBalancer service for existing cluster installations, and deploy a NodePort service for Embedded Cluster or kURL installations.
-
-To conditionally deploy a service based on the installation method, you can use the following KOTS template functions in the `kots.io/when` annotation:
-* [IsKurl](/reference/template-functions-static-context#iskurl): Detects kURL installations. For example, `repl{{ IsKurl }}` returns true for kURL installations, and `repl{{ not IsKurl }}` returns true for non-kURL installations.
-* [Distribution](/reference/template-functions-static-context#distribution): Returns the distribution of the cluster where KOTS is running. For example, `repl{{ eq Distribution "embedded-cluster" }}` returns true for Embedded Cluster installations and `repl{{ ne Distribution "embedded-cluster" }}` returns true for non-Embedded Cluster installations.
-
-For example, the following `sentry` service with `type: NodePort` includes the annotation `kots.io/when: repl{{ eq Distribution "embedded-cluster" }}`.
This creates a NodePort service _only_ when installing with Embedded Cluster:
-
-```yaml
-apiVersion: v1
-kind: Service
-metadata:
-  name: sentry
-  labels:
-    app: sentry
-  annotations:
-    # This annotation ensures that the NodePort service
-    # is only created in Embedded Cluster installations
-    kots.io/when: repl{{ eq Distribution "embedded-cluster" }}
-spec:
-  type: NodePort
-  ports:
-  - port: 9000
-    targetPort: 9000
-    nodePort: 9000
-    protocol: TCP
-    name: sentry
-  selector:
-    app: sentry
-    role: web
-```
-
-Similarly, to ensure that a `sentry` service with `type: ClusterIP` is only created in existing cluster installations, add the annotation `kots.io/when: repl{{ ne Distribution "embedded-cluster" }}` to the ClusterIP specification:
-
-```yaml
-apiVersion: v1
-kind: Service
-metadata:
-  name: sentry
-  labels:
-    app: sentry
-  annotations:
-    # This annotation ensures that the ClusterIP service
-    # is only created in existing cluster installations
-    kots.io/when: repl{{ ne Distribution "embedded-cluster" }}
-spec:
-  type: ClusterIP
-  ports:
-  - port: 9000
-    targetPort: 9000
-    protocol: TCP
-    name: sentry
-  selector:
-    app: sentry
-    role: web
-```
-
-## About Accessing NodePort Services
-
-This section describes how to provide access to NodePort services after installation.
-
-### VM Firewall Requirements
-
-To be able to access the Admin Console and any NodePort services for your application, the firewall for the VM where the user installs must allow HTTP traffic and must allow inbound traffic from the user's workstation to the port where the service is exposed. Users can consult their cloud provider's documentation for more information about updating firewall rules.
-
-### Add a Link on the Admin Console Dashboard {#add-link}
-
-You can provide a link to a NodePort service on the Admin Console dashboard by configuring the `links` array in the Kubernetes SIG Application custom resource. This provides users with an easy way to access the application after installation. For more information, see [Adding Links to the Dashboard](admin-console-adding-buttons-links).
-
-For example:
-
-<img alt="Admin Console dashboard with Open App link" src="/images/gitea-open-app.png" width="700px"/>
-
-[View a larger version of this image](/images/gitea-open-app.png)
-
-================
-File: docs/vendor/kurl-reset.mdx
-================
-import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"
-
-# Resetting a kURL Cluster
-
-<KurlAvailability/>
-
-This topic describes how to use the kURL `reset` command to reset a kURL cluster.
-
-## Overview
-
-If you need to reset a kURL installation, such as when you are testing releases with kURL, you can use the kURL `tasks.sh` `reset` command to remove Kubernetes from the system.
-
-Alternatively, you can discard your current VM (if you are using one) and recreate the VM with a new OS to reinstall with kURL.
-
-For more information about the `reset` command, see [Resetting a Node](https://kurl.sh/docs/install-with-kurl/managing-nodes#reset-a-node) in the kURL documentation.
-
-To reset a kURL installation:
-
-1. Access the machine where you installed with kURL.
-
-1. Run the following command to remove Kubernetes from the system:
-
-   ```
-   curl -sSL https://k8s.kurl.sh/latest/tasks.sh | sudo bash -s reset
-   ```
-
-1. Follow the instructions in the output of the command to manually remove any files that the `reset` command does not remove.
-
-If the `reset` command is unsuccessful, discard your current VM, and recreate the VM with a new OS to reinstall the Admin Console and the application.
-
-================
-File: docs/vendor/licenses-about-types.md
-================
-# About Community Licenses
-
-This topic describes community licenses. For more information about other types of licenses, see [Customer Types](licenses-about#customer-types) in _About Customers_.
-
-## Overview
-
-Community licenses are intended for use with a free or low-cost version of your application. For example, you could use community licenses for an open source version of your application.
-
-After installing an application with a community license, users can replace their community license with a new license of a different type without having to completely reinstall the application. This means that, if you have several community users who install with the same license, then you can upgrade a single community user without editing the license for all community users.
-
-Community licenses are supported for applications that are installed with Replicated KOTS or with the Helm CLI.
-
-For applications installed with KOTS, community license users can upload a new license file of a different type in the Replicated admin console. For more information, see [Upgrade from a Community License](/enterprise/updating-licenses#upgrade-from-a-community-license) in _Updating Licenses in the Admin Console_.
-
-## Limitations
-
-Community licenses function in the same way as the other types of licenses, with the following exceptions:
-
-* Updating a community license to another type of license cannot be reverted.
-* Community license users are not supported by the Replicated Support team.
-* Community licenses cannot support air-gapped installations.
-* Community licenses cannot include an expiration date.
-
-## Community License Admin Console Branding
-
-For applications installed with KOTS, the branding in the admin console for community users differs in the following ways:
-
-* The license tile on the admin console **Dashboard** page is highlighted in yellow and labeled **Community Edition**.
-
-  ![Community License Dashboard](/images/community-license-dashboard.png)
-
-  [View a larger version of this image](/images/community-license-dashboard.png)
-
-* All support bundles and analysis in the admin console are clearly marked as **Community Edition**.
-
-  ![Community License Support Bundle](/images/community-license-bundle.png)
-
-  [View a larger version of this image](/images/community-license-bundle.png)
-
-================
-File: docs/vendor/licenses-about.mdx
-================
-import ChangeChannel from "../partials/customers/_change-channel.mdx"
-
-# About Customers and Licensing
-
-This topic provides an overview of customers and licenses in the Replicated Platform.
-
-## Overview
-
-The licensing features of the Replicated Platform allow vendors to securely grant access to software, making license agreements available to the application in end customer environments at startup and runtime.
-
-The Replicated Vendor Portal also allows vendors to create and manage customer records. Each customer record includes several fields that uniquely identify the customer and the application, specify the customer's assigned release channel, and define the customer's entitlements.
-
-Vendors can use these licensing features to enforce entitlements such as license expiration dates, and to track and report on software usage for the purpose of surfacing insights to both internal teams and customers.
-
-The following diagram provides an overview of licensing with the Replicated Platform:
-
-![App instance communicates with the Replicated licensing server](/images/licensing-overview.png)
-
-[View a larger version of this image](/images/licensing-overview.png)
-
-As shown in the diagram above, the Replicated license and update server manages and distributes customer license information. The license server retrieves this license information from customer records managed by vendors in the Vendor Portal.
-
-During installation or upgrade, the customer's license ID is used to authenticate with the license server. The license ID also provides authentication for the Replicated proxy registry, securely granting proxy access to images in the vendor's external registry.
-
-The license server is identified with a CNAME record where it can be accessed from end customer environments. When running alongside an application in a customer environment, the Replicated SDK retrieves up-to-date customer license information from the license server during runtime. The in-cluster SDK API `/license/` endpoints can be used to get customer license information on-demand, allowing vendors to programmatically enforce and report on license agreements.
-
-Vendors can also integrate internal Customer Relationship Management (CRM) tools such as Salesforce with the Replicated Platform so that any changes to a customer's entitlements are automatically reflected in the Vendor Portal. This ensures that updates to license agreements are reflected in the customer environment in real time.
-
-## About Customers
-
-Each customer that you create in the Replicated Vendor Portal has a unique license ID. Your customers use their license when they install or update your application.
-
-You assign customers to channels in the Vendor Portal to control their access to your application releases. Customers can install or upgrade to releases that are promoted to the channel they are assigned to. For example, assigning a customer to your Beta channel allows that customer to install or upgrade to only releases promoted to the Beta channel.
-
-Each customer license includes several fields that uniquely identify the customer and the application, specify the customer's assigned release channel, and define the customer's entitlements, such as whether the license has an expiration date or what application functionality the customer can access. Replicated securely delivers these entitlements to the application and makes them available at installation or at runtime.
-
-For more information about how to create and manage customers, see [Creating and Managing Customers](releases-creating-customer).
-
-### Customer Channel Assignment {#channel-assignment}
-
-<ChangeChannel/>
-
-For example, if the latest release promoted to the Beta channel is version 1.25.0 and version 1.10.0 is marked as required, when you edit an existing customer to assign them to the Beta channel, then the KOTS Admin Console always fetches 1.25.0, even though 1.10.0 is marked as required. The required release 1.10.0 is ignored and is not available to the customer for upgrade.
-
-For more information about how to mark a release as required, see [Properties](releases-about#properties) in _About Channels and Releases_.
For more information about how to synchronize licenses in the Admin Console, see [Updating Licenses in the Admin Console](/enterprise/updating-licenses).
-
-### Customer Types
-
-Each customer is assigned one of the following types:
-
-* **Development**: The Development type can be used internally by the development team for testing and integration.
-* **Trial**: The Trial type can be used for customers who are on 2-4 week trials of your software.
-* **Paid**: The Paid type identifies the customer as a paying customer for whom additional information can be provided.
-* **Community**: The Community type is designed for a free or low-cost version of your application. For more details about this type, see [Community Licenses](licenses-about-types).
-* (Beta) **Single Tenant Vendor Managed**: The Single Tenant Vendor Managed type is for customers for whom your team operates the application in infrastructure that you fully control. Single Tenant Vendor Managed licenses are free to use, but come with limited support. The Single Tenant Vendor Managed type is a Beta feature. Reach out to your Replicated account representative to get access.
-
-Except for community licenses, the license type is used solely for reporting purposes and a customer's access to your application is not affected by the type that you assign.
-
-You can change the type of a license at any time in the Vendor Portal. For example, if a customer upgraded from a trial to a paid account, then you could change their license type from Trial to Paid for reporting purposes.
-
-### About Managing Customers
-
-Each customer record in the Vendor Portal has built-in fields and also supports custom fields:
-* The built-in fields include values such as the customer name, customer email, and the license expiration date. You can optionally set initial values for the built-in fields so that each new customer created in the Vendor Portal starts with the same set of values.
-* You can also create custom fields to define entitlements for your application. For example, you can create a custom field to set the number of active users permitted.
-
-For more information, see [Managing Customer License Fields](/vendor/licenses-adding-custom-fields).
-
-You can make changes to a customer record in the Vendor Portal at any time. The license ID, which is the unique identifier for the customer, never changes. For more information about managing customers in the Vendor Portal, see [Creating and Managing Customers](releases-creating-customer).
-
-### About the Customers Page
-
-The following shows an example of the **Customers** page:
-
-![Customers page](/images/customers-page.png)
-
-[View a larger version of this image](/images/customers-page.png)
-
-From the **Customers** page, you can do the following:
-
-* Create new customers.
-
-* Download CSVs with customer and instance data.
-
-* Search and filter customers.
-
-* Click the **Manage customer** button to edit details such as the customer name and email, the custom license fields assigned to the customer, and the license expiration policy. For more information, see [Creating and Managing Customers](releases-creating-customer).
-
-* Download the license file for each customer.
-
-* Click the **Customer reporting** button to view data about the active application instances associated with each customer. For more information, see [Customer Reporting](customer-reporting).
-
-* View instance details for each customer, including the version of the application that the instance is running, the Kubernetes distribution of the cluster, the last check-in time, and more:
-
-  <img width="800px" src="/images/customer-reporting-details.png" />
-
-  [View a larger version of this image](/images/customer-reporting-details.png)
-
-* Archive customers. For more information, see [Creating and Managing Customers](releases-creating-customer).
-
-* Click on a customer on the **Customers** page to access the following customer-specific pages:
-  * [Reporting](#about-the-customer-reporting-page)
-  * [Manage customer](#about-the-manage-customer-page)
-  * [Support bundles](#about-the-customer-support-bundles-page)
-
-### About the Customer Reporting Page
-
-The **Reporting** page for a customer displays data about the active application instances associated with that customer. The following shows an example of the **Reporting** page for a customer that has two active application instances:
-
-![Customer reporting page in the Vendor Portal](/images/customer-reporting-page.png)
-[View a larger version of this image](/images/customer-reporting-page.png)
-
-For more information about interpreting the data on the **Reporting** page, see [Customer Reporting](customer-reporting).
-
-### About the Manage Customer Page
-
-The **Manage customer** page for a customer displays details about the customer license, including the customer name and email, the license expiration policy, custom license fields, and more.
-
-The following shows an example of the **Manage customer** page:
-
-![Manage customer page in the Vendor Portal](/images/customer-details.png)
-[View a larger version of this image](/images/customer-details.png)
-
-From the **Manage customer** page, you can view and edit the customer's license fields or archive the customer. For more information, see [Creating and Managing Customers](releases-creating-customer).
-
-### About the Customer Support Bundles Page
-
-The **Support bundles** page for a customer displays details about the support bundles collected from the customer. Customers with the **Support Bundle Upload Enabled** entitlement can provide support bundles through the KOTS Admin Console, or you can upload support bundles manually in the Vendor Portal by going to **Troubleshoot > Upload a support bundle**. For more information about uploading and analyzing support bundles, see [Inspecting Support Bundles](support-inspecting-support-bundles).
-
-The following shows an example of the **Support bundles** page:
-
-![Support bundles page in the Vendor Portal](/images/customer-support-bundles.png)
-[View a larger version of this image](/images/customer-support-bundles.png)
-
-As shown in the screenshot above, the **Support bundles** page lists details about the collected support bundles, such as the date the support bundle was collected and the debugging insights found. You can click on a support bundle to view it on the **Support bundle analysis** page. You can also click **Delete** to delete the support bundle, or click **Customer Reporting** to view the **Reporting** page for the customer.
-
-## About Licensing with Replicated
-
-### About Syncing Licenses
-
-When you edit customer licenses for an application installed with a Replicated installer (Embedded Cluster, KOTS, kURL), your customers can use the KOTS Admin Console to get the latest license details from the Vendor Portal, then deploy a new version that includes the license changes.
Deploying a new version with the license changes ensures that any license fields that you have templated in your release using [KOTS template functions](/reference/template-functions-about) are rendered with the latest license details.
-
-For online instances, KOTS pulls license details from the Vendor Portal when:
-* A customer clicks **Sync license** in the Admin Console.
-* An automatic or manual update check is performed by KOTS.
-* An update is performed with Replicated Embedded Cluster. See [Performing Updates with Embedded Cluster](/enterprise/updating-embedded).
-* An application status changes. See [Current State](instance-insights-details#current-state) in _Instance Details_.
-
-For more information, see [Updating Licenses in the Admin Console](/enterprise/updating-licenses).
-
-### About Syncing Licenses in Air-Gapped Environments
-
-To update licenses in air gap installations, customers need to upload the updated license file to the Admin Console.
-
-After you update the license fields in the Vendor Portal, you can notify customers by either sending them a new license file or instructing them to log in to their Download Portal to download the new license.
-
-For more information, see [Updating Licenses in the Admin Console](/enterprise/updating-licenses).
-
-### Retrieving License Details with the SDK API
-
-The [Replicated SDK](replicated-sdk-overview) includes an in-cluster API that can be used to retrieve up-to-date customer license information from the Vendor Portal during runtime through the [`license`](/reference/replicated-sdk-apis#license) endpoints. This means that you can add logic to your application to get the latest license information without the customer needing to perform a license update. The SDK API polls the Vendor Portal for updated data every four hours.
-
-In KOTS installations that include the SDK, users need to update their licenses from the Admin Console as described in [About Syncing Licenses](#about-syncing-licenses) above. However, any logic in your application that uses the SDK API will update the user's license information without the customer needing to deploy a license update in the Admin Console.
-
-For information about how to use the SDK API to query license entitlements at runtime, see [Querying Entitlements with the Replicated SDK API](/vendor/licenses-reference-sdk).
-
-### License Expiration Handling {#expiration}
-
-The built-in `expires_at` license field defines the expiration date for a customer license. When you set an expiration date in the Vendor Portal, the `expires_at` field is encoded in ISO 8601 format (`2026-01-23T00:00:00Z`) and is set to midnight UTC at the beginning of the calendar day (`00:00:00`) on the date selected.
-
-Replicated enforces the following logic when a license expires:
-* By default, instances with expired licenses continue to run. To change the behavior of your application when a license expires, you can add custom logic in your application that queries the `expires_at` field using the Replicated SDK in-cluster API (see the example following this list). For more information, see [Querying Entitlements with the Replicated SDK API](/vendor/licenses-reference-sdk).
-* An expired license cannot be used to log in to the Replicated registry to pull a Helm chart for installation or upgrade.
-* An expired license cannot be used to pull application images through the Replicated proxy registry or from the Replicated registry.
-* In Replicated KOTS installations, KOTS prevents instances with expired licenses from receiving updates.
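-
-The following is a minimal sketch of the kind of custom expiration logic described above, assuming that the Replicated SDK's `replicated` service is reachable in the cluster on port 3000 and that `jq` is available:
-
-```bash
-# Query the built-in expires_at field from the in-cluster SDK API
-EXPIRES_AT=$(curl -s replicated:3000/api/v1/license/fields/expires_at | jq -r '.value')
-
-# Both timestamps are fixed-width ISO 8601 UTC strings, so a lexicographic
-# comparison is a valid date comparison. An empty value means no expiration
-# date is set for the license.
-NOW=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
-if [ -n "$EXPIRES_AT" ] && [ "$NOW" \> "$EXPIRES_AT" ]; then
-  echo "License expired on $EXPIRES_AT"
-  # Application-specific enforcement logic goes here
-fi
-```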
- -### Replacing Licenses for Existing Installations - -Community licenses are the only license type that can be replaced with a new license without needing to reinstall the application. For more information, see [Community Licenses](licenses-about-types). - -Unless the existing customer is using a community license, it is not possible to replace one license with another license without reinstalling the application. When you need to make changes to a customer's entitlements, Replicated recommends that you edit the customer's license details in the Vendor Portal, rather than issuing a new license. - -================ -File: docs/vendor/licenses-adding-custom-fields.md -================ -# Managing Customer License Fields - -This topic describes how to manage customer license fields in the Replicated Vendor Portal, including how to add custom fields and set initial values for the built-in fields. - -## Set Initial Values for Built-In License Fields (Beta) - -You can set initial values to populate the **Create Customer** form in the Vendor Portal when a new customer is created. This ensures that each new customer created from the Vendor Portal UI starts with the same set of built-in license field values. - -:::note -Initial values are not applied to new customers created through the Vendor API v3. For more information, see [Create a customer](https://replicated-vendor-api.readme.io/reference/createcustomer-1) in the Vendor API v3 documentation. -::: - -These _initial_ values differ from _default_ values in that setting initial values does not update the license field values for any existing customers. - -To set initial values for built-in license fields: - -1. In the Vendor Portal, go to **License Fields**. - -1. Under **Built-in license options**, click **Edit** next to each license field where you want to set an initial value. - - ![Edit Initial Value](/images/edit-initial-value.png) - - [View a larger version of this image](/images/edit-initial-value.png) - -## Manage Custom License Fields - -You can create custom license fields in the Vendor Portal. For example, you can create a custom license field to set the number of active users permitted. Or, you can create a field that sets the number of nodes a customer is permitted on their cluster. - -The custom license fields that you create are displayed in the Vendor Portal for all new and existing customers. If the custom field is not hidden, it is also displayed to customers under the **Licenses** tab in the Replicated Admin Console. - -### Limitation - -The maximum size for a license field value is 64KB. - -### Create Custom License Fields - -To create a custom license field: - -1. Log in to the Vendor Portal and select the application. - -1. On the **License Fields** page, click **Create license field**. - - <img width="500" alt="create a new License Field dialog" src="/images/license-add-custom-field.png"/> - - [View a larger version of this image](/images/license-add-custom-field.png) - -1. Complete the following fields: - - | Field | Description | - |-----------------------|------------------------| - | Field | The name used to reference the field. This value cannot be changed. | - | Title| The display name for the field. This is how the field appears in the Vendor Portal and the Admin Console. You can change the title in the Vendor Portal. | - | Type| The field type. Supported formats include integer, string, text (multi-line string), and boolean values. This value cannot be changed. 
| - | Default | The default value for the field for both existing and new customers. It is a best practice to provide a default value when possible. The maximum size for a license field value is 64KB. | - | Required | If checked, this prevents the creation of customers unless this field is explicitly defined with a value. | - | Hidden | If checked, the field is not visible to your customer in the Replicated Admin Console. The field is still visible to you in the Vendor Portal. **Note**: The Hidden field is displayed only for vendors with access to the Replicated installers (KOTS, kURL, Embedded Cluster). | - -### Update Custom License Fields - -To update a custom license field: - -1. Log in to the Vendor Portal and select the application. -1. On the **License Fields** page, click **Edit Field** on the right side of the target row. Changing the default value for a field updates the value for each existing customer record that has not overridden the default value. - - :::important - Enabling **Is this field is required?** updates the license field to be required on all new and existing customers. If you enable **Is this field is required?**, you must either set a default value for the field or manually update each existing customer to provide a value for the field. - ::: - -### Set Customer-Specific Values for Custom License Fields - -To set a customer-specific value for a custom license field: - -1. Log in to the Vendor Portal and select the application. -1. Click **Customers**. -1. For the target customer, click the **Manage customer** button. -1. Under **Custom fields**, enter values for the target custom license fields for the customer. - - :::note - The maximum size for a license field value is 64KB. - ::: - - <img width="600" alt="Custom license fields section in the manage customer page" src="/images/customer-license-custom-fields.png"/> - - [View a larger version of this image](/images/customer-license-custom-fields.png) - -### Delete Custom License Fields - -Deleted license fields and their values do not appear in the customer's license in any location, including your view in the Vendor Portal, the downloaded YAML version of the license, and the Admin Console **License** screen. - -By default, deleting a custom license field also deletes all of the values associated with the field in each customer record. - -Only administrators can delete license fields. - -:::important -Replicated recommends that you take care when deleting license fields. - -Outages can occur for existing deployments if your application or the Admin Console **Config** page expect a license file to provide a required value. -::: - -To delete a custom license field: - -1. Log in to the Vendor Portal and select the application. -1. On the **License Fields** page, click **Edit Field** on the right side of the target row. -1. Click **Delete** on the bottom left of the dialog. -1. (Optional) Enable **Preserve License Values** to save values for the license field that were not set by the default in each customer record. Preserved license values are not visible to you or the customer. - - :::note - If you enable **Preserve License Values**, you can create a new field with the same name and `type` as the deleted field to reinstate the preserved values. - ::: - -1. Follow the instructions in the dialog and click **Delete**. 
-
-================
-File: docs/vendor/licenses-download.md
-================
-import AirGapLicenseDownload from "../partials/install/_airgap-license-download.mdx"
-
-# Downloading Customer Licenses
-
-This topic describes how to download a license file from the Replicated Vendor Portal.
-
-For information about how to download customer licenses with the Vendor API v3, see [Download a customer license file as YAML](https://replicated-vendor-api.readme.io/reference/downloadlicense) in the Vendor API v3 documentation.
-
-## Download Licenses
-
-You can download license files for your customers from the **Customers** page in the Vendor Portal.
-
-To download a license:
-
-1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Customers** page.
-1. In the row for the target customer, click the **Download License** button.
-
-   ![Download license button](/images/download-license-button.png)
-
-   [View a larger version of this image](/images/download-license-button.png)
-
-## Enable and Download Air Gap Licenses {#air-gap-license}
-
-The **Airgap Download Enabled** license option allows KOTS to install an application without outbound internet access using the `.airgap` bundle.
-
-To enable the air gap entitlement and download the license:
-
-<AirGapLicenseDownload/>
-
-================
-File: docs/vendor/licenses-install-types.mdx
-================
-import InstallerOnlyAnnotation from "../partials/helm/_installer-only-annotation.mdx"
-
-# Managing Install Types for a License
-
-This topic describes how to manage which installation types and options are enabled for a license.
-
-## Overview
-
-You can control which installation methods are available to each of your customers by enabling or disabling **Install types** fields in the customer's license.
-
-The following shows an example of the **Install types** field in a license:
-
-![Install types license fields](/images/license-install-types.png)
-
-[View a larger version of this image](/images/license-install-types.png)
-
-The installation types that are enabled or disabled for a license determine the following:
-* The Replicated installers ([Replicated KOTS](../intro-kots), [Replicated Embedded Cluster](/vendor/embedded-overview), [Replicated kURL](/vendor/kurl-about)) that the customer's license entitles them to use
-* The installation assets and/or instructions provided in the Replicated Download Portal for the customer
-* The customer's KOTS Admin Console experience
-
-Setting the supported installation types on a per-customer basis gives you greater control over the installation method used by each customer. It also allows you to provide a more curated Download Portal experience, in that customers will only see the installation assets and instructions that are relevant to them.
-
-## Understanding Install Types {#install-types}
-
-In the customer license, under **Install types**, the **Available install types** field allows you to enable and disable different installation methods for the customer.
-
-You can enable one or more installation types for a license.
-
-The following describes each installation type available, as well as the requirements for enabling each type:
-
-<table>
-  <tr>
-    <th width="30%">Install Type</th>
-    <th width="35%">Description</th>
-    <th>Requirements</th>
-  </tr>
-  <tr>
-    <th>Existing Cluster (Helm CLI)</th>
-    <td><p>Allows the customer to install with Helm in an existing cluster.
The customer does not have access to the Replicated installers (Embedded Cluster, KOTS, and kURL).</p><p>When the <strong>Helm CLI Air Gap Instructions (Helm CLI only)</strong> install option is also enabled, the Download Portal displays instructions on how to pull Helm installable images into a local repository. See <a href="#install-options">Understanding Additional Install Options</a> below.</p></td> - <td> - <p>The latest release promoted to the channel where the customer is assigned must contain one or more Helm charts. It can also include Replicated custom resources, such as the Embedded Cluster Config custom resource, the KOTS HelmChart, Config, and Application custom resources, or the Troubleshoot Preflight and SupportBundle custom resources.</p> - <InstallerOnlyAnnotation/> - </td> - </tr> - <tr> - <th>Existing Cluster (KOTS install)</th> - <td>Allows the customer to install with Replicated KOTS in an existing cluster.</td> - <td> - <ul> - <li>Your Vendor Portal team must have the KOTS entitlement</li> - <li>The latest release promoted to the channel where the customer is assigned must contain KOTS custom resources, such as the KOTS HelmChart, Config, and Application custom resources. For more information, see [About Custom Resources](/reference/custom-resource-about).</li> - </ul> - </td> - </tr> - <tr> - <th>kURL Embedded Cluster (first generation product)</th> - <td> - <p>Allows the customer to install with Replicated kURL on a VM or bare metal server.</p> - <p><strong>Note:</strong> For new installations, enable Replicated Embedded Cluster (current generation product) instead of Replicated kURL (first generation product).</p> - </td> - <td> - <ul> - <li>Your Vendor Portal team must have the kURL entitlement</li> - <li>A kURL installer spec must be promoted to the channel where the customer is assigned. For more information, see <a href="/vendor/packaging-embedded-kubernetes">Creating a kURL Installer</a>.</li> - </ul> - </td> - </tr> - <tr> - <th>Embedded Cluster (current generation product)</th> - <td>Allows the customer to install with Replicated Embedded Cluster on a VM or bare metal server.</td> - <td> - <ul> - <li>Your Vendor Portal team must have the Embedded Cluster entitlement</li> - <li>The latest release promoted to the channel where the customer is assigned must contain an Embedded Cluster Config custom resource. For more information, see <a href="/reference/embedded-config">Embedded Cluster Config</a>.</li> - </ul> - </td> - </tr> -</table> - -## Understanding Additional Install Options {#install-options} - -After enabling installation types in the **Available install types** field, you can also enable the following options in the **Additional install options** field: - -<table> - <tr> - <th width="30%">Install Type</th> - <th>Description</th> - <th>Requirements</th> - </tr> - <tr> - <th>Helm CLI Air Gap Instructions (Helm CLI only)</th> - <td><p>When enabled, a customer will see instructions on the Download Portal on how to pull Helm installable images into their local repository.</p><p><strong>Helm CLI Air Gap Instructions</strong> is enabled by default when you select the <strong>Existing Cluster (Helm CLI)</strong> install type. 
For more information, see [Installing with Helm in Air Gap Environments](/vendor/helm-install-airgap).</p></td>
-    <td>The <strong>Existing Cluster (Helm CLI)</strong> install type must be enabled</td>
-  </tr>
-  <tr>
-    <th>Air Gap Installation Option (Replicated Installers only)</th>
-    <td><p>When enabled, new installations with this license have an option in their Download Portal to install from an air gap package or do a traditional online installation.</p></td>
-    <td>
-      <p>At least one of the following Replicated install types must be enabled:</p>
-      <ul>
-        <li>Existing Cluster (KOTS install)</li>
-        <li>kURL Embedded Cluster (first generation product)</li>
-        <li>Embedded Cluster (current generation product)</li>
-      </ul>
-    </td>
-  </tr>
-</table>
-
-## About Migrating Existing Licenses to Use Install Types
-
-By default, when an existing customer license is migrated to include the Beta **Install types** field, the Vendor Portal automatically enables certain install types so that the customer does not experience any interruptions or errors in their deployment.
-
-The Vendor Portal uses the following logic to enable install types for migrated licenses:
-
-If the existing license has the **KOTS Install Enabled** field enabled, then the Vendor Portal enables the following install types in the migrated license by default:
-* Existing Cluster (Helm CLI)
-* Existing Cluster (KOTS install)
-* kURL Embedded Cluster (first generation product)
-* Embedded Cluster (current generation product)
-
-Additionally, if the existing **KOTS Install Enabled** license also has the **Airgap Download Enabled** option enabled, then the Vendor Portal enables both of the air gap install options in the migrated license (**Helm CLI Air Gap Instructions (Helm CLI only)** and **Air Gap Installation Option (Replicated Installers only)**).
-
-Otherwise, if the **KOTS Install Enabled** field is disabled for the existing license, then the Vendor Portal enables only the **Existing Cluster (Helm CLI)** install type by default. All other install types will be disabled by default.
-
-================
-File: docs/vendor/licenses-reference-helm.md
-================
-# Checking Entitlements in Helm Charts Before Deployment
-
-This topic describes how to check license entitlements before a Helm chart is installed or upgraded. The information in this topic applies to Helm charts installed with Replicated KOTS or Helm.
-
-The Replicated SDK API can be used to check entitlements at runtime. For more information, see [Querying Entitlements with the Replicated SDK API](licenses-reference-sdk).
-
-## Overview
-
-The Replicated registry automatically injects customer entitlement information into the `global.replicated.licenseFields` field of your Helm chart values. For example:
-
-```yaml
-# Helm chart values.yaml
-global:
-  replicated:
-    licenseFields:
-      expires_at:
-        description: License Expiration
-        name: expires_at
-        signature:
-          v1: iZBpESXx7fpdtnbMKingYHiJH42rP8fPs0x8izy1mODckGBwVoA...
-        title: Expiration
-        value: "2023-05-30T00:00:00Z"
-        valueType: String
-```
-
-You can access the values in the `global.replicated.licenseFields` field from your Helm templates to check customer entitlements before installation.
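-
-For example, the following is a minimal sketch of a template that blocks an installation or upgrade after the license expires. The `templates/license-check.yaml` file name is hypothetical, and the sketch assumes that the chart was pulled through the Replicated registry (so that `global.replicated.licenseFields` is injected) and that the built-in `expires_at` field is set:
-
-```yaml
-# templates/license-check.yaml (hypothetical file name)
-# Both timestamps are fixed-width ISO 8601 strings in UTC, so a
-# lexicographic string comparison is a valid date comparison.
-{{- $expiresAt := .Values.global.replicated.licenseFields.expires_at.value }}
-{{- $currentTime := dateInZone "2006-01-02T15:04:05Z" (now) "UTC" }}
-{{- if and $expiresAt (gt $currentTime $expiresAt) }}
-{{- fail (printf "This license expired on %s. Contact your vendor to renew it." $expiresAt) }}
-{{- end }}
-```
-
-Because `fail` stops template rendering with an error, a check like this runs on both `helm install` and `helm upgrade`.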
-
-## Prerequisite
-
-Add the Replicated SDK to your application:
-* For Helm-based applications, see [Install the SDK as a Subchart](/vendor/replicated-sdk-installing#install-the-sdk-as-a-subchart) in _Installing the Replicated SDK_
-* For applications that use standard Kubernetes manifests, see [Install the SDK Alongside a Kubernetes Manifest-Based Application](/vendor/replicated-sdk-installing#manifest-app) in _Installing the Replicated SDK_
-
-## Check Entitlements Before Installation or Upgrade
-
-To check entitlements before installation:
-
-1. Create or edit a customer to use for testing:
-
-   1. In the Vendor Portal, click **Customers**. Select a customer and click the **Manage customer** tab. Alternatively, click **+ Create customer** to create a new customer. For more information, see [Creating and Managing Customers](/vendor/releases-creating-customer).
-
-   1. Edit the built-in license fields or add custom fields for the customer. For example, you can set a license expiration date in the **Expiration policy** field. Or, you can create a custom field that limits the number of nodes a user is permitted in their cluster. For more information, see [Managing Customer License Fields](/vendor/licenses-adding-custom-fields).
-
-1. In your Helm chart, update the Helm templates with one or more directives to access the license field. For example, you can access the built-in `expires_at` field with `{{ .Values.global.replicated.licenseFields.expires_at }}`. Add the desired logic to control application behavior based on the values of license fields.
-
-   For more information about accessing values files from Helm templates, see [Values Files](https://helm.sh/docs/chart_template_guide/values_files/) in the _Chart Template Guide_ section of the Helm documentation.
-
-1. Test your changes by promoting a new release and installing in a development environment:
-
-   1. Package your Helm chart and its dependencies into a `.tgz` chart archive. See [Packaging a Helm Chart for a Release](helm-install-release).
-
-   1. Add the `.tgz` archive to a release and promote to a development channel, such as Unstable. See [Managing Releases with the Vendor Portal](/vendor/releases-creating-releases).
-
-   1. Install in a development environment using the license ID for the test customer that you created. See [Installing with Helm](install-with-helm).
-
-1. Repeat these steps to add and test new license fields.
-
-================
-File: docs/vendor/licenses-reference-kots-runtime.mdx
-================
-# Querying Entitlements with the KOTS License API
-
-This topic describes how to use the Replicated KOTS License API to query license fields during runtime. The information in this topic applies to applications installed with KOTS.
-
-:::important
-Using the KOTS License API to check entitlements during runtime is _not_ recommended for new applications distributed with Replicated. Instead, Replicated recommends that you include the Replicated SDK with your application and query entitlements during runtime using the SDK in-cluster API. See [Querying Entitlements with the Replicated SDK API](licenses-reference-sdk).
-:::
-
-## Overview
-
-KOTS includes default logic to control access to features in the KOTS Admin Console and KOTS CLI based on the values for the built-in fields in the customer's license. For example, by default, KOTS uses the built-in `expires_at` field to prevent an instance from receiving updates when the customer license expires.
You can add custom logic to your application to control the behavior of your application based on the built-in fields or any of the custom fields that you create.
-
-For information about creating custom license fields, see [Managing Customer License Fields](licenses-adding-custom-fields). For the list of built-in fields in customer licenses, see [Built-In License Fields](/vendor/licenses-using-builtin-fields).
-
-The KOTS Admin Console runs on the customer's cluster and provides entitlement information during application runtime. You can query the admin console `/license/v1/license` endpoint to enforce entitlements at runtime.
-
-## Query Fields
-
-To reference license fields at runtime, send an HTTP request to the admin console `/license/v1/license` endpoint at the following location:
-
-```
-http://kotsadm:3000/license/v1/license
-```
-
-The query returns a response in JSON format. For example:
-
-```json
-{"license_id":"WicPRaoCv1pJ57ZMf-iYRxTj25eZalw3",
-"installation_id":"a4r1s31mj48qw03b5vwbxvm5x0fqtdl6",
-"assignee":"FirstCustomer",
-"release_channel":"Unstable",
-"license_type":"trial",
-"expiration_time":"2026-01-23T00:00:00Z",
-"fields":[
-  {"field":"Customer ID","title":"Customer ID (Internal)","type":"Integer","value":121,"hide_from_customer":true},
-  {"field":"Modules","title":"Enabled Modules","type":"String","value":"Analytics, Integration"}]}
-```
-
-## Parse the API Response
-
-To return a license field value, parse the response using the name of the license field.
-
-For example, the following JavaScript parses the response for the value of a `seat_count` custom field:
-
-```javascript
-import * as rp from "request-promise";
-
-rp({
-  uri: "http://kotsadm:3000/license/v1/license",
-  json: true
-}).then(license => {
-  const seatCount = license.fields.find((field) => {
-    return field.field === "seat_count";
-  });
-  console.log(seatCount.value);
-}).catch(err => {
-  // Handle error response from `kotsadm`
-});
-```
-
-================
-File: docs/vendor/licenses-reference-sdk.mdx
-================
-# Querying Entitlements with the Replicated SDK API
-
-This topic describes how to query license entitlements at runtime using the Replicated SDK in-cluster API. The information in this topic applies to applications installed with Replicated KOTS or Helm.
-
-## Overview
-
-The Replicated SDK retrieves up-to-date customer license information from the Vendor Portal during runtime. This means that any changes to customer licenses are reflected in real time in the customer environment. For example, you can revoke access to your application when a license expires, expose additional product functionality dynamically based on entitlements, and more. For more information about distributing the SDK with your application, see [About the Replicated SDK](replicated-sdk-overview).
-
-After the Replicated SDK is initialized and running in a customer environment, you can use the following SDK API endpoints to get information about the license:
-* `/api/v1/license/info`: List license details, including the license ID, the channel the customer is assigned to, and the license type.
-* `/api/v1/license/fields`: List all the fields in the license.
-* `/api/v1/license/fields/{field_name}`: List details about a specific license field, including the field name, description, type, and the value.
-
-For more information about these endpoints, see [license](/reference/replicated-sdk-apis#license) in _Replicated SDK API_.
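-
-For example, assuming the SDK's `replicated` service is reachable in the cluster on its default port 3000 (as in the examples later in this topic), each endpoint can be queried with an in-cluster request:
-
-```bash
-# List general license details
-curl replicated:3000/api/v1/license/info
-
-# List all license fields
-curl replicated:3000/api/v1/license/fields
-
-# Get a single license field, such as the built-in expires_at field
-curl replicated:3000/api/v1/license/fields/expires_at
-```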
- -## Prerequisite - -Add the Replicated SDK to your application: -* For Helm-based applications, see [Install the SDK as a Subchart](/vendor/replicated-sdk-installing#install-the-sdk-as-a-subchart) in _Installing the Replicated SDK_ -* For applications that use standard Kubernetes manifests, see [Install the SDK Alongside a Standard Manifest-Based Application](/vendor/replicated-sdk-installing#manifest-app) in _Installing the Replicated SDK_ - -## Query License Entitlements at Runtime {#runtime} - -To use the SDK API to query entitlements at runtime: - -1. Create or edit a customer to use for testing: - - 1. In the Vendor Portal, click **Customers**. Select a customer and click the **Manage customer** tab. Alternatively, click **+ Create customer** to create a new customer. For more information, see [Creating and Managing Customers](/vendor/releases-creating-customer). - - 1. Edit the built-in fields and add custom fields for the customer. For example, you can set a license expiration date in the **Expiration policy** field. Or, you can create a custom field that limits the number of nodes a user is permitted in their cluster. For more information, see [Managing Customer License Fields](/vendor/licenses-adding-custom-fields). - -1. (Recommended) Develop against the SDK API `license` endpoints locally: - - 1. Install the Replicated SDK as a standalone component in your cluster. This is called _integration mode_. Installing in integration mode allows you to develop locally against the SDK API without needing to create releases for your application in the Vendor Portal. See [Developing Against the SDK API](/vendor/replicated-sdk-development). - - 1. In your application, add logic to control application behavior based on the customer license information returned by the SDK API service running in your cluster. See [license](/reference/replicated-sdk-apis#license) in _Replicated SDK API (Beta)_. - - **Example:** - - ```bash - curl replicated:3000/api/v1/license/fields/expires_at - ``` - - ```json - { - "name": "expires_at", - "title": "Expiration", - "description": "License Expiration", - "value": "2023-05-30T00:00:00Z", - "valueType": "String", - "signature": { - "v1": "c6rsImpilJhW0eK+Kk37jeRQvBpvWgJeXK2M..." - } - } - ``` - -1. When you are ready to test your changes outside of integration mode, do the following: - - 1. Package your Helm chart and its dependencies (including the Replicated SDK) into a `.tgz` chart archive. See [Packaging a Helm Chart for a Release](helm-install-release). - - 1. Add the `.tgz` archive to a release and promote to a development channel, such as Unstable. See [Managing Releases with the Vendor Portal](/vendor/releases-creating-releases). - - 1. Install in a development environment using the license ID for the test customer that you created. See [Installing with Helm](install-with-helm). - - 1. (Optional) As needed, verify the license information returned by the SDK API in your development environment using port forwarding to access the SDK service locally: - - 1. Use port forwarding to access the `replicated` service from the local development environment on port 3000: - - ```bash - kubectl port-forward service/replicated 3000 - ``` - - The output looks similar to the following: - - ```bash - Forwarding from 127.0.0.1:3000 -> 3000 - ``` - - For more information about `kubectl port-forward`, see [port-forward](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#port-forward) in the kubectl reference documentation. - - 1. 
With the port forward running, in another terminal, use the SDK API to return information about the license.
-
-         **Example:**
-
-         ```bash
-         curl localhost:3000/api/v1/license/fields/expires_at
-         ```
-
-1. Repeat these steps to add and test new license fields.
-
-1. (Recommended) Use signature verification in your application to ensure the integrity of the license field. See [Verifying License Field Signatures with the Replicated SDK API](/vendor/licenses-verify-fields-sdk-api).
-
-================
-File: docs/vendor/licenses-referencing-fields.md
-================
-# Checking Entitlements in Preflights with KOTS Template Functions
-
-This topic describes how to check custom entitlements before installation or upgrade using preflight checks and KOTS template functions in the License context. The information in this topic applies to applications installed with KOTS.
-
-## Overview
-
-KOTS includes default logic to control access to features in the Replicated Admin Console and KOTS CLI based on the values for the built-in fields in the customer's license. For example, by default, KOTS uses the built-in `expires_at` field to prevent an instance from receiving updates when the customer license expires. You can add custom logic to your application to control the behavior of your application based on the built-in fields or any of the custom fields that you create.
-
-For more information, see [Managing Customer License Fields](licenses-adding-custom-fields). For the list of built-in fields in customer licenses, see [Built-In License Fields](/vendor/licenses-using-builtin-fields).
-
-## Add Preflights to Check Entitlements Before Installation or Upgrade {#install}
-
-To enforce entitlements when your customer installs or updates your application,
-you can use the Replicated LicenseFieldValue template function in your application to read the value of license fields. The LicenseFieldValue template function accepts the built-in license fields and any custom fields that you configure. For more information, see [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue) in _License Context_.
-
-For example, a license might limit how many nodes are permitted in a customer's
-cluster. You could define this limit by creating a `node_count` custom license field:
-
-| Name | Key | Type | Description |
-|------|-----|------|-------------|
-| Node Count | node_count | Integer | The maximum number of nodes permitted |
-
-To enforce the node count when a customer installs or updates your application,
-you can use LicenseFieldValue to create a preflight check that references the custom `node_count` field:
-
-```yaml
-apiVersion: troubleshoot.sh/v1beta2
-kind: Preflight
-metadata:
-  name: example-preflight-checks
-spec:
-  analyzers:
-    - nodeResources:
-        checkName: Node Count Check
-        outcomes:
-          - fail:
-              when: 'count() > {{repl LicenseFieldValue "node_count"}}'
-              message: The cluster has more nodes than the {{repl LicenseFieldValue "node_count"}} you are licensed for.
-          - pass:
-              message: The number of nodes matches your license ({{repl LicenseFieldValue "node_count"}})
-```
-
-In the example above, the preflight check uses the `nodeResources` analyzer and the value of the custom `node_count` field to determine if the customer has exceeded the maximum number of nodes permitted by their license. If the preflight check fails, a failure message is displayed to the user and KOTS prevents the installation or upgrade from continuing.
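-
-For instance, assuming a customer license that sets `node_count` to `5` (an illustrative value), KOTS renders the analyzer outcomes as:
-
-```yaml
-outcomes:
-  - fail:
-      when: 'count() > 5'
-      message: The cluster has more nodes than the 5 you are licensed for.
-  - pass:
-      message: The number of nodes matches your license (5)
-```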
-
-For more information about this example, see [How Can I Use License Custom Fields Value in a Pre-Flight Check?](https://help.replicated.com/community/t/how-can-i-use-license-custom-fields-value-in-a-pre-flight-check/624) in Replicated Community.
-
-For more information about defining preflight checks, see [Defining Preflight Checks](preflight-defining).
-
-================
-File: docs/vendor/licenses-using-builtin-fields.mdx
-================
-import GitOpsNotRecommended from "../partials/gitops/_gitops-not-recommended.mdx"
-
-# Built-In License Fields
-
-This topic describes the built-in license fields that appear in customer licenses for applications distributed with Replicated.
-
-## Overview
-
-The license associated with each customer record in the Replicated Vendor Portal includes several built-in fields. These built-in fields include customer properties (such as the customer name, customer email, and the Vendor Portal channel where the customer is assigned), the license expiration date, and the Replicated features that are enabled for the customer (such as the supported install types or Admin Console features).
-
-When a customer installs an application distributed with Replicated, the values for each built-in and custom field in their license can be accessed using the [Replicated SDK](/vendor/replicated-sdk-overview) in-cluster API [license](/reference/replicated-sdk-apis#license) endpoints. Applications installed with a Replicated installer (KOTS, kURL, Embedded Cluster) can also access license fields using the Replicated KOTS [LicenseFieldValue](/reference/template-functions-license-context#licensefieldvalue) template function.
-
-The following shows an example of a customer license:
-
-```yaml
-apiVersion: kots.io/v1beta1
-kind: License
-metadata:
-  name: customertest
-spec:
-  appSlug: gitea
-  channelID: 2iy68JBTkvUqamgD...
-  channelName: Beta
-  channels:
-  - channelID: 2iy68JBTkvUqamgD...
-    channelName: Beta
-    channelSlug: beta
-    endpoint: https://replicated.app
-    isDefault: true
-    isSemverRequired: true
-    replicatedProxyDomain: proxy.replicated.com
-  customerEmail: example@replicated.com
-  customerName: Customer Test
-  endpoint: https://replicated.app
-  entitlements:
-    expires_at:
-      description: License Expiration
-      signature: {}
-      title: Expiration
-      value: ""
-      valueType: String
-  isAirgapSupported: true
-  isEmbeddedClusterDownloadEnabled: true
-  isKotsInstallEnabled: true
-  isSemverRequired: true
-  isSupportBundleUploadSupported: true
-  licenseID: 2sY6ZC2J9sO2...
-  licenseSequence: 4
-  licenseType: prod
-  replicatedProxyDomain: proxy.replicated.com
-  signature: eyJsaWNlbnNlRGF...
-```
-
-## License Field Names
-
-This section lists the built-in fields that are included in customer licenses for applications distributed with Replicated.
-
-:::note
-The built-in license fields are reserved field names.
-:::
-
-### General License Fields
-
-<table>
-  <tr>
-    <td>Field Name</td>
-    <td>Description</td>
-  </tr>
-  <tr>
-    <td>`appSlug`</td>
-    <td>The unique application slug that the customer is associated with. This value never changes.</td>
-  </tr>
-  <tr>
-    <td>`channelID`</td>
-    <td>The ID of the channel where the customer is assigned. When the customer's assigned channel changes, the latest release from that channel will be downloaded on the next update check.</td>
-  </tr>
-  <tr>
-    <td>`channelName`</td>
-    <td>The name of the channel where the customer is assigned.
When the customer's assigned channel changes, the latest release from that channel will be downloaded on the next update check.</td> - </tr> - <tr> - <td>`licenseID`, `licenseId`</td> - <td>Unique ID for the installed license. This value never changes.</td> - </tr> - <tr> - <td>`customerEmail`</td> - <td>The customer email address.</td> - </tr> - <tr> - <td>`endpoint`</td> - <td>For applications installed with a Replicated installer (KOTS, kURL, Embedded Cluster), this is the endpoint that the KOTS Admin Console uses to synchronize the licenses and download updates. This is typically `https://replicated.app`.</td> - </tr> - <tr> - <td>`entitlementValues`</td> - <td>Includes both the built-in `expires_at` field and any custom license fields. For more information about adding custom license fields, see [Managing Customer License Fields](licenses-adding-custom-fields).</td> - </tr> - <tr> - <td>`expires_at`</td> - <td><p>Defines the expiration date for the license. The date is encoded in ISO 8601 format (`2026-01-23T00:00:00Z`) and is set to midnight UTC at the beginning of the calendar day (`00:00:00`) on the date selected. If a license does not expire, this field is missing.</p><p>For information about the default behavior when a license expires, see [License Expiration Handling](licenses-about#expiration) in _About Customers_.</p></td> - </tr> - <tr> - <td>`licenseSequence`</td> - <td>Every time a license is updated, its sequence number is incremented. This value represents the license sequence that the client currently has.</td> - </tr> - <tr> - <td>`customerName`</td> - <td>The name of the customer.</td> - </tr> - <tr> - <td>`signature`</td> - <td>The base64-encoded license signature. This value will change when the license is updated.</td> - </tr> - <tr> - <td>`licenseType`</td> - <td>A string value that describes the type of the license, which is one of the following: `paid`, `trial`, `dev`, `single-tenant-vendor-managed` or `community`. For more information about license types, see [Managing License Type](licenses-about-types).</td> - </tr> -</table> - -### Install Types - -The table below describes the built-in license fields related to the supported install type(s). For more information, see [Managing Install Types for a License](/vendor/licenses-install-types). - -<table> - <tr> - <td>Field Name</td> - <td>Description</td> - </tr> - <tr> - <td>`isEmbeddedClusterDownloadEnabled`</td> - <td><p>If a license supports installation with Replicated Embedded Cluster, this field is set to `true` or missing. If Embedded Cluster installations are not supported, this field is `false`.</p><p>This field requires that the vendor has the Embedded Cluster entitlement and that the release at the head of the channel includes an [Embedded Cluster Config](/reference/embedded-config) custom resource. This field also requires that the "Install Types" feature is enabled for your Vendor Portal team. Reach out to your Replicated account representative to get access.</p></td> - </tr> - <tr> - <td>`isHelmInstallEnabled`</td> - <td><p>If a license supports Helm installations, this field is set to `true` or missing. If Helm installations are not supported, this field is set to `false`. This field requires that the vendor packages the application as Helm charts and, optionally, Replicated custom resources.</p><p> This field requires that the "Install Types" feature is enabled for your Vendor Portal team. 
Reach out to your Replicated account representative to get access.</p></td>
-  </tr>
-  <tr>
-    <td>`isKotsInstallEnabled`</td>
-    <td><p>If a license supports installation with Replicated KOTS, this field is set to `true`. If KOTS installations are not supported, this field is either `false` or missing.</p><p>This field requires that the vendor has the KOTS entitlement.</p></td>
-  </tr>
-  <tr>
-    <td>`isKurlInstallEnabled`</td>
-    <td><p>If a license supports installation with Replicated kURL, this field is set to `true` or missing. If kURL installations are not supported, this field is `false`.</p><p>This field requires that the vendor has the kURL entitlement and a promoted kURL installer spec. This field also requires that the "Install Types" feature is enabled for your Vendor Portal team. Reach out to your Replicated account representative to get access.</p></td>
-  </tr>
-</table>
-
-### Install Options
-
-The table below describes the built-in license fields related to install options.
-
-<table>
-  <tr>
-    <td>Field Name</td>
-    <td>Description</td>
-  </tr>
-  <tr>
-    <td>`isAirgapSupported`</td>
-    <td><p>If a license supports air gap installations with the Replicated installers (KOTS, kURL, Embedded Cluster), then this field is set to `true`. If Replicated installer air gap installations are not supported, this field is missing.</p><p>When you enable this field for a license, the `license.yaml` file will have license metadata embedded in it and must be re-downloaded.</p></td>
-  </tr>
-  <tr>
-    <td>`isHelmAirgapEnabled`</td>
-    <td><p>If a license supports Helm air gap installations, then this field is set to `true` or missing. If Helm air gap installations are not supported, this field is `false`.</p><p>When you enable this feature, the `license.yaml` file will have license metadata embedded in it and must be re-downloaded.</p><p>This field requires that the "Install Types" feature is enabled for your Vendor Portal team. Reach out to your Replicated account representative to get access.</p></td>
-  </tr>
-</table>
-
-### Admin Console Feature Options
-
-The table below describes the built-in license fields related to the Admin Console feature options. The Admin Console feature options apply only to licenses that support installation with the Replicated installers (KOTS, kURL, Embedded Cluster).
-
-<table>
-  <tr>
-    <td>Field Name</td>
-    <td>Description</td>
-  </tr>
-  <tr>
-    <td>`isDisasterRecoverySupported`</td>
-    <td>If a license supports the Embedded Cluster disaster recovery feature, this field is set to `true`. If a license does not support disaster recovery for Embedded Cluster, this field is either missing or `false`. **Note**: Embedded Cluster Disaster Recovery is in Alpha. To get access to this feature, reach out to Alex Parker at [alexp@replicated.com](mailto:alexp@replicated.com). For more information, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery).</td>
-  </tr>
-  <tr>
-    <td>`isGeoaxisSupported`</td>
-    <td>(kURL Only) If a license supports integration with GeoAxis, this field is set to `true`. If GeoAxis is not supported, this field is either `false` or missing. **Note**: This field requires that the vendor has the GeoAxis entitlement. It also requires that the vendor has access to the Identity Service entitlement.</td>
-  </tr>
-  <tr>
-    <td>`isGitOpsSupported`</td>
-    <td><GitOpsNotRecommended/> If a license supports the KOTS Auto-GitOps workflow in the Admin Console, this field is set to `true`.
If Auto-GitOps is not supported, this field is either `false` or missing. See [KOTS Auto-GitOps Workflow](/enterprise/gitops-workflow).</td> - </tr> - <tr> - <td>`isIdentityServiceSupported`</td> - <td>If a license supports identity-service enablement for the Admin Console, this field is set to `true`. If identity service is not supported, this field is either `false` or missing. **Note**: This field requires that the vendor have access to the Identity Service entitlement.</td> - </tr> - <tr> - <td>`isSemverRequired`</td> - <td>If set to `true`, this field requires that the Admin Console orders releases according to Semantic Versioning. This field is controlled at the channel level. For more information about enabling Semantic Versioning on a channel, see [Semantic Versioning](releases-about#semantic-versioning) in _About Releases_.</td> - </tr> - <tr> - <td>`isSnapshotSupported`</td> - <td>If a license supports the snapshots backup and restore feature, this field is set to `true`. If a license does not support snapshots, this field is either missing or `false`. **Note**: This field requires that the vendor have access to the Snapshots entitlement.</td> - </tr> - <tr> - <td>`isSupportBundleUploadSupported`</td> - <td>If a license supports uploading a support bundle in the Admin Console, this field is set to `true`. If a license does not support uploading a support bundle, this field is either missing or `false`.</td> - </tr> -</table> - -================ -File: docs/vendor/licenses-verify-fields-sdk-api.md -================ -# Verifying License Field Signatures with the Replicated SDK API - -This topic describes how to verify the signatures of license fields when checking customer license entitlements with the Replicated SDK. - -## Overview - -To prevent man-in-the-middle attacks or spoofing by your customers, license fields are cryptographically signed with a probabilistic signature scheme (PSS) signature to ensure their integrity. The PSS signature for a license field is included in the response from the Replicated SDK API `/license/fields` and `/license/fields/{field-name}` endpoints as a Base64 encoded string. - -The following shows an example of a Base64 encoded PSS signature for an `expires_at` field returned by the SDK API: - -```json -{ - "name": "expires_at", - "title": "Expiration", - "description": "License Expiration", - "value": "2023-05-30T00:00:00Z", - "valueType": "String", - "signature": { - "v1": "c6rsImpilJhW0eK+Kk37jeRQvBpvWgJeXK2MD0YBlIAZEs1zXpmvwLdfcoTsZMOj0lZbxkPN5dPhEPIVcQgrzfzwU5HIwQbwc2jwDrLBQS4hGOKdxOWXnBUNbztsHXMqlAYQsmAhspRLDhBiEoYpFV/8oaaAuNBrmRu/IVAW6ahB4KtP/ytruVdBup3gn1U/uPAl5lhzuBifaW+NDFfJxAX..." - } -} -``` - -Replicated recommends that you use signature verification to ensure the integrity of each license field you use in your application. For more information about how to check entitlements in your application for Helm CLI installations, see [Checking Entitlements in Helm Charts Before Deployment](licenses-reference-helm). - -## Requirement - -Include the Replicated SDK as a dependency of your application Helm chart. For more information, see [Install the SDK as a Subchart](replicated-sdk-installing#install-the-sdk-as-a-subchart) in _Installing the Replicated SDK_. - -## Use Your Public Key to Verify License Field Signatures - -In your application, you can use your public key (available in the Vendor Portal) and the MD5 hash of a license field value to verify the PSS signature of the license field. 
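-
-For example, the following minimal Node.js sketch shows the general shape of this check. It is not one of the Vendor Portal code samples, and it assumes that the signature is an RSA-PSS signature over the MD5 hash of the raw field value (the requirements described later in this topic):
-
-```javascript
-import * as crypto from "crypto";
-
-// publicKeyPem: the public key copied from the Vendor Portal (PEM format).
-// field: the JSON object returned for a single license field, as shown above.
-function verifyLicenseField(publicKeyPem, field) {
-  // The signature is Base64 encoded and must be decoded before verification.
-  const signature = Buffer.from(field.signature.v1, "base64");
-  // crypto.verify computes the MD5 digest of the raw value internally and
-  // then checks the RSA-PSS signature against it.
-  return crypto.verify(
-    "md5",
-    Buffer.from(String(field.value)),
-    {
-      key: publicKeyPem,
-      padding: crypto.constants.RSA_PKCS1_PSS_PADDING,
-      saltLength: crypto.constants.RSA_PSS_SALTLEN_AUTO,
-    },
-    signature
-  );
-}
-```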
-
-To use your public key to verify license field signatures:
-
-1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Settings** page.
-
-1. Click the **Replicated SDK Signature Verification** tab.
-
-   ![signature verification page](/images/signature-verification.png)
-   [View a larger version of this image](/images/signature-verification.png)
-
-1. Under **Your public key**, copy the key and save it in a secure location.
-
-1. (Optional) Under **Verification**, select the tab for the applicable programming language, and copy the code sample provided.
-
-1. In your application, add logic that uses the public key to verify the integrity of license field signatures. If you copied one of the code samples from the Vendor Portal in the previous step, paste it into your application and make any additional edits as required.
-
-   If you are not using one of the code samples provided, consider the following requirements for verifying license field values:
-   * License field signatures included in the response from the SDK API `/license/fields` and `/license/fields/{field-name}` endpoints are Base64 encoded and must be decoded before they are verified.
-   * The MD5 hash of the license field value is required to verify the signature of the license field. The raw license field value is included in the response from the SDK API `/license/fields` and `/license/fields/{field-name}` endpoints. The MD5 hash of the value must be calculated and used for signature verification.
-
-================
-File: docs/vendor/namespaces.md
-================
-# Application Namespaces
-
-Replicated strongly recommends that applications are architected to deploy a single application into a single namespace when possible.
-
-If you are distributing your application with Replicated KOTS, you can implement an architecture in which a single application is deployed into a single namespace.
-
-To do this, omit the `metadata.namespace` field from your application manifests. Do not use the Config custom resource object to make the namespace user-configurable.
-
-When you do not specify a namespace in application manifests, KOTS deploys to whatever namespace it is already running in. This gives the most flexibility when deploying to end user environments, as users already select the namespace where KOTS runs. Scoping to a single namespace also allows the app to run with minimal Kubernetes permissions, which can reduce friction when an application runs as a tenant in a large cluster. Overall, letting the end user manage namespaces is the simplest approach.
-
-The following examples demonstrate the recommended approach of excluding the namespace from the application manifests, as well as the incorrect approaches of hardcoding the namespace or injecting the namespace as a user-supplied value:
-
-**Recommended**
-
-```yaml
-# good, namespace absent
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: spline-reticulator
-spec:
-```
-
-**Not Recommended**
-
-```yaml
-# bad, hardcoded
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: spline-reticulator
-  namespace: graphviz-pro
-spec:
-```
-
-```yaml
-# bad, configurable
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: spline-reticulator
-  namespace: repl{{ ConfigOption "gv_namespace" }}
-spec:
-```
-
-================
-File: docs/vendor/offsite-backup.md
-================
-# Offsite Data Backup
-
-Replicated stores customer data in multiple databases across Amazon Web
-Services (AWS) S3 buckets. Clustering and network redundancies help to avoid a
-single point of failure.
-
-The offsite data backup add-on provides additional redundancy by copying data to
-an offsite Google Cloud Provider (GCP) storage location. This helps to mitigate
-any potential data loss caused by an outage to AWS.
-
-:::note
-The offsite data backup add-on is available only to [Replicated Enterprise](https://www.replicated.com/pricing/) customers at an additional cost. Please [open a product request](https://vendor.replicated.com/support?requestType=feature&productArea=vendor) if you are interested in this feature.
-:::
-
-## Overview
-
-When the offsite data backup add-on is enabled, data is migrated from Replicated's existing AWS S3 buckets to a dedicated second set of AWS S3 buckets. These buckets are only used for vendors with this add-on enabled, and all vendor data remains logically isolated by vendor Team. After data is migrated from existing S3 buckets to the secondary buckets,
-all data is deleted from the original S3 buckets.
-
-To ensure customer data in the offsite GCP storage remains up-to-date, the GCP
-account uses the Google Storage Transfer service to synchronize at least daily with the
-secondary dedicated S3 buckets.
-
-The offsite GCP data backup functions only as secondary data storage and does not serve customer
-data. Customer data continues to be served from the AWS S3 buckets. In the case of an AWS outage, Replicated can use a manual
-process to restore customer data from the GCP backups into a production-grade database.
-
-For more information, see [Architecture](#architecture) below.
-
-## Architecture
-
-The following diagram shows the flow of air gap build data and registry image data
-when the offsite data backup add-on is enabled. The flow of data that is backed
-up offsite in GCP is depicted with green arrows.
-
-![architecture of offsite data storage with the offsite data backup add-on](../../static/images/offsite-backup.png)
-
-[View a larger version of this image](../../static/images/offsite-backup.png)
-
-As shown in the diagram above, when the offsite data backup add-on is enabled,
-registry and air gap data are stored in dedicated S3 buckets. Both of
-these dedicated S3 buckets back up data to offsite storage in GCP.
-
-The diagram also shows how customer installations continue to pull data from the
-vendor registry and the customer portal when offsite data backup is enabled.
-
-================
-File: docs/vendor/operator-defining-additional-images.mdx
-================
-import AirGapBundle from "../partials/airgap/_airgap-bundle.mdx"
-
-# Defining Additional Images
-
-This topic describes how to define additional images to be included in the `.airgap` bundle for a release.
-
-## Overview
-
-<AirGapBundle/>
-
-When building the `.airgap` bundle for a release, the Replicated Vendor Portal finds and includes all images defined in the Pod specs for the release. During installation or upgrade, KOTS retags images from the `.airgap` bundle and pushes them to the registry configured in KOTS.
-
-Any required images that are _not_ defined in your application manifests must be listed in the `additionalImages` attribute of the KOTS Application custom resource. This ensures that the images are included in the `.airgap` bundle for the release.
-
-## Define Additional Images for Air Gap Bundles
-
-KOTS supports including the following types of images in the `additionalImages` field:
-
-* Public images referenced by the docker-pullable image name.
-* Images pushed to a private registry that was configured in the Vendor Portal, referenced by the docker-pullable upstream image name. For more information about configuring private registries, see [Connecting to an External Registry](/vendor/packaging-private-images).
-  :::note
-  If you use the [Replicated proxy registry](/vendor/private-images-about) for online (internet-connected) installations, be sure to use the _upstream_ image name in the `additionalImages` field, rather than referencing the location of the image at `proxy.replicated.com`.
-  :::
-* Images pushed to the Replicated registry referenced by the `registry.replicated.com` name.
-
-The following example demonstrates adding multiple images to `additionalImages`:
-
-```yaml
-apiVersion: kots.io/v1beta1
-kind: Application
-metadata:
-  name: my-app
-spec:
-  additionalImages:
-    - elasticsearch:7.6.0
-    - quay.io/orgname/private-image:v1.2.3
-    - registry.replicated.com/my-operator/my-private-image:abd123f
-```
-
-================
-File: docs/vendor/operator-defining-additional-namespaces.md
-================
-# Defining Additional Namespaces
-
-Operators often need to be able to manage resources in multiple namespaces in the cluster.
-When deploying an application to an existing cluster, Replicated KOTS creates a Kubernetes Role and RoleBinding that are limited to only accessing the namespace that the application is being installed into.
-
-In addition to RBAC policies, clusters running in air gap environments or clusters that are configured to use a local registry also need to ensure that image pull secrets exist in all namespaces that the operator will manage resources in.
-
-## Creating additional namespaces
-
-An application can identify additional namespaces to create at installation time.
-You can define these additional namespaces in the Application custom resource by adding an `additionalNamespaces` attribute to the Application custom resource manifest file. For more information, see [Application](../reference/custom-resource-application) in the _Custom Resources_ section.
-
-When these are defined, `kots install` will create the namespaces and ensure that the KOTS Admin Console has full access to manage resources in these namespaces.
-This is accomplished by creating a Role and RoleBinding per namespace, and setting the Subject to the Admin Console service account.
-If the current user account does not have access to create these additional namespaces, the installer will show an error and fail.
-
-```yaml
-apiVersion: kots.io/v1beta1
-kind: Application
-metadata:
-  name: my-operator
-spec:
-  additionalNamespaces:
-    - namespace1
-    - namespace2
-```
-
-In addition to creating these namespaces, the Admin Console will ensure that the application pull secret exists in them, and that this secret has access to pull the application images. This includes both the images that the application uses and the additional images defined in the Application custom resource manifest. For more information, see [Defining Additional Images](operator-defining-additional-images).
-
-The pull secret name can be obtained using the [ImagePullSecretName](../reference/template-functions-config-context/#imagepullsecretname) template function.
-An operator can reliably depend on this secret existing in all installs (online and air gapped), and can use this secret name in any created `PodSpec` to pull private images.
-
-## Dynamic namespaces
-
-Some applications need access to dynamically created namespaces or even all namespaces.
-In this case, an application spec can list `"*"` as one of its `additionalNamespaces` in the Application manifest file.
-When KOTS encounters the wildcard, it will not create any namespaces, but it will ensure that the application image pull secret is copied to all namespaces.
-The Admin Console will run an informer internally to watch namespaces in the cluster, and when a new namespace is created, the secret will automatically be copied to it.
-
-```yaml
-apiVersion: kots.io/v1beta1
-kind: Application
-metadata:
-  name: my-operator
-spec:
-  additionalNamespaces:
-    - "*"
-```
-
-When the wildcard (`"*"`) is listed in `additionalNamespaces`, KOTS will use a ClusterRole and ClusterRoleBinding for the Admin Console.
-This will ensure that the Admin Console will continue to have permissions to all newly created namespaces, even after the install has finished.
-
-================
-File: docs/vendor/operator-packaging-about.md
-================
-# About Packaging a Kubernetes Operator Application
-
-Kubernetes Operators can be packaged and delivered as an application using the same methods as other Kubernetes applications.
-
-Operators are good for [specific use cases](https://blog.replicated.com/operators-in-kots/). In general, we recommend thinking deeply about the problem space an application solves before going down the Operator path because, although powerful, Operators take a lot of time to build and maintain.
-
-Operators are generally defined using one or more `CustomResourceDefinition` manifests, and the controller is often a `StatefulSet`, along with other additional objects.
-These Kubernetes manifests can be included in an application by adding them to a release and promoting the release to a channel.
-
-Kubernetes Operators differ from traditional applications because they interact with the Kubernetes API to create and manage other objects at runtime.
-When a `CustomResource` is deployed to the cluster that has the operator running, the Operator may need to create new Kubernetes objects to fulfill the request.
-When an Operator creates an object that includes a `PodSpec`, the Operator should use locally-available images in order to remain compatible with air gapped environments and customers who have configured a local registry to push all images to.
-Even environments that aren't air gapped may need access to private images that are included as part of the application at runtime.
-
-To support this, an application can define the additional images that it requires, and KOTS exposes the local registry details (endpoint, namespace, and secrets) to the application so that they can be referenced when creating a `PodSpec` at runtime.
-
-================
-File: docs/vendor/operator-referencing-images.md
-================
-# Referencing Images
-
-This topic explains how to support the use of private image registries for applications that are packaged with Kubernetes Operators.
-
-## Overview
-
-To support the use of private images in all environments, the Kubernetes Operator code must use KOTS functionality to determine the image name and image pull secrets for all pods when they are created.
-
-There are several template functions available to assist with this.
-Typically, this requires adding two new environment variables to the manager to read these values.
-
-The steps to ensure that an Operator is using the correct image names and has the correct image pull secrets in dynamically created pods are:
-
-1. 
Add a new environment variable to the Manager Pod so that the Manager knows the location of the private registry, if one is set.
-2. Add a new environment variable to the Manager Pod so that the Manager also knows the `imagePullSecret` that's needed to pull the local image.
-
-## Step 1: Add a reference to the local registry
-
-The manager of an Operator is often a `StatefulSet`, but could be a `Deployment` or another kind.
-Regardless of where the spec is defined, the location of the private images can be read using the Replicated KOTS template functions. For more information about using template functions, see [About Template Functions](/reference/template-functions-about).
-
-#### Option 1: Define each image
-
-If an Operator only requires one additional image, the easiest way to determine this location is to use the `LocalImageName` function.
-This will always return the image name to use, whether the customer's environment is configured to use a local registry or not.
-
-**Example:**
-
-```yaml
-env:
-  - name: IMAGE_NAME_ONE
-    value: 'repl{{ LocalImageName "elasticsearch:7.6.0" }}'
-```
-
-For online installations (no local registry), this will be written with no changes -- the variable will contain `elasticsearch:7.6.0`.
-For installations that are air gapped or have a locally-configured registry, this will be rewritten as the locally referenceable image name. For example, `registry.somebigbank.com/my-app/elasticsearch:7.6.0`.
-
-**Example:**
-
-```yaml
-env:
-  - name: IMAGE_NAME_TWO
-    value: 'repl{{ LocalImageName "quay.io/orgname/private-image:v1.2.3" }}'
-```
-
-In the above example, the image is private, so it will always be rewritten. For online installations, this will return `proxy.replicated.com/proxy/app-name/quay.io/orgname/private-image:v1.2.3`, and for installations with a locally-configured registry it will return `registry.somebigbank.com/org/my-app-private-image:v1.2.3`.
-
-#### Option 2: Build image names manually
-
-For applications that have multiple images or dynamically construct the image name at runtime, the KOTS template functions can also return the elements that make up the local registry endpoint and secrets, and let the application developer construct the locally-referenceable image name.
-
-**Example:**
-
-```yaml
-env:
-  - name: REGISTRY_HOST
-    value: 'repl{{ LocalRegistryHost }}'
-  - name: REGISTRY_NAMESPACE
-    value: 'repl{{ LocalRegistryNamespace }}'
-```
-
-## Step 2: Determine the imagePullSecret
-
-Private, local images will need to reference an image pull secret to be pulled.
-The value of the secret's `.dockerconfigjson` is provided in a template function, and the application can write this pull secret as a new secret to the namespace.
-If the application is deploying the pod to the same namespace as the Operator, the pull secret will already exist in the namespace, and the secret name can be obtained using the [ImagePullSecretName](../reference/template-functions-config-context/#imagepullsecretname) template function.
-KOTS will create this secret automatically, but only in the namespace that the Operator is running in.
-It's the responsibility of the application developer (the Operator code) to ensure that this secret is present in any namespace that new pods will be deployed to.
-
-The `LocalRegistryImagePullSecret` template function returns the base64-encoded docker auth that can be written directly to a secret, and referenced in the `imagePullSecrets` attribute of the PodSpec.
-
-```yaml
-apiVersion: v1
-kind: Secret
-metadata:
-  name: myregistrykey
-  namespace: awesomeapps
-data:
-  .dockerconfigjson: '{{repl LocalRegistryImagePullSecret }}'
-type: kubernetes.io/dockerconfigjson
-```
-
-This will return an image pull secret for the locally configured registry.
-
-If your application has both public and private images, it is recommended that the image name is passed when constructing the image pull secret for the locally configured registry. This ensures that installs without a local registry can differentiate between private, proxied, and public images.
-
-**Example:**
-
-```yaml
-apiVersion: v1
-kind: Secret
-metadata:
-  name: my-pull-secret
-  namespace: awesomeapps
-data:
-  .dockerconfigjson: '{{repl LocalRegistryImagePullSecret }}'
-type: kubernetes.io/dockerconfigjson
-```
-
-In the above example, the `LocalRegistryImagePullSecret` function will return an empty auth array if the installation is not air gapped, does not have a local registry configured, and the `elasticsearch:7.6.0` image is public.
-If the image is private, the function will return the license-key derived pull secret.
-And finally, if the installation is using a local registry, the image pull secret will contain the credentials needed to pull from the local registry.
-In other words, for installations without a local registry the secret contains the Replicated license secret (or an empty auth array for public images), and for installations with a local registry it contains the credentials for the local registry.
-
-## Using the local registry at runtime
-
-The developer of the Operator should use these environment variables to change the `image.name` in any deployed PodSpec to ensure that it will work in air gapped environments.
-
-================
-File: docs/vendor/orchestrating-resource-deployment.md
-================
-import WeightLimitation from "../partials/helm/_helm-cr-weight-limitation.mdx"
-import HooksLimitation from "../partials/helm/_hooks-limitation.mdx"
-import HookWeightsLimitation from "../partials/helm/_hook-weights-limitation.mdx"
-
-# Orchestrating Resource Deployment
-
-This topic describes how to orchestrate the deployment order of resources deployed as part of your application. The information in this topic applies to Helm chart- and standard manifest-based applications deployed with Replicated KOTS.
-
-## Overview
-
-Many applications require that certain resources are deployed and in a ready state before other resources can be deployed.
-
-When installing an application that includes one or more Helm charts, KOTS always deploys standard Kubernetes manifests to the cluster _before_ deploying any Helm charts. For example, if your release contains a Helm chart, a CRD, and a ConfigMap, then the CRD and ConfigMap resources are deployed before the Helm chart.
-
-For applications deployed with KOTS, you can manage the order in which resources are deployed using the following methods:
-
-* For Helm charts, set the `weight` property in the corresponding HelmChart custom resource. See [HelmChart `weight`](#weight).
-
-* For standard manifests, add KOTS annotations to the resources. See [Standard Manifest Deployment Order with KOTS Annotations](#manifests).
- -## Helm Chart Deployment Order with `weight` {#weight} - -You can configure the [`weight`](/reference/custom-resource-helmchart-v2#weight) property of the Replicated HelmChart custom resource to define the order in which the Helm charts in your release are installed. - -KOTS directs Helm to install the Helm charts based on the value of `weight` in ascending order, deploying the chart with the lowest weight first. Any dependencies are installed along with the parent chart. For example, a chart with a `weight` of `-1` deploys before a chart with a `weight` of `0`. - -The value for the `weight` property can be any negative or positive integer or `0`. By default, when you do not provide a `weight` for a Helm chart, the `weight` is `0`. - -For example: - -```yaml -apiVersion: kots.io/v1beta2 -kind: HelmChart -metadata: - name: samplechart -spec: - chart: - name: samplechart - chartVersion: 3.1.7 - releaseName: samplechart-release-1 - # weight determines the order that charts are applied, with lower weights first. - weight: 4 -``` - -#### Limitations - -The `weight` field in the HelmChart custom resource has the following limitations: - -* <WeightLimitation/> - -* When installing a Helm chart-based application, KOTS always deploys standard Kubernetes manifests to the cluster _before_ deploying Helm charts. For example, if your release contains a Helm chart, a CRD, and a ConfigMap, then the CRD and ConfigMap resources are deployed before the Helm chart. The `weight` property does not allow Helm charts to be deployed before standard manifests. - -## Standard Manifest Deployment Order with KOTS Annotations {#manifests} - -You can use the KOTS annotations described in this section to control the order in which standard manifests are deployed. - -### Requirement - -You must quote the boolean or integer values in annotations because Kubernetes annotations must be strings. For more information about working with annotations in Kubernetes resources, see [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) in the Kubernetes documentation. - -### `kots.io/creation-phase` - -When the `kots.io/creation-phase: '<integer>'` annotation is present on a resource, KOTS groups the resource into the specified creation phase. KOTS deploys each phase in order from lowest to highest. Phases can be any positive or negative integer ranging from `'-9999'` to `'9999'`. - -Resources in the same phase are deployed in the same order that Helm installs resources. To view the order in which KOTS deploys resources of the same phase, see [Helm installs resources in the following order](https://helm.sh/docs/intro/using_helm/#:~:text=Helm%20installs%20resources%20in%20the,order) in the Helm documentation. - -The following example deploys the `CustomResourceDefinition` before the default creation phase: - -```yaml -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: myresources.example.com - annotations: - kots.io/creation-phase: "-1" -... -``` - -### `kots.io/deletion-phase` - -When the `kots.io/deletion-phase: '<integer>'` annotation is present on a resource, KOTS groups the resource into the specified deletion phase. KOTS deletes each phase in order from lowest to highest. Resources within the same phase are deleted in the reverse order from which they were created. Phases can be any positive or negative integer ranging from `'-9999'` to `'9999'`. 
-
-The following example deploys the `CustomResourceDefinition` before the default creation phase and deletes the resource after the default deletion phase:
-
-```yaml
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
-  name: myresources.example.com
-  annotations:
-    kots.io/creation-phase: "-1"
-    kots.io/deletion-phase: "1"
-...
-```
-
-### `kots.io/wait-for-ready`
-
-When the `kots.io/wait-for-ready: '<bool>'` annotation is present on a resource and evaluates to `'true'`, KOTS waits for the resource to be in a ready state before deploying any other resources. For most resource types, KOTS has existing logic to determine if a resource is ready. If there is no existing logic for the given resource type, then KOTS waits until the resource exists and is queryable from the Kubernetes API server.
-
-In the following example, KOTS waits for the Postgres `StatefulSet` to be ready before continuing to deploy other resources:
-
-```yaml
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: postgresql
-  annotations:
-    kots.io/wait-for-ready: 'true'
-  labels:
-    app: postgresql
-spec:
-  selector:
-    matchLabels:
-      app: postgresql
-  template:
-    metadata:
-      labels:
-        app: postgresql
-    spec:
-      containers:
-        - name: postgresql
-          image: "postgres:9.6"
-...
-```
-
-### `kots.io/wait-for-properties`
-
-When the `kots.io/wait-for-properties: '<jsonpath>=<value>,<jsonpath>=<value>'` annotation is present on a resource, KOTS waits for one or more specified resource properties to match the desired values before deploying other resources. This annotation is useful when the `kots.io/wait-for-ready` annotation, which waits for a resource to exist, is not sufficient.
-
-The value for this annotation is a comma-separated list of key-value pairs, where the key is a JSONPath specifying the path to the property and the value is the desired value for the property. In the following example, KOTS waits for a resource to reach a desired state before deploying other resources. In this case, KOTS waits until each of the three status properties has the target value:
-
-```yaml
-kind: MyResource
-metadata:
-  name: my-resource
-  annotations:
-    kots.io/wait-for-properties: '.status.tasks.extract=true,.status.tasks.transform=true,.status.tasks.load=true'
-...
-status:
-  tasks:
-    extract: false
-    transform: false
-    load: false
-```
-
-================
-File: docs/vendor/packaging-air-gap-excluding-minio.md
-================
-# Excluding MinIO from Air Gap Bundles (Beta)
-
-The Replicated KOTS Admin Console requires an S3-compatible object store to store application archives and support bundles. By default, KOTS deploys MinIO to satisfy the object storage requirement. For more information about the options for installing without MinIO in existing clusters, see [Installing KOTS in Existing Clusters Without Object Storage](/enterprise/installing-stateful-component-requirements).
-
-As a software vendor, you can exclude MinIO images from all Admin Console air gap distributions (`kotsadm.tar.gz`) in the download portal. Excluding MinIO from the `kotsadm.tar.gz` air gap bundle is useful if you want to prevent MinIO images from appearing in the air gap distribution that your end users download. It also reduces the file size of `kotsadm.tar.gz`.
-
-:::note
-You can still retrieve a bundle with MinIO images from the KOTS release page in GitHub when this feature is enabled. See [replicatedhq/kots](https://github.com/replicatedhq/kots/releases/) in GitHub.
-::: - -To exclude MinIO from the `kotsadm.tar.gz` Admin Console air gap bundle: - -1. Log in to your Vendor Portal account. Select **Support** > **Request a feature**, and submit a feature request for "Exclude MinIO image from air gap bundle". After this feature is enabled, all `kotsadm.tar.gz` files in the download portal will not include MinIO. - -1. Instruct your end users to set the flag `--with-minio=false` with the `kots install` command during an air gap installation. For more information about setting this runtime flag, see [Installing KOTS in Existing Clusters Without Object Storage](/enterprise/installing-stateful-component-requirements). - - :::important - If you have this feature enabled in your Team account and the end user does not include `--with-minio=false` with the `kots install` command, then the installation fails. - ::: - -================ -File: docs/vendor/packaging-cleaning-up-jobs.md -================ -# Cleaning Up Kubernetes Jobs - -This topic describes how to use the Replicated KOTS `kots.io/hook-delete-policy` annotation to remove Kubernetes job objects from the cluster after they complete. - -## About Kubernetes Jobs - -Kubernetes Jobs are designed to run and then terminate. But, they remain in the namespace after completion. Because Job objects are immutable, this can cause conflicts and errors when attempting to update the Job later. - -A common workaround is to use a content SHA from the Job object in the name. However, a user can update their application instance through various events (upstream update, license sync, config update, CLI upload). If the Job is already completed, it is an error to reapply the same job to the cluster again. - -The built-in Replicated KOTS operator/controller can help by deleting Jobs upon completion. -This allows the same Job to be deployed again without polluting the namespace with completed Jobs. - -For more information about Job objects, see [Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) in the Kubernetes documentation. - -## KOTS `hook-delete-policy` Annotation - -To enable the built-in KOTS operator/controller to automatically delete Jobs when they complete, specify a delete hook policy as an annotation on the Job object. - -The KOTS annotation key is `kots.io/hook-delete-policy` and there are two possible values (you can use both simultaneously): `hook-succeeded` and `hook-failed`. - -When this annotation is present and includes `hook-succeeded`, the job is deleted when it completes successfully. -If this annotation is present and includes `hook-failed`, the job is deleted on failure. - -For Helm charts deployed with KOTS, KOTS automatically adds this `kots.io/hook-delete-policy` annotation to any Job objects in the Helm chart that include a `helm.sh/hook-delete-policy` annotation. This means that there is nothing extra to configure when deploying a Helm chart with Helm delete hooks. 
- -The following example shows a Job object with the `kots.io/hook-delete-policy` annotation: - -```yaml -apiVersion: batch/v1 -kind: Job -metadata: - name: pi - annotations: - "kots.io/hook-delete-policy": "hook-succeeded, hook-failed" -spec: - template: - spec: - containers: - - name: pi - image: perl - command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"] - restartPolicy: Never - backoffLimit: 4 -``` - -================ -File: docs/vendor/packaging-embedded-kubernetes.mdx -================ -import Installers from "../partials/kurl/_installers.mdx" -import KurlAvailability from "../partials/kurl/_kurl-availability.mdx" - -# Creating a kURL Installer - -<KurlAvailability/> - -This topic describes how to create a kURL installer spec in the Replicated Vendor Portal to support installations with Replicated kURL. - -For information about creating kURL installers with the Replicated CLI, see [installer create](/reference/replicated-cli-installer-create). - -## Overview - -<Installers/> - -For more information about kURL, see [Introduction to kURL](kurl-about). - -## Create an Installer - -To distribute a kURL installer alongside your application, you can promote the installer to a channel or include the installer as a manifest file within a given release: - -<table> - <tr> - <th width="30%">Method</th> - <th width="70%">Description</th> - </tr> - <tr> - <td><a href="packaging-embedded-kubernetes#channel">Promote the installer to a channel</a></td> - <td><p>The installer is promoted to one or more channels. All releases on the channel use the kURL installer that is currently promoted to that channel. There can be only one active kURL installer on each channel at a time.</p><p>The benefit of promoting an installer to one or more channels is that you can create a single installer without needing to add a separate installer for each release. However, because all the releases on the channel will use the same installer, problems can occur if all releases are not tested with the given installer.</p></td> - </tr> - <tr> - <td><a href="packaging-embedded-kubernetes#release">Include the installer in a release (Beta)</a></td> - <td><p>The installer is included as a manifest file in a release. This makes it easier to test the installer and release together. It also makes it easier to know which installer spec customers are using based on the application version that they have installed.</p></td> - </tr> -</table> - -### Promote the Installer to a Channel {#channel} - -To promote a kURL installer to a channel: - -1. In the [Vendor Portal](https://vendor.replicated.com), click **kURL Installers**. - -1. On the **kURL Installers** page, click **Create kURL installer**. - - <img alt="vendor portal kurl installers page" src="/images/kurl-installers-page.png" width="650px"/> - - [View a larger version of this image](/images/kurl-installers-page.png) - -1. Edit the file to customize the installer. For guidance on which add-ons to choose, see [Requirements and Recommendations](#requirements-and-recommendations) below. - - You can also go to the landing page at [kurl.sh](https://kurl.sh/) to build an installer then copy the provided YAML: - - <img alt="kurl.sh landing page" src="/images/kurl-build-an-installer.png" width="650px"/> - - [View a larger version of this image](/images/kurl-build-an-installer.png) - -1. Click **Save installer**. You can continue to edit your file until it is promoted. - -1. Click **Promote**. 
In the **Promote Installer** dialog that opens, edit the fields:
-
-   <img alt="promote installer dialog" src="/images/promote-installer.png" width="450px"/>
-
-   [View a larger version of this image](/images/promote-installer.png)
-
-   <table>
-     <tr>
-       <th width="30%">Field</th>
-       <th width="70%">Description</th>
-     </tr>
-     <tr>
-       <td>Channel</td>
-       <td>Select the channel or channels where you want to promote the installer.</td>
-     </tr>
-     <tr>
-       <td>Version label</td>
-       <td>Enter a version label for the installer.</td>
-     </tr>
-   </table>
-
-1. Click **Promote** again. The installer appears on the **kURL Installers** page.
-
-   To make changes after promoting, create and promote a new installer.
-
-### Include an Installer in a Release (Beta) {#release}
-
-To include the kURL installer in a release:
-
-1. In the [Vendor Portal](https://vendor.replicated.com), click **Releases**. Then, either click **Create Release** to create a new release, or click **Edit YAML** to edit an existing release.
-
-   The YAML editor opens.
-
-1. Create a new file in the release with `apiVersion: cluster.kurl.sh/v1beta1` and `kind: Installer`:
-
-   ```yaml
-   apiVersion: cluster.kurl.sh/v1beta1
-   kind: Installer
-   metadata:
-     name: "latest"
-   spec:
-
-   ```
-
-1. Edit the file to customize the installer. For guidance on which add-ons to choose, see [kURL Add-on Requirements and Recommendations](#requirements-and-recommendations) below.
-
-   You can also go to the landing page at [kurl.sh](https://kurl.sh/) to build an installer then copy the provided YAML:
-
-   <img alt="kurl.sh landing page" src="/images/kurl-build-an-installer.png" width="650px"/>
-
-   [View a larger version of this image](/images/kurl-build-an-installer.png)
-
-1. Click **Save**. This saves a draft that you can continue to edit until you promote it.
-
-1. Click **Promote**.
-
-   To make changes after promoting, create a new release.
-
-## kURL Add-on Requirements and Recommendations {#requirements-and-recommendations}
-
-kURL includes several add-ons for networking, storage, ingress, and more. The add-ons that you choose depend on the requirements for KOTS and the unique requirements for your application. For more information about each add-on, see the open source [kURL documentation](https://kurl.sh/docs/introduction/).
-
-When creating a kURL installer, consider the following requirements and guidelines for kURL add-ons:
-
-- You must include the KOTS add-on to support installation with KOTS and provision the KOTS Admin Console. See [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) in the kURL documentation.
-
-- To support the use of KOTS snapshots, Velero must be installed in the cluster. Replicated recommends that you include the Velero add-on in your kURL installer so that your customers do not have to manually install Velero.
-
-  :::note
-  During installation, the Velero add-on automatically deploys internal storage for backups. The Velero add-on requires the MinIO or Rook add-on to deploy this internal storage. If you include the Velero add-on without either the MinIO add-on or the Rook add-on, installation fails with the following error message: `Only Rook and Longhorn are supported for Velero Internal backup storage`.
-  :::
-
-- You must select storage add-ons based on the KOTS requirements and the unique requirements for your application. For more information, see [About Selecting Storage Add-ons](packaging-installer-storage).
-
-- kURL installers that are included in releases must pin specific add-on versions and cannot pin `latest` versions or x-ranges (such as 1.2.x). Pinning specific versions ensures the most testable and reproducible installations. For example, pin `Kubernetes 1.23.0` in your manifest to ensure that version 1.23.0 of Kubernetes is installed. For more information about pinning Kubernetes versions, see [Versions](https://kurl.sh/docs/create-installer/#versions) and [Versioned Releases](https://kurl.sh/docs/install-with-kurl/#versioned-releases) in the kURL open source documentation.
-
-  :::note
-  For kURL installers that are _not_ included in a release, pinning specific versions of Kubernetes and Kubernetes add-ons in the kURL installer manifest is not required, though it is highly recommended.
-  :::
-
-- After you configure a kURL installer, Replicated recommends that you customize host preflight checks to support the installation experience with kURL. Host preflight checks help ensure successful installation and the ongoing health of the cluster. For more information about customizing host preflight checks, see [Customizing Host Preflight Checks for Kubernetes Installers](preflight-host-preflights).
-
-- For installers included in a release, Replicated recommends that you define a preflight check in the release to ensure that the target kURL installer is deployed before the release is installed. For more information about how to define preflight checks, see [Defining Preflight Checks](preflight-defining).
-
-  For example, the following preflight check uses the `yamlCompare` analyzer with the `kots.io/installer: "true"` annotation to compare the target kURL installer that is included in the release against the kURL installer that is currently deployed in the customer's environment. For more information about the `yamlCompare` analyzer, see [`yamlCompare`](https://troubleshoot.sh/docs/analyze/yaml-compare/) in the open source Troubleshoot documentation.
-
-  ```yaml
-  apiVersion: troubleshoot.sh/v1beta2
-  kind: Preflight
-  metadata:
-    name: installer-preflight-example
-  spec:
-    analyzers:
-      - yamlCompare:
-          annotations:
-            kots.io/installer: "true"
-          checkName: Kubernetes Installer
-          outcomes:
-            - fail:
-                message: The kURL installer for this version differs from what you have installed. It is recommended that you run the updated kURL installer before deploying this version.
-                uri: https://kurl.sh/my-application
-            - pass:
-                message: The kURL installer for this version matches what is currently installed.
-  ```
-
-================
-File: docs/vendor/packaging-include-resources.md
-================
-# Conditionally Including or Excluding Resources
-
-This topic describes how to include or exclude optional application resources based on one or more conditional statements. The information in this topic applies to Helm chart- and standard manifest-based applications.
-
-## Overview
-
-Software vendors often need a way to conditionally deploy resources for an application depending on users' configuration choices. For example, a common use case is giving the user the choice to use an external database or an embedded database. In this scenario, when a user chooses to use their own external database, it is not desirable to deploy the embedded database resources.
-
-There are different options for creating conditional statements to include or exclude resources based on the application type (Helm chart- or standard manifest-based) and the installation method (Replicated KOTS or Helm CLI).
### About Replicated Template Functions

For applications deployed with KOTS, Replicated template functions are available for creating the conditional statements that control which optional resources are deployed for a given user. Replicated template functions can be used in standard manifest files such as Replicated custom resources or Kubernetes resources like StatefulSets, Secrets, and Services.

For example, the Replicated ConfigOptionEquals template function returns true if the specified configuration option value is equal to a supplied value. This is useful for creating conditional statements that include or exclude a resource based on a user's application configuration choices.

For more information about the available Replicated template functions, see [About Template Functions](/reference/template-functions-about).

## Include or Exclude Helm Charts

This section describes methods for including or excluding Helm charts from your application deployment.

### Helm Optional Dependencies

Helm supports adding a `condition` field to dependencies in the Helm chart `Chart.yaml` file to include subcharts based on one or more boolean values evaluating to true.

For more information about working with dependencies and defining optional dependencies for Helm charts, see [Dependencies](https://helm.sh/docs/chart_best_practices/dependencies/) in the Helm documentation.

### HelmChart `exclude` Field

For Helm chart-based applications installed with KOTS, you can configure KOTS to exclude certain Helm charts from deployment using the HelmChart custom resource [`exclude`](/reference/custom-resource-helmchart#exclude) field. When the `exclude` field is set to a conditional statement, KOTS excludes the chart if the condition evaluates to `true`.

The following example uses the `exclude` field and the ConfigOptionEquals template function to exclude a postgresql Helm chart when the `external_postgres` option is selected on the Replicated Admin Console **Config** page:

```yaml
apiVersion: kots.io/v1beta2
kind: HelmChart
metadata:
  name: postgresql
spec:
  exclude: 'repl{{ ConfigOptionEquals `postgres_type` `external_postgres` }}'
  chart:
    name: postgresql
    chartVersion: 12.1.7
  releaseName: samplechart-release-1
```

## Include or Exclude Standard Manifests

For standard manifest-based applications installed with KOTS, you can use the `kots.io/exclude` or `kots.io/when` annotations to include or exclude resources based on a conditional statement.

By default, if neither `kots.io/exclude` nor `kots.io/when` is present on a resource, the resource is included.

### Requirements

The `kots.io/exclude` and `kots.io/when` annotations have the following requirements:

* Only one of the `kots.io/exclude` or `kots.io/when` annotations can be present on a single resource. If both are present, the `kots.io/exclude` annotation is applied, and the `kots.io/when` annotation is ignored.

* The values of the `kots.io/exclude` and `kots.io/when` annotations must be wrapped in quotes. This is because Kubernetes annotations must be strings. For more information about working with Kubernetes annotations, see [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) in the Kubernetes documentation.

### `kots.io/exclude`

When the `kots.io/exclude: '<bool>'` annotation is present on a resource and evaluates to true, the resource is excluded from the deployment.
The following example uses the `kots.io/exclude` annotation and the ConfigOptionEquals template function to exclude the postgresql `StatefulSet` when an `install_postgres` checkbox on the Admin Console **Config** page is disabled:

```yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: postgresql
  annotations:
    kots.io/exclude: '{{repl ConfigOptionEquals "install_postgres" "0" }}'
  labels:
    app: postgresql
spec:
  selector:
    matchLabels:
      app: postgresql
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: postgresql
    spec:
      containers:
      - name: postgresql
        image: "postgres:9.6"
        imagePullPolicy: ""
...
```

### `kots.io/when`

When the `kots.io/when: '<bool>'` annotation is present on a resource and evaluates to true, the resource is included in the deployment.

The following example uses the `kots.io/when` annotation and the ConfigOptionEquals template function to include the postgresql `StatefulSet` resource when the `install_postgres` checkbox on the Admin Console **Config** page is enabled:

```yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: postgresql
  annotations:
    kots.io/when: '{{repl ConfigOptionEquals "install_postgres" "1" }}'
  labels:
    app: postgresql
spec:
  selector:
    matchLabels:
      app: postgresql
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: postgresql
    spec:
      containers:
      - name: postgresql
        image: "postgres:9.6"
        imagePullPolicy: ""
...
```

================
File: docs/vendor/packaging-ingress.md
================
# Adding Cluster Ingress Options

When delivering a configurable application, ingress can be challenging because it is very cluster-specific.
Below is an example of a flexible `ingress.yaml` file designed to work in most Kubernetes clusters, including embedded clusters created with Replicated kURL.

## Example

The following example includes an Ingress resource with a single host-based routing rule.
The resource works in both existing clusters and kURL clusters.

### Config

A config option `enable_ingress` has been provided to allow the end-user to choose whether or not to enable the Ingress resource.
In some clusters a custom Ingress resource may not be desired. When an ingress controller is not available, other means of exposing services may be preferred.

An `annotations` text area has been made available for the end-user to add additional annotations to the ingress.
Here, cluster-specific annotations can be added to support a variety of ingress controllers.
For example, when using the [ALB ingress controller](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html) in AWS, it is necessary to include the `kubernetes.io/ingress.class: alb` annotation on your Ingress resource.

```yaml
apiVersion: kots.io/v1beta1
kind: Config
metadata:
  name: example-application
spec:
  groups:
  - name: ingress
    title: Ingress
    items:
    - name: enable_ingress
      type: bool
      title: Enable Kubernetes Ingress
      help_text: |
        When checked, deploy the provided Kubernetes Ingress resource.
      default: "1"
    - name: hostname
      type: text
      title: Hostname
      help_text: |
        Use this field to provide a hostname for your Example Application installation.
      required: true
      when: repl{{ ConfigOptionEquals "enable_ingress" "1" }}
    - name: allow_http
      type: bool
      title: Allow Unsecured Access through HTTP
      help_text: |
        Uncheck this box to disable HTTP traffic between the client and the load balancer.
      default: "1"
      when: repl{{ ConfigOptionEquals "enable_ingress" "1" }}
    - name: annotations
      type: textarea
      title: Annotations
      help_text: |
        Use this textarea to provide annotations specific to your ingress controller.
        For example, `kubernetes.io/ingress.class: alb` when using the ALB ingress controller.
      when: repl{{ ConfigOptionEquals "enable_ingress" "1" }}
```

### Ingress

For ingress, you must create two separate resources.
The first is deployed to existing cluster installations, while the second is deployed only to an embedded cluster.
Both of these resources are selectively excluded with the [`exclude` annotation](packaging-include-resources).

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example-application-ingress
  annotations:
    kots.io/exclude: '{{repl or (ConfigOptionEquals "enable_ingress" "1" | not) IsKurl }}'
    kubernetes.io/ingress.allow-http: '{{repl ConfigOptionEquals "allow_http" "1" }}'
    nginx.ingress.kubernetes.io/force-ssl-redirect: '{{repl ConfigOptionEquals "allow_http" "1" | not }}'
    kots.io/placeholder: repl{{ printf "'true'" }}repl{{ ConfigOption "annotations" | nindent 4 }}
spec:
  rules:
  - host: repl{{ or (ConfigOption "hostname") "~" }}
    http:
      paths:
      - path: /
        pathType: ImplementationSpecific
        backend:
          service:
            name: nginx
            port:
              number: 80
```

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example-application-ingress-embedded
  annotations:
    kots.io/exclude: '{{repl or (ConfigOptionEquals "enable_ingress" "1" | not) (not IsKurl) }}'
    kubernetes.io/ingress.allow-http: '{{repl ConfigOptionEquals "allow_http" "1" }}'
    nginx.ingress.kubernetes.io/force-ssl-redirect: '{{repl ConfigOptionEquals "allow_http" "1" | not }}'
    kots.io/placeholder: repl{{ printf "'true'" }}repl{{ ConfigOption "annotations" | nindent 4 }}
spec:
  tls:
  - hosts:
    - repl{{ ConfigOption "hostname" }}
    secretName: kotsadm-tls
  rules:
  - host: repl{{ ConfigOption "hostname" }}
    http:
      paths:
      - path: /
        pathType: ImplementationSpecific
        backend:
          service:
            name: nginx
            port:
              number: 80
```

================
File: docs/vendor/packaging-installer-storage.mdx
================
import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"

# About Selecting Storage Add-ons

<KurlAvailability/>

This topic provides guidance for selecting Replicated kURL add-ons to provide highly available data storage in kURL clusters. For additional guidance, see [Choosing a PV Provisioner](https://kurl.sh/docs/create-installer/choosing-a-pv-provisioner) in the open source kURL documentation.

## Overview

kURL includes add-ons for object storage and for dynamic provisioning of PersistentVolumes (PVs) in clusters. You configure these add-ons in your kURL installer to define how data for your application and data for Replicated KOTS is managed in the cluster.

The following lists the kURL add-ons for data storage:
* **MinIO**: MinIO is an open source, S3-compatible object store. See [MinIO Add-on](https://kurl.sh/docs/add-ons/minio) in the kURL documentation.
* **Rook**: Rook provides dynamic PV provisioning of distributed Ceph storage. Ceph is a distributed storage system that provides S3-compatible object storage. See [Rook Add-on](https://kurl.sh/docs/add-ons/rook) in the kURL documentation.
* **OpenEBS**: OpenEBS Local PV creates a StorageClass to dynamically provision local PersistentVolumes (PVs) in a cluster.
See [OpenEBS Add-on](https://kurl.sh/docs/add-ons/openebs) in the kURL documentation.
* **Longhorn**: Longhorn is an open source distributed block storage system for Kubernetes. See [Longhorn Add-on](https://kurl.sh/docs/add-ons/longhorn) in the kURL documentation.

  :::important
  The Longhorn add-on is deprecated and not supported in production clusters. If you are currently using Longhorn, you must migrate data from Longhorn to either OpenEBS or Rook. For more information about migrating from Longhorn, see [Migrating to Change CSI Add-On](https://kurl.sh/docs/install-with-kurl/migrating-csi) in the kURL documentation.
  :::

## About Persistent Storage for KOTS

This section describes the default storage requirements for KOTS. Each of the [Supported Storage Configurations](#supported-storage-configurations) described below satisfies these storage requirements for KOTS.

### rqlite StatefulSet

KOTS deploys a rqlite StatefulSet to store the version history, application metadata, and other small amounts of data needed to manage the application(s). No configuration is required to deploy rqlite.

Rqlite is a distributed relational database that uses SQLite as its storage engine. For more information, see the [rqlite](https://rqlite.io/) website.

### Object Storage or Local PV

By default, KOTS requires an S3-compatible object store to store the following:
* Support bundles
* Application archives
* Backups taken with Replicated snapshots that are configured to NFS or host path storage destinations

Both the Rook add-on and the MinIO add-on satisfy this object store requirement.

Alternatively, you can configure KOTS to be deployed without object storage. This installs KOTS as a StatefulSet using a persistent volume (PV) for storage. When there is no object storage available, KOTS stores support bundles, application archives, and snapshots that have an NFS or host path storage destination in the local PV. In this case, the OpenEBS add-on can be included to provide the local PV storage. For more information, see [Installing Without Object Storage](/enterprise/installing-stateful-component-requirements).

### Distributed Storage in KOTS v1.88 and Earlier

KOTS v1.88 and earlier requires distributed storage. To support multi-node clusters, kURL installers that use KOTS v1.88 or earlier in the KOTS add-on must use the Rook add-on for distributed storage. For more information, see [Rook Ceph](#rook-ceph) below.

## Factors to Consider When Choosing a Storage Configuration

The object store and/or PV provisioner add-ons that you choose to include in your kURL installer depend on the following factors:
* **KOTS storage requirements**: The storage requirements for the version of the KOTS add-on that you include in the spec. For example, KOTS v1.88 and earlier requires distributed storage.
* **Other add-on storage requirements**: The storage requirements for the other add-ons that you include in the spec. For example, the Velero add-on requires object storage to deploy the default internal storage for snapshots during installation.
* **Application storage requirements**: The storage requirements for your application. For example, you might include different add-ons depending on whether your application requires a single or multi-node cluster, or whether your application requires distributed storage.

## Supported Storage Configurations

This section describes the supported storage configurations for embedded clusters provisioned by kURL.
### OpenEBS Without Object Storage (Single Node) {#single-node}

If your application can be deployed to a single node cluster and does not require object storage, then you can choose to exclude object storage and instead use the OpenEBS add-on only to provide local storage on the single node in the cluster.

When configured to use local PV storage instead of object storage, KOTS stores support bundles, application archives, and snapshots that have an NFS or host path storage destination in a PV on the single node in the cluster.

#### Requirements

To use the OpenEBS add-on without object storage, your kURL installer must meet the following requirements:

* When neither the MinIO nor the Rook add-on is included in the kURL installer, you must set the `disableS3` field to `true` in the KOTS add-on. Setting `disableS3: true` in the KOTS add-on allows KOTS to use the local PV storage provided by OpenEBS instead of using object storage. For more information, see [Effects of the disableS3 Flag](https://kurl.sh/docs/add-ons/kotsadm#effects-of-the-disables3-flag) in _KOTS Add-on_ in the kURL documentation.

* When neither the MinIO nor the Rook add-on is included in the kURL installer, the Velero add-on cannot be included. This is because, during installation, the Velero add-on automatically deploys internal storage for backups taken with the Replicated snapshots feature. The Velero add-on requires object storage to deploy this internal storage. If you include the Velero add-on without either the MinIO add-on or the Rook add-on, installation fails with the following error message: `Only Rook and Longhorn are supported for Velero Internal backup storage`.

  When the Velero add-on is not included, your users must install and configure Velero on the cluster after installation in order to use Replicated snapshots for backup and restore. See [About Backup and Restore with Snapshots](/vendor/snapshots-overview).

  For a storage configuration for single node clusters that supports the use of the Velero add-on, see [OpenEBS with MinIO (Single or Multi-Node)](#openebs-minio) below.

#### Example

The following is an example installer that uses OpenEBS v3.3.x with Local PV for local storage and disables object storage for KOTS:

```yaml
apiVersion: "cluster.kurl.sh/v1beta1"
kind: "Installer"
metadata:
  name: "local"
spec:
  ...
  openebs:
    version: "3.3.x"
    isLocalPVEnabled: true
    localPVStorageClassName: "default"
  kotsadm:
    disableS3: true
```

For more information about properties for the OpenEBS add-on, see [OpenEBS](https://kurl.sh/docs/add-ons/openebs) in the kURL documentation.

### OpenEBS with MinIO (Single or Multi-Node) {#openebs-minio}

Using the OpenEBS add-on with the MinIO add-on provides a highly available data storage solution for multi-node clusters that is lighter-weight compared to using Rook Ceph. Replicated recommends that you use OpenEBS Local PV with MinIO for multi-node clusters if your application does _not_ require distributed storage. If your application requires distributed storage, see [Rook Ceph](#rook-ceph) below.

When both the MinIO and OpenEBS add-ons are included, KOTS stores support bundles, application archives, and snapshots that have an NFS or host path storage destination in MinIO object storage. Additionally, KOTS uses OpenEBS Local PV to provision the PVs on each node that MinIO uses for local storage.
#### Requirement

To use both the OpenEBS add-on and the MinIO add-on, the KOTS add-on must use KOTS v1.89 or later.

KOTS v1.88 and earlier requires distributed storage, which is not provided by OpenEBS Local PV. To support multi-node clusters, kURL installers that use KOTS v1.88 or earlier in the KOTS add-on must use the Rook add-on for distributed storage. See [Rook Ceph](#rook-ceph) below.

#### Example

The following is an example installer that uses both the OpenEBS add-on version 3.3.x and the MinIO add-on version `2022-09-07T22-25-02Z`:

```yaml
apiVersion: "cluster.kurl.sh/v1beta1"
kind: "Installer"
metadata:
  name: "openebs-with-minio"
spec:
  ...
  openebs:
    version: "3.3.x"
    isLocalPVEnabled: true
    localPVStorageClassName: "default"
  minio:
    version: "2022-09-07T22-25-02Z"
```

For more information about properties for the OpenEBS and MinIO add-ons, see [OpenEBS](https://kurl.sh/docs/add-ons/openebs) and [MinIO](https://kurl.sh/docs/add-ons/minio) in the kURL documentation.

### Rook Ceph (Multi-Node) {#rook-ceph}

If your application requires multiple nodes and distributed storage, Replicated recommends that you use the Rook add-on for storage. The Rook add-on creates an S3-compatible, distributed object store with Ceph and also creates a StorageClass for dynamically provisioning PVs.

#### Requirement

Rook versions 1.4.3 and later require a dedicated block device attached to each node in the cluster. The block device must be unformatted and dedicated for use by Rook only. The device cannot be used for other purposes, such as being part of a RAID configuration. If the device is used for purposes other than Rook, then the installer fails, indicating that it cannot find an available block device for Rook.

For Rook Ceph versions earlier than 1.4.3, a dedicated block device is recommended in production clusters. Running distributed storage such as Rook on block devices is recommended for improved data stability and performance.

#### Example

The following is an example installer that uses the Rook add-on version 1.7.x:

```yaml
apiVersion: "cluster.kurl.sh/v1beta1"
kind: "Installer"
metadata:
  name: "distributed"
spec:
  ...
  rook:
    version: "1.7.x"
    storageClassName: "distributed"
    isSharedFilesystemDisabled: true
```

For more information about properties for the Rook add-on, see [Rook](https://kurl.sh/docs/add-ons/rook) in the kURL documentation.

================
File: docs/vendor/packaging-kots-versions.md
================
# Setting Minimum and Target Versions for KOTS

This topic describes how to set minimum and target versions for Replicated KOTS in the KOTS [Application](/reference/custom-resource-application) custom resource.

## Limitation

Setting minimum and target versions for KOTS is not supported for installations with [Replicated Embedded Cluster](/vendor/embedded-overview).

This is because each version of Embedded Cluster includes a particular version of KOTS. Setting `targetKotsVersion` or `minKotsVersion` to a version of KOTS that does not coincide with the version that is included in the specified version of Embedded Cluster will cause Embedded Cluster installations to fail with an error message like: `Error: This version of App Name requires a different version of KOTS from what you currently have installed.`

To avoid installation failures, do not use `targetKotsVersion` or `minKotsVersion` in releases that support installation with Embedded Cluster.
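For installation methods that do support these fields, both are set in the `spec` of the Application custom resource. The following is a minimal sketch; the application name and version values are illustrative:

```yaml
apiVersion: kots.io/v1beta1
kind: Application
metadata:
  name: my-application
spec:
  minKotsVersion: "1.71.0"
  targetKotsVersion: "1.92.0"
```

The sections below describe how KOTS and the Admin Console behave when each field is set.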
## Using Minimum KOTS Versions (Beta)

The `minKotsVersion` attribute in the Application custom resource defines the minimum version of Replicated KOTS that is required by the application release. This can be useful when you want to get users who are lagging behind to update to a more recent KOTS version, or if your application requires functionality that was introduced in a particular KOTS version.

Including this attribute enforces compatibility checks for both new installations and application updates. An installation or update is blocked if the currently deployed KOTS version is earlier than the specified minimum KOTS version. Users must upgrade to at least the specified minimum version of KOTS before they can install or update the application.

### How the Admin Console Handles minKotsVersion

When you promote a new release specifying a minimum KOTS version that is later than what a user currently has deployed, and that user checks for updates, that application version appears in the version history of the Admin Console. However, it is not downloaded.

The Admin Console temporarily displays an error message that informs the user that they must update KOTS before downloading the application version. This error also displays when the user checks for updates with the [`kots upstream upgrade`](/reference/kots-cli-upstream-upgrade) command.

KOTS cannot update itself automatically, and users cannot update KOTS from the Admin Console. For more information on how to update KOTS in existing clusters or in kURL clusters, see [Performing Updates in Existing Clusters](/enterprise/updating-app-manager) and [Performing Updates in kURL Clusters](/enterprise/updating-kurl).

After updating KOTS to the minimum version or later, users can use the Admin Console or the [`kots upstream download`](/reference/kots-cli-upstream-download) command to download the release and subsequently deploy it.

## Using Target KOTS Versions

Including `targetKotsVersion` in the Application custom resource enforces compatibility checks for new installations. It blocks the installation if a user tries to install a version of KOTS that is later than the target version. For example, this can prevent users from installing a version of KOTS that you have not tested yet.

If the latest release in a channel includes `targetKotsVersion`, the install command for existing clusters is modified to install that specific version of KOTS. The install command for existing clusters is on the channel card in the [Vendor Portal](https://vendor.replicated.com).

### How the Admin Console Handles targetKotsVersion

Specifying a `targetKotsVersion` does not prevent an end user from upgrading to a later version of KOTS after the initial installation.

If a new version of the application specifies a later target KOTS version than what is currently installed, users are not prevented from deploying that version of the application.

If a user's Admin Console is running a version of KOTS that is earlier than the target version specified in a new version of the application, the Admin Console displays a notification in the footer, indicating that a newer supported version of KOTS is available.

### Using Target Versions with kURL

For installations in a cluster created by Replicated kURL, the version of the KOTS add-on must not be later than the target KOTS version specified in the Application custom resource.
If the KOTS add-on version is later than the version specified for `targetKotsVersion`, the initial installation fails.

For more information about the KOTS add-on, see [KOTS add-on](https://kurl.sh/docs/add-ons/kotsadm) in the open source kURL documentation.

================
File: docs/vendor/packaging-private-images.md
================
# Connecting to an External Registry

This topic describes how to add credentials for an external private registry using the Replicated Vendor Portal or Replicated CLI. Adding an external registry allows you to grant proxy access to private images using the Replicated proxy registry. For more information, see [About the Replicated Proxy Registry](private-images-about).

For information about adding a registry with the Vendor API v3, see [Create an external registry with the specified parameters](https://replicated-vendor-api.readme.io/reference/createexternalregistry) in the Vendor API v3 documentation.

## Supported Registries

Replicated recommends that application vendors use one of the following external private registries:

* Amazon Elastic Container Registry (ECR)
* DockerHub
* GitHub Container Registry
* Google Artifact Registry
* Google Container Registry (Deprecated)
* Sonatype Nexus
* Quay.io

These registries have been tested for compatibility with KOTS.

You can also configure access to most other external registries if the registry conforms to the Open Container Initiative (OCI) standard.

## Add Credentials for an External Registry

All applications in your team have access to the external registry that you add. This means that you can use the images in the external registry across multiple apps in the same team.

### Using the Vendor Portal

To add an external registry using the Vendor Portal:

1. Log in to the [Vendor Portal](https://vendor.replicated.com) and go to the **Images** page.
1. Click **Add External Registry**.

   <img src="/images/add-external-registry.png" alt="add external registry dialog" width="400px"></img>

   [View a larger version of this image](/images/add-external-registry.png)

1. In the **Provider** drop-down, select your registry provider.

1. Complete the fields in the dialog, depending on the provider that you chose:

   :::note
   Replicated stores your credentials encrypted and securely. Your credentials and the encryption key do not leave Replicated servers.
   :::

   * **Amazon ECR**

     <table>
       <tr>
         <th width="30%">Field</th>
         <th width="70%">Instructions</th>
       </tr>
       <tr>
         <td>Hostname</td>
         <td>Enter the host name for the registry, such as 123456689.dkr.ecr.us-east-1.amazonaws.com</td>
       </tr>
       <tr>
         <td>Access Key ID</td>
         <td>Enter the Access Key ID for a Service Account User that has pull access to the registry.
See <a href="tutorial-ecr-private-images#setting-up-the-service-account-user">Setting up the Service Account User</a>.</td>
       </tr>
       <tr>
         <td>Secret Access Key</td>
         <td>Enter the Secret Access Key for the Service Account User.</td>
       </tr>
     </table>

   * **DockerHub**

     <table>
       <tr>
         <th width="30%">Field</th>
         <th width="70%">Instructions</th>
       </tr>
       <tr>
         <td>Hostname</td>
         <td>Enter the host name for the registry, such as index.docker.io.</td>
       </tr>
       <tr>
         <td>Auth Type</td>
         <td>Select the authentication type for a DockerHub account that has pull access to the registry.</td>
       </tr>
       <tr>
         <td>Username</td>
         <td>Enter the username for the account.</td>
       </tr>
       <tr>
         <td>Password or Token</td>
         <td>Enter the password or token for the account, depending on the authentication type you selected.</td>
       </tr>
     </table>

   * **GitHub Container Registry**

     <table>
       <tr>
         <th width="30%">Field</th>
         <th width="70%">Instructions</th>
       </tr>
       <tr>
         <td>Hostname</td>
         <td>Enter the host name for the registry.</td>
       </tr>
       <tr>
         <td>Username</td>
         <td>Enter the username for an account that has pull access to the registry.</td>
       </tr>
       <tr>
         <td>GitHub Token</td>
         <td>Enter the token for the account.</td>
       </tr>
     </table>

   * **Google Artifact Registry**

     <table>
       <tr>
         <th width="30%">Field</th>
         <th width="70%">Instructions</th>
       </tr>
       <tr>
         <td>Hostname</td>
         <td>Enter the host name for the registry, such as <br/>us-east1-docker.pkg.dev</td>
       </tr>
       <tr>
         <td>Auth Type</td>
         <td>Select the authentication type for a Google Cloud Platform account that has pull access to the registry.</td>
       </tr>
       <tr>
         <td>Service Account JSON Key or Token</td>
         <td>
           <p>Enter the JSON Key from Google Cloud Platform assigned with the Artifact Registry Reader role, or token for the account, depending on the authentication type you selected.</p>
           <p>For more information about creating a Service Account, see <a href="https://cloud.google.com/container-registry/docs/access-control">Access Control with IAM</a> in the Google Cloud documentation.</p>
         </td>
       </tr>
     </table>

   * **Google Container Registry**

     :::important
     Google Container Registry is deprecated. For more information, see <a href="https://cloud.google.com/container-registry/docs/deprecations/container-registry-deprecation">Container Registry deprecation</a> in the Google documentation.
     :::

     <table>
       <tr>
         <th width="30%">Field</th>
         <th width="70%">Instructions</th>
       </tr>
       <tr>
         <td>Hostname</td>
         <td>Enter the host name for the registry, such as gcr.io.</td>
       </tr>
       <tr>
         <td>Service Account JSON Key</td>
         <td><p>Enter the JSON Key for a Service Account in Google Cloud Platform that is assigned the Storage Object Viewer role.</p><p>For more information about creating a Service Account, see <a href="https://cloud.google.com/container-registry/docs/access-control">Access Control with IAM</a> in the Google Cloud documentation.</p></td>
       </tr>
     </table>

   * **Quay.io**

     <table>
       <tr>
         <th width="30%">Field</th>
         <th width="70%">Instructions</th>
       </tr>
       <tr>
         <td>Hostname</td>
         <td>Enter the host name for the registry, such as quay.io.</td>
       </tr>
       <tr>
         <td>Username and Password</td>
         <td>Enter the username and password for an account that has pull access to the registry.</td>
       </tr>
     </table>

   * **Sonatype Nexus**

     <table>
       <tr>
         <th width="30%">Field</th>
         <th width="70%">Instructions</th>
       </tr>
       <tr>
         <td>Hostname</td>
         <td>Enter the host name for the registry, such as nexus.example.net.</td>
       </tr>
       <tr>
         <td>Username and Password</td>
         <td>Enter the username and password for an account that has pull access to the registry.</td>
       </tr>
     </table>

   * **Other**

     <table>
       <tr>
         <th width="30%">Field</th>
         <th width="70%">Instructions</th>
       </tr>
       <tr>
         <td>Hostname</td>
         <td>Enter the host name for the registry, such as example.registry.com.</td>
       </tr>
       <tr>
         <td>Username and Password</td>
         <td>Enter the username and password for an account that has pull access to the registry.</td>
       </tr>
     </table>

1. For **Image name & tag**, enter the image name and image tag and click **Test** to confirm that the Vendor Portal can access the image. For example, `api:v1.0.1` or `my-app/api:v1.01`.

1. Click **Link registry**.

### Using the CLI

To configure access to private images in an external registry using the Replicated CLI:

1. Install and configure the Replicated CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing).

1. Run the `registry add` command for your external private registry. For more information about the `registry add` command, see [registry add](/reference/replicated-cli-registry-add) in _Replicated CLI_.

   For example, to add a DockerHub registry:

   ```bash
   replicated registry add dockerhub --username USERNAME \
     --password PASSWORD
   ```

   Where:
   * `USERNAME` is the username for DockerHub credentials with access to the registry.
   * `PASSWORD` is the password for DockerHub credentials with access to the registry.

   :::note
   To prevent the password from being saved in your shell history, Replicated recommends that you use the `--password-stdin` flag and enter the password when prompted.
   :::

## Test External Registry Credentials

Replicated recommends that you test external registry credentials to ensure that the saved credentials on Replicated servers can pull the specified image.

To validate that the configured registry can pull specific images:

```bash
replicated registry test HOSTNAME \
  --image IMAGE_NAME
```

Where:
* `HOSTNAME` is the name of the host, such as `index.docker.io`.
* `IMAGE_NAME` is the name of the target image in the registry.
For example:

```bash
replicated registry test index.docker.io --image my-company/my-image:v1.2.3
```

## Related Topic

[Tutorial: Using ECR for Private Images](tutorial-ecr-private-images)

================
File: docs/vendor/packaging-private-registry-security.md
================
# Replicated Registry Security

This document lists the security measures and processes in place to ensure that images pushed to the Replicated registry remain private. For more information about pushing images to the Replicated registry, see [Using the Replicated Registry for KOTS Installations](private-images-replicated).

## Single Tenant Isolation

The registry is deployed and managed as a multi-tenant application, but each tenant is completely isolated from data that is created and pulled by other tenants. Docker images have shared base layers, but the private registry does not share these between tenants. For example, if a tenant creates an image `FROM postgres:10.3` and pushes the image to Replicated, all of the layers are uploaded, even if other tenants have this base layer uploaded.

A tenant in the private registry is a team on the Replicated [Vendor Portal](https://vendor.replicated.com). Licenses and customers created by the team are also granted some permissions to the registry data, as specified in the following sections. Cross-tenant access is never allowed in the private registry.

## Authentication and Authorization

The private registry supports several methods of authentication. Public access is never allowed because the registry only accepts authenticated requests.

### Vendor Authentication

All accounts with read/write access on the Vendor Portal have full access to all images pushed by the tenant to the registry. These users can push and pull images to and from the registry.

### End Customer Authentication

A valid (unexpired) license file has an embedded `registry_token` value. Replicated components shipped to customers use this value to authenticate to the registry. Only pull access is enabled when authenticating using a `registry_token`. A `registry_token` has pull access to all images in the tenant's account. All requests to pull images are denied when a license expires or the expiration date is changed to a past date.

## Networking and Infrastructure

A dedicated cluster is used to run the private registry and is not used for any other services.

The registry metadata is stored in a shared database instance. This database contains information about each layer in an image, but not the image data itself.

The registry image data is securely stored in an encrypted S3 bucket. Each layer is encrypted at rest, using a shared key stored in [Amazon Key Management Service](https://aws.amazon.com/kms/). Each tenant has a unique directory in the shared bucket, and access is limited to the team or license making the request.

The registry cluster runs on a hardened operating system image (CentOS-based), and all instances are in a virtual private cloud (VPC). Public IP addresses are not assigned to the instances running the cluster and the registry images. Instead, only port 443 traffic is allowed from a layer 7 load balancer to these servers.

There are no SSH public keys on these servers, and password-based SSH login is disallowed. The servers are not configured to have any remote access. All deployments to these servers are automated using tools such as Terraform and a custom-built CI/CD process.
Only verified images are pulled and run.

## Runtime Monitoring

Replicated uses a Web Application Firewall (WAF) on the cluster that monitors and blocks any unusual activity. When unusual activity is detected, access from that endpoint is automatically blocked for a period of time, and a Replicated site reliability engineer (SRE) is alerted.

## Penetration Testing

Replicated completed a formal pen test that included the private registry in the scope of the test. Replicated also runs a bug bounty program and encourages responsible disclosure of any vulnerabilities that are found.

================
File: docs/vendor/packaging-public-images.mdx
================
# Connecting to a Public Registry through the Proxy Registry

This topic describes how to pull images from public registries using the Replicated proxy registry.

For more information about the Replicated proxy registry, see [About the Replicated Proxy Registry](private-images-about).

## Pull Public Images Through the Replicated Proxy Registry

You can use the Replicated proxy registry to pull both public and private images. Using the Replicated proxy registry for public images can simplify network access requirements for your customers, as they only need to whitelist a single domain (either `proxy.replicated.com` or your custom domain) instead of multiple registry domains.

Unlike private images, public images do not require you to first configure credentials for the upstream registry.

To pull public images through the Replicated proxy registry, use the following `docker` command:

```bash
docker pull REPLICATED_PROXY_DOMAIN/proxy/APPSLUG/UPSTREAM_REGISTRY_HOSTNAME/IMAGE:TAG
```
Where:
* `REPLICATED_PROXY_DOMAIN` is `proxy.replicated.com` or your custom domain. For information about how to set a custom domain for the proxy registry, see [Using Custom Domains](/vendor/custom-domains-using).
* `APPSLUG` is your Replicated app slug, which is found on the [app settings page](https://vendor.replicated.com/settings).
* `UPSTREAM_REGISTRY_HOSTNAME` is the hostname for the public registry where the image is located. If the image is located in a namespace within the registry, include the namespace after the hostname. For example, `quay.io/namespace`.
* `IMAGE` is the image name.
* `TAG` is the image tag.

## Examples

This section includes examples of pulling public images through the Replicated proxy registry.

### Pull Images from DockerHub

The following examples show how to pull public images from DockerHub:

```bash
# DockerHub is the default when no hostname is specified
docker pull proxy.replicated.com/proxy/APPSLUG/busybox
docker pull proxy.replicated.com/proxy/APPSLUG/nginx:1.16.0
```
```bash
# You can also optionally specify docker.io
docker pull proxy.replicated.com/proxy/APPSLUG/docker.io/replicated/replicated-sdk:1.0.0
```

### Pull Images from Other Registries

The following example shows how to pull images from the Amazon ECR Public Gallery:

```bash
docker pull proxy.replicated.com/proxy/APPSLUG/public.ecr.aws/nginx/nginx:latest
```

### Pull Images Using a Custom Domain for the Proxy Registry

The following example shows how to pull a public image when a custom domain is configured for the proxy registry:

```bash
docker pull my.customdomain.io/proxy/APPSLUG/public.ecr.aws/nginx/nginx:latest
```
For information about how to set a custom domain for the proxy registry, see [Using Custom Domains](/vendor/custom-domains-using).
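### Reference Proxied Images in Kubernetes Manifests

The same proxied reference can be used anywhere an image reference is accepted, not only with `docker pull`. The following is a minimal sketch of a Pod spec; `APPSLUG` is a placeholder, and the example assumes that a pull secret with valid registry credentials is available in the namespace, such as the pull secret that KOTS provisions during installation:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
  - name: nginx
    # Public image pulled through the Replicated proxy registry
    image: proxy.replicated.com/proxy/APPSLUG/public.ecr.aws/nginx/nginx:latest
  imagePullSecrets:
  - name: my-registry-secret   # Placeholder; use the pull secret available in your installation
```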
## Related Topic

[Connecting to an External Registry](packaging-private-images)

================
File: docs/vendor/packaging-rbac.md
================
# Configuring KOTS RBAC

This topic describes role-based access control (RBAC) for Replicated KOTS in existing cluster installations. It includes information about how to change the default cluster-scoped RBAC permissions granted to KOTS.

## Cluster-scoped RBAC

When a user installs your application with KOTS in an existing cluster, Kubernetes RBAC resources are created to allow KOTS to install and manage the application.

By default, the following ClusterRole and ClusterRoleBinding resources are created that grant KOTS access to all resources across all namespaces in the cluster:

```yaml
apiVersion: "rbac.authorization.k8s.io/v1"
kind: "ClusterRole"
metadata:
  name: "kotsadm-role"
rules:
  - apiGroups: ["*"]
    resources: ["*"]
    verbs: ["*"]
```

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kotsadm-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kotsadm-role
subjects:
- kind: ServiceAccount
  name: kotsadm
  namespace: appnamespace
```

Alternatively, if your application does not require access to resources across all namespaces in the cluster, then you can enable namespace-scoped RBAC for KOTS. For information, see [About Namespace-scoped RBAC](#min-rbac) below.

## Namespace-scoped RBAC {#min-rbac}

Rather than use the default cluster-scoped RBAC, you can configure your application so that the RBAC permissions granted to KOTS are limited to a target namespace or namespaces. By default, for namespace-scoped installations, the following Role and RoleBinding resources are created that grant KOTS permissions to all resources in a target namespace:

```yaml
apiVersion: "rbac.authorization.k8s.io/v1"
kind: "Role"
metadata:
  name: "kotsadm-role"
rules:
  - apiGroups: ["*"]
    resources: ["*"]
    verbs: ["*"]
```

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kotsadm-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kotsadm-role
subjects:
- kind: ServiceAccount
  name: kotsadm
  namespace: appnamespace
```

Namespace-scoped RBAC is supported for applications that use Kubernetes Operators or multiple namespaces. During application installation, if there are `additionalNamespaces` specified in the Application custom resource manifest file, then Roles and RoleBindings are created to grant KOTS access to resources in all specified namespaces.

### Enable Namespace-scoped RBAC {#enable}

To enable namespace-scoped RBAC permissions for KOTS, specify one of the following options in the Application custom resource manifest file:

* `supportMinimalRBACPrivileges`: Set to `true` to make namespace-scoped RBAC optional for existing cluster installations. When `supportMinimalRBACPrivileges` is `true`, cluster-scoped RBAC is used by default and users must pass the `--use-minimal-rbac` flag with the installation or upgrade command to use namespace-scoped RBAC.

* `requireMinimalRBACPrivileges`: Set to `true` to require that all installations to existing clusters use namespace-scoped access. When `requireMinimalRBACPrivileges` is `true`, all installations use namespace-scoped RBAC automatically and users do not pass the `--use-minimal-rbac` flag.
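For example, the following is a minimal sketch of an Application custom resource that makes namespace-scoped RBAC optional for existing cluster installations (the application name is illustrative):

```yaml
apiVersion: kots.io/v1beta1
kind: Application
metadata:
  name: my-application
spec:
  supportMinimalRBACPrivileges: true
```

To require namespace-scoped RBAC for all existing cluster installations instead, set `requireMinimalRBACPrivileges: true` in place of `supportMinimalRBACPrivileges`.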
For more information about these options, see [requireMinimalRBACPrivileges](/reference/custom-resource-application#requireminimalrbacprivileges) and [supportMinimalRBACPrivileges](/reference/custom-resource-application#supportminimalrbacprivileges) in _Application_.

### About Installing with Minimal RBAC

In some cases, it is not possible to grant the user `* * *` permissions in the target namespace. For example, an organization might have security policies that prevent this level of permissions.

If the user installing or upgrading KOTS cannot be granted `* * *` permissions in the namespace, then they can instead request the following:
* The minimum RBAC permissions required by KOTS
* RBAC permissions for any CustomResourceDefinitions (CRDs) that your application includes

Installing with the minimum KOTS RBAC permissions also requires that the user manually create a ServiceAccount, Role, and RoleBinding for KOTS, rather than allowing KOTS to automatically create a Role with `* * *` permissions.

For more information about how users can install KOTS with minimal RBAC when namespace-scoped RBAC is enabled, see [Namespace-scoped RBAC Requirements](/enterprise/installing-general-requirements#namespace-scoped) in _Installation Requirements_.

### Limitations

The following limitations apply when using the `requireMinimalRBACPrivileges` or `supportMinimalRBACPrivileges` options to enable namespace-scoped RBAC for KOTS:

* **Existing clusters only**: The `requireMinimalRBACPrivileges` and `supportMinimalRBACPrivileges` options apply only to installations in existing clusters.

* **Preflight checks**: When namespace-scoped access is enabled, preflight checks cannot read resources outside the namespace where KOTS is installed. The preflight checks continue to function, but return less data. For more information, see [Defining Preflight Checks](/vendor/preflight-defining).

* **Velero namespace access for KOTS snapshots**: Velero is required for enabling backup and restore with the KOTS snapshots feature. Namespace-scoped RBAC does not grant access to the namespace where Velero is installed in the cluster.

  To set up snapshots when KOTS has namespace-scoped access, users can run the `kubectl kots velero ensure-permissions` command. This command creates additional Roles and RoleBindings to allow the necessary cross-namespace access. For more information, see [`velero ensure-permissions`](/reference/kots-cli-velero-ensure-permissions/) in the KOTS CLI documentation.

  For more information about snapshots, see [About Backup and Restore with Snapshots](/vendor/snapshots-overview).

* **Air gap installations**: For air gap installations, the `requireMinimalRBACPrivileges` and `supportMinimalRBACPrivileges` flags are supported only in automated, or _headless_, installations. In headless installations, the user passes all the required information to install both KOTS and the application with the `kots install` command. In non-headless installations, the user provides information to install the application through the Admin Console UI after KOTS is installed.

  In non-headless installations in air gap environments, KOTS does not have access to the application's `.airgap` package during installation. This means that KOTS does not have the information required to determine whether namespace-scoped access is needed, so it defaults to the more permissive, default cluster-scoped RBAC policy.
  For more information about how to do headless installations in air gap environments, see [Air Gap Installation](/enterprise/installing-existing-cluster-automation#air-gap) in _Installing with the KOTS CLI_.

* **Changing RBAC permissions for installed instances**: The RBAC permissions for KOTS are set during its initial installation. KOTS runs using the assumed identity and cannot change its own authorization. When you update your application to add or remove the `requireMinimalRBACPrivileges` and `supportMinimalRBACPrivileges` flags in the Application custom resource, the RBAC permissions for KOTS are affected only for new installations. Existing KOTS installations continue to run with their current RBAC permissions.

  To expand the scope of RBAC for KOTS from namespace-scoped to cluster-scoped in new installations, Replicated recommends that you include a preflight check to ensure the permission is available in the cluster.

================
File: docs/vendor/packaging-using-tls-certs.mdx
================
import KurlAvailability from "../partials/kurl/_kurl-availability.mdx"

# Using TLS Certificates

<KurlAvailability/>

Replicated KOTS provides default self-signed certificates that renew automatically. For embedded clusters created with Replicated kURL, the self-signed certificate renews 30 days before expiration when you enable the kURL EKCO add-on version 0.7.0 and later.

Custom TLS options are supported:

- **Existing clusters:** The expectation is for the end customer to bring their own Ingress Controller such as Contour or Istio and upload their own `kubernetes.io/tls` secret. For an example, see [Ingress with TLS](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) in the Kubernetes documentation.

- **Embedded kURL clusters:** End customers can upload a custom TLS certificate. Replicated kURL creates a TLS secret that can be reused by other Kubernetes resources, such as a Deployment or Ingress, which can be easier than providing and maintaining multiple certificates. As a vendor, you can enable the use of custom TLS certificates with these additional resources.

For example, if your application does TLS termination, your deployment would need the TLS secret. Or, if the application is connecting to another deployment that is also secured using the same SSL certificate (which may not be a trusted certificate), the custom TLS certificate can be used to do validation without relying on the trust chain.

### Get the TLS Secret

kURL sets up a Kubernetes secret called `kotsadm-tls`. The secret stores the TLS certificate, key, and hostname. Initially, the secret has an annotation set called `acceptAnonymousUploads`. This indicates that a new TLS certificate can be uploaded by the end customer during the installation process. For more information about installing with kURL, see [Online Installation with kURL](/enterprise/installing-kurl).

Before you can reference the TLS certificate in other resources, you must get the `kotsadm-tls` secret output.

To get the `kotsadm-tls` secret, run:

```shell
kubectl get secret kotsadm-tls -o yaml
```

In the output, the `tls.crt` and `tls.key` hold the certificate and key that can be referenced in other Kubernetes resources.
**Example Output:**

```yaml
apiVersion: v1
kind: Secret
type: kubernetes.io/tls
metadata:
  name: kotsadm-tls
data:
  tls.crt: <base64_encoded>
  tls.key: <base64_encoded>
```

### Use TLS in a Deployment Resource

This procedure shows how to reference the `kotsadm-tls` secret using an example nginx Deployment resource (`kind: Deployment`).

To use the `kotsadm-tls` secret in a Deployment resource:

1. In the Deployment YAML file, configure SSL for volumeMounts and volumes, and add the `kotsadm-tls` secret to volumes:

   **Example:**

   ```yaml
   apiVersion: apps/v1
   kind: Deployment
   metadata:
     name: nginx
   spec:
     template:
       spec:
         containers:
         - name: nginx
           volumeMounts:
           - mountPath: "/etc/nginx/ssl"
             name: nginx-ssl
             readOnly: true
         volumes:
         - name: nginx-ssl
           secret:
             secretName: kotsadm-tls
   ```

1. Deploy the release, and then verify the pod deployment using the `kubectl exec` command:

   **Example:**

   ```shell
   export POD_NAME=nginx-<hash>
   kubectl exec -it ${POD_NAME} bash
   ```

1. Run the `ls` and `cat` commands to verify that the certificate and key were deployed to the specified volumeMount:

   **Example:**

   ```shell
   $ ls /etc/nginx/ssl
   tls.crt tls.key

   $ cat /etc/nginx/ssl/tls.crt
   -----BEGIN CERTIFICATE-----
   MIID8zCCAtugAwIBAgIUZF+NWHnpJCt2R1rDUhYjwgVv72UwDQYJKoZIhvcNAQEL

   $ cat /etc/nginx/ssl/tls.key
   -----BEGIN PRIVATE KEY-----
   MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCyiGNuHw2LY3Rv
   ```

### Use TLS in an Ingress Resource

You can add the `kotsadm-tls` secret to the Ingress resource to terminate TLS at the Contour layer. The following example shows how to configure `secretName: kotsadm-tls` under the TLS `hosts` field in an Ingress resource (`kind: Ingress`):

**Example:**

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: nginx
spec:
  tls:
  - hosts:
    - 'tls.foo.com'
    secretName: kotsadm-tls
  rules:
  - host: tls.foo.com
    http:
      paths:
      - path: /
        backend:
          serviceName: nginx
          servicePort: 80
```
:::note
`tls.foo.com` must resolve to a valid IP, and also must match the Common Name (CN) or Subject Alternative Name (SAN) of the TLS certificate.
:::

================
File: docs/vendor/planning-questionnaire.md
================
# Customer Application Deployment Questionnaire

Before you package and distribute an application, Replicated recommends that you understand several key characteristics about the environments where your customers will deploy your application.

To gather this information about your customers' environments:
1. Copy and customize the [$APP Deployment Questionnaire](#app-deployment-questionnaire) below.
1. Replace $APP with the name of your application.
1. Send the questionnaire to your users.

## $APP Deployment Questionnaire

### Infrastructure

This section includes questions about your infrastructure and how you deploy software. This includes both internally-written and Commercial Off The Shelf (COTS) applications.

If it’s more convenient, limit answers to the scope of the target infrastructure for deploying $APP.

- Do you use any IaaS like AWS, GCP, or Azure?

- If you deploy to a physical datacenter, do you use a hypervisor like vSphere?

- Do you ever install on bare metal?

- Do you have any restrictions on what operating systems are used?

- Does the target infrastructure have a direct outbound internet connection? Can it connect out via a proxy?
- If the environment has no outbound network, do machines in a DMZ have direct network access to the air gapped infrastructure, or do release artifacts need to be copied to physical media for installation?

- If there is an issue causing downtime in the on-prem application, would you be willing to give the $APP team direct SSH access to the instance(s)?

### Development and Deployment Processes

- Do you require that applications be deployed by a configuration management framework like Chef, Ansible, or Puppet?

- Do you run any container-based workloads today?

- If you run container workloads, do you run any kind of orchestration like Kubernetes, Mesos, or Docker Swarm?

- If you run container workloads, what tools do you use to host and serve container images?

- If you run container workloads, what tools do you use to scan and secure container images?

- If you are deploying $APP to your existing Kubernetes cluster, can your cluster nodes pull images from the public internet, or do you require images to be stored in an internal registry?

### Change Management

- How do you test new releases of COTS software? Do you have a UAT or staging environment? Are there other change management requirements?

- How often do you like to receive planned (non-critical) software updates? Quarterly? Monthly? As often as possible?

- For critical updates, what is your target deployment time for new patches? Do you have a requirement for how quickly patches are made available after a vulnerability is announced?

- Do you drive production deploys automatically from version control (“gitops”)?

### Application Usage and Policy Requirements

- For applications that expose a web UI, how will you be connecting to the instance? As much as possible, include details about your workstation, any tunneling/VPN/proxy infrastructure, and what browsers you intend to use.

- Do you require a disaster recovery strategy for deployed applications? If so, where are backups stored today? (SFTP? NAS? S3-compliant object store? Something else?)

- Do you require deployed COTS applications to support logins with an internal identity provider like OpenLDAP, Windows AD, or SAML?

- Do you require an audit log of all user activity performed in $APP? What are your needs around exporting/aggregating audit log data?

- Do you anticipate the need to scale the capacity of $APP up and down during its lifetime?

- What are your requirements around log aggregation? What downstream systems do you need system logs to be piped to?

================
File: docs/vendor/policies-data-transmission.md
================
# Data Transmission Policy

A Replicated installation connects to a Replicated-hosted endpoint periodically to perform various tasks, including checking for updates and synchronizing the installed license properties. During this time, some data is transmitted from an installed instance to the Replicated API. This data is limited to:

- The IP address of the primary Replicated instance.
- The ID of the installation.
- [Resource statuses](/enterprise/status-viewing-details#resource-statuses).
- Information about the installation, including data needed for [instance details](/vendor/instance-insights-details).
- [Custom metrics](/vendor/custom-metrics), which the vendor may configure as part of the installation.
- The date and timestamp of the data transmission.

This data is required to provide the expected update and license services.
The data is also used to provide telemetry and other reporting features.
-
-By default, no additional data is collected and transmitted from the instance to external servers.
-
-All data is encrypted in transit according to industry best practices. For more information about Replicated's security practices, see [Security at Replicated](https://www.replicated.com/security/) on the Replicated website.
-
-For more information about application instance data fields that the Replicated Vendor Portal uses to generate events for instances, see [About Instance and Event Data](/vendor/instance-insights-event-data).
-
-Last modified December 31, 2023
-
-================
-File: docs/vendor/policies-infrastructure-and-subprocessors.md
-================
-# Infrastructure and Subprocessor Providers
-
-This list describes the infrastructure environment, subprocessors, and other entities material to the Replicated products and services.
-
-Prior to engaging any third party, Replicated performs diligence to evaluate their privacy, security, and confidentiality practices. Whenever possible, Replicated uses encryption for data at rest and in motion so that all information is not available to these third parties.
-
-Replicated does not engage in the business of selling or trading personal information. Any personally identifiable information Replicated might possibly hold is data that a customer has provided to us.
-
-The fields that Replicated may possess as identifiable to a physical person may include:
-- Name
-- Email
-- Phone Number
-- Job Title
-- Business Address
-- GitHub Username
-
-Note: This does not imply that all these fields are collected for each person. It also does not mean all these data points are used with each declared provider.
-
-## Replicated Infrastructure Providers
-
-Replicated might use the following entities to provide infrastructure that helps with delivery of our products:
-
-| Entity Name | Purpose | Country where Infrastructure Resides | Notes |
-|---------------------|----------------------------|-------|----|
-| Amazon Web Services | Various IaaS | United States | Vendor Portal, registry, API, and supporting infrastructure services. |
-| Cloudflare | Network security, DDoS mitigation, DNS | United States | |
-| Datadog | Performance monitoring | United States | |
-| DBT Labs | Data transformation or migration | United States | |
-| FiveTran | Data transformation or migration | United States | |
-| GitHub | Customer support | United States | Replicated's customers may engage with our customer support team using GitHub issues in a private repo. |
-| Google Looker | Product usage metrics | United States | |
-| Hex | Data transformation or migration | United States | |
-| Knock Labs, Inc. | Event notifications | United States | |
-| Postmark / Active Campaign | Transactional emails from the Vendor Portal and marketing-related communications. | United States | Active Campaign and Postmark businesses merged. |
-| Salesforce | Customer and sales relationship management | United States | |
-| Snowflake | Usage data analysis and transformation | United States | |
-| Timescale | Time-series data of instance metrics | United States | See our [Data Transmission Policy](/vendor/policies-data-transmission). |
-
-Last modified January 4, 2024
-
-================
-File: docs/vendor/policies-support-lifecycle.md
-================
-# Support Lifecycle Policy
-
-Replicated will provide support for products per our terms and services until that product is noted as End of Life (EOL).
- -<table> - <tr> - <th width="30%">Product Phase</th> - <th width="70%">Definition</th> - </tr> - <tr> - <td>Alpha</td> - <td>A product or feature that is exploratory or experimental. Typically, access to alpha features and their documentation is limited to customers providing early feedback. While most alpha features progress to beta and general availability (GA), some are deprecated based on assessment learnings.</td> - </tr> - <tr> - <td>Beta</td> - <td><p>A product or feature that is typically production-ready, but has not met Replicated’s definition of GA for one or more of the following reasons:</p><ul><li>Remaining gaps in intended functionality</li><li>Outstanding needs around testing</li><li>Gaps in documentation or sales enablement</li><li>In-progress customer value validation efforts</li></ul><p>Documentation for beta products and features is published on the Replicated Documentation site with a "(Beta)" label. Beta products or features follow the same build and test processes required for GA.</p><p>Please contact your Replicated account representative if you have questions about why a product or feature is beta.</p></td> - </tr> - <tr> - <td>“GA” - General Availability</td> - <td>A product or feature that has been validated as both production-ready and value-additive by a percentage of Replicated customers. Products in the GA phase are typically those that are available for purchase from Replicated.</td> - </tr> - <tr> - <td>“LA” - Limited Availability</td> - <td>A product has reached the Limited Availability phase when it is no longer available for new purchases from Replicated. Updates will be primarily limited to security patches, critical bugs and features that enable migration to GA products.</td> - </tr> - <tr> - <td>“EOA” - End of Availability</td> - <td><p>A product has reached the End of Availability phase when it is no longer available for renewal purchase by existing customers. This date may coincide with the Limited Availability phase.</p><p>This product is considered deprecated, and will move to End of Life after a determined support window. Product maintenance is limited to critical security issues only.</p></td> - </tr> - <tr> - <td>“EOL” - End of Life</td> - <td><p>A product has reached its End of Life, and will no longer be supported, patched, or fixed by Replicated. 
Associated product documentation may no longer be available.</p><p>The Replicated team will continue to engage to migrate end customers to GA product based deployments of your application.</p></td> - </tr> -</table> - -<table> - <tr> - <th width="25%">Replicated Product</th> - <th width="15%">Product Phase</th> - <th width="25%">End of Availability</th> - <th width="25%">End of Life</th> - </tr> - <tr> - <td><a href="/vendor/testing-about">Compatibility Matrix</a></td> - <td>GA</td> - <td>N/A</td> - <td>N/A</td> - </tr> - <tr> - <td><a href="/vendor/replicated-sdk-overview">Replicated SDK</a></td> - <td>Beta</td> - <td>N/A</td> - <td>N/A</td> - </tr> - <tr> - <td><a href="/intro-kots">Replicated KOTS Installer</a></td> - <td>GA</td> - <td>N/A</td> - <td>N/A</td> - </tr> - <tr> - <td><a href="/vendor/kurl-about">Replicated kURL Installer</a></td> - <td>GA</td> - <td>N/A</td> - <td>N/A</td> - </tr> - <tr> - <td><a href="/vendor/embedded-overview">Replicated Embedded Cluster Installer</a></td> - <td>GA</td> - <td>N/A</td> - <td>N/A</td> - </tr> - <tr> - <td><a href="https://help.replicated.com/docs/native/getting-started/overview/">Replicated Classic Native Installer</a></td> - <td>EOL</td> - <td>2023-12-31*</td> - <td>2024-12-31*</td> - </tr> -</table> - -*Except for customers who have specifically contracted different dates for the End of Availability and End of Life timelines. - -## Supported Replicated Installer Versions - -The following table lists the versions of Replicated KOTS and Replicated kURL that are supported on each Kubernetes version. - -The End of Replicated Support date is the End Of Life (EOL) date for the Kubernetes version. The EOL date for each Kubernetes version is published on the [Releases](https://kubernetes.io/releases/) page in the Kubernetes documentation. - -<table> - <tr> - <th>Kubernetes Version</th> - <th>Embedded Cluster Versions</th> - <th>KOTS Versions</th> - <th>kURL Versions</th> - <th>End of Replicated Support</th> - </tr> - <tr> - <td>1.32</td> - <td>N/A</td> - <td>N/A</td> - <td>N/A</td> - <td>2026-02-28</td> - </tr> - <tr> - <td>1.31</td> - <td>N/A</td> - <td>1.117.0 and later</td> - <td>v2024.08.26-0 and later</td> - <td>2025-10-28</td> - </tr> - <tr> - <td>1.30</td> - <td>1.16.0 and later</td> - <td>1.109.1 and later</td> - <td>v2024.05.03-0 and later</td> - <td>2025-06-28</td> - </tr> - <tr> - <td>1.29</td> - <td>1.0.0 and later</td> - <td>1.105.2 and later</td> - <td>v2024.01.02-0 and later</td> - <td>2025-02-28</td> - </tr> -</table> - -Replicated support for end-customer installations is limited to those installs using a Replicated provided installer product, such as KOTS, kURL or Embedded Cluster, available with the [Business or Enterprise plans](https://www.replicated.com/pricing). Replicated support for direct Helm CLI installs or other vendor provided installers is limited to the successful distribution of the software to the end-customer, as well as any issues with the Replicated SDK if included with the installation. - - -The information contained herein is believed to be accurate as of the date of publication, but updates and revisions may be posted periodically and without notice. - -Last modified January 2, 2025. - -================ -File: docs/vendor/policies-vulnerability-patch.md -================ -# Vulnerability Patch Policy - -While it’s our goal to distribute vulnerability-free versions of all components, this isn’t always possible. -Kubernetes and KOTS are made from many components, each authored by different vendors. 
-
-The best way to stay ahead of vulnerabilities is to run the latest version and have a strategy to quickly update when a patch is available.
-
-## How We Scan
-
-Our build pipeline uses [Trivy](https://www.aquasec.com/products/trivy/) to scan for and detect known, published vulnerabilities in our images.
-It’s possible that other security scanners will detect a different set of results.
-We commit to patching vulnerabilities according to the timeline below based on the results of our internal scans.
-
-If you or your customer detects a different vulnerability using a different scanner, we encourage you to report it to us in a GitHub issue or Slack message, or by opening a support issue from the Replicated Vendor Portal.
-Our team will evaluate the vulnerability and determine the best course of action.
-
-## Base Images
-
-KOTS images are built on top of Chainguard's open source [Wolfi](https://edu.chainguard.dev/open-source/wolfi/overview/) base image. Wolfi is a Linux undistro that is focused on supply chain security.
-
-KOTS has automation that uses the Chainguard [melange](https://edu.chainguard.dev/open-source/melange/overview/) and [apko](https://edu.chainguard.dev/open-source/apko/overview/) projects to build packages and assemble images on Wolfi. Building and assembling images in this way helps to ensure that any CVEs can be resolved quickly and efficiently.
-
-## Upstream CVE Disclosure
-
-Replicated KOTS, kURL, and Embedded Cluster deliver many upstream Kubernetes and ecosystem components.
-We do not build these packages and rely on the upstream software vendor to distribute patches.
-Our intent is to make any patches available as soon as possible, but we guarantee the following timeline to make upstream patches available after we learn about the vulnerability and a patch is available to us:
-
-| CVE Level | Time to release |
-|-----------|-----------------|
-| Critical | Within 2 weeks |
-| High | Within 60 days |
-| Medium | Within 90 days |
-| Low | Best effort unless risk accepted |
-
-## Notable Upstream CVEs
-
-This section lists CVEs that have yet to be resolved by the upstream maintainers and therefore are not patched in Replicated. This is not an exhaustive list of unpatched upstream CVEs; instead, these are noteworthy CVEs that we have evaluated and on which we offer our opinion to help with your own security reviews. When available, we will apply upstream patches in accordance with our policy described in [Upstream CVE Disclosure](#upstream-cve-disclosure) above. We will update this list after applying any upstream patches.
-
-| CVE ID | Explanation |
-|--------|------------|
-| None | N/A |
-
-## Vulnerability Management Exception Policy
-
-There might be instances where policy exceptions are required to continue using third-party software with known vulnerabilities in our on-premises products. Some reasons for an exception include:
-
-- Feature breakage or bugs in patched versions
-- Performance issues in patched versions
-- Patched version contains higher severity vulnerabilities
-
-Regardless of the reason, an exception is vetted from a business impact and security standpoint. The business review assesses the overall impact to the product created by the patched, but otherwise problematic, piece of software. The security portion determines if the CVE is applicable to this specific context and if that CVE's impact to the product’s overall security posture is acceptable.
- -In the event of a vulnerability management exception, a notice is posted containing: - -- The impacted product(s) -- The rationale for the exception -- The relevant CVE(s) -- A risk assessment in the product context for each CVE - -As subsequent versions of the vulnerable software are released, Replicated continues to research to find a solution that satisfies the business and security requirements of the original exception.  - -## Known Disclosed Vulnerabilities in our On Premises Products - -| CVE | CVE Summary | Rationale | Additional Reading | -|-----|-------------|-----------|--------------------| -| None | N/A | N/A | N/A | - -Last modified January 29, 2025. - -================ -File: docs/vendor/preflight-defining.mdx -================ -# Defining Preflight Checks - -This topic describes how to define preflight checks in Helm and Kubernetes manifest-based applications. For more information about preflight checks, see [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about). - -The information in this topic applies to applications that are installed with Helm or with Replicated KOTS. - -## Step 1: Create the Manifest File - -You can define preflight checks in a Kubernetes Secret or in a Preflight custom resource. The type of manifest file that you use depends on your application type (Helm or Kubernetes manifest-based) and the installation methods that your application supports (Helm, KOTS v1.101.0 or later, or KOTS v1.100.3 or earlier). - -* **Helm Applications**: For Helm applications, see the following guidance: - - * **(Recommended) Helm or KOTS v1.101.0 or Later**: For Helm applications installed with Helm or KOTS v1.101.0 or later, define the preflight checks in a Kubernetes Secret in your Helm chart `templates`. See [Kubernetes Secret](#secret). - - * **KOTS v1.100.3 or Earlier**: For Helm applications installed with KOTS v1.100.3 or earlier, define the preflight checks in a Preflight custom resource. See [Preflight Custom Resource](#preflight-cr). - -* **Kubernetes Manifest-Based Applications**: For Kubernetes manifest-based applications, define the preflight checks in a Preflight custom resource. See [Preflight Custom Resource](#preflight-cr). - -### Kubernetes Secret {#secret} - -For Helm applications installed with Helm or KOTS v1.101.0 or later, define preflight checks in a Kubernetes Secret in your Helm chart `templates`. This allows you to define the preflights spec only one time to support running preflight checks in both Helm and KOTS installations. - -For a tutorial that demonstrates how to define preflight checks in a Secret in chart `templates` and then run the preflight checks in both Helm and KOTS installations, see [Tutorial: Add Preflight Checks to a Helm Chart](/vendor/tutorial-preflight-helm-setup). 
-
-Add the following YAML to a Kubernetes Secret in your Helm chart `templates` directory:
-
-```yaml
-apiVersion: v1
-kind: Secret
-metadata:
-  labels:
-    troubleshoot.sh/kind: preflight
-  name: "{{ .Release.Name }}-preflight-config"
-stringData:
-  preflight.yaml: |
-    apiVersion: troubleshoot.sh/v1beta2
-    kind: Preflight
-    metadata:
-      name: preflight-sample
-    spec:
-      collectors: []
-      analyzers: []
-```
-
-As shown above, the Secret must include the following:
-
-* The label `troubleshoot.sh/kind: preflight`
-* A `stringData` field with a key named `preflight.yaml` so that the preflight binary can use this Secret when it runs from the CLI
-
-### Preflight Custom Resource {#preflight-cr}
-
-Define preflight checks in a Preflight custom resource for the following installation types:
-* Kubernetes manifest-based applications installed with any version of KOTS
-* Helm applications installed with KOTS v1.100.3 and earlier
-  :::note
-  For Helm charts installed with KOTS v1.101.0 and later, Replicated recommends that you define preflight checks in a Secret in the Helm chart `templates` instead of using the Preflight custom resource. See [Create a Secret](#secret) above.
-
-  In KOTS v1.101.0 and later, preflights defined in the Helm chart override the Preflight custom resource used by KOTS. During installation, if KOTS v1.101.0 and later cannot find preflights specified in the Helm chart archive, then KOTS searches for `kind: Preflight` in the root of the release.
-  :::
-
-Add the following YAML to a new file in a release:
-
-```yaml
-apiVersion: troubleshoot.sh/v1beta2
-kind: Preflight
-metadata:
-  name: preflights
-spec:
-  collectors: []
-  analyzers: []
-```
-
-For more information about the Preflight custom resource, see [Preflight and Support Bundle](/reference/custom-resource-preflight).
-
-## Step 2: Define Collectors and Analyzers
-
-This section describes how to define collectors and analyzers for preflight checks based on your application needs. You add the collectors and analyzers that you want to use in the `spec.collectors` and `spec.analyzers` keys in the manifest file that you created.
-
-### Collectors
-
-Collectors gather information from the cluster, the environment, the application, or other sources. Collectors generate output that is then used by the analyzers that you define to generate results for the preflight checks.
-
-The following default collectors are included automatically to gather information about the cluster and cluster resources:
-* [clusterInfo](https://troubleshoot.sh/docs/collect/cluster-info/)
-* [clusterResources](https://troubleshoot.sh/docs/collect/cluster-resources/)
-
-You do not need to manually include the `clusterInfo` or `clusterResources` collectors in the specification. To use only the `clusterInfo` and `clusterResources` collectors, delete the `spec.collectors` key from the preflight specification.
-
-The Troubleshoot open source project includes several additional collectors that you can include in the specification to gather more information from the installation environment. To view all the available collectors, see [All Collectors](https://troubleshoot.sh/docs/collect/all/) in the Troubleshoot documentation.
-
-### Analyzers
-
-Analyzers use the output from the collectors to generate results for the preflight checks, including the criteria for pass, fail, and warn outcomes and custom messages for each outcome.
- -For example, in a preflight check that checks the version of Kubernetes running in the target cluster, the analyzer can define a fail outcome when the cluster is running a version of Kubernetes less than 1.25 that includes the following custom message to the user: `The application requires Kubernetes 1.25.0 or later, and recommends 1.27.0`. - -The Troubleshoot open source project includes several analyzers that you can include in your preflight check specification. The following are some of the analyzers in the Troubleshoot project that use the default `clusterInfo` or `clusterResources` collectors: -* [clusterPodStatuses](https://troubleshoot.sh/docs/analyze/cluster-pod-statuses/) -* [clusterVersion](https://troubleshoot.sh/docs/analyze/cluster-version/) -* [deploymentStatus](https://troubleshoot.sh/docs/analyze/deployment-status/) -* [distribution](https://troubleshoot.sh/docs/analyze/distribution/) -* [nodeResources](https://troubleshoot.sh/docs/analyze/node-resources/) -* [statefulsetStatus](https://troubleshoot.sh/docs/analyze/stateful-set-status/) -* [storageClass](https://troubleshoot.sh/docs/analyze/storage-class/) - -To view all the available analyzers, see the [Analyze](https://troubleshoot.sh/docs/analyze/) section of the Troubleshoot documentation. - -### Block Installation with Required (Strict) Preflights {#strict} - -For applications installed with KOTS, you can set any preflight analyzer to `strict: true`. When `strict: true` is set, any `fail` outcomes for the analyzer block the deployment of the release. - -:::note -Strict preflight analyzers are ignored if the `exclude` property is also included and evaluates to `true`. See [exclude](https://troubleshoot.sh/docs/analyze/#exclude) in the Troubleshoot documentation. -::: - -### Examples - -For common examples of collectors and analyzers used in preflight checks, see [Examples of Preflight Specs](/vendor/preflight-examples). 
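-
-As a starting point, the following is a minimal sketch that puts the pieces of this topic together: a Preflight specification that relies on the default `clusterInfo` collector and defines a single `clusterVersion` analyzer with `strict: true`. The version numbers and messages here are illustrative assumptions, not requirements of any particular application:
-
-```yaml
-apiVersion: troubleshoot.sh/v1beta2
-kind: Preflight
-metadata:
-  name: preflights
-spec:
-  analyzers:
-    - clusterVersion:
-        # strict: true makes a fail outcome block deployment in KOTS installations
-        strict: true
-        outcomes:
-          - fail:
-              when: "< 1.25.0"
-              # Example message; adjust the versions to your application's requirements
-              message: This application requires Kubernetes 1.25.0 or later.
-          - warn:
-              when: "< 1.27.0"
-              message: Your cluster meets the minimum Kubernetes version, but 1.27.0 or later is recommended.
-          - pass:
-              message: Your cluster meets the recommended and required versions of Kubernetes.
-```
-
-Because `spec.collectors` is omitted, only the default `clusterInfo` and `clusterResources` collectors run.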
- -================ -File: docs/vendor/preflight-examples.mdx -================ -import HttpSecret from "../partials/preflights/_http-requests-secret.mdx" -import HttpCr from "../partials/preflights/_http-requests-cr.mdx" -import MySqlSecret from "../partials/preflights/_mysql-secret.mdx" -import MySqlCr from "../partials/preflights/_mysql-cr.mdx" -import K8sVersionSecret from "../partials/preflights/_k8s-version-secret.mdx" -import K8sVersionCr from "../partials/preflights/_k8s-version-cr.mdx" -import K8sDistroSecret from "../partials/preflights/_k8s-distro-secret.mdx" -import K8sDistroCr from "../partials/preflights/_k8s-distro-cr.mdx" -import NodeReqSecret from "../partials/preflights/_node-req-secret.mdx" -import NodeReqCr from "../partials/preflights/_node-req-cr.mdx" -import NodeCountSecret from "../partials/preflights/_node-count-secret.mdx" -import NodeCountCr from "../partials/preflights/_node-count-cr.mdx" -import NodeMemSecret from "../partials/preflights/_node-mem-secret.mdx" -import NodeMemCr from "../partials/preflights/_node-mem-cr.mdx" -import NodeStorageClassSecret from "../partials/preflights/_node-storage-secret.mdx" -import NodeStorageClassCr from "../partials/preflights/_node-storage-cr.mdx" -import NodeEphemStorageSecret from "../partials/preflights/_node-ephem-storage-secret.mdx" -import NodeEphemStorageCr from "../partials/preflights/_node-ephem-storage-cr.mdx" -import NodeCpuSecret from "../partials/preflights/_node-cpu-secret.mdx" -import NodeCpuCr from "../partials/preflights/_node-cpu-cr.mdx" -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -# Example Preflight Specs - -This section includes common examples of preflight check specifications. For more examples, see the [Troubleshoot example repository](https://github.com/replicatedhq/troubleshoot/tree/main/examples/preflight) in GitHub. - -## Check HTTP or HTTPS Requests from the Cluster - -The examples below use the `http` collector and the `textAnalyze` analyzer to check that an HTTP request to the Slack API at `https://api.slack.com/methods/api.test` made from the cluster returns a successful response of `"status": 200,`. - -For more information, see [HTTP](https://troubleshoot.sh/docs/collect/http/) and [Regular Expression](https://troubleshoot.sh/docs/analyze/regex/) in the Troubleshoot documentation. - -<Tabs> - <TabItem value="secret" label="Kubernetes Secret" default> - <HttpSecret/> - </TabItem> - <TabItem value="custom-resource" label="Preflight Custom Resource"> - <HttpCr/> - <p>The following shows how the <code>pass</code> outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:</p> - <img alt="Preflight checks in Admin Console showing pass message" src="/images/preflight-http-pass.png"/> - <a href="/images/preflight-http-pass.png">View a larger version of this image</a> - </TabItem> -</Tabs> - -## Check Kubernetes Version - -The examples below use the `clusterVersion` analyzer to check the version of Kubernetes running in the cluster. The `clusterVersion` analyzer uses data from the default `clusterInfo` collector. The `clusterInfo` collector is automatically included. - -For more information, see [Cluster Version](https://troubleshoot.sh/docs/analyze/cluster-version/) and [Cluster Info](https://troubleshoot.sh/docs/collect/cluster-info/) in the Troubleshoot documentation. 
- -<Tabs> - <TabItem value="secret" label="Kubernetes Secret" default> - <K8sVersionSecret/> - </TabItem> - <TabItem value="custom-resource" label="Preflight Custom Resource"> - <K8sVersionCr/> - <p>The following shows how the <code>warn</code> outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:</p> - <img alt="Preflight checks in Admin Console showing warning message" src="/images/preflight-k8s-version-warn.png"/> - <a href="/images/preflight-k8s-version-warn.png">View a larger version of this image</a> - </TabItem> -</Tabs> - -## Check Kubernetes Distribution - -The examples below use the `distribution` analyzer to check the Kubernetes distribution of the cluster. The `distribution` analyzer uses data from the default `clusterInfo` collector. The `clusterInfo` collector is automatically included. - -For more information, see [Cluster Info](https://troubleshoot.sh/docs/collect/cluster-info/) and [Distribution](https://troubleshoot.sh/docs/analyze/distribution/) in the Troubleshoot documentation. - -<Tabs> - <TabItem value="secret" label="Kubernetes Secret" default> - <K8sDistroSecret/> - </TabItem> - <TabItem value="custom-resource" label="Preflight Custom Resource"> - <K8sDistroCr/> - <p>The following shows how the <code>pass</code> outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:</p> - <img alt="Preflight checks in Admin Console showing pass message" src="/images/preflight-k8s-distro.png"/> - <a href="/images/preflight-k8s-distro.png">View a larger version of this image</a> - </TabItem> -</Tabs> - -## Check MySQL Version Using Template Functions - -The examples below use the `mysql` collector and the `mysql` analyzer to check the version of MySQL running in the cluster. - -For more information, see [Collect > MySQL](https://troubleshoot.sh/docs/collect/mysql/) and [Analyze > MySQL](https://troubleshoot.sh/docs/analyze/mysql/) in the Troubleshoot documentation. - -<Tabs> - <TabItem value="secret" label="Kubernetes Secret" default> - <p>This example uses Helm template functions to render the credentials and connection details for the MySQL server that were supplied by the user. Additionally, it uses Helm template functions to create a conditional statement so that the MySQL collector and analyzer are included in the preflight checks only when MySQL is deployed, as indicated by a <code>.Values.global.mysql.enabled</code> field evaluating to true.</p> - <p>For more information about using Helm template functions to access values from the values file, see <a href="https://helm.sh/docs/chart_template_guide/values_files/">Values Files</a>.</p> - <MySqlSecret/> - </TabItem> - <TabItem value="custom-resource" label="Preflight Custom Resource"> - <p>This example uses KOTS template functions in the Config context to render the credentials and connection details for the MySQL server that were supplied by the user in the Replicated Admin Console <strong>Config</strong> page. Replicated recommends using a template function for the URI, as shown above, to avoid exposing sensitive information. 
For more information about template functions, see <a href="/reference/template-functions-about">About Template Functions</a>.</p>
-    <p>This example also uses an analyzer with <code>strict: true</code>, which prevents installation from continuing if the preflight check fails.</p>
-    <MySqlCr/>
-    <p>The following shows how a <code>fail</code> outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade when <code>strict: true</code> is set for the analyzer:</p>
-    <img alt="Strict preflight checks in Admin Console showing fail message" src="/images/preflight-mysql-fail-strict.png"/>
-    <a href="/images/preflight-mysql-fail-strict.png">View a larger version of this image</a>
-  </TabItem>
-</Tabs>
-
-## Check Node Memory
-
-The examples below use the `nodeResources` analyzer to check the amount of memory available on the nodes in the cluster. The `nodeResources` analyzer uses data from the default `clusterResources` collector. The `clusterResources` collector is automatically included.
-
-For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) in the Troubleshoot documentation.
-
-<Tabs>
-  <TabItem value="secret" label="Kubernetes Secret" default>
-    <NodeMemSecret/>
-  </TabItem>
-  <TabItem value="custom-resource" label="Preflight Custom Resource">
-    <NodeMemCr/>
-    <p>The following shows how a <code>warn</code> outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:</p>
-    <img alt="Preflight checks in Admin Console showing warn message" src="/images/preflight-node-memory-warn.png"/>
-    <a href="/images/preflight-node-memory-warn.png">View a larger version of this image</a>
-  </TabItem>
-</Tabs>
-
-## Check Node Storage Class Availability
-
-The examples below use the `storageClass` analyzer to check that a required storage class is available in the cluster. The `storageClass` analyzer uses data from the default `clusterResources` collector. The `clusterResources` collector is automatically included.
-
-For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Storage Class](https://troubleshoot.sh/docs/analyze/storage-class/) in the Troubleshoot documentation.
-
-<Tabs>
-  <TabItem value="secret" label="Kubernetes Secret" default>
-    <NodeStorageClassSecret/>
-  </TabItem>
-  <TabItem value="custom-resource" label="Preflight Custom Resource">
-    <NodeStorageClassCr/>
-    <p>The following shows how a <code>fail</code> outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:</p>
-    <img alt="Preflight checks in Admin Console showing fail message" src="/images/preflight-storageclass-fail.png"/>
-    <a href="/images/preflight-storageclass-fail.png">View a larger version of this image</a>
-  </TabItem>
-</Tabs>
-
-## Check Node Ephemeral Storage
-
-The examples below use the `nodeResources` analyzer to check the ephemeral storage available in the nodes in the cluster. The `nodeResources` analyzer uses data from the default `clusterResources` collector. The `clusterResources` collector is automatically included.
-
-For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) in the Troubleshoot documentation.
-
-<Tabs>
-  <TabItem value="secret" label="Kubernetes Secret" default>
-    <NodeEphemStorageSecret/>
-  </TabItem>
-  <TabItem value="custom-resource" label="Preflight Custom Resource">
-    <NodeEphemStorageCr/>
-    <p>The following shows how a <code>pass</code> outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:</p>
-    <img alt="Preflight checks in Admin Console showing pass message" src="/images/preflight-ephemeral-storage-pass.png"/>
-    <a href="/images/preflight-ephemeral-storage-pass.png">View a larger version of this image</a>
-  </TabItem>
-</Tabs>
-
-## Check Requirements Are Met By At Least One Node
-
-The examples below use the `nodeResources` analyzer with filters to check that the requirements for memory, CPU cores, and architecture are met by at least one node in the cluster. The `nodeResources` analyzer uses data from the default `clusterResources` collector. The `clusterResources` collector is automatically included.
-
-For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) in the Troubleshoot documentation.
-
-<Tabs>
-  <TabItem value="secret" label="Kubernetes Secret" default>
-    <NodeReqSecret/>
-  </TabItem>
-  <TabItem value="custom-resource" label="Preflight Custom Resource">
-    <NodeReqCr/>
-    <p>The following shows how the <code>fail</code> outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:</p>
-    <img alt="Preflight checks in Admin Console showing fail message" src="/images/preflight-node-filters-faill.png"/>
-    <a href="/images/preflight-node-filters-faill.png">View a larger version of this image</a>
-  </TabItem>
-</Tabs>
-
-## Check Total CPU Cores Across Nodes
-
-The examples below use the `nodeResources` analyzer to check the total number of CPU cores across the nodes in the cluster. The `nodeResources` analyzer uses data from the default `clusterResources` collector. The `clusterResources` collector is automatically included.
-
-For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) in the Troubleshoot documentation.
-
-<Tabs>
-  <TabItem value="secret" label="Kubernetes Secret" default>
-    <NodeCpuSecret/>
-  </TabItem>
-  <TabItem value="custom-resource" label="Preflight Custom Resource">
-    <NodeCpuCr/>
-    <p>The following shows how the <code>pass</code> outcome for this preflight check is displayed in the Admin Console during KOTS installation or upgrade:</p>
-    <img alt="Preflight checks in Admin Console showing pass message" src="/images/preflight-cpu-pass.png"/>
-    <a href="/images/preflight-cpu-pass.png">View a larger version of this image</a>
-  </TabItem>
-</Tabs>
-
-================
-File: docs/vendor/preflight-host-preflights.md
-================
-# Customizing Host Preflight Checks for kURL
-
-This topic provides information about how to customize host preflight checks for installations with Replicated kURL. For information about the default host preflight checks that run for installations with Replicated Embedded Cluster, see [About Host Preflight Checks](/vendor/embedded-using#about-host-preflight-checks) in _Using Embedded Cluster_.
- -## About Host Preflight Checks -You can include host preflight checks with kURL to verify that infrastructure requirements are met for: - -- Kubernetes -- kURL add-ons -- Your application - -This helps to ensure successful installation and the ongoing health of the cluster. - -While host preflights are intended to ensure requirements are met for running the cluster, you can also use them to codify some of your application requirements so that users get feedback even earlier in the installation process, rather than waiting to run preflights after the cluster is already installed. For more information about application checks, collectors, and analyzers, see [About Preflight Checks and Support Bundles](preflight-support-bundle-about). - -Default host preflight checks verify conditions such as operating system and disk usage. Default host preflight failures block the installation from continuing and exit with a non-zero return code. Users can then update their environment and run the kURL installation script again to re-run the host preflight checks. - -Host preflight checks run automatically. The default host preflight checks that run can vary, depending on whether the installation is new, an upgrade, joining a node, or an air gap installation. Additionally, some checks only run when certain add-ons are enabled in the installer. For a complete list of default host preflight checks, see [Default Host Preflights](https://kurl.sh/docs/install-with-kurl/host-preflights#default-host-preflights) in the kURL documentation. - -There are general kURL host preflight checks that run with all installers. There are also host preflight checks included with certain add-ons. Customizations include the ability to: - - - Bypass failures - - Block an installation for warnings - - Exclude certain preflights under specific conditions, such as when a particular license entitlement is enabled - - Skip the default host preflight checks and run only custom checks - - Add custom checks to the default host preflight checks - -For more information about customizing host preflights, see [Customize Host Preflight Checks](#customize-host-preflight-checks). - -## Customize Host Preflight Checks - -The default host preflights run automatically as part of your kURL installation. You can customize the host preflight checks by disabling them entirely, adding customizations to the default checks to make them more restrictive, or completely customizing them. You can also customize the outcomes to enforce warnings or ignore failures. - -### Add Custom Preflight Checks to the Defaults - -To run customized host preflight checks in addition to the default host preflight checks, add a `hostPreflights` field to the `kurl` field in your Installer manifest. Under the `hostPreflights` field, add a host preflight specification (`kind: HostPreflight`) with your customizations. You only need to specify your customizations because the default host preflights run automatically. - -Customized host preflight checks run in addition to default host preflight checks, if the default host preflight checks are enabled. - -If you only want to make the default host preflight checks more restrictive, add your more restrictive host preflight checks to `kurl.hostPreflights`, and do not set `excludeBuiltinHostPreflights`. 
For example, if your application requires 6 CPUs but the default host preflight check requires 4 CPUs, you can simply add a custom host preflight check for 6 CPUs, since the default host preflight must pass if the more restrictive custom check passes. - -The following example shows customized `kurl` host preflight checks for: - - - An application that requires more CPUs than the default - - Accessing a website that is critical to the application - -```yaml -apiVersion: "cluster.kurl.sh/v1beta1" -kind: "Installer" -metadata: - name: "latest" -spec: - kurl: - hostPreflights: - apiVersion: troubleshoot.sh/v1beta2 - kind: HostPreflight - spec: - collectors: - - cpu: {} - - http: - collectorName: Can Access A Website - get: - url: https://myFavoriteWebsite.com - analyzers: - - cpu: - checkName: Number of CPU check - outcomes: - - fail: - when: "count < 4" - message: This server has less than 4 CPU cores - - warn: - when: "count < 6" - message: This server has less than 6 CPU cores - - pass: - message: This server has at least 6 CPU cores - - http: - checkName: Can Access A Website - collectorName: Can Access A Website - outcomes: - - warn: - when: "error" - message: Error connecting to https://myFavoriteWebsite.com - - pass: - when: "statusCode == 200" - message: Connected to https://myFavoriteWebsite.com -``` - -### Customize the Default Preflight Checks - -To customize the default host preflights: - -1. Disable the default host preflight checks using `excludeBuiltinHostPreflights: true`. -1. Copy the default `host-preflights.yaml` specification for kURL from [host-preflights.yaml](https://github.com/replicatedhq/kURL/blob/main/pkg/preflight/assets/host-preflights.yaml) in the kURL repository. -1. Copy the default `host-preflight.yaml` specification for any and all add-ons that are included in your specification and have default host preflights. For links to the add-on YAML files, see [Finding the Add-on Host Preflight Checks](https://kurl.sh/docs/create-installer/host-preflights/#finding-the-add-on-host-preflight-checks) in the kURL documentation. -1. Merge the copied host preflight specifications into one host preflight specification, and paste it to the `kurl.hostPreflights` field in the Installer YAML in the Vendor Portal. -1. Edit the defaults as needed. - -### Ignore or Enforce Warnings and Failures - -Set either of the following flags to customize the outcome of your host preflight checks: - -<table> -<tr> - <th width="30%">Flag: Value</th> - <th width="70%">Description</th> -</tr> -<tr> - <td><code>hostPreflightIgnore: true</code></td> - <td>Ignores host preflight failures and warnings. The installation proceeds regardless of host preflight outcomes.</td> -</tr> -<tr> - <td><code>hostPreflightEnforceWarnings: true</code></td> - <td>Blocks an installation if the results include a warning.</td> -</tr> -</table> - -### Disable Host Preflight Checks - -To disable the default host preflight checks for Kubernetes and all included add-ons, add the `kurl` field to your Installer manifest and add `kurl.excludeBuiltinHostPreflights: true`. In this case, no host preflight checks are run. - -`excludeBuiltinHostPreflights` is an aggregate flag, so setting it to `true` disables the default host preflights for Kubernetes and all included add-ons. 
-
-**Example:**
-
-  ```yaml
-  apiVersion: "cluster.kurl.sh/v1beta1"
-  kind: "Installer"
-  metadata:
-    name: "latest"
-  spec:
-    kurl:
-      excludeBuiltinHostPreflights: true
-  ```
-
-## Example of Customized Host Preflight Checks
-
-The following example shows:
-
-- Default host preflight checks are disabled
-- Customized host preflight checks run
-- The installation is blocked if there is a warning
-
-```yaml
-apiVersion: "cluster.kurl.sh/v1beta1"
-kind: "Installer"
-metadata:
-  name: "latest"
-spec:
-  kurl:
-    excludeBuiltinHostPreflights: true
-    hostPreflightEnforceWarnings: true
-    hostPreflights:
-      apiVersion: troubleshoot.sh/v1beta2
-      kind: HostPreflight
-      spec:
-        collectors:
-          - cpu: {}
-          - http:
-              collectorName: Can Access A Website
-              get:
-                url: https://myFavoriteWebsite.com
-        analyzers:
-          - cpu:
-              checkName: Number of CPU check
-              outcomes:
-                - fail:
-                    when: "count < 4"
-                    message: This server has less than 4 CPU cores
-                - warn:
-                    when: "count < 6"
-                    message: This server has less than 6 CPU cores
-                - pass:
-                    message: This server has at least 6 CPU cores
-          - http:
-              checkName: Can Access A Website
-              collectorName: Can Access A Website
-              outcomes:
-                - warn:
-                    when: "error"
-                    message: Error connecting to https://myFavoriteWebsite.com
-                - pass:
-                    when: "statusCode == 200"
-                    message: Connected to https://myFavoriteWebsite.com
-```
-
-================
-File: docs/vendor/preflight-running.md
-================
-# Running Preflight Checks for Helm Installations
-
-This topic describes how to use the preflight kubectl plugin to run preflight checks for applications installed with the Helm CLI.
-
-## Overview
-
-For applications installed with the Helm CLI, your users can optionally run preflight checks using the open source preflight kubectl plugin before they run `helm install`.
-
-The preflight plugin requires a preflight check specification as input. For Helm chart-based applications, the specification is defined in a Secret in the Helm chart `templates` directory. For information about how to configure preflight checks for your application, see [Defining Preflight Checks](preflight-defining).
-
-To run preflight checks that are defined in your application Helm chart templates, your users run `helm template` to render the Helm chart templates and then provide the result to the preflight plugin as stdin. The preflight plugin automatically filters the stream of stdout from the `helm template` command to find and run any preflight specifications.
-
-## Prerequisite
-
-The preflight kubectl plugin is required to run preflight checks for Helm CLI installations. The preflight plugin is a client-side utility that adds a single binary to the path.
-
-To install the preflight plugin using krew, run the following command:
-
-```
-curl https://krew.sh/preflight | bash
-```
-For information about the preflight plugin, including additional installation options, see [Getting Started](https://troubleshoot.sh/docs/) in the open source Troubleshoot documentation.
-
-## Command
-
-```
-helm template HELM_CHART | kubectl preflight -
-```
-
-Where `HELM_CHART` is the Helm chart that contains the preflight specification.
-
-For all available options with this command, see [Run Preflight Checks using the CLI](https://troubleshoot.sh/docs/preflight/cli-usage/#options) in the open source Troubleshoot documentation.
-
-**Examples:**
-
-```
-helm template gitea-1.0.6.tgz | kubectl preflight -
-```
-```
-helm template gitea | kubectl preflight -
-```
-```
-helm template oci://myregistry.io/org/examplechart | kubectl preflight -
-```
-
-## Run Preflight Checks from a Release
-
-When you promote a release that contains one or more Helm charts, the Helm charts are automatically pushed to the Replicated registry. To run preflight checks before installing a release, your users must first log in to the Replicated registry where they can access your application Helm chart containing the preflight specification.
-
-To run preflight checks from a release before installation:
-
-1. In the [Vendor Portal](https://vendor.replicated.com/apps/gitea-boxer/customers), go to the **Customers** page. Click on the name of the target customer.
-
-1. On the landing page for the customer, click **Helm install instructions**.
-
-    The **Helm install instructions** dialog opens:
-
-    <img alt="Helm install instructions dialog with preflight checks" src="/images/helm-install-preflights.png" width="550px"/>
-
-    [View a larger version of this image](/images/helm-install-preflights.png)
-
-1. Run the commands provided in the dialog:
-
-    1. Run the first command to log in to the Replicated registry:
-
-       ```
-       helm registry login registry.replicated.com --username USERNAME --password PASSWORD
-       ```
-
-       Where:
-       - `USERNAME` is the customer's email address.
-       - `PASSWORD` is the customer's license ID.
-
-       **Example:**
-       ```
-       helm registry login registry.replicated.com --username example@companyname.com --password 1234abcd
-       ```
-
-    1. Run the second command to install the kubectl plugin with krew:
-
-       ```
-       curl https://krew.sh/preflight | bash
-       ```
-
-    1. Run the third command to run preflight checks:
-
-       ```
-       helm template oci://registry.replicated.com/APP_SLUG/CHANNEL/CHART | kubectl preflight -
-       ```
-
-       Where:
-       - `APP_SLUG` is the name of the application.
-       - `CHANNEL` is the lowercased name of the release channel.
-       - `CHART` is the name of the Helm chart.
-
-       **Examples:**
-
-       ```
-       helm template oci://registry.replicated.com/gitea-app/unstable/gitea | kubectl preflight -
-       ```
-       ```
-       helm template oci://registry.replicated.com/gitea-app/unstable/gitea --values values.yaml | kubectl preflight -
-       ```
-
-       For all available options with this command, see [Run Preflight Checks using the CLI](https://troubleshoot.sh/docs/preflight/cli-usage/#options) in the open source Troubleshoot documentation.
-
-    1. (Optional) Run the fourth command to install the application. For more information, see [Installing with Helm](install-with-helm).
-
-## (Optional) Save Preflight Check Results
-
-The output of the preflight plugin shows the success, warning, or fail message for each preflight check, depending on how it was configured. You can ask your users to send you the results of the preflight checks if needed.
-
-To save the results of preflight checks to a `.txt` file, users can press `s` when viewing the results from the CLI, as shown in the example below:
-
-![Save output dialog](/images/helm-preflight-save-output.png)
-
-[View a larger version of this image](/images/helm-preflight-save-output.png)
-
-================
-File: docs/vendor/preflight-sb-helm-templates-about.md
-================
-# Using Helm Templates in Specifications
-
-You can use Helm templates to configure collectors and analyzers for preflight checks and support bundles for Helm installations.
- -Helm templates can be useful when you need to: - -- Run preflight checks based on certain conditions being true or false, such as the customer wants to use an external database. -- Pull in user-specific information from the values.yaml file, such as the version a customer is using for an external database. - -You can also use Helm templating with the Troubleshoot template functions for the `clusterPodStatuses` analyzer. For more information, see [Helm and Troubleshoot Template Example](#troubleshoot). - -## Helm Template Example - -In the following example, the `mysql` collector is included in a preflight check if the customer does not want to use the default MariaDB. This is indicated by the template `{{- if eq .Values.global.mariadb.enabled false -}}`. - -This specification also takes the MySQL connection string information from the `values.yaml` file, indicated by the template `'{{ .Values.global.externalDatabase.user }}:{{ .Values.global.externalDatabase.password }}@tcp({{ .Values.global.externalDatabase.host }}:{{ .Values.global.externalDatabase.port }})/{{ .Values.global.externalDatabase.database }}?tls=false'` in the `uri` field. - -Additionally, the specification verifies the maximum number of nodes in the `values.yaml` file is not exceeded by including the template `'count() > {{ .Values.global.maxNodeCount }}'` for the `nodeResources` analyzer. - -```yaml -{{- define "preflight.spec" }} -apiVersion: troubleshoot.sh/v1beta2 -kind: Preflight -metadata: - name: preflight-sample -spec: - {{ if eq .Values.global.mariadb.enabled false }} - collectors: - - mysql: - collectorName: mysql - uri: '{{ .Values.global.externalDatabase.user }}:{{ .Values.global.externalDatabase.password }}@tcp({{ .Values.global.externalDatabase.host }}:{{ .Values.global.externalDatabase.port }})/{{ .Values.global.externalDatabase.database }}?tls=false' - {{ end }} - analyzers: - - nodeResources: - checkName: Node Count Check - outcomes: - - fail: - when: 'count() > {{ .Values.global.maxNodeCount }}' - message: "The cluster has more than {{ .Values.global.maxNodeCount }} nodes." - - pass: - message: You have the correct number of nodes. - - clusterVersion: - outcomes: - - fail: - when: "< 1.22.0" - message: The application requires at least Kubernetes 1.22.0, and recommends 1.23.0. - uri: https://kubernetes.io - - warn: - when: "< 1.23.0" - message: Your cluster meets the minimum version of Kubernetes, but we recommend you update to 1.23.0 or later. - uri: https://kubernetes.io - - pass: - message: Your cluster meets the recommended and required versions of Kubernetes. - {{ if eq .Values.global.mariadb.enabled false }} - - mysql: - checkName: Must be MySQL 8.x or later - collectorName: mysql - outcomes: - - fail: - when: connected == false - message: Cannot connect to MySQL server - - fail: - when: version < 8.x - message: The MySQL server must be at least version 8 - - pass: - message: The MySQL server is ready - {{ end }} -{{- end }} ---- -apiVersion: v1 -kind: Secret -metadata: - labels: - app.kubernetes.io/managed-by: {{ .Release.Service | quote }} - app.kubernetes.io/instance: {{ .Release.Name | quote }} - app.kubernetes.io/version: {{ .Chart.AppVersion }} - helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" - troubleshoot.sh/kind: preflight - name: "{{ .Release.Name }}-preflight-config" -stringData: - preflight.yaml: | -{{- include "preflight.spec" . 
| indent 4 }}
-```
-
-## Helm and Troubleshoot Template Example {#troubleshoot}
-
-You can also use Helm templates with the Troubleshoot template functions to automatically add the Pod name and namespace to a message when a `clusterPodStatuses` analyzer fails. For more information about the Troubleshoot template function, see [Cluster Pod Statuses](https://troubleshoot.sh/docs/analyze/cluster-pod-statuses/) in the Troubleshoot documentation.
-
-When you add the `clusterPodStatuses` analyzer template function values (such as `{{ .Name }}`) to your Helm template, you must encapsulate the Helm template using \{\{ ` ` \}\} so that Helm does not expand it.
-
-The following example shows an analyzer that uses Troubleshoot templates and the override for Helm:
-
-```yaml
-# This is the support bundle config secret that will be used to generate the support bundle
-apiVersion: v1
-kind: Secret
-metadata:
-  labels:
-    troubleshoot.sh/kind: support-bundle
-  name: {{ .Release.Name }}-support-bundle
-  namespace: {{ .Release.Namespace }}
-type: Opaque
-stringData:
-  # This is the support bundle spec that will be used to generate the support bundle
-  # Notes: we use {{ .Release.Namespace }} to ensure that the support bundle is scoped to the release namespace
-  # We can use any of Helm's templating features here, including {{ .Values.someValue }}
-  support-bundle-spec: |
-    apiVersion: troubleshoot.sh/v1beta2
-    kind: SupportBundle
-    metadata:
-      name: support-bundle
-    spec:
-      collectors:
-        - clusterInfo: {}
-        - clusterResources: {}
-        - logs:
-            selector:
-              - app=someapp
-            namespace: {{ .Release.Namespace }}
-      analyzers:
-        - clusterPodStatuses:
-            name: unhealthy
-            namespaces:
-              - default
-              - myapp-namespace
-            outcomes:
-              - fail:
-                  when: "== CrashLoopBackOff"
-                  message: {{ `Pod {{ .Namespace }}/{{ .Name }} is in a CrashLoopBackOff state.` }}
-              - fail:
-                  when: "== ImagePullBackOff"
-                  message: {{ `Pod {{ .Namespace }}/{{ .Name }} is in an ImagePullBackOff state.` }}
-              - fail:
-                  when: "== Pending"
-                  message: {{ `Pod {{ .Namespace }}/{{ .Name }} is in a Pending state.` }}
-              - fail:
-                  when: "== Evicted"
-                  message: {{ `Pod {{ .Namespace }}/{{ .Name }} is in an Evicted state.` }}
-              - fail:
-                  when: "== Terminating"
-                  message: {{ `Pod {{ .Namespace }}/{{ .Name }} is in a Terminating state.` }}
-              - fail:
-                  when: "== Init:Error"
-                  message: {{ `Pod {{ .Namespace }}/{{ .Name }} is in an Init:Error state.` }}
-              - fail:
-                  when: "== Init:CrashLoopBackOff"
-                  message: {{ `Pod {{ .Namespace }}/{{ .Name }} is in an Init:CrashLoopBackOff state.` }}
-              - fail:
-                  when: "!= Healthy" # Catch all unhealthy pods. A pod is considered healthy if it has a status of Completed, or Running and all of its containers are ready.
-                  message: {{ `Pod {{ .Namespace }}/{{ .Name }} is unhealthy with a status of {{ .Status.Reason }}.` }}
-```
-
-================
-File: docs/vendor/preflight-support-bundle-about.mdx
-================
-import Overview from "../partials/preflights/_preflights-sb-about.mdx"
-
-# About Preflight Checks and Support Bundles
-
-This topic provides an introduction to preflight checks and support bundles, which are provided by the [Troubleshoot](https://troubleshoot.sh/) open source project.
-
-## Overview
-
-<Overview/>
-
-Preflight checks and support bundles consist of _collectors_, _redactors_, and _analyzers_ that are defined in a YAML specification.
When preflight checks or support bundles are executed, data is collected, redacted, then analyzed to provide insights to users, as illustrated in the following diagram: - -![Troubleshoot Workflow Diagram](/images/troubleshoot-workflow-diagram.png) - -[View a larger version of this image](/images/troubleshoot-workflow-diagram.png) - -For more information about each step in this workflow, see the sections below. - -### Collect - -During the collection phase, _collectors_ gather information from the cluster, the environment, the application, and other sources. - -The data collected depends on the types of collectors that are included in the preflight or support bundle specification. For example, the Troubleshoot project provides collectors that can gather information about the Kubernetes version that is running in the cluster, information about database servers, logs from pods, and more. - -For more information, see the [Collect](https://troubleshoot.sh/docs/collect/) section in the Troubleshoot documentation. - -### Redact - -During the redact phase, _redactors_ censor sensitive customer information from the data before analysis. By default, the following information is automatically redacted: - -- Passwords -- API token environment variables in JSON -- AWS credentials -- Database connection strings -- URLs that include usernames and passwords - -For Replicated KOTS installations, it is also possible to add custom redactors to redact additional data. For more information, see the [Redact](https://troubleshoot.sh/docs/redact/) section in the Troubleshoot documentation. - -### Analyze - -During the analyze phase, _analyzers_ use the redacted data to provide insights to users. - -For preflight checks, analyzers define the pass, fail, and warning outcomes, and can also display custom messages to the user. For example, you can define a preflight check that fails if the cluster's Kubernetes version does not meet the minimum version that your application supports. - -For support bundles, analyzers can be used to identify potential problems and share relevant troubleshooting guidance with users. Additionally, when a support bundle is uploaded to the Vendor Portal, it is extracted and automatically analyzed. The goal of analyzers in support bundles is to surface known issues or hints of what might be a problem to make troubleshooting easier. - -For more information, see the [Analyze](https://troubleshoot.sh/docs/analyze/) section in the Troubleshoot documentation. - -## Preflight Checks - - -This section provides an overview of preflight checks, including how preflights are defined and run. - -### Overview - -Preflight checks let you define requirements for the cluster where your application is installed. When run, preflight checks provide clear feedback to your customer about any missing requirements or incompatibilities in the cluster before they install or upgrade your application. For KOTS installations, preflight checks can also be used to block the deployment of the application if one or more requirements are not met. - -Thorough preflight checks provide increased confidence that an installation or upgrade will succeed and help prevent support escalations. - -### About Host Preflights {#host-preflights} - -_Host preflight checks_ automatically run during [Replicated Embedded Cluster](/vendor/embedded-overview) and [Replicated kURL](/vendor/kurl-about) installations on a VM or bare metal server. 
The purpose of host preflight checks is to verify that the user's installation environment meets the requirements of the Embedded Cluster or kURL installer, such as the number of CPU cores in the system, available disk space, and memory usage. If any of the host preflight checks fail, installation is blocked and a message describing the failure is displayed. - -Host preflight checks are separate from any application-specific preflight checks that are defined in the release, which run in the Admin Console before the application is deployed with KOTS. Both Embedded Cluster and kURL have default host preflight checks that are specific to the requirements of the given installer. For kURL installations, it is possible to customize the default host preflight checks. - -For more information about the default Embedded Cluster host preflight checks, see [Host Preflight Checks](/vendor/embedded-using#about-host-preflight-checks) in _Using Embedded Cluster_. - -For more information about kURL host preflight checks, including information about how to customize the defaults, see [Customizing Host Preflight Checks for kURL](/vendor/preflight-host-preflights). - -### Defining Preflights - -To add preflight checks for your application, create a Preflight YAML specification that defines the collectors and analyzers that you want to include. - -For information about how to add preflight checks to your application, including examples, see [Defining Preflight Checks](preflight-defining). - -### Blocking Installation with Required (Strict) Preflights - -For applications installed with KOTS, it is possible to block the deployment of a release if a preflight check fails. This is helpful when it is necessary to prevent an installation or upgrade from continuing unless a given requirement is met. - -You can add required preflight checks for an application by including `strict: true` for the target analyzer in the preflight specification. For more information, see [Block Installation with Required Preflights](preflight-defining#strict) in _Defining Preflight Checks_. - -### Running Preflights - -This section describes how users can run preflight checks for KOTS and Helm installations. - -#### Replicated Installations - -For Replicated installations with Embedded Cluster, KOTS, or kURL, preflight checks run automatically as part of the installation process. The results of the preflight checks are displayed either in the KOTS Admin Console or in the KOTS CLI, depending on the installation method. - -Additionally, users can access preflight checks from the Admin Console after installation to view their results and optionally re-run the checks. - -The following shows an example of the results of preflight checks displayed in the Admin Console during installation: - -![Preflight results in Admin Console](/images/preflight-warning.png) - -[View a larger version of this image](/images/preflight-warning.png) - -#### Helm Installations - -For installations with Helm, the preflight kubectl plugin is required to run preflight checks. The preflight plugin is a client-side utility that adds a single binary to the path. For more information, see [Getting Started](https://troubleshoot.sh/docs/) in the Troubleshoot documentation. - -Users can optionally run preflight checks before they run `helm install`. 
The results of the preflight checks are then displayed through the CLI, as shown in the example below: - -![Save output dialog](/images/helm-preflight-save-output.png) - -[View a larger version of this image](/images/helm-preflight-save-output.png) - -For more information, see [Running Preflight Checks for Helm Installations](preflight-running). - -## Support Bundles - -This section provides an overview of support bundles, including how support bundles are customized and generated. - -### Overview - -Support bundles collect and analyze troubleshooting data from customer environments, helping both users and support teams diagnose problems with application deployments. - -Support bundles can collect a variety of important cluster-level data from customer environments, such as: -* Pod logs -* Node resources and status -* The status of replicas in a Deployment -* Cluster information -* Resources deployed to the cluster -* The history of Helm releases installed in the cluster - -Support bundles can also be used for more advanced use cases, such as checking that a command successfully executes in a pod in the cluster, or that an HTTP request returns a successful response. - -Support bundles then use the data collected to provide insights to users on potential problems or suggested troubleshooting steps. The troubleshooting data collected and analyzed by support bundles not only helps users to self-resolve issues with their application deployment, but also helps reduce the amount of time required by support teams to resolve requests by ensuring they have access to all the information they need up front. - -### About Host Support Bundles - -For installations on VMs or bare metal servers with [Replicated Embedded Cluster](/vendor/embedded-overview) or [Replicated kURL](/vendor/kurl-about), it is possible to generate a support bundle that includes host-level information to help troubleshoot failures related to host configuration like DNS, networking, or storage problems. - -For Embedded Cluster installations, a default spec can be used to generate support bundles that include cluster- and host-level information. See [Generating Host Bundles for Embedded Cluster](/vendor/support-bundle-embedded). - -For kURL installations, vendors can customize a host support bundle spec for their application. See [Generating Host Bundles for kURL](/vendor/support-host-support-bundles). - -### Customizing Support Bundles - -To enable support bundles for your application, add a support bundle YAML specification to a release. An empty support bundle specification automatically includes several default collectors and analyzers. You can also optionally customize the support bundle specification by adding, removing, or editing collectors and analyzers. - -For more information, see [Adding and Customizing Support Bundles](support-bundle-customizing). - -### Generating Support Bundles - -Users generate support bundles as `tar.gz` files from the command line, using the support-bundle kubectl plugin. Your customers can share their support bundles with your team by sending you the resulting `tar.gz` file. - -KOTS users can also generate and share support bundles from the KOTS Admin Console. - -For more information, see [Generating Support Bundles](support-bundle-generating).
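 - -For example, with the support-bundle plugin installed, a user might generate a bundle from a spec file saved locally as `support-bundle.yaml` (a minimal sketch; the filename is illustrative): - -```bash -# Generate a support bundle from a local spec file. The plugin collects, -# redacts, and analyzes the data, then writes a tar.gz archive to the -# current directory that can be shared with the vendor. -kubectl support-bundle ./support-bundle.yaml -```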
 - -================ -File: docs/vendor/private-images-about.md -================ -# About the Replicated Proxy Registry - -This topic describes how the Replicated proxy registry can be used to grant proxy access to your application's private images or allow pull-through access of public images. - -## Overview - -If your application images are available in a private image registry exposed to the internet such as Docker Hub or Amazon Elastic Container Registry (ECR), then the Replicated proxy registry can grant proxy, or _pull-through_, access to the images without exposing registry credentials to your customers. When you use the proxy registry, you do not have to modify the process that you already use to build and push images to deploy your application. - -To grant proxy access, the proxy registry uses the customer licenses that you create in the Replicated Vendor Portal. This allows you to revoke a customer’s ability to pull private images by editing their license, rather than having to manage image access through separate identity or authentication systems. For example, when a trial license expires, the customer's ability to pull private images is automatically revoked. - -The following diagram demonstrates how the proxy registry pulls images from your external registry, and how deployed instances of your application pull images from the proxy registry: - -![Proxy registry workflow diagram](/images/private-registry-diagram.png) - -[View a larger version of this image](/images/private-registry-diagram-large.png) - -## About Enabling the Proxy Registry - -The proxy registry requires read-only credentials to your private registry to access your application images. See [Connecting to an External Registry](/vendor/packaging-private-images). - -After connecting your registry, the steps to enable the proxy registry vary depending on your application deployment method. For more information, see: -* [Using the Proxy Registry with KOTS Installations](/vendor/private-images-kots) -* [Using the Proxy Registry with Helm Installations](/vendor/helm-image-registry) - -## About Allowing Pull-Through Access of Public Images - -Using the Replicated proxy registry to grant pull-through access to public images can simplify network access requirements for your customers, as they only need to whitelist a single domain (either `proxy.replicated.com` or your custom domain) instead of multiple registry domains. - -For more information about how to pull public images through the proxy registry, see [Connecting to a Public Registry through the Proxy Registry](/vendor/packaging-public-images). - -================ -File: docs/vendor/private-images-kots.mdx -================ -import Deprecated from "../partials/helm/_replicated-deprecated.mdx" -import StepCreds from "../partials/proxy-service/_step-creds.mdx" -import StepCustomDomain from "../partials/proxy-service/_step-custom-domain.mdx" - -# Using the Proxy Registry with KOTS Installations - -This topic describes how to use the Replicated proxy registry with applications deployed with Replicated KOTS. - -## Overview - -Replicated KOTS automatically creates the required image pull secret for accessing the Replicated proxy registry during application deployment. When possible, KOTS also automatically rewrites image names in the application manifests to the location of the image at `proxy.replicated.com` or your custom domain. 
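 - -For example, a private image hosted at `quay.io/my-org/api:v1.0.1` would be pulled through the proxy registry at a location like the following, where `my-kots-app` is a hypothetical application slug: - -``` -proxy.replicated.com/proxy/my-kots-app/quay.io/my-org/api:v1.0.1 -```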
 - -### Image Pull Secret - -During application deployment, KOTS automatically creates an `imagePullSecret` with `type: kubernetes.io/dockerconfigjson` that is based on the customer license. This secret is used to authenticate with the proxy registry and grant proxy access to private images. - -For information about how Kubernetes uses the `kubernetes.io/dockerconfigjson` Secret type to authenticate to a private image registry, see [Pull an Image from a Private Registry](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) in the Kubernetes documentation. - -### Image Location Patching (Standard Manifests and HelmChart v1) - -For applications packaged with standard Kubernetes manifests (or Helm charts deployed with the [HelmChart v1](/reference/custom-resource-helmchart) custom resource), KOTS automatically patches image names to the location of the image at `proxy.replicated.com` or your custom domain during deployment. If KOTS receives a 401 response when attempting to load image manifests using the image reference from the PodSpec, it assumes that this is a private image that must be proxied through the proxy registry. - -KOTS uses Kustomize to patch the `midstream/kustomization.yaml` file to change the image name during deployment to reference the proxy registry. For example, a PodSpec for a Deployment references a private image hosted at `quay.io/my-org/api:v1.0.1`: - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: example -spec: - template: - spec: - containers: - - name: api - image: quay.io/my-org/api:v1.0.1 -``` - -When this application is deployed, KOTS detects that it cannot access -the image at quay.io. So, it creates a patch in the `midstream/kustomization.yaml` -file that changes the image name in all manifest files for the application. This causes the container runtime in the cluster to use the proxy registry to pull the images, using the license information provided to KOTS for authentication. - -```yaml -apiVersion: kustomize.config.k8s.io/v1beta1 -bases: -- ../../base -images: -- name: quay.io/my-org/api:v1.0.1 - newName: proxy.replicated.com/proxy/my-kots-app/quay.io/my-org/api -``` - -## Enable the Proxy Registry - -This section describes how to enable the proxy registry for applications deployed with KOTS, including how to ensure that image names are rewritten and that the required image pull secret is provided. - -To enable the proxy registry: - -1. <StepCreds/> - -1. <StepCustomDomain/> - -1. Rewrite image names to the location of the image at `proxy.replicated.com` or your custom domain. Also, ensure that the correct image pull secret is provided for all private images. The steps required to configure image names and add the image pull secret vary depending on your application type: - - * **HelmChart v2**: For Helm charts deployed with the [HelmChart v2](/reference/custom-resource-helmchart-v2) custom resource, configure the HelmChart v2 custom resource to dynamically update image names in your Helm chart and to inject the image pull secret that is automatically created by KOTS. For instructions, see [Configuring the HelmChart Custom Resource v2](/vendor/helm-native-v2-using). - - * **Standard Manifests or HelmChart v1**: For standard manifest-based applications or Helm charts deployed with the [HelmChart v1](/reference/custom-resource-helmchart) custom resource, no additional configuration is required. 
KOTS automatically rewrites image names and injects image pull secrets during deployment for these application types. - - :::note - <Deprecated/> - ::: - - * **Kubernetes Operators**: For applications packaged with Kubernetes Operators, KOTS cannot modify pods that are created at runtime by the Operator. To support the use of private images in all environments, the Operator code should use KOTS functionality to determine the image name and image pull secrets for all pods when they are created. For instructions, see [Referencing Images](/vendor/operator-referencing-images) in the _Packaging Kubernetes Operators_ section. - -1. If you are deploying Pods to namespaces other than the application namespace, add the namespace to the `additionalNamespaces` attribute of the KOTS Application custom resource. This ensures that KOTS can provision the `imagePullSecret` in the namespace to allow the Pod to pull the image. For instructions, see [Defining Additional Namespaces](operator-defining-additional-namespaces). - -================ -File: docs/vendor/private-images-replicated.mdx -================ -import TeamTokenNote from "../partials/vendor-api/_team-token-note.mdx" - -# Using the Replicated Registry for KOTS Installations - -This topic describes how to push images to the Replicated private registry. - -## Overview - -For applications installed with KOTS, you can host private images on the Replicated registry. Hosting your images on the Replicated registry is useful if you do not already have your images in an existing private registry. It is also useful for testing purposes. - -Images pushed to the Replicated registry are displayed on the **Images** page in the Vendor Portal: - -![Replicated Private Registry section of the vendor portal Images page](/images/images-replicated-registry.png) - -[View a larger version of this image](/images/images-replicated-registry.png) - -For information about security for the Replicated registry, see [Replicated Registry Security](packaging-private-registry-security). - -## Limitations - -The Replicated registry has the following limitations: - -* You cannot delete images from the Replicated registry. As a workaround, you can push a new, empty image to the registry using the same tags as the target image. Replicated does not recommend removing tags from the registry because it could break older releases of your application. - -* When using Docker Build to build and push images to the Replicated registry, provenance attestations are not supported. To avoid a 400 error, include the `--provenance=false` flag to disable all provenance attestations. For more information, see [docker buildx build](https://docs.docker.com/engine/reference/commandline/buildx_build/#provenance) and [Provenance Attestations](https://docs.docker.com/build/attestations/slsa-provenance/) in the Docker documentation. - -* You might encounter a timeout error when pushing images with layers close to or exceeding 2GB in size, such as: "received unexpected HTTP status: 524." To work around this, reduce the size of the image layers and push the image again. If the 524 error persists, continue decreasing the layer sizes until the push is successful. - -## Push Images to the Replicated Registry - -This procedure describes how to tag and push images to the Replicated registry. For more information about building, tagging, and pushing Docker images, see the -[Docker CLI documentation](https://docs.docker.com/engine/reference/commandline/cli/). - -To push images to the Replicated registry: - -1. 
Do one of the following to connect to the `registry.replicated.com` container registry: - * **(Recommended) Log in with a user token**: Use `docker login registry.replicated.com` with your Vendor Portal email as the username and a Vendor Portal user token as the password. For more information, see [User API Tokens](replicated-api-tokens#user-api-tokens) in _Generating API Tokens_. - * **Log in with a service account token:** Use `docker login registry.replicated.com` with a Replicated Vendor Portal service account token as the password. If you have an existing team token, you can use that instead. You can use any string as the username. For more information, see [Service Accounts](replicated-api-tokens#service-accounts) in _Generating API Tokens_. - - <TeamTokenNote/> - - * **Log in with your credentials**: Use `docker login registry.replicated.com` with your Vendor Portal email and password as the credentials. - -1. Tag your private image with the Replicated registry hostname in the standard -Docker format: - - ``` - docker tag IMAGE_NAME registry.replicated.com/APPLICATION_SLUG/TARGET_IMAGE_NAME:TAG - ``` - - Where: - * `IMAGE_NAME` is the name of the existing private image for your application. - * `APPLICATION_SLUG` is the unique slug for the application. You can find the application slug on the **Application Settings** page in the Vendor Portal. For more information, see [Get the Application Slug](/vendor/vendor-portal-manage-app#slug) in _Managing Applications_. - * `TARGET_IMAGE_NAME` is a name for the image. Replicated recommends that the `TARGET_IMAGE_NAME` is the same as the `IMAGE_NAME`. - * `TAG` is a tag for the image. - - For example: - - ```bash - docker tag worker registry.replicated.com/myapp/worker:1.0.1 - ``` - -1. Push your private image to the Replicated registry using the following format: - - ``` - docker push registry.replicated.com/APPLICATION_SLUG/TARGET_IMAGE_NAME:TAG - ``` - Where: - * `APPLICATION_SLUG` is the unique slug for the application. - * `TARGET_IMAGE_NAME` is a name for the image. Use the same name that you used when tagging the image in the previous step. - * `TAG` is a tag for the image. Use the same tag that you used when tagging the image in the previous step. - - For example: - - ```bash - docker push registry.replicated.com/myapp/worker:1.0.1 - ``` - -1. In the [Vendor Portal](https://vendor.replicated.com/), go to **Images** and scroll down to the **Replicated Private Registry** section to confirm that the image was pushed. - -================ -File: docs/vendor/private-images-tags-digests.md -================ -# Using Image Tags and Digests - -This topic describes using image tags and digests with your application images. It includes information about when image tags and digests are supported, and how to enable support for image digests in air gap bundles. 
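 - -For reference, the following shows the difference between referencing an image by tag and by digest, using a hypothetical image and a `DIGEST` placeholder for the full `sha256` value: - -``` -# Referenced by tag -registry.example.com/my-org/api:v1.0.1 - -# Referenced by digest -registry.example.com/my-org/api@sha256:DIGEST -```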
 - -## Support for Image Tags and Digests - -The following table describes the use cases in which image tags and digests are supported: - -<table> - <tr> - <th width="10%">Installation</th> - <th width="30%">Support for Image Tags</th> - <th width="30%">Support for Image Digests</th> - </tr> - <tr> - <td>Online</td> - <td>Supported by default</td> - <td>Supported by default</td> - </tr> - <tr> - <td>Air Gap</td> - <td>Supported by default for Replicated KOTS installations</td> - <td> - <p>Supported for applications on KOTS v1.82.0 and later when the <b>Enable new air gap bundle format</b> toggle is enabled on the channel.</p> - <p>For more information, see <a href="#digests-air-gap">Using Image Digests in Air Gap Installations</a> below.</p> - </td> - </tr> -</table> - -:::note -You can use image tags and image digests together in any case where both are supported. -::: - -## Using Image Digests in Air Gap Installations {#digests-air-gap} - -For applications installed with KOTS v1.82.0 or later, you can enable a format for air gap bundles that supports the use of image digests. This air gap bundle format also ensures that identical image layers are not duplicated, resulting in a smaller air gap bundle size. - -You can enable or disable this air gap bundle format using the **Enable new air gap bundle format** toggle in the settings for any channel in the Vendor Portal. The **Enable new air gap bundle format** toggle is enabled by default. - -When you enable **Enable new air gap bundle format** on a channel, all air gap bundles that you build or rebuild on that channel use the updated air gap bundle format. - -If users on a version of KOTS earlier than v1.82.0 attempt to install or upgrade an application with an air gap bundle that uses the new air gap bundle format, then the Admin Console displays an error message when they attempt to upload the bundle. - -To enable the new air gap bundle format on a channel: - -1. In the Replicated [Vendor Portal](https://vendor.replicated.com/channels), go to the Channels page and click the edit icon in the top right of the channel where you want to use the new air gap bundle format. -1. Enable the **Enable new air gap bundle format** toggle. -1. (Recommended) To prevent users on a version of KOTS earlier than v1.82.0 from attempting to upgrade with an air gap bundle that uses the new air gap bundle format, set `minKotsVersion` to "1.82.0" in the Application custom resource manifest file. - - `minKotsVersion` defines the minimum version of KOTS required by the application release. Including `minKotsVersion` displays a warning in the Admin Console when users attempt to install or upgrade the application if they are not on the specified minimum version or later. For more information, see [Setting Minimum and Target Versions for KOTS](packaging-kots-versions). - - **Example**: - - ```yaml - apiVersion: kots.io/v1beta1 - kind: Application - metadata: - name: my-application - spec: - ... - minKotsVersion: "1.82.0" - ... - ``` - -1. Test your changes: - 1. Save and promote the release to a development environment. - 1. On the channel where you enabled **Enable new air gap bundle format**, click **Release history**. On the Release History page, click **Build** next to the latest release to create an air gap bundle with the new format. - - ![Vendor portal release history page](../../static/images/airgap-download-bundle.png) - - 1. Click **Download Airgap Bundle**. - 1. 
Install or upgrade the application with version 1.82.0 or later of the Admin Console or the KOTS CLI. Upload the new air gap bundle to confirm that the installation or upgrade completes successfully. - -================ -File: docs/vendor/quick-start.mdx -================ -import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" -import HelmPackage from "../partials/helm/_helm-package.mdx" -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import HelmChartCr from "../partials/getting-started/_gitea-helmchart-cr-ec.mdx" -import KotsCr from "../partials/getting-started/_gitea-kots-app-cr-ec.mdx" -import K8sCr from "../partials/getting-started/_gitea-k8s-app-cr.mdx" -import EcCr from "../partials/embedded-cluster/_ec-config.mdx" -import Requirements from "../partials/embedded-cluster/_requirements.mdx" - -# Replicated Quick Start - -Welcome! This topic provides a quick start workflow to help new users learn about the Replicated Platform. Complete this quick start before you onboard your application to the platform. - -## Introduction - -This quick start shows how to create, install, and update releases for a sample Helm chart in the Replicated Platform. You will repeat these same basic steps to create and test releases throughout the onboarding process to integrate Replicated features with your own application. - -This quick start introduces new Replicated users to the following common tasks in preparation for onboarding to the Replicated Platform: - -* Working with _applications_, _channels_, _releases_, and _customers_ in the Replicated Vendor Portal - -* Working with the Replicated CLI - -* Installing and updating applications on a VM with Replicated Embedded Cluster - -* Managing an installation with the Replicated KOTS Admin Console - -## Set Up the Environment - -Before you begin, ensure that you have access to a VM that meets the requirements for Embedded Cluster: - -<Requirements/> - -## Quick Start - -1. Create an account in the Vendor Portal. You can either create a new team or join an existing team. For more information, see [Creating a Vendor Account](vendor-portal-creating-account). - -1. Create an application using the Replicated CLI: - - 1. On your local machine, install the Replicated CLI: - - ```bash - brew install replicatedhq/replicated/cli - ``` - For more installation options, see [Installing the Replicated CLI](/reference/replicated-cli-installing). - - 1. Authorize the Replicated CLI: - - ```bash - replicated login - ``` - In the browser window that opens, complete the prompts to log in to your Vendor Portal account and authorize the CLI. - - 1. Create an application named `Gitea`: - - ```bash - replicated app create Gitea - ``` - - 1. Set the `REPLICATED_APP` environment variable to the application that you created: - - ```bash - export REPLICATED_APP=APP_SLUG - ``` - Where `APP_SLUG` is the unique application slug provided in the output of the `app create` command. For example, `export REPLICATED_APP=gitea-kite`. - - This allows you to interact with the application using the Replicated CLI without needing to use the `--app` flag with every command. - -1. Get the sample Bitnami Gitea Helm chart and add the Replicated SDK as a dependency: - - 1. 
Run the following command to pull and untar version 1.0.6 of the Bitnami Gitea Helm chart: - - ``` - helm pull --untar oci://registry-1.docker.io/bitnamicharts/gitea --version 1.0.6 - ``` - For more information about this chart, see the [bitnami/gitea](https://github.com/bitnami/charts/tree/main/bitnami/gitea) repository in GitHub. - - 1. Change to the new `gitea` directory that was created: - - ```bash - cd gitea - ``` - - 1. In the Helm chart `Chart.yaml`, add the Replicated SDK as a dependency: - - <DependencyYaml/> - - The Replicated SDK is a Helm chart that provides access to Replicated features and can be installed as a small service alongside your application. For more information, see [About the Replicated SDK](/vendor/replicated-sdk-overview). - - 1. Update dependencies and package the Helm chart to a `.tgz` chart archive: - - ```bash - helm package -u . - ``` - Where `-u` or `--dependency-update` is an option for the helm package command that updates chart dependencies before packaging. For more information, see [Helm Package](https://helm.sh/docs/helm/helm_package/) in the Helm documentation. - -1. Add the chart archive to a release: - - 1. In the `gitea` directory, create a subdirectory named `manifests`: - - ``` - mkdir manifests - ``` - - You will add the files required to support installation with Replicated KOTS and Replicated Embedded Cluster to this subdirectory. - - 1. Move the Helm chart archive that you created to `manifests`: - - ``` - mv gitea-1.0.6.tgz manifests - ``` - - 1. In `manifests`, create the following YAML files: - ``` - cd manifests - ``` - ``` - touch gitea.yaml kots-app.yaml k8s-app.yaml embedded-cluster.yaml - ``` - - 1. In each of the files that you created, paste the corresponding YAML provided in the tabs below: - - <Tabs> - <TabItem value="helmchart" label="gitea.yaml" default> - <h5>Description</h5> - <p>The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart. The <code>name</code> and <code>chartVersion</code> listed in the HelmChart custom resource must match the name and version of a Helm chart archive in the release. The <a href="/vendor/helm-optional-value-keys#conditionally-set-values"><code>optionalValues</code></a> field sets the specified Helm values when a given conditional statement evaluates to true. In this case, if the application is installed with Embedded Cluster, then the Gitea service type is set to `NodePort` and the node port is set to `"32000"`. 
This will allow Gitea to be accessed from the local machine after deployment for the purpose of this quick start.</p> - <h5>YAML</h5> - <HelmChartCr/> - </TabItem> - <TabItem value="kots-app" label="kots-app.yaml"> - <h5>Description</h5> - <p>The KOTS Application custom resource enables features in the Replicated Admin Console such as branding, release notes, application status indicators, and custom graphs.</p><p>The YAML below provides a name for the application to display in the Admin Console, adds a custom <em>status informer</em> that displays the status of the <code>gitea</code> Deployment resource in the Admin Console dashboard, adds a custom application icon, and adds the port where the Gitea service can be accessed so that the user can open the application after installation.</p> - <h5>YAML</h5> - <KotsCr/> - </TabItem> - <TabItem value="k8s-app" label="k8s-app.yaml"> - <h5>Description</h5> - <p>The Kubernetes SIG Application custom resource supports functionality such as including buttons and links on the Replicated Admin Console dashboard. The YAML below adds an <strong>Open App</strong> button to the Admin Console dashboard that opens the application using the service port defined in the KOTS Application custom resource.</p> - <h5>YAML</h5> - <K8sCr/> - </TabItem> - <TabItem value="ec" label="embedded-cluster.yaml"> - <h5>Description</h5> - <p>To install your application with Embedded Cluster, an Embedded Cluster Config must be present in the release. At minimum, the Embedded Cluster Config sets the version of Embedded Cluster that will be installed. You can also define several characteristics about the cluster.</p> - <h5>YAML</h5> - <EcCr/> - </TabItem> - </Tabs> - - 1. Lint the YAML files: - - ```bash - replicated release lint --yaml-dir . - ``` - **Example output:** - ```bash - RULE TYPE FILENAME LINE MESSAGE - config-spec warn Missing config spec - preflight-spec warn Missing preflight spec - troubleshoot-spec warn Missing troubleshoot spec - nonexistent-status-informer-object warn kots-app.yaml 8 Status informer points to a nonexistent kubernetes object. If this is a Helm resource, this warning can be ignored. - ``` - :::note - You can ignore any warning messages for the purpose of this quick start. - ::: - - 1. Create the release and promote it to the Unstable channel: - - ```bash - replicated release create --yaml-dir . --promote Unstable - ``` - **Example output**: - ```bash - • Reading manifests from . ✓ - • Creating Release ✓ - • SEQUENCE: 1 - • Promoting ✓ - • Channel 2kvjwEj4uBaCMoTigW5xty1iiw6 successfully set to release 1 - ``` - -1. Create a customer so that you can install the release on your VM with Embedded Cluster: - - 1. In the [Vendor Portal](https://vendor.replicated.com), under the application drop-down, select the Gitea application that you created. - - <img alt="App drop down" src="/images/quick-start-select-gitea-app.png" width="250px"/> - - [View a larger version of this image](/images/quick-start-select-gitea-app.png) - - 1. Click **Customers > Create customer**. - - The **Create a new customer** page opens: - - ![Create a new customer page in the Vendor Portal](/images/create-customer.png) - - [View a larger version of this image](/images/create-customer.png) - - 1. For **Customer name**, enter a name for the customer. For example, `Example Customer`. - - 1. For **Channel**, select **Unstable**. This allows the customer to install releases promoted to the Unstable channel. - - 1. For **License type**, select **Development**. - - 1. 
For **License options**, enable the following entitlements: - * **KOTS Install Enabled** - * **Embedded Cluster Enabled** - - 1. Click **Save Changes**. - -1. Install the application with Embedded Cluster: - - 1. On the page for the customer that you created, click **Install instructions > Embedded Cluster**. - - ![Customer install instructions dropdown](/images/customer-install-instructions-dropdown.png) - - [View a larger image](/images/customer-install-instructions-dropdown.png) - - 1. On the command line, SSH onto your VM and run the commands in the **Embedded cluster install instructions** dialog to download the latest release, extract the installation assets, and install. - - <img width="500px" src="/images/embedded-cluster-install-dialog-latest.png" alt="embedded cluster install instructions dialog"/> - - [View a larger version of this image](/images/embedded-cluster-install-dialog-latest.png) - - 1. When prompted, enter a password for accessing the Admin Console. - - The installation command takes a few minutes to complete. - - **Example output:** - - ```bash - ? Enter an Admin Console password: ******** - ? Confirm password: ******** - ✔ Host files materialized! - ✔ Running host preflights - ✔ Node installation finished! - ✔ Storage is ready! - ✔ Embedded Cluster Operator is ready! - ✔ Admin Console is ready! - ✔ Additional components are ready! - Visit the Admin Console to configure and install gitea-kite: http://104.155.145.60:30000 - ``` - - At this point, the cluster is provisioned and the Admin Console is deployed, but the application is not yet installed. - - 1. Go to the URL provided in the output to access the Admin Console. - - 1. On the Admin Console landing page, click **Start**. - - 1. On the **Secure the Admin Console** screen, review the instructions and click **Continue**. In your browser, follow the instructions that were provided on the **Secure the Admin Console** screen to bypass the warning. - - 1. On the **Certificate type** screen, either select **Self-signed** to continue using the self-signed Admin Console certificate or click **Upload your own** to upload your own private key and certificate. - - By default, a self-signed TLS certificate is used to secure communication between your browser and the Admin Console. You will see a warning in your browser every time you access the Admin Console unless you upload your own certificate. - - 1. On the login page, enter the Admin Console password that you created during installation and click **Log in**. - - 1. On the **Configure the cluster** screen, you can view details about the VM where you installed, including its node role, status, CPU, and memory. Users can also optionally add additional nodes on this page before deploying the application. Click **Continue**. - - The Admin Console dashboard opens. - - 1. On the Admin Console dashboard, next to the version, click **Deploy** and then **Yes, Deploy**. - - The application status changes from Missing to Unavailable while the `gitea` Deployment is being created. - - 1. After a few minutes, when the application status is Ready, click **Open App** to view the Gitea application in a browser. - - For example: - - ![Admin console dashboard showing ready status](/images/gitea-ec-ready.png) - - [View a larger version of this image](/images/gitea-ec-ready.png) - - <img alt="Gitea app landing page" src="/images/gitea-app.png" width="600px"/> - - [View a larger version of this image](/images/gitea-app.png) - -1. Return to the Vendor Portal and go to **Customers**. 
Under the name of the customer, confirm that you can see an active instance. - - This instance telemetry is automatically collected and sent back to the Vendor Portal by both KOTS and the Replicated SDK. For more information, see [About Instance and Event Data](/vendor/instance-insights-event-data). - -1. Under **Instance ID**, click on the ID to view additional insights including the versions of Kubernetes and the Replicated SDK running in the cluster where you installed the application. For more information, see [Instance Details](/vendor/instance-insights-details). - -1. Create a new release that adds preflight checks to the application: - - 1. In your local filesystem, go to the `gitea` directory. - - 1. Create a `gitea-preflights.yaml` file in the `templates` directory: - - ``` - touch templates/gitea-preflights.yaml - ``` - - 1. In the `gitea-preflights.yaml` file, add the following YAML to create a Kubernetes Secret with a simple preflight spec: - - ```yaml - apiVersion: v1 - kind: Secret - metadata: - labels: - troubleshoot.sh/kind: preflight - name: "{{ .Release.Name }}-preflight-config" - stringData: - preflight.yaml: | - apiVersion: troubleshoot.sh/v1beta2 - kind: Preflight - metadata: - name: preflight-sample - spec: - collectors: - - http: - collectorName: slack - get: - url: https://api.slack.com/methods/api.test - analyzers: - - textAnalyze: - checkName: Slack Accessible - fileName: slack.json - regex: '"status": 200,' - outcomes: - - pass: - when: "true" - message: "Can access the Slack API" - - fail: - when: "false" - message: "Cannot access the Slack API. Check that the server can reach the internet and check [status.slack.com](https://status.slack.com)." - ``` - The YAML above defines a preflight check that confirms that an HTTP request to the Slack API at `https://api.slack.com/methods/api.test` made from the cluster returns a successful response of `"status": 200,`. - - 1. In the `Chart.yaml` file, increment the version to 1.0.7: - - ```yaml - # Chart.yaml - version: 1.0.7 - ``` - - 1. Update dependencies and package the chart to a `.tgz` chart archive: - - ```bash - helm package -u . - ``` - - 1. Move the chart archive to the `manifests` directory: - - ```bash - mv gitea-1.0.7.tgz manifests - ``` - - 1. In the `manifests` directory, open the KOTS HelmChart custom resource (`gitea.yaml`) and update the `chartVersion`: - - ```yaml - # gitea.yaml KOTS HelmChart - chartVersion: 1.0.7 - ``` - - 1. Remove the chart archive for version 1.0.6 of the Gitea chart from the `manifests` directory: - - ``` - rm gitea-1.0.6.tgz - ``` - - 1. From the `manifests` directory, create and promote a new release, setting the version label of the release to `0.0.2`: - - ```bash - replicated release create --yaml-dir . --promote Unstable --version 0.0.2 - ``` - **Example output**: - ```bash - • Reading manifests from . ✓ - • Creating Release ✓ - • SEQUENCE: 2 - • Promoting ✓ - • Channel 2kvjwEj4uBaCMoTigW5xty1iiw6 successfully set to release 2 - ``` - -1. On your VM, update the application instance to the new version that you just promoted: - - 1. In the Admin Console, go to the **Version history** tab. - - The new version is displayed automatically. - - 1. Click **Deploy** next to the new version. - - The Embedded Cluster upgrade wizard opens. - - 1. In the Embedded Cluster upgrade wizard, on the **Preflight checks** screen, note that the "Slack Accessible" preflight check that you added was successful. Click **Next: Confirm and deploy**. 
 - - ![preflight page of the embedded cluster upgrade wizard](/images/quick-start-ec-upgrade-wizard-preflight.png) - - [View a larger version of this image](/images/quick-start-ec-upgrade-wizard-preflight.png) - - :::note - The **Config** screen in the upgrade wizard is bypassed because this release does not contain a KOTS Config custom resource. The KOTS Config custom resource is used to set up the Config screen in the KOTS Admin Console. - ::: - - 1. On the **Confirm and Deploy** page, click **Deploy**. - -1. Reset and reboot the VM to remove the installation: - - ```bash - sudo ./APP_SLUG reset - ``` - Where `APP_SLUG` is the unique slug for the application. - - :::note - You can find the application slug by running `replicated app ls` on your local machine. - ::: - -## Next Steps - -Congratulations! As part of this quick start, you: -* Added the Replicated SDK to a Helm chart -* Created a release with the Helm chart -* Installed the release on a VM with Embedded Cluster -* Viewed telemetry for the installed instance in the Vendor Portal -* Created a new release to add preflight checks to the application -* Updated the application from the Admin Console - -Now that you are familiar with the workflow of creating, installing, and updating releases, you can begin onboarding your own application to the Replicated Platform. - -To get started, see [Replicated Onboarding](replicated-onboarding). - -## Related Topics - -For more information about the Replicated Platform features mentioned in this quick start, see: - -* [About Distributing Helm Charts with KOTS](/vendor/helm-native-about) -* [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about) -* [About the Replicated SDK](/vendor/replicated-sdk-overview) -* [Introduction to KOTS](/intro-kots) -* [Managing Releases with the CLI](/vendor/releases-creating-cli) -* [Packaging a Helm Chart for a Release](/vendor/helm-install-release) -* [Using Embedded Cluster](/vendor/embedded-overview) - -## Related Tutorials - -For additional tutorials related to this quick start, see: - -* [Deploying a Helm Chart on a VM with Embedded Cluster](/vendor/tutorial-embedded-cluster-setup) -* [Adding Preflight Checks to a Helm Chart](/vendor/tutorial-preflight-helm-setup) -* [Deploying a Helm Chart with KOTS and the Helm CLI](/vendor/tutorial-kots-helm-setup) - -================ -File: docs/vendor/releases-about.mdx -================ -import ChangeChannel from "../partials/customers/_change-channel.mdx" -import RequiredReleasesLimitations from "../partials/releases/_required-releases-limitations.mdx" -import RequiredReleasesDescription from "../partials/releases/_required-releases-description.mdx" -import VersionLabelReqsHelm from "../partials/releases/_version-label-reqs-helm.mdx" - -# About Channels and Releases - -This topic describes channels and releases, including information about the **Releases** and **Channels** pages in the Replicated Vendor Portal. - -## Overview - -A _release_ represents a single version of your application. Each release is promoted to one or more _channels_. Channels provide a way to progress releases through the software development lifecycle: from internal testing, to sharing with early adopters, and finally to making the release generally available. - -Channels also control which customers are able to install a release. You assign each customer to a channel to define the releases that the customer can access. 
For example, a customer assigned to the Stable channel can only install releases that are promoted to the Stable channel, and cannot see any releases promoted to other channels. For more information about assigning customers to channels, see [Channel Assignment](licenses-about#channel-assignment) in _About Customers_. - -Using channels and releases helps you distribute versions of your application to the right customer segments, without needing to manage different release workflows. - -You can manage channels and releases with the Vendor Portal, the Replicated CLI, or the Vendor API v3. For more information about creating and managing releases or channels, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Creating and Editing Channels](releases-creating-channels). - -## About Channels - -This section provides additional information about channels, including details about the default channels in the Vendor Portal and channel settings. - -### Unstable, Beta, and Stable Channels - -Replicated includes the following channels by default: - -* **Unstable**: The Unstable channel is designed for internal testing and development. You can create and assign an internal test customer to the Unstable channel to install in a development environment. Replicated recommends that you do not license any of your external users against the Unstable channel. -* **Beta**: The Beta channel is designed for release candidates and early-adopting customers. Replicated recommends that you promote a release to the Beta channel after it has passed automated testing in the Unstable channel. You can also choose to license early-adopting customers against this channel. -* **Stable**: The Stable channel is designed for releases that are generally available. Replicated recommends that you assign most of your customers to the Stable channel. Customers licensed against the Stable channel only receive application updates when you promote a new release to the Stable channel. - -You can archive or edit any of the default channels, and create new channels. For more information, see [Creating and Editing Channels](releases-creating-channels). - -### Settings - -Each channel has settings. You can customize the settings for a channel to control some of the behavior of releases promoted to the channel. - -The following shows the **Channel Settings** dialog, accessed by clicking the settings icon on a channel: - -<img src="/images/channel-settings.png" alt="Channel Settings dialog in the Vendor Portal" width="500"/> - -[View a larger version of this image](/images/channel-settings.png) - -The following describes each of the channel settings: - -* **Channel name**: The name of the channel. You can change the channel name at any time. Each channel also has a unique ID listed below the channel name. -* **Description**: Optionally, add a description of the channel. -* **Set this channel to default**: When enabled, sets the channel as the default channel. The default channel cannot be archived. -* **Custom domains**: Select the customer-facing domains that releases promoted to this channel use for the Replicated registry, Replicated proxy registry, Replicated app service, or Replicated Download Portal endpoints. If a default custom domain exists for any of these endpoints, choosing a different domain in the channel settings overrides the default. If no custom domains are configured for an endpoint, the drop-down for the endpoint is disabled. 
- - For more information about configuring custom domains and assigning default domains, see [Using Custom Domains](custom-domains-using). -* The following channel settings apply only to applications that support KOTS: - * **Automatically create airgap builds for newly promoted releases in this channel**: When enabled, the Vendor Portal automatically builds an air gap bundle when a new release is promoted to the channel. When disabled, you can generate an air gap bundle manually for a release on the **Release History** page for the channel. - * **Enable semantic versioning**: When enabled, the Vendor Portal verifies that the version label for any releases promoted to the channel uses a valid semantic version. For more information, see [Semantic Versioning](releases-about#semantic-versioning) in _About Releases_. - * **Enable new airgap bundle format**: When enabled, air gap bundles built for releases promoted to the channel use a format that supports image digests. This air gap bundle format also ensures that identical image layers are not duplicated, resulting in a smaller air gap bundle size. For more information, see [Using Image Digests in Air Gap Installations](private-images-tags-digests#digests-air-gap) in _Using Image Tags and Digests_. - - :::note - The new air gap bundle format is supported for applications installed with KOTS v1.82.0 or later. - ::: - -## About Releases - -This section provides additional information about releases, including details about release promotion, properties, sequencing, and versioning. - -### Release Files - -A release contains your application files as well as the manifests required to install the application with the Replicated installers ([Replicated Embedded Cluster](/vendor/embedded-overview) and [Replicated KOTS](../intro-kots)). - -The application files in releases can be Helm charts and/or Kubernetes manifests. Replicated strongly recommends that all applications are packaged as Helm charts because many enterprise customers will expect to be able to install with Helm. - -### Promotion - -Each release is promoted to one or more channels. While you are developing and testing releases, Replicated recommends promoting to a channel that does not have any real customers assigned, such as the default Unstable channel. When the release is ready to be shared externally with customers, you can then promote to a channel that has the target customers assigned, such as the Beta or Stable channel. - -A release cannot be edited after it is promoted to a channel. This means that you can test a release on an internal development channel, and know with confidence that the same release will be available to your customers when you promote it to a channel where real customers are assigned. - -### Properties - -Each release has properties. You define release properties when you promote a release to a channel. You can edit release properties at any time from the channel **Release History** page in the Vendor Portal. For more information, see [Edit Release Properties](releases-creating-releases#edit-release-properties) in _Managing Releases with the Vendor Portal_. 
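 - -For example, you can set the version label when you create and promote a release with the Replicated CLI (a minimal sketch; the channel name and version label are illustrative): - -```bash -# Create a release from the current directory, promote it to the -# Beta channel, and set its version label to 1.0.0 -replicated release create --yaml-dir . --promote Beta --version 1.0.0 -```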
 - -The following shows an example of the release properties dialog: - -<img src="/images/release-properties.png" width="500px" alt="release properties dialog for a release with version label 0.1.22"/> - -[View a larger version of this image](/images/release-properties.png) - -As shown in the screenshot above, the release has the following properties: - -* **Version label**: The version label for the release. Version labels have the following requirements: - - * If semantic versioning is enabled for the channel, you must use a valid semantic version. For more information, see [Semantic Versioning](#semantic-versioning). - - <VersionLabelReqsHelm/> - -* **Requirements**: Select **Prevent this release from being skipped during upgrades** to mark the release as required. - - <RequiredReleasesDescription/> - - <RequiredReleasesLimitations/> - -* **Release notes (supports markdown)**: Detailed release notes for the release. The release notes support markdown and are shown to your customer. - -### Sequencing - -By default, Replicated uses release sequence numbers to organize and order releases, and uses instance sequence numbers in an instance's internal version history. - -#### Release Sequences - -In the Vendor Portal, each release is automatically assigned a unique, monotonically-increasing sequence number. You can use this number as a fallback to identify a promoted or draft release, if you do not set the `Version label` field during promotion. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases). - -The following graphic shows release sequence numbers in the Vendor Portal: - -<img alt="Release sequence numbers" src="/images/release-sequences.png" width="750px"/> - -[View a larger version of this image](/images/release-sequences.png) - -#### Instance Sequences - -When a new version is available for upgrade (for example, when KOTS checks for upstream updates, or when the user syncs their license or makes a config change), the KOTS Admin Console assigns a unique instance sequence number to that version. The instance sequence in the Admin Console starts at 0 and increments for each identifier that is returned when a new version is available. - -This instance sequence is unrelated to the release sequence displayed in the Vendor Portal, and it is likely that the instance sequence will differ from the release sequence. Instance sequences are only tracked by KOTS instances, and the Vendor Portal has no knowledge of these numbers. - -The following graphic shows instance sequence numbers on the Admin Console dashboard: - -<img alt="Instance sequence numbers" src="/images/instance-sequences.png" width="550px"/> - -[View a larger version of this image](/images/instance-sequences.png) - -#### Channel Sequences - -When a release is promoted to a channel, a channel sequence number is assigned. This unique sequence number increments by one and tracks the order in which releases were promoted to a channel. You can view the channel sequence on the **Release History** page in the Vendor Portal, as shown in the image below: - -<img alt="Channel sequence on Release History page" src="/images/release-history-channel-sequence.png" width="750"/> - -[View a larger version of this image](/images/release-history-channel-sequence.png) - -The channel sequence is also used in certain URLs. For example, a release with a *release sequence* of `170` can have a *channel sequence* of `125`. 
The air gap download URL for that release can contain `125` in the URL, even though the release sequence is `170`. - -Ordering is more complex if some or all of the releases in a channel have a semantic version label and semantic versioning is enabled for the channel. For more information, see [Semantic Versioning Sequence](#semantic-versioning-sequence). - -#### Semantic Versioning Sequence - -For channels with semantic versioning enabled, the Admin Console sequences instance releases by their semantic versions instead of their promotion dates. - -If releases without a valid semantic version are already promoted to a channel, the Admin Console sorts the releases that do have semantic versions starting with the earliest version and proceeding to the latest. The releases with non-semantic versioning stay in the order of their promotion dates. For example, assume that you promote these releases in the following order to a channel: - -- 1.0.0 -- abc -- 0.1.0 -- xyz -- 2.0.0 - -Then, you enable semantic versioning on that channel. The Admin Console sequences the version history for the channel as follows: - -- 0.1.0 -- 1.0.0 -- abc -- xyz -- 2.0.0 - -### Semantic Versioning - -Semantic versioning is available with Replicated KOTS v1.58.0 and later. Note the following: - -- For applications created in the Vendor Portal on or after February 23, 2022, semantic versioning is enabled by default on the Stable and Beta channels. Semantic versioning is disabled on the Unstable channel by default. - -- For existing applications created before February 23, 2022, semantic versioning is disabled by default on all channels. - -Semantic versioning is recommended because it makes versioning more predictable for users and lets you enforce versioning so that no one uses an incorrect version. - -To use semantic versioning: - -1. Enable semantic versioning on a channel, if it is not enabled by default. Click the **Edit channel settings** icon, and turn on the **Enable semantic versioning** toggle. -1. Assign a semantic version number when you promote a release. - -Releases promoted to a channel with semantic versioning enabled are verified to ensure that the release version label is a valid semantic version. For more information about valid semantic versions, see [Semantic Versioning 2.0.0](https://semver.org). - -If you enable semantic versioning for a channel and then promote releases to it, Replicated recommends that you do not later disable semantic versioning for that channel. - -You can enable semantic versioning on a channel that already has releases promoted to it without semantic versioning. Any subsequently promoted releases must use semantic versioning. In this case, the channel will have releases with and without semantic version numbers. For information about how Replicated organizes these release sequences, see [Semantic Versioning Sequence](#semantic-versioning-sequence). - -### Demotion - -A channel release can be demoted from a channel. When a channel release is demoted, the release is no longer available for download, but is not withdrawn from environments where it was already downloaded or installed. - -The demoted release's channel sequence and version are not reused. For customers, the release will appear to have been skipped. Un-demoting a release will restore its place in the channel sequence, making it available again for download and installation. 
 - -For information about how to demote a release, see [Demote a Release](/vendor/releases-creating-releases#demote-a-release) in _Managing Releases with the Vendor Portal_. - -## Vendor Portal Pages - -This section provides information about the channels and releases pages in the Vendor Portal. - -### Channels Page - -The **Channels** page in the Vendor Portal includes information about each channel. From the **Channels** page, you can edit and archive your channels. You can also edit the properties of the releases promoted to each channel, and view and edit the customers assigned to each channel. - -The following shows an example of a channel in the Vendor Portal **Channels** page: - -<img src="/images/channel-card.png" alt="Channel card in the Vendor Portal" width="400"/> - -[View a larger version of this image](/images/channel-card.png) - -As shown in the image above, you can do the following from the **Channels** page: - -* Edit the channel settings by clicking on the settings icon, or archive the channel by clicking on the trash can icon. For information about channel settings, see [Settings](#settings). - -* In the **Adoption rate** section, view data on the adoption rate of releases promoted to the channel among customers assigned to the channel. - -* In the **Customers** section, view the number of active and inactive customers assigned to the channel. Click **Details** to go to the **Customers** page, where you can view details about the customers assigned to the channel. - -* In the **Latest release** section, view the properties of the latest release, and get information about any warnings or errors in the YAML files for the latest release. - - Click **Release history** to access the history of all releases promoted to the channel. From the **Release History** page, you can view the version labels and files in each release that has been promoted to the selected channel. - - You can also build and download air gap bundles to be used in air gap installations with Replicated installers (Embedded Cluster, KOTS, kURL), edit the release properties for each release promoted to the channel from the **Release History** page, and demote a release from the channel. - - The following shows an example of the **Release History** page: - - <img src="/images/channels-release-history.png" alt="Release history page in the Vendor Portal" width="750"/> - - [View a larger version of this image](/images/channels-release-history.png) - -* For applications that support KOTS, you can also do the following from the **Channel** page: - - * In the **kURL installer** section, view the current kURL installer promoted to the channel. Click **Installer history** to view the history of kURL installers promoted to the channel. For more information about creating kURL installers, see [Creating a kURL Installer](packaging-embedded-kubernetes). - - * In the **Install** section, view and copy the installation commands for the latest release on the channel. - -### Draft Release Page - -For applications that support installation with KOTS, the **Draft** page provides a YAML editor to add, edit, and delete your application files and Replicated custom resources. To open the **Draft** page, click **Releases > Create Release** in the Vendor Portal. 
The following shows an example of the **Draft** page in the Vendor Portal:

  <img alt="Draft release page" src="/images/guides/kots/default-yaml.png" width="700px"/>

  [View a larger version of this image](/images/guides/kots/default-yaml.png)

You can do the following tasks on the **Draft** page:

- Manage the file directory structure. Replicated custom resource files are grouped together above the white line of the file directory. Application files are grouped together underneath the white line in the file directory.

  Delete files using the trash icon that displays when you hover over a file. Create a new file or folder using the corresponding icons at the bottom of the file directory pane. You can also drag and drop files in and out of the folders.

  ![Manage File Directory](/images/new-file-and-trash.png)

- Edit the YAML files by selecting a file in the directory and making changes in the YAML editor.

- In the **Help** or **Config help** pane, view the linter for any errors. If there are no errors, you get an **Everything looks good!** message. If an error displays, you can click the **Learn how to configure** link. For more information, see [Linter Rules](/reference/linter).

- Select the Config custom resource to preview how your application's Config page will look to your customers. The **Config preview** pane only appears when you select that file. For more information, see [About the Configuration Screen](config-screen-about).

- Select the Application custom resource to preview how your application icon will look in the Admin Console. The **Application icon preview** only appears when you select that file. For more information, see [Customizing the Application Icon](admin-console-customize-app-icon).

================
File: docs/vendor/releases-creating-channels.md
================
# Creating and Editing Channels

This topic describes how to create and edit channels using the Replicated Vendor Portal. For more information about channels, see [About Channels and Releases](releases-about).

For information about creating channels with the Replicated CLI, see [channel create](/reference/replicated-cli-channel-create).

For information about creating and managing channels with the Vendor API v3, see the [channels](https://replicated-vendor-api.readme.io/reference/createchannel) section in the Vendor API v3 documentation.

## Create a Channel

To create a channel:

1. From the Replicated [Vendor Portal](https://vendor.replicated.com), select **Channels** from the left menu.
1. Click **Create Channel**.

   The Create a new channel dialog opens. For example:

   <img src="/images/channels-create.png" alt="Create channel dialog" width="400px"/>

1. Enter a name and description for the channel.
1. (Recommended) If semantic versioning is not enabled by default, enable it on the channel by turning on **Enable semantic versioning**. For more information about semantic versioning and defaults, see [Semantic Versioning](releases-about#semantic-versioning).

1. (Recommended) Enable an air gap bundle format that supports image digests and deduplication of image layers by turning on **Enable new air gap bundle format**. For more information, see [Using Image Tags and Digests](private-images-tags-digests).

1. Click **Create Channel**.

## Edit a Channel

To edit the settings of an existing channel:

1. In the Vendor Portal, select **Channels** from the left menu.
1. Click the gear icon on the top right of the channel that you want to modify.
   The Channel settings dialog opens. For example:

   <img src="/images/channel-settings.png" alt="Channel Settings dialog in the Vendor Portal" width="500"/>

1. Edit the fields and click **Save**.

   For more information about channel settings, see [Settings](releases-about#settings) in _About Channels and Releases_.

## Archive a Channel

You can archive an existing channel to prevent any new releases from being promoted to the channel.

:::note
You cannot archive a channel if:
* There are customers assigned to the channel.
* The channel is set as the default channel.

Assign customers to a different channel and set a different channel as the default before archiving.
:::

To archive a channel with the Vendor Portal or the Replicated CLI:

* **Vendor Portal**: In the Vendor Portal, go to the **Channels** page and click the trash can icon in the top right corner of the card for the channel that you want to archive.
* **Replicated CLI**:
  1. Run the following command to find the ID for the channel that you want to archive:
     ```bash
     replicated channel ls
     ```
     The output of this command includes the ID and name for each channel, as well as information about the latest release version on each channel.

  1. Run the following command to archive the channel:
     ```bash
     replicated channel rm CHANNEL_ID
     ```
     Replace `CHANNEL_ID` with the channel ID that you retrieved in the previous step.

     For more information, see [channel rm](/reference/replicated-cli-channel-rm) in the Replicated CLI documentation.

================
File: docs/vendor/releases-creating-cli.mdx
================
# Managing Releases with the CLI

This topic describes how to use the Replicated CLI to create and promote releases.

For information about creating and managing releases with the Vendor Portal, see [Managing Releases with the Vendor Portal](/vendor/releases-creating-releases).

For information about creating and managing releases with the Vendor API v3, see the [releases](https://replicated-vendor-api.readme.io/reference/createrelease) section in the Vendor API v3 documentation.

## Prerequisites

Before you create a release using the Replicated CLI, complete the following prerequisites:

* Install the Replicated CLI and then log in to authorize the CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing).

* Create a new application using the `replicated app create APP_NAME` command. You only need to do this procedure one time for each application that you want to deploy. See [`app create`](/reference/replicated-cli-app-create) in _Reference_.

* Set the `REPLICATED_APP` environment variable to the slug of the target application. See [Set Environment Variables](/reference/replicated-cli-installing#env-var) in _Installing the Replicated CLI_.

  **Example**:

  ```bash
  export REPLICATED_APP=my-app-slug
  ```

## Create a Release From a Local Directory {#dir}

You can use the Replicated CLI to create a release from a local directory that contains the release files.

To create and promote a release:

1. (Helm Charts Only) If your release contains any Helm charts:

   1. Package each Helm chart as a `.tgz` file. See [Packaging a Helm Chart for a Release](/vendor/helm-install-release).

   1. Move the `.tgz` file or files to the local directory that contains the release files:
      ```bash
      mv CHART_TGZ PATH_TO_RELEASE_DIR
      ```
      Where:
      * `CHART_TGZ` is the `.tgz` Helm chart archive.
      * `PATH_TO_RELEASE_DIR` is the path to the directory that contains the release files.

      **Example**

      ```bash
      mv wordpress-1.3.5.tgz manifests
      ```

   1. In the same directory that contains the release files, add a HelmChart custom resource for each Helm chart in the release. See [Configuring the HelmChart Custom Resource](helm-native-v2-using).

1. Lint the application manifest files and ensure that there are no errors in the YAML:

   ```bash
   replicated release lint --yaml-dir=PATH_TO_RELEASE_DIR
   ```

   Where `PATH_TO_RELEASE_DIR` is the path to the directory with the release files.

   For more information, see [release lint](/reference/replicated-cli-release-lint) and [Linter Rules](/reference/linter).

1. Do one of the following:

   * **Create and promote the release with one command**:

     ```bash
     replicated release create --yaml-dir PATH_TO_RELEASE_DIR --lint --promote CHANNEL
     ```
     Where:
     * `PATH_TO_RELEASE_DIR` is the path to the directory with the release files.
     * `CHANNEL` is the channel ID or the case-sensitive name of the channel.

   * **Create and edit the release before promoting**:

     1. Create the release:

        ```bash
        replicated release create --yaml-dir PATH_TO_RELEASE_DIR
        ```
        Where `PATH_TO_RELEASE_DIR` is the path to the directory with the release files.

        For more information, see [release create](/reference/replicated-cli-release-create).

     1. Edit and update the release as desired:

        ```bash
        replicated release update SEQUENCE --yaml-dir PATH_TO_RELEASE_DIR
        ```
        Where:

        - `SEQUENCE` is the release sequence number. This identifies the existing release to be updated.
        - `PATH_TO_RELEASE_DIR` is the path to the directory with the release files.

        For more information, see [release update](/reference/replicated-cli-release-update).

     1. Promote the release when you are ready to test it. Releases cannot be edited after they are promoted. To make changes after promotion, create a new release.

        ```bash
        replicated release promote SEQUENCE CHANNEL
        ```

        Where:

        - `SEQUENCE` is the release sequence number.
        - `CHANNEL` is the channel ID or the case-sensitive name of the channel.

        For more information, see [release promote](/reference/replicated-cli-release-promote).

1. Verify that the release was promoted to the target channel:

   ```bash
   replicated release ls
   ```

================
File: docs/vendor/releases-creating-customer.mdx
================
import ChangeChannel from "../partials/customers/_change-channel.mdx"
import Download from "../partials/customers/_download.mdx"
import GitOpsNotRecommended from "../partials/gitops/_gitops-not-recommended.mdx"

# Creating and Managing Customers

This topic describes how to create and manage customers in the Replicated Vendor Portal. For more information about customer licenses, see [About Customers](licenses-about).

## Create a Customer

This procedure describes how to create a new customer in the Vendor Portal. You can edit customer details at any time.

For information about creating a customer with the Replicated CLI, see [customer create](/reference/replicated-cli-customer-create).
For information about creating and managing customers with the Vendor API v3, see the [customers](https://replicated-vendor-api.readme.io/reference/getcustomerentitlements) section in the Vendor API v3 documentation.

To create a customer:

1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers > Create customer**.

   The **Create a new customer** page opens:

   ![Create a new customer page in the Vendor Portal](/images/create-customer.png)

   [View a larger version of this image](/images/create-customer.png)

1. For **Customer name**, enter a name for the customer.

1. For **Customer email**, enter the email address for the customer.

   :::note
   A customer email address is required for Helm installations. This email address is never used to send emails to customers.
   :::

1. For **Assigned channel**, assign the customer to one of your channels. You can select any channel that has at least one release. The channel a customer is assigned to determines the application releases that they can install. For more information, see [Channel Assignment](licenses-about#channel-assignment) in _About Customers_.

   :::note
   <ChangeChannel/>
   :::

1. For **Custom ID**, you can enter a custom ID for the customer. Setting a custom ID allows you to easily associate this Replicated customer record with your own internal customer data systems during data exports. Replicated recommends using an alphanumeric value such as your Salesforce ID or Hubspot ID.

   :::note
   Replicated does _not_ require that the custom ID is unique. The custom ID is for vendor data reconciliation purposes, and is not used by Replicated for any functionality.
   :::

1. For **Expiration policy**, by default, **Customer's license does not expire** is enabled. To set an expiration date for the license, enable **Customer's license has an expiration date** and specify an expiration date in the **When does this customer expire?** calendar.

1. For **Customer type**, set the customer type. Customer type is used only for reporting purposes. Customer access to your application is not affected by the type you assign to them. By default, **Trial** is selected. For more information, see [About Customer License Types](licenses-about-types).

1. Enable any of the available options for the customer. For more information about the license options, see [Built-in License Fields](/vendor/licenses-using-builtin-fields). For more information about enabling install types, see [Managing Install Types for a License (Beta)](/vendor/licenses-install-types).

1. For **Custom fields**, configure any custom fields that you have added for your application. For more information about how to create custom fields for your application, see [Managing Customer License Fields](licenses-adding-custom-fields).

1. Click **Save Changes**.

## Edit a Customer

You can edit the built-in and custom license fields for a customer at any time by going to the customer's **Manage customer** page. For more information, see [Manage Customer Page](licenses-about#about-the-manage-customer-page) in _About Customers and Licensing_.

Replicated recommends that you test any license changes in a development environment. If needed, install the application using a developer license that matches the current customer's entitlements before editing the developer license. Then validate the updated license.
:::important
For online environments, changing license entitlements can trigger changes to the customer's installed application instance during runtime. Replicated recommends that you verify the logic your application uses to query and enforce the target entitlement before making any changes.
:::

To edit license fields:

1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers**.

1. Select the target customer and click the **Manage customer** tab.

1. On the **Manage customer** page, edit the desired fields and click **Save**.

   ![Full manage customer page for a customer named Prestige Financial](/images/customer-details.png)

1. Test the changes by installing or updating in a development environment. Do one of the following, depending on the installation method for your application:
   * For applications installed with Helm that use the Replicated SDK, you can add logic to your application to enforce entitlements before installation or during runtime using the Replicated SDK API license endpoints. See [Checking Entitlements in Helm Charts Before Deployment](licenses-reference-helm).
   * For applications installed with Replicated KOTS, update the license in the Admin Console. See [Update Online Licenses](/enterprise/updating-licenses#update-online-licenses) and [Update Air Gap Licenses](/enterprise/updating-licenses#update-air-gap-licenses) in _Updating Licenses in the Admin Console_.

## Archive a Customer

When you archive a customer in the Vendor Portal, the customer is hidden from search by default and becomes read-only. Archival does not affect the utility of license files downloaded before the customer was archived.

To expire a license, set an expiration date and policy in the **Expiration policy** field before you archive the customer.

To archive a customer:

1. In the Vendor Portal, click **Customers**. Select the target customer, then click the **Manage customer** tab.

1. Click **Archive Customer**. In the confirmation dialog, click **Archive Customer** again.

You can unarchive a customer by clicking **Unarchive Customer** on the customer's **Manage customer** page.

## Export Customer and Instance Data {#export}

<Download/>

For more information about the data fields in the CSV downloads, see [Data Dictionary](/vendor/instance-data-export#data-dictionary) in _Export Customers and Instance Data_.

## Filter and Search Customers

The **Customers** page provides a search box and filters that help you find customers:

<img alt="search box and filters on the customers page" src="/images/customers-filter.png" width="400px"/>

[View a larger version of this image](/images/customers-filter.png)

You can filter customers based on whether they are active, by license type, and by channel name. You can filter using more than one criterion, such as Active, Paid, and Stable. However, you can select only one license type and one channel at a time.

If there is adoption rate data available for the channel that you are filtering by, you can also filter by current version, previous version, and older versions.

You can also filter customers by custom ID or email address. To filter customers by custom ID or email, use the search box and prepend your search term with "customId:" (for example, `customId:1234`) or "email:" (for example, `email:bob@replicated.com`).

If you want to filter information using multiple license types or channels, you can download a CSV file instead. For more information, see [Export Customer and Instance Data](#export) above.
================
File: docs/vendor/releases-creating-releases.mdx
================
import RequiredReleasesLimitations from "../partials/releases/_required-releases-limitations.mdx"
import RequiredReleasesDescription from "../partials/releases/_required-releases-description.mdx"

# Managing Releases with the Vendor Portal

This topic describes how to use the Replicated Vendor Portal to create and promote releases, edit releases, edit release properties, and archive releases.

For information about creating and managing releases with the CLI, see [Managing Releases with the CLI](/vendor/releases-creating-cli).

For information about creating and managing releases with the Vendor API v3, see the [releases](https://replicated-vendor-api.readme.io/reference/createrelease) and [channelReleases](https://replicated-vendor-api.readme.io/reference/channelreleaseairgapbundleurl) sections in the Vendor API v3 documentation.

## Create a Release

To create and promote a release in the Vendor Portal:

1. From the **Applications** dropdown list, select **Create an app** or select an existing application to update.

1. Click **Releases > Create release**.

   ![Create Release](/images/release-create-new.png)

   [View a larger version of this image](/images/release-create-new.png)

1. Add your files to the release. You can do this by dragging and dropping files to the file directory in the YAML editor or clicking the plus icon to add a new, untitled YAML file.

1. For any Helm charts that you add to the release, in the **Select Installation Method** dialog, select the version of the HelmChart custom resource that KOTS will use to install the chart. `kots.io/v1beta2` is recommended. For more information about the HelmChart custom resource, see [Configuring the HelmChart Custom Resource](helm-native-v2-using).

   <img src="/images/helm-select-install-method.png" alt="select installation method dialog" width="550px"/>

   [View a larger version of this image](/images/helm-select-install-method.png)

1. Click **Save release**. This saves a draft that you can continue to edit until you promote it.

1. Click **Promote**. In the **Promote Release** dialog, edit the fields:

   For more information about the requirements and limitations of each field, see <a href="releases-about#properties">Properties</a> in _About Channels and Releases_.

   <table>
     <tr>
       <th width="30%">Field</th>
       <th width="70%">Description</th>
     </tr>
     <tr>
       <td>Channel</td>
       <td>
         <p>Select the channel where you want to promote the release. If you are not sure which channel to use, use the default Unstable channel.</p>
       </td>
     </tr>
     <tr>
       <td>Version label</td>
       <td>
         <p>Enter a version label.</p>
         <p>If you have one or more Helm charts in your release, the Vendor Portal automatically populates this field. You can change the version label to any <code>version</code> specified in any of the <code>Chart.yaml</code> files included in the release.</p>
       </td>
     </tr>
     <tr>
       <td>Requirements</td>
       <td>
         Select the <strong>Prevent this release from being skipped during upgrades</strong> option to mark the release as required for KOTS installations. This option does not apply to installations with Helm.
       </td>
     </tr>
     <tr>
       <td>Release notes</td>
       <td>Add release notes. The release notes support markdown and are shown to your customers.</td>
     </tr>
   </table>

1. Click **Promote**.

   The release appears in an **Active** state on the Releases page.
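If you prefer to script this flow, the create-and-promote step can also be done in a single Replicated CLI command, as described in [Managing Releases with the CLI](/vendor/releases-creating-cli). For example, a minimal sketch in which the `./manifests` directory and the default Unstable channel are illustrative:

```bash
# Lint, create, and promote a release in one command
replicated release create --yaml-dir ./manifests --lint --promote Unstable
```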
## Edit a Draft Release

To edit a draft release:

1. From the **Applications** dropdown list, select an existing application to update.
1. On the **Releases** page, find the draft release you want to edit and click **Edit YAML**.

   <img src="/images/releases-edit-draft.png" alt="Edit YAML button for a draft release in the Vendor Portal" width="400"/>

   [View a larger image](/images/releases-edit-draft.png)

1. Make your changes in the YAML editor, then click **Save** to save your updated draft.
1. (Optional) Click **Promote**.

## Edit Release Properties

You can edit the properties of a release at any time. For more information about release properties, see [Properties](releases-about#properties) in _About Channels and Releases_.

To edit release properties:

1. Go to **Channels**.
1. In the channel where the release was promoted, click **Release History**.
1. For the release sequence that you want to edit, open the dot menu and click **Edit release**.
1. Edit the properties as needed.

   <img src="/images/release-properties.png" alt="Release Properties dialog in the Vendor Portal" width="300"/>

   [View a larger image](/images/release-properties.png)
1. Click **Update Release**.

## Archive a Release

You can archive releases to remove them from view on the **Releases** page. Archiving a release that has been promoted does _not_ remove the release from the channel's **Release History** page or prevent KOTS from downloading the archived release.

To archive one or more releases:

1. From the **Releases** page, click the trash can icon in the upper right corner.
1. Select one or more releases.
1. Click **Archive Releases**.
1. Confirm the archive action when prompted.

## Demote a Release

A channel release can be demoted from a channel. When a channel release is demoted, the release is no longer available for download, but is not withdrawn from environments where it was already downloaded or installed. For more information, see [Demotion](/vendor/releases-about#demotion) in _About Channels and Releases_.

For information about demoting and un-demoting releases with the Replicated CLI, see [channel demote](/reference/replicated-cli-channel-demote) and [channel un-demote](/reference/replicated-cli-channel-un-demote).

To demote a release in the Vendor Portal:

1. Go to **Channels**.
1. In the channel where the release was promoted, click **Release History**.
1. For the release sequence that you want to demote, open the dot menu and select **Demote Release**.

   ![Release history page](/images/channels-release-history.png)

   [View a larger version of this image](/images/channels-release-history.png)

   After the release is demoted, the given release sequence is greyed out and a **Demoted** label is displayed next to the release on the **Release History** page.

================
File: docs/vendor/releases-share-download-portal.md
================
import DownloadPortal from "../partials/kots/_download-portal-about.mdx"

# Downloading Assets from the Download Portal

This topic describes how to download customer license files, air gap bundles, and other assets from the Replicated Download Portal.
For information about downloading air gap bundles and licenses with the Vendor API v3, see the following pages in the Vendor API v3 documentation:
* [Download a customer license file as YAML](https://replicated-vendor-api.readme.io/reference/downloadlicense)
* [Trigger airgap build for a channel's release](https://replicated-vendor-api.readme.io/reference/channelreleaseairgapbuild)
* [Get airgap bundle download URL for the active release on the channel](https://replicated-vendor-api.readme.io/reference/channelreleaseairgapbundleurl)

## Overview

<DownloadPortal/>

The most common use case for the Download Portal is for customers installing into air gap environments who need to download both their license file and one or more air gap bundles.

The following is an example of the Download Portal for an air gap customer installing in their own existing cluster:

![Download Portal for existing cluster air gap installs](/images/download-portal-existing-cluster.png)

[View a larger version of this image](/images/download-portal-existing-cluster.png)

## Limitations

* Installation assets for [Replicated Embedded Cluster](/vendor/embedded-overview) are not available for download in the Download Portal.

* Sessions in the Download Portal are valid for 72 hours. After the session expires, your customer must log in again. The Download Portal session length is not configurable.

## Download Assets from the Download Portal

To log in to the Download Portal and download assets:

1. In the [Vendor Portal](https://vendor.replicated.com), on the **Customers** page, click the name of the customer.

1. (Optional) On the **Manage customer** tab, enable the **Airgap Download Enabled** option. This makes air gap bundles available in the Download Portal.

   ![airgap download enabled license option](/images/airgap-download-enabled.png)

   [View a larger version of this image](/images/airgap-download-enabled.png)

1. On the **Reporting** tab, in the **Download portal** section, click **Manage customer password**.

   ![download portal section](/images/download-portal-link.png)

   [View a larger version of this image](/images/download-portal-link.png)

1. In the pop-up window, enter a password or click **Generate**.

   <img alt="download portal password pop-up" src="/images/download-portal-password-popup.png" width="450px"/>

   [View a larger version of this image](/images/download-portal-password-popup.png)

1. Click **Copy** to copy the password to your clipboard.

   After the password is saved, it cannot be retrieved again. If you lose the password, you can generate a new one.

1. Click **Save** to set the password.

1. Click **Visit download portal** to log in to the Download Portal and preview your customer's experience.

   :::note
   By default, the Download Portal uses the domain `get.replicated.com`. You can optionally use a custom domain for the Download Portal. For more information, see [Using Custom Domains](/vendor/custom-domains-using).
   :::

1. In the Download Portal, on the left side of the screen, select one of the following:
   * **Bring my own Kubernetes**: View the downloadable assets for existing cluster installations with KOTS.
   * **Embedded Kubernetes**: View the downloadable assets for Replicated kURL installations.

   :::note
   Installation assets for [Replicated Embedded Cluster](/vendor/embedded-overview) are not available for download in the Download Portal.
- ::: - - The following is an example of the Download Portal for an air gap customer: - - ![download portal for existing cluster air gap installs](/images/download-portal-existing-cluster.png) - - [View a larger version of this image](/images/download-portal-existing-cluster.png) - -1. Under **Select application version**, use the dropdown to select the target application release version. The Download Portal automatically makes the correct air gap bundles available for download based on the selected application version. - -1. Click the download button to download each asset. - -1. To share installation files with a customer, send the customer their unique link and password for the Download Portal. - -================ -File: docs/vendor/releases-sharing-license-install-script.mdx -================ -import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; - -# Finding Installation Commands for a Release - -This topic describes where to find the installation commands and instructions for releases in the Replicated Vendor Portal. - -For information about getting installation commands with the Replicated CLI, see [channel inspect](/reference/replicated-cli-channel-inspect). For information about getting installation commands with the Vendor API v3, see [Get install commands for a specific channel release](https://replicated-vendor-api.readme.io/reference/getchannelreleaseinstallcommands) in the Vendor API v3 documentation. - -## Get Commands for the Latest Release - -Every channel in the Vendor Portal has an **Install** section where you can find installation commands for the latest release on the channel. - -To get the installation commands for the latest release: - -1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Channels** page. - -1. On the target channel card, under **Install**, click the tab for the type of installation command that you want to view: - - <Tabs> - <TabItem value="kots" label="KOTS" default> - <p>View the command for installing with Replicated KOTS in existing clusters.</p> - - <img alt="Install section of the channel card" src="/images/channel-card-install-kots.png" width="400px"/> - [View a larger version of this image](/images/channel-card-install-kots.png) - </TabItem> - <TabItem value="embedded" label="Embedded K8s" default> - <p>View the commands for installing with Replicated Embedded Cluster or Replicated kURL on VMs or bare metal servers.</p> - - <p>In the dropdown, choose **kURL** or **Embedded Cluster** to view the command for the target installer:</p> - - <img alt="Install section of the channel card" src="/images/channel-card-install-kurl.png" width="400px"/> - [View a larger version of this image](/images/channel-card-install-kurl.png) - - <img alt="Install section of the channel card" src="/images/channel-card-install-ec.png" width="400px"/> - [View a larger version of this image](/images/channel-card-install-ec.png) - - :::note - The Embedded Cluster installation instructions are customer-specific. Click **View customer list** to navigate to the page for the target customer. For more information, see [Get Customer-Specific Installation Instructions for Helm or Embedded Cluster](#customer-specific) below. 
- ::: - </TabItem> - <TabItem value="helm" label="Helm" default> - <p>View the command for installing with the Helm CLI in an existing cluster.</p> - - <img alt="Install section of the channel card" src="/images/channel-card-install-helm.png" width="400px"/> - [View a larger version of this image](/images/channel-card-install-helm.png) - - :::note - The Helm installation instructions are customer-specific. Click **View customer list** to navigate to the page for the target customer. For more information, see [Get Customer-Specific Installation Instructions for Helm or Embedded Cluster](#customer-specific) below. - ::: - </TabItem> - </Tabs> - -## Get Commands for a Specific Release - -Every channel in the Vendor Portal has a **Release history** page where you can find the installation commands for specific release versions. - -To get the command for a specific release version: - -1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Channels** page. - -1. On the channel card, click **Release history**. - - <img alt="Release history link on channel card" src="/images/release-history-link.png" width="500px"/> - - [View a larger version of this image](/images/release-history-link.png) - -1. For the target release version, open the dot menu and click **Install Commands**. - - ![Release history page](/images/channels-release-history.png) - - [View a larger version of this image](/images/channels-release-history.png) - -1. In the **Install Commands** dialog, click the tab for the type of installation command that you want to view: - - <Tabs> - <TabItem value="kots" label="KOTS" default> - <p>View the command for installing with Replicated KOTS in existing clusters.</p> - - <img alt="Install section of the channel card" src="/images/release-history-install-kots.png" width="500px"/> - [View a larger version of this image](/images/release-history-install-kots.png) - </TabItem> - <TabItem value="embedded" label="Embedded K8s" default> - <p>View the commands for installing with Replicated Embedded Cluster or Replicated kURL on VMs or bare metal servers.</p> - - <p>In the dropdown, choose **kURL** or **Embedded Cluster** to view the command for the target installer:</p> - - <img alt="Install section of the channel card" src="/images/release-history-install-kurl.png" width="500px"/> - [View a larger version of this image](/images/release-history-install-kurl.png) - - <img alt="Install section of the channel card" src="/images/release-history-install-embedded-cluster.png" width="500px"/> - [View a larger version of this image](/images/release-history-install-embedded-cluster.png) - - :::note - The Embedded Cluster installation instructions are customer-specific. Click **View customer list** to navigate to the page for the target customer. For more information, see [Get Customer-Specific Installation Instructions for Helm or Embedded Cluster](#customer-specific) below. - ::: - </TabItem> - <TabItem value="helm" label="Helm" default> - <p>View the command for installing with the Helm CLI in an existing cluster.</p> - - <img alt="Install section of the channel card" src="/images/release-history-install-helm.png" width="500px"/> - [View a larger version of this image](/images/release-history-install-helm.png) - - :::note - The Helm installation instructions are customer-specific. Click **View customer list** to navigate to the page for the target customer. For more information, see [Get Customer-Specific Installation Instructions for Helm or Embedded Cluster](#customer-specific) below. 
   :::
   </TabItem>
   </Tabs>

## Get Customer-Specific Installation Instructions for Helm or Embedded Cluster {#customer-specific}

Installation instructions for the Helm CLI and Replicated Embedded Cluster are customer-specific. You can find installation instructions on the page for the target customer.

To get customer-specific Helm or Embedded Cluster installation instructions:

1. In the [Vendor Portal](https://vendor.replicated.com), go to the **Customers** page and click the target customer.

1. At the top of the page, click the **Install instructions** dropdown, then click **Helm** or **Embedded cluster**.

   ![Install instructions button](/images/customer-install-instructions-dropdown.png)

   [View a larger version of this image](/images/customer-install-instructions-dropdown.png)

1. In the dialog that opens, follow the installation instructions to install.

   <Tabs>
   <TabItem value="helm" label="Helm" default>
   <p>View the customer-specific Helm CLI installation instructions. For more information about installing with the Helm CLI, see [Installing with Helm](/vendor/install-with-helm).</p>
   <img alt="Helm install button" src="/images/helm-install-instructions-dialog.png" width="500px"/>
   [View a larger version of this image](/images/helm-install-instructions-dialog.png)
   </TabItem>
   <TabItem value="ec" label="Embedded Cluster" default>
   <p>View the customer-specific Embedded Cluster installation instructions. For more information about installing with Embedded Cluster, see [Online Installation with Embedded Cluster](/enterprise/installing-embedded).</p>
   <img alt="Embedded cluster install instructions" src="/images/embedded-cluster-install-dialog-latest.png" width="500px"/>
   [View a larger version of this image](/images/embedded-cluster-install-dialog-latest.png)
   </TabItem>
   </Tabs>

================
File: docs/vendor/replicated-api-tokens.md
================
import TeamTokenNote from "../partials/vendor-api/_team-token-note.mdx"

# Generating API Tokens

This topic describes the available types of API tokens and how to generate them for use with the Replicated CLI and Replicated Vendor API v3.

## About API Tokens

The Vendor API v3 is the API that manages applications in the Replicated Vendor Portal. The Replicated CLI is an implementation of the Vendor API v3.

Using the Replicated CLI and Vendor API v3 requires an API token for authorization. Tokens are primarily used for automated customer, channel, and release management. You create tokens in the Vendor Portal.

The following types of tokens are available:

- [Service Accounts](#service-accounts)
- [User API Tokens](#user-api-tokens)

<TeamTokenNote/>

### Service Accounts

Service accounts are assigned a token and associated with an RBAC policy. Users with the proper permissions can create, retrieve, or revoke service account tokens. Admin users can assign any RBAC policy to a service account. Non-admin users can only assign their own RBAC policy when they create a service account.

Service accounts are useful for operations that are not tied to a particular user, such as CI/CD or integrations.

Updates to a service account's RBAC policy are automatically applied to its associated token. When a service account is removed, its tokens are also invalidated.

### User API Tokens

User API tokens are private to the user who creates the token. User tokens assume the user's account when used, including any RBAC permissions.
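Both token types are supplied to tooling in the same way. As a minimal sketch, assuming the `REPLICATED_API_TOKEN` environment variable and a list-apps request (the endpoint path shown here is an assumption; see the Vendor API v3 documentation for the authoritative reference):

```bash
# The Replicated CLI reads the token from this environment variable
export REPLICATED_API_TOKEN=<your-token>
replicated app ls

# The Vendor API v3 accepts the same token in the Authorization header
curl --header "Authorization: $REPLICATED_API_TOKEN" \
  https://api.replicated.com/vendor/v3/apps
```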
Updates to a user's RBAC role are applied to all of the tokens belonging to that user.

Revoking a user token immediately invalidates that token. When a user account is deleted, its user tokens are also deleted.

## Generate Tokens

To use the Replicated CLI or the Vendor API v3, you need a User API token or a Service Account token. Existing team API tokens also continue to work.

### Generate a Service Account

To generate a service account:

1. Log in to the Vendor Portal, and select [**Team > Service Accounts**](https://vendor.replicated.com/team/serviceaccounts).
1. Select **New Service Account**. If one or more service accounts already exist, you can add another by selecting **New Service Account**.

1. Edit the fields in the **New Service Account** dialog:

   <img alt="New Service Accounts Dialog" src="/images/service-accounts.png" width="400px"/>

   [View a larger version of this image](/images/service-accounts.png)

   1. For **Nickname**, enter a name for the token. Names for service accounts must be unique within a given team.

   1. For **RBAC**, select the RBAC policy from the dropdown list. The token must have `Admin` access to create new releases.

      This list includes the Vendor Portal default policies `Admin` and `Read Only`. Any custom policies also display in this list. For more information, see [Configuring RBAC Policies](team-management-rbac-configuring).

      Users with a non-admin RBAC role cannot select any other RBAC role when creating a token. They are restricted to creating a token with the same level of access to avoid permission elevation.

   1. (Optional) For custom RBAC policies, select the **Limit to read-only version of above policy** check box if you want to use a policy that has Read/Write permissions but limit this service account to read-only access. This option lets you maintain one version of a custom RBAC policy and use it two ways: as read/write and as read-only.

1. Select **Create Service Account**.

1. Copy the service account token and save it in a secure location. The token will not be available to view again.

   :::note
   To remove a service account, select **Remove** for the service account that you want to delete.
   :::

### Generate a User API Token

To generate a user API token:

1. Log in to the Vendor Portal and go to the [Account Settings](https://vendor.replicated.com/account-settings) page.
1. Under **User API Tokens**, select **Create a user API token**. If one or more tokens already exist, you can add another by selecting **New user API token**.

   <img alt="User API Token Page" src="/images/user-token-list.png" width="600px"/>

   [View a larger version of this image](/images/user-token-list.png)

1. In the **New user API token** dialog, enter a name for the token in the **Nickname** field. Names for user API tokens must be unique per user.

   <img alt="Create New User Token Dialog" src="/images/user-token-create.png" width="400px"/>

   [View a larger version of this image](/images/user-token-create.png)

1. Select the required permissions or use the default **Read and Write** permissions. Then select **Create token**.

   :::note
   The token must have `Read and Write` access to create new releases.
   :::

1. Copy the user API token that displays and save it in a secure location. The token will not be available to view again.

   :::note
   To revoke a token, select **Revoke token** for the token that you want to delete.
   :::

================
File: docs/vendor/replicated-onboarding.mdx
================
import CreateRelease from "../partials/getting-started/_create-promote-release.mdx"
import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx"
import EcCr from "../partials/embedded-cluster/_ec-config.mdx"
import HelmPackage from "../partials/helm/_helm-package.mdx"
import Requirements from "../partials/embedded-cluster/_requirements.mdx"
import SDKOverview from "../partials/replicated-sdk/_overview.mdx"
import TestYourChanges from "../partials/getting-started/_test-your-changes.mdx"
import UnauthorizedError from "../partials/replicated-sdk/_401-unauthorized.mdx"

# Replicated Onboarding

This topic describes how to onboard applications to the Replicated Platform.

## Before You Begin

This section includes guidance and prerequisites to review before you begin onboarding your application.

### Best Practices and Recommendations

The following are some best practices and recommendations for successfully onboarding with Replicated:

* When integrating new Replicated features with an application, make changes in small iterations and test frequently by installing or upgrading the application in a development environment. This will help you more easily identify and troubleshoot issues. This onboarding workflow will guide you through the process of integrating features in small iterations.

* Use the Replicated CLI to create and manage your application and releases. Getting familiar with the Replicated CLI will also help later on when integrating Replicated workflows into your CI/CD pipelines. For more information, see [Installing the Replicated CLI](/reference/replicated-cli-installing).

* These onboarding tasks assume that you will test the installation of each release on a VM with the Replicated Embedded Cluster installer _and_ in a cluster with the Replicated KOTS installer. If you do not intend to offer existing cluster installations with KOTS (for example, if you intend to support only Embedded Cluster and Helm installations for your users), then you can choose to test with Embedded Cluster only.

* Ask for help from the Replicated community. For more information, see [Getting Help from the Community](#community) below.

### Getting Help from the Community {#community}

The [Replicated community site](https://community.replicated.com/) is a forum where Replicated team members and users can post questions and answers related to working with the Replicated Platform. It is designed to help Replicated users troubleshoot and learn more about common tasks involved with distributing, installing, observing, and supporting their application.

Before posting in the community site, use the search to find existing knowledge base articles related to your question. If you are not able to find an existing article that addresses your question, create a new topic or add a reply to an existing topic so that a member of the Replicated community or team can respond.

To search and participate in the Replicated community, see https://community.replicated.com/.

### Prerequisites

* Create an account in the Vendor Portal. You can either create a new team or join an existing team. For more information, see [Creating a Vendor Account](vendor-portal-creating-account).

* Install the Replicated CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing).
* Complete a basic quick start workflow to create an application with a sample Helm chart and then promote and install releases in a development environment. This helps you get familiar with the process of creating, installing, and updating releases in the Replicated Platform. See [Replicated Quick Start](/vendor/quick-start).

* Ensure that you have access to a VM that meets the requirements for the Replicated Embedded Cluster installer. You will use this VM to test installation with Embedded Cluster.

  Embedded Cluster has the following requirements:

  <Requirements/>

* (Optional) Ensure that you have kubectl access to a Kubernetes cluster. You will use this cluster to test installation with KOTS. If you do not intend to offer existing cluster installations with KOTS (for example, if you intend to support only Embedded Cluster and Helm installations for your users), then you do not need access to a cluster for the main onboarding tasks.

  You can use any cloud provider or tool that you prefer to create a cluster, such as [Replicated Compatibility Matrix](/vendor/testing-how-to), Google Kubernetes Engine (GKE), or minikube.

## Onboard

Complete the tasks in this section to onboard your application. When you are done, you can continue to [Next Steps](#next-steps) to integrate other Replicated features with your application.

### Task 1: Create an Application

To get started with onboarding, first create a new application. This will be the official Vendor Portal application used by your team to create and promote both internal and customer-facing releases.

To create an application:

1. Create a new application using the Replicated CLI or the Vendor Portal. Use an official name for your application. See [Create an Application](/vendor/vendor-portal-manage-app#create-an-application).

   <details>
   <summary>Can I change the application name in the future?</summary>

   You can change the application name, but you cannot change the application _slug_.

   The Vendor Portal automatically generates and assigns a unique slug for each application based on the application's name. For example, the slug for "Example App" would be `example-app`.

   Application slugs are unique across all of Replicated. This means that, if necessary, the Vendor Portal will append a random word to the end of the slug to ensure uniqueness. For example, `example-app-flowers`.
   </details>

1. Set the `REPLICATED_APP` environment variable to the unique slug of the application that you created. This will allow you to interact with the application from the Replicated CLI throughout onboarding. See [Set Environment Variables](/reference/replicated-cli-installing#replicated_app) in _Installing the Replicated CLI_.

   For example:

   ```bash
   export REPLICATED_APP=my-app
   ```

### Task 2: Connect Your Image Registry

Add credentials for your image registry to the Vendor Portal. This will allow you to use the Replicated proxy registry in a later step so that you can grant proxy access to application images without exposing registry credentials to your customers.

For more information, see [Connecting to an External Registry](/vendor/packaging-private-images).

### Task 3: Add the Replicated SDK and Package Your Chart

Next, add the Replicated SDK as a dependency of your Helm chart and package the chart as a `.tgz` archive.

The Replicated SDK is a Helm chart that can be installed as a small service alongside your application.
The SDK provides access to key Replicated functionality, including an in-cluster API and automatic access to insights and operational telemetry for instances running in customer environments. For more information, see [About the Replicated SDK](/vendor/replicated-sdk-overview). - -To package your Helm chart with the Replicated SDK: - -1. Go to the local directory where your Helm chart is. - -1. In your application Helm chart `Chart.yaml` file, add the YAML below to declare the SDK as a dependency. - - If your application is installed as multiple charts, declare the SDK as a dependency of the chart that customers install first. Do not declare the SDK in more than one chart. For more information, see [Packaging a Helm Chart for a Release](helm-install-release). - - <DependencyYaml/> - -1. Update dependencies and package the chart as a `.tgz` file: - - <HelmPackage/> - - <UnauthorizedError/> - -1. If your application is deployed as multiple Helm charts, package each chart as a separate `.tgz` archive using the `helm package -u PATH_TO_CHART` command. Do not declare the SDK in more than one chart. - -### Task 4: Create the Initial Release with KOTS HelmChart and Embedded Cluster Config {#first-release} - -After packaging your Helm chart, you can create a release. The initial release for your application will include the minimum files required to install a Helm chart with the Embedded Cluster installer: -* The Helm chart `.tgz` archive -* [KOTS HelmChart custom resource](/reference/custom-resource-helmchart-v2) -* [Embedded Cluster Config](/reference/embedded-config) - -If you have multiple charts, you will add each chart archive to the release, plus a corresponding KOTS HelmChart custom resource for each archive. - -:::note -Configuring the KOTS HelmChart custom resource includes several tasks, and involves the use of KOTS template functions. Depending on how many Helm charts your application uses, Replicated recommends that you allow about two to three hours for configuring the HelmChart custom resource and creating and testing your initial release. -::: - -To create the first release for your application: - -1. In the local directory for your Helm chart, create a subdirectory named `manifests` where you will add the files for the release. - -1. In the `manifests` directory: - - 1. Move the `.tgz` chart archive that you packaged. If your application is deployed as multiple Helm charts, move each `.tgz` archive to `manifests`. - - 1. Create an `embedded-cluster.yaml` file with the following default Embedded Cluster Config: - - <EcCr/> - - <details> - <summary>What is the Embedded Cluster Config?</summary> - - The Embedded Cluster Config is required to install with Embedded Cluster. - </details> - - For more information, see [Using Embedded Cluster](/vendor/embedded-overview). - - 1. Create a new YAML file. In this file, configure the KOTS HelmChart custom resource by completing the workflow in [Configuring the HelmChart Custom Resource](helm-native-v2-using). - - <details> - <summary>What is the KOTS HelmChart custom resource?</summary> - - The KOTS HelmChart custom resource is required to install Helm charts with KOTS and Embedded Cluster. As part of configuring the KOTS HelmChart custom resource, you will rewrite image names and add image pull secrets to allow your application images to be accessed through the Replicated proxy registry. - </details> - - 1. 
If your application is deployed as multiple Helm charts, repeat the step above to add a separate HelmChart custom resource for each Helm chart archive in the release. - - 1. If there are values in any of your Helm charts that need to be set for the installation to succeed, you can set those values using the `values` key in the corresponding HelmChart custom resource. See [Setting Helm Values with KOTS](/vendor/helm-optional-value-keys). - - This is a temporary measure to ensure the values get passed to the Helm chart during installation until you configure the Admin Console Config screen in a later onboarding task. If your default Helm values are sufficient for installation, you can skip this step. - - 1. If your application requires that certain components are deployed before the application and as part of the Embedded Cluster itself, then update the Embedded Cluster Config to add [extensions](/reference/embedded-config#extensions). Extensions allow you to provide Helm charts that are deployed before your application. For example, one situation where this is useful is if you want to ship an ingress controller because Embedded Cluster does not include one. - - For more information, see [extensions](/reference/embedded-config#extensions) in _Embedded Cluster Config_. - -1. From the `manifests` directory, create a release and promote it to the Unstable channel. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Managing Releases with the CLI](releases-creating-cli). - - ```bash - replicated release create --yaml-dir . --promote Unstable - ``` - -1. Install the release in your development environment to test: - - 1. Install with Embedded Cluster on a VM. See [Online Installation with Embedded Cluster](/enterprise/installing-embedded). - - 1. (Optional) Install in an existing cluster with KOTS. See [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster). - -After successfully installing the initial release on a VM with Embedded Cluster (and optionally in an existing cluster with KOTS), go to the next task. You will continue to iterate throughout the rest of the onboarding process by creating and promoting new releases, then upgrading to the new version in your development environment. - -### Task 5: Customize the KOTS Admin Console {#admin-console} - -Configure the KOTS Application custom resource to add an application name, icon, and status informers. The name and icon will be displayed in the Admin Console and the Replicated Download Portal. The status informers will be used to display the application status on the Admin Console dashboard. - -To configure the KOTS Application custom resource: - -1. In your `manifests` directory, create a new `kots-app.yaml` file. - -1. In the `kots-app.yaml` file, add the [KOTS Application](/reference/custom-resource-application) custom resource YAML and set the `title`, `icon`, and `statusInformers` fields. 
   **Example:**

   ```yaml
   apiVersion: kots.io/v1beta1
   kind: Application
   metadata:
     name: gitea
   spec:
     title: Gitea
     # Base64 encoded image string
     icon: fyJINrigNkt5VsRiub9nXICdsYyVd2NcVvA3ScE5t2rb5JuEeyZnAhmLt9NK63vX1O
     statusInformers:
       - deployment/gitea
   ```
   For more information, see:
   * [Customizing the Application Icon](/vendor/admin-console-customize-app-icon)
   * [Enabling and Understanding Application Status](/vendor/insights-app-status)
   * [Application](/reference/custom-resource-application)
   <br/>
   <details>
   <summary>Can I preview the icon before installing the release?</summary>

   Yes. The Vendor Portal includes an **Application icon preview** in the **Help** pane on the **Edit release** page.

   ![Icon preview](/images/icon-preview.png)

   [View a larger version of this image](/images/icon-preview.png)

   </details>

1. <CreateRelease/>

1. <TestYourChanges/>

### Task 6: Set Up the Admin Console Config Screen and Map to Helm Values

The KOTS Admin Console Config screen is used to collect required and optional application configuration values from your users. User-supplied values provided on the Config screen can be mapped to your Helm values.

Before you begin this task, you can complete the [Set Helm Values with KOTS](/vendor/tutorial-config-setup) tutorial to learn how to map user-supplied values from the Admin Console Config screen to a Helm chart.

:::note
Setting up the Admin Console config screen can include the use of various types of input fields, conditional statements, and KOTS template functions. Depending on your application's configuration options, Replicated recommends that you allow about two to three hours for configuring the Config custom resource and testing the Admin Console config screen.
:::

To set up the Admin Console Config screen for your application:

1. In your `manifests` directory, create a new file named `kots-config.yaml`.

1. In `kots-config.yaml`, add the KOTS Config custom resource and configure it based on the values that you need to collect from users.

   **Example:**

   ```yaml
   apiVersion: kots.io/v1beta1
   kind: Config
   metadata:
     name: my-application
   spec:
     groups:
       - name: example_group
         title: Example Group
         items:
           - name: example_item
             title: Example Item
             type: text
             default: "Hello World"
   ```

   For more information, see:
   * [Creating and Editing Configuration Fields](/vendor/admin-console-customize-config-screen)
   * [Using Conditional Statements in Configuration Fields](/vendor/config-screen-conditional)
   * [Config](/reference/custom-resource-config)

   <br/>

   <details>
   <summary>Can I preview the Admin Console config screen before installing the release?</summary>

   Yes. The Vendor Portal includes a **Config preview** in the **Help** pane on the **Edit release** page.

   For example:

   ![Config preview](/images/config-preview.png)

   [View a larger version of this image](/images/config-preview.png)
   </details>

1. <CreateRelease/>

1. <TestYourChanges/>

1. In `manifests`, open the KOTS HelmChart custom resource that you configured in a previous step. Configure the `values` key of the HelmChart custom resource to map the fields in the KOTS Config custom resource to your Helm values, as shown in the sketch below.
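   For example, a minimal sketch of the `values` mapping. It wires the `example_item` Config field from the example above to an assumed Helm value named `exampleItem` (the chart name and value name are illustrative):

   ```yaml
   apiVersion: kots.io/v1beta2
   kind: HelmChart
   metadata:
     name: samplechart
   spec:
     chart:
       name: samplechart
       chartVersion: 1.0.0
     values:
       # Render the user-supplied Config value into the Helm value at deploy time
       exampleItem: repl{{ ConfigOption "example_item" }}
   ```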
- - For more information, see: - * [Mapping User-Supplied Values](/vendor/config-screen-map-inputs) - * [Tutorial: Set Helm Chart Values with KOTS](/vendor/tutorial-config-setup) - * [Setting Helm Values with KOTS](/vendor/helm-optional-value-keys) - * [`values`](/reference/custom-resource-helmchart-v2#values) in _HelmChart v2_ - -1. <CreateRelease/> - -1. <TestYourChanges/> - -1. Continue to create and test new releases with new config fields until you are ready to move on to the next task. - -### Task 7: Define Preflight Checks - -In the next two tasks, you will add specs for _preflight checks_ and _support bundles_. - -Preflight checks and support bundles are provided by the Troubleshoot open source project, which is maintained by Replicated. Troubleshoot is a kubectl plugin that provides diagnostic tools for Kubernetes applications. For more information, see the open source [Troubleshoot](https://troubleshoot.sh/docs/) documentation. - -Preflight checks and support bundles analyze data from customer environments to provide insights that help users to avoid or troubleshoot common issues with an application: -* **Preflight checks** run before an application is installed to check that the customer environment meets the application requirements. -* **Support bundles** collect troubleshooting data from customer environments to help users diagnose problems with application deployments. - -:::note -Before you begin this task, you can complete the [Add Preflight Checks to a Helm Chart](/vendor/tutorial-preflight-helm-setup) tutorial to learn how to add a preflight spec to a Helm chart in a Kubernetes secret and run the preflight checks before installation. -::: - -To define preflight checks for your application: - -1. In your Helm chart `templates` directory, add a Kubernetes Secret that includes a preflight spec. For more information, see [Defining Preflight Checks](/vendor/preflight-defining). For examples, see [Example Preflight Specs](/vendor/preflight-examples). - :::note - If your application is deployed as multiple Helm charts, add the Secret to the `templates` directory for the chart that is installed first. - ::: - -1. Update dependencies and package the chart as a `.tgz` file: - - <HelmPackage/> - -1. Move the `.tgz` file to the `manifests` directory. - -1. <CreateRelease/> - -1. <TestYourChanges/> - - Preflight checks run automatically during installation. - -1. Continue to create and test new releases with additional preflight checks until you are ready to move on to the next task. - -### Task 8: Add a Support Bundle Spec - -To add the default support bundle spec to your application: - -1. In your Helm chart `templates` directory, add the following YAML to a Kubernetes Secret to enable the default support bundle spec for your application: - - ```yaml - apiVersion: v1 - kind: Secret - metadata: - labels: - troubleshoot.sh/kind: support-bundle - name: example - stringData: - support-bundle-spec: | - apiVersion: troubleshoot.sh/v1beta2 - kind: SupportBundle - metadata: - name: support-bundle - spec: - collectors: [] - analyzers: [] - ``` - :::note - If your application is installed as multiple Helm charts, you can optionally create separate support bundle specs in each chart. The specs are automatically merged when a support bundle is generated. Alternatively, continue with a single support bundle spec and then optionally revisit how you organize your support bundle specs after you finish onboarding. - ::: - -1. 
(Recommended) At a minimum, include the `logs` collector in your support bundle spec. This collects logs from running Pods in the cluster.

    **Example:**

    ```yaml
    apiVersion: v1
    kind: Secret
    metadata:
      name: example
      labels:
        troubleshoot.sh/kind: support-bundle
    stringData:
      support-bundle-spec: |-
        apiVersion: troubleshoot.sh/v1beta2
        kind: SupportBundle
        metadata:
          name: example
        spec:
          collectors:
            - logs:
                selector:
                  - app.kubernetes.io/name=myapp
                namespace: {{ .Release.Namespace }}
                limits:
                  maxAge: 720h
                  maxLines: 10000
    ```

    For more information, see:
    * [Adding and Customizing Support Bundles](/vendor/support-bundle-customizing)
    * [Example Support Bundle Specs](/vendor/support-bundle-examples)
    * [Pod Logs](https://troubleshoot.sh/docs/collect/logs/) in the Troubleshoot documentation.

1. (Recommended) Ensure that any preflight checks that you added are also included in your support bundle spec. This ensures that support bundles collect at least the same information that is collected when running preflight checks.

1. Update dependencies and package the chart as a `.tgz` file:

    <HelmPackage/>

1. Move the `.tgz` file to the `manifests` directory.

1. <CreateRelease/>

1. <TestYourChanges/>

    For information about how to generate support bundles, see [Generating Support Bundles](/vendor/support-bundle-generating).

1. (Optional) Customize the support bundle spec by adding additional collectors and analyzers.

### Task 9: Alias Replicated Endpoints with Your Own Domains

Your customers are exposed to several Replicated domains by default. Replicated recommends that you use custom domains to unify the customer's experience with your brand and simplify security reviews.

For more information, see [Using Custom Domains](/vendor/custom-domains-using).

## Next Steps

After completing the main onboarding tasks, Replicated recommends that you also complete the following additional tasks to integrate other Replicated features with your application. You can complete these next recommended tasks in any order and at your own pace.

### Add Support for Helm Installations

Existing KOTS releases that include one or more Helm charts can be installed with the Helm CLI; it is not necessary to create and manage separate releases or channels for each installation method.

To enable Helm installations for Helm charts distributed with Replicated, the only extra step is to add a Secret to your chart to authenticate with the Replicated proxy registry.

This is the same Secret that is passed to KOTS in the HelmChart custom resource using `'{{repl ImagePullSecretName }}'`, which you did as part of [Task 4: Create and Install the Initial Release](#first-release). So, whereas this Secret is created automatically for KOTS and Embedded Cluster installations, you need to create it and add it to your Helm chart for Helm installations.

:::note
Before you test Helm installations for your application, you can complete the [Deploy a Helm Chart with KOTS and the Helm CLI](tutorial-kots-helm-setup) tutorial to learn how to install a single release with both KOTS and Helm.
:::

To support and test Helm installations:

1. Follow the steps in [Using the Proxy Registry with Helm Installations](/vendor/helm-image-registry) to authenticate with the Replicated proxy registry by creating a Secret with `type: kubernetes.io/dockerconfigjson` in your Helm chart.

1. 
Update dependencies and package the chart as a `.tgz` file:

    <HelmPackage/>

1. Add the `.tgz` file to a release. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Managing Releases with the CLI](releases-creating-cli).

1. Install the release in a cluster with the Helm CLI to test your changes. For more information, see [Installing with Helm](/vendor/install-with-helm).

### Add Support for Air Gap Installations

Replicated Embedded Cluster and KOTS support installations in _air gap_ environments with no outbound internet access. Users can install with Embedded Cluster and KOTS in air gap environments by providing air gap bundles that contain the required images for the installers and for your application.

:::note
Replicated also offers Alpha support for air gap installations with Helm. If you are interested in trying Helm air gap installations and providing feedback, please reach out to your account rep to enable this feature.
:::

To add support for air gap installations:

1. If there are any images for your application that are not listed in your Helm chart, list these images in the `additionalImages` attribute of the KOTS Application custom resource. This ensures that the images are included in the air gap bundle for the release. One common use case for this is applications that use Kubernetes Operators. See [Define Additional Images](/vendor/operator-defining-additional-images).

1. In the KOTS HelmChart custom resource `builder` key, pass any values that are required for `helm template` to yield all the images needed to successfully install your application. See [Packaging Air Gap Bundles for Helm Charts](/vendor/helm-packaging-airgap-bundles).

    :::note
    If the default values in your Helm chart already enable all the images needed to successfully deploy, then you do not need to configure the `builder` key.
    :::

    <details>
    <summary>How do I know if I need to configure the `builder` key?</summary>

    When building an air gap bundle, the Vendor Portal templates the Helm charts in a release with `helm template` in order to detect the images that need to be included in the bundle. Images yielded by `helm template` are included in the bundle for the release.

    For many applications, running `helm template` with the default values would not yield all the images required to install. In these cases, vendors can pass the additional values in the `builder` key to ensure that the air gap bundle includes all the necessary images.
    </details>

1. If you have not done so already as part of [Task 4: Create and Install the Initial Release](#first-release), ensure that the `values` key in the KOTS HelmChart custom resource correctly rewrites image names for air gap installations. This is done using the KOTS `HasLocalRegistry`, `LocalRegistryHost`, and `LocalRegistryNamespace` template functions to render the location of the given image in the user's own local registry.

    For more information, see [Rewrite Image Names](/vendor/helm-native-v2-using#rewrite-image-names) in _Configuring the HelmChart Custom Resource v2_.

1. Create and promote a new release with your changes. For more information, see [Managing Releases with the Vendor Portal](releases-creating-releases) or [Managing Releases with the CLI](releases-creating-cli).

1. In the [Vendor Portal](https://vendor.replicated.com), go to the channel where the release was promoted to build the air gap bundle. 
Do one of the following:
    * If the **Automatically create airgap builds for newly promoted releases in this channel** setting is enabled on the channel, watch for the build status to complete.
    * If automatic air gap builds are not enabled, go to the **Release history** page for the channel and build the air gap bundle manually.

1. Create a customer with the **Airgap Download Enabled** entitlement enabled so that you can test air gap installations. See [Creating and Managing Customers](/vendor/releases-creating-customer).

1. Download the Embedded Cluster air gap installation assets, then install with Embedded Cluster on an air gap VM to test. See [Installing in Air Gap Environments with Embedded Cluster](/enterprise/installing-embedded-air-gap).

1. (Optional) Download the `.airgap` bundle for the release and the air gap bundle for the KOTS Admin Console. You can also download both bundles from the Download Portal for the target customer. Then, install in an existing air gap cluster to test. See [Air Gap Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster-airgapped).

1. (Optional) Follow the steps in [Installing and Updating with Helm in Air Gap Environments (Alpha)](/vendor/helm-install-airgap) to test air gap installation with Helm.

    :::note
    Air gap Helm installations are an Alpha feature. If you are interested in trying Helm air gap installations and providing feedback, please reach out to your account rep to enable this feature.
    :::

### Add Roles for Multi-Node Clusters in Embedded Cluster Installations

The Embedded Cluster Config supports roles for multi-node clusters. One or more roles can be selected and assigned to a node when it is joined to the cluster. Node roles can be used to determine which nodes run the Kubernetes control plane, and to assign application workloads to particular nodes.

For more information, see [roles](/reference/embedded-config#roles) in _Embedded Cluster Config_.

### Add and Map License Entitlements

You can add custom license entitlements for your application in the Vendor Portal. Custom license fields are useful when there is entitlement information that applies to a subset of customers. For example, you can use entitlements to:
* Limit the number of active users permitted
* Limit the number of nodes a customer is permitted on their cluster
* Identify a customer on a "Premium" plan that has access to additional features or functionality not available with your base plan

For more information about how to create and assign custom entitlements in the Vendor Portal, see [Managing Customer License Fields](/vendor/licenses-adding-custom-fields) and [Creating and Managing Customers](/vendor/releases-creating-customer).

#### Map Entitlements to Helm Values

You can map license entitlements to your Helm values using KOTS template functions. This can be useful when you need to set certain values based on the user's license information. For more information, see [Using KOTS Template Functions](/vendor/helm-optional-value-keys#using-kots-template-functions) in _Setting Helm Values with KOTS_.

#### Query Entitlements Before Installation and at Runtime

You can add logic to your application to query license entitlements both before deployment and at runtime. For example, you might want to add preflight checks that verify a user's entitlements before installing. Or, you can expose additional product functionality dynamically at runtime based on a customer's entitlements.
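
For example, a runtime check could read a hypothetical `num_seats` custom license field through the Replicated SDK API, which is exposed in the cluster at `replicated:3000`. The field name below is a placeholder for illustration:

```bash
# Query a custom license entitlement from inside the cluster
curl replicated:3000/api/v1/license/fields/num_seats
```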

For more information, see:
* [Querying Entitlements with the Replicated SDK API](/vendor/licenses-reference-sdk)
* [Checking Entitlements in Preflights with KOTS Template Functions](/vendor/licenses-referencing-fields)

### Add Application Links to the Admin Console Dashboard

You can add the Kubernetes SIG Application custom resource to your release to add a link to your application from the Admin Console dashboard. This makes it easier for users to access your application after installation.

You can also configure the Kubernetes SIG Application resource to add links to other resources like documentation or dashboards.

For more information, see [Adding Application Links to the Dashboard](/vendor/admin-console-adding-buttons-links).

### Update the Preflight and Support Bundle Specs

After adding basic specs for preflights and support bundles, you can continue to add more collectors and analyzers as needed.

Consider the following recommendations and best practices:

* Revisit your preflight and support bundle specs when new support issues arise that are not covered by your existing specs.

* Your support bundles should include all of the same collectors and analyzers that are in your preflight checks. This ensures that support bundles include all the necessary troubleshooting information, including any failures in preflight checks.

* Your support bundles will most likely need to include other collectors and analyzers that are not in your preflight checks. This is because some of the information used for troubleshooting (such as logs) is not necessary when running preflight checks before installation.

* If your application is installed as multiple Helm charts, you can optionally add separate support bundle specs in each chart. This can make it easier to keep the specs up-to-date and to avoid merge conflicts that can be caused when multiple team members contribute to a single, large support bundle spec. When an application has multiple support bundle specs, the specs are automatically merged when generating a support bundle so that only a single support bundle is provided to the user.

The documentation for the open-source Troubleshoot project includes the full list of available collectors and analyzers that you can use. See [All Collectors](https://troubleshoot.sh/docs/collect/all/) and the [Analyze](https://troubleshoot.sh/docs/analyze/) section in the Troubleshoot documentation.

You can also view common examples of collectors and analyzers used in preflight checks and support bundles in [Preflight Spec Examples](preflight-examples) and [Support Bundle Spec Examples](support-bundle-examples).

### Configure Backup and Restore

Enable backup and restore with Velero for your application so that users can back up and restore their KOTS Admin Console and application data.

There are different steps to configure backup and restore for Embedded Cluster and for existing cluster installations with KOTS:
* To configure the disaster recovery feature for Embedded Cluster, see [Disaster Recovery for Embedded Cluster](/vendor/embedded-disaster-recovery).
* To configure the snapshots feature for existing cluster KOTS installations, see [Configuring Snapshots](snapshots-configuring-backups).

### Add Custom Metrics

In addition to the built-in insights displayed in the Vendor Portal by default (such as uptime and time to install), you can also configure custom metrics to measure instances of your application running in customer environments.
Custom metrics can be collected for application instances running in online or air gap environments using the Replicated SDK.

For more information, see [Configuring Custom Metrics](/vendor/custom-metrics).

### Integrate with CI/CD

Replicated recommends that teams integrate the Replicated Platform into their existing development and production CI/CD workflows. This can be useful for automating the processes of creating new releases, promoting releases, and testing releases with the Replicated Compatibility Matrix.

For more information, see:
* [About Integrating with CI/CD](/vendor/ci-overview)
* [About Compatibility Matrix](/vendor/testing-about)
* [Recommended CI/CD Workflows](/vendor/ci-workflows)

### Customize Release Channels

By default, the Vendor Portal includes Unstable, Beta, and Stable channels. You can customize the channels in the Vendor Portal based on your application needs.

Consider the following recommendations:
* Use the Stable channel for your primary release cadence. Releases should be promoted to the Stable channel only as frequently as your average customer can consume new releases. Typically, this is no more than monthly. However, this cadence varies depending on the customer base.
* If you have a SaaS product, you might want to create an "Edge" channel where you promote the latest SaaS releases.
* You can consider a “Long Term Support” channel where you promote new releases less frequently and support those releases for longer.
* It can be useful to create channels for each feature branch so that internal teams reviewing a PR can easily get the installation artifacts as well as review the code. You can automate channel creation as part of a pipeline or Makefile.

For more information, see:
* [About Channels and Releases](/vendor/releases-about)
* [Creating and Editing Channels](/vendor/releases-creating-channels)

### Write Your Documentation

Before distributing your application to customers, ensure that your documentation is up-to-date. In particular, be sure to update the installation documentation to include the procedures and requirements for installing with Embedded Cluster, Helm, and any other installation methods that you support.

For guidance on how to get started with documentation for applications distributed with Replicated, including key considerations, examples, and templates, see [Writing Great Documentation for On-Prem Software Distributed with Replicated](https://www.replicated.com/blog/writing-great-documentation-for-on-prem-software-distributed-with-replicated) in the Replicated blog.

================
File: docs/vendor/replicated-sdk-airgap.mdx
================
# Installing the SDK in Air Gap Environments

This topic explains how to install the Replicated SDK in air gap environments by enabling air gap mode.

## Overview

The Replicated SDK has an _air gap mode_ that allows it to run in environments with no outbound internet access. When installed in air gap mode, the SDK does not attempt to connect to the internet. This avoids any failures that would occur when the SDK is unable to make outbound requests in air gap environments.

Air gap mode is enabled when `isAirgap: true` is set in the values for the SDK Helm chart. For more information, see [Install the SDK in Air Gap Mode](#install) below.
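
For reference, the following is a minimal values snippet that enables air gap mode, assuming the SDK is included as a subchart under the default `replicated` key:

```yaml
# values passed to the SDK Helm chart
replicated:
  isAirgap: true
```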
Allowing air gap mode to be controlled with the `isAirgap` value means that vendors and enterprise customers do not need to rely on air gap environments being automatically detected, which is unreliable and error-prone. The `isAirgap` value also allows the SDK to be installed in air gap mode even if the instance can access the internet.

## Differences in Air Gap Mode

Air gap mode differs from non-air gap installations of the SDK in the following ways:
* The SDK stores instance telemetry and custom metrics in a Kubernetes Secret in the customer environment, rather than attempting to send telemetry and custom metrics back to the Replicated Vendor Portal. The telemetry and custom metrics stored in the Secret are collected whenever a support bundle is generated in the environment, and are reported when the support bundle is uploaded to the Vendor Portal. For more information about telemetry for air gap instances, see [Collecting Telemetry for Air Gap Instances](/vendor/telemetry-air-gap).
* The SDK returns an empty array (`[]`) for any requests to check for updates using the [`/api/v1/app/updates`](/reference/replicated-sdk-apis#get-appupdates) SDK API endpoint. This is because the SDK is not able to receive updates from the Vendor Portal when running in air gap environments.
* Instance tags cannot be updated with the [`/app/instance-tags`](/reference/replicated-sdk-apis#post-appinstance-tags) SDK API endpoint.

In air gap mode, the SDK can still make requests to SDK API endpoints that do not require outbound internet access, such as the [`license`](/reference/replicated-sdk-apis#license) endpoints and the [`/app/info`](/reference/replicated-sdk-apis#get-appinfo) endpoint. However, these endpoints return whatever values were injected into the SDK when the chart was most recently pulled. These values might not match the latest information available in the Vendor Portal because the SDK cannot receive updates when running in air gap environments.

## Install the SDK in Air Gap Mode {#install}

This section describes how to install the Replicated SDK in air gap mode with the Helm CLI and with Replicated KOTS.

### Helm CLI

When the SDK is installed with the Helm CLI, air gap mode can be enabled by passing `--set replicated.isAirgap=true` with the Helm CLI installation command.

For example:

```
helm install gitea oci://registry.replicated.com/my-app/gitea --set replicated.isAirgap=true
```

For more information about Helm CLI installations with Replicated, see [Installing with Helm](/vendor/install-with-helm). For more information about setting Helm values with the `helm install` command, see [Helm Install](https://helm.sh/docs/helm/helm_install/) in the Helm documentation.

:::note
Replicated does not provide air gap bundles for applications installed with the Helm CLI. Air gap bundles are a feature of KOTS.
:::

### KOTS

When the SDK is installed by KOTS in an air gap environment, KOTS automatically sets `isAirgap: true` in the SDK Helm chart values to enable air gap mode. No additional configuration is required.

================
File: docs/vendor/replicated-sdk-customizing.md
================
# Customizing the Replicated SDK

This topic describes various ways to customize the Replicated SDK, including customizing RBAC, setting environment variables, adding tolerations, and more.

## Customize RBAC for the SDK

This section describes role-based access control (RBAC) for the Replicated SDK, including the default RBAC, minimum RBAC requirements, and how to install the SDK with custom RBAC.

### Default RBAC

The SDK creates default Role, RoleBinding, and ServiceAccount objects during installation. The default Role allows the SDK to get, list, and watch all resources in the namespace, to create Secrets, and to update the `replicated`, `replicated-instance-report`, and `replicated-custom-app-metrics-report` Secrets:

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    {{- include "replicated.labels" . | nindent 4 }}
  name: replicated-role
rules:
- apiGroups:
  - '*'
  resources:
  - '*'
  verbs:
  - 'get'
  - 'list'
  - 'watch'
- apiGroups:
  - ''
  resources:
  - 'secrets'
  verbs:
  - 'create'
- apiGroups:
  - ''
  resources:
  - 'secrets'
  verbs:
  - 'update'
  resourceNames:
  - replicated
  - replicated-instance-report
  - replicated-custom-app-metrics-report
```

### Minimum RBAC Requirements

The SDK requires the following minimum RBAC permissions:
* Create Secrets.
* Get and update Secrets named `replicated`, `replicated-instance-report`, and `replicated-custom-app-metrics-report`.
* The following permissions for status informers:
    * If you defined custom status informers, then the SDK must have permissions to get, list, and watch all the resources listed in the `replicated.statusInformers` array in your Helm chart `values.yaml` file.
    * If you did _not_ define custom status informers, then the SDK must have permissions to get, list, and watch the following resources:
        * Deployments
        * Daemonsets
        * Ingresses
        * PersistentVolumeClaims
        * Statefulsets
        * Services
    * For any Ingress resources used as status informers, the SDK requires `get` permissions for the Service resources listed in the `backend.Service.Name` field of the Ingress resource.
    * For any Daemonset and Statefulset resources used as status informers, the SDK requires `list` permissions for pods in the namespace.
    * For any Service resources used as status informers, the SDK requires `get` permissions for Endpoint resources with the same name as the service.

    The Replicated Vendor Portal uses status informers to provide application status data. For more information, see [Helm Installations](/vendor/insights-app-status#helm-installations) in _Enabling and Understanding Application Status_.

### Install the SDK with Custom RBAC

#### Custom ServiceAccount

To use the SDK with custom RBAC permissions, provide the name for a custom ServiceAccount object during installation. When a service account is provided, the SDK uses the RBAC permissions granted to the service account and does not create the default Role, RoleBinding, or ServiceAccount objects.

To install the SDK with custom RBAC:

1. Create custom Role, RoleBinding, and ServiceAccount objects. The Role must meet the minimum requirements described in [Minimum RBAC Requirements](#minimum-rbac-requirements) above. (A minimal sketch of these objects is provided after this procedure.)
1. During installation, provide the name of the service account that you created by including `--set replicated.serviceAccountName=CUSTOM_SERVICEACCOUNT_NAME`.

    **Example**:

    ```
    helm install wordpress oci://registry.replicated.com/my-app/beta/wordpress --set replicated.serviceAccountName=mycustomserviceaccount
    ```

    For more information about installing with Helm, see [Installing with Helm](/vendor/install-with-helm).
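
The following is a minimal sketch of custom RBAC objects that satisfy the requirements described in [Minimum RBAC Requirements](#minimum-rbac-requirements) above, assuming the default status informers are used. The object names are placeholders:

```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: mycustomserviceaccount
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: replicated-minimal
rules:
# Create Secrets, and get and update the Secrets managed by the SDK
- apiGroups: ['']
  resources: ['secrets']
  verbs: ['create']
- apiGroups: ['']
  resources: ['secrets']
  verbs: ['get', 'update']
  resourceNames:
  - replicated
  - replicated-instance-report
  - replicated-custom-app-metrics-report
# Read access for the default status informer resource types
- apiGroups: ['apps']
  resources: ['deployments', 'daemonsets', 'statefulsets']
  verbs: ['get', 'list', 'watch']
- apiGroups: ['networking.k8s.io']
  resources: ['ingresses']
  verbs: ['get', 'list', 'watch']
- apiGroups: ['']
  resources: ['services', 'persistentvolumeclaims']
  verbs: ['get', 'list', 'watch']
# Needed when Services, Daemonsets, or Statefulsets are status informers
- apiGroups: ['']
  resources: ['endpoints']
  verbs: ['get']
- apiGroups: ['']
  resources: ['pods']
  verbs: ['list']
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: replicated-minimal
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: replicated-minimal
subjects:
- kind: ServiceAccount
  name: mycustomserviceaccount
```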

#### Custom ClusterRole

To use the SDK with an existing ClusterRole, provide the name for a custom ClusterRole object during installation. When a cluster role is provided, the SDK uses the RBAC permissions granted to the cluster role and does not create the default RoleBinding. Instead, the SDK creates a ClusterRoleBinding as well as a ServiceAccount object.

To install the SDK with a custom ClusterRole:

1. Create a custom ClusterRole object. The ClusterRole must meet at least the minimum requirements described in [Minimum RBAC Requirements](#minimum-rbac-requirements) above. However, it can also provide additional permissions that can be used by the SDK, such as listing cluster Nodes.
1. During installation, provide the name of the cluster role that you created by including `--set replicated.clusterRole=CUSTOM_CLUSTERROLE_NAME`.

    **Example**:

    ```
    helm install wordpress oci://registry.replicated.com/my-app/beta/wordpress --set replicated.clusterRole=mycustomclusterrole
    ```

    For more information about installing with Helm, see [Installing with Helm](/vendor/install-with-helm).

## Set Environment Variables {#env-var}

The Replicated SDK provides a `replicated.extraEnv` value that allows users to set additional environment variables for the deployment that are not exposed as Helm values.

This ensures that users can set the environment variables that they require without the SDK Helm chart needing to be modified to expose the values. For example, if the SDK is running behind an HTTP proxy server, then the user could set `HTTP_PROXY` or `HTTPS_PROXY` environment variables to provide the hostname or IP address of their proxy server.

To add environment variables to the Replicated SDK deployment, include the `replicated.extraEnv` array in your Helm chart `values.yaml` file. The `replicated.extraEnv` array accepts a list of environment variables in the following format:

```yaml
# Helm chart values.yaml

replicated:
  extraEnv:
  - name: ENV_VAR_NAME
    value: ENV_VAR_VALUE
```

:::note
If the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` variables are configured with the [kots install](/reference/kots-cli-install) command, these variables will also be set automatically in the Replicated SDK.
:::

**Example**:

```yaml
# Helm chart values.yaml

replicated:
  extraEnv:
  - name: MY_ENV_VAR
    value: my-value
  - name: MY_ENV_VAR_2
    value: my-value-2
```

## Custom Certificate Authority

When installing the Replicated SDK behind a proxy server that terminates TLS and injects a custom certificate, you must provide the CA to the SDK. This can be done by storing the CA in a ConfigMap or a Secret prior to installation and providing appropriate values during installation.

### Using a ConfigMap

To use a CA stored in a ConfigMap:

1. Create a ConfigMap with the CA as the data value. Note that the name of the ConfigMap and the data key can be anything.
    ```bash
    kubectl -n <NAMESPACE> create configmap private-ca --from-file=ca.crt=./ca.crt
    ```
1. Add the name of the ConfigMap to the values file:
    ```yaml
    replicated:
      privateCAConfigmap: private-ca
    ```

:::note
If the `--private-ca-configmap` flag is used with the [kots install](/reference/kots-cli-install) command, this value will be populated in the Replicated SDK automatically.
:::

### Using a Secret

To use a CA stored in a Secret:

1. Create a Secret with the CA as a data value. Note that the name of the Secret and the key can be anything.
    ```bash
    kubectl -n <NAMESPACE> create secret generic private-ca --from-file=ca.crt=./ca.crt
    ```
1. Add the name of the Secret and the key to the values file:
    ```yaml
    replicated:
      privateCASecret:
        name: private-ca
        key: ca.crt
    ```

## Add Tolerations

The Replicated SDK provides a `replicated.tolerations` value that allows users to add custom tolerations to the deployment. For more information about tolerations, see [Taints and Tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) in the Kubernetes documentation.

To add tolerations to the Replicated SDK deployment, include the `replicated.tolerations` array in your Helm chart `values.yaml` file. The `replicated.tolerations` array accepts a list of tolerations in the following format:

```yaml
# Helm chart values.yaml

replicated:
  tolerations:
  - key: "key"
    operator: "Equal"
    value: "value"
    effect: "NoSchedule"
```

## Add Affinity

The Replicated SDK provides a `replicated.affinity` value that allows users to add custom affinity to the deployment. For more information about affinity, see [Affinity and anti-affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) in the Kubernetes documentation.

To add affinity to the Replicated SDK deployment, include the `replicated.affinity` map in your Helm chart `values.yaml` file. The `replicated.affinity` map accepts a standard Kubernetes affinity object in the following format:

```yaml
# Helm chart values.yaml

replicated:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: production/node-pool
            operator: In
            values:
            - private-node-pool
```

## Add Custom Labels

With the Replicated SDK version 1.1.0 and later, you can pass custom labels to the Replicated SDK Helm chart by setting the `replicated.commonLabels` and `replicated.podLabels` Helm values in your Helm chart.

### Requirement

The `replicated.commonLabels` and `replicated.podLabels` values are available with the Replicated SDK version 1.1.0 and later.

### commonLabels

The `replicated.commonLabels` value allows you to add one or more labels to all resources created by the SDK chart.

For example:

```yaml
# Helm chart values.yaml

replicated:
  commonLabels:
    environment: production
    team: platform
```

### podLabels

The `replicated.podLabels` value allows you to add pod-specific labels to the pod template.

For example:

```yaml
# Helm chart values.yaml

replicated:
  podLabels:
    monitoring: enabled
    custom.company.io/pod-label: value
```

================
File: docs/vendor/replicated-sdk-development.mdx
================
import IntegrationMode from "../partials/replicated-sdk/_integration-mode-install.mdx"

# Developing Against the SDK API

This topic describes how to develop against the SDK API to test changes locally. It includes information about installing the SDK in integration mode and port forwarding the SDK API service to your local machine. For more information about the SDK API, see [Replicated SDK API](/reference/replicated-sdk-apis).

## Install the SDK in Integration Mode

<IntegrationMode/>

## Port Forwarding the SDK API Service {#port-forward}

After the Replicated SDK is installed and initialized in a cluster, the Replicated SDK API is exposed at `replicated:3000`. You can access the SDK API for testing by forwarding port 3000 to your local machine.
- -To port forward the SDK API service to your local machine: - -1. Run the following command to port forward to the SDK API service: - - ```bash - kubectl port-forward service/replicated 3000 - ``` - ``` - Forwarding from 127.0.0.1:3000 -> 3000 - Forwarding from [::1]:3000 -> 3000 - ``` - -1. With the port forward running, test the SDK API endpoints as desired. For example: - - ```bash - curl localhost:3000/api/v1/license/fields/expires_at - curl localhost:3000/api/v1/license/fields/{field} - ``` - - For more information, see [Replicated SDK API](/reference/replicated-sdk-apis). - - :::note - When the SDK is installed in integration mode, requests to the `license` endpoints use your actual development license data, while requests to the `app` endpoints use the default mock data. - ::: - -================ -File: docs/vendor/replicated-sdk-installing.mdx -================ -import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" -import KotsVerReq from "../partials/replicated-sdk/_kots-version-req.mdx" -import RegistryLogout from "../partials/replicated-sdk/_registry-logout.mdx" -import IntegrationMode from "../partials/replicated-sdk/_integration-mode-install.mdx" - -# Installing the Replicated SDK - -This topic describes the methods for distributing and installing the Replicated SDK. - -It includes information about how to install the SDK alongside Helm charts or Kubernetes manifest-based applications using the Helm CLI or a Replicated installer (Replicated KOTS, kURL, Embedded Cluster). It also includes information about installing the SDK as a standalone component in integration mode. - -For information about installing the SDK in air gap mode, see [Installing the SDK in Air Gap Environments](replicated-sdk-airgap). - -## Requirement - -<KotsVerReq/> - -## Install the SDK as a Subchart - -When included as a dependency of your application Helm chart, the SDK is installed as a subchart alongside the application. - -To install the SDK as a subchart: - -1. In your application Helm chart `Chart.yaml` file, add the YAML below to declare the SDK as a dependency. If your application is installed as multiple charts, declare the SDK as a dependency of the chart that customers install first. Do not declare the SDK in more than one chart. - - <DependencyYaml/> - -1. Update the `charts/` directory: - - ``` - helm dependency update - ``` - :::note - <RegistryLogout/> - ::: - -1. Package the Helm chart into a `.tgz` archive: - - ``` - helm package . - ``` - -1. Add the chart archive to a new release. For more information, see [Managing Releases with the CLI](/vendor/releases-creating-cli) or [Managing Releases with the Vendor Portal](/vendor/releases-creating-releases). - -1. (Optional) Add a KOTS HelmChart custom resource to the release to support installation with Embedded Cluster, KOTS, or kURL. For more information, see [Configuring the HelmChart Custom Resource v2](/vendor/helm-native-v2-using). - -1. Save and promote the release to an internal-only channel used for testing, such as the default Unstable channel. - -1. Install the release using Helm or a Replicated installer. For more information, see: - * [Online Installation with Embedded Cluster](/enterprise/installing-embedded) - * [Installing with Helm](/vendor/install-with-helm) - * [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster) - * [Online Installation with kURL](/enterprise/installing-kurl) - -1. 
Confirm that the SDK was installed by checking that the `replicated` Deployment was created:

    ```
    kubectl get deploy --namespace NAMESPACE
    ```
    Where `NAMESPACE` is the namespace in the cluster where the application and the SDK are installed.

    **Example output**:

    ```
    NAME         READY   UP-TO-DATE   AVAILABLE   AGE
    my-app       1/1     1            1           35s
    replicated   1/1     1            1           35s
    ```

## Install the SDK Alongside a Kubernetes Manifest-Based Application {#manifest-app}

For applications that use Kubernetes manifest files instead of Helm charts, the SDK Helm chart can be added to a release and then installed by KOTS alongside the application.

<KotsVerReq/>

To add the SDK Helm chart to a release for a Kubernetes manifest-based application:

1. Install the Helm CLI using Homebrew:

    ```
    brew install helm
    ```
    For more information, including alternative installation options, see [Install Helm](https://helm.sh/docs/intro/install/) in the Helm documentation.

1. Download the `.tgz` chart archive for the SDK Helm chart:

    ```
    helm pull oci://registry.replicated.com/library/replicated --version SDK_VERSION
    ```
    Where `SDK_VERSION` is the version of the SDK to install. For a list of available SDK versions, see the [replicated-sdk repository](https://github.com/replicatedhq/replicated-sdk/tags) in GitHub.

    The output of this command is a `.tgz` file with the naming convention `CHART_NAME-CHART_VERSION.tgz`. For example, `replicated-1.1.1.tgz`.

    For more information and additional options, see [Helm Pull](https://helm.sh/docs/helm/helm_pull/) in the Helm documentation.

1. Add the SDK `.tgz` chart archive to a new release. For more information, see [Managing Releases with the CLI](/vendor/releases-creating-cli) or [Managing Releases with the Vendor Portal](/vendor/releases-creating-releases).

    The following shows an example of the SDK Helm chart added to a draft release for a standard manifest-based application:

    ![SDK Helm chart in a draft release](/images/sdk-kots-release.png)

    [View a larger version of this image](/images/sdk-kots-release.png)

1. If one was not created automatically, add a KOTS HelmChart custom resource to the release. HelmChart custom resources have `apiVersion: kots.io/v1beta2` and `kind: HelmChart`.

    **Example:**

    ```yaml
    apiVersion: kots.io/v1beta2
    kind: HelmChart
    metadata:
      name: replicated
    spec:
      # chart identifies a matching chart from a .tgz
      chart:
        # for name, enter replicated
        name: replicated
        # for chartVersion, enter the version of the
        # SDK Helm chart in the release
        chartVersion: 1.1.1
    ```

    As shown in the example above, the HelmChart custom resource requires the name and version of the SDK Helm chart that you added to the release:
    * **`chart.name`**: The name of the SDK Helm chart is `replicated`. You can find the chart name in the `name` field of the SDK Helm chart `Chart.yaml` file.
    * **`chart.chartVersion`**: The chart version varies depending on the version of the SDK that you pulled and added to the release. You can find the chart version in the `version` field of the SDK Helm chart `Chart.yaml` file.

    For more information about configuring the HelmChart custom resource to support KOTS installations, see [About Distributing Helm Charts with KOTS](/vendor/helm-native-about) and [HelmChart v2](/reference/custom-resource-helmchart-v2).

1. Save and promote the release to an internal-only channel used for testing, such as the default Unstable channel.

1. 
Install the release using a Replicated installer. For more information, see:
    * [Online Installation with Embedded Cluster](/enterprise/installing-embedded)
    * [Online Installation in Existing Clusters with KOTS](/enterprise/installing-existing-cluster)
    * [Online Installation with kURL](/enterprise/installing-kurl)

1. Confirm that the SDK was installed by checking that the `replicated` Deployment was created:

    ```
    kubectl get deploy --namespace NAMESPACE
    ```
    Where `NAMESPACE` is the namespace in the cluster where the application, the Admin Console, and the SDK are installed.

    **Example output**:

    ```
    NAME         READY   UP-TO-DATE   AVAILABLE   AGE
    kotsadm      1/1     1            1           112s
    my-app       1/1     1            1           28s
    replicated   1/1     1            1           27s
    ```

## Install the SDK in Integration Mode

<IntegrationMode/>

## Troubleshoot

### 401 Unauthorized Error When Updating Helm Dependencies {#401}

#### Symptom

You see an error message similar to the following after adding the Replicated SDK as a dependency in your Helm chart and then running `helm dependency update`:

```
Error: could not download oci://registry.replicated.com/library/replicated-sdk: failed to authorize: failed to fetch oauth token: unexpected status from GET request to https://registry.replicated.com/v2/token?scope=repository%3Alibrary%2Freplicated-sdk%3Apull&service=registry.replicated.com: 401 Unauthorized
```

#### Cause

When you run `helm dependency update`, Helm attempts to pull the Replicated SDK chart from the Replicated registry. An error can occur if you are already logged in to the Replicated registry with a license that has expired, such as when testing application releases.

#### Solution

To solve this issue:

1. Run the following command to remove login credentials for the Replicated registry:

    ```
    helm registry logout registry.replicated.com
    ```

1. Re-run `helm dependency update` for your Helm chart.

================
File: docs/vendor/replicated-sdk-overview.mdx
================
import SDKOverview from "../partials/replicated-sdk/_overview.mdx"
import SdkValues from "../partials/replicated-sdk/_sdk-values.mdx"

# About the Replicated SDK

This topic provides an introduction to using the Replicated SDK with your application.

## Overview

<SDKOverview/>

For more information about the Replicated SDK API, see [Replicated SDK API](/reference/replicated-sdk-apis). For information about developing against the SDK API locally, see [Developing Against the SDK API](replicated-sdk-development).

## Limitations

The Replicated SDK has the following limitations:

* Some popular enterprise continuous delivery tools, such as ArgoCD and Pulumi, deploy Helm charts by running `helm template` then `kubectl apply` on the generated manifests, rather than running `helm install` or `helm upgrade`. The following limitations apply to applications installed by running `helm template` then `kubectl apply`:

    * The `/api/v1/app/history` SDK API endpoint always returns an empty array because there is no Helm history in the cluster. See [GET /app/history](/reference/replicated-sdk-apis#get-apphistory) in _Replicated SDK API_.

    * The SDK does not automatically generate status informers to report status data for installed instances of the application. To get instance status data, you must enable custom status informers by overriding the `replicated.statusInformers` Helm value. 
See [Enable Application Status Insights](/vendor/insights-app-status#enable-application-status-insights) in _Enabling and Understanding Application Status_.

## SDK Resiliency

At startup and when serving requests, the SDK retrieves and caches the latest information from the upstream Replicated APIs, including customer license information.

If the upstream APIs are not available at startup, the SDK does not accept connections or serve requests until it is able to communicate with the upstream APIs. If communication fails, the SDK retries every 10 seconds and the SDK Pod reports `0/1` ready.

When serving requests, if the upstream APIs become unavailable, the SDK serves from the memory cache and sets the `X-Replicated-Served-From-Cache` header to `true`. Additionally, rapid successive requests to the same SDK endpoint with the same request properties are rate-limited, returning the last cached payload and status code without reaching out to the upstream APIs. In this case, the `X-Replicated-Rate-Limited` header is set to `true`.

## Replicated SDK Helm Values

<SdkValues/>

================
File: docs/vendor/replicated-sdk-slsa-validating.md
================
# SLSA Provenance Validation Process for the Replicated SDK

This topic describes the process to perform provenance validation on the Replicated SDK.

## About Supply Chain Levels for Software Artifacts (SLSA)

[Supply Chain Levels for Software Artifacts (SLSA)](https://slsa.dev/), pronounced “salsa,” is a security framework that comprises standards and controls designed to prevent tampering, enhance integrity, and secure software packages and infrastructure.

## Purpose of Attestations

Attestations enable the inspection of an image to determine its origin, the identity of its creator, the creation process, and its contents. When building software using the Replicated SDK, the image’s Software Bill of Materials (SBOM) and SLSA-based provenance attestations empower your customers to make informed decisions regarding the impact of an image on the supply chain security of your application. This process ultimately enhances the security and assurances provided to both vendors and end customers.

## Prerequisite

Before you perform these tasks, you must install [slsa-verifier](https://github.com/slsa-framework/slsa-verifier) and [crane](https://github.com/google/go-containerregistry/blob/main/cmd/crane/doc/crane.md).

## Validate the SDK SLSA Attestations

The Replicated SDK build process uses Wolfi-based images to minimize the number of CVEs. The build process automatically generates SBOMs and attestations, and then publishes the image along with these metadata components. For instance, you can find all the artifacts readily available on [DockerHub](https://hub.docker.com/r/replicated/replicated-sdk/tags). The following shell script is a tool to easily validate the SLSA attestations for a given Replicated SDK image.

```
#!/bin/bash

# This script verifies the SLSA metadata of a container image
#
# Requires
# - slsa-verifier (https://github.com/slsa-framework/slsa-verifier)
# - crane (https://github.com/google/go-containerregistry/blob/main/cmd/crane/doc/crane.md)
#

# Define the image and version to verify
VERSION=v1.0.0-beta.20
IMAGE=replicated/replicated-sdk:${VERSION}

# expected source repository that should have produced the artifact, e.g. 
github.com/some/repo
SOURCE_REPO=github.com/replicatedhq/replicated-sdk

# Use `crane` to retrieve the digest of the image without pulling the image
IMAGE_WITH_DIGEST="${IMAGE}@"$(crane digest "${IMAGE}")

echo "Verifying artifact"
echo "Image: ${IMAGE_WITH_DIGEST}"
echo "Source Repo: ${SOURCE_REPO}"

slsa-verifier verify-image "${IMAGE_WITH_DIGEST}" \
  --source-uri ${SOURCE_REPO} \
  --source-tag ${VERSION}
```

================
File: docs/vendor/resources-annotations-templating.md
================
# Templating Annotations

This topic describes how to use Replicated KOTS template functions to template annotations for resources and objects based on user-supplied values.

## Overview

It is common for users to need to set custom annotations for a resource or object deployed by your application. For example, you might need to allow your users to provide annotations to apply to a Service or Ingress object in public cloud environments.

For applications installed with Replicated KOTS, you can apply user-supplied annotations to resources or objects by first adding a field to the Replicated Admin Console **Config** page where users can enter one or more annotations. For information about how to add fields on the **Config** page, see [Creating and Editing Configuration Fields](/vendor/admin-console-customize-config-screen).

You can then map these user-supplied values from the **Config** page to resources and objects in your release using KOTS template functions. KOTS template functions are a set of custom template functions based on the Go text/template library that can be used to generate values specific to customer environments. The template functions in the Config context return user-supplied values on the **Config** page.

For more information about KOTS template functions in the Config context, see [Config Context](/reference/template-functions-config-context). For more information about the Go library, see [text/template](https://pkg.go.dev/text/template) in the Go documentation.

## About `kots.io/placeholder`

For applications installed with KOTS that use standard Kubernetes manifests, the `kots.io/placeholder` annotation allows you to template annotations in resources and objects without breaking the base YAML or needing to include the annotation key.

The `kots.io/placeholder` annotation uses the format `kots.io/placeholder 'bool' 'string'`. For example:

```yaml
# Example manifest file

annotations:
  kots.io/placeholder: |-
    repl{{ ConfigOption "additional_annotations" | nindent 4 }}
```

:::note
For Helm chart-based applications installed with KOTS, Replicated recommends that you map user-supplied annotations to the Helm chart `values.yaml` file using the Replicated HelmChart custom resource, rather than using `kots.io/placeholder`. This allows you to access user-supplied values in your Helm chart without needing to include KOTS template functions directly in the Helm chart templates.

For an example, see [Map User-Supplied Annotations to Helm Chart Values](#map-user-supplied-annotations-to-helm-chart-values) below.
:::

## Annotation Templating Examples

This section includes common examples of templating annotations in resources and objects to map user-supplied values.

For additional examples of how to map values to Helm chart-based applications, see [Applications](https://github.com/replicatedhq/platform-examples/tree/main/applications) in the platform-examples repository in GitHub.

### Map Multiple Annotations from a Single Configuration Field

You can map one or more annotations from a single `textarea` field on the **Config** page. The `textarea` type defines multi-line text input and supports properties such as `rows` and `cols`. For more information, see [textarea](/reference/custom-resource-config#textarea) in _Config_.

For example, the following Config custom resource adds an `ingress_annotations` field of type `textarea`:

```yaml
# Config custom resource

apiVersion: kots.io/v1beta1
kind: Config
metadata:
  name: config
spec:
  groups:
    - name: ingress_settings
      title: Ingress Settings
      description: Configure Ingress
      items:
        - name: ingress_annotations
          type: textarea
          title: Ingress Annotations
          help_text: See your cloud provider’s documentation for the required annotations.
```

On the **Config** page, users can enter one or more key-value pairs in the `ingress_annotations` field, as shown in the example below:

![Config page with custom annotations in an Ingress Annotations field](/images/config-map-annotations.png)

[View a larger version of this image](/images/config-map-annotations.png)

The following Ingress object then uses the `kots.io/placeholder` annotation to map the user-supplied values from the `ingress_annotations` field:

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example-annotation
  annotations:
    kots.io/placeholder: |-
      repl{{ ConfigOption "ingress_annotations" | nindent 4 }}
```

During installation, KOTS renders the YAML with the multi-line input from the configuration field as shown below:

```yaml
# Rendered Ingress object
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example-annotation
  annotations:
    kots.io/placeholder: |-

      key1: value1
      key2: value2
      key3: value3
```

### Map Annotations from Multiple Configuration Fields

You can specify multiple annotations using the same `kots.io/placeholder` annotation.

For example, the following Ingress object includes ConfigOption template functions that render the user-supplied values for the `ingress_annotation` and `ingress_hostname` fields:

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example-annotation
  annotations:
    kots.io/placeholder: |-
      repl{{ ConfigOption "ingress_annotation" | nindent 4 }}
      repl{{ printf "my.custom/annotation.ingress.hostname: %s" (ConfigOption "ingress_hostname") | nindent 4 }}
```

During installation, KOTS renders the YAML as shown below:

```yaml
# Rendered Ingress object

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example-annotation
  annotations:
    kots.io/placeholder: |-

      key1: value1
      my.custom/annotation.ingress.hostname: example.hostname.com
```

### Map User-Supplied Value to a Key

You can map a user-supplied value from the **Config** page to a pre-defined annotation key.

For example, in the following Ingress object, `my.custom/annotation.ingress.hostname` is the key for the templated annotation. 
The annotation also uses the ConfigOption template function to map the user-supplied value from an `ingress_hostname` configuration field:

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example-annotation
  annotations:
    kots.io/placeholder: |-
      repl{{ printf "my.custom/annotation.ingress.hostname: %s" (ConfigOption "ingress_hostname") | nindent 4 }}
```

During installation, KOTS renders the YAML as shown below:

```yaml
# Rendered Ingress object

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example-annotation
  annotations:
    kots.io/placeholder: |-

      my.custom/annotation.ingress.hostname: example.hostname.com
```

### Include Conditional Statements in Templated Annotations

You can include or exclude templated annotations based on a conditional statement.

For example, the following Ingress object includes a conditional statement for `kots.io/placeholder` that renders `my.custom/annotation.class: somevalue` if the user enables a `custom_annotation` field on the **Config** page:

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: myapp
  labels:
    app: myapp
  annotations:
    kots.io/placeholder: |-
      repl{{if ConfigOptionEquals "custom_annotation" "1" }}repl{{ printf "my.custom/annotation.class: somevalue" | nindent 4 }}repl{{end}}
spec:
...
```

During installation, if the user enables the `custom_annotation` configuration field, KOTS renders the YAML as shown below:

```yaml
# Rendered Ingress object

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: myapp
  labels:
    app: myapp
  annotations:
    kots.io/placeholder: |-
      my.custom/annotation.class: somevalue
spec:
...
```

Alternatively, if the condition evaluates to false, the annotation does not appear in the rendered YAML:

```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: myapp
  labels:
    app: myapp
  annotations:
    kots.io/placeholder: |-
spec:
...
```

### Map User-Supplied Annotations to Helm Chart Values

For Helm chart-based applications installed with KOTS, Replicated recommends that you map user-supplied annotations to the Helm chart `values.yaml` file, rather than using `kots.io/placeholder`. This allows you to access user-supplied values in your Helm chart without needing to include KOTS template functions directly in the Helm chart templates.

To map user-supplied annotations from the **Config** page to the Helm chart `values.yaml` file, you use the `values` field of the Replicated HelmChart custom resource. For more information, see [values](/reference/custom-resource-helmchart-v2#values) in _HelmChart v2_.
- -For example, the following HelmChart custom resource uses a ConfigOption template function in `values.services.myservice.annotations` to map the value of a configuration field named `additional_annotations`: - -```yaml -# HelmChart custom resource - -apiVersion: kots.io/v1beta2 -kind: HelmChart -metadata: - name: myapp -spec: - values: - services: - myservice: - annotations: repl{{ ConfigOption "additional_annotations" | nindent 10 }} -``` - -The `values.services.myservice.annotations` field in the HelmChart custom resource corresponds to a `services.myservice.annotations` field in the `values.yaml` file of the application Helm chart, as shown in the example below: - -```yaml -# Helm chart values.yaml - -services: - myservice: - annotations: {} -``` - -During installation, the ConfigOption template function in the HelmChart custom resource renders the user-supplied values from the `additional_annotations` configuration field. - -Then, KOTS replaces the value in the corresponding field in the `values.yaml` file in the chart archive, as shown in the example below: - -```yaml -# Rendered Helm chart values.yaml - -services: - myservice: - annotations: - key1: value1 -``` - -In your Helm chart templates, you can access these values from the `values.yaml` file to apply the user-supplied annotations to the target resources or objects. For information about how to access values from a `values.yaml` file, see [Values Files](https://helm.sh/docs/chart_template_guide/values_files/) in the Helm documentation. - -================ -File: docs/vendor/snapshots-configuring-backups.md -================ -# Configuring Snapshots - -This topic provides information about how to configure the Velero Backup resource to enable Replicated KOTS snapshots for an application. - -For more information about snapshots, see [About Backup and Restore with Snapshots](/vendor/snapshots-overview). - -## Configure Snapshots - -Add a Velero Backup custom resource (`kind: Backup`, `apiVersion: velero.io/v1`) to your release and configure it as needed. After configuring the Backup resource, add annotations for each volume that you want to be included in backups. - -To configure snapshots for your application: - -1. In a new release containing your application files, add a Velero Backup resource (`kind: Backup` and `apiVersion: velero.io/v1`): - - ```yaml - apiVersion: velero.io/v1 - kind: Backup - metadata: - name: backup - spec: {} - ``` - -1. Configure the Backup resource to specify the resources that will be included in backups. - - For more information about the Velero Backup resource, including limitations, the list of supported fields for snapshots, and an example, see [Velero Backup Resource for Snapshots](/reference/custom-resource-backup). - -1. (Optional) Configure backup and restore hooks. For more information, see [Configuring Backup and Restore Hooks for Snapshots](snapshots-hooks). - -1. For each volume that requires a backup, add the `backup.velero.io/backup-volumes` annotation to the pod specification. The annotation value is a comma-separated list of the volumes to include in the backup. - - <details> - <summary>Why do I need to use the backup annotation?</summary> - <p>By default, no volumes are included in the backup.
If any pods mount a volume that should be backed up, you must configure the backup with an annotation listing the specific volumes to include in the backup.</p> - </details> - - **Example:** - - In the following Deployment manifest file, `pvc-volume` is the only volume that is backed up. The `scratch` volume is not included in the backup because it is not listed in the annotation on the pod specification. - - ```yaml - apiVersion: apps/v1 - kind: Deployment - metadata: - name: sample - labels: - app: foo - spec: - replicas: 1 - selector: - matchLabels: - app: foo - template: - metadata: - labels: - app: foo - annotations: - backup.velero.io/backup-volumes: pvc-volume - spec: - containers: - - image: k8s.gcr.io/test-webserver - name: test-webserver - volumeMounts: - - name: pvc-volume - mountPath: /volume-1 - - name: scratch - mountPath: /volume-2 - volumes: - - name: pvc-volume - persistentVolumeClaim: - claimName: test-volume-claim - - name: scratch - emptyDir: {} - - ``` - -1. (Optional) Configure manifest exclusions. By default, Velero also backs up all of the Kubernetes objects in the namespace. - - To exclude any manifest file, add a [`velero.io/exclude-from-backup=true`](https://velero.io/docs/v1.5/resource-filtering/#veleroioexclude-from-backuptrue) label to the manifest to be excluded. The following example shows the Secret manifest file with the `velero.io/exclude-from-backup` label: - - ```yaml - apiVersion: v1 - kind: Secret - metadata: - name: sample - labels: - velero.io/exclude-from-backup: "true" - stringData: - uri: Secret To Not Include - - ``` - -1. If you distribute multiple applications with Replicated, repeat these steps for each application. Each application must have its own Backup resource to be included in a full backup with snapshots. - -1. (kURL Only) If your application supports installation with Replicated kURL, Replicated recommends that you include the kURL Velero add-on so that customers do not have to manually install Velero in the kURL cluster. For more information, see [Creating a kURL Installer](packaging-embedded-kubernetes). - -================ -File: docs/vendor/snapshots-hooks.md -================ -# Configuring Backup and Restore Hooks for Snapshots - -This topic describes the use of custom backup and restore hooks and demonstrates a common example. - -## About Backup and Restore Hooks - -Velero supports the use of backup hooks and restore hooks. - -Your application workload might require additional processing or scripts to be run before or after creating a backup to prepare the system for a backup. Many application workloads also require additional processing or scripts to run during or after the restore process. - -Some common examples of how to use backup and restore hooks are: -- Run `pg_dump` to export a Postgres database prior to backup -- Lock a file before running a backup, and unlock it immediately after -- Delete TMP files that should not be backed up -- Restore a database file only if that file exists -- Perform required setup tasks in a restored Pod before the application containers can start - -Additionally, for embedded clusters created by Replicated kURL, you must write custom backup and restore hooks to enable backups for any object-stored data that is not KOTS-specific and does not use persistentVolumeClaims (PVCs). For more information about object-stored data, see [Other Object Stored Data](snapshots-overview#other-object-stored-data) in _Backup and Restore_.
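For example, the following is a minimal sketch of what backup hooks can look like when set as annotations on a pod template. The `pre.hook.backup.velero.io/command` and `post.hook.backup.velero.io/command` annotation keys are standard Velero backup hooks; the workload names, image, and lock-file commands are illustrative assumptions only:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp
spec:
  replicas: 1
  selector:
    matchLabels:
      app: myapp
  template:
    metadata:
      labels:
        app: myapp
      annotations:
        # Velero runs this command in the container before backing up the pod volumes
        pre.hook.backup.velero.io/command: '["/bin/sh", "-c", "touch /data/.lock"]'
        # Velero runs this command after the pod volume backup completes
        post.hook.backup.velero.io/command: '["/bin/sh", "-c", "rm -f /data/.lock"]'
    spec:
      containers:
        - name: myapp
          image: example/myapp:1.0.0
          volumeMounts:
            - name: data
              mountPath: /data
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: myapp-data
```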
- -For more information about backup and restore hooks, see [Backup Hooks](https://velero.io/docs/v1.10/backup-hooks/) and [Restore Hooks](https://velero.io/docs/v1.10/restore-hooks) in the Velero documentation. - -## Example - -The following example demonstrates how to include Velero backup and restore hooks for a Postgres database in a Replicated HelmChart custom resource manifest file. - -The use case for this example is an application packaged with a Helm chart that includes a Postgres database. A description of key fields from the YAML follows the example. - -```yaml -apiVersion: kots.io/v1beta2 -kind: HelmChart -metadata: - name: postgresql -spec: - exclude: 'repl{{ ConfigOptionEquals `postgres_type` `external_postgres` }}' - - chart: - name: postgresql - chartVersion: 8.7.4 - - values: - - master: - podAnnotations: - backup.velero.io/backup-volumes: backup - pre.hook.backup.velero.io/command: '["/bin/bash", "-c", "PGPASSWORD=$POSTGRES_PASSWORD pg_dump -U username -d dbname -h 127.0.0.1 > /scratch/backup.sql"]' - pre.hook.backup.velero.io/timeout: 3m - post.hook.restore.velero.io/command: '["/bin/bash", "-c", "[ -f \"/scratch/backup.sql\" ] && PGPASSWORD=$POSTGRES_PASSWORD psql -U username -h 127.0.0.1 -d dbname -f /scratch/backup.sql && rm -f /scratch/backup.sql;"]' - - extraVolumes: - - name: backup - emptyDir: - sizeLimit: 1Gi - extraVolumeMounts: - - name: backup - mountPath: /scratch - - global: - postgresql: - postgresqlUsername: username - postgresqlPassword: "repl{{ ConfigOption `embedded_postgres_password` }}" - postgresqlDatabase: dbname -``` - -The following describes key fields from the example above: - -* `spec.exclude`: A common and recommended pattern for applications. The customer can choose to bring an external Postgres instance instead of running it in-cluster. The Replicated KOTS template function in `spec.exclude` evaluates to true when the user specifies the external database option on the Admin Console **Config** page. This means that the internal Postgres database is not included in the deployment. - -* `spec.values.master.podAnnotations`: Adds podAnnotations to the Postgres master PodSpec. Velero backup and restore hooks are included in the podAnnotations. The following table describes the podAnnotations: - - :::note - Run backup hooks inside the container that contains the data to back up. - ::: - - <table> - <tr> - <th>podAnnotation</th> - <th>Description</th> - </tr> - <tr> - <td><code>backup.velero.io/backup-volumes</code></td> - <td>A comma-separated list of volumes from the Pod to include in the backup. The primary data volume is not included in this field because data is exported using the backup hook.</td> - </tr> - <tr> - <td><code>pre.hook.backup.velero.io/command</code></td> - <td>A stringified JSON array containing the command for the backup hook. - This command is a <code>pg_dump</code> from the running database to the backup volume.</td> - </tr> - <tr> - <td><code>pre.hook.backup.velero.io/timeout</code></td> - <td>A duration for the maximum time to let this script run.</td> - </tr> - <tr> - <td><code>post.hook.restore.velero.io/command</code></td> - <td>A Velero exec restore hook that runs a script to check if the database file exists, and restores only if it exists. Then, the script deletes the file after the operation is complete.</td> - </tr> - </table> - -* `spec.values.master.extraVolumes`: A new volume that is injected into the Postgres Pod. The new volume is an empty volume that uses ephemeral storage.
The ephemeral storage must have enough space to accommodate the size of the exported data. -The `extraVolumeMounts` field mounts the volume into the `/scratch` directory of the master Pod. The volume is used as a destination when the backup hook command described above runs `pg_dump`. This is the only volume that is backed up. - -================ -File: docs/vendor/snapshots-overview.mdx -================ -import RestoreTable from "../partials/snapshots/_restoreTable.mdx" -import NoEcSupport from "../partials/snapshots/_limitation-no-ec-support.mdx" -import RestoreTypes from "../partials/snapshots/_restore-types.mdx" -import Dr from "../partials/snapshots/_limitation-dr.mdx" -import Os from "../partials/snapshots/_limitation-os.mdx" -import InstallMethod from "../partials/snapshots/_limitation-install-method.mdx" -import CliRestores from "../partials/snapshots/_limitation-cli-restores.mdx" - -# About Backup and Restore with Snapshots - -This topic provides an introduction to the Replicated KOTS snapshots feature for backup and restore. It describes how vendors enable snapshots, the type of data that is backed up, and how to troubleshoot issues for enterprise users. - -:::note -<NoEcSupport/> -::: - -## Overview - -An important part of the lifecycle of an application is backup and restore. You can enable Replicated KOTS snapshots to support backup and restore for existing cluster installations with KOTS and Replicated kURL installations. - -When the snapshots feature is enabled for your application, your customers can manage and perform backup and restore from the Admin Console or KOTS CLI. - -Snapshots uses the Velero open source project as the backend to back up Kubernetes manifests and persistent volumes. Velero is a mature, fully-featured application. For more information, see the [Velero documentation](https://velero.io/docs/). - -In addition to the default functionality that Velero provides, KOTS exposes hooks that let you inject scripts that can execute both before and after a backup, and before and after a restore. For more information, see [Configuring Backup and Restore Hooks for Snapshots](/vendor/snapshots-hooks). - -### Limitations and Considerations - -- <NoEcSupport/> - -- The snapshots feature is available only for licenses with the **Allow Snapshots** option enabled. For more information, see [Creating and Managing Customers](/vendor/releases-creating-customer). - -- Snapshots are useful for rollback and disaster recovery scenarios. They are not intended to be used for application migration. - -- <Dr/> - -- <Os/> - -- <InstallMethod/> - -- <CliRestores/> - -- Removing data from the snapshot storage itself results in data corruption and the loss of snapshots. Instead, use the **Snapshots** tab in the Admin Console to clean up and remove snapshots. - -- Snapshots does not support Amazon Simple Storage Service (Amazon S3) buckets that have a bucket policy requiring the server-side encryption header. If you want to require server-side encryption for objects, you can enable default encryption on the bucket instead. For more information about Amazon S3, see the [Amazon S3](https://docs.aws.amazon.com/s3/?icmpid=docs_homepage_featuredsvcs) documentation. - -### Velero Version Compatibility - -The following table lists which versions of Velero are compatible with each version of KOTS. For more information, see the [Velero documentation](https://velero.io/docs/).
- -| KOTS version | Velero version | -|------|-------------| -| 1.15 to 1.20.2 | 1.2.0 | -| 1.20.3 to 1.94.0 | 1.5.1 through 1.9.x | -| 1.94.1 and later | 1.6.x through 1.12.x | - -## About Backups - -This section describes the types of backups that are supported with snapshots. For information about how to configure backup storage destinations for snapshots, see the [Configuring Backup Storage](/enterprise/snapshots-velero-cli-installing) section. - -### Application and Admin Console (Full) Backups - -Full backups (also referred to as _instance_ backups) include the KOTS Admin Console and all application data, including application volumes and manifest files. - -For clusters created with Replicated kURL, full backups also back up the Docker registry, which is required for air gapped installations. - -If you manage multiple applications with the Admin Console, data from all applications that support backups is included in a full backup. To be included in full backups, each application must include a manifest file with `kind: Backup` and `apiVersion: velero.io/v1`. - -Full backups are recommended because they support all types of restores. For example, you can restore both the Admin Console and application from a full backup to a new cluster in disaster recovery scenarios. Or, you can use a full backup to restore only application data for the purpose of rolling back after deploying a new version of an application. - -### Application-Only (Partial) Backups - -Partial backups back up the application volumes and manifest files only. Partial backups do not back up the KOTS Admin Console. - -Partial backups can be useful if you need to roll back after deploying a new application version. Partial backups of the application only _cannot_ be restored to a new cluster, and are therefore not usable for disaster recovery scenarios. - -### Backup Storage Destinations - -For disaster recovery, backups should be configured to use a storage destination that exists outside of the cluster. This is especially true for installations in clusters created with Replicated kURL, because the default storage location on these clusters is internal. - -You can use a storage provider that is compatible with Velero as the storage destination for backups created with the Replicated snapshots feature. For a list of the compatible storage providers, see [Providers](https://velero.io/docs/v1.9/supported-providers/) in the Velero documentation. - -You initially configure backups on a supported storage provider backend using the KOTS CLI. If you want to change the storage destination after the initial configuration, you can use the **Snapshots** page in the Admin Console, which has built-in support for the following storage destinations: - -- Amazon Web Services (AWS) -- Google Cloud Platform (GCP) -- Microsoft Azure -- S3-Compatible -- Network File System (NFS) -- Host Path - -kURL installers that include the Velero add-on also include a locally-provisioned object store. By default, kURL clusters are preconfigured in the Admin Console to store backups in the locally-provisioned object store. This object store is sufficient only for rollbacks and downgrades and is not a suitable configuration for disaster recovery. Replicated recommends that you configure a snapshots storage destination that is external to the cluster in the Admin Console for kURL clusters.
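For example, a command like the following uses the KOTS CLI to point snapshots at an external S3-compatible store. This is a sketch only; the namespace, endpoint, region, bucket, and credential values are placeholders to replace with values for your customer's environment:

```bash
kubectl kots velero configure-other-s3 \
  --namespace default \
  --endpoint https://s3.example.internal \
  --region us-east-1 \
  --bucket kots-snapshots \
  --access-key-id <access-key-id> \
  --secret-access-key <secret-access-key>
```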
- -For information about how to configure backup storage destinations for snapshots, see the [Configuring Backup Storage](/enterprise/snapshots-velero-cli-installing) section. - -### What Data is Backed Up? - -Full backups include the Admin Console and all application data, including KOTS-specific object-stored data. For Replicated kURL installations, this also backs up the Docker registry, which is required for air gapped installations. - -#### Other Object-Stored Data - -For kURL clusters, you might be using object-stored data that is not specific to the kURL KOTS add-on. - -For object-stored data that is not KOTS-specific and does not use persistentVolumeClaims (PVCs), you must write custom backup and restore hooks to enable backups for that object-stored data. For example, Rook and Ceph do not use PVCs and so require custom backup and restore hooks. For more information about writing custom hooks, see [Configuring Backup and Restore Hooks for Snapshots](snapshots-hooks). - -#### Pod Volume Data - -Replicated supports only the restic backup program for pod volume data. - -By default, Velero requires that you opt in to have pod volumes backed up. In the Backup resource that you configure to enable snapshots, you must annotate each specific volume that you want to back up. For more information about including and excluding pod volumes, see [Configuring Snapshots](/vendor/snapshots-configuring-backups). - -## About Restores {#restores} - -<RestoreTypes/> - -When you restore an application with snapshots, KOTS first deletes the selected application. All existing application manifests are removed from the cluster, and all `PersistentVolumeClaims` are deleted. This action is not reversible. - -Then, the restore process redeploys all of the application manifests. All Pods are given an extra `initContainer` and an extra directory named `.velero`, which are used for restore hooks. For more information about the restore process, see [Restore Reference](https://velero.io/docs/v1.9/restore-reference/) in the Velero documentation. - -When you restore the Admin Console only, no changes are made to the application. - -For information about how to restore using the Admin Console or the KOTS CLI, see [Restoring from Backups](/enterprise/snapshots-restoring-full). - -## Using Snapshots - -This section provides an overview of how vendors and enterprise users can configure and use the snapshots feature. - -### How to Enable Snapshots for Your Application - -To enable the snapshots backup and restore feature for your users, you must: - -- Have the snapshots entitlement enabled in your Replicated vendor account. For account entitlements, contact the Replicated TAM team. -- Define a manifest for creating backups. See [Configuring Snapshots](snapshots-configuring-backups). -- When needed, configure backup and restore hooks. See [Configuring Backup and Restore Hooks for Snapshots](snapshots-hooks). -- Enable the **Allow Snapshots** option in customer licenses. See [Creating and Managing Customers](releases-creating-customer). - -### Understanding Backup and Restore for Users {#how-users} - -After vendors enable backup and restore, enterprise users install Velero and configure a storage destination in the Admin Console. Then users can create backups manually or schedule automatic backups. - -Replicated recommends advising your users to make full backups for disaster recovery purposes.
Additionally, full backups give users the flexibility to do a full restore, a partial restore (application only), or restore just the Admin Console. - -From a full backup, users restore using the KOTS CLI or the Admin Console as indicated in the following table: - -<RestoreTable/> - -Partial backups are not recommended as they are a legacy feature and only back up the application volumes and manifests. Partial backups can be restored only from the Admin Console. - -### Troubleshooting Snapshots - -To support end users with backup and restore, use the following resources: - -- To help troubleshoot error messages, see [Troubleshooting Snapshots](/enterprise/snapshots-troubleshooting-backup-restore). - -- Review the Limitations and Considerations section to make sure an end user's system is compliant. - -- Check that the installed Velero version and KOTS version are compatible. - -================ -File: docs/vendor/support-bundle-customizing.mdx -================ -# Adding and Customizing Support Bundles - -This topic describes how to add a default support bundle spec to a release for your application. It also describes how to customize the default support bundle spec based on your application's needs. For more information about support bundles, see [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about). - -The information in this topic applies to Helm applications and Kubernetes manifest-based applications installed with Helm or with Replicated KOTS. - -## Step 1: Add the Default Spec to a Manifest File - -You can add the support bundle spec to a Kubernetes Secret or a SupportBundle custom resource. The type of manifest file that you use depends on your application type (Helm or manifest-based) and installation method (Helm or KOTS). - -Use the following guidance to determine which type of manifest file to use for creating a support bundle spec: - -* **Helm Applications**: For Helm applications, see the following guidance: - - * **(Recommended) Helm or KOTS v1.94.2 and Later**: For Helm applications installed with Helm or KOTS v1.94.2 or later, create the support bundle spec in a Kubernetes Secret in your Helm chart `templates`. See [Kubernetes Secret](#secret). - - * **KOTS v1.94.1 and Earlier**: For Helm applications installed with KOTS v1.94.1 or earlier, create the support bundle spec in a SupportBundle custom resource. See [SupportBundle Custom Resource](#sb-cr). - -* **Kubernetes Manifest-Based Applications**: For Kubernetes manifest-based applications, create the support bundle spec in a SupportBundle custom resource. See [SupportBundle Custom Resource](#sb-cr).
- -### Kubernetes Secret {#secret} - -You can define support bundle specs in a Kubernetes Secret for the following installation types: -* Installations with Helm -* Helm applications installed with KOTS v1.94.2 and later - -In your Helm chart `templates` directory, add the following YAML to a Kubernetes Secret: - -```yaml -apiVersion: v1 -kind: Secret -metadata: - labels: - troubleshoot.sh/kind: support-bundle - name: example -stringData: - support-bundle-spec: | - apiVersion: troubleshoot.sh/v1beta2 - kind: SupportBundle - metadata: - name: support-bundle - spec: - collectors: [] - analyzers: [] -``` - -As shown above, the Secret must include the following: - -* The label `troubleshoot.sh/kind: support-bundle` -* A `stringData` field with a key named `support-bundle-spec` - -This empty support bundle spec includes the following collectors by default: -* [clusterInfo](https://troubleshoot.sh/docs/collect/cluster-info/) -* [clusterResources](https://troubleshoot.sh/docs/collect/cluster-resources/) - -You do not need to manually include the `clusterInfo` or `clusterResources` collectors in the spec. - -:::note -If your application is deployed as multiple Helm charts, Replicated recommends that you create separate support bundle specs for each subchart. This allows you to make specs that are specific to different components of your application. When a support bundle is generated, all the specs are combined to provide a single bundle. -::: - -After you create this empty support bundle spec, you can test the support bundle by following the instructions in [Generating a Support Bundle](/vendor/support-bundle-generating). You can customize the support bundle spec by adding collectors and analyzers or editing the default collectors. For more information, see [Step 2: Customize the spec](/vendor/support-bundle-customizing#customize-the-spec) below. - -### SupportBundle Custom Resource {#sb-cr} - -You can define support bundle specs in a SupportBundle custom resource for the following installation types: -* Kubernetes manifest-based applications installed with KOTS -* Helm applications installed with KOTS v1.94.1 and earlier - -In a release for your application, add the following YAML to a new `support-bundle.yaml` manifest file: - -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: SupportBundle -metadata: - name: example -spec: - collectors: [] - analyzers: [] -``` -For more information about the SupportBundle custom resource, see [Preflight and Support Bundle](/reference/custom-resource-preflight). - -This empty support bundle spec includes the following collectors by default: -* [clusterInfo](https://troubleshoot.sh/docs/collect/cluster-info/) -* [clusterResources](https://troubleshoot.sh/docs/collect/cluster-resources/) - -You do not need to manually include the `clusterInfo` or `clusterResources` collectors in the spec. - -After you create this empty support bundle spec, you can test the support bundle by following the instructions in [Generating a Support Bundle](/vendor/support-bundle-generating). You can customize the support bundle spec by adding collectors and analyzers or editing the default collectors. For more information, see [Step 2: Customize the spec](/vendor/support-bundle-customizing#customize-the-spec) below.
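With either manifest type, one way to verify that a deployed spec is discoverable in a cluster is to run the support-bundle plugin with the `--load-cluster-specs` flag, assuming the plugin is installed and the spec is deployed to the cluster:

```bash
kubectl support-bundle --load-cluster-specs
```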
- -## Step 2: Customize the Spec {#customize-the-spec} - -You can customize the support bundles for your application by: -* Adding collectors and analyzers -* Editing or excluding the default `clusterInfo` and `clusterResources` collectors - -### Add Collectors - -Collectors gather information from the cluster, the environment, the application, or other sources. Collectors generate output that is then used by the analyzers that you define. - -In addition to the default `clusterInfo` and `clusterResources` collectors, the Troubleshoot open source project includes several collectors that you can include in the spec to gather more information from the installation environment. To view all the available collectors, see [All Collectors](https://troubleshoot.sh/docs/collect/all/) in the Troubleshoot documentation. - -The following are some recommended collectors: - -- [logs](https://troubleshoot.sh/docs/collect/logs/) -- [secret](https://troubleshoot.sh/docs/collect/secret/) and [configMap](https://troubleshoot.sh/docs/collect/configmap/) -- [postgresql](https://troubleshoot.sh/docs/collect/postgresql/), [mysql](https://troubleshoot.sh/docs/collect/mysql/), and [redis](https://troubleshoot.sh/docs/collect/redis/) -- [runPod](https://troubleshoot.sh/docs/collect/run-pod/) -- [copy](https://troubleshoot.sh/docs/collect/copy/) and [copyFromHost](https://troubleshoot.sh/docs/collect/copy-from-host/) -- [http](https://troubleshoot.sh/docs/collect/http/) - -### Add Analyzers - -Analyzers use the data from the collectors to generate output for the support bundle. Good analyzers clearly identify failure modes and provide troubleshooting guidance for the user. For example, if you can identify a log message from your database component that indicates a problem, you should write an analyzer that checks for that log and provides a description of the error to the user. - -The Troubleshoot open source project includes several analyzers that you can include in the spec. To view all the available analyzers, see the [Analyze](https://troubleshoot.sh/docs/analyze/) section of the Troubleshoot documentation. - -The following are some recommended analyzers: - -- [textAnalyze](https://troubleshoot.sh/docs/analyze/regex/) -- [deploymentStatus](https://troubleshoot.sh/docs/analyze/deployment-status/) -- [clusterPodStatuses](https://troubleshoot.sh/docs/analyze/cluster-pod-statuses/) -- [replicasetStatus](https://troubleshoot.sh/docs/analyze/replicaset-status/) -- [statefulsetStatus](https://troubleshoot.sh/docs/analyze/statefulset-status/) -- [postgresql](https://troubleshoot.sh/docs/analyze/postgresql/), [mysql](https://troubleshoot.sh/docs/analyze/mysql/), and [redis](https://troubleshoot.sh/docs/analyze/redis/) - -### Customize the Default `clusterResources` Collector - -You can edit the default `clusterResources` collector using the following properties: - -* `namespaces`: The list of namespaces where the resources and information are collected. If the `namespaces` key is not specified, then the `clusterResources` collector defaults to collecting information from all namespaces. The `default` namespace cannot be removed, but you can specify additional namespaces. - -* `ignoreRBAC`: When true, the `clusterResources` collector does not check for RBAC authorization before collecting resource information from each namespace. This is useful when your cluster uses authorization webhooks that do not support SelfSubjectRuleReviews. Defaults to false.
- -For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) in the Troubleshoot documentation. - -The following example shows how to specify the namespaces where the `clusterResources` collector collects information: - -```yaml -spec: - collectors: - - clusterResources: - namespaces: - - default - - my-app-namespace - ignoreRBAC: true -``` - -The following example shows how to use Helm template functions to set the namespace: - -```yaml -spec: - collectors: - - clusterResources: - namespaces: {{ .Release.Namespace }} - ignoreRBAC: true -``` - -The following example shows how to use the Replicated Namespace template function to set the namespace: - -```yaml -spec: - collectors: - - clusterResources: - namespaces: '{{repl Namespace }}' - ignoreRBAC: true -``` -For more information, see [Namespace](/reference/template-functions-static-context#namespace) in _Static Context_. - -### Exclude the Default Collectors - -Although Replicated recommends including the default `clusterInfo` and `clusterResources` collectors because they collect a large amount of data to help with installation and debugging, you can optionally exclude them. - -The following example shows how to exclude both the `clusterInfo` and `clusterResources` collectors from your support bundle spec: - -```yaml -spec: - collectors: - - clusterInfo: - exclude: true - - clusterResources: - exclude: true -``` - -### Examples - -For common examples of collectors and analyzers used in support bundle specs, see [Examples of Support Bundle Specs](/vendor/support-bundle-examples). - -================ -File: docs/vendor/support-bundle-embedded.mdx -================ -import EmbeddedClusterSupportBundle from "../partials/support-bundles/_generate-bundle-ec.mdx" -import SupportBundleIntro from "../partials/support-bundles/_ec-support-bundle-intro.mdx" - -# Generating Support Bundles for Embedded Cluster - -This topic describes how to generate a support bundle that includes cluster- and host-level information for [Replicated Embedded Cluster](/vendor/embedded-overview) installations. - -For information about generating host support bundles for Replicated kURL installations, see [Generating Host Bundles for kURL](/vendor/support-host-support-bundles).
- -## Overview - -<SupportBundleIntro/> - -## Generate a Support Bundle - -<EmbeddedClusterSupportBundle/> - -================ -File: docs/vendor/support-bundle-examples.mdx -================ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import HttpSecret from "../partials/support-bundles/_http-requests-secret.mdx" -import HttpCr from "../partials/support-bundles/_http-requests-cr.mdx" -import NodeStatusSecret from "../partials/support-bundles/_node-status-secret.mdx" -import NodeStatusCr from "../partials/support-bundles/_node-status-cr.mdx" -import K8sVersionSecret from "../partials/support-bundles/_k8s-version-secret.mdx" -import K8sVersionCr from "../partials/support-bundles/_k8s-version-cr.mdx" -import DeployStatusSecret from "../partials/support-bundles/_deploy-status-secret.mdx" -import DeployStatusCr from "../partials/support-bundles/_deploy-status-cr.mdx" -import NodeResourcesSecret from "../partials/support-bundles/_node-resources-secret.mdx" -import NodeResourcesCr from "../partials/support-bundles/_node-resources-cr.mdx" -import LogsSelectorsSecret from "../partials/support-bundles/_logs-selectors-secret.mdx" -import LogsSelectorsCr from "../partials/support-bundles/_logs-selectors-cr.mdx" -import LogsLimitsSecret from "../partials/support-bundles/_logs-limits-secret.mdx" -import LogsLimitsCr from "../partials/support-bundles/_logs-limits-cr.mdx" -import RedisMysqlSecret from "../partials/support-bundles/_redis-mysql-secret.mdx" -import RedisMysqlCr from "../partials/support-bundles/_redis-mysql-cr.mdx" -import RunPodsSecret from "../partials/support-bundles/_run-pods-secret.mdx" -import RunPodsCr from "../partials/support-bundles/_run-pods-cr.mdx" - -# Example Support Bundle Specs - -This topic includes common examples of support bundle specifications. For more examples, see the [Troubleshoot example repository](https://github.com/replicatedhq/troubleshoot/tree/main/examples/support-bundle) in GitHub. - -## Check API Deployment Status - -The examples below use the `deploymentStatus` analyzer to check the status of an API deployment running in the cluster. The `deploymentStatus` analyzer uses data from the default `clusterResources` collector. - -For more information, see [Deployment Status](https://troubleshoot.sh/docs/analyze/deployment-status/) and [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) in the Troubleshoot documentation. - -<Tabs> - <TabItem value="secret" label="Kubernetes Secret" default> - <DeployStatusSecret/> - </TabItem> - <TabItem value="custom-resource" label="SupportBundle Custom Resource"> - <DeployStatusCr/> - </TabItem> -</Tabs> - -## Check HTTP Requests - -If your application has its own API that serves status, metrics, performance data, and so on, this information can be collected and analyzed. - -The examples below use the `http` collector and the `textAnalyze` analyzer to check that an HTTP request to the Slack API at `https://api.slack.com/methods/api.test` made from the cluster returns a successful response of `"status": 200,`. - -For more information, see [HTTP](https://troubleshoot.sh/docs/collect/http/) and [Regular Expression](https://troubleshoot.sh/docs/analyze/regex/) in the Troubleshoot documentation.
- -<Tabs> - <TabItem value="secret" label="Kubernetes Secret" default> - <HttpSecret/> - </TabItem> - <TabItem value="custom-resource" label="SupportBundle Custom Resource"> - <HttpCr/> - </TabItem> -</Tabs> - -## Check Kubernetes Version - -The examples below use the `clusterVersion` analyzer to check the version of Kubernetes running in the cluster. The `clusterVersion` analyzer uses data from the default `clusterInfo` collector. - -For more information, see [Cluster Version](https://troubleshoot.sh/docs/analyze/cluster-version/) and [Cluster Info](https://troubleshoot.sh/docs/collect/cluster-info/) in the Troubleshoot documentation. - -<Tabs> - <TabItem value="secret" label="Kubernetes Secret" default> - <K8sVersionSecret/> - </TabItem> - <TabItem value="custom-resource" label="SupportBundle Custom Resource"> - <K8sVersionCr/> - </TabItem> -</Tabs> - -## Check Node Resources - -The examples below use the `nodeResources` analyzer to check that the minimum requirements are met for memory, CPU cores, number of nodes, and ephemeral storage. The `nodeResources` analyzer uses data from the default `clusterResources` collector. - -For more information, see [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) and [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) in the Troubleshoot documentation. - -<Tabs> - <TabItem value="secret" label="Kubernetes Secret" default> - <NodeResourcesSecret/> - </TabItem> - <TabItem value="custom-resource" label="SupportBundle Custom Resource"> - <NodeResourcesCr/> - </TabItem> -</Tabs> - -## Check Node Status - -The following examples use the `nodeResources` analyzer to check the status of the nodes in the cluster. The `nodeResources` analyzer uses data from the default `clusterResources` collector. - -For more information, see [Node Resources](https://troubleshoot.sh/docs/analyze/node-resources/) and [Cluster Resources](https://troubleshoot.sh/docs/collect/cluster-resources/) in the Troubleshoot documentation. - -<Tabs> - <TabItem value="secret" label="Kubernetes Secret" default> - <NodeStatusSecret/> - </TabItem> - <TabItem value="custom-resource" label="SupportBundle Custom Resource"> - <NodeStatusCr/> - </TabItem> -</Tabs> - -## Collect Logs Using Multiple Selectors - -The examples below use the `logs` collector to collect logs from various Pods where application workloads are running. They also use the `textAnalyze` analyzer to analyze the logs for a known error. - -For more information, see [Pod Logs](https://troubleshoot.sh/docs/collect/logs/) and [Regular Expression](https://troubleshoot.sh/docs/analyze/regex/) in the Troubleshoot documentation. - -You can use the `selector` attribute of the `logs` collector to find Pods that have the specified labels. Depending on the complexity of an application's labeling schema, you might need a few different declarations of the logs collector, as shown in the examples below. You can include the `logs` collector as many times as needed. - -<Tabs> - <TabItem value="secret" label="Kubernetes Secret" default> - <LogsSelectorsSecret/> - </TabItem> - <TabItem value="custom-resource" label="SupportBundle Custom Resource"> - <LogsSelectorsCr/> - </TabItem> -</Tabs> - -## Collect Logs Using `limits` - -The examples below use the `logs` collector to collect Pod logs from the Pod where the application is running. These specifications use the `limits` field to set a `maxAge` and `maxLines` to limit the output provided.
- -For more information, see [Pod Logs](https://troubleshoot.sh/docs/collect/logs/) in the Troubleshoot documentation. - -<Tabs> - <TabItem value="secret" label="Kubernetes Secret" default> - <LogsLimitsSecret/> - </TabItem> - <TabItem value="custom-resource" label="SupportBundle Custom Resource"> - <LogsLimitsCr/> - </TabItem> -</Tabs> - -## Collect Redis and MySQL Server Information - -The following examples use the `redis` and `mysql` collectors to collect information about Redis and MySQL servers running in the cluster. - -For more information, see [Redis](https://troubleshoot.sh/docs/collect/redis/) and [MySQL](https://troubleshoot.sh/docs/collect/mysql/) in the Troubleshoot documentation. - -<Tabs> - <TabItem value="secret" label="Kubernetes Secret" default> - <RedisMysqlSecret/> - </TabItem> - <TabItem value="custom-resource" label="SupportBundle Custom Resource"> - <RedisMysqlCr/> - </TabItem> -</Tabs> - -## Run and Analyze a Pod - -The examples below use the `textAnalyze` analyzer to check that a command successfully executes in a Pod running in the cluster. The Pod specification is defined in the `runPod` collector. - -For more information, see [Run Pods](https://troubleshoot.sh/docs/collect/run-pod/) and [Regular Expression](https://troubleshoot.sh/docs/analyze/regex/) in the Troubleshoot documentation. - -<Tabs> - <TabItem value="secret" label="Kubernetes Secret" default> - <RunPodsSecret/> - </TabItem> - <TabItem value="custom-resource" label="SupportBundle Custom Resource"> - <RunPodsCr/> - </TabItem> -</Tabs> - -================ -File: docs/vendor/support-bundle-generating.mdx -================ -import InstallPlugin from "../partials/support-bundles/_install-plugin.mdx" -import GenerateBundle from "../partials/support-bundles/_generate-bundle.mdx" - -# Generating Support Bundles - -This topic describes how to generate support bundles from the command line using the kubectl support-bundle plugin. For more information about support bundles, see [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about). - -The information in this topic applies to generating support bundles in clusters where you have kubectl access. For information about generating support bundles that include cluster- and host-level information for Replicated Embedded Cluster installations, see [Generating Support Bundles for Embedded Cluster](support-bundle-embedded). - -## Prerequisite: Install the support-bundle Plugin - -<InstallPlugin/> - -## Generate a Bundle - -<GenerateBundle/> - -## Generate a Bundle when a Helm Installation Fails - -If a Helm installation fails and you want to collect a support bundle to assist with diagnostics, you can use a Replicated default specification to generate the support bundle. - -Run the following command: - -```bash -kubectl support-bundle https://raw.githubusercontent.com/replicatedhq/troubleshoot-specs/main/in-cluster/default.yaml -``` - -================ -File: docs/vendor/support-enabling-direct-bundle-uploads.md -================ -# Enabling Support Bundle Uploads (Beta) - -:::note -Direct bundle uploads are in beta. The functionality, requirements, and limitations of direct bundle uploads are subject to change. -::: - -When this feature is enabled, customers using online KOTS installations can upload support bundles directly through the Admin Console UI, eliminating the need to share the generated bundle with you manually.
- -When enabled, your customers can use the **Send bundle to vendor** button in the Admin Console to upload a generated support bundle. - -<img alt="Send bundle to vendor screen" src="/images/send-bundle-to-vendor.png" width="600px"/> - -After your customer clicks this button, the bundle is immediately available on the **Troubleshoot** page in the Vendor Portal team account associated with this customer. - -For more information on how your customer can use this feature, see [Generating Support Bundles from the Admin Console](/enterprise/troubleshooting-an-app). - -### How to Enable Direct Bundle Uploads - -Direct bundle uploads are disabled by default. To enable this feature for your customer: - -1. Log in to the Vendor Portal and navigate to your customer's **Manage Customer** page. -1. Under the **License options** section, make sure your customer has **KOTS Install Enabled** checked, and then check the **Support Bundle Upload Enabled (Beta)** option. - <img alt="Customer license options: configure direct support bundle upload" src="/images/configure-direct-support-bundle-upload.png" width="400px"/> - - [View a larger version of this image](/images/configure-direct-support-bundle-upload.png) -1. Click **Save**. - -### Limitations - -- You will not receive a notification when a customer sends a support bundle to the Vendor Portal. To avoid overlooking these uploads, activate this feature only if there is a reliable escalation process already in place for the customer license. -- This feature supports only online KOTS installations. If the feature is enabled but KOTS is installed in air gap mode, the upload button does not appear. -- There is a 500 MB limit for support bundles uploaded directly via the Admin Console. - -================ -File: docs/vendor/support-host-support-bundles.md -================ -import GenerateBundleHost from "../partials/support-bundles/_generate-bundle-host.mdx" - -# Generating Host Bundles for kURL - -This topic describes how to configure a host support bundle spec for Replicated kURL installations. For information about generating host support bundles for Replicated Embedded Cluster installations, see [Generating Host Bundles for Embedded Cluster](/vendor/support-bundle-embedded). - -## Overview - -Host support bundles can be used to collect information directly from the host where a kURL cluster is running, such as CPU, memory, available block devices, and the operating system. Host support bundles can also be used for testing network connectivity and gathering the output of provided commands. - -Host bundles for kURL are useful when: -- The kURL cluster is offline -- The kURL installer failed before the control plane was initialized -- The Admin Console is not working -- You want to debug host-specific performance and configuration problems even when the cluster is running - -You can create a YAML spec to allow users to generate host support bundles for kURL installations. For information, see [Create a Host Support Bundle Spec](#create-a-host-support-bundle-spec) below. - -Replicated also provides a default support bundle spec to collect host-level information for installations with the Embedded Cluster installer. For more information, see [Generating Host Bundles for Embedded Cluster](/vendor/support-bundle-embedded).
- -## Create a Host Support Bundle Spec - -To allow users to generate host support bundles for kURL installations, create a host support bundle spec in a YAML manifest that is separate from your application release and then share the file with customers to run on their hosts. This spec is separate from your application release because host collectors and analyzers are intended to run directly on the host and not with Replicated KOTS. If KOTS runs host collectors, the collectors are unlikely to produce the desired results because they run in the context of the kotsadm Pod. - -To configure a host support bundle spec for kURL: - -1. Create a SupportBundle custom resource manifest file (`kind: SupportBundle`). - -1. Configure all of your host collectors and analyzers in one manifest file. You can use the following resources to help create your specification: - - - Access sample specifications in the Replicated troubleshoot-specs repository, which provides specifications for supporting your customers. See [troubleshoot-specs/host](https://github.com/replicatedhq/troubleshoot-specs/tree/main/host) in GitHub. - - - View a list and details of the available host collectors and analyzers. See [All Host Collectors and Analyzers](https://troubleshoot.sh/docs/host-collect-analyze/all/) in the Troubleshoot documentation. - - **Example:** - - The following example shows host collectors and analyzers for the number of CPUs and the amount of memory. - - ```yaml - apiVersion: troubleshoot.sh/v1beta2 - kind: SupportBundle - metadata: - name: host-collectors - spec: - hostCollectors: - - cpu: {} - - memory: {} - hostAnalyzers: - - cpu: - checkName: "Number of CPUs" - outcomes: - - fail: - when: "count < 2" - message: At least 2 CPU cores are required, and 4 CPU cores are recommended. - - pass: - message: This server has at least 2 CPU cores. - - memory: - checkName: "Amount of Memory" - outcomes: - - fail: - when: "< 4G" - message: At least 4G of memory is required, and 8G is recommended. - - pass: - message: The system has at least 4G of memory. - ``` - -1. Share the file with your customers to run on their hosts. - -:::important -Do not store support bundles on public shares, as they may still contain information that could be used to infer private data about the installation, even if some values are redacted. -::: - -## Generate a Host Bundle for kURL - -<GenerateBundleHost/> - -================ -File: docs/vendor/support-inspecting-support-bundles.md -================ -# Inspecting Support Bundles - -You can use the Vendor Portal to get a visual analysis of customer support bundles and use the file inspector to drill down into the details and log files. Use this information to get insights and help troubleshoot your customer issues. - -To inspect a support bundle: - -1. In the Vendor Portal, go to the [**Troubleshoot**](https://vendor.replicated.com/troubleshoot) page and click **Add support bundle > Upload a support bundle**. - -1. In the **Upload a support bundle** dialog, drag and drop or use the file selector to upload a support bundle file to the Vendor Portal. - - <img alt="Upload a support bundle dialog" src="/images/support-bundle-analyze.png" width="500px"/> - - [View a larger version of this image](/images/support-bundle-analyze.png) - -1. (Optional) If the support bundle relates to an open support issue, select the support issue from the dropdown to share the bundle with Replicated. - -1. Click **Upload support bundle**. - - The **Support bundle analysis** page opens.
The **Support bundle analysis** page includes information about the bundle, any available instance reporting data from the point in time when the bundle was collected, an analysis overview that can be filtered to show errors and warnings, and a file inspector. - - ![Support bundle analysis overview](/images/support-bundle-analysis-overview.png) - - [View a larger version of this image](/images/support-bundle-analysis-overview.png) - -1. On the **File inspector** tab, select any files from the directory tree to inspect the details of any files included in the support bundle, such as log files. - -1. (Optional) Click **Download bundle** to download the bundle. This can be helpful if you want to access the bundle from another system or if other team members want to access the bundle and use other tools to examine the files. - -1. (Optional) Navigate back to the [**Troubleshoot**](https://vendor.replicated.com/troubleshoot) page and click **Create cluster** to provision a cluster with Replicated Compatibility Matrix. This can be helpful for creating customer-representative environments for troubleshooting. For more information about creating clusters with Compatibility Matrix, see [Using Compatibility Matrix](testing-how-to). - - <img alt="Cluster configuration dialog" src="/images/cmx-cluster-configuration.png" width="400px"/> - - [View a larger version of this image](/images/cmx-cluster-configuration.png) - -1. If you cannot resolve your customer's issue and need to submit a support request, go to the [**Support**](https://vendor.replicated.com/) page and click **Open a support request**. For more information, see [Submitting a Support Request](support-submit-request). - - :::note - The **Share with Replicated** button on the support bundle analysis page does _not_ open a support request. You might be directed to use the **Share with Replicated** option when you are already interacting with a Replicated team member. - ::: - - ![Submit a Support Request](/images/support.png) - - [View a larger version of this image](/images/support.png) - -================ -File: docs/vendor/support-modular-support-bundle-specs.md -================ -# About Creating Modular Support Bundle Specs - -This topic describes how to use a modular approach to creating support bundle specs. - -## Overview - -Support bundle specifications can be designed using a modular approach. This refers to creating multiple different specs that are scoped to individual components or microservices, rather than creating a single, large spec. For example, for applications that are deployed as multiple Helm charts, vendors can create a separate support bundle spec in the `templates` directory in the parent chart as well as in each subchart. - -This modular approach helps teams develop specs that are easier to maintain and helps teams avoid merge conflicts that are more likely to occur when making changes to a large spec. When generating support bundles for an application that includes multiple modular specs, the specs are merged so that only one support bundle archive is generated. - -## Example: Support Bundle Specifications by Component {#component} - -Using a modular approach for an application that ships MySQL, NGINX, and Redis, your team can add collectors and analyzers using a separate support bundle specification for each component.
- -`manifests/nginx/troubleshoot.yaml` - -This collector and analyzer checks compliance for the minimum number of replicas for the NGINX component: - - ```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: SupportBundle -metadata: - name: nginx -spec: - collectors: - - logs: - selector: - - app=nginx - analyzers: - - deploymentStatus: - name: nginx - outcomes: - - fail: - when: replicas < 2 - ``` - -`manifests/mysql/troubleshoot.yaml` - -This collector and analyzer checks compliance for the minimum version of the MySQL component: - - ```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: SupportBundle -metadata: - name: mysql -spec: - collectors: - - mysql: - uri: 'dbuser:**REDACTED**@tcp(db-host)/db' - analyzers: - - mysql: - checkName: Must be version 8.x or later - outcomes: - - fail: - when: version < 8.x -``` - -`manifests/redis/troubleshoot.yaml` - -This collector checks that the Redis server is responding: - -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: SupportBundle -metadata: - name: redis -spec: - collectors: - - redis: - collectorName: redis - uri: rediss://default:password@hostname:6379 -``` - -A single support bundle archive can be generated from a combination of these manifests using the `kubectl support-bundle --load-cluster-specs` command. -For more information and additional options, see [Generating Support Bundles](support-bundle-generating). - -================ -File: docs/vendor/support-online-support-bundle-specs.md -================ -# Making Support Bundle Specs Available Online - -This topic describes how to make your application's support bundle specs available online as well as how to link to online specs. - -## Overview - -You can make the definition of one or more support bundle specs available online in a source repository and link to it from the specs in the cluster. This approach lets you update collectors and analyzers outside of the application release and notify customers of potential problems and fixes in between application updates. - -The schema supports a `uri:` field that, when set, causes the support bundle generation to use the online specification. If the URI is unreachable or unparseable, any collectors or analyzers in the specification are used as a fallback. - -You update collectors and analyzers in the online specification to manage bug fixes. When a customer generates a support bundle, the online specification can detect those potential problems in the cluster and let them know how to fix them. Without the URI link option, customers are not notified of potential problems until the next time they update their application or Kubernetes version. The URI link option is particularly useful for customers that do not update their application routinely. - -If you are using a modular approach to designing support bundles, you can use multiple online specs. Each specification supports one URI link. For more information about modular specs, see [About Creating Modular Support Bundle Specs](support-modular-support-bundle-specs). - -## Example: URI Linking to a Source Repository - -This example shows how Replicated could set up a URI link for one of its own components. You can follow a similar process to link to your own online repository for your support bundles. - -Replicated kURL includes an EKCO add-on for maintenance on embedded clusters, such as automating certificate rotation or data migration tasks.
Replicated can ship this component with a support bundle manifest that warns users if they do not have this add-on installed or if it is not running in the cluster. - -**Example: Release v1.0.0** - -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: SupportBundle -metadata: -  name: ekco -spec: - collectors: - analyzers: - - deploymentStatus: - checkName: Check EKCO is operational - name: ekc-operator - namespace: kurl - outcomes: - - fail: - when: absent - message: EKCO is not installed - please add the EKCO component to your kURL spec and re-run the installer script - - fail: - when: "< 1" - message: EKCO does not have any ready replicas - - pass: - message: EKCO has at least 1 replica -``` - -If a bug is discovered at any time after the release of the specification above, Replicated can write an analyzer for it in an online specification. By adding a URI link to the online specification, the support bundle uses the assets hosted in the online repository, which is kept current. - -The `uri` field is added to the specification as a raw file link. Replicated hosts the online specification on [GitHub](https://github.com/replicatedhq/troubleshoot-specs/blob/main/in-cluster/default.yaml). - -**Example: Release v1.1.0** - -```yaml -apiVersion: troubleshoot.sh/v1beta2 -kind: SupportBundle -metadata: -  name: ekco -spec: - uri: https://raw.githubusercontent.com/replicatedhq/troubleshoot-specs/main/in-cluster/default.yaml - collectors: [...] - analyzers: [...] -``` - -Using the `uri:` property, the support bundle gets the latest online specification if it can, or falls back to the collectors and analyzers listed in the specification that is in the cluster. - -Note that because the release version 1.0.0 did not contain the URI, Replicated would have to wait until existing users upgrade a cluster before getting the benefit of the new analyzer. Then, going forward, those users get any future online analyzers without having to upgrade. New users who install the version containing the URI as their initial installation automatically get any online analyzers when they generate a support bundle. - -For more information about the URI, see [Troubleshoot schema supports a `uri://` field](https://troubleshoot.sh/docs/support-bundle/supportbundle/#uri) in the Troubleshoot documentation. For a complete example, see [Debugging Kubernetes: Enhancements to Troubleshoot](https://www.replicated.com/blog/debugging-kubernetes-enhancements-to-troubleshoot/#Using-online-specs-for-support-bundles) in The Replicated Blog. - -================ -File: docs/vendor/support-submit-request.md -================ -# Submitting a Support Request - -You can submit a support request and a support bundle using the Replicated Vendor Portal. Uploading a support bundle is secure and helps the Replicated support team troubleshoot your application faster. Severity 1 issues are resolved three times faster when you submit a support bundle with your support request. - -### Prerequisites - -The following prerequisites must be met to submit support requests: - -* Your Vendor Portal account must be configured for access to support before you can submit support requests. Contact your administrator to ensure that you are added to the correct team. - -* Your team must have a replicated-collab repository configured. If you are a team administrator and need information about getting a collab repository set up and adding users, see [Adding Users to the Collab Repository](team-management-github-username#add). 
- - -### Submit a Support Request - -To submit a support request: - -1. From the [Vendor Portal](https://vendor.replicated.com), click **Support > Submit a Support Request** or go directly to the [Support page](https://vendor.replicated.com/support). - -1. In section 1 of the Support Request form, complete the fields with information about your issue. - -1. In section 2, do _one_ of the following actions: - - Use your pre-selected support bundle or select a different bundle in the pick list - - Select **Upload and attach a new support bundle** and attach a bundle from your file browser - -1. Click **Submit Support Request**. You receive a link to your support issue, where you can interact with the support team. - - :::note - Click **Back** to exit without submitting a support request. - ::: - -================ -File: docs/vendor/team-management-github-username.mdx -================ -import CollabRepoAbout from "../partials/collab-repo/_collab-repo-about.mdx" -import CollabRbacResourcesImportant from "../partials/collab-repo/_collab-rbac-resources-important.mdx" -import CollabExistingUser from "../partials/collab-repo/_collab-existing-user.mdx" - - -# Managing Collab Repository Access - -This topic describes how to add users to the Replicated collab GitHub repository automatically through the Replicated Vendor Portal. It also includes information about managing user roles in this repository using Vendor Portal role-based access control (RBAC) policies. - -## Overview {#overview} - -<CollabRepoAbout/> - -To get access to the collab repository, members of a Vendor Portal team can add their GitHub username to the [Account Settings](https://vendor.replicated.com/account-settings) page in the Vendor Portal. The Vendor Portal then automatically provisions the team member as a user in the collab repository in GitHub. The RBAC policy that the member is assigned in the Vendor Portal determines the GitHub role that they have in the collab repository. - -Replicated recommends that Vendor Portal admins manage user access to the collab repository through the Vendor Portal, rather than manually managing users through GitHub. Managing access through the Vendor Portal has the following benefits: -* Users are automatically added to the collab repository when they add their GitHub username in the Vendor Portal. -* Users are automatically removed from the collab repository when they are removed from the Vendor Portal team. -* Vendor portal and collab repository RBAC policies are managed from a single location. - -## Add Users to the Collab Repository {#add} - -This procedure describes how to use the Vendor Portal to access the collab repository for the first time as an Admin, then automatically add new and existing users to the repository. This allows you to use the Vendor Portal to manage the GitHub roles for users in the collab repository, rather than manually adding, managing, and removing users from the repository through GitHub. - -### Prerequisite - -Your team must have a replicated-collab repository configured to add users to -the repository and to manage repository access through the Vendor Portal. To get -a collab support repository configured in GitHub for your team, complete the onboarding -instructions in the email you received from Replicated. You can also access the [Replicated community help forum](https://community.replicated.com/) for assistance. - -### Procedure - -To add new and existing users to the collab repository through the Vendor Portal: - -1. 
As a Vendor Portal admin, log in to your Vendor Portal account. In the [Account Settings](https://vendor.replicated.com/account-settings) page, add your GitHub username and click **Save Changes**. - - <img src="/images/account-info.png" alt="Account info in the Vendor Portal" width="600"/> - - The Vendor Portal automatically adds your GitHub username to the collab repository and assigns it the Admin role. You receive an email with details about the collab repository when you are added. - -1. Follow the collab repository link from the email that you receive to log in to your GitHub account and access the repository. - -1. (Recommended) Manually remove any users in the collab repository that were previously added through GitHub. - - :::note - <CollabExistingUser/> - ::: - -1. (Optional) In the Vendor Portal, go to the [Team](https://vendor.replicated.com/team/members) page. For each team member, click **Edit permissions** as necessary to specify their GitHub role in the collab repository. - - For information about which policies to select, see [About GitHub Roles](#about-github-roles). - -1. Instruct each Vendor Portal team member to add their GitHub username to the [Account Settings](https://vendor.replicated.com/account-settings) page in the Vendor Portal. - - The Vendor Portal adds the username to the collab repository and assigns a GitHub role to the user based on their Vendor Portal policy. - - Users receive an email when they are added to the collab repository. - -## About GitHub Roles - -When team members add a GitHub username to their Vendor Portal account, the Vendor Portal determines how to assign the user a default GitHub role in the collab repository based on the following criteria: -* If the GitHub username already exists in the collab repository -* The RBAC policy assigned to the member in the Vendor Portal - -You can also update any custom RBAC policies in the Vendor Portal to change the default GitHub roles for those policies. - -### Default Roles for Existing Users {#existing-username} - -<CollabExistingUser/> - -### Default Role Mapping {#role-mapping} - -When team members add a GitHub username to their Vendor Portal account, the Vendor Portal assigns them to a GitHub role in the collab repository that corresponds to their Vendor Portal policy. For example, users with the default Read Only policy in the Vendor Portal are assigned the Read GitHub role in the collab repository. - -For team members assigned custom RBAC policies in the Vendor Portal, you can edit the custom policy to change their GitHub role in the collab repository. For more information, see [About Changing the Default GitHub Role](#custom) below. - -The table below describes how each default and custom Vendor Portal policy corresponds to a role in the collab repository in GitHub. For more information about each of the GitHub roles described in this table, see [Permissions for each role](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/repository-roles-for-an-organization#permissions-for-each-role) in the GitHub documentation. 
- -<table> - <tr> - <th width="25%">Vendor Portal Role</th> - <th width="25%">GitHub collab Role</th> - <th width="50%">Description</th> - </tr> - <tr> - <td>Admin</td> - <td>Admin</td> - <td><p>Members assigned the default Admin role in the Vendor Portal are assigned the GitHub Admin role in the collab repository.</p></td> - </tr> - <tr> - <td>Support Engineer</td> - <td>Triage</td> - <td><p>Members assigned the custom Support Engineer role in the Vendor Portal are assigned the GitHub Triage role in the collab repository.</p><p>For information about creating a custom Support Engineer policy in the Vendor Portal, see <a href="team-management-rbac-configuring#support-engineer">Support Engineer</a> in <em>Configuring RBAC Policies</em>.</p><p>For information about editing custom RBAC policies to change this default GitHub role, see <a href="#custom">About Changing the Default GitHub Role</a> below.</p></td> - </tr> - <tr> - <td>Read Only</td> - <td>Read</td> - <td>Members assigned the default Read Only role in the Vendor Portal are assigned the GitHub Read role in the collab repository.</td> - </tr> - <tr> - <td>Sales</td> - <td>N/A</td> - <td><p>Users assigned the custom Sales role in the Vendor Portal do not have access to the collab repository.</p><p>For information about creating a custom Sales policy in the Vendor Portal, see <a href="team-management-rbac-configuring#sales">Sales</a> in <em>Configuring RBAC Policies</em>.</p><p>For information about editing custom RBAC policies to change this default GitHub role, see <a href="#custom">About Changing the Default GitHub Role</a> below.</p></td> - </tr> - <tr> - <td>Custom policies with <code>**/admin</code> under <code>allowed:</code></td> - <td>Admin</td> - <td> - <p>By default, members assigned to a custom RBAC policy that specifies <code>**/admin</code> under <code>allowed:</code> are assigned the GitHub Admin role in the collab repository.</p> - <p>For information about editing custom RBAC policies to change this default GitHub role, see <a href="#custom">About Changing the Default GitHub Role</a> below.</p> - </td> - </tr> - <tr> - <td>Custom policies <em>without</em> <code>**/admin</code> under <code>allowed:</code></td> - <td>Read Only</td> - <td> - <p>By default, members assigned to any custom RBAC policies that do not specify <code>**/admin</code> under <code>allowed:</code> are assigned the Read Only GitHub role in the collab repository.</p> - <p>For information about editing custom RBAC policies to change this default GitHub role, see <a href="#custom">About Changing the Default GitHub Role</a> below.</p> - </td> - </tr> -</table> - -### Change the Default Role {#custom} - -You can update any custom RBAC policies that you create in the Vendor Portal to change the default GitHub roles for those policies. For example, by default, any team members assigned a custom policy with `**/admin` under `allowed:` are assigned the Admin role in the collab repository in GitHub. You can update the custom policy to specify a more restrictive GitHub role. - -To edit a custom policy to change the default GitHub role assigned to users with that policy, add one of the following RBAC resources to the `allowed:` or `denied:` list in the custom policy: - -* `team/support-issues/read` -* `team/support-issues/write` -* `team/support-issues/triage` -* `team/support-issues/admin` - -For more information about each of these RBAC resources, see [Team](team-management-rbac-resource-names#team) in _RBAC Resource Names_. 
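-
-For example, a custom policy like the following sketch gives members broad read-only access in the Vendor Portal while assigning them the GitHub Triage role in the collab repository. The policy name and the read/list grants are illustrative; adjust them for your team:
-
-```json
-{
-  "v1": {
-    "name": "Support Triage",
-    "resources": {
-      "allowed": [
-        "**/read",
-        "**/list",
-        "team/support-issues/triage"
-      ],
-      "denied": [
-        "**/*"
-      ]
-    }
-  }
-}
-```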
-
-For more information about how to edit the `allowed:` or `denied:` lists for custom policies in the Vendor Portal, see [Configuring Custom RBAC Policies](team-management-rbac-configuring).
-
-<CollabRbacResourcesImportant/>
-
-================
-File: docs/vendor/team-management-google-auth.md
-================
-# Managing Google Authentication
-
-This topic describes the Google authentication options that you can configure to control access to the Replicated Vendor Portal.
-
-## Manage Google Authentication Options
-
-As a team administrator, you can enable, disable, or require Google authentication for all accounts in the team.
-
-A core benefit of using Google authentication is that when a user's Google account is suspended or deleted, Replicated logs that user out of all Google authenticated Vendor Portal sessions within 10 minutes. The user remains in the team list, but they cannot log into the Vendor Portal unless username and password authentication is also allowed. Requiring Google authentication is an effective way of centrally removing access to the Vendor Portal.
-
-To manage Google authentication settings:
-
-1. Click **Team Settings > [Google Authentication](https://vendor.replicated.com/team/google-authentication)**.
-
-   ![Google Auth Settings](/images/team-mgmt-google-auth.png)
-
-1. Enable or disable the settings:
-
-   | Field | Instructions |
-   |-----------------------|------------------------|
-   | Allow Google authentication for team members | Enables team members to log in using a Google account. |
-   | Restrict login to only allow Google authentication | Requires new users to accept an invitation and sign up with a Google account that exactly matches the email address that was invited to the team. The email address can be a gmail.com address or an address from another domain, but it must match the email address from the invitation exactly. Disabling this setting requires users to accept the invitation by creating a username and password (or use the SAML workflow). |
-
-## Migrating Existing Accounts
-Except for some teams that restrict end users to using only Security Assertion Markup Language (SAML) or that require two-factor authentication (2FA), existing end users can seamlessly sign in to an account that exactly matches their Google Workspace (formerly GSuite) email address. However, Google authentication only matches existing user accounts, so users who have signed up using task-based email addresses (such as name+news@domain.com) can continue to use email/password to sign in, invite their normal email address to the team, or contact support to change their email address. For more information about task-based email addresses, see [Create task-specific email addresses](https://support.google.com/a/users/answer/9308648?hl=en) in the Google Support site.
-
-Migrated accounts maintain the same role-based access control (RBAC) permissions that were previously assigned. After signing in with Google, users can choose to disable username/password-based authentication on their account or maintain both authentication methods using the Vendor Portal [account settings page](https://vendor.replicated.com/account-settings).
-
-## Limitations
-
-Using distribution lists for sending invitations to join a team is not supported. The invitations are sent, but are invalid and cannot be used to join a team using Google authentication.
-
-## Compatibility with Two-Factor Authentication
-Google authentication is not entirely compatible with the Replicated two-factor authentication (2FA) implementation because Google authentication bypasses account-based 2FA and relies on Google's own authentication instead. However, the Vendor Portal continues to enforce 2FA on all email/password-based authentication, even for the same user, if both options are enabled.
-
-## Related Topic
-
-[Managing Team Members](team-management)
-
-================
-File: docs/vendor/team-management-rbac-configuring.md
-================
-import CollabRbacResourcesImportant from "../partials/collab-repo/_collab-rbac-resources-important.mdx"
-
-# Configuring RBAC Policies
-
-This topic describes how to use role-based access control (RBAC) policies to grant or deny team members permissions to use Replicated services in the Replicated Vendor Portal.
-
-## About RBAC Policies
-
-By default, every team has two policies created automatically: **Admin** and **Read Only**. If you have an Enterprise plan, you will also have the **Sales** and **Support Engineer** policies created automatically. These default policies are not configurable. For more information, see [Default RBAC Policies](#default-rbac) below.
-
-You can configure custom RBAC policies if you are on the Enterprise pricing plan. Creating custom RBAC policies lets you limit which areas of the Vendor Portal are accessible to team members, and control read and read/write privileges for groups based on their role. For example, you can limit access for the sales team to one application and to specific channels. Or, you can grant only certain users permission to promote releases to your production channels.
-
-You can also create custom RBAC policies in the Vendor Portal to manage user access and permissions in the Replicated collab repository in GitHub. For more information, see [Managing Access to the Collab Repository](team-management-github-username).
-
-## Default RBAC Policies {#default-rbac}
-
-This section describes the default RBAC policies that are included for Vendor Portal teams, depending on the team's Replicated pricing plan.
-
-### Admin
-
-The Admin policy grants read/write permissions to all resources on the team.
-
-:::note
-This policy is automatically created for all plans.
-:::
-
-```json
-{
-  "v1": {
-    "name": "Admin",
-    "resources": {
-      "allowed": [
-        "**/*"
-      ],
-      "denied": []
-    }
-  }
-}
-```
-
-### Read Only
-
-The Read Only policy grants read permission to all resources on the team except for API tokens.
-
-:::note
-This policy is automatically created for all plans.
-:::
-
-```json
-{
-  "v1": {
-    "name": "Read Only",
-    "resources": {
-      "allowed": [
-        "**/list",
-        "**/read"
-      ],
-      "denied": [
-        "**/*"
-      ]
-    }
-  }
-}
-```
-
-### Support Engineer
-
-The Support Engineer policy grants read access to releases, channels, and application data, and read-write access to customer and license details. It also grants permission to open Replicated support issues and upload support bundles.
-
-:::note
-This policy is automatically created for teams with the Enterprise plan only.
-:::
-
-```json
-{
-  "v1": {
-    "name": "Support Engineer",
-    "resources": {
-      "allowed": [
-        "**/read",
-        "**/list",
-        "kots/app/*/license/**",
-        "team/support-issues/read",
-        "team/support-issues/write"
-      ],
-      "denied": [
-        "**/*"
-      ]
-    }
-  }
-}
-```
-
-### Sales
-
-The Sales policy grants read-write access to customers and license details and read-only access to resources necessary to manage licenses (applications, channels, and license fields). No additional access is granted.
-
-:::note
-This policy is automatically created for teams with the Enterprise plan only.
-:::
-
-```json
-{
-  "v1": {
-    "name": "Sales",
-    "resources": {
-      "allowed": [
-        "kots/app/*/read",
-        "kots/app/*/channel/*/read",
-        "kots/app/*/licensefields/read",
-        "kots/app/*/license/**"
-      ],
-      "denied": [
-        "**/*"
-      ]
-    }
-  }
-}
-```
-
-## Configure a Custom RBAC Policy
-
-To configure a custom RBAC policy:
-
-1. From the Vendor Portal [Team page](https://vendor.replicated.com/team), select **RBAC** from the left menu.
-
-1. Do _one_ of the following:
-
-   - Click **Create Policy** from the RBAC page to create a new policy.
-   - Click **View policy** to edit an existing custom policy in the list.
-
-   <CollabRbacResourcesImportant/>
-
-1. Edit the fields in the policy dialog. In the **Definition** pane, specify the `allowed` and `denied` arrays in the `resources` key to create limits for the role.
-
-   The default policy allows everything, and the **Config help** pane displays any errors.
-
-   ![Create RBAC Policy](/images/policy-create.png)
-
-   - For more information, see [Policy Definition](#policy-definition).
-   - For more information about and examples of rule order, see [Rule Order](#rule-order).
-   - For a list of resource names, see [RBAC Resource Names](team-management-rbac-resource-names).
-
-1. Click **Create Policy** to create a new policy, or click **Update Policy** to update an existing policy.
-
-   :::note
-   Click **Cancel** to exit without saving changes.
-   :::
-
-1. To apply RBAC policies to Vendor Portal team members, you can:
-
-   - Assign policies to existing team members
-   - Specify a policy when inviting new team members
-   - Set a default policy for auto-joining a team
-
-   See [Managing Team Members](team-management).
-
-## Policy Definition
-
-A policy is defined in a single JSON document:
-
-```json
-{
-  "v1": {
-    "name": "Read Only",
-    "resources": {
-      "allowed": [
-        "**/read",
-        "**/list"
-      ],
-      "denied": [
-        "**/*"
-      ]
-    }
-  }
-}
-```
-
-The primary content of a policy document is the `resources` key. The `resources` key should contain two arrays, identified as `allowed` and `denied`. Resources specified in the `allowed` list are allowed for users assigned to the policy, and resources specified in the `denied` list are denied.
-
-Resource names are hierarchical, and support wildcards and globs. For a complete list of resource names that can be defined in a policy document, see [RBAC Resource Names](team-management-rbac-resource-names).
-
-When a policy document has conflicting rules, the behavior is predictable. For more information about conflicting rules, see [Rule Order](#rule-order).
-
-### Example: View Specific Application and Channel
-
-The following policy definition example limits any user with this role to viewing a specific application and a specific channel for that application:
-
-```json
-{
-  "v1": {
-    "name": "Policy Name",
-    "resources": {
-      "allowed": [
-        "kots/app/appID/list",
-        "kots/app/appID/read",
-        "kots/app/appID/channel/channelID/list",
-        "kots/app/appID/channel/channelID/read"
-      ],
-      "denied": []
-    }
-  }
-}
-```
-
-The example above uses an application ID and a channel ID to scope the permissions of the RBAC policy. To find your application and channel IDs, do the following:
-
-- To get the application ID, click **Settings > Show Application ID (Advanced)** in the Vendor Portal.
-
-- To get the channel ID, click **Channels** in the Vendor Portal. Then click the Release History link for the channel that you want to limit access to. The channel ID displays in your browser URL.
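-
-If you use the Replicated CLI, you can also look up these IDs from the command line. The following is a sketch that assumes the CLI is installed and authenticated with an API token; `my-app-slug` is a placeholder for your application slug:
-
-```bash
-# List applications and their IDs
-replicated app ls
-
-# List the channels (and their IDs) for a specific application
-replicated channel ls --app my-app-slug
-```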
-
-## Rule Order
-
-When a resource name is specified in both the `allowed` and the `denied` lists of a policy, defined rules determine which rule is applied.
-
-If `denied` is left empty, it is implied as a `**/*` rule, unless a `**/*` rule is specified in the `allowed` resources. If a rule exactly conflicts with another rule, the `denied` rule takes precedence.
-
-### Defining Precedence Using Rule Specificity
-The most specific rule definition is always applied when compared with less specific rules. The specificity of a rule is calculated from the wildcards (`**` and `*`) in the definition: a `**` in the rule definition is the least specific, followed by rules with `*`, and finally rules with no wildcards are the most specific.
-
-### Example: No Access To Stable Channel
-
-In the following example, a policy grants access to promote releases to any channel except the Stable channel. It uses the rule pattern `kots/app/[:appId]/channel/[:channelId]/promote`. Note that you specify the channel ID, rather than the channel name. To find the channel ID, go to the Vendor Portal **Channels** page and click the **Settings** icon for the target channel.
-
-```json
-{
-  "v1": {
-    "name": "No Access To Stable Channel",
-    "resources": {
-      "allowed": [
-        "**/*"
-      ],
-      "denied": [
-        "kots/app/*/channel/1eg7CyEofYSmVAnK0pEKUlv36Y3/promote"
-      ]
-    }
-  }
-}
-```
-
-### Example: View Customers Only
-
-In the following example, a policy grants access to viewing all customers, but not to creating releases, promoting releases, or creating new customers.
-
-```json
-{
-  "v1": {
-    "name": "View Customers Only",
-    "resources": {
-      "allowed": [
-        "kots/app/*/license/*/read",
-        "kots/app/*/license/*/list",
-        "kots/app/*/read",
-        "kots/app/*/list"
-      ],
-      "denied": [
-        "**/*"
-      ]
-    }
-  }
-}
-```
-
-================
-File: docs/vendor/team-management-rbac-resource-names.md
-================
-import CollabRbacResourcesImportant from "../partials/collab-repo/_collab-rbac-resources-important.mdx"
-
-# RBAC Resource Names
-
-This is a list of all available resource names for Replicated vendor role-based access control (RBAC) policies:
-
-## Integration Catalog
-
-### integration/catalog/list
-
-Grants the holder permission to view the catalog events and triggers available for integrations.
-
-## kots
-
-### kots/app/create
-
-Grants the holder permission to create new applications.
-
-### kots/app/[:appId]/read
-Grants the holder permission to view the application. If the holder does not have permission to view an application, it will not appear in lists.
-
-### kots/externalregistry/list
-Grants the holder the ability to list external Docker registries for application(s).
-
-### kots/externalregistry/create
-
-Grants the holder the ability to link a new external Docker registry to application(s).
-
-### kots/externalregistry/[:registryName]/delete
-
-Grants the holder the ability to delete the specified linked external Docker registry in application(s).
-
-### kots/app/[:appId]/channel/create
-
-Grants the holder the ability to create a new channel in the specified application(s).
-
-### kots/app/[:appId]/channel/[:channelId]/archive
-
-Grants the holder permission to archive the specified channel(s) of the specified application(s).
-
-### kots/app/[:appId]/channel/[:channelId]/promote
-
-Grants the holder the ability to promote a new release to the specified channel(s) of the specified application(s).
-
-### kots/app/[:appId]/channel/[:channelId]/update
-
-Grants the holder permission to update the specified channel of the specified application(s).
-
-### kots/app/[:appId]/channel/[:channelId]/read
-
-Grants the holder permission to view information about the specified channel of the specified application(s).
-
-### kots/app/[:appId]/enterprisechannel/[:channelId]/read
-
-Grants the holder permission to view information about the specified enterprise channel of the specified application(s).
-
-### kots/app/[:appId]/channel/[:channelId]/releases/airgap
-
-Grants the holder permission to trigger airgap builds for the specified channel.
-
-### kots/app/[:appId]/channel/[:channelId]/releases/airgap/download-url
-
-Grants the holder permission to get an airgap bundle download URL for any release on the specified channel.
-
-### kots/app/[:appId]/installer/create
-
-Grants the holder permission to create kURL installers. For more information, see [Creating a kURL installer](packaging-embedded-kubernetes).
-
-### kots/app/[:appId]/installer/update
-
-Grants the holder permission to update kURL installers. For more information, see [Creating a kURL installer](packaging-embedded-kubernetes).
-
-### kots/app/[:appId]/installer/read
-
-Grants the holder permission to view kURL installers. For more information, see [Creating a kURL installer](packaging-embedded-kubernetes).
-
-### kots/app/[:appId]/installer/promote
-
-Grants the holder permission to promote kURL installers to a channel. For more information, see [Creating a kURL installer](packaging-embedded-kubernetes).
-
-:::note
-The `kots/app/[:appId]/installer/promote` policy does not grant the holder permission to view and create installers. Users must be assigned both the `kots/app/[:appId]/installers` and `kots/app/[:appId]/installer/promote` policies to have permissions to view, create, and promote installers.
-:::
-
-### kots/app/[:appId]/license/create
-
-Grants the holder permission to create a new license in the specified application(s).
-
-### kots/app/[:appId]/license/[:customerId]/read
-
-Grants the holder permission to view the license specified by ID. If this is denied, the licenses will not show up in search, CSV export, or on the Vendor Portal, and the holder will not be able to subscribe to this license's instance notifications.
-
-### kots/app/[:appId]/license/[:customerId]/update
-
-Grants the holder permission to edit the license specified by ID for the specified application(s).
-
-### kots/app/[:appId]/license/[:customerId]/slack-notifications/read
-
-Grants the holder permission to view the team's Slack notification subscriptions for instances associated with the specified license.
-
-### kots/app/[:appId]/license/[:customerId]/slack-notifications/update
-
-Grants the holder permission to edit the team's Slack notification subscriptions for instances associated with the specified license.
-
-### kots/app/[:appId]/builtin-licensefields/update
-
-Grants the holder permission to edit the built-in license field override values for the specified application(s).
-
-### kots/app/[:appId]/builtin-licensefields/delete
-
-Grants the holder permission to delete the built-in license field override values for the specified application(s).
-
-### kots/license/[:customerId]/airgap/password
-
-Grants the holder permission to generate a new download portal password for the license specified (by ID) for the specified application(s).
-
-### kots/license/[:customerId]/archive
-
-Grants the holder permission to archive the specified license (by ID).
-
-### kots/license/[:customerId]/unarchive
-
-Grants the holder permission to unarchive the specified license (by ID).
-
-### kots/app/[:appId]/licensefields/create
-
-Grants the holder permission to create new license fields in the specified application(s).
-
-### kots/app/[:appId]/licensefields/read
-
-Grants the holder permission to view the license fields in the specified application(s).
-
-### kots/app/[:appId]/licensefields/update
-
-Grants the holder permission to edit the license fields for the specified application(s).
-
-### kots/app/[:appId]/licensefields/delete
-
-Grants the holder permission to delete the license fields for the specified application(s).
-
-### kots/app/[:appId]/release/create
-
-Grants the holder permission to create a new release in the specified application(s).
-
-### kots/app/[:appId]/release/[:sequence]/update
-
-Grants the holder permission to update the files saved in release sequence `[:sequence]` in the specified application(s). Once a release is promoted to a channel, it is not editable by anyone.
-
-### kots/app/[:appId]/release/[:sequence]/read
-
-Grants the holder permission to read the files at release sequence `[:sequence]` in the specified application(s).
-
-### kots/app/[:appId]/customhostname/list
-
-Grants the holder permission to view custom hostnames for the team.
-
-### kots/app/[:appId]/customhostname/create
-
-Grants the holder permission to create custom hostnames for the team.
-
-### kots/app/[:appId]/customhostname/delete
-
-Grants the holder permission to delete custom hostnames for the team.
-
-### kots/app/[:appId]/customhostname/default/set
-
-Grants the holder permission to set default custom hostnames.
-
-### kots/app/[:appId]/customhostname/default/unset
-
-Grants the holder permission to unset the default custom hostnames.
-
-### kots/app/[:appId]/supportbundle/read
-
-Grants the holder permission to view and download support bundles.
-
-## Registry
-
-### registry/namespace/:namespace/pull
-
-Grants the holder permission to pull images from the Replicated registry.
-
-### registry/namespace/:namespace/push
-
-Grants the holder permission to push images into the Replicated registry.
-
-## Compatibility Matrix
-
-### kots/cluster/create
-
-Grants the holder permission to create new clusters.
-
-### kots/cluster/list
-
-Grants the holder permission to list running and terminated clusters.
-
-### kots/cluster/[:clusterId]
-
-Grants the holder permission to get cluster details.
-
-### kots/cluster/[:clusterId]/upgrade
-
-Grants the holder permission to upgrade a cluster.
-
-### kots/cluster/tag/update
-
-Grants the holder permission to update cluster tags.
-
-### kots/cluster/ttl/update
-
-Grants the holder permission to update the cluster TTL.
-
-### kots/cluster/[:clusterId]/nodegroup
-
-Grants the holder permission to update nodegroup details.
-
-### kots/cluster/[:clusterId]/kubeconfig
-
-Grants the holder permission to get the kubeconfig for a cluster.
-
-### kots/cluster/[:clusterId]/delete
-
-Grants the holder permission to delete a cluster.
-
-### kots/cluster/[:clusterId]/addon/list
-
-Grants the holder permission to list add-ons for a cluster.
-
-### kots/cluster/[:clusterId]/addon/[:addonId]/read
-
-Grants the holder permission to read the add-on for a cluster.
-
-### kots/cluster/[:clusterId]/addon/[:addonId]/delete
-
-Grants the holder permission to delete the add-on for a cluster.
-
-### kots/cluster/[:clusterId]/addon/create/objectStore
-
-Grants the holder permission to create an object store for a cluster.
-
-### kots/cluster/[:clusterId]/port/expose
-
-Grants the holder permission to expose a port for a cluster.
-
-### kots/cluster/[:clusterId]/port/delete
-
-Grants the holder permission to delete a port for a cluster.
-
-### kots/cluster/[:clusterId]/port/list
-
-Grants the holder permission to list exposed ports for a cluster.
-
-### kots/cluster/list-quotas
-
-Grants the holder permission to list the quotas.
-
-### kots/cluster/increase-quota
-
-Grants the holder permission to request an increase in the quota.
-
-### kots/vm/tag/update
-
-Grants the holder permission to update VM tags.
-
-### kots/vm/ttl/update
-
-Grants the holder permission to update the VM TTL.
-
-### kots/vm/[:vmId]/port/expose
-
-Grants the holder permission to expose a port for a VM.
-
-### kots/vm/[:vmId]/port/list
-
-Grants the holder permission to list exposed ports for a VM.
-
-### kots/vm/[:vmId]/addon/[:addonId]/delete
-
-Grants the holder permission to delete the add-on for a VM.
-
-## Team
-
-### team/auditlog/read
-
-Grants the holder permission to view the audit log for the team.
-
-### team/authentication/update
-
-Grants the holder permission to manage the following team authentication settings: Google authentication, Auto-join, and SAML authentication.
-
-### team/authentication/read
-
-Grants the holder permission to read the following authentication settings: Google authentication, Auto-join, and SAML authentication.
-
-### team/integration/list
-
-Grants the holder permission to view the team's integrations.
-
-### team/integration/create
-
-Grants the holder permission to create an integration.
-
-### team/integration/[:integrationId]/delete
-
-Grants the holder permission to delete specified integration(s).
-
-### team/integration/[:integrationId]/update
-
-Grants the holder permission to update specified integration(s).
-
-### team/members/list
-
-Grants the holder permission to list team members and invitations.
-
-### team/member/invite
-
-Grants the holder permission to invite additional people to the team.
-
-### team/members/delete
-
-Grants the holder permission to delete other team members.
-
-### team/notifications/slack-webhook/read
-
-Grants the holder permission to view the team's Slack webhook for instance notifications.
-
-### team/notifications/slack-webhook/update
-
-Grants the holder permission to edit the team's Slack webhook for instance notifications.
-
-### team/policy/read
-
-Grants the holder permission to view RBAC policies for the team.
- -### team/policy/update - -Grants the holder permission to update RBAC policies for the team. - -### team/policy/delete - -Grants the holder permission to delete RBAC policies for the team. - -### team/policy/create - -Grants the holder permission to create RBAC policies for the team. - -### team/security/update - -Grants the holder permission to manage team password requirements including two-factor authentication and password complexity requirements. - -### team/serviceaccount/list - -Grants the holder permission to list service accounts. - -### team/serviceaccount/create - -Grants the holder permission to create new service accounts. - -### team/serviceaccount/[:name]/delete - -Grants the holder permission to delete the service account identified by the name specified. - -### team/support-issues/read - -Grants the holder Read permissions in the Replicated collab repository in GitHub for the Vendor Portal team. Applies after the user adds their GitHub username to the Vendor Portal [Account Settings](https://vendor.replicated.com/account-settings) page. - -To prevent access to the collab repository for an RBAC policy, add `team/support-issues/read` to the `denied:` list in the policy. For example: - -``` -{ - "v1": { - "name": "Policy Name", - "resources": { - "allowed": [], - "denied": [ - "team/support-issues/read" - ] - } - } -} -``` - -For more information about the Read role in GitHub, see [Permissions for each role](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/repository-roles-for-an-organization#permissions-for-each-role) in the GitHub documentation. - -<CollabRbacResourcesImportant/> - -### team/support-issues/write - -Grants the holder Write permissions in the Replicated collab repository in GitHub for the Vendor Portal team. Applies after the user adds their GitHub username to the Vendor Portal [Account Settings](https://vendor.replicated.com/account-settings) page. - -For more information about the Write role in GitHub, see [Permissions for each role](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/repository-roles-for-an-organization#permissions-for-each-role) in the GitHub documentation. - -<CollabRbacResourcesImportant/> - -### team/support-issues/triage - -Grants the holder Triage permissions in the Replicated collab repository in GitHub for the Vendor Portal team. Applies after the user adds their GitHub username to the Vendor Portal [Account Settings](https://vendor.replicated.com/account-settings) page. - -For more information about the Triage role in GitHub, see [Permissions for each role](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/repository-roles-for-an-organization#permissions-for-each-role) in the GitHub documentation. - -<CollabRbacResourcesImportant/> - -### team/support-issues/admin - -Grants the holder Admin permissions in the Replicated collab repository in GitHub for the Vendor Portal team. Applies after the user adds their GitHub username to the Vendor Portal [Account Settings](https://vendor.replicated.com/account-settings) page. - -For more information about the Admin role in GitHub, see [Permissions for each role](https://docs.github.com/en/organizations/managing-user-access-to-your-organizations-repositories/repository-roles-for-an-organization#permissions-for-each-role) in the GitHub documentation. 
-
-<CollabRbacResourcesImportant/>
-
-## User
-
-### user/token/list
-
-Grants the holder permission to list user tokens.
-
-### user/token/create
-
-Grants the holder permission to create new user tokens.
-
-### user/token/delete
-
-Grants the holder permission to delete user tokens.
-
-================
-File: docs/vendor/team-management-saml-auth.md
-================
-# Managing SAML Authentication
-
-This topic describes how to enable or disable SAML authentication for the Replicated Vendor Portal.
-
-## About Using SAML with the Vendor Portal
-
-After starting out with Replicated, most teams grow, adding more developers, support engineers, and sales engineers. Eventually, managing access to the Vendor Portal can become difficult. Replicated supports logging in using SAML, which lets you manage access (provisioning and deprovisioning accounts) through your SAML identity provider.
-
-Using SAML, everyone on your team logs in with their existing usernames and passwords through your identity provider's dashboard. Users do not need to sign up through the Vendor Portal or log in with a separate Vendor Portal account, simplifying their experience.
-
-### Enabling SAML in Your Vendor Account
-
-To enable SAML in your Vendor Portal account, you must have an Enterprise plan. For access to SAML, you can contact Replicated through [Support](https://vendor.replicated.com/support). For information about the Enterprise plan, see [pricing](https://www.replicated.com/pricing/).
-
-### SCIM
-
-Replicated does not implement System for Cross-domain Identity Management (SCIM). Instead, we use SAML to authenticate and create just-in-time user identities in our system. We resolve the username (email address) as the actor and use this to ensure that audit log events follow these dynamically provisioned users. If a user's email address is already associated with a Replicated account, then when they use your SAML integration to access the Vendor Portal, they automatically leave their current team and join the team associated with the SAML login.
-
-### Compatibility with Two-Factor Authentication
-
-If SAML authentication is configured for your team, Replicated two-factor authentication (2FA) is bypassed. You can leave 2FA enabled, but you are not prompted to enter a code when logging in.
-
-### Role-Based Access Control
-
-Replicated supports role-based access control (RBAC) in the Vendor Portal. To use RBAC with SAML, you must configure policies and add users to the policies by their username. Usernames are the identity of the user in your identity provider (IdP). Typically, this username is the full email address. For more information about configuring RBAC, see [Configuring RBAC Policies](team-management-rbac-configuring).
-
-## Downloading Certificates from Supported SAML Providers
-
-You must retrieve the metadata and x.509 public certificate files from your SAML provider before configuring SAML in the Vendor Portal. The certificate file must be in PEM format.
-
-Replicated tests several SAML providers, but the service should be compatible with any SAML 2.0 compliant identity provider. We provide full support for the following SAML providers:
-
-* Okta. For more information about integrating Okta with Replicated, see [Configure Okta](#configure-okta).
-
-* OneLogin
-
-## Configure Okta
-
-The first part of the Vendor Portal and Okta integration is configured in the Okta dashboard. This configuration lets you download the XML Metadata file and x.509 public certificate in PEM format required for the SAML authentication.
-
-This procedure outlines the basic configuration steps, recommended settings, and the specific fields to configure in Okta. For more information about using Okta, see the [Okta](https://help.okta.com/en/prod/Content/index.htm) documentation.
-
-To configure Okta and download the required files:
-
-1. Log in to your Okta Admin dashboard, and click **Applications**.
-
-1. Select **Create new app integration**, and create a new application as a SAML 2.0 application.
-
-1. Provide a name and icon for the application, such as Replicated Vendor Portal. You can download a high-quality Replicated icon [here](https://help.replicated.com/images/guides/vendor-portal-saml/replicated-application-icon.png).
-
-1. Click **Next**.
-
-   The Configuring SAML page opens.
-
-1. Click **Download Okta Certificate**. This downloads your x.509 certificate to provide to Replicated. Save this file to a safe location.
-
-1. On this same page, edit the following fields:
-
-   | Field Name | Description |
-   | :---------------------- | ----------------------------------------------------------------------------------------------- |
-   | Single Sign On URL | Set this to `https://id.replicated.com/v1/saml`. |
-   | Audience URI (SP Entity ID) | Displays on the Vendor Portal [SAML authentication](https://vendor.replicated.com/team/saml-authentication) tab, and is unique to your team. |
-   | Name ID Format | Change this to `EmailAddress`. |
-
-1. Click **Next**.
-
-1. Select **I’m an Okta customer adding an internal app** on the final screen, and click **Finish**.
-
-1. Click **Identity provider metadata** to download the Metadata.xml file. This link typically opens the XML file in your browser, so right-click the link and select **Save Link As…** to download the file.
-
-### Next Step
-
-Configure and enable SAML in the Vendor Portal. For more information, see [Configure SAML](#configure-saml).
-
-## Configure SAML
-
-When you initially configure SAML, we do not recommend that you disable username/password access at the same time. It is possible, and recommended during testing, to support both SAML and non-SAML authentication on your account simultaneously.
-
-**Prerequisite**
-
-- Download your XML Metadata file and x.509 public certificate PEM file from your SAML provider. For more information on supported SAML providers and how to find these files, see [Downloading Certificates from Supported SAML Providers](#downloading-certificates-from-supported-saml-providers).
-
-To configure SAML:
-
-1. Log in to the Vendor Portal [Team Members page](https://vendor.replicated.com/team/members) as a user with Admin access.
-
-1. Click [SAML Authentication](https://vendor.replicated.com/team/saml-authentication) from the left menu. If you do not see these options, contact [Support](https://vendor.replicated.com/support).
-
-   The SAML Authentication page opens.
-
-   ![SAML Authentication](/images/team-mgmt-saml-authentication.png)
-
-   [View a larger version of this image](/images/team-mgmt-saml-authentication.png)
-
-1. Browse for, or drag and drop, your XML Metadata file and x.509 PEM file from your SAML provider.
-
-1. Click **Upload Metadata & Cert**.
-
-### Next Step
-
-At this point, SAML is configured, but not enabled. The next step is to enable SAML enforcement options. For more information, see [Enable SAML Enforcement](#enable-saml-enforcement).
-
-## Enable SAML Enforcement
-
-After you have uploaded the metadata and x.509 public certificate PEM file, you must enable SAML enforcement options. Replicated provides options that can be enabled or disabled at any time. You can also change the IdP metadata if needed.
-
-To enable SAML enforcement:
-
-1. From the Vendor Portal, select **Team > [SAML Authentication](https://vendor.replicated.com/team/saml-authentication)**.
-
-1. Select either or both login method options in the Manage your SAML authentication pane. Allowing both login methods is a good way to test SAML without risking any interruption for the rest of your team.
-
-   **Enable SAML for team logins** - Allows members of your team to log in to the Vendor Portal through your identity provider. This option does not remove, change, or restrict any other authentication methods that you have configured in the Vendor Portal. If you enable SAML and your team is already logging in with accounts provisioned in the Vendor Portal, they will be able to continue logging in with those accounts.
-
-   **Only allow SAML logins** - Requires members of your team to log in to the Vendor Portal through your identity provider. Prevents any non-SAML accounts from logging in. Replicated does not delete the existing accounts. If you turn on this option and then later disable it, accounts that never logged in using SAML will be able to log in again. If an account exists outside of SAML and then is authenticated with SAML, the account is converted and cannot authenticate using a password again.
-
-   ![SAML Authentication](/images/team-mgmt-saml-manage-auth.png)
-
-   [View a larger version of this image](/images/team-mgmt-saml-manage-auth.png)
-
-1. (Optional) Set a default policy for new accounts from the drop-down list.
-
-1. (Optional) Click **Change IdP Metadata** and follow the prompts to upload any changes to your metadata.
-
-SAML is now enabled on your account. For your team to use the SAML login option, you must enable access through your SAML identity provider’s dashboard. For example, if you use Okta, assign the application to users or groups. When a user clicks through to use the application, they are granted access as described in [SCIM](#scim).
-
-## Disable SAML Enforcement
-
-You can disable SAML authentication options at any time and re-enable them later if needed.
-
-To disable SAML enforcement:
-
-1. From the Vendor Portal, select **Team > SAML Authentication**.
-
-1. Click **Deprovision SAML** in the Manage your SAML authentication pane.
-
-   ![SAML Authentication](/images/team-mgmt-saml-manage-auth.png)
-
-   [View a larger version of this image](/images/team-mgmt-saml-manage-auth.png)
-
-================
-File: docs/vendor/team-management-slack-config.mdx
-================
-import NotificationsAbout from "../partials/instance-insights/_notifications-about.mdx"
-
-# Configuring a Slack Webhook (Beta)
-
-As a vendor, anyone on your team can set up Slack notifications, which are sent to a shared Slack channel. Notifications give your team visibility into customer instance statuses and changes.
-
-<NotificationsAbout/>
-
-While email notifications are specific to each user, Slack notification settings are shared, viewable, and editable by the entire team. Any changes made by a team member impact the team.
-
-## Limitations
-
-While this feature is in Beta, the following limitations apply:
-
-- Only one Slack channel per team is supported.
-
-- RBAC policies are not supported for configuring granular permissions.
-
-## Prerequisite
-
-Create a Slack webhook URL. For more information, see [Sending Messages using Incoming Webhooks](https://api.slack.com/messaging/webhooks) in the Slack API documentation.
-
-Make sure to keep the URL secure because it contains a secret that allows write access to one or more channels in your Slack workspace.
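-
-You can verify that the webhook URL works by posting a test message to it with `curl`, following the pattern from the Slack webhook documentation. This is a quick sketch; the URL below is a placeholder for the webhook URL that Slack generates for your channel:
-
-```bash
-curl -X POST \
-  -H 'Content-type: application/json' \
-  --data '{"text": "Test message for the Vendor Portal webhook"}' \
-  https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX
-```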
-
-## Configure the Webhook in the Vendor Portal
-
-Before you can enable Slack notifications for a team, you must configure the Slack webhook in the Vendor Portal. Typically, you do this one time. Then you can configure notifications for individual customer instances.
-
-To configure the Slack webhook:
-
-1. From the Vendor Portal **[Team](https://vendor.replicated.com/team/members)** page, click **Slack Notifications**.
-
-1. On the **Slack Notifications Setup** page, paste the Slack webhook URL. Click **Save**.
-
-## Next Step
-
-[Configure Slack notifications for customer instances](instance-notifications-config).
-
-================
-File: docs/vendor/team-management-two-factor-auth.md
-================
-# Managing Two-Factor Authentication
-
-This topic describes how to enable and disable Replicated two-factor authentication for individual and team accounts in the Replicated Vendor Portal.
-
-Alternatively, you can use Google Authentication or SAML Authentication to access the Vendor Portal. For more information about those options, see [Managing Google Authentication](team-management-google-auth) and [Managing SAML Authentication](team-management-saml-auth).
-
-## About Two-Factor Authentication
-
-Two-factor authentication (2FA) provides additional security by requiring two methods of authentication to access resources and data. When you enable the 2FA option in the Vendor Portal, you are asked to provide an authentication code and your password during authentication. Replicated uses the open Time-based One-Time Password (TOTP) algorithm, which is specified by the Internet Engineering Task Force (IETF) in RFC 6238.
-
-## Limitation
-
-If SAML Authentication or Google Authentication is configured and 2FA is also enabled, then 2FA is bypassed. You can leave 2FA enabled, but you are not prompted to enter a code when logging in.
-
-## Enable 2FA on Individual Accounts
-
-If you are an administrator or if 2FA is enabled for your team, you can enable 2FA on your individual account.
-
-To enable two-factor authentication on your individual account:
-
-1. In the [Vendor Portal](https://vendor.replicated.com), click **Account Settings** from the dropdown list in the upper right corner of the screen.
-
-   <img src="/images/vendor-portal-account-settings.png" alt="Vendor portal account settings" width="200"/>
-
-   [View a larger version of this image](/images/vendor-portal-account-settings.png)
-
-1. In the **Two-Factor Authentication** pane, click **Turn on two-factor authentication**.
-
-   <img src="/images/vendor-portal-password-2fa.png" alt="Turn on 2FA in the Vendor Portal" width="600"/>
-
-   [View a larger version of this image](/images/vendor-portal-password-2fa.png)
-
-1. In the **Confirm password** dialog, enter your Vendor Portal account password. Click **Confirm password**.
-
-1. Using a supported two-factor authentication application on your mobile device, such as Google Authenticator, scan the QR code that displays. Alternatively, click **Use this text code** in the Vendor Portal to generate an alphanumeric code that you enter in the mobile application.
-
-   <img src="/images/vendor-portal-scan-qr.png" alt="Scan the 2FA QR code in the Vendor Portal" width="400"/>
-
-   [View a larger version of this image](/images/vendor-portal-scan-qr.png)
-
-   Your mobile application displays an authentication code.
-
-1. Enter the authentication code in the Vendor Portal.
-
-   Two-factor authentication is enabled and a list of recovery codes is displayed at the bottom of the **Two-Factor Authentication** pane.
-
-1. Save the recovery codes in a secure location. These codes can be used at any time (one time per code) if you lose your mobile device.
-
-1. Log out of your account, then log back in to confirm that 2FA is enabled. You are prompted to enter a one-time code generated by the application on your mobile device.
-
-## Disable 2FA on Individual Accounts
-
-To disable two-factor authentication on your individual account:
-
-1. In the [Vendor Portal](https://vendor.replicated.com), click **Account Settings** from the dropdown list in the upper right corner of the screen.
-
-   <img src="/images/vendor-portal-account-settings.png" alt="Vendor portal account settings" width="200"/>
-
-   [View a larger version of this image](/images/vendor-portal-account-settings.png)
-
-1. In the **Two-Factor Authentication** pane, click **Turn off two-factor authentication**.
-
-1. In the **Confirm password** dialog, enter your Vendor Portal account password. Click **Confirm password**.
-
-## Enable or Disable 2FA for a Team
-
-As an administrator, you can enable and disable 2FA for teams. You must first enable 2FA on your individual account before you can enable 2FA for teams. After you enable 2FA for your team, team members can enable 2FA on their individual accounts.
-
-To enable or disable 2FA for a team:
-
-1. In the [Vendor Portal](https://vendor.replicated.com), select the **Team** tab, then select **Multifactor Auth**.
-
-   <img src="/images/team-2fa-auth.png" alt="Multifactor authentication for teams in the Vendor Portal" width="600"/>
-
-   [View a larger image](/images/team-2fa-auth.png)
-
-1. On the **Multifactor Authentication** page, do one of the following with the **Require Two-Factor Authentication for all Username/Password authenticating users** toggle:
-
-   - Turn on the toggle to enable 2FA
-
-   - Turn off the toggle to disable 2FA
-
-1. Click **Save changes**.
-
-================
-File: docs/vendor/team-management.md
-================
-import CollabRepoAbout from "../partials/collab-repo/_collab-repo-about.mdx"
-import CollabRbacImportant from "../partials/collab-repo/_collab-rbac-important.mdx"
-
-# Managing Team Members
-
-This topic describes how to manage team members in the Replicated Vendor Portal, such as inviting and removing members, and editing permissions. For information about managing user access to the Replicated collab repository in GitHub, see [Managing Collab Repository Access](team-management-github-username).
-
-## Viewing Team Members
-The [Team](https://vendor.replicated.com/team/members) page provides a list of all accounts currently associated with or invited to your team. Each row contains information about the user, including their two-factor authentication (2FA) status and role-based access control (RBAC) role, and lets administrators take additional actions, such as removing members, re-inviting them, and editing permissions.
-
-<img src="/images/teams-view.png" alt="View team members list in the Vendor Portal" width="700"/>
-
-[View a larger image](/images/teams-view.png)
-
-All users, including read-only users, can see the name of the RBAC role assigned to each team member. However, when SAML authentication is enabled, users with the built-in Read Only policy cannot see the RBAC role assigned to team members.
-
-## Invite Members
-By default, team administrators can invite more team members to collaborate. Invited users receive an email to activate their account. The activation link in the email is unique to the invited user. Following the activation link in the email also ensures that the invited user joins the team from which the invitation originated.
-
-:::note
-Teams that have enforced SAML-only authentication do not use the email invitation flow described in this procedure. These teams and their users must log in through their SAML provider.
-:::
-
-To invite a new team member:
-
-1. From the [Team Members](https://vendor.replicated.com/team/members) page, click **Invite team member**.
-
-   The Invite team member dialog opens.
-
-   <img src="/images/teams-invite-member.png" alt="Invite team member dialog in the Vendor Portal" width="500"/>
-
-   [View a larger image](/images/teams-invite-member.png)
-
-1. Enter the email address of the member.
-
-1. In the **Permissions** field, assign an RBAC policy from the dropdown list.
-
-   <CollabRbacImportant/>
-
-1. Click **Invite member**.
-
-   People invited to join your team receive an email notification to accept the invitation. They must follow the link in the email to accept the invitation and join the team. If they do not have a Replicated account already, they can create one that complies with your password policies, 2FA, and Google authentication requirements. If an invited user's email address is already associated with a Replicated account, by accepting your invitation, they automatically leave their current team and join the team that you have invited them to.
-
-## Managing Invitations
-
-Invitations expire after 7 days. If a prospective member has not accepted their invitation in this time frame, you can re-invite them without having to re-enter their details. You can also remove the prospective member from the list.
-
-You must be an administrator to perform this action.
-
-To re-invite or remove a prospective member, do one of the following on the **Team Members** page:
-
-* Click **Reinvite** from the row with the user's email address, and then click **Reinvite** in the confirmation dialog.
-
-* Click **Remove** from the row with the user's email address, and then click **Delete Invitation** in the confirmation dialog.
-
-## Edit Policy Permissions
-
-You can edit the RBAC policy that is assigned to a member at any time.
-
-<CollabRbacImportant/>
-
-To edit policy permissions for individual team members:
-
-1. From the Team Members list, click **Edit permissions** next to a member's name.
-
-   :::note
-   The two-factor authentication (2FA) status displays on the **Team members** page, but it is not configured on this page. For more information about configuring 2FA, see [Managing Two-Factor Authentication](team-management-two-factor-auth).
-   :::
-
-1. Select an RBAC policy from the **Permissions** dropdown list, and click **Save**. For information about configuring the RBAC policies that display in this list, see [Configuring RBAC Policies](team-management-rbac-configuring).
- - <img src="/images/teams-edit-permissions.png" alt="Edit team member permissions in the Vendor Portal" width="400"/> - -## Enable Users to Auto-join Your Team -By default, users must be invited to your team. Team administrators can use the auto-join feature to allow users from the same email domain to join their team automatically. This applies to users registering with an email, or with Google authentication if it is enabled for the team. The auto-join feature does not apply to SAML authentication because SAML users log in using their SAML provider's application portal instead of the Vendor Portal. - -To add, edit, or delete custom RBAC policies, see [Configuring RBAC Policies](team-management-rbac-configuring). - -To enable users to auto-join your team: - -1. From the Team Members page, click **Auto-join** from the left navigation. -1. Enable the **Allow all users from my domain to be added to my team** toggle. - - <img src="/images/teams-auto-join.png" alt="Auto join dialog in the Vendor Portal" width="600"/> - - [View a larger image](/images/teams-auto-join.png) - -1. For **Default RBAC policy level for new accounts**, you can use the default Read Only policy or select another policy from the list. This RBAC policy is applied to all users who join the team with the auto-join feature. - - <CollabRbacImportant/> - - -## Remove Members and End Sessions -As a Vendor Portal team admin, you can remove team members, except for the account you are currently logged in with. - -If the team member that you remove added their GitHub username to their Account Settings page in the Vendor Portal to access the Replicated collab repository, then the Vendor Portal also automatically removes their username from the collab repository. For more information, see [Managing Collab Repository Access](team-management-github-username). - -SAML-created users must be removed using this method to expire their existing sessions because Replicated does not support System for Cross-domain Identity Management (SCIM). - -To remove a member: - -1. From the Team Members page, click **Remove** on the right side of a user's row. - -1. Click **Remove** in the confirmation dialog. - - The member is removed. All of their current user sessions are deleted and their next attempt at communicating with the server logs them out of their browser's session. - - If the member added their GitHub username to the Vendor Portal to access the collab repository, then the Vendor Portal also removes their GitHub username from the collab repository. - - For Google-authenticated users, if the user's Google account is suspended or deleted, Replicated logs that user out of all Google authenticated Vendor Portal sessions within 10 minutes. The user remains in the team list, but they cannot log into the Vendor Portal unless the username and password are allowed. - -## Update Email Addresses - -:::important -Changing team member email addresses has security implications. Replicated advises that you avoid changing team member email addresses if possible. -::: - -Updating the email address for a team member requires creating a new account with the updated email address, and then deactivating the previous account. - -To update the email address for a team member: - -1. From the Team Members page, click **Invite team member**. - -1. Assign the required RBAC policies to the new user. - -1. Deactivate the previous team member account. 
- -================ -File: docs/vendor/telemetry-air-gap.mdx -================ -import AirGapTelemetry from "../partials/instance-insights/_airgap-telemetry.mdx" - -# Collecting Telemetry for Air Gap Instances - -This topic describes how to collect telemetry for instances in air gap environments. - -## Overview - -Air gap instances run in environments without outbound internet access. This limitation prevents these instances from periodically sending telemetry to the Replicated Vendor Portal through the Replicated SDK or Replicated KOTS. For more information about how the Vendor Portal collects telemetry from online (internet-connected) instances, see [About Instance and Event Data](/vendor/instance-insights-event-data#about-reporting). - -<AirGapTelemetry/> - -The following diagram demonstrates how air gap telemetry is collected and stored by the Replicated SDK in a customer environment, and then shared to the Vendor Portal in a support bundle: - -<img alt="Air gap telemetry collected by the SDK in a support bundle" src="/images/airgap-telemetry.png" width="800px"/> - -[View a larger version of this image](/images/airgap-telemetry.png) - -All support bundles uploaded to the Vendor Portal from air gap customers contribute to a comprehensive dataset, providing parity in the telemetry for air gap and online instances. Replicated recommends that you collect support bundles from air gap customers regularly (monthly or quarterly) to improve the completeness of the dataset. The Vendor Portal handles any overlapping event archives idempotently, ensuring data integrity. - -## Requirements - -Air gap telemetry has the following requirements: - -* To collect telemetry from air gap instances, one of the following must be installed in the cluster where the instance is running: - - * The Replicated SDK installed in air gap mode. See [Installing the SDK in Air Gap Environments](/vendor/replicated-sdk-airgap). - - * KOTS v1.92.1 or later - - :::note - When both the Replicated SDK and KOTS v1.92.1 or later are installed in the cluster (such as when a Helm chart that includes the SDK is installed by KOTS), both collect and store instance telemetry in their own dedicated secret, subject to the size limitation noted below. In the case of any overlapping data points, the Vendor Portal will report these data points chronologically based on their timestamp. - ::: - -* To collect custom metrics from air gap instances, the Replicated SDK must be installed in the cluster in air gap mode. See [Installing the SDK in Air Gap Environments](/vendor/replicated-sdk-airgap). - - For more information about custom metrics, see [Configuring Custom Metrics](https://docs.replicated.com/vendor/custom-metrics). - -Replicated strongly recommends that all applications include the Replicated SDK because it enables access to both standard instance telemetry and custom metrics for air gap instances. - -## Limitation - -Telemetry data is capped at 4,000 events or 1 MB per secret, whichever limit is reached first. - -When a limit is reached, the oldest events are purged until the payload is within the limit. For optimal use, consider collecting support bundles regularly (monthly or quarterly) from air gap customers. - -## Collect and View Air Gap Telemetry - -To collect telemetry from air gap instances: - -1. Ask your customer to collect a support bundle. See [Generating Support Bundles](/vendor/support-bundle-generating). - -1. 
After receiving the support bundle from your customer, go to the Vendor Portal **Customers**, **Customer Reporting**, or **Instance Details** page and upload the support bundle: - - ![upload new bundle button on instance details page](/images/airgap-upload-telemetry.png) - - The telemetry collected from the support bundle appears in the instance data shortly. Allow a few minutes for all data to be processed. - -================ -File: docs/vendor/testing-about.md -================ -import Overview from "../partials/cmx/_overview.mdx" -import SupportedClusters from "../partials/cmx/_supported-clusters-overview.mdx" - -# About Compatibility Matrix - -This topic describes Replicated Compatibility Matrix, including use cases, billing, limitations, and more. - -## Overview - -<Overview/> - -You can use Compatibility Matrix with the Replicated CLI or the Replicated Vendor Portal. For more information about how to use Compatibility Matrix, see [Using Compatibility Matrix](testing-how-to). - -### Supported Clusters - -<SupportedClusters/> - -### Billing and Credits - -Clusters created with Compatibility Matrix are billed by the minute. Per-minute billing begins when the cluster reaches a `running` status and ends when the cluster is deleted. Compatibility Matrix marks a cluster as `running` when a working kubeconfig for the cluster is accessible. - -You are billed only for the time that the cluster is in a `running` status. You are _not_ billed for the time that it takes Compatibility Matrix to create and tear down clusters, including when the cluster is in an `assigned` status. - -For more information about pricing, see [Compatibility Matrix Pricing](testing-pricing). - -To create clusters with Compatibility Matrix, you must have credits in your Vendor Portal account. -If you have a contract, you can purchase credits by logging in to the Vendor Portal and going to [**Compatibility Matrix > Buy additional credits**](https://vendor.replicated.com/compatibility-matrix). -Otherwise, to request credits, log in to the Vendor Portal and go to [**Compatibility Matrix > Request more credits**](https://vendor.replicated.com/compatibility-matrix). - -### Quotas and Capacity - -By default, Compatibility Matrix sets quotas for the capacity that can be used concurrently by each vendor portal team. These quotas are designed to ensure that Replicated maintains a minimum amount of capacity for provisioning both VM and cloud-based clusters. - -By default, the quota for cloud-based cluster distributions (AKS, GKE, EKS) is three clusters running concurrently. - -VM-based cluster distributions (such as kind, OpenShift, and Replicated Embedded Cluster) have the following default quotas: -* 32 vCPUs -* 128 GiB memory -* 800 GiB disk size - -You can request increased quotas at any time with no additional cost. To view your team's current quota and capacity usage, or to request a quota increase, go to [**Compatibility Matrix > Settings**](https://vendor.replicated.com/compatibility-matrix/settings) in the vendor portal: - -![Compatibility matrix settings page](/images/compatibility-matrix-settings.png) - -[View a larger version of this image](/images/compatibility-matrix-settings.png) - -### Cluster Status - -Clusters created with Compatibility Matrix can have the following statuses: - -* `assigned`: The cluster resources were requested and Compatibility Matrix is provisioning the cluster. You are not billed for the time that a cluster spends in the `assigned` status. 
- -* `running`: A working kubeconfig for the cluster is accessible. Billing begins when the cluster reaches a `running` status. - - Additionally, clusters are verified prior to transitioning to a `running` status. Verification includes checking that the cluster is healthy and running with the correct number of nodes, as well as passing [sonobuoy](https://sonobuoy.io/) tests in `--quick` mode. - -* `terminated`: The cluster is deleted. Billing ends when the cluster status is changed from `running` to `terminated`. - -* `error`: An error occurred when attempting to provision the cluster. - -You can view the status of clusters using the `replicated cluster ls` command. For more information, see [cluster ls](/reference/replicated-cli-cluster-ls). For an example of waiting for a cluster to reach a `running` status, see the sketch at the end of this topic. - -### Cluster Add-ons - -Replicated Compatibility Matrix enables you to extend your cluster with add-ons that your application can use, such as an AWS S3 object store. -This allows you to more easily provision dependencies required by your application. - -For more information about how to use the add-ons, see [Compatibility Matrix Cluster Add-ons](testing-cluster-addons). - -## Limitations - -Compatibility Matrix has the following limitations: - -- Clusters cannot be resized. Create another cluster if you want to make changes, such as adding another node. -- Clusters cannot be rebooted. Create another cluster if you need to reset or reboot the cluster. -- On cloud clusters, node groups are not available for every distribution. For distribution-specific details, see [Supported Compatibility Matrix Cluster Types](/vendor/testing-supported-clusters). -- Multi-node support is not available for every distribution. For distribution-specific details, see [Supported Compatibility Matrix Cluster Types](/vendor/testing-supported-clusters). -- ARM instance types are only supported on Cloud Clusters. For distribution-specific details, see [Supported Compatibility Matrix Cluster Types](/vendor/testing-supported-clusters). -- GPU instance types are only supported on Cloud Clusters. For distribution-specific details, see [Supported Compatibility Matrix Cluster Types](/vendor/testing-supported-clusters). -- There is no support for IPv6 as a single stack. Dual stack support is available on kind clusters. -- There is no support for air gap testing. -- The `cluster upgrade` feature is available only for kURL distributions. See [cluster upgrade](/reference/replicated-cli-cluster-upgrade). -- Cloud clusters do not allow for the configuration of CNI, CSI, CRI, Ingress, or other plugins, add-ons, services, and interfaces. -- The node operating systems for clusters created with Compatibility Matrix cannot be configured nor replaced with different operating systems. -- The Kubernetes scheduler for clusters created with Compatibility Matrix cannot be replaced with a different scheduler. -- Each team has a quota limit on the amount of resources that can be used simultaneously. This limit can be raised by messaging your account representative. -- Team actions with Compatibility Matrix (for example, creating and deleting clusters and requesting quota increases) are not logged and displayed in the [Vendor Team Audit Log](https://vendor.replicated.com/team/audit-log). - -For additional distribution-specific limitations, see [Supported Compatibility Matrix Cluster Types](testing-supported-clusters).
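Because billing begins only when a cluster reaches a `running` status, automated test scripts typically wait for that status before installing anything. The following is a minimal sketch of polling for a `running` status with the Replicated CLI, assuming that `jq` is installed and that the `id` and `status` field names match the JSON output of your CLI version:

```bash
#!/bin/bash
# Poll until the cluster reaches a `running` status, then proceed with tests.
# CLUSTER_ID is the ID returned by `replicated cluster create`.
# The `.id` and `.status` field names are assumptions; verify them against
# the output of `replicated cluster ls --output json` for your CLI version.
until replicated cluster ls --output json | \
  jq -e --arg id "$CLUSTER_ID" \
    'map(select(.id == $id and .status == "running")) | length > 0' >/dev/null
do
  echo "Waiting for cluster $CLUSTER_ID to reach a running status..."
  sleep 10
done
```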
- -================ -File: docs/vendor/testing-cluster-addons.md -================ -# Compatibility Matrix Cluster Add-ons (Alpha) - -This topic describes the supported cluster add-ons for Replicated Compatibility Matrix. - -## Overview - -Replicated Compatibility Matrix enables you to extend your cluster with add-ons that your application can use, such as an AWS S3 object store. -This allows you to more easily provision dependencies required by your application. - -## CLI - -The Replicated CLI can be used to [create](/reference/replicated-cli-cluster-addon-create), [manage](/reference/replicated-cli-cluster-addon-ls), and [remove](/reference/replicated-cli-cluster-addon-rm) cluster add-ons. - -## Supported Add-ons - -This section lists the supported cluster add-ons for clusters created with Compatibility Matrix. - -### object-store (Alpha) - -The Replicated cluster object store add-on can be used to create S3-compatible object store buckets for clusters (currently only AWS S3 is supported for EKS clusters). - -Assuming you already have a cluster, run the following command with the cluster ID to create an object store bucket: - -```bash -$ replicated cluster addon create object-store 4d2f7e70 --bucket-prefix mybucket -05929b24 Object Store pending {"bucket_prefix":"mybucket"} -$ replicated cluster addon ls 4d2f7e70 -ID TYPE STATUS DATA -05929b24 Object Store ready {"bucket_prefix":"mybucket","bucket_name":"mybucket-05929b24-cmx","service_account_namespace":"cmx","service_account_name":"mybucket-05929b24-cmx","service_account_name_read_only":"mybucket-05929b24-cmx-ro"} -``` - -This creates two service accounts in a namespace: one with read-write access and the other with read-only access to the object store bucket. - -Additional service accounts can be created in any namespace with access to the object store by annotating the new service account with the same `eks.amazonaws.com/role-arn` annotation found in the predefined ones (`service_account_name` and `service_account_name_read_only`). - -<table> - <tr> - <th width="35%">Type</th> - <th width="65%">Description</th> - </tr> - <tr> - <th>Supported Kubernetes Distributions</th> - <td>EKS (AWS S3)</td> - </tr> - <tr> - <th>Cost</th> - <td>Flat fee of $0.50 per bucket.</td> - </tr> - <tr> - <th>Options</th> - <td> - <ul> - <li><strong>bucket_prefix (string):</strong> A prefix for the bucket name to be created (required)</li> - </ul> - </td> - </tr> - <tr> - <th>Data</th> - <td> - <ul> - <li><strong>bucket_prefix:</strong> The prefix specified by the user for the bucket name</li> - </ul> - <ul> - <li><strong>bucket_name:</strong> The actual bucket name</li> - </ul> - <ul> - <li><strong>service_account_namespace:</strong> The namespace in which the service accounts (`service_account_name` and `service_account_name_read_only`) have been created.</li> - </ul> - <ul> - <li><strong>service_account_name:</strong> The service account name for read-write access to the bucket.</li> - </ul> - <ul> - <li><strong>service_account_name_read_only:</strong> The service account name for read-only access to the bucket.</li> - </ul> - </td> - </tr> -</table> - -================ -File: docs/vendor/testing-how-to.md -================ -import TestRecs from "../partials/ci-cd/_test-recs.mdx" -import Prerequisites from "../partials/cmx/_prerequisites.mdx" - -# Using Compatibility Matrix - -This topic describes how to use Replicated Compatibility Matrix to create ephemeral clusters.
- -## Prerequisites - -Before you can use Compatibility Matrix, you must complete the following prerequisites: - -<Prerequisites/> - -* Existing accounts must accept the TOS for the trial on the [**Compatibility Matrix**](https://vendor.replicated.com/compatibility-matrix) page in the Replicated Vendor Portal. - -## Create and Manage Clusters - -This section explains how to use Compatibility Matrix to create and manage clusters with the Replicated CLI or the Vendor Portal. - -For information about creating and managing clusters with the Vendor API v3, see the [clusters](https://replicated-vendor-api.readme.io/reference/listclusterusage) section in the Vendor API v3 documentation. - -### Create Clusters - -You can create clusters with Compatibility Matrix using the Replicated CLI or the Vendor Portal. - -#### Replicated CLI - -To create a cluster using the Replicated CLI: - -1. (Optional) View the available cluster distributions, including the supported Kubernetes versions, instance types, and maximum nodes for each distribution: - - ```bash - replicated cluster versions - ``` - For command usage, see [cluster versions](/reference/replicated-cli-cluster-versions). - -1. Run the following command to create a cluster: - - ``` - replicated cluster create --name NAME --distribution K8S_DISTRO --version K8S_VERSION --disk DISK_SIZE --instance-type INSTANCE_TYPE [--license-id LICENSE_ID] - ``` - Where: - * `NAME` is any name for the cluster. If `--name` is excluded, a name is automatically generated for the cluster. - * `K8S_DISTRO` is the Kubernetes distribution for the cluster. - * `K8S_VERSION` is the Kubernetes version for the cluster if creating a standard Cloud or VM-based cluster. If creating an Embedded Cluster or kURL cluster type, `--version` is optional: - * For Embedded Cluster types, `--version` is the latest available release on the channel by default. Otherwise, to specify a different release, set `--version` to the `Channel release sequence` value for the release. - * For kURL cluster types, `--version` is the `"latest"` kURL Installer ID by default. Otherwise, to specify a different kURL Installer, set `--version` to the kURL Installer ID. - * `DISK_SIZE` is the disk size (GiB) to request per node. - * `INSTANCE_TYPE` is the instance type to use for each node. - * (Embedded Cluster Only) `LICENSE_ID` is a valid customer license. Required to create an Embedded Cluster. - - For command usage and additional optional flags, see [cluster create](/reference/replicated-cli-cluster-create). - - **Example:** - - The following example creates a kind cluster with Kubernetes version 1.27.0, a disk size of 100 GiB, and an instance type of `r1.small`. - - ```bash - replicated cluster create --name kind-example --distribution kind --version 1.27.0 --disk 100 --instance-type r1.small - ``` - -1. Verify that the cluster was created: - - ```bash - replicated cluster ls CLUSTER_NAME - ``` - Where `CLUSTER_NAME` is the name of the cluster that you created. - - In the output of the command, you can see that the `STATUS` of the cluster is `assigned`. When the kubeconfig for the cluster is accessible, the cluster's status is changed to `running`. For more information about cluster statuses, see [Cluster Status](testing-about#cluster-status) in _About Compatibility Matrix._ - -#### Vendor Portal - -To create a cluster using the Vendor Portal: - -1. Go to [**Compatibility Matrix > Create cluster**](https://vendor.replicated.com/compatibility-matrix/create-cluster).
- - <img alt="Create a cluster page" src="/images/create-a-cluster.png" width="650px"/> - - [View a larger version of this image](/images/create-a-cluster.png) - -1. On the **Create a cluster** page, complete the following fields: - - <table> - <tr> - <th>Field</th> - <th>Description</th> - </tr> - <tr> - <td>Kubernetes distribution</td> - <td>Select the Kubernetes distribution for the cluster.</td> - </tr> - <tr> - <td>Version</td> - <td>Select the Kubernetes version for the cluster. The options available are specific to the distribution selected.</td> - </tr> - <tr> - <td>Name (optional)</td> - <td>Enter an optional name for the cluster.</td> - </tr> - <tr> - <td>Tags</td> - <td>Add one or more tags to the cluster as key-value pairs.</td> - </tr> - <tr> - <td>Set TTL</td> - <td>Select the Time to Live (TTL) for the cluster. When the TTL expires, the cluster is automatically deleted. TTL can be adjusted after cluster creation with [cluster update ttl](/reference/replicated-cli-cluster-update-ttl).</td> - </tr> - </table> - -1. For **Nodes & Nodes Groups**, complete the following fields to configure nodes and node groups for the cluster: - - <table> - <tr> - <td>Instance type</td> - <td>Select the instance type to use for the nodes in the node group. The options available are specific to the distribution selected.</td> - </tr> - <tr> - <td>Disk size</td> - <td>Select the disk size in GiB to use per node.</td> - </tr> - <tr> - <td>Nodes</td> - <td>Select the number of nodes to provision in the node group. The options available are specific to the distribution selected.</td> - </tr> - </table> - -1. (Optional) Click **Add node group** to add additional node groups. - -1. Click **Create cluster**. - - The cluster is displayed in the list of clusters on the **Compatibility Matrix** page with a status of Assigned. When the kubeconfig for the cluster is accessible, the cluster's status is changed to Running. - - :::note - If the cluster is not automatically displayed, refresh your browser window. - ::: - - <img alt="Cluster configuration dialog" src="/images/cmx-assigned-cluster.png" width="700px"/> - - [View a larger version of this image](/images/cmx-assigned-cluster.png) - -### Prepare Clusters - -For applications distributed with the Replicated Vendor Portal, the [`cluster prepare`](/reference/replicated-cli-cluster-prepare) command reduces the number of steps required to provision a cluster and then deploy a release to the cluster for testing. This is useful in continuous integration (CI) workflows that run multiple times a day. For an example workflow that uses the `cluster prepare` command, see [Recommended CI/CD Workflows](/vendor/ci-workflows). - -The `cluster prepare` command does the following: -* Creates a cluster -* Creates a release for your application based on either a Helm chart archive or a directory containing the application YAML files -* Creates a temporary customer of type `test` - :::note - Test customers created by the `cluster prepare` command are not saved in your Vendor Portal team. 
- ::: -* Installs the release in the cluster using either the Helm CLI or Replicated KOTS - -The `cluster prepare` command requires either a Helm chart archive or a directory containing the application YAML files to be installed: - -* **Install a Helm chart with the Helm CLI**: - - ```bash - replicated cluster prepare \ - --distribution K8S_DISTRO \ - --version K8S_VERSION \ - --chart HELM_CHART_TGZ - ``` - The following example creates a kind cluster and installs a Helm chart in the cluster using the `nginx-chart-0.0.14.tgz` chart archive: - ```bash - replicated cluster prepare \ - --distribution kind \ - --version 1.27.0 \ - --chart nginx-chart-0.0.14.tgz \ - --set key1=val1,key2=val2 \ - --set-string s1=val1,s2=val2 \ - --set-json j1='{"key1":"val1","key2":"val2"}' \ - --set-literal l1=val1,l2=val2 \ - --values values.yaml - ``` - -* **Install with KOTS from a YAML directory**: - - ```bash - replicated cluster prepare \ - --distribution K8S_DISTRO \ - --version K8S_VERSION \ - --yaml-dir PATH_TO_YAML_DIR - ``` - The following example creates a k3s cluster and installs an application in the cluster using the manifest files in a local directory named `config-validation`: - ```bash - replicated cluster prepare \ - --distribution k3s \ - --version 1.26 \ - --namespace config-validation \ - --shared-password password \ - --app-ready-timeout 10m \ - --yaml-dir config-validation \ - --config-values-file config-values.yaml \ - --entitlements "num_of_queues=5" - ``` - -For command usage, including additional options, see [cluster prepare](/reference/replicated-cli-cluster-prepare). - -### Access Clusters - -Compatibility Matrix provides the kubeconfig for clusters so that you can access clusters with the kubectl command line tool. For more information, see [Command line tool (kubectl)](https://kubernetes.io/docs/reference/kubectl/) in the Kubernetes documentation. - -To access a cluster from the command line: - -1. Verify that the cluster is in a Running state: - - ```bash - replicated cluster ls - ``` - In the output of the command, verify that the `STATUS` for the target cluster is `running`. For command usage, see [cluster ls](/reference/replicated-cli-cluster-ls). - -1. Run the following command to open a new shell session with the kubeconfig configured for the cluster: - - ```bash - replicated cluster shell CLUSTER_ID - ``` - Where `CLUSTER_ID` is the unique ID for the running cluster that you want to access. - - For command usage, see [cluster shell](/reference/replicated-cli-cluster-shell). - -1. Verify that you can interact with the cluster through kubectl by running a command. For example: - - ```bash - kubectl get ns - ``` - -1. Press Ctrl-D or type `exit` when done to end the shell and the connection to the server. - -### Upgrade Clusters (kURL Only) - -For kURL clusters provisioned with Compatibility Matrix, you can use the `cluster upgrade` command to upgrade the version of the kURL installer specification used to provision the cluster. A recommended use case for the `cluster upgrade` command is for testing your application's compatibility with Kubernetes API resource version migrations after upgrade. - -The following example upgrades a kURL cluster from its previous version to version `9d5a44c`: - -```bash -replicated cluster upgrade cabb74d5 --version 9d5a44c -``` - -For command usage, see [cluster upgrade](/reference/replicated-cli-cluster-upgrade). - -### Delete Clusters - -You can delete clusters using the Replicated CLI or the Vendor Portal.
- -#### Replicated CLI - -To delete a cluster using the Replicated CLI: - -1. Get the ID of the target cluster: - - ``` - replicated cluster ls - ``` - In the output of the command, copy the ID for the cluster. - - **Example:** - - ``` - ID NAME DISTRIBUTION VERSION STATUS CREATED EXPIRES - 1234abc My Test Cluster eks 1.27 running 2023-10-09 17:08:01 +0000 UTC - - ``` - - For command usage, see [cluster ls](/reference/replicated-cli-cluster-ls). - -1. Run the following command: - - ``` - replicated cluster rm CLUSTER_ID - ``` - Where `CLUSTER_ID` is the ID of the target cluster. - For command usage, see [cluster rm](/reference/replicated-cli-cluster-rm). -1. Confirm that the cluster was deleted: - ``` - replicated cluster ls CLUSTER_ID --show-terminated - ``` - Where `CLUSTER_ID` is the ID of the target cluster. - In the output of the command, you can see that the `STATUS` of the cluster is `terminated`. For command usage, see [cluster ls](/reference/replicated-cli-cluster-ls). -#### Vendor Portal - -To delete a cluster using the Vendor Portal: - -1. Go to **Compatibility Matrix**. - -1. Under **Clusters**, in the vertical dots menu for the target cluster, click **Delete cluster**. - - <img alt="Delete cluster button" src="/images/cmx-delete-cluster.png" width="700px"/> - - [View a larger version of this image](/images/cmx-delete-cluster.png) - -## About Using Compatibility Matrix with CI/CD - -Replicated recommends that you integrate Compatibility Matrix into your existing CI/CD workflow to automate the process of creating clusters to install your application and run tests. For more information, including additional best practices and recommendations for CI/CD, see [About Integrating with CI/CD](/vendor/ci-overview). - -### Replicated GitHub Actions - -Replicated maintains a set of custom GitHub actions that are designed to replace repetitive tasks related to using Compatibility Matrix and distributing applications with Replicated. - -If you use GitHub Actions as your CI/CD platform, you can include these custom actions in your workflows rather than using Replicated CLI commands. Integrating the Replicated GitHub actions into your CI/CD pipeline helps you quickly build workflows with the required inputs and outputs, without needing to manually create the required CLI commands for each step. - -To view all the available GitHub actions that Replicated maintains, see the [replicatedhq/replicated-actions](https://github.com/replicatedhq/replicated-actions/) repository in GitHub. - -For more information, see [Integrating Replicated GitHub Actions](/vendor/ci-workflows-github-actions). - -### Recommended Workflows - -Replicated recommends that you maintain unique CI/CD workflows for development (continuous integration) and for releasing your software (continuous delivery). For example development and release workflows that integrate Compatibility Matrix for testing, see [Recommended CI/CD Workflows](/vendor/ci-workflows). - -### Test Script Recommendations - -Incorporating code tests into your CI/CD workflows is important for ensuring that developers receive quick feedback and can make updates in small iterations. Replicated recommends that you create and run all of the following test types as part of your CI/CD workflows: - -<TestRecs/> - -================ -File: docs/vendor/testing-ingress.md -================ -# Accessing Your Application - -This topic describes the networking options for accessing applications deployed on clusters created with Replicated Compatibility Matrix. 
It also describes how to use and manage Compatibility Matrix tunnels. - -## Networking Options - -After deploying your application into Compatibility Matrix clusters, you will want to execute your tests using your own test runner. -In order to do this, you need to access your application. -Compatibility Matrix offers several methods for accessing your application. - -Some standard Kubernetes networking options are available, but vary based on the distribution. -For VM-based distributions, there is no default network route into the cluster, making inbound connections challenging to create. - -### Port Forwarding -Port forwarding is a low-cost and portable mechanism to access your application. -Port forwarding works on all clusters supported by Compatibility Matrix because the connection is initiated from the client, over the Kubernetes API server port. -If you have a single service or pod and are not worried about complex routing, this is a good mechanism. -The basic steps are to connect the port-forward, execute your tests against localhost, and then shut down the port-forward. - -### LoadBalancer -If your application is only running on cloud services (EKS, GKE, AKS) you can create a service of type `LoadBalancer`. -This will provision the cloud-provider specific load balancer. -The `LoadBalancer` service will be fulfilled by the in-tree Kubernetes functionality that's integrated with the underlying cloud provider. -You can then query the service definition using `kubectl` and connect to and execute your tests over the `LoadBalancer` IP address. - -### Ingress -Ingress is a good way to recreate customer-representative environments, but the problem remains of how to get inbound access to the IP address that the ingress controller allocates. -Ingress is also not perfectly portable; each ingress controller might require different annotations in the ingress resource to work properly. -Supported ingress controllers vary based on the distribution. -Compatibility Matrix supports ingress controllers that are running as a `NodePort` service. - -### Compatibility Matrix Tunnels -All VM-based Compatibility Matrix clusters support tunneling traffic into a `NodePort` service. -When this option is used, Replicated is responsible for creating the DNS record and TLS certs. -Replicated will route traffic from `:443` and/or `:80` into the `NodePort` service you defined. For more information about using tunnels, see [Managing Compatibility Matrix Tunnels](#manage-nodes) below. - -The following diagram shows how the traffic is routed into the service using Compatibility Matrix tunnels: - -<img src="/images/compatibility-matrix-ingress.png" alt="Compatibility Matrix ingress"></img> - -[View a larger version of this image](/images/compatibility-matrix-ingress.png) - -## Managing Compatibility Matrix Tunnels {#manage-nodes} - -Tunnels are viewed, created, and removed using the Compatibility Matrix UI within Vendor Portal, the Replicated CLI, GitHub Actions, or directly with the Vendor API v3. There is no limit to the number of tunnels you can create for a cluster and multiple tunnels can connect to a single service, if desired. - -### Limitations - -Compatibility Matrix tunnels have the following limitations: -* One tunnel can only connect to one service. If you need fanout routing into different services, consider installing the nginx ingress controller as a `NodePort` service and exposing it (see the example below). -* Tunnels are not supported for cloud distributions (EKS, GKE, AKS).
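For example, the following is a minimal sketch of installing the nginx ingress controller as a `NodePort` service with Helm and then exposing it through a tunnel. The pinned node port values are illustrative; the `controller.service.*` values come from the upstream ingress-nginx chart and should be verified against the chart version you use:

```bash
# Install ingress-nginx as a NodePort service with deterministic node ports,
# so the node port is known before the tunnel is created.
helm upgrade --install ingress-nginx ingress-nginx \
  --repo https://kubernetes.github.io/ingress-nginx \
  --namespace ingress-nginx --create-namespace \
  --set controller.service.type=NodePort \
  --set controller.service.nodePorts.http=32456 \
  --set controller.service.nodePorts.https=32443

# Expose the HTTP node port through a Compatibility Matrix tunnel.
replicated cluster port expose CLUSTER_ID \
  --port 32456 \
  --protocol http \
  --protocol https
```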
- -### Supported Protocols - -A tunnel can support one or more protocols. -The supported protocols are HTTP, HTTPS, WS and WSS. -GRPC and other protocols are not routed into the cluster. - -### Exposing Ports -Once you have a node port available on the cluster, you can use the Replicated CLI to expose the node port to the public internet. -This can be used multiple times on a single cluster. - -Optionally, you can specify the `--wildcard` flag to expose this port with wildcard DNS and TLS certificate. -This feature adds extra time to provision the port, so it should only be used if necessary. - -```bash -replicated cluster port expose \ - [cluster id] \ - --port [node port] \ - --protocol [protocol] \ - --wildcard -``` - -For example, if you have the nginx ingress controller installed and the node port is 32456: - -```bash -% replicated cluster ls -ID NAME DISTRIBUTION VERSION STATUS -1e616c55 tender_ishizaka k3s 1.29.2 running - -% replicated cluster port expose \ - 1e616c55 \ - --port 32456 \ - --protocol http \ - --protocol https \ - --wildcard -``` - -:::note -You can expose a node port that does not yet exist in the cluster. -This is useful if you have a deterministic node port, but need the DNS name as a value in your Helm chart. -::: - -### Viewing Ports -To view all exposed ports, use the Replicated CLI `port ls` subcommand with the cluster ID: - -```bash -% replicated cluster port ls 1e616c55 -ID CLUSTER PORT PROTOCOL EXPOSED PORT WILDCARD STATUS -d079b2fc 32456 http http://happy-germain.ingress.replicatedcluster.com true ready - -d079b2fc 32456 https https://happy-germain.ingress.replicatedcluster.com true ready -``` - -### Removing Ports -Exposed ports are automatically deleted when a cluster terminates. -If you want to remove a port (and the associated DNS records and TLS certs) prior to cluster termination, run the `port rm` subcommand with the cluster ID: - -```bash -% replicated cluster port rm 1e616c55 --id d079b2fc -``` - -You can remove just one protocol, or all. -Removing all protocols also removes the DNS record and TLS cert. - -================ -File: docs/vendor/testing-pricing.mdx -================ -# Compatibility Matrix Pricing - -This topic describes the pricing for Replicated Compatibility Matrix. - -## Pricing Overview - -Compatibility Matrix usage-based pricing includes a $0.50 per cluster startup cost, plus by the minute pricing based on instance size and count (starting at the time the cluster state changed to "running" and ending when the cluster is either expired (TTL) or removed). Minutes will be rounded up, so there will be a minimum charge of $0.50 plus 1 minute for all running clusters. Each cluster's cost will be rounded up to the nearest cent and subtracted from the available credits in the team account. Remaining credit balance is viewable on the Replicated Vendor Portal [Cluster History](https://vendor.replicated.com/compatibility-matrix/history) page or with the Vendor API v3 [/vendor/v3/cluster/stats](https://replicated-vendor-api.readme.io/reference/getclusterstats) endpoint. Cluster [add-ons](/vendor/testing-cluster-addons) may incur additional charges. - -If the team's available credits are insufficient to run the cluster for the full duration of the TTL, the cluster creation will be rejected. - -## Cluster Quotas - -Each team is limited by the number of clusters that they can run concurrently. To increase the quota, reach out to your account manager. 
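As a worked example of the pricing described in the overview above, take an `r1.small` cluster at $0.096 per hour (see the VM cluster pricing table below). If the cluster is in a `running` status for 22.5 minutes, it is billed for 23 minutes: $0.50 + 23 × ($0.096 / 60) ≈ $0.5368, which is rounded up to $0.54 and deducted from the team's available credits.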
- -## VM Cluster Pricing (OpenShift, RKE2, k3s, kind, Embedded Cluster, kURL) - -VM-based clusters approximately match the AWS m6i instance type pricing. - -<table> - <tr> - <th width="25%">Instance Type</th> - <th width="25%">VCPUs</th> - <th width="25%">Memory (GiB)</th> - <th width="25%">USD/Credit per hour</th> - </tr> - <tr> - <td>r1.small</td> - <td>2</td> - <td>8</td> - <td>$0.096</td> - </tr> - <tr> - <td>r1.medium</td> - <td>4</td> - <td>16</td> - <td>$0.192</td> - </tr> - <tr> - <td>r1.large</td> - <td>8</td> - <td>32</td> - <td>$0.384</td> - </tr> - <tr> - <td>r1.xlarge</td> - <td>16</td> - <td>64</td> - <td>$0.768</td> - </tr> - <tr> - <td>r1.2xlarge</td> - <td>32</td> - <td>128</td> - <td>$1.536</td> - </tr> -</table> - -## Cloud Cluster Pricing - -### AWS EKS Cluster Pricing - -AWS clusters will be charged AWS pricing plus a markup of 20%. Note that the markup will be calculated at the rounded price per hour in order to make hourly prices fixed. Pricing for Extended Support EKS versions (those Kubernetes versions considered deprecated by upstream Kubernetes) will have additional charges applied. - -<table> - <tr> - <th width="25%">Instance Type</th> - <th width="25%">VCPUs</th> - <th width="25%">Memory (GiB)</th> - <th width="25%">USD/Credit per hour</th> - </tr> - <tr> - <td>m6i.large</td> - <td>2</td> - <td>8</td> - <td>$0.115</td> - </tr> - <tr> - <td>m6i.xlarge</td> - <td>4</td> - <td>16</td> - <td>$0.230</td> - </tr> - <tr> - <td>m6i.2xlarge</td> - <td>8</td> - <td>32</td> - <td>$0.461</td> - </tr> - <tr> - <td>m6i.4xlarge</td> - <td>16</td> - <td>64</td> - <td>$0.922</td> - </tr> - <tr> - <td>m6i.8xlarge</td> - <td>32</td> - <td>128</td> - <td>$1.843</td> - </tr> -<tr> - <td>m7i.large</td> - <td>2</td> - <td>8</td> - <td>$0.121</td> - </tr> - <tr> - <td>m7i.xlarge</td> - <td>4</td> - <td>16</td> - <td>$0.242</td> - </tr> - <tr> - <td>m7i.2xlarge</td> - <td>8</td> - <td>32</td> - <td>$0.484</td> - </tr> - <tr> - <td>m7i.4xlarge</td> - <td>16</td> - <td>64</td> - <td>$0.968</td> - </tr> - <tr> - <td>m7i.8xlarge</td> - <td>32</td> - <td>128</td> - <td>$1.935</td> - </tr> - <tr> - <td>m5.large</td> - <td>2</td> - <td>8</td> - <td>$0.115</td> - </tr> - <tr> - <td>m5.xlarge</td> - <td>4</td> - <td>16</td> - <td>$0.230</td> - </tr> - <tr> - <td>m5.2xlarge</td> - <td>8</td> - <td>32</td> - <td>$0.461</td> - </tr> - <tr> - <td>m5.4xlarge</td> - <td>16</td> - <td>64</td> - <td>$0.922</td> - </tr> - <tr> - <td>m5.8xlarge</td> - <td>32</td> - <td>128</td> - <td>$1.843</td> - </tr> - <tr> - <td>m7g.large</td> - <td>2</td> - <td>8</td> - <td>$0.098</td> - </tr> - <tr> - <td>m7g.xlarge</td> - <td>4</td> - <td>16</td> - <td>$0.195</td> - </tr> - <tr> - <td>m7g.2xlarge</td> - <td>8</td> - <td>32</td> - <td>$0.392</td> - </tr> - <tr> - <td>m7g.4xlarge</td> - <td>16</td> - <td>64</td> - <td>$0.784</td> - </tr> - <tr> - <td>m7g.8xlarge</td> - <td>32</td> - <td>128</td> - <td>$1.567</td> - </tr> - <tr> - <td>c5.large</td> - <td>2</td> - <td>4</td> - <td>$0.102</td> - </tr> - <tr> - <td>c5.xlarge</td> - <td>4</td> - <td>8</td> - <td>$0.204</td> - </tr> - <tr> - <td>c5.2xlarge</td> - <td>8</td> - <td>16</td> - <td>$0.408</td> - </tr> - <tr> - <td>c5.4xlarge</td> - <td>16</td> - <td>32</td> - <td>$0.816</td> - </tr> - <tr> - <td>c5.9xlarge</td> - <td>36</td> - <td>72</td> - <td>$1.836</td> - </tr> - <tr> - <td>g4dn.xlarge</td> - <td>4</td> - <td>16</td> - <td>$0.631</td> - </tr> - <tr> - <td>g4dn.2xlarge</td> - <td>8</td> - <td>32</td> - <td>$0.902</td> - </tr> - <tr> - <td>g4dn.4xlarge</td> 
- <td>16</td> - <td>64</td> - <td>$1.445</td> - </tr> - <tr> - <td>g4dn.8xlarge</td> - <td>32</td> - <td>128</td> - <td>$2.611</td> - </tr> - <tr> - <td>g4dn.12xlarge</td> - <td>48</td> - <td>192</td> - <td>$4.964</td> - </tr> - <tr> - <td>g4dn.16xlarge</td> - <td>64</td> - <td>256</td> - <td>$5.222</td> - </tr> -</table> - -### GCP GKE Cluster Pricing - -GCP clusters will be charged GCP list pricing plus a markup of 20%. Note that the markup will be calculated at the rounded price per hour in order to make hourly prices fixed. - -<table> - <tr> - <th width="25%">Instance Type</th> - <th width="25%">VCPUs</th> - <th width="25%">Memory (GiB)</th> - <th width="25%">USD/Credit per hour</th> - </tr> - <tr> - <td>n2-standard-2</td> - <td>2</td> - <td>8</td> - <td>$0.117</td> - </tr> - <tr> - <td>n2-standard-4</td> - <td>4</td> - <td>16</td> - <td>$0.233</td> - </tr> - <tr> - <td>n2-standard-8</td> - <td>8</td> - <td>32</td> - <td>$0.466</td> - </tr> - <tr> - <td>n2-standard-16</td> - <td>16</td> - <td>64</td> - <td>$0.932</td> - </tr> - <tr> - <td>n2-standard-32</td> - <td>32</td> - <td>128</td> - <td>$1.865</td> - </tr> - <tr> - <td>t2a-standard-2</td> - <td>2</td> - <td>8</td> - <td>$0.092</td> - </tr> - <tr> - <td>t2a-standard-4</td> - <td>4</td> - <td>16</td> - <td>$0.185</td> - </tr> - <tr> - <td>t2a-standard-8</td> - <td>8</td> - <td>32</td> - <td>$0.370</td> - </tr> - <tr> - <td>t2a-standard-16</td> - <td>16</td> - <td>64</td> - <td>$0.739</td> - </tr> - <tr> - <td>t2a-standard-32</td> - <td>32</td> - <td>128</td> - <td>$1.478</td> - </tr> - <tr> - <td>t2a-standard-48</td> - <td>48</td> - <td>192</td> - <td>$2.218</td> - </tr> - <tr> - <td>e2-standard-2</td> - <td>2</td> - <td>8</td> - <td>$0.081</td> - </tr> - <tr> - <td>e2-standard-4</td> - <td>4</td> - <td>16</td> - <td>$0.161</td> - </tr> - <tr> - <td>e2-standard-8</td> - <td>8</td> - <td>32</td> - <td>$0.322</td> - </tr> - <tr> - <td>e2-standard-16</td> - <td>16</td> - <td>64</td> - <td>$0.643</td> - </tr> - <tr> - <td>e2-standard-32</td> - <td>32</td> - <td>128</td> - <td>$1.287</td> - </tr> - <tr> - <td>n1-standard-1+nvidia-tesla-t4+1</td> - <td>1</td> - <td>3.75</td> - <td>$0.321</td> - </tr> - <tr> - <td>n1-standard-1+nvidia-tesla-t4+2</td> - <td>1</td> - <td>3.75</td> - <td>$0.585</td> - </tr> - <tr> - <td>n1-standard-1+nvidia-tesla-t4+4</td> - <td>1</td> - <td>3.75</td> - <td>$1.113</td> - </tr> - <tr> - <td>n1-standard-2+nvidia-tesla-t4+1</td> - <td>2</td> - <td>7.50</td> - <td>$0.378</td> - </tr> - <tr> - <td>n1-standard-2+nvidia-tesla-t4+2</td> - <td>2</td> - <td>7.50</td> - <td>$0.642</td> - </tr> - <tr> - <td>n1-standard-2+nvidia-tesla-t4+4</td> - <td>2</td> - <td>7.50</td> - <td>$1.170</td> - </tr> - <tr> - <td>n1-standard-4+nvidia-tesla-t4+1</td> - <td>4</td> - <td>15</td> - <td>$0.492</td> - </tr> - <tr> - <td>n1-standard-4+nvidia-tesla-t4+2</td> - <td>4</td> - <td>15</td> - <td>$0.756</td> - </tr> - <tr> - <td>n1-standard-4+nvidia-tesla-t4+4</td> - <td>4</td> - <td>15</td> - <td>$1.284</td> - </tr> - <tr> - <td>n1-standard-8+nvidia-tesla-t4+1</td> - <td>8</td> - <td>30</td> - <td>$0.720</td> - </tr> - <tr> - <td>n1-standard-8+nvidia-tesla-t4+2</td> - <td>8</td> - <td>30</td> - <td>$0.984</td> - </tr> - <tr> - <td>n1-standard-8+nvidia-tesla-t4+4</td> - <td>8</td> - <td>30</td> - <td>$1.512</td> - </tr> - <tr> - <td>n1-standard-16+nvidia-tesla-t4+1</td> - <td>16</td> - <td>60</td> - <td>$1.176</td> - </tr> - <tr> - <td>n1-standard-16+nvidia-tesla-t4+2</td> - <td>16</td> - <td>60</td> - <td>$1.440</td> - </tr> - <tr> - 
<td>n1-standard-16+nvidia-tesla-t4+4</td> - <td>16</td> - <td>60</td> - <td>$1.968</td> - </tr> - <tr> - <td>n1-standard-32+nvidia-tesla-t4+1</td> - <td>32</td> - <td>120</td> - <td>$2.088</td> - </tr> - <tr> - <td>n1-standard-32+nvidia-tesla-t4+2</td> - <td>32</td> - <td>120</td> - <td>$2.352</td> - </tr> - <tr> - <td>n1-standard-32+nvidia-tesla-t4+4</td> - <td>32</td> - <td>120</td> - <td>$2.880</td> - </tr> - <tr> - <td>n1-standard-64+nvidia-tesla-t4+1</td> - <td>64</td> - <td>240</td> - <td>$3.912</td> - </tr> - <tr> - <td>n1-standard-64+nvidia-tesla-t4+2</td> - <td>64</td> - <td>240</td> - <td>$4.176</td> - </tr> - <tr> - <td>n1-standard-64+nvidia-tesla-t4+4</td> - <td>64</td> - <td>240</td> - <td>$4.704</td> - </tr> - <tr> - <td>n1-standard-96+nvidia-tesla-t4+1</td> - <td>96</td> - <td>360</td> - <td>$5.736</td> - </tr> - <tr> - <td>n1-standard-96+nvidia-tesla-t4+2</td> - <td>96</td> - <td>360</td> - <td>$6.000</td> - </tr> - <tr> - <td>n1-standard-96+nvidia-tesla-t4+4</td> - <td>96</td> - <td>360</td> - <td>$6.528</td> - </tr> -</table> - -### Azure AKS Cluster Pricing - -Azure clusters will be charged Azure list pricing plus a markup of 20%. Note that the markup will be calculated at the rounded price per hour in order to make hourly prices fixed. - -<table> - <tr> - <th width="25%">Instance Type</th> - <th width="15%">VCPUs</th> - <th width="15%">Memory (GiB)</th> - <th width="15%">Rate</th> - <th width="15%">List Price</th> - <th width="15%">USD/Credit per hour</th> - </tr> - <tr> - <td>Standard_B2ms</td> - <td>2</td> - <td>8</td> - <td>8320</td> - <td>$0.083</td> - <td>$0.100</td> - </tr> - <tr> - <td>Standard_B4ms</td> - <td>4</td> - <td>16</td> - <td>16600</td> - <td>$0.166</td> - <td>$0.199</td> - </tr> - <tr> - <td>Standard_B8ms</td> - <td>8</td> - <td>32</td> - <td>33300</td> - <td>$0.333</td> - <td>$0.400</td> - </tr> - <tr> - <td>Standard_B16ms</td> - <td>16</td> - <td>64</td> - <td>66600</td> - <td>$0.666</td> - <td>$0.799</td> - </tr> - <tr> - <td>Standard_DS2_v2</td> - <td>2</td> - <td>7</td> - <td>14600</td> - <td>$0.146</td> - <td>$0.175</td> - </tr> - <tr> - <td>Standard_DS3_v2</td> - <td>4</td> - <td>14</td> - <td>29300</td> - <td>$0.293</td> - <td>$0.352</td> - </tr> - <tr> - <td>Standard_DS4_v2</td> - <td>8</td> - <td>28</td> - <td>58500</td> - <td>$0.585</td> - <td>$0.702</td> - </tr> - <tr> - <td>Standard_DS5_v2</td> - <td>16</td> - <td>56</td> - <td>117000</td> - <td>$1.170</td> - <td>$1.404</td> - </tr> - <tr> - <td>Standard_D2ps_v5</td> - <td>2</td> - <td>8</td> - <td>14600</td> - <td>$0.077</td> - <td>$0.092</td> - </tr> - <tr> - <td>Standard_D4ps_v5</td> - <td>4</td> - <td>16</td> - <td>7700</td> - <td>$0.154</td> - <td>$0.185</td> - </tr> - <tr> - <td>Standard_D8ps_v5</td> - <td>8</td> - <td>32</td> - <td>15400</td> - <td>$0.308</td> - <td>$0.370</td> - </tr> - <tr> - <td>Standard_D16ps_v5</td> - <td>16</td> - <td>64</td> - <td>30800</td> - <td>$0.616</td> - <td>$0.739</td> - </tr> - <tr> - <td>Standard_D32ps_v5</td> - <td>32</td> - <td>128</td> - <td>61600</td> - <td>$1.232</td> - <td>$1.478</td> - </tr> - <tr> - <td>Standard_D48ps_v5</td> - <td>48</td> - <td>192</td> - <td>23200</td> - <td>$1.848</td> - <td>$2.218</td> - </tr> - <tr> - <td>Standard_NC4as_T4_v3</td> - <td>4</td> - <td>28</td> - <td>52600</td> - <td>$0.526</td> - <td>$0.631</td> - </tr> - <tr> - <td>Standard_NC8as_T4_v3</td> - <td>8</td> - <td>56</td> - <td>75200</td> - <td>$0.752</td> - <td>$0.902</td> - </tr> - <tr> - <td>Standard_NC16as_T4_v3</td> - <td>16</td> - <td>110</td> - 
<td>120400</td> - <td>$1.204</td> - <td>$1.445</td> - </tr> - <tr> - <td>Standard_NC64as_T4_v3</td> - <td>64</td> - <td>440</td> - <td>435200</td> - <td>$4.352</td> - <td>$5.222</td> - </tr> - <tr> - <td>Standard_D2S_v5</td> - <td>2</td> - <td>8</td> - <td>9600</td> - <td>$0.096</td> - <td>$0.115</td> - </tr> - <tr> - <td>Standard_D4S_v5</td> - <td>4</td> - <td>16</td> - <td>19200</td> - <td>$0.192</td> - <td>$0.230</td> - </tr> - <tr> - <td>Standard_D8S_v5</td> - <td>8</td> - <td>32</td> - <td>38400</td> - <td>$0.384</td> - <td>$0.461</td> - </tr> - <tr> - <td>Standard_D16S_v5</td> - <td>16</td> - <td>64</td> - <td>76800</td> - <td>$0.768</td> - <td>$0.922</td> - </tr> - <tr> - <td>Standard_D32S_v5</td> - <td>32</td> - <td>128</td> - <td>153600</td> - <td>$1.536</td> - <td>$1.843</td> - </tr> - <tr> - <td>Standard_D64S_v5</td> - <td>64</td> - <td>192</td> - <td>230400</td> - <td>$2.304</td> - <td>$2.765</td> - </tr> -</table> - -### Oracle OKE Cluster Pricing - -Oracle based clusters will be charged Oracle list pricing plus a markup of 20%. Note that the markup will be calculated at the rounded price per hour in order to make hourly prices fixed. - -<table> - <tr> - <th width="25%">Instance Type</th> - <th width="25%">VCPUs</th> - <th width="25%">Memory (GiB)</th> - <th width="25%">USD/Credit per hour</th> - </tr> - <tr> - <td>VM.Standard2.1</td> - <td>1</td> - <td>15</td> - <td>$0.076</td> - </tr> - <tr> - <td>VM.Standard2.2</td> - <td>2</td> - <td>30</td> - <td>$0.153</td> - </tr> - <tr> - <td>VM.Standard2.4</td> - <td>4</td> - <td>60</td> - <td>$0.306</td> - </tr> - <tr> - <td>VM.Standard2.8</td> - <td>8</td> - <td>120</td> - <td>$0.612</td> - </tr> - <tr> - <td>VM.Standard2.16</td> - <td>16</td> - <td>240</td> - <td>$1.225</td> - </tr> - <tr> - <td>VM.Standard3Flex.1</td> - <td>1</td> - <td>4</td> - <td>$0.055</td> - </tr> - <tr> - <td>VM.Standard3Flex.2</td> - <td>2</td> - <td>8</td> - <td>$0.110</td> - </tr> - <tr> - <td>VM.Standard3Flex.4</td> - <td>4</td> - <td>16</td> - <td>$0.221</td> - </tr> - <tr> - <td>VM.Standard3Flex.8</td> - <td>8</td> - <td>32</td> - <td>$0.442</td> - </tr> - <tr> - <td>VM.Standard3Flex.16</td> - <td>16</td> - <td>64</td> - <td>$0.883</td> - </tr> - <tr> - <td>VM.Standard.A1.Flex.1</td> - <td>1</td> - <td>4</td> - <td>$0.019</td> - </tr> - <tr> - <td>VM.Standard.A1.Flex.2</td> - <td>2</td> - <td>8</td> - <td>$0.038</td> - </tr> - <tr> - <td>VM.Standard.A1.Flex.4</td> - <td>4</td> - <td>16</td> - <td>$0.077</td> - </tr> - <tr> - <td>VM.Standard.A1.Flex.8</td> - <td>8</td> - <td>32</td> - <td>$0.154</td> - </tr> - <tr> - <td>VM.Standard.A1.Flex.16</td> - <td>16</td> - <td>64</td> - <td>$0.309</td> - </tr> -</table> - -Last modified January 06, 2025 - -================ -File: docs/vendor/testing-supported-clusters.md -================ -import Pool from "../partials/cmx/\_openshift-pool.mdx" - -# Supported Compatibility Matrix Cluster Types - -This topic describes the supported Kubernetes distributions, Kubernetes versions, instance types, nodes, limitations, and common use cases for clusters created with Replicated Compatibility Matrix. - -Compatibility Matrix provisions cloud-based or virtual machine (VM) clusters. - -## VM Clusters - -This section lists the supported VM cluster distributions for clusters created with Compatibility Matrix. - -### kind - -Compatibility Matrix supports creating [kind](https://kind.sigs.k8s.io/) clusters. 
- -<table> - <tr> - <th width="35%">Type</th> - <th width="65%">Description</th> - </tr> - <tr> - <th>Supported Kubernetes Versions</th> - <td>{/* START_kind_VERSIONS */}1.26.15, 1.27.16, 1.28.15, 1.29.14, 1.30.10, 1.31.6, 1.32.2{/* END_kind_VERSIONS */}</td> - </tr> - <tr> - <th>Supported Instance Types</th> - <td>See <a href="#types">Replicated Instance Types</a></td> - </tr> - <tr> - <th>Node Groups</th> - <td>No</td> - </tr> - <tr> - <th>Node Auto Scaling</th> - <td>No</td> - </tr> - <tr> - <th>Nodes</th> - <td>Supports a single node.</td> - </tr> - <tr> - <th>IP Family</th> - <td>Supports `ipv4` or `dual`.</td> - </tr> - <tr> - <th>Limitations</th> - <td>See <a href="testing-about#limitations">Limitations</a></td> - </tr> - <tr> - <th>Common Use Cases</th> - <td>Smoke tests</td> - </tr> -</table> - -### k3s - -Compatibility Matrix supports creating [k3s](https://k3s.io) clusters. - -<table> - <tr> - <th width="35%">Type</th> - <th width="65%">Description</th> - </tr> - <tr> - <th>Supported k3s Versions</th> - <td>The upstream k8s version that matches the Kubernetes version requested.</td> - </tr> - <tr> - <th>Supported Kubernetes Versions</th> - <td>{/* START_k3s_VERSIONS */}1.24.1, 1.24.2, 1.24.3, 1.24.4, 1.24.6, 1.24.7, 1.24.8, 1.24.9, 1.24.10, 1.24.11, 1.24.12, 1.24.13, 1.24.14, 1.24.15, 1.24.16, 1.24.17, 1.25.0, 1.25.2, 1.25.3, 1.25.4, 1.25.5, 1.25.6, 1.25.7, 1.25.8, 1.25.9, 1.25.10, 1.25.11, 1.25.12, 1.25.13, 1.25.14, 1.25.15, 1.25.16, 1.26.0, 1.26.1, 1.26.2, 1.26.3, 1.26.4, 1.26.5, 1.26.6, 1.26.7, 1.26.8, 1.26.9, 1.26.10, 1.26.11, 1.26.12, 1.26.13, 1.26.14, 1.26.15, 1.27.1, 1.27.2, 1.27.3, 1.27.4, 1.27.5, 1.27.6, 1.27.7, 1.27.8, 1.27.9, 1.27.10, 1.27.11, 1.27.12, 1.27.13, 1.27.14, 1.27.15, 1.27.16, 1.28.1, 1.28.2, 1.28.3, 1.28.4, 1.28.5, 1.28.6, 1.28.7, 1.28.8, 1.28.9, 1.28.10, 1.28.11, 1.28.12, 1.28.13, 1.28.14, 1.28.15, 1.29.0, 1.29.1, 1.29.2, 1.29.3, 1.29.4, 1.29.5, 1.29.6, 1.29.7, 1.29.8, 1.29.9, 1.29.10, 1.29.11, 1.29.12, 1.29.13, 1.29.14, 1.30.0, 1.30.1, 1.30.2, 1.30.3, 1.30.4, 1.30.5, 1.30.6, 1.30.7, 1.30.8, 1.30.9, 1.30.10, 1.31.0, 1.31.1, 1.31.2, 1.31.3, 1.31.4, 1.31.5, 1.31.6, 1.32.0, 1.32.1, 1.32.2{/* END_k3s_VERSIONS */}</td> - </tr> - <tr> - <th>Supported Instance Types</th> - <td>See <a href="#types">Replicated Instance Types</a></td> - </tr> - <tr> - <th>Node Groups</th> - <td>Yes</td> - </tr> - <tr> - <th>Node Auto Scaling</th> - <td>No</td> - </tr> - <tr> - <th>Nodes</th> - <td>Supports multiple nodes.</td> - </tr> - <tr> - <th>IP Family</th> - <td>Supports `ipv4`.</td> - </tr> - <tr> - <th>Limitations</th> - <td>For additional limitations that apply to all distributions, see <a href="testing-about#limitations">Limitations</a>.</td> - </tr> - <tr> - <th>Common Use Cases</th> - <td><ul><li>Smoke tests</li><li>Customer release tests</li></ul></td> - </tr> -</table> - -### RKE2 (Beta) - -Compatibility Matrix supports creating [RKE2](https://docs.rke2.io/) clusters. 
- -<table> - <tr> - <th width="35%">Type</th> - <th width="65%">Description</th> - </tr> - <tr> - <th>Supported RKE2 Versions</th> - <td>The upstream k8s version that matches the Kubernetes version requested.</td> - </tr> - <tr> - <th>Supported Kubernetes Versions</th> - <td>{/* START_rke2_VERSIONS */}1.24.1, 1.24.2, 1.24.3, 1.24.4, 1.24.6, 1.24.7, 1.24.8, 1.24.9, 1.24.10, 1.24.11, 1.24.12, 1.24.13, 1.24.14, 1.24.15, 1.24.16, 1.24.17, 1.25.0, 1.25.2, 1.25.3, 1.25.4, 1.25.5, 1.25.6, 1.25.7, 1.25.8, 1.25.9, 1.25.10, 1.25.11, 1.25.12, 1.25.13, 1.25.14, 1.25.15, 1.25.16, 1.26.0, 1.26.1, 1.26.2, 1.26.3, 1.26.4, 1.26.5, 1.26.6, 1.26.7, 1.26.8, 1.26.9, 1.26.10, 1.26.11, 1.26.12, 1.26.13, 1.26.14, 1.26.15, 1.27.1, 1.27.2, 1.27.3, 1.27.4, 1.27.5, 1.27.6, 1.27.7, 1.27.8, 1.27.9, 1.27.10, 1.27.11, 1.27.12, 1.27.13, 1.27.14, 1.27.15, 1.27.16, 1.28.2, 1.28.3, 1.28.4, 1.28.5, 1.28.6, 1.28.7, 1.28.8, 1.28.9, 1.28.10, 1.28.11, 1.28.12, 1.28.13, 1.28.14, 1.28.15, 1.29.0, 1.29.1, 1.29.2, 1.29.3, 1.29.4, 1.29.5, 1.29.6, 1.29.7, 1.29.8, 1.29.9, 1.29.10, 1.29.11, 1.29.12, 1.29.13, 1.29.14, 1.30.0, 1.30.1, 1.30.2, 1.30.3, 1.30.4, 1.30.5, 1.30.6, 1.30.7, 1.30.8, 1.30.9, 1.30.10, 1.31.0, 1.31.1, 1.31.2, 1.31.3, 1.31.4, 1.31.5, 1.31.6, 1.32.0, 1.32.1, 1.32.2{/* END_rke2_VERSIONS */}</td> - </tr> - <tr> - <th>Supported Instance Types</th> - <td>See <a href="#types">Replicated Instance Types</a></td> - </tr> - <tr> - <th>Node Groups</th> - <td>Yes</td> - </tr> - <tr> - <th>Node Auto Scaling</th> - <td>No</td> - </tr> - <tr> - <th>Nodes</th> - <td>Supports multiple nodes.</td> - </tr> - <tr> - <th>IP Family</th> - <td>Supports `ipv4`.</td> - </tr> - <tr> - <th>Limitations</th> - <td>For additional limitations that apply to all distributions, see <a href="testing-about#limitations">Limitations</a>.</td> - </tr> - <tr> - <th>Common Use Cases</th> - <td><ul><li>Smoke tests</li><li>Customer release tests</li></ul></td> - </tr> -</table> - -### OpenShift OKD - -Compatibility Matrix supports creating [Red Hat OpenShift OKD](https://www.okd.io/) clusters, which is the community distribution of OpenShift, using CodeReady Containers (CRC). - -OpenShift clusters are provisioned with two users: - -- (Default) A `kubeadmin` user with `cluster-admin` privileges. Use the `kubeadmin` user only for administrative tasks such as creating new users or setting roles. -- A `developer` user with namespace-scoped privileges. The `developer` user can be used to better simulate access in end-customer environments. - -By default, kubeconfig context is set to the `kubeadmin` user. To switch to the `developer` user, run the command `oc login --username developer`.
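For example, a quick way to switch to the `developer` user and confirm which user the current session is using (a minimal sketch using standard `oc` commands):

```bash
# Switch from the default kubeadmin context to the namespace-scoped developer user
oc login --username developer

# Confirm which user the current session is authenticated as
oc whoami
```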
- -<table> - <tr> - <th width="35%">Type</th> - <th width="65%">Description</th> - </tr> - <tr> - <th>Supported OpenShift Versions</th> - <td>{/* START_openshift_VERSIONS */}4.10.0-okd, 4.11.0-okd, 4.12.0-okd, 4.13.0-okd, 4.14.0-okd, 4.15.0-okd, 4.16.0-okd, 4.17.0-okd{/* END_openshift_VERSIONS */}</td> - </tr> - <tr> - <th>Supported Instance Types</th> - <td>See <a href="#types">Replicated Instance Types</a></td> - </tr> - <tr> - <th>Node Groups</th> - <td>Yes</td> - </tr> - <tr> - <th>Node Auto Scaling</th> - <td>No</td> - </tr> - <tr> - <th>Nodes</th> - <td>Supports multiple nodes for versions 4.13.0-okd and later.</td> - </tr> - <tr> - <th>IP Family</th> - <td>Supports `ipv4`.</td> - </tr> - <tr> - <th>Limitations</th> - <td> - <ul> - <li>OpenShift does not support r1.small instance types.</li> - <li>OpenShift versions earlier than 4.13-okd do not have a registry mirror and so may be subject to rate limiting from Docker Hub. For information about Docker Hub rate limiting, see <a href="https://docs.docker.com/docker-hub/download-rate-limit/">Docker Hub rate limit</a>. To increase limits, Replicated recommends that you configure an image pull secret to pull public Docker Hub images as an authenticated user. For more information about how to configure image pull secrets, see <a href="https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/">Pull an Image from a Private Registry</a> in the Kubernetes documentation.</li> - <li> - <p>OpenShift builds take approximately 17 minutes.</p> - <p><Pool/></p> - </li> - </ul> - <p>For additional limitations that apply to all distributions, see <a href="testing-about#limitations">Limitations</a>.</p> - </td> - </tr> - <tr> - <th>Common Use Cases</th> - <td>Customer release tests</td> - </tr> -</table> - -### Embedded Cluster - -Compatibility Matrix supports creating clusters with Replicated Embedded Cluster. For more information, see [Embedded Cluster Overview](/vendor/embedded-overview). - -<table> - <tr> - <th width="35%">Type</th> - <th width="65%">Description</th> - </tr> - <tr> - <th>Supported Embedded Cluster Versions</th> - <td> - Any valid release sequence that has previously been promoted to the channel where the customer license is assigned. - Version is optional and defaults to the latest available release on the channel. - </td> - </tr> - <tr> - <th>Supported Instance Types</th> - <td>See <a href="#types">Replicated Instance Types</a></td> - </tr> - <tr> - <th>Node Groups</th> - <td>Yes</td> - </tr> - <tr> - <th>Nodes</th> - <td>Supports multiple nodes (alpha).</td> - </tr> - <tr> - <th>IP Family</th> - <td>Supports `ipv4`.</td> - </tr> - <tr> - <th>Limitations</th> - <td> - <ul> - <li>The Admin Console UI is not exposed publicly and must be exposed via `kubectl -n kotsadm port-forward svc/kurl-proxy-kotsadm 38800:8800`. The password for the Admin Console is `password`.</li> - <li><strong>A valid customer license is required to create an Embedded Cluster.</strong></li> - <li>The [cluster prepare](/vendor/testing-how-to#prepare-clusters) command is not supported.</li> - </ul> - <p>For additional limitations that apply to all distributions, see <a href="testing-about#limitations">Limitations</a>.</p> - </td> - </tr> - <tr> - <th>Common Use Cases</th> - <td>Customer release tests</td> - </tr> -</table> - -### kURL - -Compatibility Matrix supports creating [kURL](https://kurl.sh) clusters. 
- -<table> - <tr> - <th width="35%">Type</th> - <th width="65%">Description</th> - </tr> - <tr> - <th>Supported kURL Versions</th> - <td>Any promoted kURL installer. Version is optional. For an installer version other than "latest", you can find the specific Installer ID for a previously promoted installer under the relevant **Install Command** (ID after kurl.sh/) on the **Channels > kURL Installer History** page in the Vendor Portal. For more information about viewing the history of kURL installers promoted to a channel, see [Installer History](/vendor/installer-history).</td> - </tr> - <tr> - <th>Supported Instance Types</th> - <td>See <a href="#types">Replicated Instance Types</a></td> - </tr> - <tr> - <th>Node Groups</th> - <td>Yes</td> - </tr> - <tr> - <th>Node Auto Scaling</th> - <td>No</td> - </tr> - <tr> - <th>Nodes</th> - <td>Supports multiple nodes.</td> - </tr> - <tr> - <th>IP Family</th> - <td>Supports `ipv4`.</td> - </tr> - <tr> - <th>Limitations</th> - <td><p>Does not work with the <a href="https://kurl.sh/docs/add-ons/longhorn">Longhorn add-on</a>.</p><p>For additional limitations that apply to all distributions, see <a href="testing-about#limitations">Limitations</a>.</p></td> - </tr> - <tr> - <th>Common Use Cases</th> - <td>Customer release tests</td> - </tr> -</table> - -## Cloud Clusters - -This section lists the supported cloud clusters for compatibility testing. - -### EKS - -Compatibility Matrix supports creating [AWS EKS](https://aws.amazon.com/eks/?nc2=type_a) clusters. - -<table> - <tr> - <th width="35%">Type</th> - <th width="65%">Description</th> - </tr> - <tr> - <th>Supported Kubernetes Versions</th> - <td><p>{/* START_eks_VERSIONS */}1.25, 1.26, 1.27, 1.28, 1.29, 1.30, 1.31, 1.32{/* END_eks_VERSIONS */}</p><p>Extended Support Versions: 1.25, 1.26, 1.27, 1.28</p></td> - </tr> - <tr> - <th>Supported Instance Types</th> - <td><p>m6i.large, m6i.xlarge, m6i.2xlarge, m6i.4xlarge, m6i.8xlarge, m7i.large, m7i.xlarge, m7i.2xlarge, m7i.4xlarge, m7i.8xlarge, m5.large, m5.xlarge, m5.2xlarge, - m5.4xlarge, m5.8xlarge, m7g.large (arm), m7g.xlarge (arm), m7g.2xlarge (arm), m7g.4xlarge (arm), m7g.8xlarge (arm), c5.large, c5.xlarge, c5.2xlarge, c5.4xlarge, - c5.9xlarge, g4dn.xlarge (gpu), g4dn.2xlarge (gpu), g4dn.4xlarge (gpu), g4dn.8xlarge (gpu), g4dn.12xlarge (gpu), g4dn.16xlarge (gpu)</p><p>g4dn instance types depend on available capacity. After a g4dn cluster is running, you also need to install your version of the NVIDIA device plugin for Kubernetes. See [Amazon EKS optimized accelerated Amazon Linux AMIs](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html#gpu-ami) in the AWS documentation.</p></td> - </tr> - <tr> - <th>Node Groups</th> - <td>Yes</td> - </tr> - <tr> - <th>Node Auto Scaling</th> - <td>Yes. Cost will be based on the max number of nodes.</td> - </tr> - <tr> - <th>Nodes</th> - <td>Supports multiple nodes.</td> - </tr> - <tr> - <th>IP Family</th> - <td>Supports `ipv4`.</td> - </tr> - <tr> - <th>Limitations</th> - <td><p>You can only choose a minor version, not a patch version. The EKS installer chooses the latest patch for that minor version.</p><p>For additional limitations that apply to all distributions, see <a href="testing-about#limitations">Limitations</a>.</p></td> - </tr> - <tr> - <th>Common Use Cases</th> - <td>Customer release tests</td> - </tr> -</table> - -### GKE - -Compatibility Matrix supports creating [Google GKE](https://cloud.google.com/kubernetes-engine) clusters. 
- -<table> - <tr> - <th width="35%">Type</th> - <th width="65%">Description</th> - </tr> - <tr> - <th>Supported Kubernetes Versions</th> - <td>{/* START_gke_VERSIONS */}1.29, 1.30, 1.31, 1.32{/* END_gke_VERSIONS */}</td> - </tr> - <tr> - <th>Supported Instance Types</th> - <td><p>n2-standard-2, n2-standard-4, n2-standard-8, n2-standard-16, n2-standard-32, t2a-standard-2 (arm), t2a-standard-4 (arm), t2a-standard-8 (arm), t2a-standard-16 (arm), t2a-standard-32 (arm), t2a-standard-48 (arm), e2-standard-2, e2-standard-4, e2-standard-8, e2-standard-16, e2-standard-32, n1-standard-1+nvidia-tesla-t4+1 (gpu), n1-standard-1+nvidia-tesla-t4+2 (gpu), n1-standard-1+nvidia-tesla-t4+4 (gpu), n1-standard-2+nvidia-tesla-t4+1 (gpu), n1-standard-2+nvidia-tesla-t4+2 (gpu), n1-standard-2+nvidia-tesla-t4+4 (gpu), n1-standard-4+nvidia-tesla-t4+1 (gpu), n1-standard-4+nvidia-tesla-t4+2 (gpu), n1-standard-4+nvidia-tesla-t4+4 (gpu), n1-standard-8+nvidia-tesla-t4+1 (gpu), n1-standard-8+nvidia-tesla-t4+2 (gpu), n1-standard-8+nvidia-tesla-t4+4 (gpu), n1-standard-16+nvidia-tesla-t4+1 (gpu), n1-standard-16+nvidia-tesla-t4+2 (gpu), n1-standard-16+nvidia-tesla-t4+4 (gpu), n1-standard-32+nvidia-tesla-t4+1 (gpu), n1-standard-32+nvidia-tesla-t4+2 (gpu), n1-standard-32+nvidia-tesla-t4+4 (gpu), n1-standard-64+nvidia-tesla-t4+1 (gpu), n1-standard-64+nvidia-tesla-t4+2 (gpu), n1-standard-64+nvidia-tesla-t4+4 (gpu), n1-standard-96+nvidia-tesla-t4+1 (gpu), n1-standard-96+nvidia-tesla-t4+2 (gpu), n1-standard-96+nvidia-tesla-t4+4 (gpu)</p><p>You can specify more than one node.</p></td> - </tr> - <tr> - <th>Node Groups</th> - <td>Yes</td> - </tr> - <tr> - <th>Node Auto Scaling</th> - <td>Yes. Cost will be based on the max number of nodes.</td> - </tr> - <tr> - <th>Nodes</th> - <td>Supports multiple nodes.</td> - </tr> - <tr> - <th>IP Family</th> - <td>Supports `ipv4`.</td> - </tr> - <tr> - <th>Limitations</th> - <td><p>You can choose only a minor version, not a patch version. The GKE installer chooses the latest patch for that minor version.</p><p>For additional limitations that apply to all distributions, see <a href="testing-about#limitations">Limitations</a>.</p></td> - </tr> - <tr> - <th>Common Use Cases</th> - <td>Customer release tests</td> - </tr> -</table> - -### AKS - -Compatibility Matrix supports creating [Azure AKS](https://azure.microsoft.com/en-us/products/kubernetes-service) clusters. - -<table> - <tr> - <th width="35%">Type</th> - <th width="65%">Description</th> - </tr> - <tr> - <th>Supported Kubernetes Versions</th> - <td>{/* START_aks_VERSIONS */}1.29, 1.30, 1.31{/* END_aks_VERSIONS */}</td> - </tr> - <tr> - <th>Supported Instance Types</th> - <td><p>Standard_B2ms, Standard_B4ms, Standard_B8ms, Standard_B16ms, Standard_DS2_v2, Standard_DS3_v2, Standard_DS4_v2, Standard_DS5_v2, Standard_DS2_v5, Standard_DS3_v5, Standard_DS4_v5, Standard_DS5_v5, Standard_D2ps_v5 (arm), Standard_D4ps_v5 (arm), Standard_D8ps_v5 (arm), Standard_D16ps_v5 (arm), Standard_D32ps_v5 (arm), Standard_D48ps_v5 (arm), Standard_NC4as_T4_v3 (gpu), Standard_NC8as_T4_v3 (gpu), Standard_NC16as_T4_v3 (gpu), Standard_NC64as_T4_v3 (gpu)</p><p>GPU instance types depend on available capacity. After a GPU cluster is running, you also need to install your version of the NVIDIA device plugin for Kubernetes. 
See [NVIDIA GPU Operator with Azure Kubernetes Service](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/microsoft-aks.html) in the NVIDIA documentation.</p></td>
-  </tr>
-  <tr>
-    <th>Node Groups</th>
-    <td>Yes</td>
-  </tr>
-  <tr>
-    <th>Node Auto Scaling</th>
-    <td>Yes. Cost will be based on the max number of nodes.</td>
-  </tr>
-  <tr>
-    <th>Nodes</th>
-    <td>Supports multiple nodes.</td>
-  </tr>
-  <tr>
-    <th>IP Family</th>
-    <td>Supports `ipv4`.</td>
-  </tr>
-  <tr>
-    <th>Limitations</th>
-    <td><p>You can choose only a minor version, not a patch version. The AKS installer chooses the latest patch for that minor version.</p><p>For additional limitations that apply to all distributions, see <a href="testing-about#limitations">Limitations</a>.</p></td>
-  </tr>
-  <tr>
-    <th>Common Use Cases</th>
-    <td>Customer release tests</td>
-  </tr>
-</table>
-
-### OKE (Beta)
-
-Compatibility Matrix supports creating [Oracle Container Engine for Kubernetes (OKE)](https://docs.oracle.com/en-us/iaas/Content/ContEng/Concepts/contengoverview.htm) clusters.
-
-<table>
-  <tr>
-    <th width="35%">Type</th>
-    <th width="65%">Description</th>
-  </tr>
-  <tr>
-    <th>Supported Kubernetes Versions</th>
-    <td>{/* START_oke_VERSIONS */}1.29.1, 1.30.1, 1.31.1{/* END_oke_VERSIONS */}</td>
-  </tr>
-  <tr>
-    <th>Supported Instance Types</th>
-    <td><p>VM.Standard2.1, VM.Standard2.2, VM.Standard2.4, VM.Standard2.8, VM.Standard2.16, VM.Standard3.Flex.1, VM.Standard3.Flex.2, VM.Standard3.Flex.4, VM.Standard3.Flex.8, VM.Standard3.Flex.16, VM.Standard.A1.Flex.1 (arm), VM.Standard.A1.Flex.2 (arm), VM.Standard.A1.Flex.4 (arm), VM.Standard.A1.Flex.8 (arm), VM.Standard.A1.Flex.16 (arm)</p></td>
-  </tr>
-  <tr>
-    <th>Node Groups</th>
-    <td>Yes</td>
-  </tr>
-  <tr>
-    <th>Node Auto Scaling</th>
-    <td>No</td>
-  </tr>
-  <tr>
-    <th>Nodes</th>
-    <td>Supports multiple nodes.</td>
-  </tr>
-  <tr>
-    <th>IP Family</th>
-    <td>Supports `ipv4`.</td>
-  </tr>
-  <tr>
-    <th>Limitations</th>
-    <td><p>Provisioning an OKE cluster takes between 8 and 10 minutes. If needed, you might have to adjust some timeouts in your CI pipelines.</p><p>For additional limitations that apply to all distributions, see <a href="testing-about#limitations">Limitations</a>.</p></td>
-  </tr>
-  <tr>
-    <th>Common Use Cases</th>
-    <td>Customer release tests</td>
-  </tr>
-</table>
-
-## Replicated Instance Types {#types}
-
-When creating a VM-based cluster with Compatibility Matrix, you must specify a Replicated instance type.
-
-<table>
-  <tr>
-    <th width="30%">Type</th>
-    <th width="35%">Memory (GiB)</th>
-    <th width="35%">VCPU Count</th>
-  </tr>
-  <tr>
-    <th>r1.small</th>
-    <td>8</td>
-    <td>2</td>
-  </tr>
-  <tr>
-    <th>r1.medium</th>
-    <td>16</td>
-    <td>4</td>
-  </tr>
-  <tr>
-    <th>r1.large</th>
-    <td>32</td>
-    <td>8</td>
-  </tr>
-  <tr>
-    <th>r1.xlarge</th>
-    <td>64</td>
-    <td>16</td>
-  </tr>
-  <tr>
-    <th>r1.2xlarge</th>
-    <td>128</td>
-    <td>32</td>
-  </tr>
-</table>
-
-## Kubernetes Version Support Policy
-
-We do not maintain forks or patches of the supported distributions. When a Kubernetes version in Compatibility Matrix reaches end of life (EOL), Replicated attempts to continue supporting that version for six months of compatibility testing, to help vendors whose customers are running out-of-date versions of Kubernetes. If a critical security issue or bug is found and remains unresolved, we might discontinue support for an EOL version of Kubernetes before the end of that six-month window.
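-
-To see which Kubernetes versions are currently available for a given distribution, you can ask Compatibility Matrix directly from the Replicated CLI. The `--distribution` filter shown here is an assumption; check `replicated cluster versions --help` for the exact flags:
-
-```
-replicated cluster versions --distribution k3s
-```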
-
-================
-File: docs/vendor/tutorial-adding-db-config.md
-================
-# Example: Adding Database Configuration Options
-
-In this tutorial, we'll explore ways to give your end user the option to either embed a database instance with the application, or connect your application to an external database instance that they will manage.
-We'll use a PostgreSQL database as an example, configuring an example app to connect.
-
-This tutorial explores advanced topics like workload coordination, credential management, and refactoring your application's user-facing configuration in the Replicated Admin Console. We'll also review best practices for integrating persistent stores like databases, queues, and caches.
-
-It is split into four sections:
-
-- [The Example Application](#the-example-application)
-- [User-Facing Configuration](#user-facing-configuration)
-- [Embedding a Database](#embedding-a-database)
-- [Connecting to an External Database](#connecting-to-an-external-database)
-
-### Prerequisites
-
-This guide assumes you have:
-
-* A running instance of the Replicated Admin Console (`kotsadm`) to iterate against in either an existing cluster or an embedded cluster created with Replicated kURL. If you do not have a running instance of the Admin Console in an existing or kURL cluster, complete the [Install with KOTS in an Existing Cluster](tutorial-cli-setup) tutorial to package and install a sample application.
-* A local git checkout of your application manifests.
-
-### Accompanying Code Examples
-
-A full example of the code for this guide can be found in the [kotsapps repository](https://github.com/replicatedhq/kotsapps/tree/master/postgres-snapshots).
-
-* * *
-
-## The Example Application
-
-For demonstration purposes, we'll use a simple app that connects to a Postgres database via the `psql` CLI.
-Once you've finished this guide, you should feel confident replacing it with any Kubernetes workload(s) that need to connect to a database.
-The deployment we'll use can be seen below:
-
-```yaml
-# pg-consumer.yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: pg-consumer
-spec:
-  selector:
-    matchLabels:
-      app: pg-consumer
-  template:
-    metadata:
-      labels:
-        app: pg-consumer
-    spec:
-      containers:
-      - name: pg-consumer
-        image: postgres:10
-        # connect to the database every 20 seconds
-        command:
-        - /bin/sh
-        - -ec
-        - |
-          while :; do
-            sleep 20
-            PGPASSWORD=${DB_PASSWORD} \
-            psql --host ${DB_HOST} \
-                 --port ${DB_PORT} \
-                 --user ${DB_USER} \
-                 --dbname ${DB_NAME} \
-                 --command 'SELECT NOW()'
-          done
-        # hard coded for now, we'll wire these up later
-        env:
-        - name: DB_HOST
-          value: postgres
-        - name: DB_PORT
-          value: "5432"
-        - name: DB_USER
-          value: postgres
-        - name: DB_PASSWORD
-          value: postgres
-        - name: DB_NAME
-          value: postgres
-```
-
-This app simply connects to the database every 20 seconds and writes the server timestamp to stdout.
-Even though `psql` supports [default environment variables](https://www.postgresql.org/docs/current/libpq-envars.html) for host, username, and so on that can be read transparently, we're intentionally using these generic `DB_` variables for clarity.
-Later, you can change these environment variable names to whatever format your application consumes.
-
-For now we'll hard-code the DB variable values; in the next sections we'll wire them up to the user-provided configuration.
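-
-As an aside, if you would rather lean on those libpq defaults than generic names, the equivalent `env` block would use the variable names `psql` reads natively. This is only an illustration of the mapping, not a change we'll make in this guide:
-
-```yaml
-# hypothetical alternative: psql reads these libpq variables without any flags
-env:
-  - name: PGHOST
-    value: postgres
-  - name: PGPORT
-    value: "5432"
-  - name: PGUSER
-    value: postgres
-  - name: PGPASSWORD
-    value: postgres
-  - name: PGDATABASE
-    value: postgres
-```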
-
-
-### Deploying the Example Application
-
-Once you've added this deployment to your application's `manifests` directory, create a release by running `replicated release create --auto` locally.
-Then head to the Admin Console instance and click **Check for Updates** on the Version History tab to pull the new release:
-
-![View Update](/images/guides/kots/view-update.png)
-
-Click **Deploy**. You should be able to review the logs and see `deployment.apps/pg-consumer created` in `applyStdout`:
-
-![Deployed PG Consumer](/images/guides/kots/pg-consumer-deployed.png)
-
-After it is deployed, you can run `kubectl get pods` to inspect the cluster.
-We should expect the Pod to be crashlooping at this point, since there's no database to connect to just yet:
-
-```text
-$ kubectl get pod
-NAME                               READY   STATUS             RESTARTS   AGE
-kotsadm-5bbf54df86-p7kqg           1/1     Running            0          12m
-kotsadm-api-cbccb97ff-b6qxp        1/1     Running            2          12m
-kotsadm-minio-0                    1/1     Running            0          12m
-kotsadm-operator-84477b5c4-tplcp   1/1     Running            0          12m
-kotsadm-postgres-0                 1/1     Running            0          12m
-pg-consumer-75f49bfb69-mljr6       0/1     CrashLoopBackOff   1          10s
-```
-
-Checking the logs, we should see a connect error:
-
-```text
-$ kubectl logs -l app=pg-consumer
-psql: could not translate host name "postgres" to address: Name or service not known
-```
-
-If the `kubectl logs` command hangs, you can try using the `--previous` flag to fetch the logs of the most recent crash:
-
-```text
-$ kubectl logs -l app=pg-consumer --previous
-psql: could not translate host name "postgres" to address: Name or service not known
-```
-
-Now that our test app is deployed, we'll walk through presenting options to the end user for connecting a Postgres instance to this app.
-
-* * *
-
-## User-Facing Configuration
-
-The core of this guide is how to give your end users the option to do one of the following:
-
-* Bring their own PostgreSQL instance for your app to connect to
-* Use an "embedded" database bundled in with the application
-
-The first step here is to present that option to the user; then we'll walk through implementing each scenario.
-The `kots.io/v1beta1` `Config` resource controls what configuration options are presented to the end user.
-If you followed one of the "Getting Started" guides, you probably have a `config.yaml` in your manifests that looks something like the following YAML file:
-
-```yaml
-apiVersion: kots.io/v1beta1
-kind: Config
-metadata:
-  name: config-sample
-spec:
-  groups:
-    - name: example_settings
-      title: My Example Config
-      description: Configuration to serve as an example for creating your own. See [https://kots.io/reference/v1beta1/config/](https://kots.io/reference/v1beta1/config/) for configuration docs. In this case, we provide example fields for configuring an Ingress object.
-      items:
-      - name: use_ingress
-        title: Use Ingress?
-        help_text: An example field to toggle inclusion of an Ingress Object
-        type: bool
-        default: "0"
-      - name: ingress_hostname
-        title: Ingress Hostname
-        help_text: If desired, enter the hostname for ingress to this application. You can enter the IP of this instance, or a DNS hostname.
-        type: text
-        when: repl{{ ConfigOptionEquals "use_ingress" "1" }}
-```
-
-To add a database section, we'll modify it to include some database settings.
-In this case, we'll remove the Ingress toggle that is included as an example, although you might also choose to leave it in. None of these database settings will have any effect yet, but we'll still be able to preview what the end user will see.
-
-Modify your YAML to include this database section:
-
-```yaml
-apiVersion: kots.io/v1beta1
-kind: Config
-metadata:
-  name: config-sample
-spec:
-  groups:
-    - name: database
-      title: Database
-      items:
-      - name: postgres_type
-        help_text: Would you like to use an embedded postgres instance, or connect to an external instance that you manage?
-        type: radio
-        title: Postgres
-        default: embedded_postgres
-        items:
-        - name: embedded_postgres
-          title: Embedded Postgres
-        - name: external_postgres
-          title: External Postgres
-      - name: embedded_postgres_password
-        hidden: true
-        type: password
-        value: "{{repl RandomString 32}}"
-```
-
-This creates a toggle to allow the user to choose between an embedded or external Postgres instance, and a `hidden` field to generate a unique password for the embedded instance.
-
-As mentioned in the introduction, a full example of the code for this guide can be found in the [kotsapps repository](https://github.com/replicatedhq/kotsapps/tree/master/postgres-snapshots).
-
-### Validating Config Changes
-
-Even though the options aren't wired up yet, let's create a new release to validate that the configuration screen was modified.
-Create a release by running `replicated release create --auto`.
-Then head to the Admin Console instance and click **Check for Updates** on the Version History tab to pull the new release:
-
-![View Update](/images/guides/kots/view-update.png)
-
-After the update is deployed, click the Config tab and review our new toggle.
-You might also notice that we've removed the Ingress settings to simplify things for this guide:
-
-![Database Config](/images/guides/kots/database-config.png)
-
-Now that we have the configuration screen started, we can proceed to implement the "Embedded Postgres" option.
-
-* * *
-
-## Embedding a Database
-
-To implement the embedded database option, we'll add a Kubernetes [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/), and use the [annotations for optional resources](packaging-include-resources/) to control when it will be included in the application.
-
-### Adding the Secret and StatefulSet
-
-First, we'll create a secret to store the root password for our embedded postgres instance:
-
-```yaml
-# postgres-secret.yaml
-apiVersion: v1
-kind: Secret
-metadata:
-  name: postgres
-data:
-  DB_PASSWORD: '{{repl ConfigOption "embedded_postgres_password" | Base64Encode }}'
-```
-
-Next, create a new YAML file in your `manifests` directory with the following contents.
-
-Note the use of `kots.io/when` to conditionally include this resource based on end-user inputs:
-
-```yaml
-# postgres-statefulset.yaml
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: postgres
-  labels:
-    app: pg-provider
-  annotations:
-    kots.io/when: '{{repl ConfigOptionEquals "postgres_type" "embedded_postgres" }}'
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: pg-provider
-  serviceName: postgres
-  template:
-    metadata:
-      labels:
-        app: pg-provider
-    spec:
-      containers:
-      - env:
-        - name: PGDATA
-          value: /var/lib/postgresql/data/pgdata
-        # create a db called "postgres"
-        - name: POSTGRES_DB
-          value: postgres
-        # create admin user with name "postgres"
-        - name: POSTGRES_USER
-          value: postgres
-        # use admin password from secret
-        - name: POSTGRES_PASSWORD
-          valueFrom:
-            secretKeyRef:
-              key: DB_PASSWORD
-              name: postgres
-        image: postgres:10
-        name: postgres
-        volumeMounts:
-        - mountPath: /var/lib/postgresql/data
-          name: pgdata
-      volumes:
-      - name: pgdata
-        persistentVolumeClaim:
-          claimName: pgdata
-  volumeClaimTemplates:
-  - metadata:
-      name: pgdata
-    spec:
-      accessModes:
-      - ReadWriteOnce
-      resources:
-        requests:
-          storage: 1Gi
-```
-
-Finally, let's add a Service object so we can route traffic to our postgres instance, again using `kots.io/when` to conditionally include this resource:
-
-```yaml
-# postgres-service.yaml
-apiVersion: v1
-kind: Service
-metadata:
-  name: postgres
-  labels:
-    app: pg-provider
-  annotations:
-    kots.io/when: '{{repl ConfigOptionEquals "postgres_type" "embedded_postgres" }}'
-spec:
-  ports:
-  - port: 5432
-  selector:
-    app: pg-provider
-  type: ClusterIP
-```
-
-### Validating the Embedded Database
-
-After you've added these resources, you can push a new release and update in the Admin Console.
-You should see the following in the deployment logs:
-
-![Embedded PG Deployed](/images/guides/kots/embedded-pg-deployed.png)
-
-We should now see an instance of Postgres running in our namespace as well.
-The consumer may still be crashlooping, but we can see the error is different now:
-
-```text
-$ kubectl logs -l app=pg-consumer
-psql: FATAL: password authentication failed for user "postgres"
-```
-
-This is because we still need to deliver the generated password to our workload pod.
-In `pg-consumer.yaml`, we'll remove this section:
-
-```yaml
-        - name: DB_PASSWORD
-          value: postgres
-```
-
-and replace it with:
-
-```yaml
-        - name: DB_PASSWORD
-          valueFrom:
-            secretKeyRef:
-              name: postgres
-              key: DB_PASSWORD
-```
-
-The full Deployment should now look like the following YAML file:
-
-```yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: pg-consumer
-spec:
-  selector:
-    matchLabels:
-      app: pg-consumer
-  template:
-    metadata:
-      labels:
-        app: pg-consumer
-    spec:
-      containers:
-      - name: pg-consumer
-        image: 'postgres:10'
-        # connect to the database every 20 seconds
-        command:
-        - /bin/sh
-        - -ec
-        - |
-          while :; do
-            sleep 20
-            PGPASSWORD=${DB_PASSWORD} \
-            psql --host ${DB_HOST} \
-                 --port ${DB_PORT} \
-                 --user ${DB_USER} \
-                 --dbname ${DB_NAME} \
-                 --command 'SELECT NOW()'
-          done
-        # all but DB_PASSWORD are still hard coded; we'll wire these up later
-        env:
-        - name: DB_HOST
-          value: postgres
-        - name: DB_PORT
-          value: "5432"
-        - name: DB_USER
-          value: postgres
-        - name: DB_NAME
-          value: postgres
-        - name: DB_PASSWORD
-          valueFrom:
-            secretKeyRef:
-              name: postgres
-              key: DB_PASSWORD
-```
-
-From here, make another release and deploy it.
-
-You should see the consumer pod is now able to connect to the database:
-
-```text
-$ kubectl get pod
-NAME                               READY   STATUS    RESTARTS   AGE
-kotsadm-5bbf54df86-p7kqg           1/1     Running   0          144m
-kotsadm-api-cbccb97ff-b6qxp        1/1     Running   2          144m
-kotsadm-minio-0                    1/1     Running   0          144m
-kotsadm-operator-84477b5c4-tplcp   1/1     Running   0          144m
-kotsadm-postgres-0                 1/1     Running   0          144m
-pg-consumer-77b868d7d8-xdn9v       1/1     Running   0          20s
-postgres-0                         1/1     Running   0          6m22s
-```
-
-Checking the logs, we can connect now:
-
-```text
-$ kubectl logs -l app=pg-consumer
-              now
--------------------------------
- 2020-04-12 17:11:45.019293+00
-(1 row)
-
-              now
--------------------------------
- 2020-04-12 17:11:55.072041+00
-(1 row)
-```
-
-Now that we've configured our application to read from an embedded postgres instance, we'll switch to allowing the end user to provide their own database connection parameters.
-
-* * *
-
-## Connecting to an External Database
-
-In this section, we'll expand our configuration section to allow end users to bring their own Postgres instance.
-
-### Modifying the Config Screen
-
-Let's update our config screen to allow an end user to input some details about their database.
-We'll add the following YAML, noting the use of the `when` field to conditionally hide or show fields in the user-facing config screen:
-
-```yaml
-      - name: external_postgres_host
-        title: Postgres Host
-        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
-        type: text
-        default: postgres
-      - name: external_postgres_port
-        title: Postgres Port
-        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
-        type: text
-        default: "5432"
-      - name: external_postgres_user
-        title: Postgres Username
-        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
-        type: text
-        required: true
-      - name: external_postgres_password
-        title: Postgres Password
-        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
-        type: password
-        required: true
-      - name: external_postgres_db
-        title: Postgres Database
-        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
-        type: text
-        default: postgres
-```
-
-Your full configuration screen should now look something like the following YAML file:
-
-```yaml
-apiVersion: kots.io/v1beta1
-kind: Config
-metadata:
-  name: config-sample
-spec:
-  groups:
-    - name: database
-      title: Database
-      items:
-      - name: postgres_type
-        help_text: Would you like to use an embedded postgres instance, or connect to an external instance that you manage?
-        type: radio
-        title: Postgres
-        default: embedded_postgres
-        items:
-        - name: embedded_postgres
-          title: Embedded Postgres
-        - name: external_postgres
-          title: External Postgres
-      - name: embedded_postgres_password
-        hidden: true
-        type: password
-        value: "{{repl RandomString 32}}"
-      - name: external_postgres_host
-        title: Postgres Host
-        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
-        type: text
-        default: postgres
-      - name: external_postgres_port
-        title: Postgres Port
-        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
-        type: text
-        default: "5432"
-      - name: external_postgres_user
-        title: Postgres Username
-        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
-        type: text
-        required: true
-      - name: external_postgres_password
-        title: Postgres Password
-        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
-        type: password
-        required: true
-      - name: external_postgres_db
-        title: Postgres Database
-        when: '{{repl ConfigOptionEquals "postgres_type" "external_postgres"}}'
-        type: text
-        default: postgres
-```
-
-Let's save this and create a new release. After deploying the release in the Admin Console, click **Config** and set the toggle to "External Postgres" to see the new fields.
-
-To demonstrate that these fields are wired up, let's enter some values that we know won't work, and confirm that selecting "External Postgres" removes our embedded postgres instance:
-
-![External PG Config Fake](/images/guides/kots/external-pg-config-fake.png)
-
-Save these settings, and then you'll be directed back to the Version History page to apply the change:
-
-![Deploy Config Change](/images/guides/kots/deploy-config-change.png)
-
-After this is deployed, we should see that the postgres statefulset has been removed, and that our sample application is back to failing:
-
-```text
-$ kubectl get pod
-NAME                               READY   STATUS    RESTARTS   AGE
-kotsadm-5bbf54df86-8ws98           1/1     Running   0          12m
-kotsadm-api-cbccb97ff-r7mz6        1/1     Running   2          12m
-kotsadm-minio-0                    1/1     Running   0          12m
-kotsadm-operator-84477b5c4-4gmbm   1/1     Running   0          12m
-kotsadm-postgres-0                 1/1     Running   0          12m
-pg-consumer-6bd78594d-n7nmw        0/1     Error     2          29s
-```
-
-You'll note that it is failing, but it is still using our hardcoded environment variables, not the user-entered config:
-
-```text
-$ kubectl logs -l app=pg-consumer
-psql: could not translate host name "postgres" to address: Name or service not known
-```
-
-In the next step, we'll wire the end-user configuration values into our service.
-
-### Mapping User Inputs
-
-To map the user-supplied configuration, we'll start by expanding the secret we created earlier, adding fields for additional variables and using `{{repl if ... }}` blocks to switch between embedded/external contexts.
-
-To start, add a field for the hostname, using Base64Encode. The template must be written on a single line, as shown in the following example.
-
-```yaml
-apiVersion: v1
-kind: Secret
-metadata:
-  name: postgres
-data:
-  DB_PASSWORD: '{{repl ConfigOption "embedded_postgres_password" | Base64Encode }}'
-  DB_HOST:
-    {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" }}{{repl Base64Encode "postgres" }}{{repl else}}{{repl ConfigOption "external_postgres_host" | Base64Encode }}{{repl end}}
-```
-
-Now that we have the value in our Secret, we can modify our deployment to consume it.
-Replace this text: - -```yaml - - name: DB_HOST - value: postgres -``` - -with this text: - -```yaml - - name: DB_HOST - valueFrom: - secretKeyRef: - name: postgres - key: DB_HOST -``` - -Your full deployment should look something like the following YAML file: - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: pg-consumer -spec: - selector: - matchLabels: - app: pg-consumer - template: - metadata: - labels: - app: pg-consumer - spec: - containers: - - name: pg-consumer - image: 'postgres:10' - # connect to the database every 20 seconds - command: - - /bin/sh - - -ec - - | - while :; do - sleep 20 - PGPASSWORD=${DB_PASSWORD} \ - psql --host ${DB_HOST} \ - --port ${DB_PORT} \ - --user ${DB_USER} \ - --dbname ${DB_NAME} \ - --command 'SELECT NOW()' - done - env: - - name: DB_HOST - valueFrom: - secretKeyRef: - name: postgres - key: DB_HOST - - name: DB_PORT - value: "5432" - - name: DB_USER - value: postgres - - name: DB_NAME - value: postgres - - name: DB_PASSWORD - valueFrom: - secretKeyRef: - name: postgres - key: DB_PASSWORD -``` - -From here, let's create and deploy a release, and verify that the secret has the customer-provided value, base64 decoding the secret contents: - -```text -$ kubectl get secret postgres -o yaml | head -n 4 -apiVersion: v1 -data: - DB_HOST: ZmFrZQ== - DB_PASSWORD: ajNVWDd1RnRfc0NkVTJqOFU3Q25xUkxRQk5fUlh3RjA= -``` - -You can verify we pulled in our user-provided config by base64-decoding the `DB_HOST` field: - -```text -$ echo ZmFrZQ== | base64 --decode -fake -``` - -Checking on our service itself, we can verify that it's now trying to connect to the `fake` hostname instead of `postgres`: - -```text -$ kubectl logs -l app=pg-consumer -psql: could not translate host name "fake" to address: Name or service not known -``` - -We'll optionally wire this to a real external Postgres database later, but for now we'll proceed to add the rest of the fields. - -### Extending this to All Fields - -Now that we've wired the DB_HOST field all the way through, we'll do the same for the other fields. 
-In the end, your Secret and Deployment should look like the following YAML files: - -```yaml -# postgres-secret.yaml -apiVersion: v1 -kind: Secret -metadata: - name: postgres -data: - DB_HOST: >- - {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}} - {{repl Base64Encode "postgres" }} - {{repl else -}} - {{repl ConfigOption "external_postgres_host" | Base64Encode }} - {{repl end}} - DB_PORT: >- - {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}} - {{repl Base64Encode "5432" }} - {{repl else -}} - {{repl ConfigOption "external_postgres_port" | Base64Encode }} - {{repl end}} - DB_USER: >- - {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}} - {{repl Base64Encode "postgres" }} - {{repl else -}} - {{repl ConfigOption "external_postgres_user" | Base64Encode }} - {{repl end}} - DB_PASSWORD: >- - {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}} - {{repl ConfigOption "embedded_postgres_password" | Base64Encode }} - {{repl else -}} - {{repl ConfigOption "external_postgres_password" | Base64Encode }} - {{repl end}} - DB_NAME: >- - {{repl if ConfigOptionEquals "postgres_type" "embedded_postgres" -}} - {{repl Base64Encode "postgres" }} - {{repl else -}} - {{repl ConfigOption "external_postgres_db" | Base64Encode }} - {{repl end}} -``` - -```yaml -# pg-consumer.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: pg-consumer -spec: - selector: - matchLabels: - app: pg-consumer - template: - metadata: - labels: - app: pg-consumer - spec: - containers: - - name: pg-consumer - image: 'postgres:10' - # connect to the database every 20 seconds - command: - - /bin/sh - - -ec - - | - while :; do - sleep 20 - PGPASSWORD=${DB_PASSWORD} \ - psql --host ${DB_HOST} \ - --port ${DB_PORT} \ - --user ${DB_USER} \ - --dbname ${DB_NAME} \ - --command 'SELECT NOW()' - done - env: - - name: DB_HOST - valueFrom: - secretKeyRef: - name: postgres - key: DB_HOST - - name: DB_PORT - valueFrom: - secretKeyRef: - name: postgres - key: DB_PORT - - name: DB_USER - valueFrom: - secretKeyRef: - name: postgres - key: DB_USER - - name: DB_PASSWORD - valueFrom: - secretKeyRef: - name: postgres - key: DB_PASSWORD - - name: DB_NAME - valueFrom: - secretKeyRef: - name: postgres - key: DB_NAME -``` - -Optionally, you can be extra concise and collapse each individual `env` `valueFrom` into a single `envFrom` `secretRef` entry: - -```yaml -# pg-consumer.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: pg-consumer -spec: - selector: - matchLabels: - app: pg-consumer - template: - metadata: - labels: - app: pg-consumer - spec: - containers: - - name: pg-consumer - image: 'postgres:10' - # connect to the database every 20 seconds - command: - - /bin/sh - - -ec - - | - while :; do - sleep 20 - PGPASSWORD=${DB_PASSWORD} \ - psql --host ${DB_HOST} \ - --port ${DB_PORT} \ - --user ${DB_USER} \ - --dbname ${DB_NAME} \ - --command 'SELECT NOW()' - done - envFrom: - - secretRef: - name: postgres -``` - - -After deploying this, you should see all of the fields in the secret: - -```text -$ kubectl get secret postgres -o yaml -apiVersion: v1 -data: - DB_HOST: ZmFrZQ== - DB_NAME: ZmFrZQ== - DB_PASSWORD: ZXh0cmEgZmFrZQ== - DB_PORT: NTQzMjE= - DB_USER: ZmFrZQ== -kind: Secret -# ...snip... 
-
-```
-
-We can also print the environment in our sample app to verify that all of the values are piped properly:
-
-```text
-$ kubectl exec $(kubectl get pod -l app=pg-consumer -o jsonpath='{.items[0].metadata.name}' ) -- /bin/sh -c 'printenv | grep DB_'
-DB_PORT=54321
-DB_NAME=fake
-DB_PASSWORD=extra fake
-DB_HOST=fake
-DB_USER=fake
-```
-
-### Testing Config Changes
-
-Now let's make some changes to the database credentials. In this case, we'll use a Postgres database provisioned in Amazon RDS, but you can use any external database.
-To start, head to the "Config" screen and input your values:
-
-![Real Postgres Values](/images/guides/kots/real-postgres-values.png)
-
-Let's save and apply this config and check on our pod again:
-
-```text
-$ kubectl exec $(kubectl get pod -l app=pg-consumer -o jsonpath='{.items[0].metadata.name}' ) -- /bin/sh -c 'printenv | grep DB_'
-DB_PORT=54321
-DB_NAME=fake
-DB_PASSWORD=extra fake
-DB_HOST=fake
-DB_USER=fake
-```
-
-Uh oh, it appears that our values did not get updated! If you've worked with Secrets before, you may know that there's a [long-standing issue in Kubernetes](https://github.com/kubernetes/kubernetes/issues/22368) where pods that load config from Secrets or ConfigMaps won't automatically restart when the underlying config is changed.
-There are some tricks to make this work, and in the next step we'll implement one of them, but for now we can delete the pod to verify that the configuration is being piped through to our sample application:
-
-```text
-$ kubectl delete pod -l app=pg-consumer
-pod "pg-consumer-6df9d5d7fd-bd5z6" deleted
-```
-
-If the pod is crashlooping, you might need to add `--force --grace-period 0` to force delete it.
-In either case, once a new pod starts, we should now see it loading the correct config:
-
-```text
-$ kubectl exec $(kubectl get pod -l app=pg-consumer -o jsonpath='{.items[0].metadata.name}' ) -- /bin/sh -c 'printenv | grep DB_'
-DB_PORT=5432
-DB_NAME=postgres
-DB_PASSWORD=<redacted>
-DB_HOST=10.128.0.12
-DB_USER=postgres
-```
-
-### Triggering Restarts on Changes
-
-To automate this restart on changes, we're going to use a hash of all database parameters to trigger a rolling update whenever any of those parameters change.
-We'll use a `hidden`, `readonly` field to store this in our config screen:
-
-```yaml
-      - name: external_postgres_confighash
-        hidden: true
-        readonly: true
-        type: text
-        value: '{{repl (sha256sum (print (ConfigOption "external_postgres_host") (ConfigOption "external_postgres_port") (ConfigOption "external_postgres_user") (ConfigOption "external_postgres_password") (ConfigOption "external_postgres_db") ))}}'
-```
-
-The `hidden` flag will hide it from the UI, and the `readonly` flag in this case will cause the value to be re-computed any time an upstream `ConfigOption` value changes.
-
-Next, let's add this as an annotation to our deployment's pod template at `spec.template.metadata.annotations`:
-
-```yaml
-annotations:
-  kots.io/config-hash: '{{repl ConfigOption "external_postgres_confighash"}}'
-```
-
-**Note**: There's nothing special about the `kots.io/config-hash` annotation; we could have just as easily called it `my-app-something-fake` instead.
-What matters here is that when the value in a Deployment annotation changes, it will cause Kubernetes to roll out a new version of the pod, stopping the old one and thus picking up our config changes.
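-
-If you just want to force a restart by hand while testing, you can get the same effect without the annotation:
-
-```text
-$ kubectl rollout restart deployment pg-consumer
-deployment.apps/pg-consumer restarted
-```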
-
-
-Your full deployment should now look like the following YAML file:
-
-```yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: pg-consumer
-spec:
-  selector:
-    matchLabels:
-      app: pg-consumer
-  template:
-    metadata:
-      labels:
-        app: pg-consumer
-      annotations:
-        kots.io/config-hash: '{{repl ConfigOption "external_postgres_confighash"}}'
-    spec:
-      containers:
-      - name: pg-consumer
-        image: 'postgres:10'
-        # connect to the database every 20 seconds
-        command:
-        - /bin/sh
-        - -ec
-        - |
-          while :; do
-            sleep 20
-            PGPASSWORD=${DB_PASSWORD} \
-            psql --host ${DB_HOST} \
-                 --port ${DB_PORT} \
-                 --user ${DB_USER} \
-                 --dbname ${DB_NAME} \
-                 --command 'SELECT NOW()'
-          done
-        envFrom:
-        - secretRef:
-            name: postgres
-```
-
-
-### Integrating a Real Database
-
-If you'd like, at this point you can integrate a real database in your environment by filling out your configuration fields. You'll know you did it right if your pg-consumer pod can connect.
-
-================
-File: docs/vendor/tutorial-cli-create-app.mdx
-================
-# Step 2: Create an Application
-
-After you install the Replicated CLI and create an API token, you can use the CLI to create a new application.
-
-To create an application:
-
-1. Run the following command to create an application named `cli-tutorial`:
-
-   ```
-   replicated app create cli-tutorial
-   ```
-
-   **Example output**:
-
-   ```
-   ID        NAME           SLUG           SCHEDULER
-   2GmY...   cli-tutorial   cli-tutorial   kots
-   ```
-
-1. Export the application slug in the output of the `app create` command as an environment variable:
-
-   ```
-   export REPLICATED_APP=YOUR_SLUG
-   ```
-
-   Replace `YOUR_SLUG` with the slug for the application you created in the previous step.
-
-1. Verify that both the `REPLICATED_API_TOKEN` environment variable that you created as part of [Step 1: Install the Replicated CLI](tutorial-cli-install-cli) and the `REPLICATED_APP` environment variable are set correctly:
-
-   ```
-   replicated release ls
-   ```
-
-   In the output of this command, you now see an empty list of releases for the application:
-
-   ```
-   SEQUENCE    CREATED    EDITED    ACTIVE_CHANNELS
-   ```
-
-## Next Step
-
-Continue to [Step 3: Get the Sample Manifests](tutorial-cli-manifests) to download the manifest files for a sample Kubernetes application. You will use these manifest files to create the first release for the `cli-tutorial` application.
-
-================
-File: docs/vendor/tutorial-cli-create-customer.mdx
-================
-# Step 5: Create a Customer
-
-After promoting the first release for the `cli-tutorial` application, create a customer so that you can install the application.
-
-A _customer_ is an object in the Vendor Portal that represents a single licensed user of your application. When you create a customer, you define entitlement information for the user, and the Vendor Portal generates a YAML license file for the customer that you can download.
-
-When you install the application later in this tutorial, you will upload the license file that you create in this step to allow KOTS to create the application containers.
-
-To create a customer and download the license file:
-
-1. From the `replicated-cli-tutorial` directory, create a license for a customer named `Some-Big-Bank` that is assigned to the Unstable channel and expires in 10 days:
-
-   ```
-   replicated customer create \
-     --name "Some-Big-Bank" \
-     --expires-in "240h" \
-     --channel "Unstable"
-   ```
-
-   The Unstable channel is the channel where you promoted the release in [Step 4: Create a Release](tutorial-cli-create-release). 
Assigning the customer to a channel allows them to install the releases that are promoted to that channel.
-
-   **Example output:**
-
-   ```
-   ID                            NAME            CHANNELS    EXPIRES                         TYPE
-   2GuB3VYLjU5t9vNDK6byjgiTKUs   Some-Big-Bank   Unstable    2022-11-10 14:59:49 +0000 UTC   dev
-   ```
-
-1. Verify the customer creation details:
-
-   ```
-   replicated customer ls
-   ```
-
-   **Example output:**
-
-   ```
-   ID                            NAME            CHANNELS    EXPIRES                         TYPE
-   2GuB3VYLjU5t9vNDK6byjgiTKUs   Some-Big-Bank   Unstable    2022-11-10 14:59:49 +0000 UTC   dev
-   ```
-
-1. Download the license file for the customer that you just created:
-
-   ```
-   replicated customer download-license \
-     --customer "Some-Big-Bank"
-   ```
-
-   The license downloads to `stdout`.
-
-   **Example output**:
-
-   ```
-   apiVersion: kots.io/v1beta1
-   kind: License
-   metadata:
-     name: some-big-bank
-   spec:
-     appSlug: cli-tutorial
-     channelID: 2GmYFUFzj8JOSLYw0jAKKJKFua8
-     channelName: Unstable
-     customerName: Some-Big-Bank
-     endpoint: https://replicated.app
-     entitlements:
-       expires_at:
-         description: License Expiration
-         title: Expiration
-         value: "2022-11-10T14:59:49Z"
-         valueType: String
-     isNewKotsUiEnabled: true
-     licenseID: 2GuB3ZLQsU38F5SX3n03x8qBzeL
-     licenseSequence: 1
-     licenseType: dev
-     signature: eyJsaW...
-   ```
-
-1. Rename the license file and save it to your Desktop folder:
-
-   ```
-   export LICENSE_FILE=~/Desktop/Some-Big-Bank-${REPLICATED_APP}-license.yaml
-   replicated customer download-license --customer "Some-Big-Bank" > "${LICENSE_FILE}"
-   ```
-
-1. Verify that the license was written properly using either `cat` or `head`:
-
-   ```
-   head ${LICENSE_FILE}
-   ```
-
-   **Example output**:
-
-   ```
-   apiVersion: kots.io/v1beta1
-   kind: License
-   metadata:
-     name: some-big-bank
-   spec:
-     appSlug: cli-tutorial
-     channelID: 2GmYFUFzj8JOSLYw0jAKKJKFua8
-     channelName: Unstable
-     customerName: Some-Big-Bank
-     endpoint: https://replicated.app
-   ```
-
-## Next Step
-
-Continue to [Step 6: Install KOTS and the Application](tutorial-cli-install-app-manager) to get the installation commands from the Unstable channel, then install the KOTS components and the sample application in your cluster.
-
-================
-File: docs/vendor/tutorial-cli-create-new-version.mdx
-================
-# Step 8: Create a New Version
-
-In this step, you make an edit to the Config custom resource manifest file in the `replicated-cli-tutorial/manifests` directory for the `cli-tutorial` application to create a new field on the **Config** page in the Admin Console. You will then create and promote a new release to the Unstable channel with your changes.
-
-To create and promote a new version of the application:
-
-1. In your local directory, go to the `replicated-cli-tutorial/manifests` folder and open the `kots-config.yaml` file in a text editor.
-
-1. Copy and paste the following YAML into the file under the `example_default_value` field to create a new text field on the **Config** page:
-
-   ```yaml
-   - name: more_text
-     title: Another Text Example
-     type: text
-     value: ""
-     when: repl{{ ConfigOptionEquals "show_text_inputs" "1" }}
-   ```
-
-   The following shows the full YAML for the `kots-config.yaml` file after you add the new field:
-
-   ```yaml
-   ---
-   apiVersion: kots.io/v1beta1
-   kind: Config
-   metadata:
-     name: config-sample
-   spec:
-     groups:
-       - name: example_settings
-         title: My Example Config
-         description: Configuration to serve as an example for creating your own. See [https://kots.io/reference/v1beta1/config/](https://kots.io/reference/v1beta1/config/) for configuration docs. In this case, we provide example fields for configuring an Nginx welcome page.
-         items:
-         - name: show_text_inputs
-           title: Customize Text Inputs
-           help_text: "Show custom user text inputs"
-           type: bool
-           default: "0"
-           recommended: true
-         - name: example_default_value
-           title: Text Example (with default value)
-           type: text
-           value: ""
-           default: please change this value
-           when: repl{{ ConfigOptionEquals "show_text_inputs" "1" }}
-         # Add the new more_text field here
-         - name: more_text
-           title: Another Text Example
-           type: text
-           value: ""
-           when: repl{{ ConfigOptionEquals "show_text_inputs" "1" }}
-         - name: api_token
-           title: API token
-           type: password
-           props:
-             rows: 5
-           when: repl{{ ConfigOptionEquals "show_text_inputs" "1" }}
-         - name: readonly_text_left
-           title: Readonly Text
-           type: text
-           value: "{{repl RandomString 10}}"
-           readonly: true
-           when: repl{{ ConfigOptionEquals "show_text_inputs" "1" }}
-         - name: hidden_text
-           title: Secret Key
-           type: password
-           hidden: true
-           value: "{{repl RandomString 40}}"
-   ```
-
-1. Open the `example-configmap.yaml` file.
-
-1. In the `example-configmap.yaml` file, copy and paste the following HTML to replace the `<body>` section:
-
-   ```
-   <body>
-     This is an example KOTS application.
-     <p>This is text from a user config value: '{{repl ConfigOption "example_default_value"}}' </p>
-     <p>This is more text from a user config value: '{{repl ConfigOption "more_text"}}' </p>
-     <p>This is a hidden value: '{{repl ConfigOption "hidden_text"}}'</p>
-   </body>
-   ```
-
-   This creates a reference to the `more_text` field using a Replicated KOTS template function. The ConfigOption template function renders the user input from the configuration item that you specify. For more information, see [Config Context](/reference/template-functions-config-context) in _Reference_.
-
-1. Save the changes to both YAML files.
-
-1. Change to the root `replicated-cli-tutorial` directory, then run the following command to verify that there are no errors in the YAML:
-
-   ```
-   replicated release lint --yaml-dir=manifests
-   ```
-
-1. Create a new release and promote it to the Unstable channel:
-
-   ```
-   replicated release create --auto
-   ```
-
-   **Example output**:
-
-   ```
-   • Reading manifests from ./manifests ✓
-   • Creating Release ✓
-     • SEQUENCE: 2
-   • Promoting ✓
-     • Channel 2GxpUm7lyB2g0ramqUXqjpLHzK0 successfully set to release 2
-   ```
-
-1. Type `y` and press **Enter** to continue with the defaults.
-
-   **Example output**:
-
-   ```
-   RULE    TYPE    FILENAME    LINE    MESSAGE
-
-   • Reading manifests from ./manifests ✓
-   • Creating Release ✓
-     • SEQUENCE: 2
-   • Promoting ✓
-     • Channel 2GmYFUFzj8JOSLYw0jAKKJKFua8 successfully set to release 2
-   ```
-
-   The release is created and promoted to the Unstable channel with `SEQUENCE: 2`.
-
-1. Verify that the release was promoted to the Unstable channel:
-
-   ```
-   replicated release ls
-   ```
-
-   **Example output**:
-
-   ```
-   SEQUENCE    CREATED                 EDITED                  ACTIVE_CHANNELS
-   2           2022-11-03T19:16:24Z    0001-01-01T00:00:00Z    Unstable
-   1           2022-11-03T18:49:13Z    0001-01-01T00:00:00Z
-   ```
-
-## Next Step
-
-Continue to [Step 9: Update the Application](tutorial-cli-update-app) to return to the Admin Console and update the application to the new version that you promoted.
- -================ -File: docs/vendor/tutorial-cli-create-release.mdx -================ -# Step 4: Create a Release - -Now that you have the manifest files for the sample Kubernetes application, you can create a release for the `cli-tutorial` application and promote the release to the Unstable channel. - -By default, the Vendor Portal includes Unstable, Beta, and Stable release channels. The Unstable channel is intended for software vendors to use for internal testing, before promoting a release to the Beta or Stable channels for distribution to customers. For more information about channels, see [About Channels and Releases](releases-about). - -To create and promote a release to the Unstable channel: - -1. From the `replicated-cli-tutorial` directory, lint the application manifest files and ensure that there are no errors in the YAML: - - ``` - replicated release lint --yaml-dir=manifests - ``` - - If there are no errors, an empty list is displayed with a zero exit code: - - ```text - RULE TYPE FILENAME LINE MESSAGE - ``` - - For a complete list of the possible error, warning, and informational messages that can appear in the output of the `release lint` command, see [Linter Rules](/reference/linter). - -1. Initialize the project as a Git repository: - - ``` - git init - git add . - git commit -m "Initial Commit: CLI Tutorial" - ``` - - Initializing the project as a Git repository allows you to track your history. The Replicated CLI also reads Git metadata to help with the generation of release metadata, such as version labels. - -1. From the `replicated-cli-tutorial` directory, create a release with the default settings: - - ``` - replicated release create --auto - ``` - - The `--auto` flag generates release notes and metadata based on the Git status. - - **Example output:** - - ``` - • Reading Environment ✓ - - Prepared to create release with defaults: - - yaml-dir "./manifests" - promote "Unstable" - version "Unstable-ba710e5" - release-notes "CLI release of master triggered by exampleusername [SHA: d4173a4] [31 Oct 22 08:51 MDT]" - ensure-channel true - lint-release true - - Create with these properties? [Y/n] - ``` - -1. Type `y` and press **Enter** to confirm the prompt. - - **Example output:** - - ```text - • Reading manifests from ./manifests ✓ - • Creating Release ✓ - • SEQUENCE: 1 - • Promoting ✓ - • Channel VEr0nhJBBUdaWpPvOIK-SOryKZEwa3Mg successfully set to release 1 - ``` - The release is created and promoted to the Unstable channel. - -1. Verify that the release was promoted to the Unstable channel: - - ``` - replicated release ls - ``` - **Example output:** - - ```text - SEQUENCE CREATED EDITED ACTIVE_CHANNELS - 1 2022-10-31T14:55:35Z 0001-01-01T00:00:00Z Unstable - ``` - -## Next Step - -Continue to [Step 5: Create a Customer](tutorial-cli-create-customer) to create a customer license file that you will upload when installing the application. - -================ -File: docs/vendor/tutorial-cli-deploy-app.mdx -================ -# Step 7: Configure the Application - -After you install KOTS, you can log in to the KOTS Admin Console. This procedure shows you how to make a configuration change for the application from the Admin Console, which is a typical task performed by end users. - -To configure the application: - -1. Access the Admin Console using `https://localhost:8800` if the installation script is still running. 
Otherwise, run the following command to access the Admin Console: - - ```bash - kubectl kots admin-console --namespace NAMESPACE - ``` - - Replace `NAMESPACE` with the namespace where KOTS is installed. - -1. Enter the password that you created in [Step 6: Install KOTS and the Application](tutorial-cli-install-app-manager) to log in to the Admin Console. - - The Admin Console dashboard opens. On the Admin Console **Dashboard** tab, users can take various actions, including viewing the application status, opening the application, checking for application updates, syncing their license, and setting up application monitoring on the cluster with Prometheus. - - ![Admin Console app dashboard](/images/tutorials/tutorial-admin-console-dashboard.png) - -1. On the **Config** tab, select the **Customize Text Inputs** checkbox. In the **Text Example** field, enter any text. For example, `Hello`. - - ![Admin Console configuration tab](/images/tutorials/tutorial-install-config-tab.png) - - This page displays configuration settings that are specific to the application. Software vendors define the fields that are displayed on this page in the KOTS Config custom resource. For more information, see [Config](/reference/custom-resource-config) in _Reference_. - -1. Click **Save config**. In the dialog that opens, click **Go to updated version**. - - The **Version history** tab opens. - -1. Click **Deploy** for the new version. Then click **Yes, deploy** in the confirmation dialog. - - ![Admin Console configuration tab](/images/tutorials/tutorial-install-version-history.png) - -1. Click **Open App** to view the application in your browser. - - ![web page that displays text](/images/tutorials/tutorial-open-app.png) - - Notice the text that you entered previously on the configuration page is displayed on the screen. - - :::note - If you do not see the new text, refresh your browser. - ::: - -## Next Step - -Continue to [Step 8: Create a New Version](tutorial-cli-create-new-version) to make a change to one of the manifest files for the `cli-tutorial` application, then use the Replicated CLI to create and promote a new release. - -================ -File: docs/vendor/tutorial-cli-install-app-manager.mdx -================ -# Step 6: Install KOTS and the Application - -The next step is to test the installation process for the application release that you promoted. Using the KOTS CLI, you will install KOTS and the sample application in your cluster. - -KOTS is the Replicated component that allows your users to install, manage, and upgrade your application. Users can interact with KOTS through the Admin Console or through the KOTS CLI. - -To install KOTS and the application: - -1. From the `replicated-cli-tutorial` directory, run the following command to get the installation commands for the Unstable channel, where you promoted the release for the `cli-tutorial` application: - - ``` - replicated channel inspect Unstable - ``` - - **Example output:** - - ``` - ID: 2GmYFUFzj8JOSLYw0jAKKJKFua8 - NAME: Unstable - DESCRIPTION: - RELEASE: 1 - VERSION: Unstable-d4173a4 - EXISTING: - - curl -fsSL https://kots.io/install | bash - kubectl kots install cli-tutorial/unstable - - EMBEDDED: - - curl -fsSL https://k8s.kurl.sh/cli-tutorial-unstable | sudo bash - - AIRGAP: - - curl -fSL -o cli-tutorial-unstable.tar.gz https://k8s.kurl.sh/bundle/cli-tutorial-unstable.tar.gz - # ... 
-       # ... scp or sneakernet cli-tutorial-unstable.tar.gz to airgapped machine, then
-       tar xvf cli-tutorial-unstable.tar.gz
-       sudo bash ./install.sh airgap
-   ```
-   This command prints information about the channel, including the commands for installing in:
-   * An existing cluster
-   * An _embedded cluster_ created by Replicated kURL
-   * An air gap cluster that is not connected to the internet
-
-1. If you have not already, configure kubectl access to the cluster you provisioned as part of [Set Up the Environment](tutorial-cli-setup#set-up-the-environment). For more information about setting the context for kubectl, see [Command line tool (kubectl)](https://kubernetes.io/docs/reference/kubectl/) in the Kubernetes documentation.
-
-1. Run the `EXISTING` installation script with the following flags to automatically upload the license file and run the preflight checks at the same time you run the installation.
-
-   **Example:**
-
-   ```
-   curl -fsSL https://kots.io/install | bash
-   kubectl kots install cli-tutorial/unstable \
-     --license-file ./LICENSE_YAML \
-     --shared-password PASSWORD \
-     --namespace NAMESPACE
-   ```
-
-   Replace:
-
-   - `LICENSE_YAML` with the local path to your license file.
-   - `PASSWORD` with a password to access the Admin Console.
-   - `NAMESPACE` with the namespace where KOTS and the application will be installed.
-
-   When the Admin Console is ready, the script prints the `https://localhost:8800` URL where you can access the Admin Console and the `http://localhost:8888` URL where you can access the application.
-
-   **Example output**:
-
-   ```
-   • Deploying Admin Console
-     • Creating namespace ✓
-     • Waiting for datastore to be ready ✓
-     • Waiting for Admin Console to be ready ✓
-     • Waiting for installation to complete ✓
-     • Waiting for preflight checks to complete ✓
-
-   • Press Ctrl+C to exit
-   • Go to http://localhost:8800 to access the Admin Console
-
-   • Go to http://localhost:8888 to access the application
-   ```
-
-1. Verify that the Pods are running for the example NGINX service and for kotsadm:
-
-   ```bash
-   kubectl get pods --namespace NAMESPACE
-   ```
-
-   Replace `NAMESPACE` with the namespace where KOTS and the application were installed.
-
-   **Example output:**
-
-   ```text
-   NAME                       READY   STATUS    RESTARTS   AGE
-   kotsadm-7ccc8586b8-n7vf6   1/1     Running   0          12m
-   kotsadm-minio-0            1/1     Running   0          17m
-   kotsadm-rqlite-0           1/1     Running   0          17m
-   nginx-688f4b5d44-8s5v7     1/1     Running   0          11m
-   ```
-
-## Next Step
-
-Continue to [Step 7: Configure the Application](tutorial-cli-deploy-app) to log in to the Admin Console and make configuration changes.
-
-================
-File: docs/vendor/tutorial-cli-install-cli.mdx
-================
-# Step 1: Install the Replicated CLI
-
-In this tutorial, you use the Replicated CLI to create and promote releases for a sample application with Replicated. The Replicated CLI is the CLI for the Replicated Vendor Portal.
-
-This procedure describes how to create a Vendor Portal account, install the Replicated CLI on your local machine, and set up a `REPLICATED_API_TOKEN` environment variable for authentication.
-
-To install the Replicated CLI:
-
-1. Do one of the following to create an account in the Replicated Vendor Portal:
-   * **Join an existing team**: If you have an existing Vendor Portal team, you can ask your team administrator to send you an invitation to join.
-   * **Start a trial**: Alternatively, go to [vendor.replicated.com](https://vendor.replicated.com/) and click **Sign up** to create a 21-day trial account for completing this tutorial.
-
-1. 
Run the following command to use [Homebrew](https://brew.sh) to install the CLI:
-
-   ```
-   brew install replicatedhq/replicated/cli
-   ```
-
-   For the latest Linux or macOS versions of the Replicated CLI, see the [replicatedhq/replicated](https://github.com/replicatedhq/replicated/releases) releases in GitHub.
-
-1. Verify the installation:
-
-   ```
-   replicated version
-   ```
-   **Example output**:
-
-   ```json
-   {
-     "version": "0.37.2",
-     "git": "8664ac3",
-     "buildTime": "2021-08-24T17:05:26Z",
-     "go": {
-         "version": "go1.14.15",
-         "compiler": "gc",
-         "os": "darwin",
-         "arch": "amd64"
-     }
-   }
-   ```
-   At this point, if you run a Replicated CLI command, such as `replicated release ls`, you see the following error message about a missing API token:
-
-   ```
-   Error: set up APIs: Please provide your API token
-   ```
-
-1. Create an API token for the Replicated CLI:
-
-   1. Log in to the Vendor Portal, and go to the [Account settings](https://vendor.replicated.com/account-settings) page.
-
-   1. Under **User API Tokens**, click **Create user API token**. For Nickname, provide a name for the token. For Permissions, select **Read and Write**.
-
-      For more information about User API tokens, see [User API Tokens](replicated-api-tokens#user-api-tokens) in _Generating API Tokens_.
-
-   1. Click **Create Token**.
-
-   1. Copy the string that appears in the dialog.
-
-1. Export the string that you copied in the previous step to an environment variable named `REPLICATED_API_TOKEN`:
-
-   ```bash
-   export REPLICATED_API_TOKEN=YOUR_TOKEN
-   ```
-   Replace `YOUR_TOKEN` with the token string that you copied from the Vendor Portal in the previous step.
-
-1. Verify the User API token:
-
-   ```
-   replicated release ls
-   ```
-
-   You see the following error message:
-
-   ```
-   Error: App not found:
-   ```
-
-   This error is expected because you have not yet created an application. It confirms that the CLI can authenticate to the Vendor Portal with your token.
-
-## Next Step
-
-Continue to [Step 2: Create an Application](tutorial-cli-create-app) to use the Replicated CLI to create an application.
-
-================
-File: docs/vendor/tutorial-cli-manifests.mdx
-================
-# Step 3: Get the Sample Manifests
-
-To create a release for the `cli-tutorial` application, first create the Kubernetes manifest files for the application. This tutorial provides a set of sample manifest files for a simple Kubernetes application that deploys an NGINX service.
-
-To get the sample manifest files:
-
-1. Run the following command to create and change to a `replicated-cli-tutorial` directory:
-
-   ```
-   mkdir replicated-cli-tutorial
-   cd replicated-cli-tutorial
-   ```
-
-1. Create a `/manifests` directory and download the sample manifest files from the [kots-default-yaml](https://github.com/replicatedhq/kots-default-yaml) repository in GitHub:
-
-   ```
-   mkdir ./manifests
-   curl -fSsL https://github.com/replicatedhq/kots-default-yaml/archive/refs/heads/main.tar.gz | \
-     tar xzv --strip-components=1 -C ./manifests \
-     --exclude README.md --exclude LICENSE --exclude .gitignore
-   ```
-
-1. Verify that you can see the YAML files in the `replicated-cli-tutorial/manifests` folder:
-
-   ```
-   ls manifests/
-   ```
-   ```
-   example-configmap.yaml   example-service.yaml   kots-app.yaml      kots-lint-config.yaml   kots-support-bundle.yaml
-   example-deployment.yaml  k8s-app.yaml           kots-config.yaml   kots-preflight.yaml
-   ```
-
-## Next Step
-
-Continue to [Step 4: Create a Release](tutorial-cli-create-release) to create and promote the first release for the `cli-tutorial` application using these manifest files.
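-
-For reference before you create the release, it can help to know what kind of files these are. For example, `kots-app.yaml` contains a KOTS Application custom resource. The following is a minimal sketch of that resource type so you know what to expect when you open the file; the field values shown here are illustrative, not the exact contents of the downloaded file:
-
-```yaml
-apiVersion: kots.io/v1beta1
-kind: Application
-metadata:
-  name: cli-tutorial
-spec:
-  # Name shown in the Admin Console
-  title: CLI Tutorial App
-  # Resources whose status drives the application status indicator
-  statusInformers:
-    - deployment/example-nginx
-```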
-
-================
-File: docs/vendor/tutorial-cli-setup.mdx
-================
-import KubernetesTraining from "../partials/getting-started/_kubernetes-training.mdx"
-import LabsIntro from "../partials/getting-started/_labs-intro.mdx"
-import TutorialIntro from "../partials/getting-started/_tutorial-intro.mdx"
-import RelatedTopics from "../partials/getting-started/_related-topics.mdx"
-import VMRequirements from "../partials/getting-started/_vm-requirements.mdx"
-
-# Introduction and Setup
-
-<TutorialIntro/>
-
-The steps in this KOTS CLI-based tutorial show you how to use the Replicated CLI to perform these tasks. The Replicated CLI is the CLI for the Replicated Vendor Portal. You can use the Replicated CLI as a software vendor to programmatically create, configure, and manage your application artifacts, including application releases, release channels, customer entitlements, private image registries, and more.
-
-<KubernetesTraining/>
-
-## Set Up the Environment
-
-As part of this tutorial, you will install a sample application into a Kubernetes cluster. Before you begin, do the following to set up your environment:
-
-* Create a Kubernetes cluster that meets the minimum system requirements described in [KOTS Installation Requirements](/enterprise/installing-general-requirements). You can use any cloud provider or tool that you prefer to create a cluster, such as Google Kubernetes Engine (GKE), Amazon Web Services (AWS), or minikube.
-
-  **Example:**
-
-  To create a cluster in GKE, run the following command in the gcloud CLI:
-
-  ```
-  gcloud container clusters create NAME --preemptible --no-enable-ip-alias
-  ```
-  Where `NAME` is any name for the cluster.
-
-* Install kubectl, the Kubernetes command line tool. See [Install Tools](https://kubernetes.io/docs/tasks/tools/) in the Kubernetes documentation.
-* Configure kubectl command line access to the cluster that you created. See [Command line tool (kubectl)](https://kubernetes.io/docs/reference/kubectl/) in the Kubernetes documentation.
-
-## Related Topics
-
-<RelatedTopics/>
-
-================
-File: docs/vendor/tutorial-cli-update-app.mdx
-================
-# Step 9: Update the Application
-
-To test the new release that you promoted, return to the Admin Console in a browser to update the application.
-
-To update the application:
-
-1. Access the KOTS Admin Console using `https://localhost:8800` if the installation script is still running. Otherwise, run the following command to access the Admin Console:
-
-   ```bash
-   kubectl kots admin-console --namespace NAMESPACE
-   ```
-
-   Replace `NAMESPACE` with the namespace where the Admin Console is installed.
-
-1. Go to the **Version history** page, and click **Check for update**.
-
-   ![Admin Console version history page](/images/tutorials/tutorial-check-for-update.png)
-
-   The Admin Console loads the new release that you promoted.
-
-1. Click **Deploy**. In the dialog, click **Yes, deploy** to deploy the new version.
-
-   ![Admin Console version history page with new version](/images/tutorials/tutorial-deploy-app.png)
-
-1. After the Admin Console deploys the new version, go to the **Config** page where the **Another Text Example** field that you added is displayed.
-
-   ![Admin Console configuration page with Another Text Example field](/images/tutorials/tutorial-new-config-item.png)
-
-1. In the new **Another Text Example** field, enter any text. Click **Save config**.
-
-   The Admin Console notifies you that the configuration settings for the application have changed.
-
-   ![dialog over Admin Console configuration screen](/images/tutorials/tutorial-go-to-updated-version.png)
-
-1. In the dialog, click **Go to updated version**.
-
-   The Admin Console loads the updated version on the **Version history** page.
-
-1. On the **Version history** page, click **Deploy** next to the latest version to deploy the configuration change.
-
-   ![Admin Console version history page with new version](/images/tutorials/tutorial-deploy-config-change.png)
-
-1. Go to the **Dashboard** page and click **Open App**. The application displays the text that you added to the field.
-
-   ![web page with text from the new configuration field](/images/tutorials/tutorial-updated-app.png)
-
-   :::note
-   If you do not see the new text, refresh your browser.
-   :::
-
-## Summary
-
-Congratulations! As part of this tutorial, you:
-* Created and promoted a release for a Kubernetes application using the Replicated CLI
-* Installed the application in a Kubernetes cluster
-* Edited the manifest files for the application, adding a new configuration field and using template functions to reference the field
-* Promoted a new release with your changes
-* Used the Admin Console to update the application to the latest version
-
-================
-File: docs/vendor/tutorial-config-create-app.md
-================
-# Step 2: Create an Application
-
-Next, install the Replicated CLI and then create an application.
-
-To create an application:
-
-1. Install the Replicated CLI:
-
-   ```
-   brew install replicatedhq/replicated/cli
-   ```
-   For more installation options, see [Installing the Replicated CLI](/reference/replicated-cli-installing).
-
-1. Authorize the Replicated CLI:
-
-   ```
-   replicated login
-   ```
-   In the browser window that opens, complete the prompts to log in to your vendor account and authorize the CLI.
-
-1. Create an application named `Grafana`:
-
-   ```
-   replicated app create Grafana
-   ```
-
-1. Set the `REPLICATED_APP` environment variable to the application that you created. This allows you to interact with the application using the Replicated CLI without needing to use the `--app` flag with every command:
-
-   1. Get the slug for the application that you created:
-
-      ```
-      replicated app ls
-      ```
-      **Example output**:
-      ```
-      ID                             NAME       SLUG              SCHEDULER
-      2WthxUIfGT13RlrsUx9HR7So8bR    Grafana    grafana-python    kots
-      ```
-      In the example above, the application slug is `grafana-python`.
-
-      :::info
-      The application _slug_ is a unique string that is generated based on the application name. You can use the application slug to interact with the application through the Replicated CLI and the Vendor API v3. The application name and slug are often different from one another because it is possible to create more than one application with the same name.
-      :::
-
-   1. Set the `REPLICATED_APP` environment variable to the application slug.
-
-      **macOS example:**
-
-      ```
-      export REPLICATED_APP=grafana-python
-      ```
-
-## Next Step
-
-Add the Replicated SDK to the Helm chart and package the chart to an archive. See [Step 3: Package the Helm Chart](tutorial-config-package-chart).
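-
-Before continuing, you can optionally confirm that the CLI is now targeting the new application by listing its channels. A quick check (for a new application, the default channels are Unstable, Beta, and Stable):
-
-```
-echo $REPLICATED_APP
-replicated channel ls
-```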
-
-## Related Topics
-
-* [Create an Application](/vendor/vendor-portal-manage-app#create-an-application)
-* [Installing the Replicated CLI](/reference/replicated-cli-installing)
-* [replicated app create](/reference/replicated-cli-app-create)
-
-================
-File: docs/vendor/tutorial-config-create-customer.md
-================
-# Step 5: Create a KOTS-Enabled Customer
-
-After promoting the release, create a customer with the KOTS entitlement so that you can install the release with KOTS.
-
-To create a customer:
-
-1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers > Create customer**.
-
-   The **Create a new customer** page opens:
-
-   ![Create a new customer page in the Vendor Portal](/images/create-customer.png)
-
-   [View a larger version of this image](/images/create-customer.png)
-
-1. For **Customer name**, enter a name for the customer. For example, `KOTS Customer`.
-
-1. For **Channel**, select **Unstable**. This allows the customer to install releases promoted to the Unstable channel.
-
-1. For **License type**, select **Development**.
-
-1. For **License options**, verify that **KOTS Install Enabled** is enabled. This is the entitlement that allows the customer to install with KOTS.
-
-1. Click **Save Changes**.
-
-1. On the **Manage customer** page for the customer, click **Download license**. You will use the license file to install with KOTS.
-
-   ![Download license button on the customer page](/images/customer-download-license.png)
-
-   [View a larger version of this image](/images/customer-download-license.png)
-
-## Next Step
-
-Get the KOTS installation command and install. See [Step 6: Install the Release with KOTS](tutorial-config-install-kots).
-
-## Related Topics
-
-* [About Customers](/vendor/licenses-about)
-* [Creating and Managing Customers](/vendor/releases-creating-customer)
-
-================
-File: docs/vendor/tutorial-config-create-release.md
-================
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-import HelmChart from "../partials/getting-started/_grafana-helmchart.mdx"
-import KotsApp from "../partials/getting-started/_grafana-kots-app.mdx"
-import K8sApp from "../partials/getting-started/_grafana-k8s-app.mdx"
-import Config from "../partials/getting-started/_grafana-config.mdx"
-
-# Step 4: Add the Chart Archive to a Release
-
-Next, add the Helm chart archive to a new release for the application in the Replicated vendor platform.
-
-The purpose of this step is to configure a release that supports installation with KOTS. Additionally, this step defines a user-facing application configuration page that displays in the KOTS Admin Console during installation where users can set their own Grafana login credentials.
-
-To create a release:
-
-1. In the `grafana` directory, create a subdirectory named `manifests`:
-
-   ```
-   mkdir manifests
-   ```
-
-   You will add the files required to support installation with Replicated KOTS to this subdirectory.
-
-1. Move the Helm chart archive that you created to `manifests`:
-
-   ```
-   mv grafana-9.6.5.tgz manifests
-   ```
-
-1. In the `manifests` directory, create the following YAML files to configure the release:
-
-   ```
-   cd manifests
-   ```
-   ```
-   touch kots-app.yaml k8s-app.yaml kots-config.yaml grafana.yaml
-   ```
-
-1. 
In each file, paste the corresponding YAML provided in the tabs below: - - <Tabs> - <TabItem value="kots-app" label="kots-app.yaml" default> - <h5>Description</h5> - <p>The KOTS Application custom resource enables features in the Admin Console such as branding, release notes, port forwarding, dashboard buttons, application status indicators, and custom graphs.</p><p>The YAML below provides a name for the application to display in the Admin Console, adds a custom <em>status informer</em> that displays the status of the <code>grafana</code> Deployment resource in the Admin Console dashboard, adds a custom application icon, and creates a port forward so that the user can open the Grafana application in a browser.</p> - <h5>YAML</h5> - <KotsApp/> - </TabItem> - <TabItem value="k8s-app" label="k8s-app.yaml"> - <h5>Description</h5> - <p>The Kubernetes Application custom resource supports functionality such as including buttons and links on the Admin Console dashboard. The YAML below adds an <strong>Open App</strong> button to the Admin Console dashboard that opens the application using the port forward configured in the KOTS Application custom resource.</p> - <h5>YAML</h5> - <K8sApp/> - </TabItem> - <TabItem value="config" label="kots-config.yaml"> - <h5>Description</h5> - <p>The Config custom resource specifies a user-facing configuration page in the Admin Console designed for collecting application configuration from users. The YAML below creates "Admin User" and "Admin Password" fields that will be shown to the user on the configuration page during installation. These fields will be used to set the login credentials for Grafana.</p> - <h5>YAML</h5> - <Config/> - </TabItem> - <TabItem value="helmchart" label="grafana.yaml"> - <h5>Description</h5> - <p>The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart.</p> - <p>The HelmChart custom resource below contains a <code>values</code> key, which creates a mapping to the Grafana <code>values.yaml</code> file. In this case, the <code>values.admin.user</code> and <code>values.admin.password</code> fields map to <code>admin.user</code> and <code>admin.password</code> in the Grafana <code>values.yaml</code> file.</p> - <p>During installation, KOTS renders the ConfigOption template functions in the <code>values.admin.user</code> and <code>values.admin.password</code> fields and then sets the corresponding Grafana values accordingly.</p> - <h5>YAML</h5> - <HelmChart/> - </TabItem> - </Tabs> - -1. From the `manifests` directory, lint the YAML files to confirm that there are no errors: - - ``` - replicated release lint --yaml-dir . - ``` - `--yaml-dir` is the path to the directory that contains the Helm chart archive and the manifest files required by KOTS. - - **Example output**: - - ``` - RULE TYPE FILENAME LINE MESSAGE - preflight-spec warn Missing preflight spec - troubleshoot-spec warn Missing troubleshoot spec - nonexistent-status-informer-object warn kots-app.yaml 8 Status informer points to a nonexistent kubernetes object. If this is a Helm resource, this warning can be ignored. - ``` - :::note - The output includes warning messages that list missing manifest files. These manifests control additional KOTS functionality and can be ignored for the purpose of this tutorial. The `nonexistent-status-informer-object` warning can also be ignored because the `grafana` Deployment resource that was added as a status informer in the KOTS Application custom resource is a Helm resource. - ::: - -1. 
Create a release: - - ``` - replicated release create --yaml-dir . - ``` - **Example output**: - ``` - • Reading manifests from . ✓ - • Creating Release ✓ - • SEQUENCE: 1 - ``` - -1. Log in to the Vendor Portal and go to **Releases**. - - The release that you created is listed under **All releases**. - - ![Release page in the Vendor Portal with one release](/images/grafana-release-seq-1.png) - - [View a larger version of this image](/images/grafana-release-seq-1.png) - -1. Click **Edit release** to view the files in the release. - - In the release editor, you can see the manifest files that you created, the Helm chart `.tgz` archive, and the `Chart.yaml` and `values.yaml` files for the Grafana Helm chart. You can also see the same warning messages that were displayed in the CLI output. - - ![Edit Release page in the Vendor Portal](/images/grafana-edit-release-seq-1.png) - - [View a larger version of this image](/images/grafana-edit-release-seq-1.png) - -1. At the top of the page, click **Promote**. - -1. In the dialog, for **Which channels you would like to promote this release to?**, select **Unstable**. Unstable is a default channel that is intended for use with internal testing. Click **Promote**. - - <img alt="Promote release dialog" src="/images/release-promote.png" width="400px"/> - - [View a larger version of this image](/images/release-promote.png) - -## Next Step - -Create a customer with the KOTS entitlement so that you can install the release in your cluster using Replicated KOTS. See [Step 5: Create a KOTS-Enabled Customer](tutorial-config-create-customer). - -## Related Topics - -* [About Channels and Releases](/vendor/releases-about) -* [Configuring the HelmChart Custom Resource](/vendor/helm-native-v2-using) -* [Config Custom Resource](/reference/custom-resource-config) -* [Manipulating Helm Chart Values with KOTS](/vendor/helm-optional-value-keys) - -================ -File: docs/vendor/tutorial-config-get-chart.md -================ -# Step 1: Get the Sample Chart and Test - -To begin, get the sample Grafana Helm chart from Bitnami, install the chart in your cluster using the Helm CLI, and then uninstall. The purpose of this step is to confirm that you can successfully install and access the application before adding the chart to a release in the Replicated vendor platform. - -To get the sample Grafana chart and test installation: - -1. Run the following command to pull and untar version 9.6.5 of the Bitnami Grafana Helm chart: - - ``` - helm pull --untar oci://registry-1.docker.io/bitnamicharts/grafana --version 9.6.5 - ``` - For more information about this chart, see the [bitnami/grafana](https://github.com/bitnami/charts/tree/main/bitnami/grafana) repository in GitHub. - -1. Change to the new `grafana` directory that was created: - ``` - cd grafana - ``` -1. View the files in the directory: - ``` - ls - ``` - The directory contains the following files: - ``` - Chart.lock Chart.yaml README.md charts templates values.yaml - ``` -1. Install the chart in your cluster: - - ``` - helm install grafana . --namespace grafana --create-namespace - ``` - To view the full installation instructions from Bitnami, see [Installing the Chart](https://github.com/bitnami/charts/blob/main/bitnami/grafana/README.md#installing-the-chart) in the `bitnami/grafana` repository. 
-
-   After running the installation command, the following output is displayed:
-
-   ```
-   NAME: grafana
-   LAST DEPLOYED: Thu Dec 14 14:54:50 2023
-   NAMESPACE: grafana
-   STATUS: deployed
-   REVISION: 1
-   TEST SUITE: None
-   NOTES:
-   CHART NAME: grafana
-   CHART VERSION: 9.6.5
-   APP VERSION: 10.2.2
-
-   ** Please be patient while the chart is being deployed **
-
-   1. Get the application URL by running these commands:
-      echo "Browse to http://127.0.0.1:8080"
-      kubectl port-forward svc/grafana 8080:3000 &
-
-   2. Get the admin credentials:
-
-      echo "User: admin"
-      echo "Password: $(kubectl get secret grafana-admin --namespace grafana -o jsonpath="{.data.GF_SECURITY_ADMIN_PASSWORD}" | base64 -d)"
-   # Note: Do not include grafana.validateValues.database here. See https://github.com/bitnami/charts/issues/20629
-   ```
-
-1. Watch the `grafana` Deployment until it is ready:
-
-   ```
-   kubectl get deploy grafana --namespace grafana --watch
-   ```
-
-1. When the Deployment is created, run the commands provided in the output of the installation command to get the Grafana login credentials:
-
-   ```
-   echo "User: admin"
-   echo "Password: $(kubectl get secret grafana-admin --namespace grafana -o jsonpath="{.data.GF_SECURITY_ADMIN_PASSWORD}" | base64 -d)"
-   ```
-
-1. Run the commands provided in the output of the installation command to get the Grafana URL:
-
-   ```
-   echo "Browse to http://127.0.0.1:8080"
-   kubectl port-forward svc/grafana 8080:3000 --namespace grafana
-   ```
-
-   :::note
-   Include `--namespace grafana` in the `kubectl port-forward` command.
-   :::
-
-1. In a browser, go to the URL to open the Grafana login page:
-
-   <img alt="Grafana login page" src="/images/grafana-login.png" width="300px"/>
-
-   [View a larger version of this image](/images/grafana-login.png)
-
-1. Log in using the credentials provided to open the Grafana dashboard:
-
-   <img alt="Grafana dashboard" src="/images/grafana-dashboard.png" width="500px"/>
-
-   [View a larger version of this image](/images/grafana-dashboard.png)
-
-1. Uninstall the Helm chart:
-
-   ```
-   helm uninstall grafana --namespace grafana
-   ```
-   This command removes all the Kubernetes resources associated with the chart and uninstalls the `grafana` release.
-
-1. Delete the namespace:
-
-   ```
-   kubectl delete namespace grafana
-   ```
-
-## Next Step
-
-Log in to the Vendor Portal and create an application. See [Step 2: Create an Application](tutorial-config-create-app).
-
-## Related Topics
-
-* [Helm Install](https://helm.sh/docs/helm/helm_install/)
-* [Helm Uninstall](https://helm.sh/docs/helm/helm_uninstall/)
-* [Helm Create](https://helm.sh/docs/helm/helm_create/)
-* [Helm Package](https://helm.sh/docs/helm/helm_package/)
-* [bitnami/grafana](https://github.com/bitnami/charts/tree/main/bitnami/grafana)
-
-================
-File: docs/vendor/tutorial-config-install-kots.md
-================
-# Step 6: Install the Release with KOTS
-
-Next, get the KOTS installation command from the Unstable channel in the Vendor Portal and then install the release using the customer license that you downloaded.
-
-As part of installation, you will set Grafana login credentials on the KOTS Admin Console configuration page.
-
-To install the release with KOTS:
-
-1. In the [Vendor Portal](https://vendor.replicated.com), go to **Channels**. From the **Unstable** channel card, under **Install**, copy the **KOTS Install** command.
-
-   ![KOTS Install tab on the Unstable channel card](/images/grafana-unstable-channel.png)
-
-   [View a larger version of this image](/images/grafana-unstable-channel.png)
-
-1. On the command line, run the **KOTS Install** command that you copied:
-
-   ```bash
-   curl https://kots.io/install | bash
-   kubectl kots install $REPLICATED_APP/unstable
-   ```
-
-   This installs the latest version of the KOTS CLI and the Admin Console. The Admin Console provides a user interface where you can upload the customer license file and deploy the application.
-
-   For additional KOTS CLI installation options, including how to install without root access, see [Installing the KOTS CLI](/reference/kots-cli-getting-started).
-
-   :::note
-   KOTS v1.104.0 or later is required to deploy the Replicated SDK. You can verify the version of KOTS installed with `kubectl kots version`.
-   :::
-
-1. Complete the installation command prompts:
-
-   1. For `Enter the namespace to deploy to`, enter `grafana`.
-
-   1. For `Enter a new password to be used for the Admin Console`, provide a password to access the Admin Console.
-
-   When the Admin Console is ready, the command prints the URL where you can access the Admin Console. At this point, the KOTS CLI is installed and the Admin Console is running, but the application is not yet deployed.
-
-   **Example output:**
-
-   ```bash
-   Enter the namespace to deploy to: grafana
-   • Deploying Admin Console
-     • Creating namespace ✓
-     • Waiting for datastore to be ready ✓
-   Enter a new password for the Admin Console (6+ characters): ••••••••
-     • Waiting for Admin Console to be ready ✓
-
-   • Press Ctrl+C to exit
-   • Go to http://localhost:8800 to access the Admin Console
-   ```
-
-1. With the port forward running, go to `http://localhost:8800` in a browser to access the Admin Console.
-
-1. On the login page, enter the password that you created for the Admin Console.
-
-1. On the license page, select the license file that you downloaded previously and click **Upload license**.
-
-1. On the **Configure Grafana** page, enter a username and password. You will use these credentials to log in to Grafana.
-
-   ![Admin Console config page with username and password fields](/images/grafana-config.png)
-
-   [View a larger version of this image](/images/grafana-config.png)
-
-1. Click **Continue**.
-
-   The Admin Console dashboard opens. The application status changes from Missing to Unavailable while the `grafana` Deployment is being created.
-
-   ![Admin Console dashboard showing unavailable application status](/images/grafana-unavailable.png)
-
-   [View a larger version of this image](/images/grafana-unavailable.png)
-
-1. On the command line, press Ctrl+C to exit the port forward.
-
-1. Watch for the `grafana` Deployment to become ready:
-
-   ```
-   kubectl get deploy grafana --namespace grafana --watch
-   ```
-
-1. After the Deployment is ready, run the following command to confirm that the `grafana-admin` Secret was updated with the new password that you created on the **Configure Grafana** page:
-
-   ```
-   echo "Password: $(kubectl get secret grafana-admin --namespace grafana -o jsonpath="{.data.GF_SECURITY_ADMIN_PASSWORD}" | base64 -d)"
-   ```
-
-   The output of this command displays the password that you created.
-
-1. Start the port forward again to access the Admin Console:
-
-   ```
-   kubectl kots admin-console --namespace grafana
-   ```
-
-1. Go to `http://localhost:8800` to open the Admin Console.
- - On the Admin Console dashboard, the application status is now displayed as Ready: - - ![Admin console dashboard showing ready application status](/images/grafana-ready.png) - - [View a larger version of this image](/images/grafana-ready.png) - -1. Click **Open App** to open the Grafana login page in a browser. - - <img alt="Grafana login webpage" src="/images/grafana-login.png" width="300px"/> - - [View a larger version of this image](/images/grafana-login.png) - -1. On the Grafana login page, enter the username and password that you created on the **Configure Grafana** page. Confirm that you can log in to the application to access the Grafana dashboard: - - <img alt="Grafana dashboard" src="/images/grafana-dashboard.png" width="500px"/> - - [View a larger version of this image](/images/grafana-dashboard.png) - -1. On the command line, press Ctrl+C to exit the port forward. - -1. Uninstall the Grafana application from your cluster: - - ```bash - kubectl kots remove $REPLICATED_APP --namespace grafana --undeploy - ``` - **Example output**: - ``` - • Removing application grafana-python reference from Admin Console and deleting associated resources from the cluster ✓ - • Application grafana-python has been removed - ``` - -1. Remove the Admin Console from the cluster: - - 1. Delete the namespace where the Admin Console is installed: - - ``` - kubectl delete namespace grafana - ``` - 1. Delete the Admin Console ClusterRole and ClusterRoleBinding: - - ``` - kubectl delete clusterrole kotsadm-role - ``` - ``` - kubectl delete clusterrolebinding kotsadm-rolebinding - ``` - -## Next Step - -Congratulations! As part of this tutorial, you used the KOTS Config custom resource to define a configuration page in the Admin Console. You also used the KOTS HelmChart custom resource and KOTS ConfigOption template function to override the default Grafana login credentials with a user-supplied username and password. - -To learn more about how to customize the Config custom resource to create configuration fields for your application, see [Config](/reference/custom-resource-config). - -## Related Topics - -* [kots install](/reference/kots-cli-install/) -* [Installing the KOTS CLI](/reference/kots-cli-getting-started/) -* [Installing an Application](/enterprise/installing-overview) -* [Deleting the Admin Console and Removing Applications](/enterprise/delete-admin-console) - -================ -File: docs/vendor/tutorial-config-package-chart.md -================ -import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" -import UnauthorizedError from "../partials/replicated-sdk/_401-unauthorized.mdx" - -# Step 3: Package the Helm Chart - -Next, add the Replicated SDK as a dependency of the Helm chart and then package the chart into a `.tgz` archive. The purpose of this step is to prepare the Helm chart to be added to a release. - -To add the Replicated SDK and package the Helm chart: - -1. In your local file system, go to the `grafana` directory that was created as part of [Step 1: Get the Sample Chart and Test](tutorial-config-get-chart). - -1. In the `Chart.yaml` file, add the Replicated SDK as a dependency: - - <DependencyYaml/> - -1. Update dependencies and package the Helm chart to a `.tgz` chart archive: - - ```bash - helm package . --dependency-update - ``` - <UnauthorizedError/> - -## Next Step - -Create a release using the Helm chart archive. See [Step 4: Add the Chart Archive to a Release](tutorial-config-create-release). 
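-
-For reference, the dependency that you add to `Chart.yaml` in this step generally takes the following shape. This is a sketch only; the exact content is rendered by the `DependencyYaml` partial above, and the version shown here is illustrative:
-
-```yaml
-# In Chart.yaml
-dependencies:
-- name: replicated
-  repository: oci://registry.replicated.com/library
-  version: 1.0.0 # illustrative; use the latest Replicated SDK release
-```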
-
-## Related Topics
-
-* [About the Replicated SDK](/vendor/replicated-sdk-overview)
-* [Helm Package](https://helm.sh/docs/helm/helm_package/)
-
-================
-File: docs/vendor/tutorial-config-setup.md
-================
-# Introduction and Setup
-
-This topic provides a summary of the goals and outcomes for the tutorial and also lists the prerequisites to set up your environment before you begin.
-
-## Summary
-
-This tutorial introduces you to mapping user-supplied values from the Replicated KOTS Admin Console configuration page to a Helm chart `values.yaml` file.
-
-In this tutorial, you use a sample Helm chart to learn how to:
-
-* Define a user-facing application configuration page in the KOTS Admin Console
-* Set Helm chart values with the user-supplied values from the Admin Console configuration page
-
-## Set Up the Environment
-
-Before you begin, ensure that you have kubectl access to a Kubernetes cluster. You can use any cloud provider or tool that you prefer to create a cluster, such as [Replicated Compatibility Matrix](/vendor/testing-how-to), Google Kubernetes Engine (GKE), or minikube.
-
-## Next Step
-
-Get the sample Bitnami Helm chart and test installation with the Helm CLI. See [Step 1: Get the Sample Chart and Test](/vendor/tutorial-config-get-chart).
-
-================
-File: docs/vendor/tutorial-ecr-private-images.md
-================
-# Tutorial: Using ECR for Private Images
-
-## Objective
-
-The purpose of this tutorial is to walk you through how to configure Replicated KOTS to pull images from a private repository in Amazon's Elastic Container Registry (ECR). This tutorial demonstrates the differences between using public and private images with KOTS.
-
-## Prerequisites
-
-* To install the application in this tutorial, you must have a virtual machine (VM) that meets the following minimum requirements:
-  * Ubuntu 18.04
-  * At least 8 GB of RAM
-  * 4 CPU cores
-  * At least 40 GB of disk space
-
-* To pull a public NGINX container and push it to a private repository in ECR as part of this tutorial, you must have the following:
-  * An ECR repository
-  * An AWS account to use with Docker to pull and push the public NGINX image to the ECR repository. The AWS account must be able to create a read-only user.
-  * Docker
-  * The AWS CLI
-
-## Overview
-
-The guide is divided into the following steps:
-
- 1. [Set Up the Testing Environment](#set-up)
-
- 2. [Configure Private Registries in Replicated](#2-configure-private-registries-in-replicated)
-
- 3. [Update Definition Files](#3-update-definition-files)
-
- 4. [Install the New Version](#4-install-the-new-version)
-
-## 1. Set Up the Testing Environment {#set-up}
-
-We are going to use the default NGINX deployment to create our application and then update it to pull the same container from a private repository in ECR, noting the differences along the way.
-
-### Create a Sample Application and Deploy the First Release
-
-In this section, we cover at a high level the steps to create a new application and install it on a VM.
-
-To create our sample application, follow these steps:
-
-* Create a new application in the Replicated [vendor portal](https://vendor.replicated.com) and call it 'MySampleECRApp'.
-* Create the first release using the default definition files and promote it to the *Unstable* channel.
-* Create a customer, assign it to the *Unstable* channel, and download the license file.
-* Install the application on a VM.
-
-Log in to the Replicated admin console.
To inspect what was deployed, look at the files under **View Files** in the admin console.
-The Upstream files (the files from the release created in the vendor portal) show that we are pulling the public image.
-
-![admin-console-view-files-upstream-release1](/images/guides/kots/priv-reg-ecr-ups-files-rel1.png)
-
-We can further validate this if we switch back to the terminal window on the VM where we installed the application.
-If we run `kubectl describe pod <pod-name>` on the NGINX pod, we can confirm that it was in fact pulled from the public repository.
-
-![admin-console-kubectl-describe-release2](/images/guides/kots/priv-reg-ecr-kubctl-describe-rel1.png)
-
-Now that we have the basic application installed, we are going to pull the same image, but from an ECR repository.
-
-### Pull Public Image and Push to ECR
-
-To keep the changes to a minimum and only focus on using a private registry, we are going to pull the public NGINX container (as specified in the `deployment.yaml` file) to our local environment, and then push it to a repository in ECR.
-
-Let's start by pulling the public image:
-
-```shell
-$ docker pull nginx
-```
-
-You should have an output similar to this:
-
-```shell
-Using default tag: latest
-latest: Pulling from library/nginx
-d121f8d1c412: Pull complete
-ebd81fc8c071: Pull complete
-655316c160af: Pull complete
-d15953c0e0f8: Pull complete
-2ee525c5c3cc: Pull complete
-Digest: sha256:c628b67d21744fce822d22fdcc0389f6bd763daac23a6b77147d0712ea7102d0
-Status: Downloaded newer image for nginx:latest
-docker.io/library/nginx:latest
-```
-
-Next, log in to ECR and push this container.
-To use `docker login` with ECR, [install the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) and [configure it](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html) if you have not already done so.
-As part of the configuration, provide the AWS Access Key ID and AWS Secret Key for a user that has permissions to create and push images to the repository. For more information about working with containers and ECR in the AWS CLI, see [Using Amazon ECR with the AWS CLI](https://docs.aws.amazon.com/AmazonECR/latest/userguide/getting-started-cli.html).
-
-Just like with any other private registry, we need to know the registry endpoint to pass to the `docker login` command.
-The syntax is as follows:
-
-```shell
-
-docker login [some.private.registry]:[port]
-
-```
-In this case, the endpoint is **[some.private.registry]:[port]**.
-
-To determine the endpoint for ECR, log in to the AWS console and search for 'ECR', which should bring up Elastic Container Registry as an option as shown below.
-
-![search-4-ecr](/images/guides/kots/priv-reg-ecr-search-4-ecr.png)
-
-Select 'Elastic Container Registry' from the options in the dropdown to get to the list of repositories.
-
-![ecr-repos](/images/guides/kots/priv-reg-ecr-repos.png)
-
-As shown in the screenshot above, the endpoint for each repository is listed under the URI column.
-For the purpose of this guide, we will push the NGINX image to the **demo-apps** repository.
-
-To determine the endpoint to use in the login command, use the URI without the repository name.
-
-When logging in to ECR, use the AWS CLI to supply the user credentials.
-For example, to log in to ECR, we run the following command:
-
-```shell
-
-$ aws ecr get-login-password --region us-east-2 | docker login --username AWS --password-stdin 4999999999999.dkr.ecr.us-east-2.amazonaws.com
-```
-
-A successful login will display a `Login Succeeded` message.
-To push this image to our private repository, tag the image.
-The new tag will consist of:
-
-`<ecr repo endpoint>/image`
-
-For example, to tag the public NGINX image, we run the following command:
-
-```shell
-$ docker tag nginx 4999999999999.dkr.ecr.us-east-2.amazonaws.com/demo-apps/nginx
-```
-
-Assuming the tagging is successful, push the container to our ECR repository:
-
-```shell
-$ docker push 4999999999999.dkr.ecr.us-east-2.amazonaws.com/demo-apps/nginx
-The push refers to repository [4999999999999.dkr.ecr.us-east-2.amazonaws.com/demo-apps/nginx]
-908cf8238301: Pushed
-eabfa4cd2d12: Pushed
-60c688e8765e: Pushed
-f431d0917d41: Pushed
-07cab4339852: Pushed
-latest: digest: sha256:794275d96b4ab96eeb954728a7bf11156570e8372ecd5ed0cbc7280313a27d19 size: 1362
-
-```
-Our testing environment is all set.
-We are now ready to update Replicated to use the private registry.
-
-* * *
-
-## 2. Configure Private Registries in Replicated
-
-To configure a private registry in Replicated, we need to provide the same information we needed to log in to ECR in the previous step:
-
-- **Endpoint**
-- **Username**
-- **Password**
-
-The difference is that we'll use a different user than the one we used previously. Since Replicated only needs to pull images, it is a best practice to create a 'read-only' user for this specific purpose.
-
-### Determine the Endpoint
-
-The endpoint should be the same as the one we provided in the previous step.
-
-### Setting Up the Service Account User
-
-Replicated only needs access to pull images from the private registry. Let's create a new user in AWS:
-
-![aws-new-user](/images/guides/kots/priv-reg-ecr-new-user.png)
-
-As far as permissions go, there are a couple of options, depending on the scope of access.
-If exposing all images to Replicated is an acceptable solution, the Amazon-provided [AmazonEC2ContainerRegistryReadOnly](https://docs.aws.amazon.com/AmazonECR/latest/userguide/ecr_managed_policies.html#AmazonEC2ContainerRegistryReadOnly) policy will work:
-
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [{
-    "Effect": "Allow",
-    "Action": [
-      "ecr:GetAuthorizationToken",
-      "ecr:BatchCheckLayerAvailability",
-      "ecr:GetDownloadUrlForLayer",
-      "ecr:GetRepositoryPolicy",
-      "ecr:DescribeRepositories",
-      "ecr:ListImages",
-      "ecr:DescribeImages",
-      "ecr:BatchGetImage"
-    ],
-    "Resource": "*"
-  }]
-}
-```
-If you wish to limit Replicated to only certain images, the following policy should be used instead. Note that the `ecr:GetAuthorizationToken` action does not support resource-level restrictions, so it is granted on all resources in its own statement:
-
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Action": [
-        "ecr:BatchCheckLayerAvailability",
-        "ecr:GetDownloadUrlForLayer",
-        "ecr:GetRepositoryPolicy",
-        "ecr:DescribeRepositories",
-        "ecr:ListImages",
-        "ecr:DescribeImages",
-        "ecr:BatchGetImage"
-      ],
-      "Resource": [
-        "arn:aws:ecr:us-east-1:<account-id>:repository/<repo1>",
-        "arn:aws:ecr:us-east-1:<account-id>:repository/<repo2>"
-      ]
-    },
-    {
-      "Effect": "Allow",
-      "Action": [
-        "ecr:GetAuthorizationToken"
-      ],
-      "Resource": "*"
-    }
-  ]
-}
-```
-
-We will need the AWS Access Key ID and AWS Secret Key in the next section as these will map to the *Username* and *Password* fields.
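-
-If you prefer to create this user from the command line, the following sketch uses the AWS CLI with the Amazon-managed read-only policy (the user name is illustrative):
-
-```shell
-# Create a dedicated read-only user for Replicated
-$ aws iam create-user --user-name replicated-ecr-readonly
-
-# Attach the Amazon-managed read-only ECR policy
-$ aws iam attach-user-policy --user-name replicated-ecr-readonly --policy-arn arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly
-
-# Generate the access key pair that maps to the Username and Password fields
-$ aws iam create-access-key --user-name replicated-ecr-readonly
-```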
You can obtain these credentials as you create the user or after the user has been created.
-
-### Enter Registry Information in Replicated
-
-First, we must link Replicated with the registry. To do this, click on **Add External Registry** from the *Images* tab.
-
-<img src="/images/add-external-registry.png" alt="/images/add-external-registry.png" width="400px"></img>
-
-[View a larger version of this image](/images/add-external-registry.png)
-
-The values for the fields are:
-
-**Endpoint:**
-Enter the same URL used to log in to ECR.
-For example, to link to the same registry as the one in the previous section, we would enter *4999999999999.dkr.ecr.us-east-2.amazonaws.com*.
-
-**Username:**
-Enter the AWS Access Key ID for the user created in the [Setting Up the Service Account User](#setting-up-the-service-account-user) section.
-
-**Password:**
-Enter the AWS Secret Key for the user created in the [Setting Up the Service Account User](#setting-up-the-service-account-user) section.
-
-* * *
-
-## 3. Update Definition Files
-
-The last step is to update our definition manifests to pull the image from the ECR repository.
-To do this, we'll update the `deployment.yaml` file by adding the ECR registry URL to the `image` value.
-Below is an example using the registry URL used in this guide.
-
-```diff
-  spec:
-    containers:
-    - name: nginx
--     image: nginx
-+     image: 4999999999999.dkr.ecr.us-east-2.amazonaws.com/demo-apps/nginx
-      envFrom:
-```
-
-Save your changes, then create the new release and promote it to the *Unstable* channel.
-
-* * *
-
-## 4. Install the New Version
-
-To deploy the new version of the application, go back to the admin console and select the *Version History* tab.
-Click on **Check for Updates** and then **Deploy** when the new version is listed.
-To confirm that the new version was installed, check that the *Version History* tab looks like the screenshot below.
-
-![version-history](/images/guides/kots/priv-reg-ecr-version-history.png)
-
-Now we can inspect the changes in the definition files.
-Looking at the `deployment.yaml` upstream file, we see the image path as we set it in the [Update Definition Files](#3-update-definition-files) section.
-
-![admin-console-view-files-upstream-release2](/images/guides/kots/priv-reg-ecr-upstream-file-rel2.png)
-
-Because KOTS detects that it cannot pull this image anonymously, it pulls the image through the Replicated proxy using the private registry credentials that we configured. Looking at the `kustomization.yaml` downstream file, we can see that the image path is changed to use the Replicated proxy.
-
-![admin-console-view-files-downstream-release2](/images/guides/kots/priv-reg-ecr-downstream-file-rel2.png)
-
-Installing the new version should have created a new pod. If we run `kubectl describe pod` on the new NGINX pod, we can confirm that the image was in fact pulled from the ECR repository.
- -![admin-console-kubectl-describe-release2](/images/guides/kots/priv-reg-ecr-kubectl-describe-rel2.png) - -* * * - -## Related Topics - -- [Connecting to an External Registry](packaging-private-images/) - -- [Replicated Community Thread on AWS Roles and Permissions](https://help.replicated.com/community/t/what-are-the-minimal-aws-iam-permissions-needed-to-proxy-images-from-elastic-container-registry-ecr/267) - -- [AWS ECR Managed Policies Documentation](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecr_managed_policies.html) - -================ -File: docs/vendor/tutorial-embedded-cluster-create-app.mdx -================ -# Step 1: Create an Application - -To begin, install the Replicated CLI and create an application in the Replicated Vendor Portal. - -An _application_ is an object that has its own customers, channels, releases, license fields, and more. A single team can have more than one application. It is common for teams to have multiple applications for the purpose of onboarding, testing, and iterating. - -To create an application: - -1. Install the Replicated CLI: - - ``` - brew install replicatedhq/replicated/cli - ``` - For more installation options, see [Installing the Replicated CLI](/reference/replicated-cli-installing). - -1. Authorize the Replicated CLI: - - ``` - replicated login - ``` - In the browser window that opens, complete the prompts to log in to your vendor account and authorize the CLI. - -1. Create an application named `Gitea`: - - ``` - replicated app create Gitea - ``` - -1. Set the `REPLICATED_APP` environment variable to the application that you created. This allows you to interact with the application using the Replicated CLI without needing to use the `--app` flag with every command: - - 1. Get the slug for the application that you created: - - ``` - replicated app ls - ``` - **Example output**: - ``` - ID NAME SLUG SCHEDULER - 2WthxUIfGT13RlrsUx9HR7So8bR Gitea gitea-kite kots - ``` - In the example above, the application slug is `gitea-kite`. - - :::note - The application _slug_ is a unique string that is generated based on the application name. You can use the application slug to interact with the application through the Replicated CLI and the Vendor API v3. The application name and slug are often different from one another because it is possible to create more than one application with the same name. - ::: - - 1. Set the `REPLICATED_APP` environment variable to the application slug. - - **Example:** - - ``` - export REPLICATED_APP=gitea-kite - ``` - -## Next Step - -Add the Replicated SDK to the Helm chart and package the chart to an archive. See [Step 2: Package the Helm Chart](tutorial-embedded-cluster-package-chart). - -## Related Topics - -* [Create an Application](/vendor/vendor-portal-manage-app#create-an-application) -* [Installing the Replicated CLI](/reference/replicated-cli-installing) -* [replicated app create](/reference/replicated-cli-app-create) - -================ -File: docs/vendor/tutorial-embedded-cluster-create-customer.mdx -================ -# Step 4: Create an Embedded Cluster-Enabled Customer - -After promoting the release, create a customer with the Replicated KOTS and Embedded Cluster entitlements so that you can install the release with Embedded Cluster. A _customer_ represents a single licensed user of your application. - -To create a customer: - -1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers > Create customer**. 
-
-   The **Create a new customer** page opens:
-
-   ![Create a new customer page in the Vendor Portal](/images/create-customer.png)
-
-   [View a larger version of this image](/images/create-customer.png)
-
-1. For **Customer name**, enter a name for the customer. For example, `Example Customer`.
-
-1. For **Channel**, select **Unstable**. This allows the customer to install releases promoted to the Unstable channel.
-
-1. For **License type**, select **Development**.
-
-1. For **License options**, enable the following entitlements:
-   * **KOTS Install Enabled**
-   * **Embedded Cluster Enabled**
-
-1. Click **Save Changes**.
-
-## Next Step
-
-Get the Embedded Cluster installation commands and install. See [Step 5: Install the Release on a VM](tutorial-embedded-cluster-install).
-
-## Related Topics
-
-* [About Customers](/vendor/licenses-about)
-* [Creating and Managing Customers](/vendor/releases-creating-customer)
-
-================
-File: docs/vendor/tutorial-embedded-cluster-create-release.mdx
-================
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-import HelmChartCr from "../partials/getting-started/_gitea-helmchart-cr-ec.mdx"
-import KotsCr from "../partials/getting-started/_gitea-kots-app-cr-ec.mdx"
-import K8sCr from "../partials/getting-started/_gitea-k8s-app-cr.mdx"
-import EcCr from "../partials/embedded-cluster/_ec-config.mdx"
-
-# Step 3: Add the Chart Archive to a Release
-
-Next, add the Helm chart archive to a new release for the application in the Replicated Vendor Portal. The purpose of this step is to configure a release that supports installation with Replicated Embedded Cluster.
-
-A _release_ represents a single version of your application and contains your application files. Each release is promoted to one or more _channels_. Channels provide a way to progress releases through the software development lifecycle: from internal testing, to sharing with early-adopters, and finally to making the release generally available.
-
-To create a release:
-
-1. In the `gitea` directory, create a subdirectory named `manifests`:
-
-   ```
-   mkdir manifests
-   ```
-
-   You will add the files required to support installation with Replicated KOTS to this subdirectory.
-
-1. Move the Helm chart archive that you created to `manifests`:
-
-   ```
-   mv gitea-1.0.6.tgz manifests
-   ```
-
-1. In `manifests`, create the YAML manifests required by KOTS:
-   ```
-   cd manifests
-   ```
-   ```
-   touch gitea.yaml kots-app.yaml k8s-app.yaml embedded-cluster.yaml
-   ```
-
-1. In each of the files that you created, paste the corresponding YAML provided in the tabs below:
-
-   <Tabs>
-   <TabItem value="helmchart" label="gitea.yaml" default>
-    <h5>Description</h5>
-    <p>The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart. The <code>name</code> and <code>chartVersion</code> listed in the HelmChart custom resource must match the name and version of a Helm chart archive in the release. The <a href="/vendor/helm-optional-value-keys#conditionally-set-values"><code>optionalValues</code></a> field sets the specified Helm values when a given conditional statement evaluates to true. In this case, if the application is installed with Embedded Cluster, then the Gitea service type is set to <code>NodePort</code> and the node port is set to <code>"32000"</code>.
This will allow Gitea to be accessed from the local machine after deployment.</p> - <h5>YAML</h5> - <HelmChartCr/> - </TabItem> - <TabItem value="kots-app" label="kots-app.yaml"> - <h5>Description</h5> - <p>The KOTS Application custom resource enables features in the Replicated Admin Console such as branding, release notes, application status indicators, and custom graphs.</p><p>The YAML below provides a name for the application to display in the Admin Console, adds a custom <em>status informer</em> that displays the status of the <code>gitea</code> Deployment resource in the Admin Console dashboard, adds a custom application icon, and adds the port where the Gitea service can be accessed so that the user can open the application after installation.</p> - <h5>YAML</h5> - <KotsCr/> - </TabItem> - <TabItem value="k8s-app" label="k8s-app.yaml"> - <h5>Description</h5> - <p>The Kubernetes Application custom resource supports functionality such as including buttons and links on the Replicated Admin Console dashboard. The YAML below adds an <strong>Open App</strong> button to the Admin Console dashboard that opens the application using the service port defined in the KOTS Application custom resource.</p> - <h5>YAML</h5> - <K8sCr/> - </TabItem> - <TabItem value="ec" label="embedded-cluster.yaml"> - <h5>Description</h5> - <p>To install your application with Embedded Cluster, an Embedded Cluster Config must be present in the release. At minimum, the Embedded Cluster Config sets the version of Embedded Cluster that will be installed. You can also define several characteristics about the cluster.</p> - <h5>YAML</h5> - <EcCr/> - </TabItem> - </Tabs> - -1. Lint: - - ```bash - replicated release lint --yaml-dir . - ``` - ```bash - RULE TYPE FILENAME LINE MESSAGE - config-spec warn Missing config spec - preflight-spec warn Missing preflight spec - troubleshoot-spec warn Missing troubleshoot spec - nonexistent-status-informer-object warn kots-app.yaml 8 Status informer points to a nonexistent kubernetes object. If this is a Helm resource, this warning can be ignored. - ``` - :::note - You can ignore any warning messages for the purpose of this tutorial. - ::: - -1. Create a release: - - ``` - replicated release create --yaml-dir . - ``` - **Example output**: - ``` - • Reading manifests from . ✓ - • Creating Release ✓ - • SEQUENCE: 1 - ``` - -1. Log in to the Vendor Portal and go to **Releases**. - - The release that you created is listed under **All releases**. - - ![Release page in the Vendor Portal with one release](/images/gitea-ec-release-seq-1.png) - - [View a larger version of this image](/images/gitea-ec-release-seq-1.png) - -1. Click the dot menu then **Edit release** to view the files in the release. - - ![dot menu](/images/gitea-ec-release-edit-button.png) - - [View a larger version of this image](/images/gitea-ec-release-edit-button.png) - - In the release editor, you can see the manifest files that you created, the Helm chart `.tgz` archive, and the `Chart.yaml` and `values.yaml` files for the Gitea Helm chart. You can also see the same warning messages that were displayed in the CLI output. - - ![Edit Release page in the Vendor Portal](/images/gitea-ec-release-edit-seq-1.png) - - [View a larger version of this image](/images/gitea-ec-release-edit-seq-1.png) - -1. At the top of the page, click **Promote**. - -1. In the dialog, for **Which channels you would like to promote this release to?**, select **Unstable**. Unstable is a default channel that is intended for use with internal testing. 
Click **Promote**.
-
- <img alt="Promote release dialog" src="/images/release-promote.png" width="400px"/>
-
- [View a larger version of this image](/images/release-promote.png)
-
-## Next Step
-
-Create a customer with the Embedded Cluster entitlement so that you can install the release using Embedded Cluster. See [Step 4: Create an Embedded Cluster-Enabled Customer](tutorial-embedded-cluster-create-customer).
-
-## Related Topics
-
-* [About Channels and Releases](/vendor/releases-about)
-* [Configuring the HelmChart Custom Resource](/vendor/helm-native-v2-using)
-* [Embedded Cluster Config](/reference/embedded-config)
-* [Setting Helm Values with KOTS](/vendor/helm-optional-value-keys)
-
-================
-File: docs/vendor/tutorial-embedded-cluster-install.mdx
-================
-import KotsVerReq from "../partials/replicated-sdk/_kots-version-req.mdx"
-
-# Step 5: Install the Release on a VM
-
-Next, get the customer-specific Embedded Cluster installation commands and then install the release on a Linux VM.
-
-To install the release with Embedded Cluster:
-
-1. In the [Vendor Portal](https://vendor.replicated.com), go to **Customers**. Click on the name of the customer you created.
-
-1. Click **Install instructions > Embedded cluster**.
-
- <img alt="Customer install instructions dropdown" src="/images/customer-install-instructions-dropdown.png" width="600px"/>
-
- [View a larger version of this image](/images/customer-install-instructions-dropdown.png)
-
- The **Embedded cluster install instructions** dialog opens.
-
- <img alt="Embedded Cluster install instructions dialog" src="/images/embedded-cluster-install-dialog-latest.png" width="600px"/>
-
- [View a larger version of this image](/images/embedded-cluster-install-dialog-latest.png)
-
-1. On the command line, SSH onto your Linux VM.
-
-1. Run the first command in the **Embedded cluster install instructions** dialog to download the latest release.
-
-1. Run the second command to extract the release.
-
-1. Run the third command to install the release.
-
-1. When prompted, enter a password for accessing the KOTS Admin Console.
-
- The installation command takes a few minutes to complete.
-
-1. When the installation command completes, go to the URL provided in the output to log in to the Admin Console.
-
- **Example output:**
-
- ```bash
- ✔ Host files materialized
- ? Enter an Admin Console password: ********
- ? Confirm password: ********
- ✔ Node installation finished
- ✔ Storage is ready!
- ✔ Embedded Cluster Operator is ready!
- ✔ Admin Console is ready!
- ✔ Finished!
- Visit the admin console to configure and install gitea-kite: http://104.155.145.60:30000
- ```
-
- At this point, the cluster is provisioned and the KOTS Admin Console is deployed, but the application is not yet installed.
-
-1. Bypass the browser TLS warning by clicking **Continue to Setup**.
-
-1. Click **Advanced > Proceed**.
-
-1. On the **HTTPS for the Gitea Admin Console** page, select **Self-signed** and click **Continue**.
-
-1. On the login page, enter the Admin Console password that you created during installation and click **Log in**.
-
-1. On the **Nodes** page, you can view details about the VM where you installed the cluster, including its node role, status, CPU, and memory. You can also optionally add nodes on this page before deploying the application. Click **Continue**.
-
- The Admin Console dashboard opens.
-
-1. In the **Version** section, for version `0.1.0`, click **Deploy** then **Yes, Deploy**.
-
- The application status changes from Missing to Unavailable while the `gitea` Deployment is being created.
-
-1. After a few minutes, when the application status is Ready, click **Open App** to view the Gitea application in a browser:
-
- ![Admin console dashboard showing ready status](/images/gitea-ec-ready.png)
-
- [View a larger version of this image](/images/gitea-ec-ready.png)
-
- <img alt="Gitea app landing page" src="/images/gitea-app.png" width="600px"/>
-
- [View a larger version of this image](/images/gitea-app.png)
-
-1. In another browser window, open the [Vendor Portal](https://vendor.replicated.com/) and go to **Customers**. Select the customer that you created.
-
- On the **Reporting** page for the customer, you can see details about the customer's license and installed instances:
-
- ![Customer reporting page](/images/gitea-customer-reporting-ec.png)
-
- [View a larger version of this image](/images/gitea-customer-reporting-ec.png)
-
-1. On the **Reporting** page, under **Instances**, click on the instance that you just installed to open the instance details page.
-
- On the instance details page, you can see additional insights such as the version of Embedded Cluster that is running, instance status and uptime, and more:
-
- ![Customer instance details page](/images/gitea-instance-insights-ec.png)
-
- [View a larger version of this image](/images/gitea-instance-insights-ec.png)
-
-1. (Optional) Reset the node to remove the cluster and the application from the node. This is useful for iteration and development so that you can reset a machine and reuse it instead of having to procure another machine.
-
- ```bash
- sudo ./APP_SLUG reset --reboot
- ```
- Where `APP_SLUG` is the unique slug for the application that you created. You can find the application slug by running `replicated app ls` on your local machine.
-
-## Summary
-
-Congratulations! As part of this tutorial, you created a release in the Replicated Vendor Portal and installed the release with Replicated Embedded Cluster in a VM. To learn more about Embedded Cluster, see [Embedded Cluster Overview](embedded-overview).
-
-## Related Topics
-
-* [Embedded Cluster Overview](embedded-overview)
-* [Customer Reporting](/vendor/customer-reporting)
-* [Instance Details](/vendor/instance-insights-details)
-* [Reset a Node](/vendor/embedded-using#reset-a-node)
-
-================
-File: docs/vendor/tutorial-embedded-cluster-package-chart.mdx
-================
-import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx"
-import UnauthorizedError from "../partials/replicated-sdk/_401-unauthorized.mdx"
-
-# Step 2: Package the Gitea Helm Chart
-
-Next, get the sample Gitea Helm chart from Bitnami. Add the Replicated SDK as a dependency of the chart, then package the chart into a `.tgz` archive. The purpose of this step is to prepare the Helm chart to be added to a release.
-
-The Replicated SDK is a Helm chart that can be optionally added as a dependency of your application Helm chart. The SDK is installed as a small service running alongside your application, and provides an in-cluster API that you can use to embed Replicated features into your application. Additionally, the Replicated SDK provides access to insights and telemetry for instances of your application installed with the Helm CLI.
-
-To add the Replicated SDK and package the Helm chart:
-
-1. 
Run the following command to pull and untar version 1.0.6 of the Bitnami Gitea Helm chart: - - ``` - helm pull --untar oci://registry-1.docker.io/bitnamicharts/gitea --version 1.0.6 - ``` - For more information about this chart, see the [bitnami/gitea](https://github.com/bitnami/charts/tree/main/bitnami/gitea) repository in GitHub. - -1. Change to the new `gitea` directory that was created: - ``` - cd gitea - ``` -1. View the files in the directory: - ``` - ls - ``` - The directory contains the following files: - ``` - Chart.lock Chart.yaml README.md charts templates values.yaml - ``` - -1. In the `Chart.yaml` file, add the Replicated SDK as a dependency: - - <DependencyYaml/> - -1. Update dependencies and package the Helm chart to a `.tgz` chart archive: - - ```bash - helm package . --dependency-update - ``` - <UnauthorizedError/> - -## Next Step - -Create a release using the Helm chart archive. See [Step 3: Add the Chart Archive to a Release](tutorial-embedded-cluster-create-release). - -## Related Topics - -* [Packaging a Helm Chart for a Release](/vendor/helm-install-release.md) -* [About the Replicated SDK](/vendor/replicated-sdk-overview) -* [Helm Package](https://helm.sh/docs/helm/helm_package/) - -================ -File: docs/vendor/tutorial-embedded-cluster-setup.mdx -================ -import Requirements from "../partials/embedded-cluster/_requirements.mdx" - -# Introduction and Setup - -This topic provides a summary of the goals and outcomes for the tutorial and also lists the prerequisites to set up your environment before you begin. - -## Summary - -This tutorial introduces you to installing an application on a Linux virtual machine (VM) using Replicated Embedded Cluster. Embedded Cluster allows you to distribute a Kubernetes cluster and your application together as a single appliance, making it easy for enterprise users to install, update, and manage the application and the cluster in tandem. - -In this tutorial, you use a sample application to learn how to: - -* Add the Embedded Cluster Config to a release -* Use Embedded Cluster to install the application on a Linux VM - -## Set Up the Environment - -Before you begin, ensure that you have access to a VM that meets the requirements for Embedded Cluster: - -<Requirements/> - -## Next Step - -Install the Replicated CLI and create an application in the Replicated Vendor Portal. See [Step 1: Create an Application](/vendor/tutorial-embedded-cluster-create-app). - -================ -File: docs/vendor/tutorial-kots-helm-create-app.md -================ -# Step 2: Create an Application - -Next, install the Replicated CLI and then create an application. - -An _application_ is an object that has its own customers, channels, releases, license fields, and more. A single team can have more than one application. It is common for teams to have multiple applications for the purpose of onboarding, testing, and iterating. - -To create an application: - -1. Install the Replicated CLI: - - ``` - brew install replicatedhq/replicated/cli - ``` - For more installation options, see [Installing the Replicated CLI](/reference/replicated-cli-installing). - -1. Authorize the Replicated CLI: - - ``` - replicated login - ``` - In the browser window that opens, complete the prompts to log in to your vendor account and authorize the CLI. - -1. Create an application named `Gitea`: - - ``` - replicated app create Gitea - ``` - -1. Set the `REPLICATED_APP` environment variable to the application that you created. 
This allows you to interact with the application using the Replicated CLI without needing to use the `--app` flag with every command:
-
- 1. Get the slug for the application that you created:
-
- ```
- replicated app ls
- ```
- **Example output**:
- ```
- ID                            NAME     SLUG          SCHEDULER
- 2WthxUIfGT13RlrsUx9HR7So8bR   Gitea    gitea-boxer   kots
- ```
- In the example above, the application slug is `gitea-boxer`.
-
- :::note
- The application _slug_ is a unique string that is generated based on the application name. You can use the application slug to interact with the application through the Replicated CLI and the Vendor API v3. The application name and slug are often different from one another because it is possible to create more than one application with the same name.
- :::
-
- 1. Set the `REPLICATED_APP` environment variable to the application slug.
-
- **Example:**
-
- ```
- export REPLICATED_APP=gitea-boxer
- ```
-
-## Next Step
-
-Add the Replicated SDK to the Helm chart and package the chart to an archive. See [Step 3: Package the Helm Chart](tutorial-kots-helm-package-chart).
-
-## Related Topics
-
-* [Create an Application](/vendor/vendor-portal-manage-app#create-an-application)
-* [Installing the Replicated CLI](/reference/replicated-cli-installing)
-* [replicated app create](/reference/replicated-cli-app-create)
-
-================
-File: docs/vendor/tutorial-kots-helm-create-customer.md
-================
-# Step 5: Create a KOTS-Enabled Customer
-
-After promoting the release, create a customer with the KOTS entitlement so that you can install the release with KOTS. A _customer_ represents a single licensed user of your application.
-
-To create a customer:
-
-1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers > Create customer**.
-
- The **Create a new customer** page opens:
-
- ![Create a new customer page in the Vendor Portal](/images/create-customer.png)
-
- [View a larger version of this image](/images/create-customer.png)
-
-1. For **Customer name**, enter a name for the customer. For example, `KOTS Customer`.
-
-1. For **Channel**, select **Unstable**. This allows the customer to install releases promoted to the Unstable channel.
-
-1. For **License type**, select **Development**.
-
-1. For **License options**, verify that **KOTS Install Enabled** is enabled. This is the entitlement that allows the customer to install with KOTS.
-
-1. Click **Save Changes**.
-
-1. On the **Manage customer** page for the customer, click **Download license**. You will use the license file to install with KOTS.
-
- ![Download license button on the customer page](/images/customer-download-license.png)
-
- [View a larger version of this image](/images/customer-download-license.png)
-
-## Next Step
-
-Get the KOTS installation command and install. See [Step 6: Install the Release with KOTS](tutorial-kots-helm-install-kots). 
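-
-Before moving on, note that customers can also be created from the command line. The following is a minimal sketch rather than a required part of this tutorial; it assumes a recent Replicated CLI version that supports the `replicated customer create` and `replicated customer download-license` commands, and the exact flags may differ (run each command with `--help` to confirm):
-
-```bash
-# Create a development customer assigned to the Unstable channel.
-# Depending on the CLI version, entitlements such as KOTS Install Enabled
-# may still need to be toggled in the Vendor Portal UI.
-replicated customer create --name "KOTS Customer" --channel Unstable
-
-# Download the customer's license file for use with the KOTS install
-replicated customer download-license --customer "KOTS Customer" > license.yaml
-```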
- -## Related Topics - -* [About Customers](/vendor/licenses-about) -* [Creating and Managing Customers](/vendor/releases-creating-customer) - -================ -File: docs/vendor/tutorial-kots-helm-create-release.md -================ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import HelmChartCr from "../partials/getting-started/_gitea-helmchart-cr.mdx" -import KotsCr from "../partials/getting-started/_gitea-kots-app-cr.mdx" -import K8sCr from "../partials/getting-started/_gitea-k8s-app-cr.mdx" - -# Step 4: Add the Chart Archive to a Release - -Next, add the Helm chart archive to a new release for the application in the Replicated Vendor Portal. The purpose of this step is to configure a release that supports installation with both Replicated KOTS and with the Helm CLI. - -A _release_ represents a single version of your application and contains your application files. Each release is promoted to one or more _channels_. Channels provide a way to progress releases through the software development lifecycle: from internal testing, to sharing with early-adopters, and finally to making the release generally available. - -To create a release: - -1. In the `gitea` directory, create a subdirectory named `manifests`: - - ``` - mkdir manifests - ``` - - You will add the files required to support installation with Replicated KOTS to this subdirectory. - -1. Move the Helm chart archive that you created to `manifests`: - - ``` - mv gitea-1.0.6.tgz manifests - ``` - -1. In `manifests`, create the YAML manifests required by KOTS: - ``` - cd manifests - ``` - ``` - touch gitea.yaml kots-app.yaml k8s-app.yaml - ``` - -1. In each of the files that you created, paste the corresponding YAML provided in the tabs below: - - <Tabs> - <TabItem value="helmchart" label="gitea.yaml" default> - <h5>Description</h5> - <p>The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart. The <code>name</code> and <code>chartVersion</code> listed in the HelmChart custom resource must match the name and version of a Helm chart archive in the release. Each Helm chart archive in a release requires a unique HelmChart custom resource.</p> - <h5>YAML</h5> - <HelmChartCr/> - </TabItem> - <TabItem value="kots-app" label="kots-app.yaml"> - <h5>Description</h5> - <p>The KOTS Application custom resource enables features in the KOTS Admin Console such as branding, release notes, port forwarding, dashboard buttons, application status indicators, and custom graphs.</p><p>The YAML below provides a name for the application to display in the Admin Console, adds a custom <em>status informer</em> that displays the status of the <code>gitea</code> Deployment resource in the Admin Console dashboard, adds a custom application icon, and creates a port forward so that the user can open the Gitea application in a browser.</p> - <h5>YAML</h5> - <KotsCr/> - </TabItem> - <TabItem value="k8s-app" label="k8s-app.yaml"> - <h5>Description</h5> - <p>The Kubernetes Application custom resource supports functionality such as including buttons and links on the KOTS Admin Console dashboard. The YAML below adds an <strong>Open App</strong> button to the Admin Console dashboard that opens the application using the port forward configured in the KOTS Application custom resource.</p> - <h5>YAML</h5> - <K8sCr/> - </TabItem> - </Tabs> - -1. From the `manifests` directory, lint the YAML files to confirm that there are no errors: - - ``` - replicated release lint --yaml-dir . 
- ``` - `--yaml-dir` is the path to the directory that contains the Helm chart archive and the manifest files required by KOTS. - - **Example output**: - - ``` - RULE TYPE FILENAME LINE MESSAGE - config-spec warn Missing config spec - preflight-spec warn Missing preflight spec - troubleshoot-spec warn Missing troubleshoot spec - nonexistent-status-informer-object warn kots-app.yaml 8 Status informer points to a nonexistent kubernetes object. If this is a Helm resource, this warning can be ignored. - ``` - :::note - The output includes warning messages that list missing manifest files. These manifests control additional KOTS functionality and can be ignored for the purpose of this tutorial. The `nonexistent-status-informer-object` warning can also be ignored because the `gitea` Deployment resource that was added as a status informer in the KOTS Application custom resource is a Helm resource. - ::: - -1. Create a release: - - ``` - replicated release create --yaml-dir . - ``` - **Example output**: - ``` - • Reading manifests from . ✓ - • Creating Release ✓ - • SEQUENCE: 1 - ``` - -1. Log in to the Vendor Portal and go to **Releases**. - - The release that you created is listed under **All releases**. - - ![Release page in the Vendor Portal with one release](/images/tutorial-kots-helm-release-seq-1.png) - - [View a larger version of this image](/images/tutorial-kots-helm-release-seq-1.png) - -1. Click **Edit release** to view the files in the release. - - In the release editor, you can see the manifest files that you created, the Helm chart `.tgz` archive, and the `Chart.yaml` and `values.yaml` files for the Gitea Helm chart. You can also see the same warning messages that were displayed in the CLI output. - - ![Edit Release page in the Vendor Portal](/images/tutorial-kots-helm-release-edit-seq-1.png) - - [View a larger version of this image](/images/tutorial-kots-helm-release-edit-seq-1.png) - -1. At the top of the page, click **Promote**. - -1. In the dialog, for **Which channels you would like to promote this release to?**, select **Unstable**. Unstable is a default channel that is intended for use with internal testing. Click **Promote**. - - <img alt="Promote release dialog" src="/images/release-promote.png" width="400px"/> - - [View a larger version of this image](/images/release-promote.png) - -## Next Step - -Create a customer with the KOTS entitlement so that you can install the release in your cluster using Replicated KOTS. See [Step 5: Create a KOTS-Enabled Customer](tutorial-kots-helm-create-customer). - -## Related Topics - -* [About Channels and Releases](/vendor/releases-about) -* [Configuring the HelmChart Custom Resource](/vendor/helm-native-v2-using) - -================ -File: docs/vendor/tutorial-kots-helm-get-chart.md -================ -# Step 1: Get the Sample Chart and Test - -To begin, get the sample Gitea Helm chart from Bitnami, install the chart in your cluster using the Helm CLI, and then uninstall. The purpose of this step is to confirm that you can successfully install and access the application before adding the chart to a release in the Replicated Vendor Portal. - -To get the sample Gitea Helm chart and test installation: - -1. Run the following command to pull and untar version 1.0.6 of the Bitnami Gitea Helm chart: - - ``` - helm pull --untar oci://registry-1.docker.io/bitnamicharts/gitea --version 1.0.6 - ``` - For more information about this chart, see the [bitnami/gitea](https://github.com/bitnami/charts/tree/main/bitnami/gitea) repository in GitHub. - -1. 
Change to the new `gitea` directory that was created: - ``` - cd gitea - ``` -1. View the files in the directory: - ``` - ls - ``` - The directory contains the following files: - ``` - Chart.lock Chart.yaml README.md charts templates values.yaml - ``` -1. Install the Gitea chart in your cluster: - - ``` - helm install gitea . --namespace gitea --create-namespace - ``` - To view the full installation instructions from Bitnami, see [Installing the Chart](https://github.com/bitnami/charts/blob/main/bitnami/gitea/README.md#installing-the-chart) in the `bitnami/gitea` repository. - - When the chart is installed, the following output is displayed: - - ``` - NAME: gitea - LAST DEPLOYED: Tue Oct 24 12:44:55 2023 - NAMESPACE: gitea - STATUS: deployed - REVISION: 1 - TEST SUITE: None - NOTES: - CHART NAME: gitea - CHART VERSION: 1.0.6 - APP VERSION: 1.20.5 - - ** Please be patient while the chart is being deployed ** - - 1. Get the Gitea URL: - - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - Watch the status with: 'kubectl get svc --namespace gitea -w gitea' - - export SERVICE_IP=$(kubectl get svc --namespace gitea gitea --template "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}") - echo "Gitea URL: http://$SERVICE_IP/" - - WARNING: You did not specify a Root URL for Gitea. The rendered URLs in Gitea may not show correctly. In order to set a root URL use the rootURL value. - - 2. Get your Gitea login credentials by running: - - echo Username: bn_user - echo Password: $(kubectl get secret --namespace gitea gitea -o jsonpath="{.data.admin-password}" | base64 -d) - ``` - -1. Watch the `gitea` LoadBalancer service until an external IP is available: - - ``` - kubectl get svc gitea --namespace gitea --watch - ``` - -1. When the external IP for the `gitea` LoadBalancer service is available, run the commands provided in the output of the installation command to get the Gitea URL: - - ``` - export SERVICE_IP=$(kubectl get svc --namespace gitea gitea --template "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}") - echo "Gitea URL: http://$SERVICE_IP/" - ``` - -1. In a browser, go to the Gitea URL to confirm that you can see the welcome page for the application: - - <img alt="Gitea application webpage" src="/images/gitea-app.png" width="500px"/> - - [View a larger version of this image](/images/gitea-app.png) - -1. Uninstall the Helm chart: - - ``` - helm uninstall gitea --namespace gitea - ``` - This command removes all the Kubernetes components associated with the chart and uninstalls the `gitea` release. - -1. Delete the namespace: - - ``` - kubectl delete namespace gitea - ``` - -## Next Step - -Log in to the Vendor Portal and create an application. See [Step 2: Create an Application](tutorial-kots-helm-create-app). - -## Related Topics - -* [Helm Install](https://helm.sh/docs/helm/helm_install/) -* [Helm Uninstall](https://helm.sh/docs/helm/helm_uninstall/) -* [Helm Create](https://helm.sh/docs/helm/helm_create/) -* [Helm Package](https://helm.sh/docs/helm/helm_package/) -* [bitnami/gitea](https://github.com/bitnami/charts/blob/main/bitnami/gitea) - -================ -File: docs/vendor/tutorial-kots-helm-install-helm.md -================ -# Step 7: Install the Release with the Helm CLI - -Next, install the same release using the Helm CLI. All releases that contain one or more Helm charts can be installed with the Helm CLI. 
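-
-As a preview, the commands that the **Helm install instructions** dialog generates later in this step follow the general shape shown below. This is only a sketch: the app slug (`gitea-boxer`), email address, and license ID are placeholders, and the dialog fills in the real values for your customer.
-
-```bash
-# Log in to the Replicated registry, using the customer's email address
-# as the username and the customer's license ID as the password
-helm registry login registry.replicated.com \
-  --username customer@example.com \
-  --password LICENSE_ID
-
-# Install the chart from the channel-specific OCI path in the registry
-helm install gitea oci://registry.replicated.com/gitea-boxer/unstable/gitea
-```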
-
-All Helm charts included in a release are automatically pushed to the Replicated registry when the release is promoted to a channel. Helm CLI installations require that the customer has a valid email address to authenticate with the Replicated registry.
-
-To install the release with the Helm CLI:
-
-1. Create a new customer to test the Helm CLI installation:
-
- 1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers > Create customer**.
-
- The **Create a new customer** page opens:
-
- ![Create a new customer page in the Vendor Portal](/images/create-customer.png)
-
- [View a larger version of this image](/images/create-customer.png)
-
- 1. For **Customer name**, enter a name for the customer. For example, `Helm Customer`.
-
- 1. For **Channel**, select **Unstable**. This allows the customer to install releases promoted to the Unstable channel.
-
- 1. For **Customer email**, enter the email address for the customer. The customer email address is required to install the application with the Helm CLI. This email address is never used to send emails to customers.
-
- 1. For **License type**, select **Trial**.
-
- 1. (Optional) For **License options**, _disable_ the **KOTS Install Enabled** entitlement.
-
- 1. Click **Save Changes**.
-
-1. On the **Manage customer** page for the new customer, click **Helm install instructions**.
-
- ![Helm install instructions button](/images/tutorial-gitea-helm-customer-install-button.png)
-
- [View a larger version of this image](/images/tutorial-gitea-helm-customer-install-button.png)
-
- You will use the instructions provided in the **Helm install instructions** dialog to install the chart.
-
-1. Before you run the first command in the **Helm install instructions** dialog, create a `gitea` namespace for the installation:
-
- ```
- kubectl create namespace gitea
- ```
-
-1. Update the current kubectl context to target the new `gitea` namespace. This ensures that the chart is installed in the `gitea` namespace without requiring you to set the `--namespace` flag with the `helm install` command:
-
- ```
- kubectl config set-context --namespace=gitea --current
- ```
-
-1. Run the commands provided in the **Helm install instructions** dialog to log in to the registry and install the Helm chart.
-
- <img alt="Helm install instructions dialog" src="/images/tutorial-gitea-helm-install-instructions.png" width="500px"/>
-
- [View a larger version of this image](/images/tutorial-gitea-helm-install-instructions.png)
-
- :::note
- You can ignore the **No preflight checks found** warning for the purpose of this tutorial. This warning appears because there are no specifications for preflight checks in the Helm chart archive.
- :::
-
-1. After the installation command completes, you can see that both the `gitea` Deployment and the Replicated SDK `replicated` Deployment were created:
-
- ```
- kubectl get deploy
- ```
- **Example output:**
- ```
- NAME         READY   UP-TO-DATE   AVAILABLE   AGE
- gitea        0/1     1            0           35s
- replicated   1/1     1            1           35s
- ```
-
-1. Watch the `gitea` LoadBalancer service until an external IP is available:
-
- ```
- kubectl get svc gitea --watch
- ```
-
-1. After an external IP address is available for the `gitea` LoadBalancer service, follow the instructions in the output of the installation command to get the Gitea URL and then confirm that you can open the application in a browser.
-
-1. In another browser window, open the [Vendor Portal](https://vendor.replicated.com/) and go to **Customers**. Select the customer that you created for the Helm CLI installation.
-
- On the **Reporting** page for the customer, because the Replicated SDK was installed alongside the Gitea Helm chart, you can see details about the customer's license and installed instances:
-
- ![Customer reporting](/images/tutorial-gitea-helm-reporting.png)
-
- [View a larger version of this image](/images/tutorial-gitea-helm-reporting.png)
-
-1. On the **Reporting** page, under **Instances**, click on the instance that you just installed to open the instance details page.
-
- On the instance details page, you can see additional insights such as the cluster where the application is installed, the version of the Replicated SDK running in the cluster, instance status and uptime, and more:
-
- ![Customer instance details](/images/tutorial-gitea-helm-instance.png)
-
- [View a larger version of this image](/images/tutorial-gitea-helm-instance.png)
-
-1. Uninstall the Helm chart and the Replicated SDK:
-
- ```
- helm uninstall gitea
- ```
-
-1. Delete the `gitea` namespace:
-
- ```
- kubectl delete namespace gitea
- ```
-
-## Summary
-
-Congratulations! As part of this tutorial, you created a release in the Replicated Vendor Portal and installed the release with both KOTS and the Helm CLI.
-
-## Related Topics
-
-* [Installing with Helm](/vendor/install-with-helm)
-* [About the Replicated SDK](/vendor/replicated-sdk-overview)
-* [Helm Uninstall](https://helm.sh/docs/helm/helm_uninstall/)
-* [Helm Delete](https://helm.sh/docs/helm/helm_delete/)
-
-================
-File: docs/vendor/tutorial-kots-helm-install-kots.md
-================
-import KotsVerReq from "../partials/replicated-sdk/_kots-version-req.mdx"
-
-# Step 6: Install the Release with KOTS
-
-Next, get the KOTS installation command from the Unstable channel in the Vendor Portal and then install the release using the customer license that you downloaded.
-
-To install the release with KOTS:
-
-1. In the [Vendor Portal](https://vendor.replicated.com), go to **Channels**. From the **Unstable** channel card, under **Install**, copy the **KOTS Install** command.
-
- ![KOTS Install tab on the Unstable channel card](/images/helm-tutorial-unstable-kots-install-command.png)
-
- [View a larger version of this image](/images/helm-tutorial-unstable-kots-install-command.png)
-
-1. On the command line, run the **KOTS Install** command that you copied:
-
- ```bash
- curl https://kots.io/install | bash
- kubectl kots install $REPLICATED_APP/unstable
- ```
-
- This installs the latest version of the KOTS CLI and the Replicated KOTS Admin Console. The Admin Console provides a user interface where you can upload the customer license file and deploy the application.
-
- For additional KOTS CLI installation options, including how to install without root access, see [Installing the KOTS CLI](/reference/kots-cli-getting-started).
-
- :::note
- <KotsVerReq/>
- :::
-
-1. Complete the installation command prompts:
-
- 1. For `Enter the namespace to deploy to`, enter `gitea`.
-
- 1. For `Enter a new password to be used for the Admin Console`, provide a password to access the Admin Console.
-
- When the Admin Console is ready, the command prints the URL where you can access the Admin Console. At this point, the KOTS CLI is installed and the Admin Console is running, but the application is not yet deployed.
-
- **Example output:**
-
- ```bash
- Enter the namespace to deploy to: gitea
- • Deploying Admin Console
- • Creating namespace ✓
- • Waiting for datastore to be ready ✓
- Enter a new password for the admin console (6+ characters): ••••••••
- • Waiting for Admin Console to be ready ✓
-
- • Press Ctrl+C to exit
- • Go to http://localhost:8800 to access the Admin Console
- ```
-
-1. With the port forward running, in a browser, go to `http://localhost:8800` to access the Admin Console.
-
-1. On the login page, enter the password that you created.
-
-1. On the license page, select the license file that you downloaded previously and click **Upload license**.
-
- The Admin Console dashboard opens. The application status changes from Missing to Unavailable while the `gitea` Deployment is being created:
-
- ![Admin console dashboard](/images/tutorial-gitea-unavailable.png)
-
- [View a larger version of this image](/images/tutorial-gitea-unavailable.png)
-
-1. While waiting for the `gitea` Deployment to be created, do the following:
-
- 1. On the command line, press Ctrl+C to exit the port forward.
-
- 1. Watch for the `gitea` Deployment to become ready:
-
- ```
- kubectl get deploy gitea --namespace gitea --watch
- ```
-
- 1. After the `gitea` Deployment is ready, confirm that an external IP for the `gitea` LoadBalancer service is available:
-
- ```
- kubectl get svc gitea --namespace gitea
- ```
-
- 1. Start the port forward again to access the Admin Console:
-
- ```
- kubectl kots admin-console --namespace gitea
- ```
-
- 1. Go to `http://localhost:8800` to open the Admin Console.
-
-1. On the Admin Console dashboard, the application status is now displayed as Ready and you can click **Open App** to view the Gitea application in a browser:
-
- ![Admin console dashboard showing ready status](/images/tutorial-gitea-ready.png)
-
- [View a larger version of this image](/images/tutorial-gitea-ready.png)
-
-1. In another browser window, open the [Vendor Portal](https://vendor.replicated.com/) and go to **Customers**. Select the customer that you created.
-
- On the **Reporting** page for the customer, you can see details about the customer's license and installed instances:
-
- ![Customer reporting page](/images/tutorial-gitea-customer-reporting.png)
-
- [View a larger version of this image](/images/tutorial-gitea-customer-reporting.png)
-
-1. On the **Reporting** page, under **Instances**, click on the instance that you just installed to open the instance details page.
-
- On the instance details page, you can see additional insights such as the cluster where the application is installed, the version of KOTS running in the cluster, instance status and uptime, and more:
-
- ![Customer instance details page](/images/tutorial-gitea-instance-insights.png)
-
- [View a larger version of this image](/images/tutorial-gitea-instance-insights.png)
-
-1. Uninstall the Gitea application from your cluster so that you can install the same release again using the Helm CLI:
-
- ```bash
- kubectl kots remove $REPLICATED_APP --namespace gitea --undeploy
- ```
- **Example output**:
- ```
- • Removing application gitea-boxer reference from Admin Console and deleting associated resources from the cluster ✓
- • Application gitea-boxer has been removed
- ```
-
-1. Remove the Admin Console from the cluster:
-
- 1. Delete the namespace where the Admin Console is installed:
-
- ```
- kubectl delete namespace gitea
- ```
- 1. 
Delete the Admin Console ClusterRole and ClusterRoleBinding: - - ``` - kubectl delete clusterrole kotsadm-role - ``` - ``` - kubectl delete clusterrolebinding kotsadm-rolebinding - ``` - -## Next Step - -Install the same release with the Helm CLI. See [Step 7: Install the Release with the Helm CLI](tutorial-kots-helm-install-helm). - -## Related Topics - -* [kots install](/reference/kots-cli-install/) -* [Installing the KOTS CLI](/reference/kots-cli-getting-started/) -* [Deleting the Admin Console and Removing Applications](/enterprise/delete-admin-console) -* [Customer Reporting](customer-reporting) -* [Instance Details](instance-insights-details) - -================ -File: docs/vendor/tutorial-kots-helm-package-chart.md -================ -import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx" -import UnauthorizedError from "../partials/replicated-sdk/_401-unauthorized.mdx" - -# Step 3: Package the Helm Chart - -Next, add the Replicated SDK as a dependency of the Helm chart and then package the chart into a `.tgz` archive. The purpose of this step is to prepare the Helm chart to be added to a release. - -The Replicated SDK is a Helm chart that can be optionally added as a dependency of your application Helm chart. The SDK is installed as a small service running alongside your application, and provides an in-cluster API that you can use to embed Replicated features into your application. Additionally, the Replicated SDK provides access to insights and telemetry for instances of your application installed with the Helm CLI. - -To add the Replicated SDK and package the Helm chart: - -1. In your local file system, go to the `gitea` directory that was created as part of [Step 1: Get the Sample Chart and Test](tutorial-kots-helm-get-chart). - -1. In the `Chart.yaml` file, add the Replicated SDK as a dependency: - - <DependencyYaml/> - -1. Update dependencies and package the Helm chart to a `.tgz` chart archive: - - ```bash - helm package . --dependency-update - ``` - <UnauthorizedError/> - -## Next Step - -Create a release using the Helm chart archive. See [Step 4: Add the Chart Archive to a Release](tutorial-kots-helm-create-release). - -## Related Topics - -* [Packaging a Helm Chart for a Release](/vendor/helm-install-release.md) -* [About the Replicated SDK](/vendor/replicated-sdk-overview) -* [Helm Package](https://helm.sh/docs/helm/helm_package/) - -================ -File: docs/vendor/tutorial-kots-helm-setup.md -================ -# Introduction and Setup - -This topic provides a summary of the goals and outcomes for the tutorial and also lists the prerequisites to set up your environment before you begin. - -## Summary - -This tutorial introduces you to the Replicated Vendor Portal, the Replicated CLI, the Replicated SDK, and the Replicated KOTS installer. - -In this tutorial, you use a sample Helm chart to learn how to: - -* Add the Replicated SDK to a Helm chart as a dependency -* Create a release with the Helm chart using the Replicated CLI -* Add custom resources to the release so that it supports installation with both the Helm CLI and Replicated KOTS -* Install the release in a cluster using KOTS and the KOTS Admin Console -* Install the same release using the Helm CLI - -## Set Up the Environment - -Before you begin, do the following to set up your environment: - -* Ensure that you have kubectl access to a Kubernetes cluster. 
You can use any cloud provider or tool that you prefer to create a cluster, such as Google Kubernetes Engine (GKE), Amazon Web Services (AWS), or minikube.
-
- For information about installing kubectl and configuring kubectl access to a cluster, see the following in the Kubernetes documentation:
- * [Install Tools](https://kubernetes.io/docs/tasks/tools/)
- * [Command line tool (kubectl)](https://kubernetes.io/docs/reference/kubectl/)
-
-* Install the Helm CLI. To install the Helm CLI using Homebrew, run:
-
- ```
- brew install helm
- ```
-
- For more information, including alternative installation options, see [Install Helm](https://helm.sh/docs/intro/install/) in the Helm documentation.
-
-* Create a vendor account to access the Vendor Portal. See [Creating a Vendor Account](/vendor/vendor-portal-creating-account).
-
- :::note
- If you do not yet have a Vendor Portal team to join, you can sign up for a trial account. By default, trial accounts do not include access to Replicated KOTS. To get access to KOTS with your trial account so that you can complete this and other tutorials, contact Replicated at contact@replicated.com.
- :::
-
-## Next Step
-
-Get the sample Bitnami Helm chart and test installation with the Helm CLI. See [Step 1: Get the Sample Chart and Test](/vendor/tutorial-kots-helm-get-chart).
-
-================
-File: docs/vendor/tutorial-preflight-helm-add-spec.mdx
-================
-import DependencyYaml from "../partials/replicated-sdk/_dependency-yaml.mdx"
-
-# Step 2: Add a Preflight Spec to the Chart
-
-Create a preflight specification that fails if the cluster is running a version of Kubernetes earlier than 1.23.0, and add the specification to the Gitea chart as a Kubernetes Secret.
-
-To add a preflight specification to the Gitea chart:
-
-1. In the `gitea/templates` directory, create a `gitea-preflights.yaml` file:
-
- ```
- touch templates/gitea-preflights.yaml
- ```
-
-1. In the `gitea-preflights.yaml` file, add the following YAML to create a Kubernetes Secret with a preflight check specification:
-
- ```yaml
- apiVersion: v1
- kind: Secret
- metadata:
-   labels:
-     troubleshoot.sh/kind: preflight
-   name: gitea-preflight-checks
- stringData:
-   preflight.yaml: |
-     apiVersion: troubleshoot.sh/v1beta2
-     kind: Preflight
-     metadata:
-       name: gitea-preflight-checks
-     spec:
-       analyzers:
-         - clusterVersion:
-             outcomes:
-               - fail:
-                   when: "< 1.23.0"
-                   message: |-
-                     Your cluster is running a version of Kubernetes that is not supported and your installation will not succeed. To continue, upgrade your cluster to Kubernetes 1.23.0 or later.
-                   uri: https://www.kubernetes.io
-               - pass:
-                   message: Your cluster is running the required version of Kubernetes.
- ```
-
- The YAML above defines a preflight check that fails if the target cluster is running a version of Kubernetes earlier than 1.23.0. The preflight check also includes a message to the user that describes the failure and lists the required Kubernetes version. The `troubleshoot.sh/kind: preflight` label is required to run preflight checks defined in Secrets.
-
-1. In the Gitea `Chart.yaml` file, add the Replicated SDK as a dependency:
-
- <DependencyYaml/>
-
- The SDK is installed as a small service running alongside your application, and provides an in-cluster API that you can use to embed Replicated features into your application.
-
-1. Update dependencies and package the chart to a `.tgz` chart archive:
-
- ```bash
- helm package . --dependency-update
- ```
-
- :::note
- If you see a `401 Unauthorized` error message, log out of the Replicated registry by running `helm registry logout registry.replicated.com` and then run `helm package . --dependency-update` again.
- :::
-
-## Next Step
-
-Add the chart archive to a release. See [Add the Chart Archive to a Release](tutorial-preflight-helm-create-release).
-
-## Related Topics
-
-* [Defining Preflight Checks](/vendor/preflight-defining)
-* [Packaging a Helm Chart for a Release](/vendor/helm-install-release)
-
-================
-File: docs/vendor/tutorial-preflight-helm-create-customer.mdx
-================
-# Step 4: Create a Customer
-
-After promoting the release, create a customer so that you can run the preflight checks and install.
-
-To create a customer:
-
-1. In the [Vendor Portal](https://vendor.replicated.com), click **Customers > Create customer**.
-
- The **Create a new customer** page opens:
-
- ![Create a new customer page in the Vendor Portal](/images/create-customer.png)
-
- [View a larger version of this image](/images/create-customer.png)
-
-1. For **Customer name**, enter a name for the customer. For example, `Preflight Customer`.
-
-1. For **Channel**, select **Unstable**. This allows the customer to install releases promoted to the Unstable channel.
-
-1. For **Customer email**, enter the email address for the customer. The customer email address is required to install the application with the Helm CLI. This email address is never used to send emails to customers.
-
-1. For **License type**, select **Development**.
-
-1. Click **Save Changes**.
-
-## Next Step
-
-Use the Helm CLI to run the preflight checks you defined and install Gitea. See [Run Preflights with the Helm CLI](tutorial-preflight-helm-install).
-
-## Related Topics
-
-* [About Customers](/vendor/licenses-about)
-* [Creating and Managing Customers](/vendor/releases-creating-customer)
-
-================
-File: docs/vendor/tutorial-preflight-helm-create-release.mdx
-================
-# Step 3: Add the Chart Archive to a Release
-
-Use the Replicated CLI to add the Gitea Helm chart archive to a release in the Replicated vendor platform.
-
-To create a release:
-
-1. Install the Replicated CLI:
-
- ```
- brew install replicatedhq/replicated/cli
- ```
- For more installation options, see [Installing the Replicated CLI](/reference/replicated-cli-installing).
-
-1. Authorize the Replicated CLI:
-
- ```
- replicated login
- ```
- In the browser window that opens, complete the prompts to log in to your vendor account and authorize the CLI.
-
-1. Create an application named `Gitea`:
-
- ```
- replicated app create Gitea
- ```
-
-1. Get the slug for the application that you created:
-
- ```
- replicated app ls
- ```
- **Example output**:
- ```
- ID                            NAME     SLUG          SCHEDULER
- 2WthxUIfGT13RlrsUx9HR7So8bR   Gitea    gitea-boxer   kots
- ```
- In the example above, the application slug is `gitea-boxer`.
-
-1. Set the `REPLICATED_APP` environment variable to the application that you created. This allows you to interact with the application using the Replicated CLI without needing to use the `--app` flag with every command:
-
- **Example:**
-
- ```
- export REPLICATED_APP=gitea-boxer
- ```
-
-1. Go to the `gitea` directory.
-
-1. Create a release with the Gitea chart archive:
-
- ```
- replicated release create --chart=gitea-1.0.6.tgz
- ```
- ```bash
- You are creating a release that will only be installable with the helm CLI. 
- For more information, see - https://docs.replicated.com/vendor/helm-install#about-helm-installations-with-replicated - - • Reading chart from gitea-1.0.6.tgz ✓ - • Creating Release ✓ - • SEQUENCE: 1 - ``` - -1. Log in to the Vendor Portal and go to **Releases**. - - The release that you created is listed under **All releases**. - -1. Click **View YAML** to view the files in the release. - -1. At the top of the page, click **Promote**. - - <img alt="Promote release dialog" src="/images/release-promote.png" width="400px"/> - - [View a larger version of this image](/images/release-promote.png) - -1. In the dialog, for **Which channels you would like to promote this release to?**, select **Unstable**. Unstable is a default channel that is intended for use with internal testing. - -1. For **Version label**, open the dropdown and select **1.0.6**. - -1. Click **Promote**. - - -## Next Step - -Create a customer so that you can install the release in a development environment. See [Create a Customer](tutorial-preflight-helm-create-customer). - -## Related Topics - -* [About Channels and Releases](/vendor/releases-about) -* [Managing Releases with the CLI](/vendor/releases-creating-cli) - -================ -File: docs/vendor/tutorial-preflight-helm-get-chart.mdx -================ -# Step 1: Get the Sample Chart and Test - -To begin, get the sample Gitea Helm chart from Bitnami, install the chart in your cluster using the Helm CLI, and then uninstall. The purpose of this step is to confirm that you can successfully install the application before adding preflight checks to the chart. - -To get the sample Gitea Helm chart and test installation: - -1. Run the following command to pull and untar version 1.0.6 of the Bitnami Gitea Helm chart: - - ``` - helm pull --untar oci://registry-1.docker.io/bitnamicharts/gitea --version 1.0.6 - ``` - For more information about this chart, see the [bitnami/gitea](https://github.com/bitnami/charts/tree/main/bitnami/gitea) repository in GitHub. - -1. Change to the new `gitea` directory that was created: - ``` - cd gitea - ``` -1. View the files in the directory: - ``` - ls - ``` - The directory contains the following files: - ``` - Chart.lock Chart.yaml README.md charts templates values.yaml - ``` -1. Install the Gitea chart in your cluster: - - ``` - helm install gitea . --namespace gitea --create-namespace - ``` - To view the full installation instructions from Bitnami, see [Installing the Chart](https://github.com/bitnami/charts/blob/main/bitnami/gitea/README.md#installing-the-chart) in the `bitnami/gitea` repository. - - When the chart is installed, the following output is displayed: - - ``` - NAME: gitea - LAST DEPLOYED: Tue Oct 24 12:44:55 2023 - NAMESPACE: gitea - STATUS: deployed - REVISION: 1 - TEST SUITE: None - NOTES: - CHART NAME: gitea - CHART VERSION: 1.0.6 - APP VERSION: 1.20.5 - - ** Please be patient while the chart is being deployed ** - - 1. Get the Gitea URL: - - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - Watch the status with: 'kubectl get svc --namespace gitea -w gitea' - - export SERVICE_IP=$(kubectl get svc --namespace gitea gitea --template "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}") - echo "Gitea URL: http://$SERVICE_IP/" - - WARNING: You did not specify a Root URL for Gitea. The rendered URLs in Gitea may not show correctly. In order to set a root URL use the rootURL value. - - 2. 
Get your Gitea login credentials by running:
-
- echo Username: bn_user
- echo Password: $(kubectl get secret --namespace gitea gitea -o jsonpath="{.data.admin-password}" | base64 -d)
- ```
-
-1. Watch the `gitea` LoadBalancer service until an external IP is available:
-
- ```
- kubectl get svc gitea --namespace gitea --watch
- ```
-
-1. When the external IP for the `gitea` LoadBalancer service is available, run the commands provided in the output of the installation command to get the Gitea URL:
-
- ```
- export SERVICE_IP=$(kubectl get svc --namespace gitea gitea --template "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}")
- echo "Gitea URL: http://$SERVICE_IP/"
- ```
-
- :::note
- Alternatively, you can run the following command to forward a local port to a port on the Gitea Pod:
-
- ```
- POD_NAME=$(kubectl get pods -l app.kubernetes.io/name=gitea -o jsonpath='{.items[0].metadata.name}')
- kubectl port-forward pod/$POD_NAME 8080:3000
- ```
- :::
-
-1. In a browser, go to the Gitea URL to confirm that you can see the welcome page for the application:
-
- <img alt="Gitea application webpage" src="/images/gitea-app.png" width="500px"/>
-
- [View a larger version of this image](/images/gitea-app.png)
-
-1. Uninstall the Helm chart:
-
- ```
- helm uninstall gitea --namespace gitea
- ```
- This command removes all the Kubernetes components associated with the chart and uninstalls the `gitea` release.
-
-1. Delete the namespace:
-
- ```
- kubectl delete namespace gitea
- ```
-
-## Next Step
-
-Define preflight checks and add them to the Gitea Helm chart. See [Add a Preflight Spec to the Chart](tutorial-preflight-helm-add-spec).
-
-## Related Topics
-
-* [Helm Install](https://helm.sh/docs/helm/helm_install/)
-* [Helm Uninstall](https://helm.sh/docs/helm/helm_uninstall/)
-* [Helm Package](https://helm.sh/docs/helm/helm_package/)
-* [bitnami/gitea](https://github.com/bitnami/charts/blob/main/bitnami/gitea)
-
-================
-File: docs/vendor/tutorial-preflight-helm-install-kots.mdx
-================
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-import HelmChartCr from "../partials/getting-started/_gitea-helmchart-cr.mdx"
-import KotsCr from "../partials/getting-started/_gitea-kots-app-cr.mdx"
-import K8sCr from "../partials/getting-started/_gitea-k8s-app-cr.mdx"
-import KotsVerReq from "../partials/replicated-sdk/_kots-version-req.mdx"
-
-# Step 6: Run Preflights with KOTS
-
-Create a KOTS-enabled release and then install Gitea with KOTS. The purpose of this step is to see how preflight checks automatically run in the KOTS Admin Console during installation.
-
-To run preflight checks during installation with KOTS:
-
-1. In the `gitea` directory, create a subdirectory named `manifests`:
-
- ```
- mkdir manifests
- ```
-
- You will add the files required to support installation with KOTS to this subdirectory.
-
-1. Move the Helm chart archive to `manifests`:
-
- ```
- mv gitea-1.0.6.tgz manifests
- ```
-
-1. In `manifests`, create the YAML manifests required by KOTS:
- ```
- cd manifests
- ```
- ```
- touch gitea.yaml kots-app.yaml k8s-app.yaml
- ```
-
-1. In each of the files that you created, paste the corresponding YAML provided in the tabs below:
-
- <Tabs>
- <TabItem value="helmchart" label="gitea.yaml" default>
- <h5>Description</h5>
- <p>The KOTS HelmChart custom resource provides instructions to KOTS about how to deploy the Helm chart. 
The <code>name</code> and <code>chartVersion</code> listed in the HelmChart custom resource must match the name and version of a Helm chart archive in the release. Each Helm chart archive in a release requires a unique HelmChart custom resource.</p> - <h5>YAML</h5> - <HelmChartCr/> - </TabItem> - <TabItem value="kots-app" label="kots-app.yaml"> - <h5>Description</h5> - <p>The KOTS Application custom resource enables features in the Replicated Admin Console such as branding, release notes, port forwarding, dashboard buttons, application status indicators, and custom graphs.</p><p>The YAML below provides a name for the application to display in the Admin Console, adds a custom <em>status informer</em> that displays the status of the <code>gitea</code> Deployment resource in the Admin Console dashboard, adds a custom application icon, and creates a port forward so that the user can open the Gitea application in a browser.</p> - <h5>YAML</h5> - <KotsCr/> - </TabItem> - <TabItem value="k8s-app" label="k8s-app.yaml"> - <h5>Description</h5> - <p>The Kubernetes Application custom resource supports functionality such as including buttons and links on the Replicated Admin Console dashboard. The YAML below adds an <strong>Open App</strong> button to the Admin Console dashboard that opens the application using the port forward configured in the KOTS Application custom resource.</p> - <h5>YAML</h5> - <K8sCr/> - </TabItem> - </Tabs> - -1. From the `manifests` directory, lint the YAML files to confirm that there are no errors: - - ``` - replicated release lint --yaml-dir . - ``` - `--yaml-dir` is the path to the directory that contains the Helm chart archive and the manifest files required by KOTS. - - **Example output**: - - ``` - RULE TYPE FILENAME LINE MESSAGE - config-spec warn Missing config spec - preflight-spec warn Missing preflight spec - troubleshoot-spec warn Missing troubleshoot spec - nonexistent-status-informer-object warn kots-app.yaml 8 Status informer points to a nonexistent kubernetes object. If this is a Helm resource, this warning can be ignored. - ``` - - The output includes warning messages, including a warning about a missing preflight spec. This warning appears because the preflight spec is defined in the Helm chart. The warnings can be ignored for the purpose of this tutorial. - -1. Create a release: - - ```bash - replicated release create --yaml-dir . - ``` - **Example output**: - ```bash - • Reading manifests from . ✓ - • Creating Release ✓ - • SEQUENCE: 2 - ``` - -1. Log in to the [vendor portal](https://vendor.replicated.com) and go to **Releases**. The new release is labeled **Sequence 2**. - -1. Promote the release to the Unstable channel. - -1. Go to the **Customers** page. - -1. Create a new customer named `KOTS Preflight Customer`. For **License options**, enable the **KOTS Install Enabled** checkbox. This is the entitlement that allows the customer to install with KOTS. - -1. On the **Manage customer** page for the customer, click **Download license**. You will use the license file to install with KOTS. - -1. Go to **Channels**. From the **Unstable** channel card, under **Install**, copy the **KOTS Install** command. - - ![KOTS Install tab on the Unstable channel card](/images/helm-tutorial-unstable-kots-install-command.png) - - [View a larger version of this image](/images/helm-tutorial-unstable-kots-install-command.png) - -1. 
On the command line, run the **KOTS Install** command that you copied: - - ```bash - curl https://kots.io/install | bash - kubectl kots install $REPLICATED_APP/unstable - ``` - - This installs the latest version of the KOTS CLI and the Replicated Admin Console. The Admin Console provides a user interface where you can upload the customer license file and deploy the application. - - For additional KOTS CLI installation options, including how to install without root access, see [Installing the KOTS CLI](/reference/kots-cli-getting-started). - - :::note - <KotsVerReq/> - ::: - -1. Complete the installation command prompts: - - 1. For `Enter the namespace to deploy to`, enter `gitea`. - - 1. For `Enter a new password to be used for the Admin Console`, provide a password to access the Admin Console. - - When the Admin Console is ready, the command prints the URL where you can access the Admin Console. At this point, the KOTS CLI is installed and the Admin Console is running, but the application is not yet deployed. - - **Example output:** - - ```bash - Enter the namespace to deploy to: gitea - • Deploying Admin Console - • Creating namespace ✓ - • Waiting for datastore to be ready ✓ - Enter a new password for the Admin Console (6+ characters): •••••••• - • Waiting for Admin Console to be ready ✓ - - • Press Ctrl+C to exit - • Go to http://localhost:8800 to access the Admin Console - ``` - -1. With the port forward running, in a browser, go to `http://localhost:8800` to access the Admin Console. - -1. On the login page, enter the password that you created. - -1. On the license page, select the license file that you downloaded previously and click **Upload license**. - - Preflight checks run automatically: - - ![Gitea preflight checks page](/images/gitea-preflights-admin-console.png) - - [View a larger version of this image](/images/gitea-preflights-admin-console.png) - -1. When the preflight checks finish, click **Deploy** to deploy the application. - - The Admin Console dashboard opens. The application status changes from Missing to Unavailable while the `gitea` Deployment is being created: - - ![Admin console dashboard](/images/tutorial-gitea-unavailable.png) - - [View a larger version of this image](/images/tutorial-gitea-unavailable.png) - -1. (Optional) After the application is in a Ready status, click **Open App** to view the Gitea application in a browser. - -1. Uninstall the Gitea application from your cluster: - - ```bash - kubectl kots remove $REPLICATED_APP --namespace gitea --undeploy - ``` - **Example output**: - ``` - • Removing application gitea-boxer reference from Admin Console and deleting associated resources from the cluster ✓ - • Application gitea-boxer has been removed - ``` - -1. Remove the Admin Console from the cluster: - - 1. Delete the namespace where the Admin Console is installed: - - ``` - kubectl delete namespace gitea - ``` - 1. Delete the Admin Console ClusterRole and ClusterRoleBinding: - - ``` - kubectl delete clusterrole kotsadm-role - ``` - ``` - kubectl delete clusterrolebinding kotsadm-rolebinding - ``` - -## Summary - -Congratulations! In this tutorial, you defined a preflight check for Gitea that checks the version of Kubernetes running in the cluster. You also ran preflight checks before installing with both the Helm CLI and with KOTS. 
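-
-If you are curious what the `clusterVersion` analyzer evaluated in your own cluster, you can query the API server version that it compares its outcomes against. A small sketch, assuming `jq` is installed:
-
-```bash
-# Print the Kubernetes server version that preflight analyzers such as
-# clusterVersion evaluate their outcomes against
-kubectl version --output=json | jq --raw-output '.serverVersion.gitVersion'
-```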
- -To learn more about defining and running preflight checks, see: -* [Defining Preflight Checks](/vendor/preflight-defining) -* [Running Preflight Checks](/vendor/preflight-running) -* [Getting Started](https://troubleshoot.sh/docs/) in the open source Troubleshoot documentation. - -================ -File: docs/vendor/tutorial-preflight-helm-install.mdx -================ -# Step 5: Run Preflights with the Helm CLI - -Use the Helm CLI installation instructions provided for the customer that you created to run the preflight checks for Gitea and install. The purpose of this step is to demonstrate how enterprise users can run preflight checks defined in a Helm chart before installing. - -To run preflight checks and install with the Helm CLI: - -1. Create a `gitea` namespace for the installation: - - ``` - kubectl create namespace gitea - ``` - -1. Update the current kubectl context to target the new `gitea` namespace. This ensures that the chart is installed in the `gitea` namespace without requiring you to set the `--namespace` flag with the `helm install` command: - - ``` - kubectl config set-context --namespace=gitea --current - ``` - -1. In the [vendor portal](https://vendor.replicated.com), go to the **Customers** page. - -1. On the **Customer details** page for the customer that you created, click **Helm install instructions**. - - ![Helm install instructions button](/images/tutorial-gitea-helm-customer-install-button.png) - - [View a larger version of this image](/images/tutorial-gitea-helm-customer-install-button.png) - -1. Run the first command in the **Helm install instructions** dialog to log in to the Replicated registry. - -1. Run the second command to install the preflight kubectl plugin: - - ```bash - curl https://krew.sh/preflight | bash - ``` - The preflight plugin is a client-side utility used to run preflight checks. - -1. Run the third command to run preflight checks: - - ```bash - helm template oci://registry.replicated.com/$REPLICATED_APP/unstable/gitea | kubectl preflight - - ``` - This command templates the Gitea chart and then pipes the result to the preflight plugin. The following shows an example of the output for this command: - - <img alt="Preflight CLI output" src="/images/gitea-preflights-cli.png" width="600px"/> - - [View a larger version of this image](/images/gitea-preflights-cli.png) - -1. Run the fourth command listed under **Option 1: Install Gitea** to install the application: - - ```bash - helm install gitea oci://registry.replicated.com/$REPLICATED_APP/unstable/gitea - ``` - -1. Uninstall and delete the namespace: - - ```bash - helm uninstall gitea --namespace gitea - ``` - ```bash - kubectl delete namespace gitea - ``` - -## Next Step - -Install the application with KOTS to see how preflight checks are run from the KOTS Admin Console. See [Run Preflights with KOTS](tutorial-preflight-helm-install-kots). - -## Related Topics - -* [Running Preflight Checks](/vendor/preflight-running) -* [Installing with Helm](/vendor/install-with-helm) - -================ -File: docs/vendor/tutorial-preflight-helm-setup.mdx -================ -# Introduction and Setup - -This topic provides a summary of the goals and outcomes for the tutorial and also lists the prerequisites to set up your environment before you begin. - -## Summary - -This tutorial introduces you to preflight checks. The purpose of preflight checks is to provide clear feedback about any missing requirements or incompatibilities in the customer's cluster _before_ they install or upgrade an application.
Thorough preflight checks provide increased confidence that an installation or upgrade will succeed and help prevent support escalations. - -Preflight checks are part of the [Troubleshoot](https://troubleshoot.sh/) open source project, which is maintained by Replicated. - -In this tutorial, you use a sample Helm chart to learn how to: - -* Define custom preflight checks in a Kubernetes Secret in a Helm chart -* Package a Helm chart and add it to a release in the Replicated Vendor Portal -* Run preflight checks using the Helm CLI -* Run preflight checks in the Replicated KOTS Admin Console - -## Set Up the Environment - -Before you begin, do the following to set up your environment: - -* Ensure that you have kubectl access to a Kubernetes cluster. You can use any cloud provider or tool that you prefer to create a cluster, such as Google Kubernetes Engine (GKE), Amazon Web Services (AWS), or minikube. - - For information about installing kubectl and configuring kubectl access to a cluster, see the following in the Kubernetes documentation: - * [Install Tools](https://kubernetes.io/docs/tasks/tools/) - * [Command line tool (kubectl)](https://kubernetes.io/docs/reference/kubectl/) - -* Install the Helm CLI. To install the Helm CLI using Homebrew, run: - - ``` - brew install helm - ``` - - For more information, including alternative installation options, see [Install Helm](https://helm.sh/docs/intro/install/) in the Helm documentation. - -* Create a vendor account to access the Vendor Portal. See [Creating a Vendor Account](/vendor/vendor-portal-creating-account). - - :::note - If you do not yet have a Vendor Portal team to join, you can sign up for a trial account. By default, trial accounts do not include access to Replicated KOTS. To get access to KOTS with your trial account so that you can complete this and other tutorials, contact Replicated at contact@replicated.com. - ::: - -## Next Step - -Get the sample Bitnami Helm chart and test installation with the Helm CLI. See [Step 1: Get the Sample Chart and Test](/vendor/tutorial-preflight-helm-get-chart). - -================ -File: docs/vendor/using-third-party-registry-proxy.mdx -================ -# Using a Registry Proxy for Helm Air Gap Installations - -This topic describes how to connect the Replicated proxy registry to a Harbor or jFrog Artifactory instance to support pull-through image caching. It also includes information about how to set up replication rules in Harbor for image mirroring. - -## Overview - -For applications distributed with Replicated, the [Replicated proxy registry](/vendor/private-images-about) grants proxy, or _pull-through_, access to application images without exposing registry credentials to customers. - -Users can optionally connect the Replicated proxy registry with their own [Harbor](https://goharbor.io) or [jFrog Artifactory](https://jfrog.com/help/r/jfrog-artifactory-documentation) instance to proxy and cache the images that are required for installation on demand. This can be particularly helpful in Helm installations in air-gapped environments because it allows users to pull and cache images from an internet-connected machine, then access the cached images during installation from a machine with limited or no outbound internet access.
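For illustration, the following is a minimal sketch of what pull-through access can look like from the client side. The Harbor hostname (`harbor.internal.example.com`), the Harbor project name (`proxy-cache`), the app slug, and the upstream image reference are all placeholder assumptions:

```bash
# Pull an application image directly through the Replicated proxy registry
# (the APP_SLUG and upstream image reference are placeholders):
docker pull proxy.replicated.com/proxy/APP_SLUG/quay.io/example-org/example-app:1.0.0

# Pull the same image through a Harbor project configured as a pull-through
# proxy cache. Harbor fetches the image from the proxy registry on the first
# pull and serves the cached copy afterward:
docker pull harbor.internal.example.com/proxy-cache/proxy/APP_SLUG/quay.io/example-org/example-app:1.0.0
```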
- -In addition to the support for on-demand pull-through caching, connecting the Replicated proxy registry to a Harbor or Artifactory instance also has the following benefits: -* Registries like Harbor or Artifactory typically support access controls as well as scanning images for security vulnerabilities -* With Harbor, users can optionally set up replication rules for image mirroring, which can be used to improve data availability and reliability - -## Limitation - -Artifactory does not support mirroring or replication for Docker registries. If you need to set up image mirroring, use Harbor. See [Configure Image Mirroring in Harbor](#harbor-mirror) below. - -## Connect the Replicated Proxy Registry to Harbor - -[Harbor](https://goharbor.io) is a popular open-source container registry. Users can connect the Replicated proxy registry to Harbor in order to cache images on demand and set up pull-based replication rules to proactively mirror images. Connecting the Replicated proxy registry to Harbor also allows customers to use Harbor's security features. - -### Use Harbor for Pull-Through Proxy Caching {#harbor-proxy-cache} - -To connect the Replicated proxy registry to Harbor for pull-through proxy caching: - -1. Log in to Harbor and create a new replication endpoint. This endpoint connects the Replicated proxy registry to the Harbor instance. For more information, see [Creating Replication Endpoints](https://goharbor.io/docs/2.11.0/administration/configuring-replication/create-replication-endpoints/) in the Harbor documentation. - -1. Enter the following details for the endpoint: - - * For the provider field, choose Docker Registry. - * For the URL field, enter `https://proxy.replicated.com` or the custom domain that is configured for the Replicated proxy registry. For more information about configuring custom domains in the Vendor Portal, see [Using Custom Domains](/vendor/custom-domains-using). - * For the access ID, enter the email address associated with the customer in the Vendor Portal. - * For the access secret, enter the customer's unique license ID. You can find the license ID in the Vendor Portal by going to **Customers > [Customer Name]**. - -1. Verify your configuration by testing the connection and then save the endpoint. - -1. After adding the Replicated proxy registry as a replication endpoint in Harbor, set up a proxy cache. This allows for pull-through image caching with Harbor. For more information, see [Configure Proxy Cache](https://goharbor.io/docs/2.11.0/administration/configure-proxy-cache/) in the Harbor documentation. - -1. (Optional) Add a pull-based replication rule to support image mirroring. See [Configure Image Mirroring in Harbor](#harbor-mirror) below. - -### Configure Image Mirroring in Harbor {#harbor-mirror} - -To enable image mirroring with Harbor, users create a pull-based replication rule. This periodically (or when manually triggered) pulls images from the Replicated proxy registry to store them in Harbor. - -The Replicated proxy registry exposes standard catalog and tag listing endpoints that are used by Harbor to support image mirroring: -* The catalog endpoint returns a list of repositories built from images of the last 10 releases. -* The tags listing endpoint lists the tags available in a given repository for those same releases. - -When image mirroring is enabled, Harbor uses these endpoints to build a list of images to cache and then serve.
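As a rough sketch of the calls Harbor makes, the following assumes the standard Docker Registry v2 API paths for the catalog and tags listing endpoints, that basic authentication is accepted, and placeholder values for the email address, license ID, and repository path:

```bash
# List the repositories exposed by the proxy registry (built from the
# images of the last 10 releases):
curl -u "customer@example.com:LICENSE_ID" https://proxy.replicated.com/v2/_catalog

# List the tags available in one of the returned repositories:
curl -u "customer@example.com:LICENSE_ID" \
  https://proxy.replicated.com/v2/proxy/APP_SLUG/example-repo/tags/list
```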
- -#### Limitations - -Image mirroring with Harbor has the following limitations: - -* Neither the catalog nor the tags listing endpoint exposed by the Replicated proxy registry respects pagination requests. However, Harbor requests 1000 items at a time. - -* Only authenticated users can perform catalog calls or list tags. Authenticated users are those with an email address and license ID associated with a customer in the Vendor Portal. - -#### Create a Pull-Based Replication Rule in Harbor for Image Mirroring - -To configure image mirroring in Harbor: - -1. Follow the steps in [Use Harbor for Pull-Through Proxy Caching](#harbor-proxy-cache) above to add the Replicated proxy registry to Harbor as a replication endpoint. - -1. Create a **pull-based** replication rule in Harbor to mirror images proactively. For more information, see [Creating a replication rule](https://goharbor.io/docs/2.11.0/administration/configuring-replication/create-replication-rules/) in the Harbor documentation. - -## Use Artifactory for Pull-Through Proxy Caching - -[jFrog Artifactory](https://jfrog.com/help/r/jfrog-artifactory-documentation) supports pull-through caching for Docker registries. - -For information about how to configure a pull-through cache with Artifactory, see [Remote Repository](https://jfrog.com/help/r/jfrog-artifactory-documentation/configure-a-remote-repository) in the Artifactory documentation. - -================ -File: docs/vendor/vendor-portal-application-settings.md -================ -# Application Settings Page - -Each application has its own settings, which include the application name and application slug. - -The following shows the **Application Settings** page, which you access by selecting **_Application Name_ > Settings**: - -<img alt="Settings page" src="/images/application-settings.png" width="600px"/> - -[View a larger version of this image](/images/application-settings.png) - -The following describes each of the application settings: - -- **Application name:** The application name is initially set when you first create the application in the Vendor Portal. You can change the name at any time so that it displays as a user-friendly name that your team can easily identify. -- **Application slug:** The application slug is used with the Replicated CLI and with some of the KOTS CLI commands. You can click the link below the slug to toggle between the application ID number and the slug name. The application ID and application slug are unique identifiers that cannot be edited. -- **Service Account Tokens:** Provides a link to the **Service Accounts** page, where you can create or remove a service account. Service accounts are paired with API tokens and are used with the Vendor API to automate tasks. For more information, see [Using Vendor API Tokens](/reference/vendor-api-using). -- **Scheduler:** Displayed if the application has a KOTS entitlement. -- **Danger Zone:** Lets you delete the application and all of the licenses and data associated with it. The delete action cannot be undone. - -================ -File: docs/vendor/vendor-portal-creating-account.md -================ -# Creating a Vendor Account - -To get started with Replicated, you must create a Replicated vendor account. When you create your account, you are also prompted to create an application. To create additional applications in the future, log in to the Replicated Vendor Portal and select **Create new app** from the Applications drop-down list. - -To create a vendor account: - -1.
Go to the [Vendor Portal](https://vendor.replicated.com), and select **Sign up**. - - The sign up page opens. -2. Enter your email address or continue with Google authentication. - - - If registering with an email, the Activate account page opens and you will receive an activation code in your email. - - :::note - To resend the code, click **Resend it**. - ::: - - - Copy and paste the activation code into the text box and click **Activate**. Your account is now activated. - - :::note - After your account is activated, you might have the option to accept a pending invitation, or to automatically join an existing team if the auto-join feature is enabled by your administrator. For more information about enabling the auto-join feature, see [Enable Users to Auto-join Your Team](https://docs.replicated.com/vendor/team-management#enable-users-to-auto-join-your-team). - ::: - -3. On the Create your team page, enter your first name, last name, and company name. Click **Continue** to complete the setup. - - :::note - The company name you provide is used as your team name in Vendor Portal. - ::: - - The Create application page opens. - -4. Enter a name for the application, such as `My-Application-Demo`. Click **Create application**. - - The application is created and the Channels page opens. - - :::important - Replicated recommends that you use a temporary name for the application at this time, such as `My-Application-Demo` or `My-Application-Test`. - - Only use an official name for your application when you have completed testing and are ready to distribute the application to your customers. - - Replicated recommends that you use a temporary application name for testing because you are not able to restore or modify previously-used application names or application slugs in the Vendor Portal. - ::: - -## Next Step - -Invite team members to collaborate with you in Vendor Portal. See [Invite Members](team-management#invite-members). - -================ -File: docs/vendor/vendor-portal-manage-app.md -================ -# Managing Applications - -This topic provides information about managing applications, including how to create, delete, and retrieve the slug for applications in the Replicated Vendor Portal and with the Replicated CLI. - -For information about creating and managing applications with the Vendor API v3, see the [apps](https://replicated-vendor-api.readme.io/reference/createapp) section in the Vendor API v3 documentation. - -## Create an Application - -Teams can create one or more applications. It is common to create multiple applications for testing purposes. - -### Vendor Portal - -To create a new application: - -1. Log in to the [Vendor Portal](https://vendor.replicated.com/). If you do not have an account, see [Creating a Vendor Account](/vendor/vendor-portal-creating-account). - -1. In the top left of the page, open the application drop-down and click **Create new app...**. - - <img alt="create new app drop down" src="/images/create-new-app.png" width="300px"/> - - [View a larger version of this image](/images/create-new-app.png) - -1. On the **Create application** page, enter a name for the application. - - <img alt="create new app page" src="/images/create-application-page.png" width="500px"/> - - [View a larger version of this image](/images/create-application-page.png) - - :::important - If you intend to use the application for testing purposes, Replicated recommends that you use a temporary name such as `My Application Demo` or `My Application Test`.
- - You are not able to restore or modify previously-used application names or application slugs. - ::: - -1. Click **Create application**. - -### Replicated CLI - -To create an application with the Replicated CLI: - -1. Install the Replicated CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing). - -1. Run the following command: - - ```bash - replicated app create APP-NAME - ``` - Replace `APP-NAME` with the name that you want to use for the new application. - - **Example**: - - ```bash - replicated app create cli-app - ID NAME SLUG SCHEDULER - 1xy9t8G9CO0PRGzTwSwWFkMUjZO cli-app cli-app kots - ``` - -## Get the Application Slug {#slug} - -Each application has a slug, which is used for interacting with the application using the Replicated CLI. The slug is automatically generated based on the application name and cannot be changed. - -### Vendor Portal - -To get an application slug in the Vendor Portal: - -1. Log in to the [Vendor Portal](https://vendor.replicated.com/) and go to **_Application Name_ > Settings**. - -1. Under **Application Slug**, copy the slug. - - <img alt="Application slug" src="/images/application-settings.png" width="600px"/> - - [View a larger version of this image](/images/application-settings.png) - -### Replicated CLI - -To get an application slug with the Replicated CLI: - -1. Install the Replicated CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing). - -1. Run the following command: - - ```bash - replicated app ls APP-NAME - ``` - Replace `APP-NAME` with the name of the target application. Or, exclude `APP-NAME` to list all applications in the team. - - **Example:** - - ```bash - replicated app ls cli-app - ID NAME SLUG SCHEDULER - 1xy9t8G9CO0PRGzTwSwWFkMUjZO cli-app cli-app kots - ``` - -1. Copy the value in the `SLUG` field. - -## Delete an Application - -When you delete an application, you also delete all licenses and data associated with the application. You can also optionally delete all images associated with the application from the Replicated registry. Deleting an application cannot be undone. - -### Vendor Portal - -To delete an application in the Vendor Portal: - -1. Log in to the [Vendor Portal](https://vendor.replicated.com/) and go to **_Application Name_ > Settings**. - -1. Under **Danger Zone**, click **Delete App**. - - <img alt="Setting page" src="/images/application-settings.png" width="600px"/> - - [View a larger version of this image](/images/application-settings.png) - -1. In the **Are you sure you want to delete this app?** dialog, enter the application name. Optionally, enter your password if you want to delete all images associated with the application from the Replicated registry. - - <img alt="delete app dialog" src="/images/delete-app-dialog.png" width="400px"/> - - [View a larger version of this image](/images/delete-app-dialog.png) - -1. Click **Delete app**. - -### Replicated CLI - -To delete an application with the Replicated CLI: - -1. Install the Replicated CLI. See [Installing the Replicated CLI](/reference/replicated-cli-installing). - -1. Run the following command: - - ```bash - replicated app delete APP-NAME - ``` - Replace `APP-NAME` with the name of the target application. - -1. When prompted, type `yes` to confirm that you want to delete the application. - - **Example:** - - ```bash - replicated app delete deletion-example - • Fetching App ✓ - ID NAME SLUG SCHEDULER - 1xyAIzrmbvq... deletion-example deletion-example kots - Delete the above listed application? 
There is no undo: yes█ - • Deleting App ✓ - ``` - -================ -File: docs/intro-kots.mdx -================ -import Kots from "../docs/partials/kots/_kots-definition.mdx" - -# Introduction to KOTS - -This topic provides an introduction to the Replicated KOTS installer, including information about KOTS features, installation options, and user interfaces. - -:::note -The Replicated KOTS entitlement is required to install applications with KOTS. For more information, see [Pricing](https://www.replicated.com/pricing) on the Replicated website. -::: - -## Overview - -<Kots/> - -KOTS communicates securely with the Replicated Vendor Portal to synchronize customer licenses, check for available application updates, send instance data, share customer-generated support bundles, and more. - -Installing an application with KOTS provides access to features such as: - -* Support for air gap installations in environments with limited or no outbound internet access -* Support for installations on VMs or bare metal servers, when using Replicated Embedded Cluster or Replicated kURL -* The KOTS Admin Console, which provides a user interface where customers can install and manage their application instances -* Instance telemetry automatically sent to the Vendor Portal for instances running in customer environments -* Strict preflight checks that block installation if environment requirements are not met -* Backup and restore with Replicated snapshots -* Support for marking releases as required to prevent users from skipping them during upgrades - -KOTS is an open source project that is maintained by Replicated. For more information, see the [kots](https://github.com/replicatedhq/kots) repository in GitHub. - -## About Installing with KOTS - -KOTS can be used to install Kubernetes applications and Helm charts in the following environments: -* Clusters provisioned on VMs or bare metal servers with Replicated Embedded Cluster or Replicated kURL -* Existing clusters brought by the user -* Online (internet-connected) or air-gapped (disconnected) environments - -To install an application with KOTS, users first run an installation script that installs KOTS in the target cluster and deploys the KOTS Admin Console. After KOTS is installed, users can log in to the KOTS Admin Console to upload their license file, configure the application, run preflight checks, and install and deploy the application. - -The following diagram demonstrates how a single release promoted to the Stable channel in the Vendor Portal can be installed with KOTS in an embedded cluster on a VM, in an existing air-gapped cluster, and in an existing internet-connected cluster: - -<img alt="Embedded cluster, air gap, and existing cluster app installation workflows" src="/images/kots-installation-overview.png"/> - -[View a larger version of this image](/images/kots-installation-overview.png) - -As shown in the diagram above: -* For installations in existing online (internet-connected) clusters, users run a command to install KOTS in their cluster. -* For installations on VMs or bare metal servers, users run an Embedded Cluster or kURL installation script that both provisions a cluster in their environment and installs KOTS in the cluster. -* For installations in air-gapped clusters, users download air gap bundles for KOTS and the application from the Replicated Download Portal and then provide the bundles during installation. - -All users must have a valid license file to install with KOTS. 
After KOTS is installed in the cluster, users can access the KOTS Admin Console to provide their license and deploy the application. - -For more information about how to install applications with KOTS, see the [Installing an Application](/enterprise/installing-overview) section. - -## KOTS User Interfaces - -This section describes the KOTS interfaces available to users for installing and managing applications. - -### KOTS Admin Console - -KOTS provides an Admin Console to make it easy for users to install, manage, update, configure, monitor, back up and restore, and troubleshoot their application instance from a GUI. - -The following shows an example of the Admin Console dashboard for an application: - -![Admin Console Dashboard](/images/guides/kots/application.png) - -[View a larger version of this image](/images/guides/kots/application.png) - -For applications installed with Replicated Embedded Cluster on a VM or bare metal server, the Admin Console also includes a **Cluster Management** tab where users can add and manage nodes in the embedded cluster, as shown below: - -![Admin console dashboard with Cluster Management tab](/images/gitea-ec-ready.png) - -[View a larger version of this image](/images/gitea-ec-ready.png) - -### KOTS CLI - -The KOTS command-line interface (CLI) is a kubectl plugin. Customers can run commands with the KOTS CLI to install and manage their application instances with KOTS programmatically. - -For information about getting started with the KOTS CLI, see [Installing the KOTS CLI](/reference/kots-cli-getting-started). - -The KOTS CLI can also be used to install an application without needing to access the Admin Console. This can be useful for automating installations and upgrades, such as in CI/CD pipelines. For information about how to perform headless installations from the command line, see [Installing with the KOTS CLI](/enterprise/installing-existing-cluster-automation).
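For example, a headless installation might look like the following minimal sketch, where the app slug, channel, namespace, password, and file paths are placeholders and the flags shown are only a subset of what `kots install` supports:

```bash
# Install and deploy the application without opening the Admin Console UI.
# All values shown are placeholders.
kubectl kots install APP_SLUG/stable \
  --namespace APP_NAMESPACE \
  --license-file ./license.yaml \
  --config-values ./configvalues.yaml \
  --shared-password ADMIN_CONSOLE_PASSWORD \
  --no-port-forward
```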
- -The following diagram demonstrates the process of using the Replicated Platform to distribute an application, install the application in a customer environment, and support the application after installation: - -![replicated platform features workflow](/images/replicated-platform.png) - -[View a larger version of this image](/images/replicated-platform.png) - -The diagram above shows an application that is packaged with the [**Replicated SDK**](/vendor/replicated-sdk-overview). The application is tested in clusters provisioned with the [**Replicated Compatibility Matrix**](/vendor/testing-about), then added to a new release in the [**Vendor Portal**](/vendor/releases-about) using an automated CI/CD pipeline. - -The application is then installed by a customer ("Big Bank") on a VM. To install, the customer downloads their license, which grants proxy access to the application images through the [**Replicated proxy registry**](/vendor/private-images-about). They also download the installation assets for the [**Replicated Embedded Cluster**](/vendor/embedded-overview) installer. - -Embedded Cluster runs [**preflight checks**](/vendor/preflight-support-bundle-about) to verify that the environment meets the installation requirements, provisions a cluster on the VM, and installs [**Replicated KOTS**](intro-kots) in the cluster. KOTS provides an [**Admin Console**](intro-kots#kots-admin-console) where the customer enters application-specific configurations, runs application preflight checks, optionally joins nodes to the cluster, and then deploys the application. After installation, customers can manage both the application and the cluster from the Admin Console. - -Finally, the diagram shows how [**instance data**](/vendor/instance-insights-event-data) is automatically sent from the customer environment to the Vendor Portal by the Replicated SDK API and the KOTS Admin Console. Additionally, tooling from the open source [**Troubleshoot**](https://troubleshoot.sh/docs/collect/) project is used to generate and send [**support bundles**](/vendor/preflight-support-bundle-about), which include logs and other important diagnostic data. - -## Replicated Platform Features - -The following describes the key features of the Replicated Platform. - -### Compatibility Matrix - -Replicated Compatibility Matrix can be used to get kubectl access to running clusters within minutes. Compatibility Matrix supports various Kubernetes distributions and versions and can be interacted with through the Vendor Portal or the Replicated CLI. - -For more information, see [About Compatibility Matrix](/vendor/testing-about). - -### Embedded Cluster - -Replicated Embedded Cluster is a Kubernetes installer based on the open source Kubernetes distribution k0s. With Embedded Cluster, users install and manage both the cluster and the application together as a single appliance on a VM or bare metal server. In this way, Kubernetes is _embedded_ with the application. - -Additionally, each version of Embedded Cluster includes a specific version of [Replicated KOTS](#kots) that is installed in the cluster during installation. KOTS is used by Embedded Cluster to deploy the application and also provides the Admin Console UI where users can manage both the application and the cluster. - -For more information, see [Embedded Cluster Overview](/vendor/embedded-overview).
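As a rough sketch of the resulting user experience, an Embedded Cluster installation is a download-and-run flow on the target VM or bare metal server. The archive name, app slug, and license path below are placeholder assumptions:

```bash
# Extract the installation assets downloaded from the release channel,
# then run the installer with the customer license (values are placeholders):
tar -xvzf APP_SLUG-unstable.tgz
sudo ./APP_SLUG install --license license.yaml
```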
- -### KOTS (Admin Console) {#kots} - -KOTS is a kubectl plugin and in-cluster Admin Console that installs Kubernetes applications in customer-controlled environments. - -KOTS is used by [Replicated Embedded Cluster](#embedded-cluster) to deploy applications and also to provide the Admin Console UI where users can manage both the application and the cluster. KOTS can also be used to install applications in existing Kubernetes clusters in customer-controlled environments, including clusters in air-gapped environments with limited or no outbound internet access. - -For more information, see [Introduction to KOTS](intro-kots). - -### Preflight Checks and Support Bundles - -<PreflightSbAbout/> - -For more information, see [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about). - -### Proxy Registry - -The Replicated proxy registry grants proxy access to an application's images using the customer's unique license. This means that customers can get access to application images during installation without the vendor needing to provide registry credentials. - -For more information, see [About the Replicated Proxy Registry](/vendor/private-images-about). - -### Replicated SDK - -The Replicated SDK is a Helm chart that can be installed as a small service alongside your application. It provides an in-cluster API that can be used to communicate with the Vendor Portal. For example, the SDK API can return details about the customer's license or report telemetry on the application instance back to the Vendor Portal. - -For more information, see [About the Replicated SDK](/vendor/replicated-sdk-overview). - -### Vendor Portal - -The Replicated Vendor Portal is the web-based user interface that you can use to configure and manage all of the Replicated features for distributing and managing application releases, supporting your release, viewing customer insights and reporting, and managing teams. - -The Vendor Portal can also be interacted with programmatically using the following developer tools: - -* **Replicated CLI**: The Replicated CLI can be used to complete tasks programmatically, including all tasks for packaging and managing applications, and managing artifacts such as teams, license files, and so on. For more information, see [Installing the Replicated CLI](/reference/replicated-cli-installing). - -* **Vendor API v3**: The Vendor API can be used to complete tasks programmatically, including all tasks for packaging and managing applications, and managing artifacts such as teams and license files. For more information, see [Using the Vendor API v3](/reference/vendor-api-using). - -## Commercial Software Distribution Lifecycle {#csdl} - -Replicated Platform features are designed to support ISVs in each phase of the Commercial Software Distribution Lifecycle shown below: - -![software distribution lifecycle wheel](/images/software-dev-lifecycle.png) - -[View a larger version of this image](/images/software-dev-lifecycle.png) - -<CSDL/> - -To download a copy of The Commercial Software Distribution Handbook, see [The Commercial Software Distribution Handbook](https://www.replicated.com/the-commercial-software-distribution-handbook). - -The following describes the phases of the software distribution lifecycle: - -* **[Develop](#develop)**: Application design and architecture decisions align with customer needs, and development teams can quickly iterate on new features.
-* **[Test](#test)**: Run automated tests in several customer-representative environments as part of continuous integration and continuous delivery (CI/CD) workflows. -* **[Release](#release)**: Use channels to share releases with external and internal users, publish release artifacts securely, and use consistent versioning. -* **[License](#license)**: Licenses are customized to each customer and are easy to issue, manage, and update. -* **[Install](#install)**: Provide unique installation options depending on customers' preferences and experience levels. -* **[Report](#report)**: Make more informed prioritization decisions by collecting usage and performance metadata for application instances running in customer environments. -* **[Support](#support)**: Diagnose and resolve support issues quickly. - -For more information about the Replicated features that support each of these phases, see the sections below. - -### Develop - -The Replicated SDK exposes an in-cluster API that can be developed against to quickly integrate and test core functionality with an application. For example, when the SDK is installed alongside an application in a customer environment, the in-cluster API can be used to send custom metrics from the instance to the Replicated vendor platform. - -For more information about using the Replicated SDK, see [About the Replicated SDK](/vendor/replicated-sdk-overview). - -### Test - -The Replicated Compatibility Matrix rapidly provisions ephemeral Kubernetes clusters, including multi-node and OpenShift clusters. When integrated into existing CI/CD pipelines for an application, the Compatibility Matrix can be used to automatically create a variety of customer-representative environments for testing code changes. - -For more information, see [About Compatibility Matrix](/vendor/testing-about). - -### Release - -Release channels in the Replicated Vendor Portal allow ISVs to make different application versions available to different customers, without needing to maintain separate code bases. For example, a "Beta" channel can be used to share beta releases of an application with only a certain subset of customers. - -For more information about working with channels, see [About Channels and Releases](/vendor/releases-about). - -Additionally, the Replicated proxy registry grants proxy access to private application images using the customers' license. This ensures that customers have the right access to images based on the channel they are assigned. For more information about using the proxy registry, see [About the Replicated Proxy Registry](/vendor/private-images-about). - -### License - -Create customers in the Replicated Vendor Portal to handle licensing for your application in both online and air gap environments. For example: -* License free trials and different tiers of product plans -* Create and manage custom license entitlements -* Verify license entitlements both before installation and during runtime -* Measure and report usage - -For more information about working with customers and custom license fields, see [About Customers](/vendor/licenses-about). - -### Install - -Applications distributed with the Replicated Platform can support multiple different installation methods from the same application release, helping you to meet your customers where they are. 
For example: - -* Customers who are not experienced with Kubernetes or who prefer to deploy to a dedicated cluster in their environment can install on a VM or bare metal server with the Replicated Embedded Cluster installer. For more information, see [Embedded Cluster Overview](/vendor/embedded-overview). -* Customers familiar with Kubernetes and Helm can install in their own existing cluster using Helm. For more information, see [Installing with Helm](/vendor/install-with-helm). -* Customers installing into environments with limited or no outbound internet access (often referred to as air-gapped environments) can securely access and push images to their own internal registry, then install using Helm or a Replicated installer. For more information, see [Air Gap Installation with Embedded Cluster](/enterprise/installing-embedded-air-gap) and [Installing and Updating with Helm in Air Gap Environments (Alpha)](/vendor/helm-install-airgap). - -### Report - -When installed alongside an application, the Replicated SDK and Replicated KOTS automatically send instance data from the customer environment to the Replicated Vendor Portal. This instance data includes health and status indicators, adoption metrics, and performance metrics. For more information, see [About Instance and Event Data](/vendor/instance-insights-event-data). - -ISVs can also set up email and Slack notifications to get alerted of important instance issues or performance trends. For more information, see [Configuring Instance Notifications](/vendor/instance-notifications-config). - -### Support - -Support teams can use Replicated features to more quickly diagnose and resolve application issues. For example: - -- Customize and generate support bundles, which collect and analyze redacted information from the customer's cluster, environment, and application instance (a minimal example follows this list). See [About Preflight Checks and Support Bundles](/vendor/preflight-support-bundle-about). - -- Provision customer-representative environments with Compatibility Matrix to recreate and diagnose issues. See [About Compatibility Matrix](/vendor/testing-about). - -- Get insights into an instance's status by accessing telemetry data, which covers the health of the application, the current application version, and details about the infrastructure and cluster where the application is running. For more information, see [Customer Reporting](/vendor/customer-reporting).
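As a concrete illustration of the first item above, support bundles can be generated with the open source `support-bundle` kubectl plugin. This is a minimal sketch that assumes the plugin is installed through krew and that a vendor-provided spec file exists at the placeholder path shown:

```bash
# Install the support-bundle kubectl plugin through krew:
curl https://krew.sh/support-bundle | bash

# Collect and redact diagnostic data from the cluster using a vendor-provided
# spec; the command writes an archive that can be shared with support:
kubectl support-bundle ./support-bundle.yaml
```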
- -================ -File: docs/intro.md -================ ---- -slug: / -pagination_next: null ---- - -# Home - -<section class="tile__container"> - <ul id="whats-new"> - <li class="tile__header"> - <img src="/images/icons/chat_bubble.png" alt="chat bubble icon" width="55px" height="55px"></img> - <p>What's New?</p> - </li> - <li> - <h3>Embedded Cluster 2.0 Release</h3> - <p>The 2.0 release brings improvements to architecture that increase the reliability and stability of Embedded Cluster.</p> - </li> - <li> - <a href="/release-notes/rn-embedded-cluster#200">Learn more</a> - </li> - </ul> - <ul id="did-you-know"> - <li class="tile__header"> - <img src="/images/icons/lightbulb.png" alt="lightbulb icon" width="55px" height="55px"></img> - <p>Did You Know?</p> - </li> - <li> - <h3>Manage Supported Install Methods Per Customer</h3> - <p>Control which installation methods are available for each customer from the **Install types** field in the customer's license.</p> - </li> - <li> - <a href="/vendor/licenses-install-types">Learn more</a> - </li> - </ul> -</section> -<section class="tile__container"> -<ul> - <li class="tile__header"> - <img src="images/icons/alien_vault.png" alt="ufo icon" width="55px" height="55px"></img> - <p>Getting Started with Replicated</p> - </li> - <li> - <p>Onboarding workflows, tutorials, and labs to help you get started with Replicated quickly.</p> - </li> - <li> - <a href="intro-replicated">Introduction to Replicated</a> - </li> - <li> - <a href="/vendor/kots-faq">Replicated FAQs</a> - </li> - <li> - <a href="/vendor/replicated-onboarding">Replicated Onboarding</a> - </li> - <li> - <a href="/vendor/tutorial-embedded-cluster-setup">Tutorials</a> - </li> - </ul> -</section> -<section class="tile__container"> -<ul> - <li class="tile__header"> - <img src="images/icons/vendor_portal_1.png" alt="vendor portal icon" width="55px" height="55px"></img> - <p>Vendor Platform</p> - </li> - <li> - <p>Create and manage your account and team.</p> - </li> - <li> - <a href="/vendor/vendor-portal-creating-account">Creating a Vendor Account</a> - </li> - <li> - <a href="/vendor/team-management#invite-members">Managing Team Members</a> - </li> - <li> - <a href="/vendor/team-management-rbac-configuring">Configuring RBAC Policies</a> - </li> - </ul> - <ul> - <li class="tile__header"> - <img src="images/icons/release.png" alt="rocket ship icon" width="55px" height="55px"></img> - <p>Compatibility Matrix</p> - </li> - <li> - <p>Rapidly create Kubernetes clusters, including OpenShift.</p> - </li> - <li> - <a href="/vendor/testing-about">About Compatibility Matrix</a> - </li> - <li> - <a href="/vendor/testing-how-to">Using Compatibility Matrix</a> - </li> - <li> - <a href="/vendor/testing-supported-clusters">Supported Cluster Types</a> - </li> - <li> - <a href="/vendor/testing-cluster-addons">Cluster Add-ons</a> - </li> - <li> - <a href="/vendor/ci-workflows">Recommended CI/CD Workflows</a> - </li> - </ul> - </section> -<section class="tile__container"> - <ul> - <li class="tile__header"> - <img src="images/icons/helm-logo.png" alt="helm logo" id="helm"></img> - <p>Helm Charts</p> - </li> - <li> - <p>Distribute Helm charts with Replicated.</p> - </li> - <li> - <a href="/vendor/install-with-helm">Helm Installations with Replicated</a> - </li> - <li> - <a href="/vendor/helm-install-release">Packaging a Helm Chart for a Release</a> - </li> - <li> - <a href="/vendor/replicated-sdk-overview">About the Replicated SDK</a> - </li> - </ul> - </section> -<section class="tile__container"> - <ul> - <li 
class="tile__header"> - <img src="images/icons/admin.png" alt="kots icon"></img> - <p>Replicated KOTS</p> - </li> - <li> - <p>A kubectl plugin and in-cluster Admin Console that installs applications in customer-controlled environments.</p> - </li> - <li> - <a href="intro-kots">Introduction to KOTS</a> - </li> - <li> - <a href="/vendor/helm-native-about">About Distributing Helm Charts with KOTS</a> - </li> - </ul> - <ul> - <li class="tile__header"> - <img src="images/icons/k8s_installer.png" alt="installer icon"></img> - <p>Embedded Cluster</p> - </li> - <li> - <p>Embed Kubernetes with your application to support installations on VMs or bare metal servers.</p> - </li> - <li> - <a href="/vendor/embedded-overview">Embedded Cluster Overview</a> - </li> - <li> - <a href="/enterprise/installing-embedded">Installing with Embedded Cluster</a> - </li> - <li> - <a href="/vendor/tutorial-embedded-cluster-setup">Tutorial: Deploy a Helm Chart on a VM with Embedded Cluster</a> - </li> - </ul> -</section> -<section class="tile__container"> - <ul> - <li class="tile__header"> - <img src="images/icons/dashboard_1.png" alt="dashboard icon" width="55px" height="55px"></img> - <p>Insights and Telemetry</p> - </li> - <li> - <p>Get insights on installed instances of your application.</p> - </li> - <li> - <a href="/vendor/instance-insights-event-data">About Instance and Event Data</a> - </li> - <li> - <a href="/vendor/customer-adoption">Adoption Report</a> - </li> - <li> - <a href="/vendor/instance-insights-details">Instance Details</a> - </li> - <li> - <a href="/vendor/custom-metrics-about">Configuring Custom Metrics</a> - </li> - </ul> - <ul> - <li class="tile__header"> - <img src="images/icons/vendor_portal_2.png" alt="vendor portal icon" width="55px" height="55px"></img> - <p>Channels and Releases</p> - </li> - <li> - <p>Manage application releases with the vendor platform.</p> - </li> - <li> - <a href="/vendor/releases-about">About Channels and Releases</a> - </li> - <li> - <a href="/vendor/releases-creating-releases">Managing Releases with the Vendor Portal</a> - </li> - <li> - <a href="/vendor/releases-creating-cli">Managing Releases with the CLI</a> - </li> - </ul> - <ul> - <li class="tile__header"> - <img src="images/icons/licensing.png" alt="dashboard icon" width="55px" height="55px"></img> - <p>Customer Licensing</p> - </li> - <li> - <p>Create, customize, and issue customer licenses.</p> - </li> - <li> - <a href="/vendor/licenses-about">About Customers</a> - </li> - <li> - <a href="/vendor/releases-creating-customer">Creating and Managing Customers</a> - </li> - <li> - <a href="/vendor/licenses-adding-custom-fields">Managing Customer License Fields</a> - </li> - </ul> -</section> -<section class="tile__container"> - <ul> - <li class="tile__header"> - <img src="images/icons/checklist.png" alt="checklist icon" width="55px" height="55px"></img> - <p>Preflight Checks</p> - </li> - <li> - <p>Define and verify installation environment requirements.</p> - </li> - <li> - <a href="/vendor/preflight-defining">Defining Preflight Checks</a> - </li> - <li> - <a href="/vendor/preflight-running">Running Preflight Checks for Helm Installations</a> - </li> - <li> - <a href="/vendor/tutorial-preflight-helm-setup">Preflight Checks Tutorial for Helm Charts</a> - </li> - <li> - <a href="https://play.instruqt.com/embed/replicated/tracks/avoiding-installation-pitfalls?token=em_gJjtIzzTTtdd5RFG">Preflight Checks Lab in Instruqt</a> - </li> - </ul> - <ul> - <li class="tile__header"> - <img src="images/icons/support_bundle.png" 
alt="support bundle icon" width="55px" height="55px"></img> - <p>Support Bundles</p> - </li> - <li> - <p>Gather information about customer environments for troubleshooting.</p> - </li> - <li> - <a href="vendor/support-bundle-customizing">Adding and Customizing Support Bundles</a> - </li> - <li> - <a href="/vendor/support-host-support-bundles">Configuring Host Support Bundles</a> - </li> - <li> - <a href="/vendor/support-bundle-generating">Generating Support Bundles</a> - </li> - <li> - <a href="https://play.instruqt.com/embed/replicated/tracks/closing-information-gap?token=em_MO2XXCz3bAgwtEca">Support Bundles Lab in Instruqt</a> - </li> - </ul> -</section> -<section class="tile__container"> - <ul> - <li class="tile__header"> - <img src="images/icons/tools.png" alt="carpenter tools icon" width="55px" height="55px"></img> - <p>Developer Tools</p> - </li> - <li> - <p>APIs, CLIs, and an SDK for interacting with the Replicated platform.</p> - </li> - <li> - <a href="/reference/replicated-cli-installing">Replicated CLI</a> - </li> - <li> - <a href="/reference/vendor-api-using">Vendor API v3</a> - </li> - <li> - <a href="/reference/kots-cli-getting-started">KOTS CLI</a> - </li> - <li> - <a href="/vendor/replicated-sdk-overview">Replicated SDK</a> - </li> - <li> - <a href="/reference/replicated-sdk-apis">Replicated SDK API</a> - </li> - </ul> -</section> - - - -================================================================ -End of Codebase -================================================================ diff --git a/static/llms/llms.txt b/static/llms/llms.txt deleted file mode 100644 index d5a31fe2b8..0000000000 --- a/static/llms/llms.txt +++ /dev/null @@ -1,16 +0,0 @@ -# Replicated Documentation for LLMs - -> Replicated is a commercial software distribution platform. Independent software vendors (ISVs) can use features of the Replicated Platform to distribute modern commercial software into complex, customer-controlled environments, including on-prem and air gap. - -## Docs - -- [llms-docs.txt](https://docs.replicated.com/llms/llms-docs.txt): This file contains the contents of the docs/ directory in the [replicated-docs](https://github.com/replicatedhq/replicated-docs) repository, excluding certain subdirectories as explained in Notes below. - -## Notes - -- The content in the llms-docs.txt file is automatically generated from the same source as the Replicated documentation each time the site is built -- The llms-docs.txt file excludes any files and directories in the replicated-docs repo that are outside of the docs/ directory. 
Additionally, the following subdirectories of docs/ are excluded from the llms-docs.txt file: - - docs/release-notes/ - - docs/templates/ - - docs/pdfs/ - - docs/.history/ \ No newline at end of file From d70692dd7ed3530c5ce11cc3e51f83f631d4ea4b Mon Sep 17 00:00:00 2001 From: Paige Calvert <paige@replicated.com> Date: Thu, 27 Mar 2025 17:47:35 -0600 Subject: [PATCH 8/9] edit readme --- README.md | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/README.md b/README.md index 7fe7c4b755..245202d916 100644 --- a/README.md +++ b/README.md @@ -71,9 +71,4 @@ Before pushing changes to the remote repository, build and serve the site locall ```bash npm run serve - ``` - -## Replicated Documentation for LLMs - -- [llms.txt](https://docs.replicated.com/llms/llms.txt): This file provides an overview of the Replicated Documentation -- [llms-docs.txt](https://docs.replicated.com/llms/llms-docs.txt): This file contains the contents of the docs/ directory in the [replicated-docs](https://github.com/replicatedhq/replicated-docs) repository \ No newline at end of file From 14a082c260e862d0aa06f3ee0040164e0e21fcbf Mon Sep 17 00:00:00 2001 From: Paige Calvert <paige@replicated.com> Date: Thu, 27 Mar 2025 17:53:40 -0600 Subject: [PATCH 9/9] edit readme --- README.md | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 245202d916..aae413683a 100644 --- a/README.md +++ b/README.md @@ -71,4 +71,14 @@ Before pushing changes to the remote repository, build and serve the site locall ```bash npm run serve - ``` + +## Replicated Documentation for LLMs + +Replicated supports the [llms.txt](https://llmstxt.org/) convention for making documentation available to LLMs. + +/llms.txt: a listing of the available files +/llms-full.txt: the complete documentation in a single file + +- [llms.txt](https://docs.replicated.com/llms/llms.txt): This file contains Markdown versions of each page on the docs site as well as optional resources. +- [llms-full.txt](https://docs.replicated.com/llms/llms-docs.txt): This file contains the contents of the docs/ directory in the [replicated-docs](https://github.com/replicatedhq/replicated-docs) repository \ No newline at end of file